-From 3a302437605308079db398b67000a77a4fe92da8 Mon Sep 17 00:00:00 2001
+From 77cc39e936f87463f92f7fddaaf0de51eec3972f Mon Sep 17 00:00:00 2001
From: Yangbo Lu <yangbo.lu@nxp.com>
-Date: Mon, 25 Sep 2017 12:07:58 +0800
+Date: Fri, 6 Jul 2018 15:30:21 +0800
Subject: [PATCH] dpaa2: support layerscape
-This is a integrated patch for layerscape dpaa2 support.
+This is an integrated patch for layerscape dpaa2 support.
Signed-off-by: Bogdan Purcareata <bogdan.purcareata@nxp.com>
Signed-off-by: Ioana Radulescu <ruxandra.radulescu@nxp.com>
Signed-off-by: Razvan Stefanescu <razvan.stefanescu@nxp.com>
Signed-off-by: costi <constantin.tudor@freescale.com>
Signed-off-by: Catalin Horghidan <catalin.horghidan@nxp.com>
+Signed-off-by: Mathew McBride <matt@traverse.com.au>
Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
---
- drivers/soc/fsl/ls2-console/Kconfig | 4 +
- drivers/soc/fsl/ls2-console/Makefile | 1 +
- drivers/soc/fsl/ls2-console/ls2-console.c | 284 ++
- drivers/staging/fsl-dpaa2/ethernet/Makefile | 11 +
- drivers/staging/fsl-dpaa2/ethernet/README | 186 ++
- .../staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c | 350 +++
- .../staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.h | 60 +
- .../staging/fsl-dpaa2/ethernet/dpaa2-eth-trace.h | 184 ++
- drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c | 3155 ++++++++++++++++++++
- drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h | 460 +++
- drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c | 856 ++++++
- drivers/staging/fsl-dpaa2/ethernet/dpkg.h | 176 ++
- drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h | 600 ++++
- drivers/staging/fsl-dpaa2/ethernet/dpni.c | 1770 +++++++++++
- drivers/staging/fsl-dpaa2/ethernet/dpni.h | 989 ++++++
- drivers/staging/fsl-dpaa2/ethernet/net.h | 480 +++
- drivers/staging/fsl-dpaa2/ethsw/Kconfig | 6 +
- drivers/staging/fsl-dpaa2/ethsw/Makefile | 10 +
- drivers/staging/fsl-dpaa2/ethsw/dpsw-cmd.h | 851 ++++++
- drivers/staging/fsl-dpaa2/ethsw/dpsw.c | 2762 +++++++++++++++++
- drivers/staging/fsl-dpaa2/ethsw/dpsw.h | 1269 ++++++++
- drivers/staging/fsl-dpaa2/ethsw/switch.c | 1857 ++++++++++++
- drivers/staging/fsl-dpaa2/evb/Kconfig | 7 +
- drivers/staging/fsl-dpaa2/evb/Makefile | 10 +
- drivers/staging/fsl-dpaa2/evb/dpdmux-cmd.h | 279 ++
- drivers/staging/fsl-dpaa2/evb/dpdmux.c | 1112 +++++++
- drivers/staging/fsl-dpaa2/evb/dpdmux.h | 453 +++
- drivers/staging/fsl-dpaa2/evb/evb.c | 1350 +++++++++
- drivers/staging/fsl-dpaa2/mac/Kconfig | 23 +
- drivers/staging/fsl-dpaa2/mac/Makefile | 10 +
- drivers/staging/fsl-dpaa2/mac/dpmac-cmd.h | 172 ++
- drivers/staging/fsl-dpaa2/mac/dpmac.c | 620 ++++
- drivers/staging/fsl-dpaa2/mac/dpmac.h | 342 +++
- drivers/staging/fsl-dpaa2/mac/mac.c | 666 +++++
- drivers/staging/fsl-dpaa2/rtc/Makefile | 10 +
- drivers/staging/fsl-dpaa2/rtc/dprtc-cmd.h | 160 +
- drivers/staging/fsl-dpaa2/rtc/dprtc.c | 746 +++++
- drivers/staging/fsl-dpaa2/rtc/dprtc.h | 172 ++
- drivers/staging/fsl-dpaa2/rtc/rtc.c | 243 ++
- 39 files changed, 22696 insertions(+)
+ drivers/soc/fsl/ls2-console/Kconfig | 4 +
+ drivers/soc/fsl/ls2-console/Makefile | 1 +
+ drivers/soc/fsl/ls2-console/ls2-console.c | 284 ++
+ drivers/staging/fsl-dpaa2/ethernet/Makefile | 12 +
+ drivers/staging/fsl-dpaa2/ethernet/README | 186 +
+ drivers/staging/fsl-dpaa2/ethernet/TODO | 18 +
+ .../fsl-dpaa2/ethernet/dpaa2-eth-ceetm.c | 1253 ++++++
+ .../fsl-dpaa2/ethernet/dpaa2-eth-ceetm.h | 182 +
+ .../fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c | 357 ++
+ .../fsl-dpaa2/ethernet/dpaa2-eth-debugfs.h | 60 +
+ .../fsl-dpaa2/ethernet/dpaa2-eth-trace.h | 185 +
+ .../staging/fsl-dpaa2/ethernet/dpaa2-eth.c | 3734 +++++++++++++++++
+ .../staging/fsl-dpaa2/ethernet/dpaa2-eth.h | 601 +++
+ .../fsl-dpaa2/ethernet/dpaa2-ethtool.c | 878 ++++
+ drivers/staging/fsl-dpaa2/ethernet/dpkg.h | 176 +
+ drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h | 719 ++++
+ drivers/staging/fsl-dpaa2/ethernet/dpni.c | 2112 ++++++++++
+ drivers/staging/fsl-dpaa2/ethernet/dpni.h | 1172 ++++++
+ drivers/staging/fsl-dpaa2/ethernet/net.h | 480 +++
+ drivers/staging/fsl-dpaa2/ethsw/Makefile | 10 +
+ drivers/staging/fsl-dpaa2/ethsw/README | 106 +
+ drivers/staging/fsl-dpaa2/ethsw/TODO | 14 +
+ drivers/staging/fsl-dpaa2/ethsw/dpsw-cmd.h | 359 ++
+ drivers/staging/fsl-dpaa2/ethsw/dpsw.c | 1165 +++++
+ drivers/staging/fsl-dpaa2/ethsw/dpsw.h | 592 +++
+ .../staging/fsl-dpaa2/ethsw/ethsw-ethtool.c | 206 +
+ drivers/staging/fsl-dpaa2/ethsw/ethsw.c | 1438 +++++++
+ drivers/staging/fsl-dpaa2/ethsw/ethsw.h | 90 +
+ drivers/staging/fsl-dpaa2/evb/Kconfig | 7 +
+ drivers/staging/fsl-dpaa2/evb/Makefile | 10 +
+ drivers/staging/fsl-dpaa2/evb/dpdmux-cmd.h | 279 ++
+ drivers/staging/fsl-dpaa2/evb/dpdmux.c | 1111 +++++
+ drivers/staging/fsl-dpaa2/evb/dpdmux.h | 453 ++
+ drivers/staging/fsl-dpaa2/evb/evb.c | 1354 ++++++
+ drivers/staging/fsl-dpaa2/mac/Kconfig | 23 +
+ drivers/staging/fsl-dpaa2/mac/Makefile | 10 +
+ drivers/staging/fsl-dpaa2/mac/dpmac-cmd.h | 172 +
+ drivers/staging/fsl-dpaa2/mac/dpmac.c | 619 +++
+ drivers/staging/fsl-dpaa2/mac/dpmac.h | 342 ++
+ drivers/staging/fsl-dpaa2/mac/mac.c | 673 +++
+ drivers/staging/fsl-dpaa2/rtc/Makefile | 10 +
+ drivers/staging/fsl-dpaa2/rtc/dprtc-cmd.h | 160 +
+ drivers/staging/fsl-dpaa2/rtc/dprtc.c | 746 ++++
+ drivers/staging/fsl-dpaa2/rtc/dprtc.h | 172 +
+ drivers/staging/fsl-dpaa2/rtc/rtc.c | 242 ++
+ include/linux/filter.h | 3 +
+ 46 files changed, 22780 insertions(+)
create mode 100644 drivers/soc/fsl/ls2-console/Kconfig
create mode 100644 drivers/soc/fsl/ls2-console/Makefile
create mode 100644 drivers/soc/fsl/ls2-console/ls2-console.c
create mode 100644 drivers/staging/fsl-dpaa2/ethernet/Makefile
create mode 100644 drivers/staging/fsl-dpaa2/ethernet/README
+ create mode 100644 drivers/staging/fsl-dpaa2/ethernet/TODO
+ create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-ceetm.c
+ create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-ceetm.h
create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c
create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.h
create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-trace.h
create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpni.c
create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpni.h
create mode 100644 drivers/staging/fsl-dpaa2/ethernet/net.h
- create mode 100644 drivers/staging/fsl-dpaa2/ethsw/Kconfig
create mode 100644 drivers/staging/fsl-dpaa2/ethsw/Makefile
+ create mode 100644 drivers/staging/fsl-dpaa2/ethsw/README
+ create mode 100644 drivers/staging/fsl-dpaa2/ethsw/TODO
create mode 100644 drivers/staging/fsl-dpaa2/ethsw/dpsw-cmd.h
create mode 100644 drivers/staging/fsl-dpaa2/ethsw/dpsw.c
create mode 100644 drivers/staging/fsl-dpaa2/ethsw/dpsw.h
- create mode 100644 drivers/staging/fsl-dpaa2/ethsw/switch.c
+ create mode 100644 drivers/staging/fsl-dpaa2/ethsw/ethsw-ethtool.c
+ create mode 100644 drivers/staging/fsl-dpaa2/ethsw/ethsw.c
+ create mode 100644 drivers/staging/fsl-dpaa2/ethsw/ethsw.h
create mode 100644 drivers/staging/fsl-dpaa2/evb/Kconfig
create mode 100644 drivers/staging/fsl-dpaa2/evb/Makefile
create mode 100644 drivers/staging/fsl-dpaa2/evb/dpdmux-cmd.h
+MODULE_DESCRIPTION("Freescale LS2 console driver");
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/ethernet/Makefile
-@@ -0,0 +1,11 @@
+@@ -0,0 +1,12 @@
+#
+# Makefile for the Freescale DPAA2 Ethernet controller
+#
+
+fsl-dpaa2-eth-objs := dpaa2-eth.o dpaa2-ethtool.o dpni.o
+fsl-dpaa2-eth-${CONFIG_FSL_DPAA2_ETH_DEBUGFS} += dpaa2-eth-debugfs.o
++fsl-dpaa2-eth-${CONFIG_FSL_DPAA2_ETH_CEETM} += dpaa2-eth-ceetm.o
+
+# Needed by the tracing framework
+CFLAGS_dpaa2-eth.o := -I$(src)
+Hardware-specific statistics for the network interface, as well as some
+non-standard driver stats, can be consulted through the ethtool -S option.
--- /dev/null
-+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c
-@@ -0,0 +1,350 @@
-+
-+/* Copyright 2015 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
++++ b/drivers/staging/fsl-dpaa2/ethernet/TODO
+@@ -0,0 +1,18 @@
++* Add a DPAA2 MAC kernel driver in order to allow PHY management; currently
++ the DPMAC objects and their link to DPNIs are handled by MC internally
++ and all PHYs are seen as fixed-link
++* add more debug support: decide how to expose detailed debug statistics,
++ add ingress error queue support
++* MC firmware uprev; the DPAA2 objects used by the Ethernet driver need to
++ be kept in sync with binary interface changes in MC
++* refine README file
++* cleanup
++
++NOTE: None of the above is a must-have before getting the DPAA2 Ethernet driver
++out of staging. The main requirement for that is to have the drivers it
++depends on, fsl-mc bus and DPIO driver, moved to drivers/bus and drivers/soc
++respectively.
++
++ Please send any patches to Greg Kroah-Hartman <gregkh@linuxfoundation.org>,
++ ruxandra.radulescu@nxp.com, devel@driverdev.osuosl.org,
++ linux-kernel@vger.kernel.org
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-ceetm.c
+@@ -0,0 +1,1253 @@
++// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
++/*
++ * Copyright 2017 NXP
+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
++#include <linux/init.h>
+#include <linux/module.h>
-+#include <linux/debugfs.h>
++
++#include "dpaa2-eth-ceetm.h"
+#include "dpaa2-eth.h"
-+#include "dpaa2-eth-debugfs.h"
+
-+#define DPAA2_ETH_DBG_ROOT "dpaa2-eth"
++#define DPAA2_CEETM_DESCRIPTION "FSL DPAA2 CEETM qdisc"
++/* Convert a rate passed from userspace in Bps to the Mbit value
++ * expected by the hardware (approximated by a power-of-two shift)
++ */
++#define dpaa2_eth_bps_to_mbit(rate) ((rate) >> 17)
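++/* A quick sanity check of the shift: 12500000 Bps is 100 Mbit/s, and
++ * 12500000 >> 17 = 95, so dividing by 131072 slightly under-estimates
++ * the exact divide-by-125000 conversion.
++ */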
+
-+static struct dentry *dpaa2_dbg_root;
++static const struct nla_policy dpaa2_ceetm_policy[DPAA2_CEETM_TCA_MAX] = {
++ [DPAA2_CEETM_TCA_COPT] = { .len = sizeof(struct dpaa2_ceetm_tc_copt) },
++ [DPAA2_CEETM_TCA_QOPS] = { .len = sizeof(struct dpaa2_ceetm_tc_qopt) },
++};
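++
++/* Illustrative configuration flow, assuming the matching iproute2 ceetm
++ * patches are installed (the exact userspace syntax is an assumption):
++ * tc qdisc add dev <if> root handle 1: ceetm type root
++ * tc class add dev <if> parent 1: classid 1:1 ceetm type root
++ * tc qdisc add dev <if> parent 1:1 handle 2: ceetm type prio
++ */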
+
-+static int dpaa2_dbg_cpu_show(struct seq_file *file, void *offset)
-+{
-+ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private;
-+ struct rtnl_link_stats64 *stats;
-+ struct dpaa2_eth_drv_stats *extras;
-+ int i;
++struct Qdisc_ops dpaa2_ceetm_qdisc_ops;
+
-+ seq_printf(file, "Per-CPU stats for %s\n", priv->net_dev->name);
-+ seq_printf(file, "%s%16s%16s%16s%16s%16s%16s%16s%16s\n",
-+ "CPU", "Rx", "Rx Err", "Rx SG", "Tx", "Tx Err", "Tx conf",
-+ "Tx SG", "Enq busy");
++static inline int dpaa2_eth_set_ch_shaping(struct dpaa2_eth_priv *priv,
++ struct dpni_tx_shaping_cfg *scfg,
++ struct dpni_tx_shaping_cfg *ecfg,
++ int coupled, int ch_id)
++{
++ int err = 0;
+
-+ for_each_online_cpu(i) {
-+ stats = per_cpu_ptr(priv->percpu_stats, i);
-+ extras = per_cpu_ptr(priv->percpu_extras, i);
-+ seq_printf(file, "%3d%16llu%16llu%16llu%16llu%16llu%16llu%16llu%16llu\n",
-+ i,
-+ stats->rx_packets,
-+ stats->rx_errors,
-+ extras->rx_sg_frames,
-+ stats->tx_packets,
-+ stats->tx_errors,
-+ extras->tx_conf_frames,
-+ extras->tx_sg_frames,
-+ extras->tx_portal_busy);
-+ }
++ netdev_dbg(priv->net_dev, "%s: ch_id %d rate %d Mbit/s\n", __func__,
++ ch_id, scfg->rate_limit);
++ err = dpni_set_tx_shaping(priv->mc_io, 0, priv->mc_token, scfg,
++ ecfg, coupled);
++ if (err)
++ netdev_err(priv->net_dev, "dpni_set_tx_shaping err\n");
+
-+ return 0;
++ return err;
+}
+
-+static int dpaa2_dbg_cpu_open(struct inode *inode, struct file *file)
++static inline int dpaa2_eth_reset_ch_shaping(struct dpaa2_eth_priv *priv,
++ int ch_id)
+{
-+ int err;
-+ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private;
-+
-+ err = single_open(file, dpaa2_dbg_cpu_show, priv);
-+ if (err < 0)
-+ netdev_err(priv->net_dev, "single_open() failed\n");
++ struct dpni_tx_shaping_cfg cfg = { 0 };
+
-+ return err;
++ return dpaa2_eth_set_ch_shaping(priv, &cfg, &cfg, 0, ch_id);
+}
+
-+static const struct file_operations dpaa2_dbg_cpu_ops = {
-+ .open = dpaa2_dbg_cpu_open,
-+ .read = seq_read,
-+ .llseek = seq_lseek,
-+ .release = single_release,
-+};
-+
-+static char *fq_type_to_str(struct dpaa2_eth_fq *fq)
++static inline int
++dpaa2_eth_update_shaping_cfg(struct net_device *dev,
++ struct dpaa2_ceetm_shaping_cfg cfg,
++ struct dpni_tx_shaping_cfg *scfg,
++ struct dpni_tx_shaping_cfg *ecfg)
+{
-+ switch (fq->type) {
-+ case DPAA2_RX_FQ:
-+ return "Rx";
-+ case DPAA2_TX_CONF_FQ:
-+ return "Tx conf";
-+ case DPAA2_RX_ERR_FQ:
-+ return "Rx err";
-+ default:
-+ return "N/A";
++ scfg->rate_limit = dpaa2_eth_bps_to_mbit(cfg.cir);
++ ecfg->rate_limit = dpaa2_eth_bps_to_mbit(cfg.eir);
++
++ if (cfg.cbs > DPAA2_ETH_MAX_BURST_SIZE) {
++ netdev_err(dev, "Committed burst size must be under %d\n",
++ DPAA2_ETH_MAX_BURST_SIZE);
++ return -EINVAL;
+ }
-+}
+
-+static int dpaa2_dbg_fqs_show(struct seq_file *file, void *offset)
-+{
-+ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private;
-+ struct dpaa2_eth_fq *fq;
-+ u32 fcnt, bcnt;
-+ int i, err;
++ scfg->max_burst_size = cfg.cbs;
+
-+ seq_printf(file, "FQ stats for %s:\n", priv->net_dev->name);
-+ seq_printf(file, "%s%16s%16s%16s%16s%16s\n",
-+ "VFQID", "CPU", "Type", "Frames", "Pending frames",
-+ "Congestion");
++ if (cfg.ebs > DPAA2_ETH_MAX_BURST_SIZE) {
++ netdev_err(dev, "Excess burst size must be under %d\n",
++ DPAA2_ETH_MAX_BURST_SIZE);
++ return -EINVAL;
++ }
+
-+ for (i = 0; i < priv->num_fqs; i++) {
-+ fq = &priv->fq[i];
-+ err = dpaa2_io_query_fq_count(NULL, fq->fqid, &fcnt, &bcnt);
-+ if (err)
-+ fcnt = 0;
++ ecfg->max_burst_size = cfg.ebs;
+
-+ seq_printf(file, "%5d%16d%16s%16llu%16u%16llu\n",
-+ fq->fqid,
-+ fq->target_cpu,
-+ fq_type_to_str(fq),
-+ fq->stats.frames,
-+ fcnt,
-+ fq->stats.congestion_entry);
++ if ((!cfg.cir || !cfg.eir) && cfg.coupled) {
++ netdev_err(dev, "Coupling can be set when both CIR and EIR are finite\n");
++ return -EINVAL;
+ }
+
+ return 0;
+}
+
-+static int dpaa2_dbg_fqs_open(struct inode *inode, struct file *file)
++enum update_tx_prio {
++ DPAA2_ETH_ADD_CQ,
++ DPAA2_ETH_DEL_CQ,
++};
++
++/* Normalize weights based on max passed value */
++static inline int dpaa2_eth_normalize_tx_prio(struct dpaa2_ceetm_qdisc *priv)
+{
-+ int err;
-+ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private;
++ struct dpni_tx_schedule_cfg *sched_cfg;
++ struct dpaa2_ceetm_class *cl;
++ u32 qpri;
++ u16 weight_max = 0, increment;
++ int i;
+
-+ err = single_open(file, dpaa2_dbg_fqs_show, priv);
-+ if (err < 0)
-+ netdev_err(priv->net_dev, "single_open() failed\n");
++ /* Find the largest weight provided across all classes */
++ for (i = 0; i < priv->clhash.hashsize; i++)
++ hlist_for_each_entry(cl, &priv->clhash.hash[i], common.hnode)
++ weight_max = max(weight_max, cl->prio.weight);
+
-+ return err;
-+}
++ /* If there are no elements, there's nothing to do */
++ if (weight_max == 0)
++ return 0;
+
-+static const struct file_operations dpaa2_dbg_fq_ops = {
-+ .open = dpaa2_dbg_fqs_open,
-+ .read = seq_read,
-+ .llseek = seq_lseek,
-+ .release = single_release,
-+};
++ increment = (DPAA2_CEETM_MAX_WEIGHT - DPAA2_CEETM_MIN_WEIGHT) /
++ weight_max;
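++ /* e.g. user weights {1, 2, 4} give weight_max = 4, so the classes
++ * end up at 1/4, 2/4 and 4/4 of the [MIN, MAX] weight range below
++ */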
+
-+static int dpaa2_dbg_ch_show(struct seq_file *file, void *offset)
-+{
-+ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private;
-+ struct dpaa2_eth_channel *ch;
-+ int i;
++ for (i = 0; i < priv->clhash.hashsize; i++) {
++ hlist_for_each_entry(cl, &priv->clhash.hash[i], common.hnode) {
++ if (cl->prio.mode == STRICT_PRIORITY)
++ continue;
+
-+ seq_printf(file, "Channel stats for %s:\n", priv->net_dev->name);
-+ seq_printf(file, "%s%16s%16s%16s%16s%16s\n",
-+ "CHID", "CPU", "Deq busy", "Frames", "CDANs",
-+ "Avg frm/CDAN");
++ qpri = cl->prio.qpri;
++ sched_cfg = &priv->prio.tx_prio_cfg.tc_sched[qpri];
+
-+ for (i = 0; i < priv->num_channels; i++) {
-+ ch = priv->channel[i];
-+ seq_printf(file, "%4d%16d%16llu%16llu%16llu%16llu\n",
-+ ch->ch_id,
-+ ch->nctx.desired_cpu,
-+ ch->stats.dequeue_portal_busy,
-+ ch->stats.frames,
-+ ch->stats.cdan,
-+ ch->stats.frames / ch->stats.cdan);
++ sched_cfg->delta_bandwidth =
++ DPAA2_CEETM_MIN_WEIGHT +
++ (cl->prio.weight * increment);
++
++ pr_debug("%s: Normalized CQ qpri %d weight to %d\n",
++ __func__, qpri, sched_cfg->delta_bandwidth);
++ }
+ }
+
+ return 0;
+}
+
-+static int dpaa2_dbg_ch_open(struct inode *inode, struct file *file)
++static inline int dpaa2_eth_update_tx_prio(struct dpaa2_eth_priv *priv,
++ struct dpaa2_ceetm_class *cl,
++ enum update_tx_prio type)
+{
-+ int err;
-+ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private;
-+
-+ err = single_open(file, dpaa2_dbg_ch_show, priv);
-+ if (err < 0)
-+ netdev_err(priv->net_dev, "single_open() failed\n");
-+
-+ return err;
-+}
++ struct dpaa2_ceetm_qdisc *sch = qdisc_priv(cl->parent);
++ struct dpni_congestion_notification_cfg notif_cfg = {0};
++ struct dpni_tx_schedule_cfg *sched_cfg;
++ struct dpni_taildrop td = {0};
++ u8 ch_id = 0, tc_id = 0;
++ u32 qpri = 0;
++ int err = 0;
+
-+static const struct file_operations dpaa2_dbg_ch_ops = {
-+ .open = dpaa2_dbg_ch_open,
-+ .read = seq_read,
-+ .llseek = seq_lseek,
-+ .release = single_release,
-+};
++ qpri = cl->prio.qpri;
++ tc_id = DPNI_BUILD_CH_TC(ch_id, qpri);
++
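++ /* Claiming a CQ (ADD) trades the core driver's byte-based congestion
++ * notifications for frame-based taildrop; releasing it (DEL) restores
++ * the congestion notification setup.
++ */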
++ switch (type) {
++ case DPAA2_ETH_ADD_CQ:
++ /* Disable congestion notifications */
++ notif_cfg.threshold_entry = 0;
++ notif_cfg.threshold_exit = 0;
++ err = dpni_set_congestion_notification(priv->mc_io, 0,
++ priv->mc_token,
++ DPNI_QUEUE_TX, tc_id,
++ &notif_cfg);
++ if (err) {
++ netdev_err(priv->net_dev, "Error disabling congestion notifications %d\n",
++ err);
++ return err;
++ }
++ /* Enable taildrop */
++ td.enable = 1;
++ td.units = DPNI_CONGESTION_UNIT_FRAMES;
++ td.threshold = DPAA2_CEETM_TD_THRESHOLD;
++ err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
++ DPNI_CP_GROUP, DPNI_QUEUE_TX, tc_id,
++ 0, &td);
++ if (err) {
++ netdev_err(priv->net_dev, "Error enabling Tx taildrop %d\n",
++ err);
++ return err;
++ }
++ break;
++ case DPAA2_ETH_DEL_CQ:
++ /* Disable taildrop */
++ td.enable = 0;
++ err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
++ DPNI_CP_GROUP, DPNI_QUEUE_TX, tc_id,
++ 0, &td);
++ if (err) {
++ netdev_err(priv->net_dev, "Error disabling Tx taildrop %d\n",
++ err);
++ return err;
++ }
++ /* Enable congestion notifications */
++ notif_cfg.units = DPNI_CONGESTION_UNIT_BYTES;
++ notif_cfg.threshold_entry = DPAA2_ETH_TX_CONG_ENTRY_THRESH;
++ notif_cfg.threshold_exit = DPAA2_ETH_TX_CONG_EXIT_THRESH;
++ notif_cfg.message_ctx = (u64)priv;
++ notif_cfg.message_iova = priv->cscn_dma;
++ notif_cfg.notification_mode = DPNI_CONG_OPT_WRITE_MEM_ON_ENTER |
++ DPNI_CONG_OPT_WRITE_MEM_ON_EXIT |
++ DPNI_CONG_OPT_COHERENT_WRITE;
++ err = dpni_set_congestion_notification(priv->mc_io, 0,
++ priv->mc_token,
++ DPNI_QUEUE_TX, tc_id,
++ &notif_cfg);
++ if (err) {
++ netdev_err(priv->net_dev, "Error enabling congestion notifications %d\n",
++ err);
++ return err;
++ }
++ break;
++ }
+
-+static ssize_t dpaa2_dbg_reset_write(struct file *file, const char __user *buf,
-+ size_t count, loff_t *offset)
-+{
-+ struct dpaa2_eth_priv *priv = file->private_data;
-+ struct rtnl_link_stats64 *percpu_stats;
-+ struct dpaa2_eth_drv_stats *percpu_extras;
-+ struct dpaa2_eth_fq *fq;
-+ struct dpaa2_eth_channel *ch;
-+ int i;
++ /* We can zero out the structure in the tx_prio_cfg array */
++ if (type == DPAA2_ETH_DEL_CQ) {
++ sched_cfg = &sch->prio.tx_prio_cfg.tc_sched[qpri];
++ memset(sched_cfg, 0, sizeof(*sched_cfg));
++ }
+
-+ for_each_online_cpu(i) {
-+ percpu_stats = per_cpu_ptr(priv->percpu_stats, i);
-+ memset(percpu_stats, 0, sizeof(*percpu_stats));
++ /* Normalize priorities */
++ err = dpaa2_eth_normalize_tx_prio(sch);
+
-+ percpu_extras = per_cpu_ptr(priv->percpu_extras, i);
-+ memset(percpu_extras, 0, sizeof(*percpu_extras));
-+ }
++ /* Dump the resulting Tx scheduling configuration for debugging */
++ print_hex_dump_debug("tx_prio: ", DUMP_PREFIX_OFFSET, 16, 1,
++ &sch->prio.tx_prio_cfg,
++ sizeof(sch->prio.tx_prio_cfg), 0);
+
-+ for (i = 0; i < priv->num_fqs; i++) {
-+ fq = &priv->fq[i];
-+ memset(&fq->stats, 0, sizeof(fq->stats));
-+ }
++ /* Call dpni_set_tx_priorities for the entire prio qdisc */
++ err = dpni_set_tx_priorities(priv->mc_io, 0, priv->mc_token,
++ &sch->prio.tx_prio_cfg);
++ if (err)
++ netdev_err(priv->net_dev, "dpni_set_tx_priorities err %d\n",
++ err);
+
-+ for (i = 0; i < priv->num_channels; i++) {
-+ ch = priv->channel[i];
-+ memset(&ch->stats, 0, sizeof(ch->stats));
-+ }
++ return err;
++}
+
-+ return count;
++static void dpaa2_eth_ceetm_enable(struct dpaa2_eth_priv *priv)
++{
++ priv->ceetm_en = true;
+}
+
-+static const struct file_operations dpaa2_dbg_reset_ops = {
-+ .open = simple_open,
-+ .write = dpaa2_dbg_reset_write,
-+};
++static void dpaa2_eth_ceetm_disable(struct dpaa2_eth_priv *priv)
++{
++ priv->ceetm_en = false;
++}
+
-+static ssize_t dpaa2_dbg_reset_mc_write(struct file *file,
-+ const char __user *buf,
-+ size_t count, loff_t *offset)
++/* Find class in qdisc hash table using given handle */
++static inline struct dpaa2_ceetm_class *dpaa2_ceetm_find(u32 handle,
++ struct Qdisc *sch)
+{
-+ struct dpaa2_eth_priv *priv = file->private_data;
-+ int err;
++ struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
++ struct Qdisc_class_common *clc;
+
-+ err = dpni_reset_statistics(priv->mc_io, 0, priv->mc_token);
-+ if (err)
-+ netdev_err(priv->net_dev,
-+ "dpni_reset_statistics() failed %d\n", err);
++ pr_debug(KBUILD_BASENAME " : %s : find class %X in qdisc %X\n",
++ __func__, handle, sch->handle);
+
-+ return count;
++ clc = qdisc_class_find(&priv->clhash, handle);
++ return clc ? container_of(clc, struct dpaa2_ceetm_class, common) : NULL;
+}
+
-+static const struct file_operations dpaa2_dbg_reset_mc_ops = {
-+ .open = simple_open,
-+ .write = dpaa2_dbg_reset_mc_write,
-+};
++/* Insert a class in the qdisc's class hash */
++static void dpaa2_ceetm_link_class(struct Qdisc *sch,
++ struct Qdisc_class_hash *clhash,
++ struct Qdisc_class_common *common)
++{
++ sch_tree_lock(sch);
++ qdisc_class_hash_insert(clhash, common);
++ sch_tree_unlock(sch);
++ qdisc_class_hash_grow(sch, clhash);
++}
+
-+void dpaa2_dbg_add(struct dpaa2_eth_priv *priv)
++/* Destroy a ceetm class */
++static void dpaa2_ceetm_cls_destroy(struct Qdisc *sch,
++ struct dpaa2_ceetm_class *cl)
+{
-+ if (!dpaa2_dbg_root)
-+ return;
++ struct net_device *dev = qdisc_dev(sch);
++ struct dpaa2_eth_priv *priv = netdev_priv(dev);
+
-+ /* Create a directory for the interface */
-+ priv->dbg.dir = debugfs_create_dir(priv->net_dev->name,
-+ dpaa2_dbg_root);
-+ if (!priv->dbg.dir) {
-+ netdev_err(priv->net_dev, "debugfs_create_dir() failed\n");
++ if (!cl)
+ return;
-+ }
+
-+ /* per-cpu stats file */
-+ priv->dbg.cpu_stats = debugfs_create_file("cpu_stats", 0444,
-+ priv->dbg.dir, priv,
-+ &dpaa2_dbg_cpu_ops);
-+ if (!priv->dbg.cpu_stats) {
-+ netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
-+ goto err_cpu_stats;
++ pr_debug(KBUILD_BASENAME " : %s : destroy class %X from under %X\n",
++ __func__, cl->common.classid, sch->handle);
++
++ /* Recurse into child first */
++ if (cl->child) {
++ qdisc_destroy(cl->child);
++ cl->child = NULL;
+ }
+
-+ /* per-fq stats file */
-+ priv->dbg.fq_stats = debugfs_create_file("fq_stats", 0444,
-+ priv->dbg.dir, priv,
-+ &dpaa2_dbg_fq_ops);
-+ if (!priv->dbg.fq_stats) {
-+ netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
-+ goto err_fq_stats;
-+ }
++ switch (cl->type) {
++ case CEETM_ROOT:
++ if (dpaa2_eth_reset_ch_shaping(priv, cl->root.ch_id))
++ netdev_err(dev, "Error resetting channel shaping\n");
+
-+ /* per-fq stats file */
-+ priv->dbg.ch_stats = debugfs_create_file("ch_stats", 0444,
-+ priv->dbg.dir, priv,
-+ &dpaa2_dbg_ch_ops);
-+ if (!priv->dbg.fq_stats) {
-+ netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
-+ goto err_ch_stats;
-+ }
++ break;
+
-+ /* reset stats */
-+ priv->dbg.reset_stats = debugfs_create_file("reset_stats", 0200,
-+ priv->dbg.dir, priv,
-+ &dpaa2_dbg_reset_ops);
-+ if (!priv->dbg.reset_stats) {
-+ netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
-+ goto err_reset_stats;
-+ }
++ case CEETM_PRIO:
++ if (dpaa2_eth_update_tx_prio(priv, cl, DPAA2_ETH_DEL_CQ))
++ netdev_err(dev, "Error resetting tx_priorities\n");
+
-+ /* reset MC stats */
-+ priv->dbg.reset_mc_stats = debugfs_create_file("reset_mc_stats",
-+ 0222, priv->dbg.dir, priv,
-+ &dpaa2_dbg_reset_mc_ops);
-+ if (!priv->dbg.reset_mc_stats) {
-+ netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
-+ goto err_reset_mc_stats;
-+ }
++ if (cl->prio.cstats)
++ free_percpu(cl->prio.cstats);
+
-+ return;
++ break;
++ }
+
-+err_reset_mc_stats:
-+ debugfs_remove(priv->dbg.reset_stats);
-+err_reset_stats:
-+ debugfs_remove(priv->dbg.ch_stats);
-+err_ch_stats:
-+ debugfs_remove(priv->dbg.fq_stats);
-+err_fq_stats:
-+ debugfs_remove(priv->dbg.cpu_stats);
-+err_cpu_stats:
-+ debugfs_remove(priv->dbg.dir);
++ tcf_destroy_chain(&cl->filter_list);
++ kfree(cl);
+}
+
-+void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv)
++/* Destroy a ceetm qdisc */
++static void dpaa2_ceetm_destroy(struct Qdisc *sch)
+{
-+ debugfs_remove(priv->dbg.reset_mc_stats);
-+ debugfs_remove(priv->dbg.reset_stats);
-+ debugfs_remove(priv->dbg.fq_stats);
-+ debugfs_remove(priv->dbg.ch_stats);
-+ debugfs_remove(priv->dbg.cpu_stats);
-+ debugfs_remove(priv->dbg.dir);
-+}
++ unsigned int i;
++ struct hlist_node *next;
++ struct dpaa2_ceetm_class *cl;
++ struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
++ struct net_device *dev = qdisc_dev(sch);
++ struct dpaa2_eth_priv *priv_eth = netdev_priv(dev);
+
-+void dpaa2_eth_dbg_init(void)
-+{
-+ dpaa2_dbg_root = debugfs_create_dir(DPAA2_ETH_DBG_ROOT, NULL);
-+ if (!dpaa2_dbg_root) {
-+ pr_err("DPAA2-ETH: debugfs create failed\n");
-+ return;
-+ }
++ pr_debug(KBUILD_BASENAME " : %s : destroy qdisc %X\n",
++ __func__, sch->handle);
+
-+ pr_info("DPAA2-ETH: debugfs created\n");
-+}
++ /* All filters need to be removed before destroying the classes */
++ tcf_destroy_chain(&priv->filter_list);
+
-+void __exit dpaa2_eth_dbg_exit(void)
-+{
-+ debugfs_remove(dpaa2_dbg_root);
-+}
---- /dev/null
-+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.h
-@@ -0,0 +1,60 @@
-+/* Copyright 2015 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
++ for (i = 0; i < priv->clhash.hashsize; i++) {
++ hlist_for_each_entry(cl, &priv->clhash.hash[i], common.hnode)
++ tcf_destroy_chain(&cl->filter_list);
++ }
+
-+#ifndef DPAA2_ETH_DEBUGFS_H
-+#define DPAA2_ETH_DEBUGFS_H
++ for (i = 0; i < priv->clhash.hashsize; i++) {
++ hlist_for_each_entry_safe(cl, next, &priv->clhash.hash[i],
++ common.hnode)
++ dpaa2_ceetm_cls_destroy(sch, cl);
++ }
+
-+#include <linux/dcache.h>
++ qdisc_class_hash_destroy(&priv->clhash);
+
-+struct dpaa2_eth_priv;
++ switch (priv->type) {
++ case CEETM_ROOT:
++ dpaa2_eth_ceetm_disable(priv_eth);
+
-+struct dpaa2_debugfs {
-+ struct dentry *dir;
-+ struct dentry *fq_stats;
-+ struct dentry *ch_stats;
-+ struct dentry *cpu_stats;
-+ struct dentry *reset_stats;
-+ struct dentry *reset_mc_stats;
-+};
++ if (priv->root.qstats)
++ free_percpu(priv->root.qstats);
+
-+#ifdef CONFIG_FSL_DPAA2_ETH_DEBUGFS
-+void dpaa2_eth_dbg_init(void);
-+void dpaa2_eth_dbg_exit(void);
-+void dpaa2_dbg_add(struct dpaa2_eth_priv *priv);
-+void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv);
-+#else
-+static inline void dpaa2_eth_dbg_init(void) {}
-+static inline void dpaa2_eth_dbg_exit(void) {}
-+static inline void dpaa2_dbg_add(struct dpaa2_eth_priv *priv) {}
-+static inline void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv) {}
-+#endif /* CONFIG_FSL_DPAA2_ETH_DEBUGFS */
++ if (!priv->root.qdiscs)
++ break;
+
-+#endif /* DPAA2_ETH_DEBUGFS_H */
---- /dev/null
-+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-trace.h
-@@ -0,0 +1,184 @@
-+/* Copyright 2014-2015 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
++ /* Destroy the pfifo qdiscs in case they haven't been attached
++ * to the netdev queues yet.
++ */
++ for (i = 0; i < dev->num_tx_queues; i++)
++ if (priv->root.qdiscs[i])
++ qdisc_destroy(priv->root.qdiscs[i]);
+
-+#undef TRACE_SYSTEM
-+#define TRACE_SYSTEM dpaa2_eth
++ kfree(priv->root.qdiscs);
++ break;
+
-+#if !defined(_DPAA2_ETH_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
-+#define _DPAA2_ETH_TRACE_H
++ case CEETM_PRIO:
++ if (priv->prio.parent)
++ priv->prio.parent->child = NULL;
++ break;
++ }
++}
+
-+#include <linux/skbuff.h>
-+#include <linux/netdevice.h>
-+#include <linux/tracepoint.h>
++static int dpaa2_ceetm_dump(struct Qdisc *sch, struct sk_buff *skb)
++{
++ struct Qdisc *qdisc;
++ unsigned int ntx, i;
++ struct nlattr *nest;
++ struct dpaa2_ceetm_tc_qopt qopt;
++ struct dpaa2_ceetm_qdisc_stats *qstats;
++ struct net_device *dev = qdisc_dev(sch);
++ struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
++
++ pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
++
++ sch_tree_lock(sch);
++ memset(&qopt, 0, sizeof(qopt));
++ qopt.type = priv->type;
++ qopt.shaped = priv->shaped;
++
++ switch (priv->type) {
++ case CEETM_ROOT:
++ /* Gather statistics from the underlying pfifo qdiscs */
++ sch->q.qlen = 0;
++ memset(&sch->bstats, 0, sizeof(sch->bstats));
++ memset(&sch->qstats, 0, sizeof(sch->qstats));
++
++ for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
++ qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping;
++ sch->q.qlen += qdisc->q.qlen;
++ sch->bstats.bytes += qdisc->bstats.bytes;
++ sch->bstats.packets += qdisc->bstats.packets;
++ sch->qstats.qlen += qdisc->qstats.qlen;
++ sch->qstats.backlog += qdisc->qstats.backlog;
++ sch->qstats.drops += qdisc->qstats.drops;
++ sch->qstats.requeues += qdisc->qstats.requeues;
++ sch->qstats.overlimits += qdisc->qstats.overlimits;
++ }
+
-+#define TR_FMT "[%s] fd: addr=0x%llx, len=%u, off=%u"
-+/* trace_printk format for raw buffer event class */
-+#define TR_BUF_FMT "[%s] vaddr=%p size=%zu dma_addr=%pad map_size=%zu bpid=%d"
++ for_each_online_cpu(i) {
++ qstats = per_cpu_ptr(priv->root.qstats, i);
++ sch->qstats.drops += qstats->drops;
++ }
+
-+/* This is used to declare a class of events.
-+ * individual events of this type will be defined below.
-+ */
++ break;
+
-+/* Store details about a frame descriptor */
-+DECLARE_EVENT_CLASS(dpaa2_eth_fd,
-+ /* Trace function prototype */
-+ TP_PROTO(struct net_device *netdev,
-+ const struct dpaa2_fd *fd),
++ case CEETM_PRIO:
++ qopt.prio_group_A = priv->prio.tx_prio_cfg.prio_group_A;
++ qopt.prio_group_B = priv->prio.tx_prio_cfg.prio_group_B;
++ qopt.separate_groups = priv->prio.tx_prio_cfg.separate_groups;
++ break;
+
-+ /* Repeat argument list here */
-+ TP_ARGS(netdev, fd),
++ default:
++ pr_err(KBUILD_BASENAME " : %s : invalid qdisc\n", __func__);
++ sch_tree_unlock(sch);
++ return -EINVAL;
++ }
+
-+ /* A structure containing the relevant information we want
-+ * to record. Declare name and type for each normal element,
-+ * name, type and size for arrays. Use __string for variable
-+ * length strings.
-+ */
-+ TP_STRUCT__entry(
-+ __field(u64, fd_addr)
-+ __field(u32, fd_len)
-+ __field(u16, fd_offset)
-+ __string(name, netdev->name)
-+ ),
++ nest = nla_nest_start(skb, TCA_OPTIONS);
++ if (!nest)
++ goto nla_put_failure;
++ if (nla_put(skb, DPAA2_CEETM_TCA_QOPS, sizeof(qopt), &qopt))
++ goto nla_put_failure;
++ nla_nest_end(skb, nest);
+
-+ /* The function that assigns values to the above declared
-+ * fields
-+ */
-+ TP_fast_assign(
-+ __entry->fd_addr = dpaa2_fd_get_addr(fd);
-+ __entry->fd_len = dpaa2_fd_get_len(fd);
-+ __entry->fd_offset = dpaa2_fd_get_offset(fd);
-+ __assign_str(name, netdev->name);
-+ ),
++ sch_tree_unlock(sch);
++ return skb->len;
+
-+ /* This is what gets printed when the trace event is
-+ * triggered.
-+ */
-+ TP_printk(TR_FMT,
-+ __get_str(name),
-+ __entry->fd_addr,
-+ __entry->fd_len,
-+ __entry->fd_offset)
-+);
++nla_put_failure:
++ sch_tree_unlock(sch);
++ nla_nest_cancel(skb, nest);
++ return -EMSGSIZE;
++}
+
-+/* Now declare events of the above type. Format is:
-+ * DEFINE_EVENT(class, name, proto, args), with proto and args same as for class
-+ */
++static int dpaa2_ceetm_change_prio(struct Qdisc *sch,
++ struct dpaa2_ceetm_qdisc *priv,
++ struct dpaa2_ceetm_tc_qopt *qopt)
++{
++ /* TODO: Once LX2 support is added */
++ /* priv->shaped = parent_cl->shaped; */
++ priv->prio.tx_prio_cfg.prio_group_A = qopt->prio_group_A;
++ priv->prio.tx_prio_cfg.prio_group_B = qopt->prio_group_B;
++ priv->prio.tx_prio_cfg.separate_groups = qopt->separate_groups;
+
-+/* Tx (egress) fd */
-+DEFINE_EVENT(dpaa2_eth_fd, dpaa2_tx_fd,
-+ TP_PROTO(struct net_device *netdev,
-+ const struct dpaa2_fd *fd),
++ return 0;
++}
+
-+ TP_ARGS(netdev, fd)
-+);
++/* Edit a ceetm qdisc */
++static int dpaa2_ceetm_change(struct Qdisc *sch, struct nlattr *opt)
++{
++ struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
++ struct nlattr *tb[DPAA2_CEETM_TCA_QOPS + 1];
++ struct dpaa2_ceetm_tc_qopt *qopt;
++ int err;
+
-+/* Rx fd */
-+DEFINE_EVENT(dpaa2_eth_fd, dpaa2_rx_fd,
-+ TP_PROTO(struct net_device *netdev,
-+ const struct dpaa2_fd *fd),
++ pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
+
-+ TP_ARGS(netdev, fd)
-+);
++ err = nla_parse_nested(tb, DPAA2_CEETM_TCA_QOPS, opt,
++ dpaa2_ceetm_policy);
++ if (err < 0) {
++ pr_err(KBUILD_BASENAME " : %s : tc error in %s\n", __func__,
++ "nla_parse_nested");
++ return err;
++ }
+
-+/* Tx confirmation fd */
-+DEFINE_EVENT(dpaa2_eth_fd, dpaa2_tx_conf_fd,
-+ TP_PROTO(struct net_device *netdev,
-+ const struct dpaa2_fd *fd),
++ if (!tb[DPAA2_CEETM_TCA_QOPS]) {
++ pr_err(KBUILD_BASENAME " : %s : tc error in %s\n", __func__,
++ "tb");
++ return -EINVAL;
++ }
+
-+ TP_ARGS(netdev, fd)
-+);
++ if (TC_H_MIN(sch->handle)) {
++ pr_err("CEETM: a qdisc should not have a minor\n");
++ return -EINVAL;
++ }
+
-+/* Log data about raw buffers. Useful for tracing DPBP content. */
-+TRACE_EVENT(dpaa2_eth_buf_seed,
-+ /* Trace function prototype */
-+ TP_PROTO(struct net_device *netdev,
-+ /* virtual address and size */
-+ void *vaddr,
-+ size_t size,
-+ /* dma map address and size */
-+ dma_addr_t dma_addr,
-+ size_t map_size,
-+ /* buffer pool id, if relevant */
-+ u16 bpid),
++ qopt = nla_data(tb[DPAA2_CEETM_TCA_QOPS]);
+
-+ /* Repeat argument list here */
-+ TP_ARGS(netdev, vaddr, size, dma_addr, map_size, bpid),
++ if (priv->type != qopt->type) {
++ pr_err("CEETM: qdisc %X is not of the provided type\n",
++ sch->handle);
++ return -EINVAL;
++ }
+
-+ /* A structure containing the relevant information we want
-+ * to record. Declare name and type for each normal element,
-+ * name, type and size for arrays. Use __string for variable
-+ * length strings.
-+ */
-+ TP_STRUCT__entry(
-+ __field(void *, vaddr)
-+ __field(size_t, size)
-+ __field(dma_addr_t, dma_addr)
-+ __field(size_t, map_size)
-+ __field(u16, bpid)
-+ __string(name, netdev->name)
-+ ),
++ switch (priv->type) {
++ case CEETM_PRIO:
++ err = dpaa2_ceetm_change_prio(sch, priv, qopt);
++ break;
++ default:
++ pr_err(KBUILD_BASENAME " : %s : invalid qdisc\n", __func__);
++ err = -EINVAL;
++ }
+
-+ /* The function that assigns values to the above declared
-+ * fields
-+ */
-+ TP_fast_assign(
-+ __entry->vaddr = vaddr;
-+ __entry->size = size;
-+ __entry->dma_addr = dma_addr;
-+ __entry->map_size = map_size;
-+ __entry->bpid = bpid;
-+ __assign_str(name, netdev->name);
-+ ),
++ return err;
++}
+
-+ /* This is what gets printed when the trace event is
-+ * triggered.
-+ */
-+ TP_printk(TR_BUF_FMT,
-+ __get_str(name),
-+ __entry->vaddr,
-+ __entry->size,
-+ &__entry->dma_addr,
-+ __entry->map_size,
-+ __entry->bpid)
-+);
++/* Configure a root ceetm qdisc */
++static int dpaa2_ceetm_init_root(struct Qdisc *sch,
++ struct dpaa2_ceetm_qdisc *priv,
++ struct dpaa2_ceetm_tc_qopt *qopt)
++{
++ struct net_device *dev = qdisc_dev(sch);
++ struct dpaa2_eth_priv *priv_eth = netdev_priv(dev);
++ struct netdev_queue *dev_queue;
++ unsigned int i, parent_id;
++ struct Qdisc *qdisc;
++ int err;
+
-+/* If only one event of a certain type needs to be declared, use TRACE_EVENT().
-+ * The syntax is the same as for DECLARE_EVENT_CLASS().
-+ */
++ pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
+
-+#endif /* _DPAA2_ETH_TRACE_H */
++ /* Validate inputs */
++ if (sch->parent != TC_H_ROOT) {
++ pr_err("CEETM: a root ceetm qdisc can not be attached to a class\n");
++ tcf_destroy_chain(&priv->filter_list);
++ qdisc_class_hash_destroy(&priv->clhash);
++ return -EINVAL;
++ }
+
-+/* This must be outside ifdef _DPAA2_ETH_TRACE_H */
-+#undef TRACE_INCLUDE_PATH
-+#define TRACE_INCLUDE_PATH .
-+#undef TRACE_INCLUDE_FILE
-+#define TRACE_INCLUDE_FILE dpaa2-eth-trace
-+#include <trace/define_trace.h>
---- /dev/null
-+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c
-@@ -0,0 +1,3155 @@
-+/* Copyright 2014-2015 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+#include <linux/init.h>
-+#include <linux/module.h>
-+#include <linux/platform_device.h>
-+#include <linux/etherdevice.h>
-+#include <linux/of_net.h>
-+#include <linux/interrupt.h>
-+#include <linux/debugfs.h>
-+#include <linux/kthread.h>
-+#include <linux/msi.h>
-+#include <linux/net_tstamp.h>
-+#include <linux/iommu.h>
++ /* Pre-allocate underlying pfifo qdiscs.
++ *
++ * We want to offload shaping and scheduling decisions to the hardware.
++ * The pfifo qdiscs will be attached to the netdev queues and will
++ * guide the traffic from the IP stack down to the driver with minimum
++ * interference.
++ *
++ * The CEETM qdiscs and classes will be traversed when the traffic
++ * reaches the driver.
++ */
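++ /* (this mirrors what the mq qdisc does for multiqueue devices) */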
++ priv->root.qdiscs = kcalloc(dev->num_tx_queues,
++ sizeof(priv->root.qdiscs[0]),
++ GFP_KERNEL);
++ if (!priv->root.qdiscs) {
++ err = -ENOMEM;
++ goto err_init_root;
++ }
+
-+#include "../../fsl-mc/include/dpbp.h"
-+#include "../../fsl-mc/include/dpcon.h"
-+#include "../../fsl-mc/include/mc.h"
-+#include "../../fsl-mc/include/mc-sys.h"
-+#include "dpaa2-eth.h"
-+#include "dpkg.h"
++ for (i = 0; i < dev->num_tx_queues; i++) {
++ dev_queue = netdev_get_tx_queue(dev, i);
++ parent_id = TC_H_MAKE(TC_H_MAJ(sch->handle),
++ TC_H_MIN(i + PFIFO_MIN_OFFSET));
+
-+/* CREATE_TRACE_POINTS only needs to be defined once. Other dpa files
-+ * using trace events only need to #include <trace/events/sched.h>
-+ */
-+#define CREATE_TRACE_POINTS
-+#include "dpaa2-eth-trace.h"
++ qdisc = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,
++ parent_id);
++ if (!qdisc) {
++ err = -ENOMEM;
++ goto err_init_root;
++ }
+
-+MODULE_LICENSE("Dual BSD/GPL");
-+MODULE_AUTHOR("Freescale Semiconductor, Inc");
-+MODULE_DESCRIPTION("Freescale DPAA2 Ethernet Driver");
++ priv->root.qdiscs[i] = qdisc;
++ qdisc->flags |= TCQ_F_ONETXQUEUE;
++ }
+
-+const char dpaa2_eth_drv_version[] = "0.1";
++ sch->flags |= TCQ_F_MQROOT;
+
-+void *dpaa2_eth_iova_to_virt(struct iommu_domain *domain, dma_addr_t iova_addr)
-+{
-+ phys_addr_t phys_addr;
++ priv->root.qstats = alloc_percpu(struct dpaa2_ceetm_qdisc_stats);
++ if (!priv->root.qstats) {
++ pr_err(KBUILD_BASENAME " : %s : alloc_percpu() failed\n",
++ __func__);
++ err = -ENOMEM;
++ goto err_init_root;
++ }
+
-+ phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr;
++ dpaa2_eth_ceetm_enable(priv_eth);
++ return 0;
+
-+ return phys_to_virt(phys_addr);
++err_init_root:
++ dpaa2_ceetm_destroy(sch);
++ return err;
+}
+
-+static void validate_rx_csum(struct dpaa2_eth_priv *priv,
-+ u32 fd_status,
-+ struct sk_buff *skb)
++/* Configure a prio ceetm qdisc */
++static int dpaa2_ceetm_init_prio(struct Qdisc *sch,
++ struct dpaa2_ceetm_qdisc *priv,
++ struct dpaa2_ceetm_tc_qopt *qopt)
+{
-+ skb_checksum_none_assert(skb);
++ struct net_device *dev = qdisc_dev(sch);
++ struct dpaa2_ceetm_class *parent_cl;
++ struct Qdisc *parent_qdisc;
++ int err;
+
-+ /* HW checksum validation is disabled, nothing to do here */
-+ if (!(priv->net_dev->features & NETIF_F_RXCSUM))
-+ return;
++ pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
+
-+ /* Read checksum validation bits */
-+ if (!((fd_status & DPAA2_FAS_L3CV) &&
-+ (fd_status & DPAA2_FAS_L4CV)))
-+ return;
++ if (sch->parent == TC_H_ROOT) {
++ pr_err("CEETM: a prio ceetm qdisc can not be root\n");
++ err = -EINVAL;
++ goto err_init_prio;
++ }
+
-+ /* Inform the stack there's no need to compute L3/L4 csum anymore */
-+ skb->ip_summed = CHECKSUM_UNNECESSARY;
++ parent_qdisc = qdisc_lookup(dev, TC_H_MAJ(sch->parent));
++ if (!parent_qdisc ||
++ strcmp(parent_qdisc->ops->id, dpaa2_ceetm_qdisc_ops.id)) {
++ pr_err("CEETM: a ceetm qdisc can not be attached to other qdisc/class types\n");
++ err = -EINVAL;
++ goto err_init_prio;
++ }
++
++ /* Obtain the parent root ceetm_class */
++ parent_cl = dpaa2_ceetm_find(sch->parent, parent_qdisc);
++
++ if (!parent_cl || parent_cl->type != CEETM_ROOT) {
++ pr_err("CEETM: a prio ceetm qdiscs can be added only under a root ceetm class\n");
++ err = -EINVAL;
++ goto err_init_prio;
++ }
++
++ priv->prio.parent = parent_cl;
++ parent_cl->child = sch;
++
++ err = dpaa2_ceetm_change_prio(sch, priv, qopt);
++ if (err)
++ goto err_init_prio;
++
++ return 0;
++
++err_init_prio:
++ dpaa2_ceetm_destroy(sch);
++ return err;
+}
+
-+/* Free a received FD.
-+ * Not to be used for Tx conf FDs or on any other paths.
-+ */
-+static void free_rx_fd(struct dpaa2_eth_priv *priv,
-+ const struct dpaa2_fd *fd,
-+ void *vaddr)
++/* Configure a generic ceetm qdisc */
++static int dpaa2_ceetm_init(struct Qdisc *sch, struct nlattr *opt)
+{
-+ struct device *dev = priv->net_dev->dev.parent;
-+ dma_addr_t addr = dpaa2_fd_get_addr(fd);
-+ u8 fd_format = dpaa2_fd_get_format(fd);
-+ struct dpaa2_sg_entry *sgt;
-+ void *sg_vaddr;
-+ int i;
++ struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
++ struct net_device *dev = qdisc_dev(sch);
++ struct nlattr *tb[DPAA2_CEETM_TCA_QOPS + 1];
++ struct dpaa2_ceetm_tc_qopt *qopt;
++ int err;
+
-+ /* If single buffer frame, just free the data buffer */
-+ if (fd_format == dpaa2_fd_single)
-+ goto free_buf;
-+ else if (fd_format != dpaa2_fd_sg)
-+ /* we don't support any other format */
-+ return;
++ pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
+
-+ /* For S/G frames, we first need to free all SG entries */
-+ sgt = vaddr + dpaa2_fd_get_offset(fd);
-+ for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
-+ addr = dpaa2_sg_get_addr(&sgt[i]);
-+ sg_vaddr = dpaa2_eth_iova_to_virt(priv->iommu_domain, addr);
++ if (!netif_is_multiqueue(dev))
++ return -EOPNOTSUPP;
+
-+ dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
-+ DMA_FROM_DEVICE);
++ RCU_INIT_POINTER(priv->filter_list, NULL);
+
-+ put_page(virt_to_head_page(sg_vaddr));
++ if (!opt) {
++ pr_err(KBUILD_BASENAME " : %s : tc error - opt = NULL\n",
++ __func__);
++ return -EINVAL;
++ }
+
-+ if (dpaa2_sg_is_final(&sgt[i]))
-+ break;
++ err = nla_parse_nested(tb, DPAA2_CEETM_TCA_QOPS, opt,
++ dpaa2_ceetm_policy);
++ if (err < 0) {
++ pr_err(KBUILD_BASENAME " : %s : tc error in %s\n", __func__,
++ "nla_parse_nested");
++ return err;
+ }
+
-+free_buf:
-+ put_page(virt_to_head_page(vaddr));
-+}
++ if (!tb[DPAA2_CEETM_TCA_QOPS]) {
++ pr_err(KBUILD_BASENAME " : %s : tc error in %s\n", __func__,
++ "tb");
++ return -EINVAL;
++ }
+
-+/* Build a linear skb based on a single-buffer frame descriptor */
-+static struct sk_buff *build_linear_skb(struct dpaa2_eth_priv *priv,
-+ struct dpaa2_eth_channel *ch,
-+ const struct dpaa2_fd *fd,
-+ void *fd_vaddr)
-+{
-+ struct sk_buff *skb = NULL;
-+ u16 fd_offset = dpaa2_fd_get_offset(fd);
-+ u32 fd_length = dpaa2_fd_get_len(fd);
++ if (TC_H_MIN(sch->handle)) {
++ pr_err("CEETM: a qdisc should not have a minor\n");
++ return -EINVAL;
++ }
+
-+ skb = build_skb(fd_vaddr, DPAA2_ETH_SKB_SIZE);
-+ if (unlikely(!skb))
-+ return NULL;
++ qopt = nla_data(tb[DPAA2_CEETM_TCA_QOPS]);
+
-+ skb_reserve(skb, fd_offset);
-+ skb_put(skb, fd_length);
++ /* Initialize the class hash list. Each qdisc has its own class hash */
++ err = qdisc_class_hash_init(&priv->clhash);
++ if (err < 0) {
++ pr_err(KBUILD_BASENAME " : %s : qdisc_class_hash_init failed\n",
++ __func__);
++ return err;
++ }
+
-+ ch->buf_count--;
++ priv->type = qopt->type;
++ priv->shaped = qopt->shaped;
+
-+ return skb;
++ switch (priv->type) {
++ case CEETM_ROOT:
++ err = dpaa2_ceetm_init_root(sch, priv, qopt);
++ break;
++ case CEETM_PRIO:
++ err = dpaa2_ceetm_init_prio(sch, priv, qopt);
++ break;
++ default:
++ pr_err(KBUILD_BASENAME " : %s : invalid qdisc\n", __func__);
++ dpaa2_ceetm_destroy(sch);
++ err = -EINVAL;
++ }
++
++ return err;
+}
+
-+/* Build a non linear (fragmented) skb based on a S/G table */
-+static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv,
-+ struct dpaa2_eth_channel *ch,
-+ struct dpaa2_sg_entry *sgt)
++/* Attach the underlying pfifo qdiscs */
++static void dpaa2_ceetm_attach(struct Qdisc *sch)
+{
-+ struct sk_buff *skb = NULL;
-+ struct device *dev = priv->net_dev->dev.parent;
-+ void *sg_vaddr;
-+ dma_addr_t sg_addr;
-+ u16 sg_offset;
-+ u32 sg_length;
-+ struct page *page, *head_page;
-+ int page_offset;
-+ int i;
-+
-+ for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
-+ struct dpaa2_sg_entry *sge = &sgt[i];
++ struct net_device *dev = qdisc_dev(sch);
++ struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
++ struct Qdisc *qdisc, *old_qdisc;
++ unsigned int i;
+
-+ /* NOTE: We only support SG entries in dpaa2_sg_single format,
-+ * but this is the only format we may receive from HW anyway
-+ */
++ pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
+
-+ /* Get the address and length from the S/G entry */
-+ sg_addr = dpaa2_sg_get_addr(sge);
-+ sg_vaddr = dpaa2_eth_iova_to_virt(priv->iommu_domain, sg_addr);
-+ dma_unmap_single(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE,
-+ DMA_FROM_DEVICE);
++ for (i = 0; i < dev->num_tx_queues; i++) {
++ qdisc = priv->root.qdiscs[i];
++ old_qdisc = dev_graft_qdisc(qdisc->dev_queue, qdisc);
++ if (old_qdisc)
++ qdisc_destroy(old_qdisc);
++ }
+
-+ sg_length = dpaa2_sg_get_len(sge);
++ /* Remove the references to the pfifo qdiscs since the kernel will
++ * destroy them when needed. No cleanup on our part is required from
++ * this point on.
++ */
++ kfree(priv->root.qdiscs);
++ priv->root.qdiscs = NULL;
++}
+
-+ if (i == 0) {
-+ /* We build the skb around the first data buffer */
-+ skb = build_skb(sg_vaddr, DPAA2_ETH_SKB_SIZE);
-+ if (unlikely(!skb))
-+ return NULL;
++static unsigned long dpaa2_ceetm_cls_get(struct Qdisc *sch, u32 classid)
++{
++ struct dpaa2_ceetm_class *cl;
+
-+ sg_offset = dpaa2_sg_get_offset(sge);
-+ skb_reserve(skb, sg_offset);
-+ skb_put(skb, sg_length);
-+ } else {
-+ /* Rest of the data buffers are stored as skb frags */
-+ page = virt_to_page(sg_vaddr);
-+ head_page = virt_to_head_page(sg_vaddr);
++ pr_debug(KBUILD_BASENAME " : %s : classid %X from qdisc %X\n",
++ __func__, classid, sch->handle);
++ cl = dpaa2_ceetm_find(classid, sch);
+
-+ /* Offset in page (which may be compound).
-+ * Data in subsequent SG entries is stored from the
-+ * beginning of the buffer, so we don't need to add the
-+ * sg_offset.
-+ */
-+ page_offset = ((unsigned long)sg_vaddr &
-+ (PAGE_SIZE - 1)) +
-+ (page_address(page) - page_address(head_page));
++ if (cl)
++ cl->refcnt++;
+
-+ skb_add_rx_frag(skb, i - 1, head_page, page_offset,
-+ sg_length, DPAA2_ETH_RX_BUF_SIZE);
-+ }
++ return (unsigned long)cl;
++}
+
-+ if (dpaa2_sg_is_final(sge))
-+ break;
-+ }
++static void dpaa2_ceetm_cls_put(struct Qdisc *sch, unsigned long arg)
++{
++ struct dpaa2_ceetm_class *cl = (struct dpaa2_ceetm_class *)arg;
++ pr_debug(KBUILD_BASENAME " : %s : classid %X from qdisc %X\n",
++ __func__, cl->common.classid, sch->handle);
++ cl->refcnt--;
+
-+ /* Count all data buffers + SG table buffer */
-+ ch->buf_count -= i + 2;
++ if (cl->refcnt == 0)
++ dpaa2_ceetm_cls_destroy(sch, cl);
++}
+
-+ return skb;
++static int dpaa2_ceetm_cls_change_root(struct dpaa2_ceetm_class *cl,
++ struct dpaa2_ceetm_tc_copt *copt,
++ struct net_device *dev)
++{
++ struct dpaa2_eth_priv *priv = netdev_priv(dev);
++ struct dpni_tx_shaping_cfg scfg = { 0 }, ecfg = { 0 };
++ int err = 0;
++
++ pr_debug(KBUILD_BASENAME " : %s : class %X\n", __func__,
++ cl->common.classid);
++
++ if (!cl->shaped)
++ return 0;
++
++ if (dpaa2_eth_update_shaping_cfg(dev, copt->shaping_cfg,
++ &scfg, &ecfg))
++ return -EINVAL;
++
++ err = dpaa2_eth_set_ch_shaping(priv, &scfg, &ecfg,
++ copt->shaping_cfg.coupled,
++ cl->root.ch_id);
++ if (err)
++ return err;
++
++ memcpy(&cl->root.shaping_cfg, &copt->shaping_cfg,
++ sizeof(struct dpaa2_ceetm_shaping_cfg));
++
++ return err;
+}
+
-+/* Main Rx frame processing routine */
-+static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
-+ struct dpaa2_eth_channel *ch,
-+ const struct dpaa2_fd *fd,
-+ struct napi_struct *napi,
-+ u16 queue_id)
++static int dpaa2_ceetm_cls_change_prio(struct dpaa2_ceetm_class *cl,
++ struct dpaa2_ceetm_tc_copt *copt,
++ struct net_device *dev)
+{
-+ dma_addr_t addr = dpaa2_fd_get_addr(fd);
-+ u8 fd_format = dpaa2_fd_get_format(fd);
-+ void *vaddr;
-+ struct sk_buff *skb;
-+ struct rtnl_link_stats64 *percpu_stats;
-+ struct dpaa2_eth_drv_stats *percpu_extras;
-+ struct device *dev = priv->net_dev->dev.parent;
-+ struct dpaa2_fas *fas;
-+ void *buf_data;
-+ u32 status = 0;
++ struct dpaa2_ceetm_qdisc *sch = qdisc_priv(cl->parent);
++ struct dpni_tx_schedule_cfg *sched_cfg;
++ struct dpaa2_eth_priv *priv = netdev_priv(dev);
++ int err;
+
-+ /* Tracing point */
-+ trace_dpaa2_rx_fd(priv->net_dev, fd);
++ pr_debug(KBUILD_BASENAME " : %s : class %X mode %d weight %d\n",
++ __func__, cl->common.classid, copt->mode, copt->weight);
+
-+ vaddr = dpaa2_eth_iova_to_virt(priv->iommu_domain, addr);
-+ dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, DMA_FROM_DEVICE);
++ if (!cl->prio.cstats) {
++ cl->prio.cstats = alloc_percpu(struct dpaa2_ceetm_class_stats);
++ if (!cl->prio.cstats) {
++ pr_err(KBUILD_BASENAME " : %s : alloc_percpu() failed\n",
++ __func__);
++ return -ENOMEM;
++ }
++ }
+
-+ /* HWA - FAS, timestamp */
-+ fas = dpaa2_eth_get_fas(vaddr);
-+ prefetch(fas);
-+ /* data / SG table */
-+ buf_data = vaddr + dpaa2_fd_get_offset(fd);
-+ prefetch(buf_data);
++ cl->prio.mode = copt->mode;
++ cl->prio.weight = copt->weight;
+
-+ percpu_stats = this_cpu_ptr(priv->percpu_stats);
-+ percpu_extras = this_cpu_ptr(priv->percpu_extras);
++ sched_cfg = &sch->prio.tx_prio_cfg.tc_sched[cl->prio.qpri];
+
-+ switch (fd_format) {
-+ case dpaa2_fd_single:
-+ skb = build_linear_skb(priv, ch, fd, vaddr);
++ switch (copt->mode) {
++ case STRICT_PRIORITY:
++ sched_cfg->mode = DPNI_TX_SCHED_STRICT_PRIORITY;
+ break;
-+ case dpaa2_fd_sg:
-+ skb = build_frag_skb(priv, ch, buf_data);
-+ put_page(virt_to_head_page(vaddr));
-+ percpu_extras->rx_sg_frames++;
-+ percpu_extras->rx_sg_bytes += dpaa2_fd_get_len(fd);
++ case WEIGHTED_A:
++ sched_cfg->mode = DPNI_TX_SCHED_WEIGHTED_A;
++ break;
++ case WEIGHTED_B:
++ sched_cfg->mode = DPNI_TX_SCHED_WEIGHTED_B;
+ break;
-+ default:
-+ /* We don't support any other format */
-+ goto err_frame_format;
+ }
+
-+ if (unlikely(!skb))
-+ goto err_build_skb;
++ err = dpaa2_eth_update_tx_prio(priv, cl, DPAA2_ETH_ADD_CQ);
+
-+ prefetch(skb->data);
++ return err;
++}
+
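++/* Scheduling note for dpaa2_ceetm_cls_change_prio() above (illustrative):
++ * within a weighted (WBFS) group, bandwidth is shared in proportion to
++ * the class weights, so a class with weight 200 is expected to receive
++ * twice the bandwidth of a sibling with weight 100; valid weights span
++ * DPAA2_CEETM_MIN_WEIGHT..DPAA2_CEETM_MAX_WEIGHT.
++ */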
-+ /* Get the timestamp value */
-+ if (priv->ts_rx_en) {
-+ struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
-+ u64 *ns = (u64 *)dpaa2_eth_get_ts(vaddr);
++/* Add a new ceetm class */
++static int dpaa2_ceetm_cls_add(struct Qdisc *sch, u32 classid,
++ struct dpaa2_ceetm_tc_copt *copt,
++ unsigned long *arg)
++{
++ struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
++ struct net_device *dev = qdisc_dev(sch);
++ struct dpaa2_eth_priv *priv_eth = netdev_priv(dev);
++ struct dpaa2_ceetm_class *cl;
++ int err;
+
-+ *ns = DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS * le64_to_cpup(ns);
-+ memset(shhwtstamps, 0, sizeof(*shhwtstamps));
-+ shhwtstamps->hwtstamp = ns_to_ktime(*ns);
++ if (copt->type == CEETM_ROOT &&
++ priv->clhash.hashelems == dpaa2_eth_ch_count(priv_eth)) {
++ pr_err("CEETM: only %d channel%s per DPNI allowed, sorry\n",
++ dpaa2_eth_ch_count(priv_eth),
++ dpaa2_eth_ch_count(priv_eth) == 1 ? "" : "s");
++ return -EINVAL;
+ }
+
-+ /* Check if we need to validate the L4 csum */
-+ if (likely(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) {
-+ status = le32_to_cpu(fas->status);
-+ validate_rx_csum(priv, status, skb);
++ if (copt->type == CEETM_PRIO &&
++ priv->clhash.hashelems == dpaa2_eth_tc_count(priv_eth)) {
++ pr_err("CEETM: only %d queue%s per channel allowed, sorry\n",
++ dpaa2_eth_tc_count(priv_eth),
++ dpaa2_eth_tc_count(priv_eth) == 1 ? "" : "s");
++ return -EINVAL;
+ }
+
-+ skb->protocol = eth_type_trans(skb, priv->net_dev);
++ cl = kzalloc(sizeof(*cl), GFP_KERNEL);
++ if (!cl)
++ return -ENOMEM;
+
-+ /* Record Rx queue - this will be used when picking a Tx queue to
-+ * forward the frames. We're keeping flow affinity through the
-+ * network stack.
++ RCU_INIT_POINTER(cl->filter_list, NULL);
++
++ cl->common.classid = classid;
++ cl->refcnt = 1;
++ cl->parent = sch;
++ cl->child = NULL;
++
++ /* Add class handle in Qdisc */
++ dpaa2_ceetm_link_class(sch, &priv->clhash, &cl->common);
++
++ cl->shaped = copt->shaped;
++ cl->type = copt->type;
++
++ /* Claim a CEETM channel / tc - DPAA2 will assume the transition from
++ * classid to qdid/qpri, starting from qdid / qpri 0
+ */
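++ /* e.g. under a root qdisc with handle 1:0 (0x10000), root class 1:1
++ * (0x10001) claims channel qdid 0 and 1:2 claims qdid 1; likewise, a
++ * prio class 2:1 under a prio qdisc 2:0 claims qpri 0.
++ */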
-+ skb_record_rx_queue(skb, queue_id);
++ switch (copt->type) {
++ case CEETM_ROOT:
++ cl->root.ch_id = classid - sch->handle - 1;
++ err = dpaa2_ceetm_cls_change_root(cl, copt, dev);
++ break;
++ case CEETM_PRIO:
++ cl->prio.qpri = classid - sch->handle - 1;
++ err = dpaa2_ceetm_cls_change_prio(cl, copt, dev);
++ break;
++ default:
++ err = -EINVAL;
++ break;
++ }
+
-+ percpu_stats->rx_packets++;
-+ percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
++ if (err) {
++ pr_err("%s: Unable to set new %s class\n", __func__,
++ (copt->type == CEETM_ROOT ? "root" : "prio"));
++ goto out_free;
++ }
+
-+ napi_gro_receive(napi, skb);
++ switch (copt->type) {
++ case CEETM_ROOT:
++ pr_debug(KBUILD_BASENAME " : %s : configured root class %X associated with channel qdid %d\n",
++ __func__, classid, cl->root.ch_id);
++ break;
++ case CEETM_PRIO:
++ pr_debug(KBUILD_BASENAME " : %s : configured prio class %X associated with queue qpri %d\n",
++ __func__, classid, cl->prio.qpri);
++ break;
++ }
+
-+ return;
++ *arg = (unsigned long)cl;
++ return 0;
+
-+err_build_skb:
-+ free_rx_fd(priv, fd, vaddr);
-+err_frame_format:
-+ percpu_stats->rx_dropped++;
++out_free:
++ kfree(cl);
++ return err;
+}
+
-+#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
-+/* Processing of Rx frames received on the error FQ
-+ * We check and print the error bits and then free the frame
-+ */
-+static void dpaa2_eth_rx_err(struct dpaa2_eth_priv *priv,
-+ struct dpaa2_eth_channel *ch,
-+ const struct dpaa2_fd *fd,
-+ struct napi_struct *napi __always_unused,
-+ u16 queue_id __always_unused)
++/* Add or configure a ceetm class */
++static int dpaa2_ceetm_cls_change(struct Qdisc *sch, u32 classid, u32 parentid,
++ struct nlattr **tca, unsigned long *arg)
+{
-+ struct device *dev = priv->net_dev->dev.parent;
-+ dma_addr_t addr = dpaa2_fd_get_addr(fd);
-+ void *vaddr;
-+ struct rtnl_link_stats64 *percpu_stats;
-+ struct dpaa2_fas *fas;
-+ u32 status = 0;
-+ bool check_fas_errors = false;
++ struct dpaa2_ceetm_qdisc *priv;
++ struct dpaa2_ceetm_class *cl = (struct dpaa2_ceetm_class *)*arg;
++ struct nlattr *opt = tca[TCA_OPTIONS];
++ struct nlattr *tb[DPAA2_CEETM_TCA_MAX];
++ struct dpaa2_ceetm_tc_copt *copt;
++ struct net_device *dev = qdisc_dev(sch);
++ int err;
+
-+ vaddr = dpaa2_eth_iova_to_virt(priv->iommu_domain, addr);
-+ dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, DMA_FROM_DEVICE);
++ pr_debug(KBUILD_BASENAME " : %s : classid %X under qdisc %X\n",
++ __func__, classid, sch->handle);
+
-+ /* check frame errors in the FD field */
-+ if (fd->simple.ctrl & DPAA2_FD_RX_ERR_MASK) {
-+ check_fas_errors = !!(fd->simple.ctrl & FD_CTRL_FAERR) &&
-+ !!(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV);
-+ if (net_ratelimit())
-+ netdev_dbg(priv->net_dev, "Rx frame FD err: %x08\n",
-+ fd->simple.ctrl & DPAA2_FD_RX_ERR_MASK);
++ if (strcmp(sch->ops->id, dpaa2_ceetm_qdisc_ops.id)) {
++ pr_err("CEETM: a ceetm class cannot be attached to other qdisc/class types\n");
++ return -EINVAL;
+ }
+
-+ /* check frame errors in the FAS field */
-+ if (check_fas_errors) {
-+ fas = dpaa2_eth_get_fas(vaddr);
-+ status = le32_to_cpu(fas->status);
-+ if (net_ratelimit())
-+ netdev_dbg(priv->net_dev, "Rx frame FAS err: 0x%08x\n",
-+ status & DPAA2_FAS_RX_ERR_MASK);
++ priv = qdisc_priv(sch);
++
++ if (!opt) {
++ pr_err(KBUILD_BASENAME " : %s : tc error NULL opt\n", __func__);
++ return -EINVAL;
+ }
-+ free_rx_fd(priv, fd, vaddr);
+
-+ percpu_stats = this_cpu_ptr(priv->percpu_stats);
-+ percpu_stats->rx_errors++;
++ err = nla_parse_nested(tb, DPAA2_CEETM_TCA_COPT, opt,
++ dpaa2_ceetm_policy);
++ if (err < 0) {
++ pr_err(KBUILD_BASENAME " : %s : tc error in %s\n", __func__,
++ "nla_parse_nested");
++ return -EINVAL;
++ }
++
++ if (!tb[DPAA2_CEETM_TCA_COPT]) {
++ pr_err(KBUILD_BASENAME " : %s : tc error in %s\n", __func__,
++ "tb");
++ return -EINVAL;
++ }
++
++ copt = nla_data(tb[DPAA2_CEETM_TCA_COPT]);
++
++ /* Configure an existing ceetm class */
++ if (cl) {
++ if (copt->type != cl->type) {
++ pr_err("CEETM: class %X is not of the provided type\n",
++ cl->common.classid);
++ return -EINVAL;
++ }
++
++ switch (copt->type) {
++ case CEETM_ROOT:
++ return dpaa2_ceetm_cls_change_root(cl, copt, dev);
++ case CEETM_PRIO:
++ return dpaa2_ceetm_cls_change_prio(cl, copt, dev);
++
++ default:
++ pr_err(KBUILD_BASENAME " : %s : invalid class\n",
++ __func__);
++ return -EINVAL;
++ }
++ }
++
++ return dpaa2_ceetm_cls_add(sch, classid, copt, arg);
+}
-+#endif
+
-+/* Consume all frames pull-dequeued into the store. This is the simplest way to
-+ * make sure we don't accidentally issue another volatile dequeue which would
-+ * overwrite (leak) frames already in the store.
-+ *
-+ * The number of frames is returned using the last 2 output arguments,
-+ * separately for Rx and Tx confirmations.
-+ *
-+ * Observance of NAPI budget is not our concern, leaving that to the caller.
-+ */
-+static bool consume_frames(struct dpaa2_eth_channel *ch, int *rx_cleaned,
-+ int *tx_conf_cleaned)
++static void dpaa2_ceetm_cls_walk(struct Qdisc *sch, struct qdisc_walker *arg)
+{
-+ struct dpaa2_eth_priv *priv = ch->priv;
-+ struct dpaa2_eth_fq *fq = NULL;
-+ struct dpaa2_dq *dq;
-+ const struct dpaa2_fd *fd;
-+ int cleaned = 0;
-+ int is_last;
++ struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
++ struct dpaa2_ceetm_class *cl;
++ unsigned int i;
+
-+ do {
-+ dq = dpaa2_io_store_next(ch->store, &is_last);
-+ if (unlikely(!dq)) {
-+ /* If we're here, we *must* have placed a
-+ * volatile dequeue command, so keep reading through
-+ * the store until we get some sort of valid response
-+ * token (either a valid frame or an "empty dequeue")
-+ */
-+ continue;
++ pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
++
++ if (arg->stop)
++ return;
++
++ for (i = 0; i < priv->clhash.hashsize; i++) {
++ hlist_for_each_entry(cl, &priv->clhash.hash[i], common.hnode) {
++ if (arg->count < arg->skip) {
++ arg->count++;
++ continue;
++ }
++ if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
++ arg->stop = 1;
++ return;
++ }
++ arg->count++;
+ }
++ }
++}
+
-+ fd = dpaa2_dq_fd(dq);
++static int dpaa2_ceetm_cls_dump(struct Qdisc *sch, unsigned long arg,
++ struct sk_buff *skb, struct tcmsg *tcm)
++{
++ struct dpaa2_ceetm_class *cl = (struct dpaa2_ceetm_class *)arg;
++ struct nlattr *nest;
++ struct dpaa2_ceetm_tc_copt copt;
+
-+ /* prefetch the frame descriptor */
-+ prefetch(fd);
++ pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n",
++ __func__, cl->common.classid, sch->handle);
+
-+ fq = (struct dpaa2_eth_fq *)dpaa2_dq_fqd_ctx(dq);
-+ fq->consume(priv, ch, fd, &ch->napi, fq->flowid);
-+ cleaned++;
-+ } while (!is_last);
++ sch_tree_lock(sch);
+
-+ if (!cleaned)
-+ return false;
++ tcm->tcm_parent = ((struct Qdisc *)cl->parent)->handle;
++ tcm->tcm_handle = cl->common.classid;
+
-+ /* All frames brought in store by a volatile dequeue
-+ * come from the same queue
-+ */
-+ if (fq->type == DPAA2_TX_CONF_FQ)
-+ *tx_conf_cleaned += cleaned;
-+ else
-+ *rx_cleaned += cleaned;
++ memset(&copt, 0, sizeof(copt));
+
-+ fq->stats.frames += cleaned;
-+ ch->stats.frames += cleaned;
++ copt.shaped = cl->shaped;
++ copt.type = cl->type;
+
-+ return true;
++ switch (cl->type) {
++ case CEETM_ROOT:
++ if (cl->child)
++ tcm->tcm_info = cl->child->handle;
++
++ memcpy(&copt.shaping_cfg, &cl->root.shaping_cfg,
++ sizeof(struct dpaa2_ceetm_shaping_cfg));
++
++ break;
++
++ case CEETM_PRIO:
++ if (cl->child)
++ tcm->tcm_info = cl->child->handle;
++
++ copt.mode = cl->prio.mode;
++ copt.weight = cl->prio.weight;
++
++ break;
++ }
++
++ nest = nla_nest_start(skb, TCA_OPTIONS);
++ if (!nest)
++ goto nla_put_failure;
++ if (nla_put(skb, DPAA2_CEETM_TCA_COPT, sizeof(copt), &copt))
++ goto nla_put_failure;
++ nla_nest_end(skb, nest);
++ sch_tree_unlock(sch);
++ return skb->len;
++
++nla_put_failure:
++ sch_tree_unlock(sch);
++ nla_nest_cancel(skb, nest);
++ return -EMSGSIZE;
+}
+
-+/* Configure the egress frame annotation for timestamp update */
-+static void enable_tx_tstamp(struct dpaa2_fd *fd, void *buf_start)
++static int dpaa2_ceetm_cls_delete(struct Qdisc *sch, unsigned long arg)
+{
-+ struct dpaa2_faead *faead;
-+ u32 ctrl;
-+ u32 frc;
++ struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
++ struct dpaa2_ceetm_class *cl = (struct dpaa2_ceetm_class *)arg;
+
-+ /* Mark the egress frame annotation area as valid */
-+ frc = dpaa2_fd_get_frc(fd);
-+ dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV);
++ pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n",
++ __func__, cl->common.classid, sch->handle);
+
-+ /* enable UPD (update prepended data) bit in FAEAD field of
-+ * hardware frame annotation area
-+ */
-+ ctrl = DPAA2_FAEAD_A2V | DPAA2_FAEAD_UPDV | DPAA2_FAEAD_UPD;
-+ faead = dpaa2_eth_get_faead(buf_start);
-+ faead->ctrl = cpu_to_le32(ctrl);
++ sch_tree_lock(sch);
++ qdisc_class_hash_remove(&priv->clhash, &cl->common);
++ cl->refcnt--;
++ WARN_ON(cl->refcnt == 0);
++ sch_tree_unlock(sch);
++ return 0;
+}
+
-+/* Create a frame descriptor based on a fragmented skb */
-+static int build_sg_fd(struct dpaa2_eth_priv *priv,
-+ struct sk_buff *skb,
-+ struct dpaa2_fd *fd)
++/* Get the class' child qdisc, if any */
++static struct Qdisc *dpaa2_ceetm_cls_leaf(struct Qdisc *sch, unsigned long arg)
+{
-+ struct device *dev = priv->net_dev->dev.parent;
-+ void *sgt_buf = NULL;
-+ dma_addr_t addr;
-+ int nr_frags = skb_shinfo(skb)->nr_frags;
-+ struct dpaa2_sg_entry *sgt;
-+ int i, err;
-+ int sgt_buf_size;
-+ struct scatterlist *scl, *crt_scl;
-+ int num_sg;
-+ int num_dma_bufs;
-+ struct dpaa2_fas *fas;
-+ struct dpaa2_eth_swa *swa;
-+
-+ /* Create and map scatterlist.
-+ * We don't advertise NETIF_F_FRAGLIST, so skb_to_sgvec() will not have
-+ * to go beyond nr_frags+1.
-+ * Note: We don't support chained scatterlists
-+ */
-+ if (unlikely(PAGE_SIZE / sizeof(struct scatterlist) < nr_frags + 1))
-+ return -EINVAL;
++ struct dpaa2_ceetm_class *cl = (struct dpaa2_ceetm_class *)arg;
+
-+ scl = kcalloc(nr_frags + 1, sizeof(struct scatterlist), GFP_ATOMIC);
-+ if (unlikely(!scl))
-+ return -ENOMEM;
++ pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n",
++ __func__, cl->common.classid, sch->handle);
+
-+ sg_init_table(scl, nr_frags + 1);
-+ num_sg = skb_to_sgvec(skb, scl, 0, skb->len);
-+ num_dma_bufs = dma_map_sg(dev, scl, num_sg, DMA_TO_DEVICE);
-+ if (unlikely(!num_dma_bufs)) {
-+ err = -ENOMEM;
-+ goto dma_map_sg_failed;
++ switch (cl->type) {
++ case CEETM_ROOT:
++ case CEETM_PRIO:
++ return cl->child;
+ }
+
-+ /* Prepare the HW SGT structure */
-+ sgt_buf_size = priv->tx_data_offset +
-+ sizeof(struct dpaa2_sg_entry) * (1 + num_dma_bufs);
-+ sgt_buf = kzalloc(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN, GFP_ATOMIC);
-+ if (unlikely(!sgt_buf)) {
-+ err = -ENOMEM;
-+ goto sgt_buf_alloc_failed;
++ return NULL;
++}
++
++static int dpaa2_ceetm_cls_graft(struct Qdisc *sch, unsigned long arg,
++ struct Qdisc *new, struct Qdisc **old)
++{
++ if (new && strcmp(new->ops->id, dpaa2_ceetm_qdisc_ops.id)) {
++ pr_err("CEETM: only ceetm qdiscs can be attached to ceetm classes\n");
++ return -EOPNOTSUPP;
+ }
-+ sgt_buf = PTR_ALIGN(sgt_buf, DPAA2_ETH_TX_BUF_ALIGN);
+
-+ /* PTA from egress side is passed as is to the confirmation side so
-+ * we need to clear some fields here in order to find consistent values
-+ * on TX confirmation. We are clearing FAS (Frame Annotation Status)
-+ * field from the hardware annotation area
-+ */
-+ fas = dpaa2_eth_get_fas(sgt_buf);
-+ memset(fas, 0, DPAA2_FAS_SIZE);
++ return 0;
++}
+
-+ sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
++static int dpaa2_ceetm_cls_dump_stats(struct Qdisc *sch, unsigned long arg,
++ struct gnet_dump *d)
++{
++ struct dpaa2_ceetm_class *cl = (struct dpaa2_ceetm_class *)arg;
++ struct gnet_stats_basic_packed tmp_bstats;
++ struct dpaa2_ceetm_tc_xstats xstats;
++ union dpni_statistics dpni_stats;
++ struct net_device *dev = qdisc_dev(sch);
++ struct dpaa2_eth_priv *priv_eth = netdev_priv(dev);
++ u8 ch_id = 0;
++ int err;
+
-+ /* Fill in the HW SGT structure.
-+ *
-+ * sgt_buf is zeroed out, so the following fields are implicit
-+ * in all sgt entries:
-+ * - offset is 0
-+ * - format is 'dpaa2_sg_single'
-+ */
-+ for_each_sg(scl, crt_scl, num_dma_bufs, i) {
-+ dpaa2_sg_set_addr(&sgt[i], sg_dma_address(crt_scl));
-+ dpaa2_sg_set_len(&sgt[i], sg_dma_len(crt_scl));
-+ }
-+ dpaa2_sg_set_final(&sgt[i - 1], true);
++ memset(&xstats, 0, sizeof(xstats));
++ memset(&tmp_bstats, 0, sizeof(tmp_bstats));
+
-+ /* Store the skb backpointer in the SGT buffer.
-+ * Fit the scatterlist and the number of buffers alongside the
-+ * skb backpointer in the software annotation area. We'll need
-+ * all of them on Tx Conf.
-+ */
-+ swa = (struct dpaa2_eth_swa *)sgt_buf;
-+ swa->skb = skb;
-+ swa->scl = scl;
-+ swa->num_sg = num_sg;
-+ swa->num_dma_bufs = num_dma_bufs;
++ if (cl->type == CEETM_ROOT)
++ return 0;
+
-+ /* Separately map the SGT buffer */
-+ addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
-+ if (unlikely(dma_mapping_error(dev, addr))) {
-+ err = -ENOMEM;
-+ goto dma_map_single_failed;
-+ }
-+ dpaa2_fd_set_offset(fd, priv->tx_data_offset);
-+ dpaa2_fd_set_format(fd, dpaa2_fd_sg);
-+ dpaa2_fd_set_addr(fd, addr);
-+ dpaa2_fd_set_len(fd, skb->len);
++ err = dpni_get_statistics(priv_eth->mc_io, 0, priv_eth->mc_token, 3,
++ DPNI_BUILD_CH_TC(ch_id, cl->prio.qpri),
++ &dpni_stats);
++ if (err)
++ netdev_warn(dev, "dpni_get_statistics(%d) failed - %d\n", 3, err);
+
-+ fd->simple.ctrl = DPAA2_FD_CTRL_ASAL | FD_CTRL_PTA | FD_CTRL_PTV1;
++ xstats.ceetm_dequeue_bytes = dpni_stats.page_3.ceetm_dequeue_bytes;
++ xstats.ceetm_dequeue_frames = dpni_stats.page_3.ceetm_dequeue_frames;
++ xstats.ceetm_reject_bytes = dpni_stats.page_3.ceetm_reject_bytes;
++ xstats.ceetm_reject_frames = dpni_stats.page_3.ceetm_reject_frames;
+
-+ if (priv->ts_tx_en && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
-+ enable_tx_tstamp(fd, sgt_buf);
++ return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
++}
+
-+ return 0;
++static struct tcf_proto __rcu **dpaa2_ceetm_tcf_chain(struct Qdisc *sch,
++ unsigned long arg)
++{
++ struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
++ struct dpaa2_ceetm_class *cl = (struct dpaa2_ceetm_class *)arg;
+
-+dma_map_single_failed:
-+ kfree(sgt_buf);
-+sgt_buf_alloc_failed:
-+ dma_unmap_sg(dev, scl, num_sg, DMA_TO_DEVICE);
-+dma_map_sg_failed:
-+ kfree(scl);
-+ return err;
++ pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n", __func__,
++ cl ? cl->common.classid : 0, sch->handle);
++ return cl ? &cl->filter_list : &priv->filter_list;
+}
+
-+/* Create a frame descriptor based on a linear skb */
-+static int build_single_fd(struct dpaa2_eth_priv *priv,
-+ struct sk_buff *skb,
-+ struct dpaa2_fd *fd)
++static unsigned long dpaa2_ceetm_tcf_bind(struct Qdisc *sch,
++ unsigned long parent,
++ u32 classid)
+{
-+ struct device *dev = priv->net_dev->dev.parent;
-+ u8 *buffer_start;
-+ struct sk_buff **skbh;
-+ dma_addr_t addr;
-+ struct dpaa2_fas *fas;
++ struct dpaa2_ceetm_class *cl = dpaa2_ceetm_find(classid, sch);
+
-+ buffer_start = PTR_ALIGN(skb->data - priv->tx_data_offset -
-+ DPAA2_ETH_TX_BUF_ALIGN,
-+ DPAA2_ETH_TX_BUF_ALIGN);
-+
-+ /* PTA from egress side is passed as is to the confirmation side so
-+ * we need to clear some fields here in order to find consistent values
-+ * on TX confirmation. We are clearing FAS (Frame Annotation Status)
-+ * field from the hardware annotation area
-+ */
-+ fas = dpaa2_eth_get_fas(buffer_start);
-+ memset(fas, 0, DPAA2_FAS_SIZE);
++ pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n", __func__,
++ cl ? cl->common.classid : 0, sch->handle);
++ return (unsigned long)cl;
++}
+
-+ /* Store a backpointer to the skb at the beginning of the buffer
-+ * (in the private data area) such that we can release it
-+ * on Tx confirm
-+ */
-+ skbh = (struct sk_buff **)buffer_start;
-+ *skbh = skb;
++static void dpaa2_ceetm_tcf_unbind(struct Qdisc *sch, unsigned long arg)
++{
++ struct dpaa2_ceetm_class *cl = (struct dpaa2_ceetm_class *)arg;
+
-+ addr = dma_map_single(dev, buffer_start,
-+ skb_tail_pointer(skb) - buffer_start,
-+ DMA_BIDIRECTIONAL);
-+ if (unlikely(dma_mapping_error(dev, addr)))
-+ return -ENOMEM;
++ pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n", __func__,
++ cl ? cl->common.classid : 0, sch->handle);
++}
+
-+ dpaa2_fd_set_addr(fd, addr);
-+ dpaa2_fd_set_offset(fd, (u16)(skb->data - buffer_start));
-+ dpaa2_fd_set_len(fd, skb->len);
-+ dpaa2_fd_set_format(fd, dpaa2_fd_single);
++const struct Qdisc_class_ops dpaa2_ceetm_cls_ops = {
++ .graft = dpaa2_ceetm_cls_graft,
++ .leaf = dpaa2_ceetm_cls_leaf,
++ .get = dpaa2_ceetm_cls_get,
++ .put = dpaa2_ceetm_cls_put,
++ .change = dpaa2_ceetm_cls_change,
++ .delete = dpaa2_ceetm_cls_delete,
++ .walk = dpaa2_ceetm_cls_walk,
++ .tcf_chain = dpaa2_ceetm_tcf_chain,
++ .bind_tcf = dpaa2_ceetm_tcf_bind,
++ .unbind_tcf = dpaa2_ceetm_tcf_unbind,
++ .dump = dpaa2_ceetm_cls_dump,
++ .dump_stats = dpaa2_ceetm_cls_dump_stats,
++};
+
-+ fd->simple.ctrl = DPAA2_FD_CTRL_ASAL | FD_CTRL_PTA | FD_CTRL_PTV1;
++struct Qdisc_ops dpaa2_ceetm_qdisc_ops __read_mostly = {
++ .id = "ceetm",
++ .priv_size = sizeof(struct dpaa2_ceetm_qdisc),
++ .cl_ops = &dpaa2_ceetm_cls_ops,
++ .init = dpaa2_ceetm_init,
++ .destroy = dpaa2_ceetm_destroy,
++ .change = dpaa2_ceetm_change,
++ .dump = dpaa2_ceetm_dump,
++ .attach = dpaa2_ceetm_attach,
++ .owner = THIS_MODULE,
++};
+
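++/* Example configuration from user space (illustrative; assumes an
++ * iproute2 build carrying NXP's ceetm qdisc extensions):
++ *
++ *	tc qdisc add dev eth0 root handle 1: ceetm type root
++ *	tc class add dev eth0 parent 1: classid 1:1 ceetm type root \
++ *		cir 1000mbit
++ *	tc qdisc add dev eth0 parent 1:1 handle 2: ceetm type prio
++ *	tc class add dev eth0 parent 2: classid 2:1 ceetm type prio \
++ *		mode STRICT_PRIORITY
++ */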
-+ if (priv->ts_tx_en && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
-+ enable_tx_tstamp(fd, buffer_start);
++/* Run the filters and classifiers attached to the qdisc on the provided skb */
++int dpaa2_ceetm_classify(struct sk_buff *skb, struct Qdisc *sch,
++ int *qdid, int *qpri)
++{
++ struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
++ struct dpaa2_ceetm_class *cl = NULL;
++ struct tcf_result res;
++ struct tcf_proto *tcf;
++ int result;
+
-+ return 0;
-+}
++ tcf = rcu_dereference_bh(priv->filter_list);
++ while (tcf && (result = tc_classify(skb, tcf, &res, false)) >= 0) {
++#ifdef CONFIG_NET_CLS_ACT
++ switch (result) {
++ case TC_ACT_QUEUED:
++ case TC_ACT_STOLEN:
++ case TC_ACT_SHOT:
++ /* No valid class found due to action */
++ return -1;
++ }
++#endif
++ cl = (void *)res.class;
++ if (!cl) {
++ /* The filter leads to the qdisc */
++ if (res.classid == sch->handle)
++ return 0;
++
++ cl = dpaa2_ceetm_find(res.classid, sch);
++ /* The filter leads to an invalid class */
++ if (!cl)
++ break;
++ }
+
-+/* FD freeing routine on the Tx path
-+ *
-+ * DMA-unmap and free FD and possibly SGT buffer allocated on Tx. The skb
-+ * back-pointed to is also freed.
-+ * This can be called either from dpaa2_eth_tx_conf() or on the error path of
-+ * dpaa2_eth_tx().
-+ * Optionally, return the frame annotation status word (FAS), which needs
-+ * to be checked if we're on the confirmation path.
-+ */
-+static void free_tx_fd(const struct dpaa2_eth_priv *priv,
-+ const struct dpaa2_fd *fd,
-+ u32 *status)
-+{
-+ struct device *dev = priv->net_dev->dev.parent;
-+ dma_addr_t fd_addr;
-+ struct sk_buff **skbh, *skb;
-+ unsigned char *buffer_start;
-+ int unmap_size;
-+ struct scatterlist *scl;
-+ int num_sg, num_dma_bufs;
-+ struct dpaa2_eth_swa *swa;
-+ u8 fd_format = dpaa2_fd_get_format(fd);
-+ struct dpaa2_fas *fas;
++ /* The class might have its own filters attached */
++ tcf = rcu_dereference_bh(cl->filter_list);
++ }
+
-+ fd_addr = dpaa2_fd_get_addr(fd);
-+ skbh = dpaa2_eth_iova_to_virt(priv->iommu_domain, fd_addr);
++ /* No valid class found */
++ if (!cl)
++ return 0;
+
-+ /* HWA - FAS, timestamp (for Tx confirmation frames) */
-+ fas = dpaa2_eth_get_fas(skbh);
-+ prefetch(fas);
++ switch (cl->type) {
++ case CEETM_ROOT:
++ *qdid = cl->root.ch_id;
+
-+ switch (fd_format) {
-+ case dpaa2_fd_single:
-+ skb = *skbh;
-+ buffer_start = (unsigned char *)skbh;
-+ /* Accessing the skb buffer is safe before dma unmap, because
-+ * we didn't map the actual skb shell.
-+ */
-+ dma_unmap_single(dev, fd_addr,
-+ skb_tail_pointer(skb) - buffer_start,
-+ DMA_BIDIRECTIONAL);
-+ break;
-+ case dpaa2_fd_sg:
-+ swa = (struct dpaa2_eth_swa *)skbh;
-+ skb = swa->skb;
-+ scl = swa->scl;
-+ num_sg = swa->num_sg;
-+ num_dma_bufs = swa->num_dma_bufs;
++ /* The root class does not have a child prio qdisc */
++ if (!cl->child)
++ return 0;
+
-+ /* Unmap the scatterlist */
-+ dma_unmap_sg(dev, scl, num_sg, DMA_TO_DEVICE);
-+ kfree(scl);
++ /* Run the prio qdisc classifiers */
++ return dpaa2_ceetm_classify(skb, cl->child, qdid, qpri);
+
-+ /* Unmap the SGT buffer */
-+ unmap_size = priv->tx_data_offset +
-+ sizeof(struct dpaa2_sg_entry) * (1 + num_dma_bufs);
-+ dma_unmap_single(dev, fd_addr, unmap_size, DMA_BIDIRECTIONAL);
++ case CEETM_PRIO:
++ *qpri = cl->prio.qpri;
+ break;
-+ default:
-+ /* Unsupported format, mark it as errored and give up */
-+ if (status)
-+ *status = ~0;
-+ return;
+ }
+
-+ /* Get the timestamp value */
-+ if (priv->ts_tx_en && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
-+ struct skb_shared_hwtstamps shhwtstamps;
-+ u64 *ns;
-+
-+ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
++ return 0;
++}
+
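++/* Sketch of the expected caller (illustrative; variable names assumed):
++ * the dpaa2-eth Tx path would classify before enqueuing when CEETM is
++ * enabled, then use qdid to pick the CEETM channel and qpri the queue
++ * within it:
++ *
++ *	int qdid = 0, qpri = 0;
++ *
++ *	if (dpaa2_eth_ceetm_is_enabled(priv) &&
++ *	    dpaa2_ceetm_classify(skb, net_dev->qdisc, &qdid, &qpri) < 0)
++ *		goto drop;
++ */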
-+ ns = (u64 *)dpaa2_eth_get_ts(skbh);
-+ *ns = DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS * le64_to_cpup(ns);
-+ shhwtstamps.hwtstamp = ns_to_ktime(*ns);
-+ skb_tstamp_tx(skb, &shhwtstamps);
-+ }
++int __init dpaa2_ceetm_register(void)
++{
++ int err = 0;
+
-+ /* Read the status from the Frame Annotation after we unmap the first
-+ * buffer but before we free it. The caller function is responsible
-+ * for checking the status value.
-+ */
-+ if (status)
-+ *status = le32_to_cpu(fas->status);
++ pr_debug(KBUILD_MODNAME ": " DPAA2_CEETM_DESCRIPTION "\n");
+
-+ /* Free SGT buffer kmalloc'ed on tx */
-+ if (fd_format != dpaa2_fd_single)
-+ kfree(skbh);
++ err = register_qdisc(&dpaa2_ceetm_qdisc_ops);
++ if (unlikely(err))
++ pr_err(KBUILD_MODNAME
++ ": %s:%hu:%s(): register_qdisc() = %d\n",
++ KBUILD_BASENAME ".c", __LINE__, __func__, err);
+
-+ /* Move on with skb release */
-+ dev_kfree_skb(skb);
++ return err;
+}
+
-+static int dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
++void __exit dpaa2_ceetm_unregister(void)
+{
-+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
-+ struct device *dev = net_dev->dev.parent;
-+ struct dpaa2_fd fd;
-+ struct rtnl_link_stats64 *percpu_stats;
-+ struct dpaa2_eth_drv_stats *percpu_extras;
-+ struct dpaa2_eth_fq *fq;
-+ u16 queue_mapping = skb_get_queue_mapping(skb);
-+ int err, i;
++ pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
++ KBUILD_BASENAME ".c", __func__);
+
-+ /* If we're congested, stop this tx queue; transmission of the
-+ * current skb happens regardless of congestion state
-+ */
-+ fq = &priv->fq[queue_mapping];
++ unregister_qdisc(&dpaa2_ceetm_qdisc_ops);
++}
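++
++/* dpaa2_ceetm_register() / dpaa2_ceetm_unregister() are meant to be
++ * called from the dpaa2-eth module init/exit paths (illustrative):
++ *
++ *	err = dpaa2_ceetm_register();
++ *	if (err)
++ *		goto err_ceetm;
++ *	...
++ *	dpaa2_ceetm_unregister();
++ */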
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-ceetm.h
+@@ -0,0 +1,182 @@
++// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
++/*
++ * Copyright 2017 NXP
++ *
++ */
+
-+ dma_sync_single_for_cpu(dev, priv->cscn_dma,
-+ DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
-+ if (unlikely(dpaa2_cscn_state_congested(priv->cscn_mem))) {
-+ netif_stop_subqueue(net_dev, queue_mapping);
-+ fq->stats.congestion_entry++;
-+ }
++#ifndef __DPAA2_ETH_CEETM_H
++#define __DPAA2_ETH_CEETM_H
+
-+ percpu_stats = this_cpu_ptr(priv->percpu_stats);
-+ percpu_extras = this_cpu_ptr(priv->percpu_extras);
++#include <net/pkt_sched.h>
++#include <net/pkt_cls.h>
++#include <net/netlink.h>
+
-+ if (unlikely(skb_headroom(skb) < DPAA2_ETH_NEEDED_HEADROOM(priv))) {
-+ struct sk_buff *ns;
++#include "dpaa2-eth.h"
+
-+ ns = skb_realloc_headroom(skb, DPAA2_ETH_NEEDED_HEADROOM(priv));
-+ if (unlikely(!ns)) {
-+ percpu_stats->tx_dropped++;
-+ goto err_alloc_headroom;
-+ }
-+ dev_kfree_skb(skb);
-+ skb = ns;
-+ }
++/* For functional purposes, there are num_tx_queues pfifo qdiscs through which
++ * frames reach the driver. Their handles start from 1:21. Handles 1:1 to 1:20
++ * are reserved for the maximum 32 CEETM channels (majors and minors are in
++ * hex).
++ */
++#define PFIFO_MIN_OFFSET 0x21
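++/* e.g. the pfifo attached to the first Tx queue is expected to get
++ * handle 1:21, the next one 1:22, and so on (illustrative).
++ */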
+
-+ /* We'll be holding a back-reference to the skb until Tx Confirmation;
-+ * we don't want that overwritten by a concurrent Tx with a cloned skb.
-+ */
-+ skb = skb_unshare(skb, GFP_ATOMIC);
-+ if (unlikely(!skb)) {
-+ /* skb_unshare() has already freed the skb */
-+ percpu_stats->tx_dropped++;
-+ return NETDEV_TX_OK;
-+ }
++#define DPAA2_CEETM_MIN_WEIGHT 100
++#define DPAA2_CEETM_MAX_WEIGHT 24800
+
-+ /* Setup the FD fields */
-+ memset(&fd, 0, sizeof(fd));
++#define DPAA2_CEETM_TD_THRESHOLD 1000
+
-+ if (skb_is_nonlinear(skb)) {
-+ err = build_sg_fd(priv, skb, &fd);
-+ percpu_extras->tx_sg_frames++;
-+ percpu_extras->tx_sg_bytes += skb->len;
-+ } else {
-+ err = build_single_fd(priv, skb, &fd);
-+ }
++enum wbfs_group_type {
++ WBFS_GRP_A,
++ WBFS_GRP_B,
++ WBFS_GRP_LARGE
++};
+
-+ if (unlikely(err)) {
-+ percpu_stats->tx_dropped++;
-+ goto err_build_fd;
-+ }
++enum {
++ DPAA2_CEETM_TCA_UNSPEC,
++ DPAA2_CEETM_TCA_COPT,
++ DPAA2_CEETM_TCA_QOPS,
++ DPAA2_CEETM_TCA_MAX,
++};
+
-+ /* Tracing point */
-+ trace_dpaa2_tx_fd(net_dev, &fd);
++/* CEETM configuration types */
++enum dpaa2_ceetm_type {
++ CEETM_ROOT = 1,
++ CEETM_PRIO,
++};
+
-+ for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
-+ err = dpaa2_io_service_enqueue_qd(NULL, priv->tx_qdid, 0,
-+ fq->tx_qdbin, &fd);
-+ /* TODO: This doesn't work. Check on simulator.
-+ * err = dpaa2_io_service_enqueue_fq(NULL,
-+ * priv->fq[0].fqid_tx, &fd);
-+ */
-+ if (err != -EBUSY)
-+ break;
-+ }
-+ percpu_extras->tx_portal_busy += i;
-+ if (unlikely(err < 0)) {
-+ percpu_stats->tx_errors++;
-+ /* Clean up everything, including freeing the skb */
-+ free_tx_fd(priv, &fd, NULL);
-+ } else {
-+ percpu_stats->tx_packets++;
-+ percpu_stats->tx_bytes += dpaa2_fd_get_len(&fd);
-+ }
++enum {
++ STRICT_PRIORITY = 0,
++ WEIGHTED_A,
++ WEIGHTED_B,
++};
+
-+ return NETDEV_TX_OK;
++struct dpaa2_ceetm_shaping_cfg {
++ __u64 cir; /* committed information rate */
++ __u64 eir; /* excess information rate */
++ __u16 cbs; /* committed burst size */
++ __u16 ebs; /* excess burst size */
++ __u8 coupled; /* shaper coupling */
++};
+
-+err_build_fd:
-+err_alloc_headroom:
-+ dev_kfree_skb(skb);
++extern const struct nla_policy dpaa2_ceetm_policy[DPAA2_CEETM_TCA_MAX];
+
-+ return NETDEV_TX_OK;
-+}
++struct dpaa2_ceetm_class;
++struct dpaa2_ceetm_qdisc_stats;
++struct dpaa2_ceetm_class_stats;
+
-+/* Tx confirmation frame processing routine */
-+static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
-+ struct dpaa2_eth_channel *ch,
-+ const struct dpaa2_fd *fd,
-+ struct napi_struct *napi __always_unused,
-+ u16 queue_id)
-+{
-+ struct device *dev = priv->net_dev->dev.parent;
-+ struct rtnl_link_stats64 *percpu_stats;
-+ struct dpaa2_eth_drv_stats *percpu_extras;
-+ u32 status = 0;
-+ bool errors = !!(fd->simple.ctrl & DPAA2_FD_TX_ERR_MASK);
-+ bool check_fas_errors = false;
++/* corresponds to CEETM shaping at LNI level */
++struct dpaa2_root_q {
++ struct Qdisc **qdiscs;
++ struct dpaa2_ceetm_qdisc_stats __percpu *qstats;
++};
+
-+ /* Tracing point */
-+ trace_dpaa2_tx_conf_fd(priv->net_dev, fd);
++/* corresponds to the number of priorities a channel serves */
++struct dpaa2_prio_q {
++ struct dpaa2_ceetm_class *parent;
++ struct dpni_tx_priorities_cfg tx_prio_cfg;
++};
+
-+ percpu_extras = this_cpu_ptr(priv->percpu_extras);
-+ percpu_extras->tx_conf_frames++;
-+ percpu_extras->tx_conf_bytes += dpaa2_fd_get_len(fd);
++struct dpaa2_ceetm_qdisc {
++ struct Qdisc_class_hash clhash;
++ struct tcf_proto *filter_list; /* qdisc attached filters */
+
-+ /* Check congestion state and wake all queues if necessary */
-+ if (unlikely(__netif_subqueue_stopped(priv->net_dev, queue_id))) {
-+ dma_sync_single_for_cpu(dev, priv->cscn_dma,
-+ DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
-+ if (!dpaa2_cscn_state_congested(priv->cscn_mem))
-+ netif_tx_wake_all_queues(priv->net_dev);
-+ }
++ enum dpaa2_ceetm_type type; /* ROOT/PRIO */
++ bool shaped;
++ union {
++ struct dpaa2_root_q root;
++ struct dpaa2_prio_q prio;
++ };
++};
+
-+ /* check frame errors in the FD field */
-+ if (unlikely(errors)) {
-+ check_fas_errors = !!(fd->simple.ctrl & FD_CTRL_FAERR) &&
-+ !!(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV);
-+ if (net_ratelimit())
-+ netdev_dbg(priv->net_dev, "Tx frame FD err: %x08\n",
-+ fd->simple.ctrl & DPAA2_FD_TX_ERR_MASK);
-+ }
++/* CEETM Qdisc configuration parameters */
++struct dpaa2_ceetm_tc_qopt {
++ enum dpaa2_ceetm_type type;
++ __u16 shaped;
++ __u8 prio_group_A;
++ __u8 prio_group_B;
++ __u8 separate_groups;
++};
+
-+ free_tx_fd(priv, fd, check_fas_errors ? &status : NULL);
++/* root class - corresponds to a channel */
++struct dpaa2_root_c {
++ struct dpaa2_ceetm_shaping_cfg shaping_cfg;
++ u32 ch_id;
++};
+
-+ /* if there are no errors, we're done */
-+ if (likely(!errors))
-+ return;
++/* prio class - corresponds to a strict priority queue (group) */
++struct dpaa2_prio_c {
++ struct dpaa2_ceetm_class_stats __percpu *cstats;
++ u32 qpri;
++ u8 mode;
++ u16 weight;
++};
+
-+ percpu_stats = this_cpu_ptr(priv->percpu_stats);
-+ /* Tx-conf logically pertains to the egress path. */
-+ percpu_stats->tx_errors++;
++struct dpaa2_ceetm_class {
++ struct Qdisc_class_common common;
++ int refcnt;
++ struct tcf_proto *filter_list; /* class attached filters */
++ struct Qdisc *parent;
++ struct Qdisc *child;
+
-+ if (net_ratelimit())
-+ netdev_dbg(priv->net_dev, "Tx frame FAS err: %x08\n",
-+ status & DPAA2_FAS_TX_ERR_MASK);
-+}
++ enum dpaa2_ceetm_type type; /* ROOT/PRIO */
++ bool shaped;
++ union {
++ struct dpaa2_root_c root;
++ struct dpaa2_prio_c prio;
++ };
++};
+
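++/* The structures above compose into the following qdisc/class hierarchy
++ * (illustrative handles):
++ *
++ *	ceetm root qdisc 1:0
++ *	    root class 1:1           - one per CEETM channel
++ *	        ceetm prio qdisc 2:0
++ *	            prio class 2:1   - one per traffic class queue (qpri 0)
++ *	            prio class 2:2   - (qpri 1)
++ */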
-+static int set_rx_csum(struct dpaa2_eth_priv *priv, bool enable)
-+{
-+ int err;
++/* CEETM Class configuration parameters */
++struct dpaa2_ceetm_tc_copt {
++ enum dpaa2_ceetm_type type;
++ struct dpaa2_ceetm_shaping_cfg shaping_cfg;
++ __u16 shaped;
++ __u8 mode;
++ __u16 weight;
++};
+
-+ err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
-+ DPNI_OFF_RX_L3_CSUM, enable);
-+ if (err) {
-+ netdev_err(priv->net_dev,
-+ "dpni_set_offload() DPNI_OFF_RX_L3_CSUM failed\n");
-+ return err;
-+ }
++/* CEETM stats */
++struct dpaa2_ceetm_qdisc_stats {
++ __u32 drops;
++};
+
-+ err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
-+ DPNI_OFF_RX_L4_CSUM, enable);
-+ if (err) {
-+ netdev_err(priv->net_dev,
-+ "dpni_set_offload() DPNI_OFF_RX_L4_CSUM failed\n");
-+ return err;
-+ }
++struct dpaa2_ceetm_class_stats {
++ /* Software counters */
++ struct gnet_stats_basic_packed bstats;
++ __u32 ern_drop_count;
++ __u32 congested_count;
++};
+
-+ return 0;
-+}
++struct dpaa2_ceetm_tc_xstats {
++ __u64 ceetm_dequeue_bytes;
++ __u64 ceetm_dequeue_frames;
++ __u64 ceetm_reject_bytes;
++ __u64 ceetm_reject_frames;
++};
+
-+static int set_tx_csum(struct dpaa2_eth_priv *priv, bool enable)
++#ifdef CONFIG_FSL_DPAA2_ETH_CEETM
++int __init dpaa2_ceetm_register(void);
++void __exit dpaa2_ceetm_unregister(void);
++int dpaa2_ceetm_classify(struct sk_buff *skb, struct Qdisc *sch,
++ int *qdid, int *qpri);
++#else
++static inline int dpaa2_ceetm_register(void)
+{
-+ int err;
-+
-+ err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
-+ DPNI_OFF_TX_L3_CSUM, enable);
-+ if (err) {
-+ netdev_err(priv->net_dev,
-+ "dpni_set_offload() DPNI_OFF_RX_L3_CSUM failed\n");
-+ return err;
-+ }
++ return 0;
++}
+
-+ err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
-+ DPNI_OFF_TX_L4_CSUM, enable);
-+ if (err) {
-+ netdev_err(priv->net_dev,
-+ "dpni_set_offload() DPNI_OFF_RX_L4_CSUM failed\n");
-+ return err;
-+ }
++static inline void dpaa2_ceetm_unregister(void) {}
+
++static inline int dpaa2_ceetm_classify(struct sk_buff *skb, struct Qdisc *sch,
++ int *qdid, int *qpri)
++{
+ return 0;
+}
++#endif
+
-+/* Perform a single release command to add buffers
-+ * to the specified buffer pool
-+ */
-+static int add_bufs(struct dpaa2_eth_priv *priv, u16 bpid)
++static inline bool dpaa2_eth_ceetm_is_enabled(struct dpaa2_eth_priv *priv)
+{
-+ struct device *dev = priv->net_dev->dev.parent;
-+ u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
-+ void *buf;
-+ dma_addr_t addr;
-+ int i;
-+
-+ for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) {
-+ /* Allocate buffer visible to WRIOP + skb shared info +
-+ * alignment padding.
-+ */
-+ buf = napi_alloc_frag(DPAA2_ETH_BUF_RAW_SIZE(priv));
-+ if (unlikely(!buf))
-+ goto err_alloc;
-+
-+ buf = PTR_ALIGN(buf, priv->rx_buf_align);
-+
-+ addr = dma_map_single(dev, buf, DPAA2_ETH_RX_BUF_SIZE,
-+ DMA_FROM_DEVICE);
-+ if (unlikely(dma_mapping_error(dev, addr)))
-+ goto err_map;
++ return priv->ceetm_en;
++}
+
-+ buf_array[i] = addr;
++#endif
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c
+@@ -0,0 +1,357 @@
+
-+ /* tracing point */
-+ trace_dpaa2_eth_buf_seed(priv->net_dev,
-+ buf, DPAA2_ETH_BUF_RAW_SIZE(priv),
-+ addr, DPAA2_ETH_RX_BUF_SIZE,
-+ bpid);
-+ }
++/* Copyright 2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
+
-+release_bufs:
-+ /* In case the portal is busy, retry until successful.
-+ * The buffer release function would only fail if the QBMan portal
-+ * was busy, which implies portal contention (i.e. more CPUs than
-+ * portals, i.e. GPPs w/o affine DPIOs). For all practical purposes,
-+ * there is little we can realistically do, short of giving up -
-+ * in which case we'd risk depleting the buffer pool and never again
-+ * receiving the Rx interrupt which would kick-start the refill logic.
-+ * So just keep retrying, at the risk of being moved to ksoftirqd.
-+ */
-+ while (dpaa2_io_service_release(NULL, bpid, buf_array, i))
-+ cpu_relax();
-+ return i;
++#include <linux/module.h>
++#include <linux/debugfs.h>
++#include "dpaa2-eth.h"
++#include "dpaa2-eth-debugfs.h"
+
-+err_map:
-+ put_page(virt_to_head_page(buf));
-+err_alloc:
-+ if (i)
-+ goto release_bufs;
++#define DPAA2_ETH_DBG_ROOT "dpaa2-eth"
+
-+ return 0;
-+}
++static struct dentry *dpaa2_dbg_root;
+
-+static int seed_pool(struct dpaa2_eth_priv *priv, u16 bpid)
++static int dpaa2_dbg_cpu_show(struct seq_file *file, void *offset)
+{
-+ int i, j;
-+ int new_count;
++ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private;
++ struct rtnl_link_stats64 *stats;
++ struct dpaa2_eth_drv_stats *extras;
++ int i;
+
-+ /* This is the lazy seeding of Rx buffer pools.
-+ * dpaa2_add_bufs() is also used on the Rx hotpath and calls
-+ * napi_alloc_frag(). The trouble with that is that it in turn ends up
-+ * calling this_cpu_ptr(), which mandates execution in atomic context.
-+ * Rather than splitting up the code, do a one-off preempt disable.
-+ */
-+ preempt_disable();
-+ for (j = 0; j < priv->num_channels; j++) {
-+ priv->channel[j]->buf_count = 0;
-+ for (i = 0; i < priv->num_bufs;
-+ i += DPAA2_ETH_BUFS_PER_CMD) {
-+ new_count = add_bufs(priv, bpid);
-+ priv->channel[j]->buf_count += new_count;
++ seq_printf(file, "Per-CPU stats for %s\n", priv->net_dev->name);
++ seq_printf(file, "%s%16s%16s%16s%16s%16s%16s%16s%16s%16s\n",
++ "CPU", "Rx", "Rx Err", "Rx SG", "Tx", "Tx Err", "Tx conf",
++ "Tx SG", "Tx realloc", "Enq busy");
+
-+ if (new_count < DPAA2_ETH_BUFS_PER_CMD) {
-+ preempt_enable();
-+ return -ENOMEM;
-+ }
-+ }
++ for_each_online_cpu(i) {
++ stats = per_cpu_ptr(priv->percpu_stats, i);
++ extras = per_cpu_ptr(priv->percpu_extras, i);
++ seq_printf(file, "%3d%16llu%16llu%16llu%16llu%16llu%16llu%16llu%16llu%16llu\n",
++ i,
++ stats->rx_packets,
++ stats->rx_errors,
++ extras->rx_sg_frames,
++ stats->tx_packets,
++ stats->tx_errors,
++ extras->tx_conf_frames,
++ extras->tx_sg_frames,
++ extras->tx_reallocs,
++ extras->tx_portal_busy);
+ }
-+ preempt_enable();
+
+ return 0;
+}
+
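++/* Sample "cpu_stats" output (illustrative values):
++ *
++ *	Per-CPU stats for eth0
++ *	CPU    Rx    Rx Err    Rx SG    Tx    Tx Err    Tx conf    ...
++ *	  0  1024         0        2  2048         0       2048    ...
++ *	  1   996         0        0  1920         0       1920    ...
++ */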
-+/**
-+ * Drain the specified number of buffers from the DPNI's private buffer pool.
-+ * @count must not exceed DPAA2_ETH_BUFS_PER_CMD
-+ */
-+static void drain_bufs(struct dpaa2_eth_priv *priv, int count)
++static int dpaa2_dbg_cpu_open(struct inode *inode, struct file *file)
+{
-+ struct device *dev = priv->net_dev->dev.parent;
-+ u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
-+ void *vaddr;
-+ int ret, i;
++ int err;
++ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private;
+
-+ do {
-+ ret = dpaa2_io_service_acquire(NULL, priv->bpid,
-+ buf_array, count);
-+ if (ret < 0) {
-+ netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n");
-+ return;
-+ }
-+ for (i = 0; i < ret; i++) {
-+ /* Same logic as on regular Rx path */
-+ vaddr = dpaa2_eth_iova_to_virt(priv->iommu_domain,
-+ buf_array[i]);
-+ dma_unmap_single(dev, buf_array[i],
-+ DPAA2_ETH_RX_BUF_SIZE,
-+ DMA_FROM_DEVICE);
-+ put_page(virt_to_head_page(vaddr));
-+ }
-+ } while (ret);
++ err = single_open(file, dpaa2_dbg_cpu_show, priv);
++ if (err < 0)
++ netdev_err(priv->net_dev, "single_open() failed\n");
++
++ return err;
+}
+
-+static void drain_pool(struct dpaa2_eth_priv *priv)
++static const struct file_operations dpaa2_dbg_cpu_ops = {
++ .open = dpaa2_dbg_cpu_open,
++ .read = seq_read,
++ .llseek = seq_lseek,
++ .release = single_release,
++};
++
++static char *fq_type_to_str(struct dpaa2_eth_fq *fq)
+{
-+ preempt_disable();
-+ drain_bufs(priv, DPAA2_ETH_BUFS_PER_CMD);
-+ drain_bufs(priv, 1);
-+ preempt_enable();
++ switch (fq->type) {
++ case DPAA2_RX_FQ:
++ return "Rx";
++ case DPAA2_TX_CONF_FQ:
++ return "Tx conf";
++ case DPAA2_RX_ERR_FQ:
++ return "Rx err";
++ default:
++ return "N/A";
++ }
+}
+
-+/* Function is called from softirq context only, so we don't need to guard
-+ * the access to percpu count
-+ */
-+static int refill_pool(struct dpaa2_eth_priv *priv,
-+ struct dpaa2_eth_channel *ch,
-+ u16 bpid)
++static int dpaa2_dbg_fqs_show(struct seq_file *file, void *offset)
+{
-+ int new_count;
++ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private;
++ struct dpaa2_eth_fq *fq;
++ u32 fcnt, bcnt;
++ int i, err;
+
-+ if (likely(ch->buf_count >= priv->refill_thresh))
-+ return 0;
++ seq_printf(file, "non-zero FQ stats for %s:\n", priv->net_dev->name);
++ seq_printf(file, "%s%16s%16s%16s%16s%16s%16s\n",
++ "VFQID", "CPU", "Traffic Class", "Type", "Frames",
++ "Pending frames", "Congestion");
+
-+ do {
-+ new_count = add_bufs(priv, bpid);
-+ if (unlikely(!new_count)) {
-+ /* Out of memory; abort for now, we'll try later on */
-+ break;
-+ }
-+ ch->buf_count += new_count;
-+ } while (ch->buf_count < priv->num_bufs);
++ for (i = 0; i < priv->num_fqs; i++) {
++ fq = &priv->fq[i];
++ err = dpaa2_io_query_fq_count(NULL, fq->fqid, &fcnt, &bcnt);
++ if (err)
++ fcnt = 0;
+
-+ if (unlikely(ch->buf_count < priv->num_bufs))
-+ return -ENOMEM;
++ /* There are many queues; skip displaying the zero-traffic ones */
++ if (!fq->stats.frames && !fcnt)
++ continue;
++
++ seq_printf(file, "%5d%16d%16d%16s%16llu%16u%16llu\n",
++ fq->fqid,
++ fq->target_cpu,
++ fq->tc,
++ fq_type_to_str(fq),
++ fq->stats.frames,
++ fcnt,
++ fq->stats.congestion_entry);
++ }
+
+ return 0;
+}
+
-+static int pull_channel(struct dpaa2_eth_channel *ch)
++static int dpaa2_dbg_fqs_open(struct inode *inode, struct file *file)
+{
+ int err;
-+ int dequeues = -1;
-+
-+ /* Retry while portal is busy */
-+ do {
-+ err = dpaa2_io_service_pull_channel(NULL, ch->ch_id, ch->store);
-+ dequeues++;
-+ cpu_relax();
-+ } while (err == -EBUSY);
++ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private;
+
-+ ch->stats.dequeue_portal_busy += dequeues;
-+ if (unlikely(err))
-+ ch->stats.pull_err++;
++ err = single_open(file, dpaa2_dbg_fqs_show, priv);
++ if (err < 0)
++ netdev_err(priv->net_dev, "single_open() failed\n");
+
+ return err;
+}
+
-+/* NAPI poll routine
-+ *
-+ * Frames are dequeued from the QMan channel associated with this NAPI context.
-+ * Rx and (if configured) Rx error frames count towards the NAPI budget. Tx
-+ * confirmation frames are limited by a threshold per NAPI poll cycle.
-+ */
-+static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
++static const struct file_operations dpaa2_dbg_fq_ops = {
++ .open = dpaa2_dbg_fqs_open,
++ .read = seq_read,
++ .llseek = seq_lseek,
++ .release = single_release,
++};
++
++static int dpaa2_dbg_ch_show(struct seq_file *file, void *offset)
+{
++ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private;
+ struct dpaa2_eth_channel *ch;
-+ int rx_cleaned = 0, tx_conf_cleaned = 0;
-+ bool store_cleaned;
-+ struct dpaa2_eth_priv *priv;
-+ int err;
-+
-+ ch = container_of(napi, struct dpaa2_eth_channel, napi);
-+ priv = ch->priv;
++ int i;
+
-+ do {
-+ err = pull_channel(ch);
-+ if (unlikely(err))
-+ break;
++ seq_printf(file, "Channel stats for %s:\n", priv->net_dev->name);
++ seq_printf(file, "%s%16s%16s%16s%16s%16s%16s\n",
++ "CHID", "CPU", "Deq busy", "Frames", "CDANs",
++ "Avg frm/CDAN", "Buf count");
+
-+ /* Refill pool if appropriate */
-+ refill_pool(priv, ch, priv->bpid);
++ for (i = 0; i < priv->num_channels; i++) {
++ ch = priv->channel[i];
++ seq_printf(file, "%4d%16d%16llu%16llu%16llu%16llu%16d\n",
++ ch->ch_id,
++ ch->nctx.desired_cpu,
++ ch->stats.dequeue_portal_busy,
++ ch->stats.frames,
++ ch->stats.cdan,
++ ch->stats.cdan ? ch->stats.frames / ch->stats.cdan : 0,
++ ch->buf_count);
++ }
+
-+ store_cleaned = consume_frames(ch, &rx_cleaned,
-+ &tx_conf_cleaned);
++ return 0;
++}
+
-+ /* If we've either consumed the budget with Rx frames,
-+ * or reached the Tx conf threshold, we're done.
-+ */
-+ if (rx_cleaned >= budget ||
-+ tx_conf_cleaned >= TX_CONF_PER_NAPI_POLL)
-+ return budget;
-+ } while (store_cleaned);
++static int dpaa2_dbg_ch_open(struct inode *inode, struct file *file)
++{
++ int err;
++ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private;
+
-+ /* We didn't consume the entire budget, finish napi and
-+ * re-enable data availability notifications.
-+ */
-+ napi_complete(napi);
-+ do {
-+ err = dpaa2_io_service_rearm(NULL, &ch->nctx);
-+ cpu_relax();
-+ } while (err == -EBUSY);
++ err = single_open(file, dpaa2_dbg_ch_show, priv);
++ if (err < 0)
++ netdev_err(priv->net_dev, "single_open() failed\n");
+
-+ return max(rx_cleaned, 1);
++ return err;
+}
+
-+static void enable_ch_napi(struct dpaa2_eth_priv *priv)
++static const struct file_operations dpaa2_dbg_ch_ops = {
++ .open = dpaa2_dbg_ch_open,
++ .read = seq_read,
++ .llseek = seq_lseek,
++ .release = single_release,
++};
++
++static ssize_t dpaa2_dbg_reset_write(struct file *file, const char __user *buf,
++ size_t count, loff_t *offset)
+{
++ struct dpaa2_eth_priv *priv = file->private_data;
++ struct rtnl_link_stats64 *percpu_stats;
++ struct dpaa2_eth_drv_stats *percpu_extras;
++ struct dpaa2_eth_fq *fq;
+ struct dpaa2_eth_channel *ch;
+ int i;
+
-+ for (i = 0; i < priv->num_channels; i++) {
-+ ch = priv->channel[i];
-+ napi_enable(&ch->napi);
++ for_each_online_cpu(i) {
++ percpu_stats = per_cpu_ptr(priv->percpu_stats, i);
++ memset(percpu_stats, 0, sizeof(*percpu_stats));
++
++ percpu_extras = per_cpu_ptr(priv->percpu_extras, i);
++ memset(percpu_extras, 0, sizeof(*percpu_extras));
+ }
-+}
+
-+static void disable_ch_napi(struct dpaa2_eth_priv *priv)
-+{
-+ struct dpaa2_eth_channel *ch;
-+ int i;
++ for (i = 0; i < priv->num_fqs; i++) {
++ fq = &priv->fq[i];
++ memset(&fq->stats, 0, sizeof(fq->stats));
++ }
+
+ for (i = 0; i < priv->num_channels; i++) {
+ ch = priv->channel[i];
-+ napi_disable(&ch->napi);
++ memset(&ch->stats, 0, sizeof(ch->stats));
+ }
++
++ return count;
+}
+
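++/* Any write to the "reset_stats" file clears the software counters
++ * (illustrative usage, assuming debugfs is mounted at /sys/kernel/debug):
++ *
++ *	echo 1 > /sys/kernel/debug/dpaa2-eth/eth0/reset_stats
++ */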
-+static int link_state_update(struct dpaa2_eth_priv *priv)
++static const struct file_operations dpaa2_dbg_reset_ops = {
++ .open = simple_open,
++ .write = dpaa2_dbg_reset_write,
++};
++
++static ssize_t dpaa2_dbg_reset_mc_write(struct file *file,
++ const char __user *buf,
++ size_t count, loff_t *offset)
+{
-+ struct dpni_link_state state;
++ struct dpaa2_eth_priv *priv = file->private_data;
+ int err;
+
-+ err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
-+ if (unlikely(err)) {
++ err = dpni_reset_statistics(priv->mc_io, 0, priv->mc_token);
++ if (err)
+ netdev_err(priv->net_dev,
-+ "dpni_get_link_state() failed\n");
-+ return err;
-+ }
-+
-+ /* Check link state; speed / duplex changes are not treated yet */
-+ if (priv->link_state.up == state.up)
-+ return 0;
-+
-+ priv->link_state = state;
-+ if (state.up) {
-+ netif_carrier_on(priv->net_dev);
-+ netif_tx_start_all_queues(priv->net_dev);
-+ } else {
-+ netif_tx_stop_all_queues(priv->net_dev);
-+ netif_carrier_off(priv->net_dev);
-+ }
-+
-+ netdev_info(priv->net_dev, "Link Event: state %s",
-+ state.up ? "up" : "down");
++ "dpni_reset_statistics() failed %d\n", err);
+
-+ return 0;
++ return count;
+}
+
-+static int dpaa2_eth_open(struct net_device *net_dev)
-+{
-+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
-+ int err;
-+
-+ /* We'll only start the txqs when the link is actually ready; make sure
-+ * we don't race against the link up notification, which may come
-+ * immediately after dpni_enable();
-+ */
-+ netif_tx_stop_all_queues(net_dev);
++static const struct file_operations dpaa2_dbg_reset_mc_ops = {
++ .open = simple_open,
++ .write = dpaa2_dbg_reset_mc_write,
++};
+
-+ /* Also, explicitly set carrier off, otherwise netif_carrier_ok() will
-+ * return true and cause 'ip link show' to report the LOWER_UP flag,
-+ * even though the link notification wasn't even received.
-+ */
-+ netif_carrier_off(net_dev);
++void dpaa2_dbg_add(struct dpaa2_eth_priv *priv)
++{
++ if (!dpaa2_dbg_root)
++ return;
+
-+ err = seed_pool(priv, priv->bpid);
-+ if (err) {
-+ /* Not much to do; the buffer pool, though not filled up,
-+ * may still contain some buffers which would enable us
-+ * to limp on.
-+ */
-+ netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n",
-+ priv->dpbp_dev->obj_desc.id, priv->bpid);
++ /* Create a directory for the interface */
++ priv->dbg.dir = debugfs_create_dir(priv->net_dev->name,
++ dpaa2_dbg_root);
++ if (!priv->dbg.dir) {
++ netdev_err(priv->net_dev, "debugfs_create_dir() failed\n");
++ return;
+ }
+
-+ if (priv->tx_pause_frames)
-+ priv->refill_thresh = priv->num_bufs - DPAA2_ETH_BUFS_PER_CMD;
-+ else
-+ priv->refill_thresh = DPAA2_ETH_REFILL_THRESH_TD;
-+
-+ err = dpni_enable(priv->mc_io, 0, priv->mc_token);
-+ if (err < 0) {
-+ netdev_err(net_dev, "dpni_enable() failed\n");
-+ goto enable_err;
++ /* per-cpu stats file */
++ priv->dbg.cpu_stats = debugfs_create_file("cpu_stats", 0444,
++ priv->dbg.dir, priv,
++ &dpaa2_dbg_cpu_ops);
++ if (!priv->dbg.cpu_stats) {
++ netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
++ goto err_cpu_stats;
+ }
+
-+ /* If the DPMAC object has already processed the link up interrupt,
-+ * we have to learn the link state ourselves.
-+ */
-+ err = link_state_update(priv);
-+ if (err < 0) {
-+ netdev_err(net_dev, "Can't update link state\n");
-+ goto link_state_err;
++ /* per-fq stats file */
++ priv->dbg.fq_stats = debugfs_create_file("fq_stats", 0444,
++ priv->dbg.dir, priv,
++ &dpaa2_dbg_fq_ops);
++ if (!priv->dbg.fq_stats) {
++ netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
++ goto err_fq_stats;
+ }
+
-+ return 0;
++ /* per-channel stats file */
++ priv->dbg.ch_stats = debugfs_create_file("ch_stats", 0444,
++ priv->dbg.dir, priv,
++ &dpaa2_dbg_ch_ops);
++ if (!priv->dbg.ch_stats) {
++ netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
++ goto err_ch_stats;
++ }
+
-+link_state_err:
-+enable_err:
-+ priv->refill_thresh = 0;
-+ drain_pool(priv);
-+ return err;
-+}
-+
-+static int dpaa2_eth_stop(struct net_device *net_dev)
-+{
-+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
-+ int dpni_enabled;
-+ int retries = 10, i;
-+
-+ netif_tx_stop_all_queues(net_dev);
-+ netif_carrier_off(net_dev);
-+
-+ /* Loop while dpni_disable() attempts to drain the egress FQs
-+ * and confirm them back to us.
-+ */
-+ do {
-+ dpni_disable(priv->mc_io, 0, priv->mc_token);
-+ dpni_is_enabled(priv->mc_io, 0, priv->mc_token, &dpni_enabled);
-+ if (dpni_enabled)
-+ /* Allow the MC some slack */
-+ msleep(100);
-+ } while (dpni_enabled && --retries);
-+ if (!retries) {
-+ netdev_warn(net_dev, "Retry count exceeded disabling DPNI\n");
-+ /* Must go on and disable NAPI nonetheless, so we don't crash at
-+ * the next "ifconfig up"
-+ */
++ /* reset stats */
++ priv->dbg.reset_stats = debugfs_create_file("reset_stats", 0200,
++ priv->dbg.dir, priv,
++ &dpaa2_dbg_reset_ops);
++ if (!priv->dbg.reset_stats) {
++ netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
++ goto err_reset_stats;
+ }
+
-+ priv->refill_thresh = 0;
-+
-+ /* Wait for all running napi poll routines to finish, so that no
-+ * new refill operations are started.
-+ */
-+ for (i = 0; i < priv->num_channels; i++)
-+ napi_synchronize(&priv->channel[i]->napi);
-+
-+ /* Empty the buffer pool */
-+ drain_pool(priv);
-+
-+ return 0;
-+}
-+
-+static int dpaa2_eth_init(struct net_device *net_dev)
-+{
-+ u64 supported = 0;
-+ u64 not_supported = 0;
-+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
-+ u32 options = priv->dpni_attrs.options;
-+
-+ /* Capabilities listing */
-+ supported |= IFF_LIVE_ADDR_CHANGE;
-+
-+ if (options & DPNI_OPT_NO_MAC_FILTER)
-+ not_supported |= IFF_UNICAST_FLT;
-+ else
-+ supported |= IFF_UNICAST_FLT;
-+
-+ net_dev->priv_flags |= supported;
-+ net_dev->priv_flags &= ~not_supported;
-+
-+ /* Features */
-+ net_dev->features = NETIF_F_RXCSUM |
-+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
-+ NETIF_F_SG | NETIF_F_HIGHDMA |
-+ NETIF_F_LLTX;
-+ net_dev->hw_features = net_dev->features;
-+
-+ return 0;
-+}
-+
-+static int dpaa2_eth_set_addr(struct net_device *net_dev, void *addr)
-+{
-+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
-+ struct device *dev = net_dev->dev.parent;
-+ int err;
-+
-+ err = eth_mac_addr(net_dev, addr);
-+ if (err < 0) {
-+ dev_err(dev, "eth_mac_addr() failed (%d)\n", err);
-+ return err;
++ /* reset MC stats */
++ priv->dbg.reset_mc_stats = debugfs_create_file("reset_mc_stats",
++ 0222, priv->dbg.dir, priv,
++ &dpaa2_dbg_reset_mc_ops);
++ if (!priv->dbg.reset_mc_stats) {
++ netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
++ goto err_reset_mc_stats;
+ }
+
-+ err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
-+ net_dev->dev_addr);
-+ if (err) {
-+ dev_err(dev, "dpni_set_primary_mac_addr() failed (%d)\n", err);
-+ return err;
-+ }
++ return;
+
-+ return 0;
++err_reset_mc_stats:
++ debugfs_remove(priv->dbg.reset_stats);
++err_reset_stats:
++ debugfs_remove(priv->dbg.ch_stats);
++err_ch_stats:
++ debugfs_remove(priv->dbg.fq_stats);
++err_fq_stats:
++ debugfs_remove(priv->dbg.cpu_stats);
++err_cpu_stats:
++ debugfs_remove(priv->dbg.dir);
+}
+
-+/** Fill in counters maintained by the GPP driver. These may be different from
-+ * the hardware counters obtained by ethtool.
-+ */
-+static void dpaa2_eth_get_stats(struct net_device *net_dev,
-+ struct rtnl_link_stats64 *stats)
++void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv)
+{
-+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
-+ struct rtnl_link_stats64 *percpu_stats;
-+ u64 *cpustats;
-+ u64 *netstats = (u64 *)stats;
-+ int i, j;
-+ int num = sizeof(struct rtnl_link_stats64) / sizeof(u64);
-+
-+ for_each_possible_cpu(i) {
-+ percpu_stats = per_cpu_ptr(priv->percpu_stats, i);
-+ cpustats = (u64 *)percpu_stats;
-+ for (j = 0; j < num; j++)
-+ netstats[j] += cpustats[j];
-+ }
++ debugfs_remove(priv->dbg.reset_mc_stats);
++ debugfs_remove(priv->dbg.reset_stats);
++ debugfs_remove(priv->dbg.fq_stats);
++ debugfs_remove(priv->dbg.ch_stats);
++ debugfs_remove(priv->dbg.cpu_stats);
++ debugfs_remove(priv->dbg.dir);
+}
+
-+static int dpaa2_eth_change_mtu(struct net_device *net_dev, int mtu)
++void dpaa2_eth_dbg_init(void)
+{
-+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
-+ int err;
-+
-+ /* Set the maximum Rx frame length to match the transmit side;
-+ * account for L2 headers when computing the MFL
-+ */
-+ err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token,
-+ (u16)DPAA2_ETH_L2_MAX_FRM(mtu));
-+ if (err) {
-+ netdev_err(net_dev, "dpni_set_max_frame_length() failed\n");
-+ return err;
++ dpaa2_dbg_root = debugfs_create_dir(DPAA2_ETH_DBG_ROOT, NULL);
++ if (!dpaa2_dbg_root) {
++ pr_err("DPAA2-ETH: debugfs create failed\n");
++ return;
+ }
+
-+ net_dev->mtu = mtu;
-+ return 0;
++ pr_info("DPAA2-ETH: debugfs created\n");
+}
+
-+/* Copy mac unicast addresses from @net_dev to @priv.
-+ * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
-+ */
-+static void add_uc_hw_addr(const struct net_device *net_dev,
-+ struct dpaa2_eth_priv *priv)
++void __exit dpaa2_eth_dbg_exit(void)
+{
-+ struct netdev_hw_addr *ha;
-+ int err;
-+
-+ netdev_for_each_uc_addr(ha, net_dev) {
-+ err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
-+ ha->addr);
-+ if (err)
-+ netdev_warn(priv->net_dev,
-+ "Could not add ucast MAC %pM to the filtering table (err %d)\n",
-+ ha->addr, err);
-+ }
++ debugfs_remove(dpaa2_dbg_root);
+}
-+
-+/* Copy mac multicast addresses from @net_dev to @priv
-+ * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.h
+@@ -0,0 +1,60 @@
++/* Copyright 2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
-+static void add_mc_hw_addr(const struct net_device *net_dev,
-+ struct dpaa2_eth_priv *priv)
-+{
-+ struct netdev_hw_addr *ha;
-+ int err;
+
-+ netdev_for_each_mc_addr(ha, net_dev) {
-+ err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
-+ ha->addr);
-+ if (err)
-+ netdev_warn(priv->net_dev,
-+ "Could not add mcast MAC %pM to the filtering table (err %d)\n",
-+ ha->addr, err);
-+ }
-+}
++#ifndef DPAA2_ETH_DEBUGFS_H
++#define DPAA2_ETH_DEBUGFS_H
+
-+static void dpaa2_eth_set_rx_mode(struct net_device *net_dev)
-+{
-+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
-+ int uc_count = netdev_uc_count(net_dev);
-+ int mc_count = netdev_mc_count(net_dev);
-+ u8 max_mac = priv->dpni_attrs.mac_filter_entries;
-+ u32 options = priv->dpni_attrs.options;
-+ u16 mc_token = priv->mc_token;
-+ struct fsl_mc_io *mc_io = priv->mc_io;
-+ int err;
++#include <linux/dcache.h>
+
-+ /* Basic sanity checks; these probably indicate a misconfiguration */
-+ if (options & DPNI_OPT_NO_MAC_FILTER && max_mac != 0)
-+ netdev_info(net_dev,
-+ "mac_filter_entries=%d, DPNI_OPT_NO_MAC_FILTER option must be disabled\n",
-+ max_mac);
++struct dpaa2_eth_priv;
+
-+ /* Force promiscuous if the uc or mc counts exceed our capabilities. */
-+ if (uc_count > max_mac) {
-+ netdev_info(net_dev,
-+ "Unicast addr count reached %d, max allowed is %d; forcing promisc\n",
-+ uc_count, max_mac);
-+ goto force_promisc;
-+ }
-+ if (mc_count + uc_count > max_mac) {
-+ netdev_info(net_dev,
-+ "Unicast + Multicast addr count reached %d, max allowed is %d; forcing promisc\n",
-+ uc_count + mc_count, max_mac);
-+ goto force_mc_promisc;
-+ }
++struct dpaa2_debugfs {
++ struct dentry *dir;
++ struct dentry *fq_stats;
++ struct dentry *ch_stats;
++ struct dentry *cpu_stats;
++ struct dentry *reset_stats;
++ struct dentry *reset_mc_stats;
++};
+
-+ /* Adjust promisc settings due to flag combinations */
-+ if (net_dev->flags & IFF_PROMISC)
-+ goto force_promisc;
-+ if (net_dev->flags & IFF_ALLMULTI) {
-+ /* First, rebuild unicast filtering table. This should be done
-+ * in promisc mode, in order to avoid frame loss while we
-+ * progressively add entries to the table.
-+ * We don't know whether we had been in promisc already, and
-+ * making an MC call to find out is expensive; so set uc promisc
-+ * nonetheless.
-+ */
-+ err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
-+ if (err)
-+ netdev_warn(net_dev, "Can't set uc promisc\n");
-+
-+ /* Actual uc table reconstruction. */
-+ err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 0);
-+ if (err)
-+ netdev_warn(net_dev, "Can't clear uc filters\n");
-+ add_uc_hw_addr(net_dev, priv);
++#ifdef CONFIG_FSL_DPAA2_ETH_DEBUGFS
++void dpaa2_eth_dbg_init(void);
++void dpaa2_eth_dbg_exit(void);
++void dpaa2_dbg_add(struct dpaa2_eth_priv *priv);
++void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv);
++#else
++static inline void dpaa2_eth_dbg_init(void) {}
++static inline void dpaa2_eth_dbg_exit(void) {}
++static inline void dpaa2_dbg_add(struct dpaa2_eth_priv *priv) {}
++static inline void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv) {}
++#endif /* CONFIG_FSL_DPAA2_ETH_DEBUGFS */
+
-+ /* Finally, clear uc promisc and set mc promisc as requested. */
-+ err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
-+ if (err)
-+ netdev_warn(net_dev, "Can't clear uc promisc\n");
-+ goto force_mc_promisc;
-+ }
++#endif /* DPAA2_ETH_DEBUGFS_H */
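The #else branch above supplies empty static inline stubs, so call sites never need CONFIG_FSL_DPAA2_ETH_DEBUGFS guards of their own. A minimal sketch of a hypothetical caller (example_probe/example_remove are illustrative names, not part of the patch):

/* Hypothetical call sites: with the stubs above, these calls compile
 * away entirely when debugfs support is disabled.
 */
static int example_probe(struct dpaa2_eth_priv *priv)
{
	dpaa2_dbg_add(priv);	/* no-op unless CONFIG_FSL_DPAA2_ETH_DEBUGFS */
	return 0;
}

static void example_remove(struct dpaa2_eth_priv *priv)
{
	dpaa2_dbg_remove(priv);
}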
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-trace.h
+@@ -0,0 +1,185 @@
++/* Copyright 2014-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
+
-+ /* Neither unicast nor multicast promisc will be on... eventually.
-+ * For now, rebuild mac filtering tables while forcing both of them on.
-+ */
-+ err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
-+ if (err)
-+ netdev_warn(net_dev, "Can't set uc promisc (%d)\n", err);
-+ err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1);
-+ if (err)
-+ netdev_warn(net_dev, "Can't set mc promisc (%d)\n", err);
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM dpaa2_eth
+
-+ /* Actual mac filtering tables reconstruction */
-+ err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 1);
-+ if (err)
-+ netdev_warn(net_dev, "Can't clear mac filters\n");
-+ add_mc_hw_addr(net_dev, priv);
-+ add_uc_hw_addr(net_dev, priv);
++#if !defined(_DPAA2_ETH_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _DPAA2_ETH_TRACE_H
+
-+ /* Now we can clear both ucast and mcast promisc without risking
-+ * dropping legitimate frames anymore.
-+ */
-+ err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
-+ if (err)
-+ netdev_warn(net_dev, "Can't clear ucast promisc\n");
-+ err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 0);
-+ if (err)
-+ netdev_warn(net_dev, "Can't clear mcast promisc\n");
++#include <linux/skbuff.h>
++#include <linux/netdevice.h>
++#include "dpaa2-eth.h"
++#include <linux/tracepoint.h>
+
-+ return;
++#define TR_FMT "[%s] fd: addr=0x%llx, len=%u, off=%u"
++/* trace_printk format for raw buffer event class */
++#define TR_BUF_FMT "[%s] vaddr=%p size=%zu dma_addr=%pad map_size=%zu bpid=%d"
+
-+force_promisc:
-+ err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
-+ if (err)
-+ netdev_warn(net_dev, "Can't set ucast promisc\n");
-+force_mc_promisc:
-+ err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1);
-+ if (err)
-+ netdev_warn(net_dev, "Can't set mcast promisc\n");
-+}
++/* This is used to declare a class of events.
++ * Individual events of this type will be defined below.
++ */
+
-+static int dpaa2_eth_set_features(struct net_device *net_dev,
-+ netdev_features_t features)
-+{
-+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
-+ netdev_features_t changed = features ^ net_dev->features;
-+ bool enable;
-+ int err;
++/* Store details about a frame descriptor */
++DECLARE_EVENT_CLASS(dpaa2_eth_fd,
++ /* Trace function prototype */
++ TP_PROTO(struct net_device *netdev,
++ const struct dpaa2_fd *fd),
+
-+ if (changed & NETIF_F_RXCSUM) {
-+ enable = !!(features & NETIF_F_RXCSUM);
-+ err = set_rx_csum(priv, enable);
-+ if (err)
-+ return err;
-+ }
++ /* Repeat argument list here */
++ TP_ARGS(netdev, fd),
+
-+ if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
-+ enable = !!(features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
-+ err = set_tx_csum(priv, enable);
-+ if (err)
-+ return err;
-+ }
++ /* A structure containing the relevant information we want
++ * to record. Declare name and type for each normal element,
++ * name, type and size for arrays. Use __string for variable
++ * length strings.
++ */
++ TP_STRUCT__entry(
++ __field(u64, fd_addr)
++ __field(u32, fd_len)
++ __field(u16, fd_offset)
++ __string(name, netdev->name)
++ ),
+
-+ return 0;
-+}
++ /* The function that assigns values to the above declared
++ * fields
++ */
++ TP_fast_assign(
++ __entry->fd_addr = dpaa2_fd_get_addr(fd);
++ __entry->fd_len = dpaa2_fd_get_len(fd);
++ __entry->fd_offset = dpaa2_fd_get_offset(fd);
++ __assign_str(name, netdev->name);
++ ),
+
-+static int dpaa2_eth_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-+{
-+ struct dpaa2_eth_priv *priv = netdev_priv(dev);
-+ struct hwtstamp_config config;
++ /* This is what gets printed when the trace event is
++ * triggered.
++ */
++ TP_printk(TR_FMT,
++ __get_str(name),
++ __entry->fd_addr,
++ __entry->fd_len,
++ __entry->fd_offset)
++);
+
-+ if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
-+ return -EFAULT;
++/* Now declare events of the above type. Format is:
++ * DEFINE_EVENT(class, name, proto, args), with proto and args same as for class
++ */
+
-+ switch (config.tx_type) {
-+ case HWTSTAMP_TX_OFF:
-+ priv->ts_tx_en = false;
-+ break;
-+ case HWTSTAMP_TX_ON:
-+ priv->ts_tx_en = true;
-+ break;
-+ default:
-+ return -ERANGE;
-+ }
++/* Tx (egress) fd */
++DEFINE_EVENT(dpaa2_eth_fd, dpaa2_tx_fd,
++ TP_PROTO(struct net_device *netdev,
++ const struct dpaa2_fd *fd),
+
-+ if (config.rx_filter == HWTSTAMP_FILTER_NONE) {
-+ priv->ts_rx_en = false;
-+ } else {
-+ priv->ts_rx_en = true;
-+ /* TS is set for all frame types, not only those requested */
-+ config.rx_filter = HWTSTAMP_FILTER_ALL;
-+ }
++ TP_ARGS(netdev, fd)
++);
+
-+ return copy_to_user(rq->ifr_data, &config, sizeof(config)) ?
-+ -EFAULT : 0;
-+}
++/* Rx fd */
++DEFINE_EVENT(dpaa2_eth_fd, dpaa2_rx_fd,
++ TP_PROTO(struct net_device *netdev,
++ const struct dpaa2_fd *fd),
+
-+static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-+{
-+ if (cmd == SIOCSHWTSTAMP)
-+ return dpaa2_eth_ts_ioctl(dev, rq, cmd);
++ TP_ARGS(netdev, fd)
++);
+
-+ return -EINVAL;
-+}
++/* Tx confirmation fd */
++DEFINE_EVENT(dpaa2_eth_fd, dpaa2_tx_conf_fd,
++ TP_PROTO(struct net_device *netdev,
++ const struct dpaa2_fd *fd),
+
-+static const struct net_device_ops dpaa2_eth_ops = {
-+ .ndo_open = dpaa2_eth_open,
-+ .ndo_start_xmit = dpaa2_eth_tx,
-+ .ndo_stop = dpaa2_eth_stop,
-+ .ndo_init = dpaa2_eth_init,
-+ .ndo_set_mac_address = dpaa2_eth_set_addr,
-+ .ndo_get_stats64 = dpaa2_eth_get_stats,
-+ .ndo_change_mtu = dpaa2_eth_change_mtu,
-+ .ndo_set_rx_mode = dpaa2_eth_set_rx_mode,
-+ .ndo_set_features = dpaa2_eth_set_features,
-+ .ndo_do_ioctl = dpaa2_eth_ioctl,
-+};
++ TP_ARGS(netdev, fd)
++);
+
-+static void cdan_cb(struct dpaa2_io_notification_ctx *ctx)
-+{
-+ struct dpaa2_eth_channel *ch;
++/* Log data about raw buffers. Useful for tracing DPBP content. */
++TRACE_EVENT(dpaa2_eth_buf_seed,
++ /* Trace function prototype */
++ TP_PROTO(struct net_device *netdev,
++ /* virtual address and size */
++ void *vaddr,
++ size_t size,
++ /* dma map address and size */
++ dma_addr_t dma_addr,
++ size_t map_size,
++ /* buffer pool id, if relevant */
++ u16 bpid),
+
-+ ch = container_of(ctx, struct dpaa2_eth_channel, nctx);
++ /* Repeat argument list here */
++ TP_ARGS(netdev, vaddr, size, dma_addr, map_size, bpid),
+
-+ /* Update NAPI statistics */
-+ ch->stats.cdan++;
++ /* A structure containing the relevant information we want
++ * to record. Declare name and type for each normal element,
++ * name, type and size for arrays. Use __string for variable
++ * length strings.
++ */
++ TP_STRUCT__entry(
++ __field(void *, vaddr)
++ __field(size_t, size)
++ __field(dma_addr_t, dma_addr)
++ __field(size_t, map_size)
++ __field(u16, bpid)
++ __string(name, netdev->name)
++ ),
+
-+ napi_schedule_irqoff(&ch->napi);
-+}
++ /* The function that assigns values to the above declared
++ * fields
++ */
++ TP_fast_assign(
++ __entry->vaddr = vaddr;
++ __entry->size = size;
++ __entry->dma_addr = dma_addr;
++ __entry->map_size = map_size;
++ __entry->bpid = bpid;
++ __assign_str(name, netdev->name);
++ ),
+
-+/* Allocate and configure a DPCON object */
-+static struct fsl_mc_device *setup_dpcon(struct dpaa2_eth_priv *priv)
-+{
-+ struct fsl_mc_device *dpcon;
-+ struct device *dev = priv->net_dev->dev.parent;
-+ struct dpcon_attr attrs;
-+ int err;
++ /* This is what gets printed when the trace event is
++ * triggered.
++ */
++ TP_printk(TR_BUF_FMT,
++ __get_str(name),
++ __entry->vaddr,
++ __entry->size,
++ &__entry->dma_addr,
++ __entry->map_size,
++ __entry->bpid)
++);
+
-+ err = fsl_mc_object_allocate(to_fsl_mc_device(dev),
-+ FSL_MC_POOL_DPCON, &dpcon);
-+ if (err) {
-+ dev_info(dev, "Not enough DPCONs, will go on as-is\n");
-+ return NULL;
-+ }
++/* If only one event of a certain type needs to be declared, use TRACE_EVENT().
++ * The syntax is the same as for DECLARE_EVENT_CLASS().
++ */
+
-+ err = dpcon_open(priv->mc_io, 0, dpcon->obj_desc.id, &dpcon->mc_handle);
-+ if (err) {
-+ dev_err(dev, "dpcon_open() failed\n");
-+ goto err_open;
-+ }
++#endif /* _DPAA2_ETH_TRACE_H */
+
-+ err = dpcon_reset(priv->mc_io, 0, dpcon->mc_handle);
-+ if (err) {
-+ dev_err(dev, "dpcon_reset() failed\n");
-+ goto err_reset;
-+ }
++/* This must be outside ifdef _DPAA2_ETH_TRACE_H */
++#undef TRACE_INCLUDE_PATH
++#define TRACE_INCLUDE_PATH .
++#undef TRACE_INCLUDE_FILE
++#define TRACE_INCLUDE_FILE dpaa2-eth-trace
++#include <trace/define_trace.h>
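Each DEFINE_EVENT() above generates an inline trace_<name>() helper that stays a no-op until the event is enabled through tracefs. A minimal sketch of a call site (the function here is hypothetical; the driver's real call sites appear in dpaa2-eth.c below):

/* Hypothetical call site: trace_dpaa2_tx_fd() is generated by the
 * DEFINE_EVENT(dpaa2_eth_fd, dpaa2_tx_fd, ...) declaration above and
 * records the FD address/length/offset plus the netdev name.
 */
static void example_trace_tx(struct net_device *netdev,
			     const struct dpaa2_fd *fd)
{
	trace_dpaa2_tx_fd(netdev, fd);
}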
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c
+@@ -0,0 +1,3734 @@
++/* Copyright 2014-2016 Freescale Semiconductor Inc.
++ * Copyright 2016-2017 NXP
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/platform_device.h>
++#include <linux/etherdevice.h>
++#include <linux/of_net.h>
++#include <linux/interrupt.h>
++#include <linux/msi.h>
++#include <linux/kthread.h>
++#include <linux/iommu.h>
++#include <linux/net_tstamp.h>
++#include <linux/bpf.h>
++#include <linux/filter.h>
++#include <linux/atomic.h>
++#include <net/sock.h>
++#include <linux/fsl/mc.h>
++#include "dpaa2-eth.h"
++#include "dpaa2-eth-ceetm.h"
+
-+ err = dpcon_get_attributes(priv->mc_io, 0, dpcon->mc_handle, &attrs);
-+ if (err) {
-+ dev_err(dev, "dpcon_get_attributes() failed\n");
-+ goto err_get_attr;
-+ }
++/* CREATE_TRACE_POINTS only needs to be defined once. Other dpaa2 files
++ * using these trace events only need to #include "dpaa2-eth-trace.h"
++ */
++#define CREATE_TRACE_POINTS
++#include "dpaa2-eth-trace.h"
+
-+ err = dpcon_enable(priv->mc_io, 0, dpcon->mc_handle);
-+ if (err) {
-+ dev_err(dev, "dpcon_enable() failed\n");
-+ goto err_enable;
-+ }
++MODULE_LICENSE("Dual BSD/GPL");
++MODULE_AUTHOR("Freescale Semiconductor, Inc");
++MODULE_DESCRIPTION("Freescale DPAA2 Ethernet Driver");
+
-+ return dpcon;
++const char dpaa2_eth_drv_version[] = "0.1";
+
-+err_enable:
-+err_get_attr:
-+err_reset:
-+ dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
-+err_open:
-+ fsl_mc_object_free(dpcon);
++static void *dpaa2_iova_to_virt(struct iommu_domain *domain,
++ dma_addr_t iova_addr)
++{
++ phys_addr_t phys_addr;
+
-+ return NULL;
++ phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr;
++
++ return phys_to_virt(phys_addr);
+}
+
-+static void free_dpcon(struct dpaa2_eth_priv *priv,
-+ struct fsl_mc_device *dpcon)
++static void validate_rx_csum(struct dpaa2_eth_priv *priv,
++ u32 fd_status,
++ struct sk_buff *skb)
+{
-+ dpcon_disable(priv->mc_io, 0, dpcon->mc_handle);
-+ dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
-+ fsl_mc_object_free(dpcon);
++ skb_checksum_none_assert(skb);
++
++ /* HW checksum validation is disabled, nothing to do here */
++ if (!(priv->net_dev->features & NETIF_F_RXCSUM))
++ return;
++
++ /* Read checksum validation bits */
++ if (!((fd_status & DPAA2_FAS_L3CV) &&
++ (fd_status & DPAA2_FAS_L4CV)))
++ return;
++
++ /* Inform the stack there's no need to compute L3/L4 csum anymore */
++ skb->ip_summed = CHECKSUM_UNNECESSARY;
+}
+
-+static struct dpaa2_eth_channel *
-+alloc_channel(struct dpaa2_eth_priv *priv)
++/* Free a received FD.
++ * Not to be used for Tx conf FDs or on any other path.
++ */
++static void free_rx_fd(struct dpaa2_eth_priv *priv,
++ const struct dpaa2_fd *fd,
++ void *vaddr)
+{
-+ struct dpaa2_eth_channel *channel;
-+ struct dpcon_attr attr;
+ struct device *dev = priv->net_dev->dev.parent;
-+ int err;
++ dma_addr_t addr = dpaa2_fd_get_addr(fd);
++ u8 fd_format = dpaa2_fd_get_format(fd);
++ struct dpaa2_sg_entry *sgt;
++ void *sg_vaddr;
++ int i;
+
-+ channel = kzalloc(sizeof(*channel), GFP_KERNEL);
-+ if (!channel)
-+ return NULL;
++ /* If single buffer frame, just free the data buffer */
++ if (fd_format == dpaa2_fd_single)
++ goto free_buf;
++ else if (fd_format != dpaa2_fd_sg)
++ /* We don't support any other format */
++ return;
+
-+ channel->dpcon = setup_dpcon(priv);
-+ if (!channel->dpcon)
-+ goto err_setup;
++ /* For S/G frames, we first need to free all SG entries
++ * except the first one, which was taken care of already
++ */
++ sgt = vaddr + dpaa2_fd_get_offset(fd);
++ for (i = 1; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
++ addr = dpaa2_sg_get_addr(&sgt[i]);
++ sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
++ dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
++ DMA_BIDIRECTIONAL);
+
-+ err = dpcon_get_attributes(priv->mc_io, 0, channel->dpcon->mc_handle,
-+ &attr);
-+ if (err) {
-+ dev_err(dev, "dpcon_get_attributes() failed\n");
-+ goto err_get_attr;
++ skb_free_frag(sg_vaddr);
++ if (dpaa2_sg_is_final(&sgt[i]))
++ break;
+ }
+
-+ channel->dpcon_id = attr.id;
-+ channel->ch_id = attr.qbman_ch_id;
-+ channel->priv = priv;
-+
-+ return channel;
-+
-+err_get_attr:
-+ free_dpcon(priv, channel->dpcon);
-+err_setup:
-+ kfree(channel);
-+ return NULL;
++free_buf:
++ skb_free_frag(vaddr);
+}
+
-+static void free_channel(struct dpaa2_eth_priv *priv,
-+ struct dpaa2_eth_channel *channel)
++/* Build a linear skb based on a single-buffer frame descriptor */
++static struct sk_buff *build_linear_skb(struct dpaa2_eth_priv *priv,
++ struct dpaa2_eth_channel *ch,
++ const struct dpaa2_fd *fd,
++ void *fd_vaddr)
+{
-+ free_dpcon(priv, channel->dpcon);
-+ kfree(channel);
++ struct sk_buff *skb = NULL;
++ u16 fd_offset = dpaa2_fd_get_offset(fd);
++ u32 fd_length = dpaa2_fd_get_len(fd);
++
++ ch->buf_count--;
++
++ skb = build_skb(fd_vaddr, DPAA2_ETH_SKB_SIZE);
++ if (unlikely(!skb))
++ return NULL;
++
++ skb_reserve(skb, fd_offset);
++ skb_put(skb, fd_length);
++
++ return skb;
+}
+
-+/* DPIO setup: allocate and configure QBMan channels, setup core affinity
-+ * and register data availability notifications
-+ */
-+static int setup_dpio(struct dpaa2_eth_priv *priv)
++/* Build a non-linear (fragmented) skb based on an S/G table */
++static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv,
++ struct dpaa2_eth_channel *ch,
++ struct dpaa2_sg_entry *sgt)
+{
-+ struct dpaa2_io_notification_ctx *nctx;
-+ struct dpaa2_eth_channel *channel;
-+ struct dpcon_notification_cfg dpcon_notif_cfg;
++ struct sk_buff *skb = NULL;
+ struct device *dev = priv->net_dev->dev.parent;
-+ int i, err;
-+
-+ /* We want the ability to spread ingress traffic (RX, TX conf) to as
-+ * many cores as possible, so we need one channel for each core
-+ * (unless there are fewer queues than cores, in which case the extra
-+ * channels would be wasted).
-+ * Allocate one channel per core and register it to the core's
-+ * affine DPIO. If not enough channels are available for all cores
-+ * or if some cores don't have an affine DPIO, there will be no
-+ * ingress frame processing on those cores.
-+ */
-+ cpumask_clear(&priv->dpio_cpumask);
-+ for_each_online_cpu(i) {
-+ /* Try to allocate a channel */
-+ channel = alloc_channel(priv);
-+ if (!channel) {
-+ dev_info(dev,
-+ "No affine channel for cpu %d and above\n", i);
-+ goto err_alloc_ch;
-+ }
++ void *sg_vaddr;
++ dma_addr_t sg_addr;
++ u16 sg_offset;
++ u32 sg_length;
++ struct page *page, *head_page;
++ int page_offset;
++ int i;
+
-+ priv->channel[priv->num_channels] = channel;
++ for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
++ struct dpaa2_sg_entry *sge = &sgt[i];
+
-+ nctx = &channel->nctx;
-+ nctx->is_cdan = 1;
-+ nctx->cb = cdan_cb;
-+ nctx->id = channel->ch_id;
-+ nctx->desired_cpu = i;
++ /* NOTE: We only support SG entries in dpaa2_sg_single format,
++ * but this is the only format we may receive from HW anyway
++ */
+
-+ /* Register the new context */
-+ err = dpaa2_io_service_register(NULL, nctx);
-+ if (err) {
-+ dev_dbg(dev, "No affine DPIO for cpu %d\n", i);
-+ /* If no affine DPIO for this core, there's probably
-+ * none available for next cores either.
++ /* Get the address and length from the S/G entry */
++ sg_addr = dpaa2_sg_get_addr(sge);
++ sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, sg_addr);
++ dma_unmap_single(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE,
++ DMA_BIDIRECTIONAL);
++
++ sg_length = dpaa2_sg_get_len(sge);
++
++ if (i == 0) {
++ /* We build the skb around the first data buffer */
++ skb = build_skb(sg_vaddr, DPAA2_ETH_SKB_SIZE);
++ if (unlikely(!skb)) {
++ /* Free the first SG entry now, since we already
++ * unmapped it and obtained the virtual address
++ */
++ skb_free_frag(sg_vaddr);
++
++ /* We still need to subtract the buffers used
++ * by this FD from our software counter
++ */
++ while (i < DPAA2_ETH_MAX_SG_ENTRIES &&
++ !dpaa2_sg_is_final(&sgt[i]))
++ i++;
++ break;
++ }
++
++ sg_offset = dpaa2_sg_get_offset(sge);
++ skb_reserve(skb, sg_offset);
++ skb_put(skb, sg_length);
++ } else {
++ /* Rest of the data buffers are stored as skb frags */
++ page = virt_to_page(sg_vaddr);
++ head_page = virt_to_head_page(sg_vaddr);
++
++ /* Offset in page (which may be compound).
++ * Data in subsequent SG entries is stored from the
++ * beginning of the buffer, so we don't need to add the
++ * sg_offset.
+ */
-+ goto err_service_reg;
-+ }
++ page_offset = ((unsigned long)sg_vaddr &
++ (PAGE_SIZE - 1)) +
++ (page_address(page) - page_address(head_page));
+
-+ /* Register DPCON notification with MC */
-+ dpcon_notif_cfg.dpio_id = nctx->dpio_id;
-+ dpcon_notif_cfg.priority = 0;
-+ dpcon_notif_cfg.user_ctx = nctx->qman64;
-+ err = dpcon_set_notification(priv->mc_io, 0,
-+ channel->dpcon->mc_handle,
-+ &dpcon_notif_cfg);
-+ if (err) {
-+ dev_err(dev, "dpcon_set_notification failed()\n");
-+ goto err_set_cdan;
++ skb_add_rx_frag(skb, i - 1, head_page, page_offset,
++ sg_length, DPAA2_ETH_RX_BUF_SIZE);
+ }
+
-+ /* If we managed to allocate a channel and also found an affine
-+ * DPIO for this core, add it to the final mask
-+ */
-+ cpumask_set_cpu(i, &priv->dpio_cpumask);
-+ priv->num_channels++;
-+
-+ /* Stop if we already have enough channels to accommodate all
-+ * RX and TX conf queues
-+ */
-+ if (priv->num_channels == dpaa2_eth_queue_count(priv))
++ if (dpaa2_sg_is_final(sge))
+ break;
+ }
+
-+ /* Tx confirmation queues can only be serviced by cpus
-+ * with an affine DPIO/channel
-+ */
-+ cpumask_copy(&priv->txconf_cpumask, &priv->dpio_cpumask);
++ WARN_ONCE(i == DPAA2_ETH_MAX_SG_ENTRIES, "Final bit not set in SGT");
+
-+ return 0;
++ /* Count all data buffers + SG table buffer */
++ ch->buf_count -= i + 2;
+
-+err_set_cdan:
-+ dpaa2_io_service_deregister(NULL, nctx);
-+err_service_reg:
-+ free_channel(priv, channel);
-+err_alloc_ch:
-+ if (cpumask_empty(&priv->dpio_cpumask)) {
-+ dev_dbg(dev, "No cpu with an affine DPIO/DPCON\n");
-+ return -ENODEV;
++ return skb;
++}
++
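The page_offset arithmetic in build_frag_skb() deserves a worked example: the low PAGE_SIZE bits give the offset inside the (possibly tail) page, and the page_address() difference lifts that to an offset inside the head page, which is what skb_add_rx_frag() expects. A sketch with hypothetical addresses, assuming 4 KiB pages:

/* Worked example (hypothetical addresses, PAGE_SIZE = 4 KiB): a buffer
 * lands at head_page_base + 0x1100, i.e. in the second page of an
 * order-1 compound page.
 *
 *   in-page offset:    0x1100 & (PAGE_SIZE - 1)           = 0x100
 *   tail-page shift:   page_address(page)
 *                      - page_address(head_page)          = 0x1000
 *   page_offset:       0x100 + 0x1000                     = 0x1100
 *
 * which is exactly the fragment's distance from the start of the
 * head page.
 */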
++static int dpaa2_eth_xdp_tx(struct dpaa2_eth_priv *priv,
++ struct dpaa2_fd *fd,
++ void *buf_start,
++ u16 queue_id)
++{
++ struct dpaa2_eth_fq *fq;
++ struct rtnl_link_stats64 *percpu_stats;
++ struct dpaa2_eth_drv_stats *percpu_extras;
++ struct dpaa2_faead *faead;
++ u32 ctrl, frc;
++ int i, err;
++
++ /* Mark the egress frame annotation area as valid */
++ frc = dpaa2_fd_get_frc(fd);
++ dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV);
++ dpaa2_fd_set_ctrl(fd, DPAA2_FD_CTRL_ASAL);
++
++ ctrl = DPAA2_FAEAD_A4V | DPAA2_FAEAD_A2V | DPAA2_FAEAD_EBDDV;
++ faead = dpaa2_get_faead(buf_start, false);
++ faead->ctrl = cpu_to_le32(ctrl);
++ faead->conf_fqid = 0;
++
++ percpu_stats = this_cpu_ptr(priv->percpu_stats);
++ percpu_extras = this_cpu_ptr(priv->percpu_extras);
++
++ fq = &priv->fq[queue_id];
++ for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
++ err = dpaa2_io_service_enqueue_qd(fq->channel->dpio,
++ priv->tx_qdid, 0,
++ fq->tx_qdbin, fd);
++ if (err != -EBUSY)
++ break;
+ }
-+ cpumask_copy(&priv->txconf_cpumask, &priv->dpio_cpumask);
+
-+ dev_info(dev, "Cores %*pbl available for processing ingress traffic\n",
-+ cpumask_pr_args(&priv->dpio_cpumask));
++ percpu_extras->tx_portal_busy += i;
++ if (unlikely(err)) {
++ percpu_stats->tx_errors++;
++ } else {
++ percpu_stats->tx_packets++;
++ percpu_stats->tx_bytes += dpaa2_fd_get_len(fd);
++ }
+
-+ return 0;
++ return err;
+}
+
-+static void free_dpio(struct dpaa2_eth_priv *priv)
++static void free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, int count)
+{
++ struct device *dev = priv->net_dev->dev.parent;
++ void *vaddr;
+ int i;
-+ struct dpaa2_eth_channel *ch;
+
-+ /* deregister CDAN notifications and free channels */
-+ for (i = 0; i < priv->num_channels; i++) {
-+ ch = priv->channel[i];
-+ dpaa2_io_service_deregister(NULL, &ch->nctx);
-+ free_channel(priv, ch);
++ for (i = 0; i < count; i++) {
++ /* Same logic as on regular Rx path */
++ vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]);
++ dma_unmap_single(dev, buf_array[i], DPAA2_ETH_RX_BUF_SIZE,
++ DMA_BIDIRECTIONAL);
++ skb_free_frag(vaddr);
+ }
+}
+
-+static struct dpaa2_eth_channel *get_affine_channel(struct dpaa2_eth_priv *priv,
-+ int cpu)
++static void release_fd_buf(struct dpaa2_eth_priv *priv,
++ struct dpaa2_eth_channel *ch,
++ dma_addr_t addr)
+{
-+ struct device *dev = priv->net_dev->dev.parent;
-+ int i;
++ int err;
+
-+ for (i = 0; i < priv->num_channels; i++)
-+ if (priv->channel[i]->nctx.desired_cpu == cpu)
-+ return priv->channel[i];
++ ch->rel_buf_array[ch->rel_buf_cnt++] = addr;
++ if (likely(ch->rel_buf_cnt < DPAA2_ETH_BUFS_PER_CMD))
++ return;
+
-+ /* We should never get here. Issue a warning and return
-+ * the first channel, because it's still better than nothing
-+ */
-+ dev_warn(dev, "No affine channel found for cpu %d\n", cpu);
++ while ((err = dpaa2_io_service_release(ch->dpio, priv->bpid,
++ ch->rel_buf_array,
++ ch->rel_buf_cnt)) == -EBUSY)
++ cpu_relax();
+
-+ return priv->channel[0];
++ if (err)
++ free_bufs(priv, ch->rel_buf_array, ch->rel_buf_cnt);
++
++ ch->rel_buf_cnt = 0;
+}
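release_fd_buf() batches recycled buffer addresses and only talks to the hardware once DPAA2_ETH_BUFS_PER_CMD of them have accumulated, spinning on -EBUSY and freeing the batch outright if the release still fails. The same batch-and-flush idiom, reduced to a self-contained sketch (BATCH_MAX and flush_cmd() are illustrative stand-ins, the latter for dpaa2_io_service_release()):

#include <errno.h>

#define BATCH_MAX 7	/* assumed to mirror DPAA2_ETH_BUFS_PER_CMD */

struct batch {
	unsigned long long addr[BATCH_MAX];
	int cnt;
};

/* Stand-in for the hardware release command; pretend it succeeds */
static int flush_cmd(const unsigned long long *addrs, int n)
{
	(void)addrs;
	(void)n;
	return 0;
}

static void batch_add(struct batch *b, unsigned long long addr)
{
	b->addr[b->cnt++] = addr;
	if (b->cnt < BATCH_MAX)
		return;			/* keep accumulating */

	while (flush_cmd(b->addr, b->cnt) == -EBUSY)
		;			/* busy portal: retry */

	b->cnt = 0;			/* batch handed off (or abandoned) */
}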
+
-+static void set_fq_affinity(struct dpaa2_eth_priv *priv)
++/* Main Rx frame processing routine */
++static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
++ struct dpaa2_eth_channel *ch,
++ const struct dpaa2_fd *fd,
++ struct napi_struct *napi,
++ u16 queue_id)
+{
++ dma_addr_t addr = dpaa2_fd_get_addr(fd);
++ u8 fd_format = dpaa2_fd_get_format(fd);
++ void *vaddr;
++ struct sk_buff *skb;
++ struct rtnl_link_stats64 *percpu_stats;
++ struct dpaa2_eth_drv_stats *percpu_extras;
+ struct device *dev = priv->net_dev->dev.parent;
-+ struct cpumask xps_mask = CPU_MASK_NONE;
-+ struct dpaa2_eth_fq *fq;
-+ int rx_cpu, txc_cpu;
-+ int i, err;
++ struct dpaa2_fas *fas;
++ void *buf_data;
++ u32 status = 0;
++ struct bpf_prog *xdp_prog;
++ struct xdp_buff xdp;
++ u32 xdp_act;
+
-+ /* For each FQ, pick one channel/CPU to deliver frames to.
-+ * This may well change at runtime, either through irqbalance or
-+ * through direct user intervention.
-+ */
-+ rx_cpu = cpumask_first(&priv->dpio_cpumask);
-+ txc_cpu = cpumask_first(&priv->txconf_cpumask);
++ /* Tracing point */
++ trace_dpaa2_rx_fd(priv->net_dev, fd);
+
-+ for (i = 0; i < priv->num_fqs; i++) {
-+ fq = &priv->fq[i];
-+ switch (fq->type) {
-+ case DPAA2_RX_FQ:
-+ case DPAA2_RX_ERR_FQ:
-+ fq->target_cpu = rx_cpu;
-+ rx_cpu = cpumask_next(rx_cpu, &priv->dpio_cpumask);
-+ if (rx_cpu >= nr_cpu_ids)
-+ rx_cpu = cpumask_first(&priv->dpio_cpumask);
-+ break;
-+ case DPAA2_TX_CONF_FQ:
-+ fq->target_cpu = txc_cpu;
++ vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
++ dma_sync_single_for_cpu(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
++ DMA_BIDIRECTIONAL);
+
-+ /* register txc_cpu to XPS */
-+ cpumask_set_cpu(txc_cpu, &xps_mask);
-+ err = netif_set_xps_queue(priv->net_dev, &xps_mask,
-+ fq->flowid);
-+ if (err)
-+ dev_info_once(dev,
-+ "Tx: error setting XPS queue\n");
-+ cpumask_clear_cpu(txc_cpu, &xps_mask);
++ fas = dpaa2_get_fas(vaddr, false);
++ prefetch(fas);
++ buf_data = vaddr + dpaa2_fd_get_offset(fd);
++ prefetch(buf_data);
+
-+ txc_cpu = cpumask_next(txc_cpu, &priv->txconf_cpumask);
-+ if (txc_cpu >= nr_cpu_ids)
-+ txc_cpu = cpumask_first(&priv->txconf_cpumask);
-+ break;
-+ default:
-+ dev_err(dev, "Unknown FQ type: %d\n", fq->type);
++ percpu_stats = this_cpu_ptr(priv->percpu_stats);
++ percpu_extras = this_cpu_ptr(priv->percpu_extras);
++
++ xdp_prog = READ_ONCE(ch->xdp_prog);
++
++ if (fd_format == dpaa2_fd_single) {
++ if (xdp_prog) {
++ xdp.data = buf_data;
++ xdp.data_end = buf_data + dpaa2_fd_get_len(fd);
++ /* for now, we don't support changes in header size */
++ xdp.data_hard_start = buf_data;
++
++ /* update stats here, as we won't reach the code
++ * that does that for standard frames
++ */
++ percpu_stats->rx_packets++;
++ percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
++
++ xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);
++ switch (xdp_act) {
++ case XDP_PASS:
++ break;
++ default:
++ bpf_warn_invalid_xdp_action(xdp_act);
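++ /* fall through: invalid actions are treated as drop */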
++ case XDP_ABORTED:
++ case XDP_DROP:
++ release_fd_buf(priv, ch, addr);
++ goto drop_cnt;
++ case XDP_TX:
++ if (dpaa2_eth_xdp_tx(priv, (struct dpaa2_fd *)fd, vaddr,
++ queue_id)) {
++ dma_unmap_single(dev, addr,
++ DPAA2_ETH_RX_BUF_SIZE,
++ DMA_BIDIRECTIONAL);
++ free_rx_fd(priv, fd, vaddr);
++ ch->buf_count--;
++ }
++ return;
++ }
+ }
-+ fq->channel = get_affine_channel(priv, fq->target_cpu);
++ dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
++ DMA_BIDIRECTIONAL);
++ skb = build_linear_skb(priv, ch, fd, vaddr);
++ } else if (fd_format == dpaa2_fd_sg) {
++ dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
++ DMA_BIDIRECTIONAL);
++ skb = build_frag_skb(priv, ch, buf_data);
++ skb_free_frag(vaddr);
++ percpu_extras->rx_sg_frames++;
++ percpu_extras->rx_sg_bytes += dpaa2_fd_get_len(fd);
++ } else {
++ /* We don't support any other format */
++ goto drop_cnt;
+ }
-+}
+
-+static void setup_fqs(struct dpaa2_eth_priv *priv)
-+{
-+ int i;
++ if (unlikely(!skb))
++ goto drop_fd;
+
-+ /* We have one TxConf FQ per Tx flow. Tx queues MUST be at the
-+ * beginning of the queue array.
-+ * The numbers of Rx and Tx queues are the same.
-+ * We only support one traffic class for now.
-+ */
-+ for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
-+ priv->fq[priv->num_fqs].type = DPAA2_TX_CONF_FQ;
-+ priv->fq[priv->num_fqs].consume = dpaa2_eth_tx_conf;
-+ priv->fq[priv->num_fqs++].flowid = (u16)i;
++ prefetch(skb->data);
++
++ /* Get the timestamp value */
++ if (priv->ts_rx_en) {
++ struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
++ u64 *ns = dpaa2_get_ts(vaddr, false);
++
++ *ns = DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS * le64_to_cpup(ns);
++ memset(shhwtstamps, 0, sizeof(*shhwtstamps));
++ shhwtstamps->hwtstamp = ns_to_ktime(*ns);
+ }
+
-+ for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
-+ priv->fq[priv->num_fqs].type = DPAA2_RX_FQ;
-+ priv->fq[priv->num_fqs].consume = dpaa2_eth_rx;
-+ priv->fq[priv->num_fqs++].flowid = (u16)i;
++ /* Check if we need to validate the L4 csum */
++ if (likely(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) {
++ status = le32_to_cpu(fas->status);
++ validate_rx_csum(priv, status, skb);
+ }
+
-+#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
-+ /* We have exactly one Rx error queue per DPNI */
-+ priv->fq[priv->num_fqs].type = DPAA2_RX_ERR_FQ;
-+ priv->fq[priv->num_fqs++].consume = dpaa2_eth_rx_err;
-+#endif
++ skb->protocol = eth_type_trans(skb, priv->net_dev);
+
-+ /* For each FQ, decide on which core to process incoming frames */
-+ set_fq_affinity(priv);
++ /* Record Rx queue - this will be used when picking a Tx queue to
++ * forward the frames. We're keeping flow affinity through the
++ * network stack.
++ */
++ skb_record_rx_queue(skb, queue_id);
++
++ percpu_stats->rx_packets++;
++ percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
++
++ napi_gro_receive(napi, skb);
++
++ return;
++
++drop_fd:
++ free_rx_fd(priv, fd, vaddr);
++drop_cnt:
++ percpu_stats->rx_dropped++;
+}
+
-+/* Allocate and configure one buffer pool for each interface */
-+static int setup_dpbp(struct dpaa2_eth_priv *priv)
++#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
++/* Processing of Rx frames received on the error FQ
++ * We check and print the error bits and then free the frame
++ */
++static void dpaa2_eth_rx_err(struct dpaa2_eth_priv *priv,
++ struct dpaa2_eth_channel *ch,
++ const struct dpaa2_fd *fd,
++ struct napi_struct *napi __always_unused,
++ u16 queue_id __always_unused)
+{
-+ int err;
-+ struct fsl_mc_device *dpbp_dev;
-+ struct dpbp_attr dpbp_attrs;
+ struct device *dev = priv->net_dev->dev.parent;
++ dma_addr_t addr = dpaa2_fd_get_addr(fd);
++ void *vaddr;
++ struct rtnl_link_stats64 *percpu_stats;
++ struct dpaa2_fas *fas;
++ u32 status = 0;
++ u32 fd_errors;
++ bool has_fas_errors = false;
+
-+ err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP,
-+ &dpbp_dev);
-+ if (err) {
-+ dev_err(dev, "DPBP device allocation failed\n");
-+ return err;
-+ }
-+
-+ priv->dpbp_dev = dpbp_dev;
++ vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
++ dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, DMA_BIDIRECTIONAL);
+
-+ err = dpbp_open(priv->mc_io, 0, priv->dpbp_dev->obj_desc.id,
-+ &dpbp_dev->mc_handle);
-+ if (err) {
-+ dev_err(dev, "dpbp_open() failed\n");
-+ goto err_open;
++ /* check frame errors in the FD field */
++ fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_RX_ERR_MASK;
++ if (likely(fd_errors)) {
++ has_fas_errors = (fd_errors & FD_CTRL_FAERR) &&
++ !!(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV);
++ if (net_ratelimit())
++ netdev_dbg(priv->net_dev, "RX frame FD err: %08x\n",
++ fd_errors);
+ }
+
-+ err = dpbp_reset(priv->mc_io, 0, dpbp_dev->mc_handle);
-+ if (err) {
-+ dev_err(dev, "dpbp_reset() failed\n");
-+ goto err_reset;
++ /* check frame errors in the FAS field */
++ if (has_fas_errors) {
++ fas = dpaa2_get_fas(vaddr, false);
++ status = le32_to_cpu(fas->status);
++ if (net_ratelimit())
++ netdev_dbg(priv->net_dev, "Rx frame FAS err: 0x%08x\n",
++ status & DPAA2_FAS_RX_ERR_MASK);
+ }
++ free_rx_fd(priv, fd, vaddr);
+
-+ err = dpbp_enable(priv->mc_io, 0, dpbp_dev->mc_handle);
-+ if (err) {
-+ dev_err(dev, "dpbp_enable() failed\n");
-+ goto err_enable;
-+ }
++ percpu_stats = this_cpu_ptr(priv->percpu_stats);
++ percpu_stats->rx_errors++;
++ ch->buf_count--;
++}
++#endif
+
-+ err = dpbp_get_attributes(priv->mc_io, 0, dpbp_dev->mc_handle,
-+ &dpbp_attrs);
-+ if (err) {
-+ dev_err(dev, "dpbp_get_attributes() failed\n");
-+ goto err_get_attr;
-+ }
++/* Consume all frames pull-dequeued into the store. This is the simplest way to
++ * make sure we don't accidentally issue another volatile dequeue which would
++ * overwrite (leak) frames already in the store.
++ *
++ * The number of frames is returned using the last 2 output arguments,
++ * separately for Rx and Tx confirmations.
++ *
++ * Observance of NAPI budget is not our concern, leaving that to the caller.
++ */
++static bool consume_frames(struct dpaa2_eth_channel *ch, int *rx_cleaned,
++ int *tx_conf_cleaned)
++{
++ struct dpaa2_eth_priv *priv = ch->priv;
++ struct dpaa2_eth_fq *fq = NULL;
++ struct dpaa2_dq *dq;
++ const struct dpaa2_fd *fd;
++ int cleaned = 0;
++ int is_last;
+
-+ priv->bpid = dpbp_attrs.bpid;
-+ priv->num_bufs = DPAA2_ETH_NUM_BUFS_FC / priv->num_channels;
++ do {
++ dq = dpaa2_io_store_next(ch->store, &is_last);
++ if (unlikely(!dq)) {
++ /* If we're here, we *must* have placed a
++ * volatile dequeue command, so keep reading through
++ * the store until we get some sort of valid response
++ * token (either a valid frame or an "empty dequeue")
++ */
++ continue;
++ }
+
-+ return 0;
++ fd = dpaa2_dq_fd(dq);
++ prefetch(fd);
+
-+err_get_attr:
-+ dpbp_disable(priv->mc_io, 0, dpbp_dev->mc_handle);
-+err_enable:
-+err_reset:
-+ dpbp_close(priv->mc_io, 0, dpbp_dev->mc_handle);
-+err_open:
-+ fsl_mc_object_free(dpbp_dev);
++ fq = (struct dpaa2_eth_fq *)(uintptr_t)dpaa2_dq_fqd_ctx(dq);
++ fq->consume(priv, ch, fd, &ch->napi, fq->flowid);
++ cleaned++;
++ } while (!is_last);
+
-+ return err;
++ if (!cleaned)
++ return false;
++
++ /* All frames brought in store by a volatile dequeue
++ * come from the same queue
++ */
++ if (fq->type == DPAA2_TX_CONF_FQ)
++ *tx_conf_cleaned += cleaned;
++ else
++ *rx_cleaned += cleaned;
++
++ fq->stats.frames += cleaned;
++ ch->stats.frames += cleaned;
++
++ return true;
+}
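consume_frames() only drains what an earlier volatile dequeue deposited in the store; issuing those dequeues and honoring the NAPI budget is left to the poll callback, which lives in a later hunk. A rough sketch of that pairing, assuming the DPIO pull API (dpaa2_io_service_pull_channel()) used by this driver family; napi_complete handling and CDAN re-arming are omitted:

/* Rough shape of the poll loop (sketch only; the real callback also
 * completes NAPI and re-arms the channel data availability notification).
 */
static int example_poll(struct napi_struct *napi, int budget)
{
	struct dpaa2_eth_channel *ch;
	int rx_cleaned = 0, txconf_cleaned = 0;

	ch = container_of(napi, struct dpaa2_eth_channel, napi);

	do {
		/* one volatile dequeue refills the store */
		if (dpaa2_io_service_pull_channel(ch->dpio, ch->ch_id,
						  ch->store))
			break;
		/* then consume_frames() drains it completely */
	} while (consume_frames(ch, &rx_cleaned, &txconf_cleaned) &&
		 rx_cleaned < budget);

	return rx_cleaned < budget ? rx_cleaned : budget;
}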
+
-+static void free_dpbp(struct dpaa2_eth_priv *priv)
++/* Configure the egress frame annotation for timestamp update */
++static void enable_tx_tstamp(struct dpaa2_fd *fd, void *buf_start)
+{
-+ drain_pool(priv);
-+ dpbp_disable(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
-+ dpbp_close(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
-+ fsl_mc_object_free(priv->dpbp_dev);
++ struct dpaa2_faead *faead;
++ u32 ctrl, frc;
++
++ /* Mark the egress frame annotation area as valid */
++ frc = dpaa2_fd_get_frc(fd);
++ dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV);
++
++ /* Set hardware annotation size */
++ ctrl = dpaa2_fd_get_ctrl(fd);
++ dpaa2_fd_set_ctrl(fd, ctrl | DPAA2_FD_CTRL_ASAL);
++
++ /* Enable the UPD (update prepended data) bit in the FAEAD field of
++ * the hardware frame annotation area
++ */
++ ctrl = DPAA2_FAEAD_A2V | DPAA2_FAEAD_UPDV | DPAA2_FAEAD_UPD;
++ faead = dpaa2_get_faead(buf_start, true);
++ faead->ctrl = cpu_to_le32(ctrl);
+}
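enable_tx_tstamp() only runs once the user has switched on hardware Tx timestamping through the standard SIOCSHWTSTAMP ioctl (the handler visible earlier in this patch accepts HWTSTAMP_TX_ON and upgrades any specific Rx filter to HWTSTAMP_FILTER_ALL). A self-contained userspace sketch of that request; the helper name and error handling are illustrative:

#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/sockios.h>
#include <linux/net_tstamp.h>

/* Ask the driver for HW Tx timestamps and timestamping of all Rx frames */
static int enable_hw_tstamp(int sock, const char *ifname)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_ALL,
	};
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;

	return ioctl(sock, SIOCSHWTSTAMP, &ifr);	/* 0 on success */
}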
+
-+static int setup_tx_congestion(struct dpaa2_eth_priv *priv)
++/* Create a frame descriptor based on a fragmented skb */
++static int build_sg_fd(struct dpaa2_eth_priv *priv,
++ struct sk_buff *skb,
++ struct dpaa2_fd *fd)
+{
-+ struct dpni_congestion_notification_cfg cong_notif_cfg = { 0 };
+ struct device *dev = priv->net_dev->dev.parent;
-+ int err;
++ void *sgt_buf = NULL;
++ dma_addr_t addr;
++ int nr_frags = skb_shinfo(skb)->nr_frags;
++ struct dpaa2_sg_entry *sgt;
++ int i, err;
++ int sgt_buf_size;
++ struct scatterlist *scl, *crt_scl;
++ int num_sg;
++ int num_dma_bufs;
++ struct dpaa2_eth_swa *swa;
+
-+ priv->cscn_unaligned = kzalloc(DPAA2_CSCN_SIZE + DPAA2_CSCN_ALIGN,
-+ GFP_KERNEL);
-+ if (!priv->cscn_unaligned)
++ /* Create and map scatterlist.
++ * We don't advertise NETIF_F_FRAGLIST, so skb_to_sgvec() will not have
++ * to go beyond nr_frags+1.
++ * Note: We don't support chained scatterlists
++ */
++ if (unlikely(PAGE_SIZE / sizeof(struct scatterlist) < nr_frags + 1))
++ return -EINVAL;
++
++ scl = kcalloc(nr_frags + 1, sizeof(struct scatterlist), GFP_ATOMIC);
++ if (unlikely(!scl))
+ return -ENOMEM;
+
-+ priv->cscn_mem = PTR_ALIGN(priv->cscn_unaligned, DPAA2_CSCN_ALIGN);
-+ priv->cscn_dma = dma_map_single(dev, priv->cscn_mem, DPAA2_CSCN_SIZE,
-+ DMA_FROM_DEVICE);
-+ if (dma_mapping_error(dev, priv->cscn_dma)) {
-+ dev_err(dev, "Error mapping CSCN memory area\n");
++ sg_init_table(scl, nr_frags + 1);
++ num_sg = skb_to_sgvec(skb, scl, 0, skb->len);
++ num_dma_bufs = dma_map_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
++ if (unlikely(!num_dma_bufs)) {
+ err = -ENOMEM;
-+ goto err_dma_map;
++ goto dma_map_sg_failed;
+ }
+
-+ cong_notif_cfg.units = DPNI_CONGESTION_UNIT_BYTES;
-+ cong_notif_cfg.threshold_entry = DPAA2_ETH_TX_CONG_ENTRY_THRESH;
-+ cong_notif_cfg.threshold_exit = DPAA2_ETH_TX_CONG_EXIT_THRESH;
-+ cong_notif_cfg.message_ctx = (u64)priv;
-+ cong_notif_cfg.message_iova = priv->cscn_dma;
-+ cong_notif_cfg.notification_mode = DPNI_CONG_OPT_WRITE_MEM_ON_ENTER |
-+ DPNI_CONG_OPT_WRITE_MEM_ON_EXIT |
-+ DPNI_CONG_OPT_COHERENT_WRITE;
-+ err = dpni_set_congestion_notification(priv->mc_io, 0, priv->mc_token,
-+ DPNI_QUEUE_TX, 0,
-+ &cong_notif_cfg);
-+ if (err) {
-+ dev_err(dev, "dpni_set_congestion_notification failed\n");
-+ goto err_set_cong;
++ /* Prepare the HW SGT structure */
++ sgt_buf_size = priv->tx_data_offset +
++ sizeof(struct dpaa2_sg_entry) * num_dma_bufs;
++ sgt_buf = netdev_alloc_frag(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN);
++ if (unlikely(!sgt_buf)) {
++ err = -ENOMEM;
++ goto sgt_buf_alloc_failed;
+ }
++ sgt_buf = PTR_ALIGN(sgt_buf, DPAA2_ETH_TX_BUF_ALIGN);
++ memset(sgt_buf, 0, sgt_buf_size);
+
-+ return 0;
++ sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
+
-+err_set_cong:
-+ dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
-+err_dma_map:
-+ kfree(priv->cscn_unaligned);
++ /* Fill in the HW SGT structure.
++ *
++ * sgt_buf is zeroed out, so the following fields are implicit
++ * in all sgt entries:
++ * - offset is 0
++ * - format is 'dpaa2_sg_single'
++ */
++ for_each_sg(scl, crt_scl, num_dma_bufs, i) {
++ dpaa2_sg_set_addr(&sgt[i], sg_dma_address(crt_scl));
++ dpaa2_sg_set_len(&sgt[i], sg_dma_len(crt_scl));
++ }
++ dpaa2_sg_set_final(&sgt[i - 1], true);
+
-+ return err;
-+}
++ /* Store the skb backpointer in the SGT buffer.
++ * Fit the scatterlist and the number of buffers alongside the
++ * skb backpointer in the software annotation area. We'll need
++ * all of them on Tx Conf.
++ */
++ swa = (struct dpaa2_eth_swa *)sgt_buf;
++ swa->type = DPAA2_ETH_SWA_SG;
++ swa->sg.skb = skb;
++ swa->sg.scl = scl;
++ swa->sg.num_sg = num_sg;
++ swa->sg.sgt_size = sgt_buf_size;
+
-+/* Configure the DPNI object this interface is associated with */
-+static int setup_dpni(struct fsl_mc_device *ls_dev)
-+{
-+ struct device *dev = &ls_dev->dev;
-+ struct dpaa2_eth_priv *priv;
-+ struct net_device *net_dev;
-+ struct dpni_buffer_layout buf_layout;
-+ struct dpni_link_cfg cfg = {0};
-+ int err;
++ /* Separately map the SGT buffer */
++ addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
++ if (unlikely(dma_mapping_error(dev, addr))) {
++ err = -ENOMEM;
++ goto dma_map_single_failed;
++ }
++ dpaa2_fd_set_offset(fd, priv->tx_data_offset);
++ dpaa2_fd_set_format(fd, dpaa2_fd_sg);
++ dpaa2_fd_set_addr(fd, addr);
++ dpaa2_fd_set_len(fd, skb->len);
++ dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
+
-+ net_dev = dev_get_drvdata(dev);
-+ priv = netdev_priv(net_dev);
++ if (priv->ts_tx_en && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
++ enable_tx_tstamp(fd, sgt_buf);
+
-+ priv->dpni_id = ls_dev->obj_desc.id;
++ return 0;
+
-+ /* get a handle for the DPNI object */
-+ err = dpni_open(priv->mc_io, 0, priv->dpni_id, &priv->mc_token);
-+ if (err) {
-+ dev_err(dev, "dpni_open() failed\n");
-+ goto err_open;
-+ }
-+
-+ ls_dev->mc_io = priv->mc_io;
-+ ls_dev->mc_handle = priv->mc_token;
++dma_map_single_failed:
++ skb_free_frag(sgt_buf);
++sgt_buf_alloc_failed:
++ dma_unmap_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
++dma_map_sg_failed:
++ kfree(scl);
++ return err;
++}
+
-+ err = dpni_reset(priv->mc_io, 0, priv->mc_token);
-+ if (err) {
-+ dev_err(dev, "dpni_reset() failed\n");
-+ goto err_reset;
-+ }
++/* Create a frame descriptor based on a linear skb */
++static int build_single_fd(struct dpaa2_eth_priv *priv,
++ struct sk_buff *skb,
++ struct dpaa2_fd *fd)
++{
++ struct device *dev = priv->net_dev->dev.parent;
++ u8 *buffer_start, *aligned_start;
++ struct dpaa2_eth_swa *swa;
++ dma_addr_t addr;
+
-+ err = dpni_get_attributes(priv->mc_io, 0, priv->mc_token,
-+ &priv->dpni_attrs);
++ buffer_start = skb->data - dpaa2_eth_needed_headroom(priv, skb);
+
-+ if (err) {
-+ dev_err(dev, "dpni_get_attributes() failed (err=%d)\n", err);
-+ goto err_get_attr;
-+ }
++ /* If there's enough room to align the FD address, do it.
++ * It will help hardware optimize accesses.
++ */
++ aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
++ DPAA2_ETH_TX_BUF_ALIGN);
++ if (aligned_start >= skb->head)
++ buffer_start = aligned_start;
+
-+ /* due to a limitation in WRIOP 1.0.0 (ERR009354), the Rx buf
-+ * align value must be a multiple of 256.
++ /* Store a backpointer to the skb at the beginning of the buffer
++ * (in the private data area) such that we can release it
++ * on Tx confirm
+ */
-+ priv->rx_buf_align =
-+ priv->dpni_attrs.wriop_version & 0x3ff ?
-+ DPAA2_ETH_RX_BUF_ALIGN : DPAA2_ETH_RX_BUF_ALIGN_V1;
++ swa = (struct dpaa2_eth_swa *)buffer_start;
++ swa->type = DPAA2_ETH_SWA_SINGLE;
++ swa->single.skb = skb;
+
-+ /* Update number of logical FQs in netdev */
-+ err = netif_set_real_num_tx_queues(net_dev,
-+ dpaa2_eth_queue_count(priv));
-+ if (err) {
-+ dev_err(dev, "netif_set_real_num_tx_queues failed (%d)\n", err);
-+ goto err_set_tx_queues;
-+ }
++ addr = dma_map_single(dev, buffer_start,
++ skb_tail_pointer(skb) - buffer_start,
++ DMA_BIDIRECTIONAL);
++ if (unlikely(dma_mapping_error(dev, addr)))
++ return -ENOMEM;
+
-+ err = netif_set_real_num_rx_queues(net_dev,
-+ dpaa2_eth_queue_count(priv));
-+ if (err) {
-+ dev_err(dev, "netif_set_real_num_rx_queues failed (%d)\n", err);
-+ goto err_set_rx_queues;
-+ }
++ dpaa2_fd_set_addr(fd, addr);
++ dpaa2_fd_set_offset(fd, (u16)(skb->data - buffer_start));
++ dpaa2_fd_set_len(fd, skb->len);
++ dpaa2_fd_set_format(fd, dpaa2_fd_single);
++ dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
+
-+ /* Configure buffer layouts */
-+ /* rx buffer */
-+ buf_layout.pass_parser_result = true;
-+ buf_layout.pass_frame_status = true;
-+ buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE;
-+ buf_layout.data_align = priv->rx_buf_align;
-+ buf_layout.data_head_room = DPAA2_ETH_RX_HEAD_ROOM;
-+ buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
-+ DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
-+ DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE |
-+ DPNI_BUF_LAYOUT_OPT_DATA_ALIGN |
-+ DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM;
-+ err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
-+ DPNI_QUEUE_RX, &buf_layout);
-+ if (err) {
-+ dev_err(dev,
-+ "dpni_set_buffer_layout(RX) failed\n");
-+ goto err_buf_layout;
-+ }
++ if (priv->ts_tx_en && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
++ enable_tx_tstamp(fd, buffer_start);
+
-+ /* tx buffer */
-+ buf_layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
-+ DPNI_BUF_LAYOUT_OPT_TIMESTAMP |
-+ DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE;
-+ buf_layout.pass_timestamp = true;
-+ err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
-+ DPNI_QUEUE_TX, &buf_layout);
-+ if (err) {
-+ dev_err(dev,
-+ "dpni_set_buffer_layout(TX) failed\n");
-+ goto err_buf_layout;
-+ }
++ return 0;
++}
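The alignment step in build_single_fd() aligns downward: subtracting DPAA2_ETH_TX_BUF_ALIGN before rounding up with PTR_ALIGN yields an aligned address below buffer_start. A worked sketch with hypothetical addresses, assuming a 64 B DPAA2_ETH_TX_BUF_ALIGN:

/* Worked example of the downward-alignment idiom above:
 *
 *   buffer_start            = 0x1050
 *   buffer_start - 64       = 0x1010
 *   PTR_ALIGN(0x1010, 64)   = 0x1040   (highest aligned address < 0x1050)
 *
 * When buffer_start is already aligned, the result lands one full
 * alignment step lower (0x1040 -> 0x1000), which is why the code only
 * adopts aligned_start when it still lies inside the skb headroom
 * (aligned_start >= skb->head).
 */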
+
-+ /* tx-confirm buffer */
-+ buf_layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
-+ DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
-+ err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
-+ DPNI_QUEUE_TX_CONFIRM, &buf_layout);
-+ if (err) {
-+ dev_err(dev, "dpni_set_buffer_layout(TX_CONF) failed\n");
-+ goto err_buf_layout;
-+ }
++/* FD freeing routine on the Tx path
++ *
++ * DMA-unmap and free FD and possibly SGT buffer allocated on Tx. The skb
++ * back-pointed to is also freed.
++ * This can be called either from dpaa2_eth_tx_conf() or on the error path of
++ * dpaa2_eth_tx().
++ */
++static void free_tx_fd(struct dpaa2_eth_priv *priv,
++ const struct dpaa2_fd *fd,
++ bool in_napi)
++{
++ struct device *dev = priv->net_dev->dev.parent;
++ dma_addr_t fd_addr;
++ struct sk_buff *skb = NULL;
++ unsigned char *buffer_start;
++ struct dpaa2_eth_swa *swa;
++ u8 fd_format = dpaa2_fd_get_format(fd);
+
-+ /* Now that we've set our tx buffer layout, retrieve the minimum
-+ * required tx data offset.
-+ */
-+ err = dpni_get_tx_data_offset(priv->mc_io, 0, priv->mc_token,
-+ &priv->tx_data_offset);
-+ if (err) {
-+ dev_err(dev, "dpni_get_tx_data_offset() failed (%d)\n", err);
-+ goto err_data_offset;
-+ }
++ fd_addr = dpaa2_fd_get_addr(fd);
++ buffer_start = dpaa2_iova_to_virt(priv->iommu_domain, fd_addr);
++ swa = (struct dpaa2_eth_swa *)buffer_start;
+
-+ if ((priv->tx_data_offset % 64) != 0)
-+ dev_warn(dev, "Tx data offset (%d) not a multiple of 64B",
-+ priv->tx_data_offset);
++ if (fd_format == dpaa2_fd_single) {
++ skb = swa->single.skb;
++ /* Accessing the skb buffer is safe before dma unmap, because
++ * we didn't map the actual skb shell.
++ */
++ dma_unmap_single(dev, fd_addr,
++ skb_tail_pointer(skb) - buffer_start,
++ DMA_BIDIRECTIONAL);
++ } else if (fd_format == dpaa2_fd_sg) {
++ skb = swa->sg.skb;
+
-+ /* Accommodate software annotation space (SWA) */
-+ priv->tx_data_offset += DPAA2_ETH_SWA_SIZE;
++ /* Unmap the scatterlist */
++ dma_unmap_sg(dev, swa->sg.scl, swa->sg.num_sg, DMA_BIDIRECTIONAL);
++ kfree(swa->sg.scl);
+
-+ /* Enable congestion notifications for Tx queues */
-+ err = setup_tx_congestion(priv);
-+ if (err)
-+ goto err_tx_cong;
++ /* Unmap the SGT buffer */
++ dma_unmap_single(dev, fd_addr, swa->sg.sgt_size,
++ DMA_BIDIRECTIONAL);
++ } else {
++ netdev_dbg(priv->net_dev, "Invalid FD format\n");
++ return;
++ }
+
-+ /* allocate classification rule space */
-+ priv->cls_rule = kzalloc(sizeof(*priv->cls_rule) *
-+ dpaa2_eth_fs_count(priv), GFP_KERNEL);
-+ if (!priv->cls_rule)
-+ goto err_cls_rule;
++ /* Get the timestamp value */
++ if (priv->ts_tx_en && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
++ struct skb_shared_hwtstamps shhwtstamps;
++ u64 *ns;
+
-+ /* Enable flow control */
-+ cfg.options = DPNI_LINK_OPT_AUTONEG | DPNI_LINK_OPT_PAUSE;
-+ priv->tx_pause_frames = 1;
++ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+
-+ err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg);
-+ if (err) {
-+ netdev_err(net_dev, "ERROR %d setting link cfg", err);
-+ goto err_set_link_cfg;
++ ns = dpaa2_get_ts(buffer_start, true);
++ *ns = DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS * le64_to_cpup(ns);
++ shhwtstamps.hwtstamp = ns_to_ktime(*ns);
++ skb_tstamp_tx(skb, &shhwtstamps);
+ }
+
-+ return 0;
++ /* Free SGT buffer allocated on tx */
++ if (fd_format != dpaa2_fd_single)
++ skb_free_frag(buffer_start);
+
-+err_set_link_cfg:
-+err_cls_rule:
-+err_tx_cong:
-+err_data_offset:
-+err_buf_layout:
-+err_set_rx_queues:
-+err_set_tx_queues:
-+err_get_attr:
-+err_reset:
-+ dpni_close(priv->mc_io, 0, priv->mc_token);
-+err_open:
-+ return err;
++ /* Move on with skb release */
++ napi_consume_skb(skb, in_napi);
+}
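free_tx_fd() leans on the software annotation written at frame-build time: the first bytes of the buffer the FD points at hold a struct dpaa2_eth_swa, from which the skb (and, for S/G frames, the scatterlist) is recovered. The round trip in isolation (the two helpers are illustrative, not part of the patch):

/* Sketch of the software-annotation round trip; stash_skb() mirrors
 * what build_single_fd() does, recover_skb() what free_tx_fd() does.
 */
static void stash_skb(void *buffer_start, struct sk_buff *skb)
{
	struct dpaa2_eth_swa *swa = buffer_start;

	swa->type = DPAA2_ETH_SWA_SINGLE;
	swa->single.skb = skb;
}

static struct sk_buff *recover_skb(void *buffer_start)
{
	struct dpaa2_eth_swa *swa = buffer_start;

	return swa->type == DPAA2_ETH_SWA_SINGLE ? swa->single.skb
						 : swa->sg.skb;
}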
+
-+static void free_dpni(struct dpaa2_eth_priv *priv)
++static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
+{
-+ struct device *dev = priv->net_dev->dev.parent;
-+ int err;
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
++ struct device *dev = net_dev->dev.parent;
++ struct dpaa2_fd fd;
++ struct rtnl_link_stats64 *percpu_stats;
++ struct dpaa2_eth_drv_stats *percpu_extras;
++ unsigned int needed_headroom;
++ struct dpaa2_eth_fq *fq;
++ u16 queue_mapping;
++ int err, i, ch_id = 0, qpri = 0;
+
-+ err = dpni_reset(priv->mc_io, 0, priv->mc_token);
-+ if (err)
-+ netdev_warn(priv->net_dev, "dpni_reset() failed (err %d)\n",
-+ err);
++ queue_mapping = skb_get_queue_mapping(skb);
++ fq = &priv->fq[queue_mapping];
+
-+ dpni_close(priv->mc_io, 0, priv->mc_token);
++ /* If we're congested, stop this tx queue; transmission of
++ * the current skb happens regardless of congestion state
++ */
++ dma_sync_single_for_cpu(dev, priv->cscn_dma,
++ DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
++ if (unlikely(dpaa2_cscn_state_congested(priv->cscn_mem))) {
++ netif_stop_subqueue(net_dev, queue_mapping);
++ fq->stats.congestion_entry++;
++ }
+
-+ kfree(priv->cls_rule);
++ percpu_stats = this_cpu_ptr(priv->percpu_stats);
++ percpu_extras = this_cpu_ptr(priv->percpu_extras);
+
-+ dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
-+ kfree(priv->cscn_unaligned);
-+}
++ /* For non-linear skb we don't need a minimum headroom */
++ needed_headroom = dpaa2_eth_needed_headroom(priv, skb);
++ if (skb_headroom(skb) < needed_headroom) {
++ struct sk_buff *ns;
+
-+int setup_fqs_taildrop(struct dpaa2_eth_priv *priv,
-+ bool enable)
-+{
-+ struct device *dev = priv->net_dev->dev.parent;
-+ struct dpni_taildrop td;
-+ int err = 0, i;
++ ns = skb_realloc_headroom(skb, needed_headroom);
++ if (unlikely(!ns)) {
++ percpu_stats->tx_dropped++;
++ goto err_alloc_headroom;
++ }
++ percpu_extras->tx_reallocs++;
++ if (skb->sk)
++ skb_set_owner_w(ns, skb->sk);
++ dev_kfree_skb(skb);
++ skb = ns;
++ }
++
++ /* We'll be holding a back-reference to the skb until Tx Confirmation;
++ * we don't want that overwritten by a concurrent Tx with a cloned skb.
++ */
++ skb = skb_unshare(skb, GFP_ATOMIC);
++ if (unlikely(!skb)) {
++ /* skb_unshare() has already freed the skb */
++ percpu_stats->tx_dropped++;
++ return NETDEV_TX_OK;
++ }
+
-+ td.enable = enable;
-+ td.threshold = DPAA2_ETH_TAILDROP_THRESH;
++ /* Setup the FD fields */
++ memset(&fd, 0, sizeof(fd));
+
-+ if (enable) {
-+ priv->num_bufs = DPAA2_ETH_NUM_BUFS_TD;
-+ priv->refill_thresh = DPAA2_ETH_REFILL_THRESH_TD;
++ if (skb_is_nonlinear(skb)) {
++ err = build_sg_fd(priv, skb, &fd);
++ percpu_extras->tx_sg_frames++;
++ percpu_extras->tx_sg_bytes += skb->len;
+ } else {
-+ priv->num_bufs = DPAA2_ETH_NUM_BUFS_FC /
-+ priv->num_channels;
-+ priv->refill_thresh = priv->num_bufs - DPAA2_ETH_BUFS_PER_CMD;
++ err = build_single_fd(priv, skb, &fd);
+ }
+
-+ for (i = 0; i < priv->num_fqs; i++) {
-+ if (priv->fq[i].type != DPAA2_RX_FQ)
-+ continue;
++ if (unlikely(err)) {
++ percpu_stats->tx_dropped++;
++ goto err_build_fd;
++ }
+
-+ err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
-+ DPNI_CP_QUEUE, DPNI_QUEUE_RX, 0,
-+ priv->fq[i].flowid, &td);
-+ if (err) {
-+ dev_err(dev, "dpni_set_taildrop() failed (%d)\n", err);
++ /* Tracing point */
++ trace_dpaa2_tx_fd(net_dev, &fd);
++
++ if (dpaa2_eth_ceetm_is_enabled(priv)) {
++ err = dpaa2_ceetm_classify(skb, net_dev->qdisc, &ch_id, &qpri);
++ if (err)
++ goto err_ceetm_classify;
++ }
++
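++	/* Enqueue the frame; if the QBMan portal is busy, retry for a
++	 * bounded number of times
++	 */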
++ for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
++ err = dpaa2_io_service_enqueue_qd(fq->channel->dpio,
++ priv->tx_qdid, qpri,
++ fq->tx_qdbin, &fd);
++ if (err != -EBUSY)
+ break;
-+ }
++ }
++ percpu_extras->tx_portal_busy += i;
++ if (unlikely(err < 0)) {
++ percpu_stats->tx_errors++;
++ /* Clean up everything, including freeing the skb */
++ free_tx_fd(priv, &fd, false);
++ } else {
++ percpu_stats->tx_packets++;
++ percpu_stats->tx_bytes += dpaa2_fd_get_len(&fd);
+ }
+
-+ return err;
++ return NETDEV_TX_OK;
++
++err_ceetm_classify:
++ free_tx_fd(priv, &fd, false);
++err_build_fd:
++err_alloc_headroom:
++ dev_kfree_skb(skb);
++
++ return NETDEV_TX_OK;
+}
+
-+static int setup_rx_flow(struct dpaa2_eth_priv *priv,
-+ struct dpaa2_eth_fq *fq)
++/* Tx confirmation frame processing routine */
++static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
++ struct dpaa2_eth_channel *ch,
++ const struct dpaa2_fd *fd,
++ struct napi_struct *napi __always_unused,
++ u16 queue_id)
+{
+ struct device *dev = priv->net_dev->dev.parent;
-+ struct dpni_queue q = { { 0 } };
-+ struct dpni_queue_id qid;
-+ u8 q_opt = DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST;
-+ int err;
++ struct rtnl_link_stats64 *percpu_stats;
++ struct dpaa2_eth_drv_stats *percpu_extras;
++ u32 fd_errors;
+
-+ err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
-+ DPNI_QUEUE_RX, 0, fq->flowid, &q, &qid);
-+ if (err) {
-+ dev_err(dev, "dpni_get_queue() failed (%d)\n", err);
-+ return err;
-+ }
++ /* Tracing point */
++ trace_dpaa2_tx_conf_fd(priv->net_dev, fd);
+
-+ fq->fqid = qid.fqid;
++ percpu_extras = this_cpu_ptr(priv->percpu_extras);
++ percpu_extras->tx_conf_frames++;
++ percpu_extras->tx_conf_bytes += dpaa2_fd_get_len(fd);
+
-+ q.destination.id = fq->channel->dpcon_id;
-+ q.destination.type = DPNI_DEST_DPCON;
-+ q.destination.priority = 1;
-+ q.user_context = (u64)fq;
-+ err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
-+ DPNI_QUEUE_RX, 0, fq->flowid, q_opt, &q);
-+ if (err) {
-+ dev_err(dev, "dpni_set_queue() failed (%d)\n", err);
-+ return err;
++ /* Check congestion state and wake all queues if necessary */
++ if (unlikely(__netif_subqueue_stopped(priv->net_dev, queue_id))) {
++ dma_sync_single_for_cpu(dev, priv->cscn_dma,
++ DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
++ if (!dpaa2_cscn_state_congested(priv->cscn_mem))
++ netif_tx_wake_all_queues(priv->net_dev);
+ }
+
-+ return 0;
++ /* Check frame errors in the FD field */
++ fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK;
++ free_tx_fd(priv, fd, true);
++
++ if (likely(!fd_errors))
++ return;
++
++ if (net_ratelimit())
++ netdev_dbg(priv->net_dev, "TX frame FD error: 0x%08x\n",
++ fd_errors);
++
++ percpu_stats = this_cpu_ptr(priv->percpu_stats);
++ /* Tx-conf logically pertains to the egress path. */
++ percpu_stats->tx_errors++;
+}
+
-+static int setup_tx_flow(struct dpaa2_eth_priv *priv,
-+ struct dpaa2_eth_fq *fq)
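++/* Enable/disable Rx checksum offload (L3 and L4) on the DPNI */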
++static int set_rx_csum(struct dpaa2_eth_priv *priv, bool enable)
+{
-+ struct device *dev = priv->net_dev->dev.parent;
-+ struct dpni_queue q = { { 0 } };
-+ struct dpni_queue_id qid;
-+ u8 q_opt = DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST;
+ int err;
+
-+ err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
-+ DPNI_QUEUE_TX, 0, fq->flowid, &q, &qid);
-+ if (err) {
-+ dev_err(dev, "dpni_get_queue() failed (%d)\n", err);
-+ return err;
-+ }
-+
-+ fq->tx_qdbin = qid.qdbin;
-+
-+ err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
-+ DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid, &q, &qid);
++ err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
++ DPNI_OFF_RX_L3_CSUM, enable);
+ if (err) {
-+ dev_err(dev, "dpni_get_queue() failed (%d)\n", err);
++ netdev_err(priv->net_dev,
++ "dpni_set_offload(RX_L3_CSUM) failed\n");
+ return err;
+ }
+
-+ fq->fqid = qid.fqid;
-+
-+ q.destination.id = fq->channel->dpcon_id;
-+ q.destination.type = DPNI_DEST_DPCON;
-+ q.destination.priority = 0;
-+ q.user_context = (u64)fq;
-+ err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
-+ DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid, q_opt, &q);
++ err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
++ DPNI_OFF_RX_L4_CSUM, enable);
+ if (err) {
-+ dev_err(dev, "dpni_get_queue() failed (%d)\n", err);
++ netdev_err(priv->net_dev,
++ "dpni_set_offload(RX_L4_CSUM) failed\n");
+ return err;
+ }
+
+ return 0;
+}
+
-+#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
-+static int setup_rx_err_flow(struct dpaa2_eth_priv *priv,
-+ struct dpaa2_eth_fq *fq)
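++/* Enable/disable Tx checksum offload (L3 and L4) on the DPNI */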
++static int set_tx_csum(struct dpaa2_eth_priv *priv, bool enable)
+{
-+ struct device *dev = priv->net_dev->dev.parent;
-+ struct dpni_queue q = { { 0 } };
-+ struct dpni_queue_id qid;
-+ u8 q_opt = DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST;
+ int err;
+
-+ err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
-+ DPNI_QUEUE_RX_ERR, 0, 0, &q, &qid);
++ err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
++ DPNI_OFF_TX_L3_CSUM, enable);
+ if (err) {
-+ dev_err(dev, "dpni_get_queue() failed (%d)\n", err);
++ netdev_err(priv->net_dev, "dpni_set_offload(TX_L3_CSUM) failed\n");
+ return err;
+ }
+
-+ fq->fqid = qid.fqid;
-+
-+ q.destination.id = fq->channel->dpcon_id;
-+ q.destination.type = DPNI_DEST_DPCON;
-+ q.destination.priority = 1;
-+ q.user_context = (u64)fq;
-+ err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
-+ DPNI_QUEUE_RX_ERR, 0, 0, q_opt, &q);
++ err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
++ DPNI_OFF_TX_L4_CSUM, enable);
+ if (err) {
-+ dev_err(dev, "dpni_set_queue() failed (%d)\n", err);
++ netdev_err(priv->net_dev, "dpni_set_offload(TX_L4_CSUM) failed\n");
+ return err;
+ }
+
+ return 0;
+}
-+#endif
-+
-+/* default hash key fields */
-+static struct dpaa2_eth_hash_fields default_hash_fields[] = {
-+ {
-+ /* L2 header */
-+ .rxnfc_field = RXH_L2DA,
-+ .cls_prot = NET_PROT_ETH,
-+ .cls_field = NH_FLD_ETH_DA,
-+ .size = 6,
-+ }, {
-+ .cls_prot = NET_PROT_ETH,
-+ .cls_field = NH_FLD_ETH_SA,
-+ .size = 6,
-+ }, {
-+ /* This is the last ethertype field parsed:
-+ * depending on frame format, it can be the MAC ethertype
-+ * or the VLAN etype.
-+ */
-+ .cls_prot = NET_PROT_ETH,
-+ .cls_field = NH_FLD_ETH_TYPE,
-+ .size = 2,
-+ }, {
-+ /* VLAN header */
-+ .rxnfc_field = RXH_VLAN,
-+ .cls_prot = NET_PROT_VLAN,
-+ .cls_field = NH_FLD_VLAN_TCI,
-+ .size = 2,
-+ }, {
-+ /* IP header */
-+ .rxnfc_field = RXH_IP_SRC,
-+ .cls_prot = NET_PROT_IP,
-+ .cls_field = NH_FLD_IP_SRC,
-+ .size = 4,
-+ }, {
-+ .rxnfc_field = RXH_IP_DST,
-+ .cls_prot = NET_PROT_IP,
-+ .cls_field = NH_FLD_IP_DST,
-+ .size = 4,
-+ }, {
-+ .rxnfc_field = RXH_L3_PROTO,
-+ .cls_prot = NET_PROT_IP,
-+ .cls_field = NH_FLD_IP_PROTO,
-+ .size = 1,
-+ }, {
-+ /* Using UDP ports, this is functionally equivalent to raw
-+ * byte pairs from L4 header.
-+ */
-+ .rxnfc_field = RXH_L4_B_0_1,
-+ .cls_prot = NET_PROT_UDP,
-+ .cls_field = NH_FLD_UDP_PORT_SRC,
-+ .size = 2,
-+ }, {
-+ .rxnfc_field = RXH_L4_B_2_3,
-+ .cls_prot = NET_PROT_UDP,
-+ .cls_field = NH_FLD_UDP_PORT_DST,
-+ .size = 2,
-+ },
-+};
+
-+/* Set RX hash options */
-+static int set_hash(struct dpaa2_eth_priv *priv)
++/* Perform a single release command to add buffers
++ * to the specified buffer pool
++ */
++static int add_bufs(struct dpaa2_eth_priv *priv,
++ struct dpaa2_eth_channel *ch, u16 bpid)
+{
+ struct device *dev = priv->net_dev->dev.parent;
-+ struct dpkg_profile_cfg cls_cfg;
-+ struct dpni_rx_tc_dist_cfg dist_cfg;
-+ u8 *dma_mem;
-+ int i;
-+ int err = 0;
-+
-+ memset(&cls_cfg, 0, sizeof(cls_cfg));
++ u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
++ void *buf;
++ dma_addr_t addr;
++ int i, err;
+
-+ for (i = 0; i < priv->num_hash_fields; i++) {
-+ struct dpkg_extract *key =
-+ &cls_cfg.extracts[cls_cfg.num_extracts];
++ for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) {
++ /* Allocate buffer visible to WRIOP + skb shared info +
++ * alignment padding
++ */
++ buf = napi_alloc_frag(dpaa2_eth_buf_raw_size(priv));
++ if (unlikely(!buf))
++ goto err_alloc;
+
-+ key->type = DPKG_EXTRACT_FROM_HDR;
-+ key->extract.from_hdr.prot = priv->hash_fields[i].cls_prot;
-+ key->extract.from_hdr.type = DPKG_FULL_FIELD;
-+ key->extract.from_hdr.field = priv->hash_fields[i].cls_field;
-+ cls_cfg.num_extracts++;
++ buf = PTR_ALIGN(buf, priv->rx_buf_align);
+
-+ priv->rx_flow_hash |= priv->hash_fields[i].rxnfc_field;
-+ }
++ addr = dma_map_single(dev, buf, DPAA2_ETH_RX_BUF_SIZE,
++ DMA_BIDIRECTIONAL);
++ if (unlikely(dma_mapping_error(dev, addr)))
++ goto err_map;
+
-+ dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_DMA | GFP_KERNEL);
-+ if (!dma_mem)
-+ return -ENOMEM;
++ buf_array[i] = addr;
+
-+ err = dpni_prepare_key_cfg(&cls_cfg, dma_mem);
-+ if (err) {
-+ dev_err(dev, "dpni_prepare_key_cfg() failed (%d)", err);
-+ goto err_prep_key;
++		/* Tracing point */
++ trace_dpaa2_eth_buf_seed(priv->net_dev,
++ buf, dpaa2_eth_buf_raw_size(priv),
++ addr, DPAA2_ETH_RX_BUF_SIZE,
++ bpid);
+ }
+
-+ memset(&dist_cfg, 0, sizeof(dist_cfg));
++release_bufs:
++ /* In case the portal is busy, retry until successful */
++ while ((err = dpaa2_io_service_release(ch->dpio, bpid,
++ buf_array, i)) == -EBUSY)
++ cpu_relax();
+
-+ /* Prepare for setting the rx dist */
-+ dist_cfg.key_cfg_iova = dma_map_single(dev, dma_mem,
-+ DPAA2_CLASSIFIER_DMA_SIZE,
-+ DMA_TO_DEVICE);
-+ if (dma_mapping_error(dev, dist_cfg.key_cfg_iova)) {
-+ dev_err(dev, "DMA mapping failed\n");
-+ err = -ENOMEM;
-+ goto err_dma_map;
++ /* If release command failed, clean up and bail out; not much
++ * else we can do about it
++ */
++ if (err) {
++ free_bufs(priv, buf_array, i);
++ return 0;
+ }
+
-+ dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
-+ if (dpaa2_eth_fs_enabled(priv)) {
-+ dist_cfg.dist_mode = DPNI_DIST_MODE_FS;
-+ dist_cfg.fs_cfg.miss_action = DPNI_FS_MISS_HASH;
-+ } else {
-+ dist_cfg.dist_mode = DPNI_DIST_MODE_HASH;
-+ }
++ return i;
+
-+ err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token, 0, &dist_cfg);
-+ dma_unmap_single(dev, dist_cfg.key_cfg_iova,
-+ DPAA2_CLASSIFIER_DMA_SIZE, DMA_TO_DEVICE);
-+ if (err)
-+ dev_err(dev, "dpni_set_rx_tc_dist() failed (%d)\n", err);
++err_map:
++ skb_free_frag(buf);
++err_alloc:
++ /* If we managed to allocate at least some buffers, release them */
++ if (i)
++ goto release_bufs;
+
-+err_dma_map:
-+err_prep_key:
-+ kfree(dma_mem);
-+ return err;
++ return 0;
+}
+
-+/* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs,
-+ * frame queues and channels
-+ */
-+static int bind_dpni(struct dpaa2_eth_priv *priv)
++static int seed_pool(struct dpaa2_eth_priv *priv, u16 bpid)
+{
-+ struct net_device *net_dev = priv->net_dev;
-+ struct device *dev = net_dev->dev.parent;
-+ struct dpni_pools_cfg pools_params;
-+ struct dpni_error_cfg err_cfg;
-+ int err = 0;
-+ int i;
-+
-+ pools_params.num_dpbp = 1;
-+ pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id;
-+ pools_params.pools[0].backup_pool = 0;
-+ pools_params.pools[0].buffer_size = DPAA2_ETH_RX_BUF_SIZE;
-+ err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params);
-+ if (err) {
-+ dev_err(dev, "dpni_set_pools() failed\n");
-+ return err;
-+ }
++ int i, j;
++ int new_count;
+
-+ /* Verify classification options and disable hashing and/or
-+ * flow steering support in case of invalid configuration values
++ /* This is the lazy seeding of Rx buffer pools.
++	 * add_bufs() is also used on the Rx hotpath and calls
++	 * napi_alloc_frag(), which in turn ends up calling this_cpu_ptr()
++	 * and therefore mandates execution in atomic context.
++ * Rather than splitting up the code, do a one-off preempt disable.
+ */
-+ priv->hash_fields = default_hash_fields;
-+ priv->num_hash_fields = ARRAY_SIZE(default_hash_fields);
-+ check_cls_support(priv);
++ preempt_disable();
++ for (j = 0; j < priv->num_channels; j++) {
++ priv->channel[j]->buf_count = 0;
++ for (i = 0; i < priv->max_bufs_per_ch;
++ i += DPAA2_ETH_BUFS_PER_CMD) {
++ new_count = add_bufs(priv, priv->channel[j], bpid);
++ priv->channel[j]->buf_count += new_count;
+
-+ /* have the interface implicitly distribute traffic based on
-+ * a static hash key
-+ */
-+ if (dpaa2_eth_hash_enabled(priv)) {
-+ err = set_hash(priv);
-+ if (err) {
-+ dev_err(dev, "Hashing configuration failed\n");
-+ return err;
++ if (new_count < DPAA2_ETH_BUFS_PER_CMD) {
++ preempt_enable();
++ return -ENOMEM;
++ }
+ }
+ }
-+
-+ /* Configure handling of error frames */
-+ err_cfg.errors = DPAA2_FAS_RX_ERR_MASK;
-+ err_cfg.set_frame_annotation = 1;
-+#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
-+ err_cfg.error_action = DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE;
-+#else
-+ err_cfg.error_action = DPNI_ERROR_ACTION_DISCARD;
-+#endif
-+ err = dpni_set_errors_behavior(priv->mc_io, 0, priv->mc_token,
-+ &err_cfg);
-+ if (err) {
-+ dev_err(dev, "dpni_set_errors_behavior() failed (%d)\n", err);
-+ return err;
-+ }
-+
-+ /* Configure Rx and Tx conf queues to generate CDANs */
-+ for (i = 0; i < priv->num_fqs; i++) {
-+ switch (priv->fq[i].type) {
-+ case DPAA2_RX_FQ:
-+ err = setup_rx_flow(priv, &priv->fq[i]);
-+ break;
-+ case DPAA2_TX_CONF_FQ:
-+ err = setup_tx_flow(priv, &priv->fq[i]);
-+ break;
-+#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
-+ case DPAA2_RX_ERR_FQ:
-+ err = setup_rx_err_flow(priv, &priv->fq[i]);
-+ break;
-+#endif
-+ default:
-+ dev_err(dev, "Invalid FQ type %d\n", priv->fq[i].type);
-+ return -EINVAL;
-+ }
-+ if (err)
-+ return err;
-+ }
-+
-+ err = dpni_get_qdid(priv->mc_io, 0, priv->mc_token, DPNI_QUEUE_TX,
-+ &priv->tx_qdid);
-+ if (err) {
-+ dev_err(dev, "dpni_get_qdid() failed\n");
-+ return err;
-+ }
++ preempt_enable();
+
+ return 0;
+}
+
-+/* Allocate rings for storing incoming frame descriptors */
-+static int alloc_rings(struct dpaa2_eth_priv *priv)
++/* Drain the specified number of buffers from the DPNI's private buffer pool.
++ * @count must not exceed DPAA2_ETH_BUFS_PER_CMD
++ */
++static void drain_bufs(struct dpaa2_eth_priv *priv, int count)
+{
-+ struct net_device *net_dev = priv->net_dev;
-+ struct device *dev = net_dev->dev.parent;
-+ int i;
++ u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
++ int ret;
+
-+ for (i = 0; i < priv->num_channels; i++) {
-+ priv->channel[i]->store =
-+ dpaa2_io_store_create(DPAA2_ETH_STORE_SIZE, dev);
-+ if (!priv->channel[i]->store) {
-+ netdev_err(net_dev, "dpaa2_io_store_create() failed\n");
-+ goto err_ring;
++ do {
++ ret = dpaa2_io_service_acquire(NULL, priv->bpid,
++ buf_array, count);
++ if (ret < 0) {
++ netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n");
++ return;
+ }
-+ }
++ free_bufs(priv, buf_array, ret);
++ } while (ret);
++}
+
-+ return 0;
++static void drain_pool(struct dpaa2_eth_priv *priv)
++{
++ preempt_disable();
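++	/* Drain in chunks of DPAA2_ETH_BUFS_PER_CMD, then one by one to
++	 * catch any leftover buffers
++	 */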
++ drain_bufs(priv, DPAA2_ETH_BUFS_PER_CMD);
++ drain_bufs(priv, 1);
++ preempt_enable();
++}
+
-+err_ring:
-+ for (i = 0; i < priv->num_channels; i++) {
-+ if (!priv->channel[i]->store)
++/* Function is called from softirq context only, so we don't need to guard
++ * the access to the per-channel buffer count
++ */
++static int refill_pool(struct dpaa2_eth_priv *priv,
++ struct dpaa2_eth_channel *ch,
++ u16 bpid)
++{
++ int new_count;
++
++ if (likely(ch->buf_count >= priv->refill_thresh))
++ return 0;
++
++ do {
++ new_count = add_bufs(priv, ch, bpid);
++ if (unlikely(!new_count)) {
++			/* Out of memory; abort for now and retry later */
+ break;
-+ dpaa2_io_store_destroy(priv->channel[i]->store);
-+ }
++ }
++ ch->buf_count += new_count;
++ } while (ch->buf_count < priv->max_bufs_per_ch);
+
-+ return -ENOMEM;
++ if (unlikely(ch->buf_count < priv->max_bufs_per_ch))
++ return -ENOMEM;
++
++ return 0;
+}
+
-+static void free_rings(struct dpaa2_eth_priv *priv)
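++/* Pull a burst of frames from this channel into its local store */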
++static int pull_channel(struct dpaa2_eth_channel *ch)
+{
-+ int i;
++ int err;
++ int dequeues = -1;
+
-+ for (i = 0; i < priv->num_channels; i++)
-+ dpaa2_io_store_destroy(priv->channel[i]->store);
++ /* Retry while portal is busy */
++ do {
++ err = dpaa2_io_service_pull_channel(ch->dpio, ch->ch_id,
++ ch->store);
++ dequeues++;
++ cpu_relax();
++ } while (err == -EBUSY);
++
++ ch->stats.dequeue_portal_busy += dequeues;
++ if (unlikely(err))
++ ch->stats.pull_err++;
++
++ return err;
+}
+
-+static int netdev_init(struct net_device *net_dev)
++/* NAPI poll routine
++ *
++ * Frames are dequeued from the QMan channel associated with this NAPI context.
++ * Rx and (if configured) Rx error frames count towards the NAPI budget. Tx
++ * confirmation frames are limited by a threshold per NAPI poll cycle.
++ */
++static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
+{
++ struct dpaa2_eth_channel *ch;
++ int rx_cleaned = 0, tx_conf_cleaned = 0;
++ bool store_cleaned;
++ struct dpaa2_eth_priv *priv;
+ int err;
-+ struct device *dev = net_dev->dev.parent;
-+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
-+ u8 mac_addr[ETH_ALEN], dpni_mac_addr[ETH_ALEN];
-+ u8 bcast_addr[ETH_ALEN];
-+ u16 rx_headroom, rx_req_headroom;
-+
-+ net_dev->netdev_ops = &dpaa2_eth_ops;
+
-+ /* Get firmware address, if any */
-+ err = dpni_get_port_mac_addr(priv->mc_io, 0, priv->mc_token, mac_addr);
-+ if (err) {
-+ dev_err(dev, "dpni_get_port_mac_addr() failed (%d)\n", err);
-+ return err;
-+ }
-+
-+ /* Get DPNI atttributes address, if any */
-+ err = dpni_get_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
-+ dpni_mac_addr);
-+ if (err) {
-+ dev_err(dev, "dpni_get_primary_mac_addr() failed (%d)\n", err);
-+ return err;
-+ }
++ ch = container_of(napi, struct dpaa2_eth_channel, napi);
++ priv = ch->priv;
+
-+ /* First check if firmware has any address configured by bootloader */
-+ if (!is_zero_ether_addr(mac_addr)) {
-+ /* If the DPMAC addr != the DPNI addr, update it */
-+ if (!ether_addr_equal(mac_addr, dpni_mac_addr)) {
-+ err = dpni_set_primary_mac_addr(priv->mc_io, 0,
-+ priv->mc_token,
-+ mac_addr);
-+ if (err) {
-+ dev_err(dev,
-+ "dpni_set_primary_mac_addr() failed (%d)\n",
-+ err);
-+ return err;
-+ }
-+ }
-+ memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
-+ } else if (is_zero_ether_addr(dpni_mac_addr)) {
-+ /* Fills in net_dev->dev_addr, as required by
-+ * register_netdevice()
-+ */
-+ eth_hw_addr_random(net_dev);
-+ /* Make the user aware, without cluttering the boot log */
-+ dev_dbg_once(dev, " device(s) have all-zero hwaddr, replaced with random\n");
-+ err = dpni_set_primary_mac_addr(priv->mc_io, 0,
-+ priv->mc_token, net_dev->dev_addr);
-+ if (err) {
-+ dev_err(dev,
-+ "dpni_set_primary_mac_addr() failed (%d)\n", err);
-+ return err;
-+ }
-+ /* Override NET_ADDR_RANDOM set by eth_hw_addr_random(); for all
-+ * practical purposes, this will be our "permanent" mac address,
-+ * at least until the next reboot. This move will also permit
-+ * register_netdevice() to properly fill up net_dev->perm_addr.
-+ */
-+ net_dev->addr_assign_type = NET_ADDR_PERM;
-+ /* If DPMAC address is non-zero, use that one */
-+ } else {
-+ /* NET_ADDR_PERM is default, all we have to do is
-+ * fill in the device addr.
-+ */
-+ memcpy(net_dev->dev_addr, dpni_mac_addr, net_dev->addr_len);
-+ }
++ do {
++ err = pull_channel(ch);
++ if (unlikely(err))
++ break;
+
-+ /* Explicitly add the broadcast address to the MAC filtering table;
-+ * the MC won't do that for us.
-+ */
-+ eth_broadcast_addr(bcast_addr);
-+ err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, bcast_addr);
-+ if (err) {
-+ dev_warn(dev, "dpni_add_mac_addr() failed (%d)\n", err);
-+ /* Won't return an error; at least, we'd have egress traffic */
-+ }
++ /* Refill pool if appropriate */
++ refill_pool(priv, ch, priv->bpid);
+
-+ /* Reserve enough space to align buffer as per hardware requirement;
-+ * NOTE: priv->tx_data_offset MUST be initialized at this point.
-+ */
-+ net_dev->needed_headroom = DPAA2_ETH_NEEDED_HEADROOM(priv);
++ store_cleaned = consume_frames(ch, &rx_cleaned,
++ &tx_conf_cleaned);
+
-+ /* Set MTU limits */
-+ net_dev->min_mtu = 68;
-+ net_dev->max_mtu = DPAA2_ETH_MAX_MTU;
++ /* If we've either consumed the budget with Rx frames,
++ * or reached the Tx conf threshold, we're done.
++ */
++ if (rx_cleaned >= budget ||
++ tx_conf_cleaned >= TX_CONF_PER_NAPI_POLL)
++ return budget;
++ } while (store_cleaned);
+
-+ /* Required headroom for Rx skbs, to avoid reallocation on
-+ * forwarding path.
++	/* We didn't consume the entire budget; finish NAPI and
++	 * re-enable data availability notifications
+ */
-+ rx_req_headroom = LL_RESERVED_SPACE(net_dev) - ETH_HLEN;
-+ rx_headroom = ALIGN(DPAA2_ETH_RX_HWA_SIZE + DPAA2_ETH_SWA_SIZE +
-+ DPAA2_ETH_RX_HEAD_ROOM, priv->rx_buf_align);
-+ if (rx_req_headroom > rx_headroom)
-+ dev_info_once(dev,
-+ "Required headroom (%d) greater than available (%d).\n"
-+ "This will impact performance due to reallocations.\n",
-+ rx_req_headroom, rx_headroom);
-+
-+ /* Our .ndo_init will be called herein */
-+ err = register_netdev(net_dev);
-+ if (err < 0) {
-+ dev_err(dev, "register_netdev() failed (%d)\n", err);
-+ return err;
-+ }
++ napi_complete(napi);
++ do {
++ err = dpaa2_io_service_rearm(ch->dpio, &ch->nctx);
++ cpu_relax();
++ } while (err == -EBUSY);
++ WARN_ONCE(err, "CDAN notifications rearm failed on core %d",
++ ch->nctx.desired_cpu);
+
-+ return 0;
++ return max(rx_cleaned, 1);
+}
+
-+static int poll_link_state(void *arg)
++static void enable_ch_napi(struct dpaa2_eth_priv *priv)
+{
-+ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)arg;
-+ int err;
-+
-+ while (!kthread_should_stop()) {
-+ err = link_state_update(priv);
-+ if (unlikely(err))
-+ return err;
++ struct dpaa2_eth_channel *ch;
++ int i;
+
-+ msleep(DPAA2_ETH_LINK_STATE_REFRESH);
++ for (i = 0; i < priv->num_channels; i++) {
++ ch = priv->channel[i];
++ napi_enable(&ch->napi);
+ }
-+
-+ return 0;
+}
+
-+static irqreturn_t dpni_irq0_handler(int irq_num, void *arg)
++static void disable_ch_napi(struct dpaa2_eth_priv *priv)
+{
-+ return IRQ_WAKE_THREAD;
++ struct dpaa2_eth_channel *ch;
++ int i;
++
++ for (i = 0; i < priv->num_channels; i++) {
++ ch = priv->channel[i];
++ napi_disable(&ch->napi);
++ }
+}
+
-+static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
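++/* Query the MC firmware for the current link state and update the carrier
++ * and Tx queues accordingly
++ */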
++static int link_state_update(struct dpaa2_eth_priv *priv)
+{
-+ u32 status = 0, clear = 0;
-+ struct device *dev = (struct device *)arg;
-+ struct fsl_mc_device *dpni_dev = to_fsl_mc_device(dev);
-+ struct net_device *net_dev = dev_get_drvdata(dev);
++ struct dpni_link_state state;
+ int err;
+
-+ err = dpni_get_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle,
-+ DPNI_IRQ_INDEX, &status);
++ err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
+ if (unlikely(err)) {
-+ netdev_err(net_dev, "Can't get irq status (err %d)", err);
-+ clear = 0xffffffff;
-+ goto out;
-+ }
-+
-+ if (status & DPNI_IRQ_EVENT_LINK_CHANGED) {
-+ clear |= DPNI_IRQ_EVENT_LINK_CHANGED;
-+ link_state_update(netdev_priv(net_dev));
++ netdev_err(priv->net_dev,
++ "dpni_get_link_state() failed\n");
++ return err;
+ }
+
-+out:
-+ dpni_clear_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle,
-+ DPNI_IRQ_INDEX, clear);
-+ return IRQ_HANDLED;
-+}
++	/* Check link state; speed / duplex changes are not treated yet */
++ if (priv->link_state.up == state.up)
++ return 0;
+
-+static int setup_irqs(struct fsl_mc_device *ls_dev)
++ priv->link_state = state;
++ if (state.up) {
++ netif_carrier_on(priv->net_dev);
++ netif_tx_start_all_queues(priv->net_dev);
++ } else {
++ netif_tx_stop_all_queues(priv->net_dev);
++ netif_carrier_off(priv->net_dev);
++ }
++
++ netdev_info(priv->net_dev, "Link Event: state %s\n",
++ state.up ? "up" : "down");
++
++ return 0;
++}
++
++static int dpaa2_eth_open(struct net_device *net_dev)
+{
-+ int err = 0;
-+ struct fsl_mc_device_irq *irq;
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
++ int err;
+
-+ err = fsl_mc_allocate_irqs(ls_dev);
++ /* We'll only start the txqs when the link is actually ready; make sure
++ * we don't race against the link up notification, which may come
++	 * immediately after dpni_enable().
++ */
++ netif_tx_stop_all_queues(net_dev);
++
++ /* Also, explicitly set carrier off, otherwise netif_carrier_ok() will
++ * return true and cause 'ip link show' to report the LOWER_UP flag,
++ * even though the link notification wasn't even received.
++ */
++ netif_carrier_off(net_dev);
++
++ err = seed_pool(priv, priv->bpid);
+ if (err) {
-+ dev_err(&ls_dev->dev, "MC irqs allocation failed\n");
-+ return err;
++ /* Not much to do; the buffer pool, though not filled up,
++ * may still contain some buffers which would enable us
++ * to limp on.
++ */
++ netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n",
++ priv->dpbp_dev->obj_desc.id, priv->bpid);
+ }
+
-+ irq = ls_dev->irqs[0];
-+ err = devm_request_threaded_irq(&ls_dev->dev, irq->msi_desc->irq,
-+ dpni_irq0_handler,
-+ dpni_irq0_handler_thread,
-+ IRQF_NO_SUSPEND | IRQF_ONESHOT,
-+ dev_name(&ls_dev->dev), &ls_dev->dev);
-+ if (err < 0) {
-+ dev_err(&ls_dev->dev, "devm_request_threaded_irq(): %d", err);
-+ goto free_mc_irq;
-+ }
++ priv->refill_thresh = DPAA2_ETH_REFILL_THRESH(priv);
+
-+ err = dpni_set_irq_mask(ls_dev->mc_io, 0, ls_dev->mc_handle,
-+ DPNI_IRQ_INDEX, DPNI_IRQ_EVENT_LINK_CHANGED);
++ err = dpni_enable(priv->mc_io, 0, priv->mc_token);
+ if (err < 0) {
-+ dev_err(&ls_dev->dev, "dpni_set_irq_mask(): %d", err);
-+ goto free_irq;
++ netdev_err(net_dev, "dpni_enable() failed\n");
++ goto enable_err;
+ }
+
-+ err = dpni_set_irq_enable(ls_dev->mc_io, 0, ls_dev->mc_handle,
-+ DPNI_IRQ_INDEX, 1);
++ /* If the DPMAC object has already processed the link up interrupt,
++ * we have to learn the link state ourselves.
++ */
++ err = link_state_update(priv);
+ if (err < 0) {
-+ dev_err(&ls_dev->dev, "dpni_set_irq_enable(): %d", err);
-+ goto free_irq;
++ netdev_err(net_dev, "Can't update link state\n");
++ goto link_state_err;
+ }
+
+ return 0;
+
-+free_irq:
-+ devm_free_irq(&ls_dev->dev, irq->msi_desc->irq, &ls_dev->dev);
-+free_mc_irq:
-+ fsl_mc_free_irqs(ls_dev);
-+
++link_state_err:
++enable_err:
++ priv->refill_thresh = 0;
++ drain_pool(priv);
+ return err;
+}
+
-+static void add_ch_napi(struct dpaa2_eth_priv *priv)
++static int dpaa2_eth_stop(struct net_device *net_dev)
+{
-+ int i;
-+ struct dpaa2_eth_channel *ch;
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
++ int dpni_enabled;
++ int retries = 10, i;
++ int err = 0;
+
-+ for (i = 0; i < priv->num_channels; i++) {
-+ ch = priv->channel[i];
-+ /* NAPI weight *MUST* be a multiple of DPAA2_ETH_STORE_SIZE */
-+ netif_napi_add(priv->net_dev, &ch->napi, dpaa2_eth_poll,
-+ NAPI_POLL_WEIGHT);
++ netif_tx_stop_all_queues(net_dev);
++ netif_carrier_off(net_dev);
++
++ /* Loop while dpni_disable() attempts to drain the egress FQs
++ * and confirm them back to us.
++ */
++ do {
++ dpni_disable(priv->mc_io, 0, priv->mc_token);
++ dpni_is_enabled(priv->mc_io, 0, priv->mc_token, &dpni_enabled);
++ if (dpni_enabled)
++ /* Allow the hardware some slack */
++ msleep(100);
++ } while (dpni_enabled && --retries);
++ if (!retries) {
++ netdev_warn(net_dev, "Retry count exceeded disabling DPNI\n");
++ /* Must go on and finish processing pending frames, so we don't
++ * crash at the next "ifconfig up"
++ */
++ err = -ETIMEDOUT;
+ }
-+}
+
-+static void del_ch_napi(struct dpaa2_eth_priv *priv)
-+{
-+ int i;
-+ struct dpaa2_eth_channel *ch;
++ priv->refill_thresh = 0;
+
-+ for (i = 0; i < priv->num_channels; i++) {
-+ ch = priv->channel[i];
-+ netif_napi_del(&ch->napi);
-+ }
-+}
++ /* Wait for all running napi poll routines to finish, so that no
++ * new refill operations are started
++ */
++ for (i = 0; i < priv->num_channels; i++)
++ napi_synchronize(&priv->channel[i]->napi);
+
-+/* SysFS support */
-+static ssize_t dpaa2_eth_show_tx_shaping(struct device *dev,
-+ struct device_attribute *attr,
-+ char *buf)
-+{
-+ struct dpaa2_eth_priv *priv = netdev_priv(to_net_dev(dev));
-+ /* No MC API for getting the shaping config. We're stateful. */
-+ struct dpni_tx_shaping_cfg *scfg = &priv->shaping_cfg;
++ /* Empty the buffer pool */
++ drain_pool(priv);
+
-+ return sprintf(buf, "%u %hu\n", scfg->rate_limit, scfg->max_burst_size);
++ return err;
+}
+
-+static ssize_t dpaa2_eth_write_tx_shaping(struct device *dev,
-+ struct device_attribute *attr,
-+ const char *buf,
-+ size_t count)
++static int dpaa2_eth_init(struct net_device *net_dev)
+{
-+ int err, items;
-+ struct dpaa2_eth_priv *priv = netdev_priv(to_net_dev(dev));
-+ struct dpni_tx_shaping_cfg scfg;
++ u64 supported = 0;
++ u64 not_supported = 0;
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
++ u32 options = priv->dpni_attrs.options;
+
-+ items = sscanf(buf, "%u %hu", &scfg.rate_limit, &scfg.max_burst_size);
-+ if (items != 2) {
-+ pr_err("Expected format: \"rate_limit(Mbps) max_burst_size(bytes)\"\n");
-+ return -EINVAL;
-+ }
-+ /* Size restriction as per MC API documentation */
-+ if (scfg.max_burst_size > DPAA2_ETH_MAX_BURST_SIZE) {
-+ pr_err("max_burst_size must be <= %d\n",
-+ DPAA2_ETH_MAX_BURST_SIZE);
-+ return -EINVAL;
-+ }
++ /* Capabilities listing */
++ supported |= IFF_LIVE_ADDR_CHANGE;
+
-+ err = dpni_set_tx_shaping(priv->mc_io, 0, priv->mc_token, &scfg);
-+ if (err) {
-+ dev_err(dev, "dpni_set_tx_shaping() failed\n");
-+ return -EPERM;
-+ }
-+ /* If successful, save the current configuration for future inquiries */
-+ priv->shaping_cfg = scfg;
++ if (options & DPNI_OPT_NO_MAC_FILTER)
++ not_supported |= IFF_UNICAST_FLT;
++ else
++ supported |= IFF_UNICAST_FLT;
+
-+ return count;
-+}
++ net_dev->priv_flags |= supported;
++ net_dev->priv_flags &= ~not_supported;
+
-+static ssize_t dpaa2_eth_show_txconf_cpumask(struct device *dev,
-+ struct device_attribute *attr,
-+ char *buf)
-+{
-+ struct dpaa2_eth_priv *priv = netdev_priv(to_net_dev(dev));
++ /* Features */
++ net_dev->features = NETIF_F_RXCSUM |
++ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
++ NETIF_F_SG | NETIF_F_HIGHDMA |
++ NETIF_F_LLTX;
++ net_dev->hw_features = net_dev->features;
+
-+ return cpumap_print_to_pagebuf(1, buf, &priv->txconf_cpumask);
++ return 0;
+}
+
-+static ssize_t dpaa2_eth_write_txconf_cpumask(struct device *dev,
-+ struct device_attribute *attr,
-+ const char *buf,
-+ size_t count)
++static int dpaa2_eth_set_addr(struct net_device *net_dev, void *addr)
+{
-+ struct dpaa2_eth_priv *priv = netdev_priv(to_net_dev(dev));
-+ struct dpaa2_eth_fq *fq;
-+ bool running = netif_running(priv->net_dev);
-+ int i, err;
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
++ struct device *dev = net_dev->dev.parent;
++ int err;
+
-+ err = cpulist_parse(buf, &priv->txconf_cpumask);
-+ if (err)
++ err = eth_mac_addr(net_dev, addr);
++ if (err < 0) {
++ dev_err(dev, "eth_mac_addr() failed (%d)\n", err);
+ return err;
-+
-+ /* Only accept CPUs that have an affine DPIO */
-+ if (!cpumask_subset(&priv->txconf_cpumask, &priv->dpio_cpumask)) {
-+ netdev_info(priv->net_dev,
-+ "cpumask must be a subset of 0x%lx\n",
-+ *cpumask_bits(&priv->dpio_cpumask));
-+ cpumask_and(&priv->txconf_cpumask, &priv->dpio_cpumask,
-+ &priv->txconf_cpumask);
+ }
+
-+ /* Rewiring the TxConf FQs requires interface shutdown.
-+ */
-+ if (running) {
-+ err = dpaa2_eth_stop(priv->net_dev);
-+ if (err)
-+ return -ENODEV;
++ err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
++ net_dev->dev_addr);
++ if (err) {
++ dev_err(dev, "dpni_set_primary_mac_addr() failed (%d)\n", err);
++ return err;
+ }
+
-+ /* Set the new TxConf FQ affinities */
-+ set_fq_affinity(priv);
++ return 0;
++}
+
-+ /* dpaa2_eth_open() below will *stop* the Tx queues until an explicit
-+ * link up notification is received. Give the polling thread enough time
-+ * to detect the link state change, or else we'll end up with the
-+ * transmission side forever shut down.
-+ */
-+ if (priv->do_link_poll)
-+ msleep(2 * DPAA2_ETH_LINK_STATE_REFRESH);
++/* Fill in counters maintained by the GPP driver. These may be different from
++ * the hardware counters obtained by ethtool.
++ */
++static struct rtnl_link_stats64 *dpaa2_eth_get_stats(struct net_device *net_dev,
++ struct rtnl_link_stats64 *stats)
++{
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
++ struct rtnl_link_stats64 *percpu_stats;
++ u64 *cpustats;
++ u64 *netstats = (u64 *)stats;
++ int i, j;
++ int num = sizeof(struct rtnl_link_stats64) / sizeof(u64);
+
-+ for (i = 0; i < priv->num_fqs; i++) {
-+ fq = &priv->fq[i];
-+ if (fq->type != DPAA2_TX_CONF_FQ)
-+ continue;
-+ setup_tx_flow(priv, fq);
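++	/* rtnl_link_stats64 is a struct of u64 counters, so the per-cpu
++	 * copies can be summed field by field through plain u64 pointers
++	 */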
++ for_each_possible_cpu(i) {
++ percpu_stats = per_cpu_ptr(priv->percpu_stats, i);
++ cpustats = (u64 *)percpu_stats;
++ for (j = 0; j < num; j++)
++ netstats[j] += cpustats[j];
+ }
++ return stats;
++}
++
++/* Copy mac unicast addresses from @net_dev to @priv.
++ * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
++ */
++static void add_uc_hw_addr(const struct net_device *net_dev,
++ struct dpaa2_eth_priv *priv)
++{
++ struct netdev_hw_addr *ha;
++ int err;
+
-+ if (running) {
-+ err = dpaa2_eth_open(priv->net_dev);
++ netdev_for_each_uc_addr(ha, net_dev) {
++ err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
++ ha->addr);
+ if (err)
-+ return -ENODEV;
++ netdev_warn(priv->net_dev,
++ "Could not add ucast MAC %pM to the filtering table (err %d)\n",
++ ha->addr, err);
+ }
-+
-+ return count;
+}
+
-+static struct device_attribute dpaa2_eth_attrs[] = {
-+ __ATTR(txconf_cpumask,
-+ 0600,
-+ dpaa2_eth_show_txconf_cpumask,
-+ dpaa2_eth_write_txconf_cpumask),
-+
-+ __ATTR(tx_shaping,
-+ 0600,
-+ dpaa2_eth_show_tx_shaping,
-+ dpaa2_eth_write_tx_shaping),
-+};
-+
-+static void dpaa2_eth_sysfs_init(struct device *dev)
++/* Copy mac multicast addresses from @net_dev to @priv
++ * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
++ */
++static void add_mc_hw_addr(const struct net_device *net_dev,
++ struct dpaa2_eth_priv *priv)
+{
-+ int i, err;
++ struct netdev_hw_addr *ha;
++ int err;
+
-+ for (i = 0; i < ARRAY_SIZE(dpaa2_eth_attrs); i++) {
-+ err = device_create_file(dev, &dpaa2_eth_attrs[i]);
-+ if (err) {
-+ dev_err(dev, "ERROR creating sysfs file\n");
-+ goto undo;
-+ }
++ netdev_for_each_mc_addr(ha, net_dev) {
++ err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
++ ha->addr);
++ if (err)
++ netdev_warn(priv->net_dev,
++ "Could not add mcast MAC %pM to the filtering table (err %d)\n",
++ ha->addr, err);
+ }
-+ return;
-+
-+undo:
-+ while (i > 0)
-+ device_remove_file(dev, &dpaa2_eth_attrs[--i]);
+}
+
-+static void dpaa2_eth_sysfs_remove(struct device *dev)
++static void dpaa2_eth_set_rx_mode(struct net_device *net_dev)
+{
-+ int i;
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
++ int uc_count = netdev_uc_count(net_dev);
++ int mc_count = netdev_mc_count(net_dev);
++ u8 max_mac = priv->dpni_attrs.mac_filter_entries;
++ u32 options = priv->dpni_attrs.options;
++ u16 mc_token = priv->mc_token;
++ struct fsl_mc_io *mc_io = priv->mc_io;
++ int err;
+
-+ for (i = 0; i < ARRAY_SIZE(dpaa2_eth_attrs); i++)
-+ device_remove_file(dev, &dpaa2_eth_attrs[i]);
-+}
++ /* Basic sanity checks; these probably indicate a misconfiguration */
++ if (options & DPNI_OPT_NO_MAC_FILTER && max_mac != 0)
++ netdev_info(net_dev,
++ "mac_filter_entries=%d, DPNI_OPT_NO_MAC_FILTER option must be disabled\n",
++ max_mac);
+
-+static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
-+{
-+ struct device *dev;
-+ struct net_device *net_dev = NULL;
-+ struct dpaa2_eth_priv *priv = NULL;
-+ int err = 0;
++ /* Force promiscuous if the uc or mc counts exceed our capabilities. */
++ if (uc_count > max_mac) {
++ netdev_info(net_dev,
++ "Unicast addr count reached %d, max allowed is %d; forcing promisc\n",
++ uc_count, max_mac);
++ goto force_promisc;
++ }
++ if (mc_count + uc_count > max_mac) {
++ netdev_info(net_dev,
++ "Unicast + multicast addr count reached %d, max allowed is %d; forcing promisc\n",
++ uc_count + mc_count, max_mac);
++ goto force_mc_promisc;
++ }
+
-+ dev = &dpni_dev->dev;
++ /* Adjust promisc settings due to flag combinations */
++ if (net_dev->flags & IFF_PROMISC)
++ goto force_promisc;
++ if (net_dev->flags & IFF_ALLMULTI) {
++ /* First, rebuild unicast filtering table. This should be done
++ * in promisc mode, in order to avoid frame loss while we
++ * progressively add entries to the table.
++ * We don't know whether we had been in promisc already, and
++ * making an MC call to find out is expensive; so set uc promisc
++ * nonetheless.
++ */
++ err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
++ if (err)
++ netdev_warn(net_dev, "Can't set uc promisc\n");
+
-+ /* Net device */
-+ net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA2_ETH_MAX_TX_QUEUES);
-+ if (!net_dev) {
-+ dev_err(dev, "alloc_etherdev_mq() failed\n");
-+ return -ENOMEM;
++ /* Actual uc table reconstruction. */
++ err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 0);
++ if (err)
++ netdev_warn(net_dev, "Can't clear uc filters\n");
++ add_uc_hw_addr(net_dev, priv);
++
++ /* Finally, clear uc promisc and set mc promisc as requested. */
++ err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
++ if (err)
++ netdev_warn(net_dev, "Can't clear uc promisc\n");
++ goto force_mc_promisc;
+ }
+
-+ SET_NETDEV_DEV(net_dev, dev);
-+ dev_set_drvdata(dev, net_dev);
++	/* Neither unicast nor multicast promisc will be on... eventually.
++ * For now, rebuild mac filtering tables while forcing both of them on.
++ */
++ err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
++ if (err)
++ netdev_warn(net_dev, "Can't set uc promisc (%d)\n", err);
++ err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1);
++ if (err)
++ netdev_warn(net_dev, "Can't set mc promisc (%d)\n", err);
+
-+ priv = netdev_priv(net_dev);
-+ priv->net_dev = net_dev;
++ /* Actual mac filtering tables reconstruction */
++ err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 1);
++ if (err)
++ netdev_warn(net_dev, "Can't clear mac filters\n");
++ add_mc_hw_addr(net_dev, priv);
++ add_uc_hw_addr(net_dev, priv);
+
-+ priv->iommu_domain = iommu_get_domain_for_dev(dev);
++	/* Now we can clear both ucast and mcast promisc, without the risk
++	 * of dropping legitimate frames anymore.
++ */
++ err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
++ if (err)
++ netdev_warn(net_dev, "Can't clear ucast promisc\n");
++ err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 0);
++ if (err)
++ netdev_warn(net_dev, "Can't clear mcast promisc\n");
+
-+ /* Obtain a MC portal */
-+ err = fsl_mc_portal_allocate(dpni_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
-+ &priv->mc_io);
-+ if (err) {
-+ dev_err(dev, "MC portal allocation failed\n");
-+ goto err_portal_alloc;
-+ }
++ return;
+
-+ /* MC objects initialization and configuration */
-+ err = setup_dpni(dpni_dev);
++force_promisc:
++ err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
+ if (err)
-+ goto err_dpni_setup;
++ netdev_warn(net_dev, "Can't set ucast promisc\n");
++force_mc_promisc:
++ err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1);
++ if (err)
++ netdev_warn(net_dev, "Can't set mcast promisc\n");
++}
+
-+ err = setup_dpio(priv);
-+ if (err) {
-+ dev_info(dev, "Defer probing as no DPIO available\n");
-+ err = -EPROBE_DEFER;
-+ goto err_dpio_setup;
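++/* Propagate changes to the Rx/Tx checksum offload features down to the
++ * hardware
++ */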
++static int dpaa2_eth_set_features(struct net_device *net_dev,
++ netdev_features_t features)
++{
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
++ netdev_features_t changed = features ^ net_dev->features;
++ bool enable;
++ int err;
++
++ if (changed & NETIF_F_RXCSUM) {
++ enable = !!(features & NETIF_F_RXCSUM);
++ err = set_rx_csum(priv, enable);
++ if (err)
++ return err;
+ }
+
-+ setup_fqs(priv);
++ if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
++ enable = !!(features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
++ err = set_tx_csum(priv, enable);
++ if (err)
++ return err;
++ }
+
-+ err = setup_dpbp(priv);
-+ if (err)
-+ goto err_dpbp_setup;
++ return 0;
++}
+
-+ err = bind_dpni(priv);
-+ if (err)
-+ goto err_bind;
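++/* Handle the SIOCSHWTSTAMP ioctl: configure Tx/Rx hardware timestamping */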
++static int dpaa2_eth_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
++{
++ struct dpaa2_eth_priv *priv = netdev_priv(dev);
++ struct hwtstamp_config config;
+
-+ /* Add a NAPI context for each channel */
-+ add_ch_napi(priv);
-+ enable_ch_napi(priv);
++ if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
++ return -EFAULT;
+
-+ /* Percpu statistics */
-+ priv->percpu_stats = alloc_percpu(*priv->percpu_stats);
-+ if (!priv->percpu_stats) {
-+ dev_err(dev, "alloc_percpu(percpu_stats) failed\n");
-+ err = -ENOMEM;
-+ goto err_alloc_percpu_stats;
-+ }
-+ priv->percpu_extras = alloc_percpu(*priv->percpu_extras);
-+ if (!priv->percpu_extras) {
-+ dev_err(dev, "alloc_percpu(percpu_extras) failed\n");
-+ err = -ENOMEM;
-+ goto err_alloc_percpu_extras;
++ switch (config.tx_type) {
++ case HWTSTAMP_TX_OFF:
++ priv->ts_tx_en = false;
++ break;
++ case HWTSTAMP_TX_ON:
++ priv->ts_tx_en = true;
++ break;
++ default:
++ return -ERANGE;
+ }
+
-+ snprintf(net_dev->name, IFNAMSIZ, "ni%d", dpni_dev->obj_desc.id);
-+ if (!dev_valid_name(net_dev->name)) {
-+ dev_warn(&net_dev->dev,
-+ "netdevice name \"%s\" cannot be used, reverting to default..\n",
-+ net_dev->name);
-+ dev_alloc_name(net_dev, "eth%d");
-+ dev_warn(&net_dev->dev, "using name \"%s\"\n", net_dev->name);
++ if (config.rx_filter == HWTSTAMP_FILTER_NONE) {
++ priv->ts_rx_en = false;
++ } else {
++ priv->ts_rx_en = true;
++ /* TS is set for all frame types, not only those requested */
++ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ }
+
-+ err = netdev_init(net_dev);
-+ if (err)
-+ goto err_netdev_init;
++ return copy_to_user(rq->ifr_data, &config, sizeof(config)) ?
++ -EFAULT : 0;
++}
+
-+ /* Configure checksum offload based on current interface flags */
-+ err = set_rx_csum(priv, !!(net_dev->features & NETIF_F_RXCSUM));
-+ if (err)
-+ goto err_csum;
++static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
++{
++ if (cmd == SIOCSHWTSTAMP)
++ return dpaa2_eth_ts_ioctl(dev, rq, cmd);
+
-+ err = set_tx_csum(priv, !!(net_dev->features &
-+ (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)));
-+ if (err)
-+ goto err_csum;
++ return -EINVAL;
++}
+
-+ err = alloc_rings(priv);
-+ if (err)
-+ goto err_alloc_rings;
++static int set_buffer_layout(struct dpaa2_eth_priv *priv)
++{
++ struct device *dev = priv->net_dev->dev.parent;
++ struct dpni_buffer_layout buf_layout = {0};
++ int err;
+
-+ net_dev->ethtool_ops = &dpaa2_ethtool_ops;
++	/* We need to check for WRIOP version 1.0.0, but depending on the MC
++	 * version, this number is not always reported correctly on rev1
++	 * hardware, so check for both alternatives.
++ */
++ if (priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(0, 0, 0) ||
++ priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(1, 0, 0))
++ priv->rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN_REV1;
++ else
++ priv->rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN;
+
-+ err = setup_irqs(dpni_dev);
++ /* tx buffer */
++ buf_layout.pass_timestamp = true;
++ buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE;
++ buf_layout.options = DPNI_BUF_LAYOUT_OPT_TIMESTAMP |
++ DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE;
++ err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
++ DPNI_QUEUE_TX, &buf_layout);
+ if (err) {
-+ netdev_warn(net_dev, "Failed to set link interrupt, fall back to polling\n");
-+ priv->poll_thread = kthread_run(poll_link_state, priv,
-+ "%s_poll_link", net_dev->name);
-+ if (IS_ERR(priv->poll_thread)) {
-+ netdev_err(net_dev, "Error starting polling thread\n");
-+ goto err_poll_thread;
-+ }
-+ priv->do_link_poll = true;
++ dev_err(dev, "dpni_set_buffer_layout(TX) failed\n");
++ return err;
+ }
+
-+ dpaa2_eth_sysfs_init(&net_dev->dev);
-+#ifdef CONFIG_FSL_DPAA2_ETH_DEBUGFS
-+ dpaa2_dbg_add(priv);
-+#endif
++ /* tx-confirm buffer */
++ buf_layout.options = DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
++ err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
++ DPNI_QUEUE_TX_CONFIRM, &buf_layout);
++ if (err) {
++ dev_err(dev, "dpni_set_buffer_layout(TX_CONF) failed\n");
++ return err;
++ }
+
-+ dev_info(dev, "Probed interface %s\n", net_dev->name);
-+ return 0;
++ /* Now that we've set our tx buffer layout, retrieve the minimum
++ * required tx data offset.
++ */
++ err = dpni_get_tx_data_offset(priv->mc_io, 0, priv->mc_token,
++ &priv->tx_data_offset);
++ if (err) {
++ dev_err(dev, "dpni_get_tx_data_offset() failed\n");
++ return err;
++ }
+
-+err_poll_thread:
-+ free_rings(priv);
-+err_alloc_rings:
-+err_csum:
-+ unregister_netdev(net_dev);
-+err_netdev_init:
-+ free_percpu(priv->percpu_extras);
-+err_alloc_percpu_extras:
-+ free_percpu(priv->percpu_stats);
-+err_alloc_percpu_stats:
-+ disable_ch_napi(priv);
-+ del_ch_napi(priv);
-+err_bind:
-+ free_dpbp(priv);
-+err_dpbp_setup:
-+ free_dpio(priv);
-+err_dpio_setup:
-+ free_dpni(priv);
-+err_dpni_setup:
-+ fsl_mc_portal_free(priv->mc_io);
-+err_portal_alloc:
-+ dev_set_drvdata(dev, NULL);
-+ free_netdev(net_dev);
++ if ((priv->tx_data_offset % 64) != 0)
++ dev_warn(dev, "Tx data offset (%d) not a multiple of 64B\n",
++ priv->tx_data_offset);
+
-+ return err;
++ /* rx buffer */
++ buf_layout.pass_frame_status = true;
++ buf_layout.pass_parser_result = true;
++ buf_layout.data_align = priv->rx_buf_align;
++ buf_layout.private_data_size = 0;
++ buf_layout.data_head_room = dpaa2_eth_rx_headroom(priv);
++ buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
++ DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
++ DPNI_BUF_LAYOUT_OPT_DATA_ALIGN |
++ DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM |
++ DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
++ err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
++ DPNI_QUEUE_RX, &buf_layout);
++ if (err) {
++ dev_err(dev, "dpni_set_buffer_layout(RX) failed\n");
++ return err;
++ }
++
++ return 0;
+}
+
-+static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
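++/* Install or remove an XDP program; the interface is stopped and restarted
++ * if it was running
++ */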
++static int dpaa2_eth_set_xdp(struct net_device *net_dev, struct bpf_prog *prog)
+{
-+ struct device *dev;
-+ struct net_device *net_dev;
-+ struct dpaa2_eth_priv *priv;
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
++ struct dpaa2_eth_channel *ch;
++ struct bpf_prog *old_prog;
++ int i, err;
+
-+ dev = &ls_dev->dev;
-+ net_dev = dev_get_drvdata(dev);
-+ priv = netdev_priv(net_dev);
++ /* No support for SG frames */
++ if (DPAA2_ETH_L2_MAX_FRM(net_dev->mtu) > DPAA2_ETH_RX_BUF_SIZE)
++ return -EINVAL;
+
-+#ifdef CONFIG_FSL_DPAA2_ETH_DEBUGFS
-+ dpaa2_dbg_remove(priv);
-+#endif
-+ dpaa2_eth_sysfs_remove(&net_dev->dev);
++ if (netif_running(net_dev)) {
++ err = dpaa2_eth_stop(net_dev);
++ if (err)
++ return err;
++ }
+
-+ unregister_netdev(net_dev);
-+ dev_info(net_dev->dev.parent, "Removed interface %s\n", net_dev->name);
++ if (prog) {
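++		/* One program reference per channel is needed; presumably the
++		 * reference already held by the caller covers the first one
++		 */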
++ prog = bpf_prog_add(prog, priv->num_channels - 1);
++ if (IS_ERR(prog))
++ return PTR_ERR(prog);
++ }
+
-+ if (priv->do_link_poll)
-+ kthread_stop(priv->poll_thread);
-+ else
-+ fsl_mc_free_irqs(ls_dev);
++ priv->has_xdp_prog = !!prog;
+
-+ free_rings(priv);
-+ free_percpu(priv->percpu_stats);
-+ free_percpu(priv->percpu_extras);
++ for (i = 0; i < priv->num_channels; i++) {
++ ch = priv->channel[i];
++ old_prog = xchg(&ch->xdp_prog, prog);
++ if (old_prog)
++ bpf_prog_put(old_prog);
++ }
+
-+ disable_ch_napi(priv);
-+ del_ch_napi(priv);
-+ free_dpbp(priv);
-+ free_dpio(priv);
-+ free_dpni(priv);
++ if (netif_running(net_dev)) {
++ err = dpaa2_eth_open(net_dev);
++ if (err)
++ return err;
++ }
+
-+ fsl_mc_portal_free(priv->mc_io);
++ return 0;
++}
+
-+ dev_set_drvdata(dev, NULL);
-+ free_netdev(net_dev);
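++/* ndo_xdp entry point: dispatch XDP setup and query commands */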
++static int dpaa2_eth_xdp(struct net_device *dev, struct netdev_xdp *xdp)
++{
++ struct dpaa2_eth_priv *priv = netdev_priv(dev);
+
-+ return 0;
++ switch (xdp->command) {
++ case XDP_SETUP_PROG:
++ return dpaa2_eth_set_xdp(dev, xdp->prog);
++ case XDP_QUERY_PROG:
++ xdp->prog_attached = priv->has_xdp_prog;
++ return 0;
++ default:
++ return -EINVAL;
++ }
+}
+
-+static const struct fsl_mc_device_id dpaa2_eth_match_id_table[] = {
-+ {
-+ .vendor = FSL_MC_VENDOR_FREESCALE,
-+ .obj_type = "dpni",
-+ },
-+ { .vendor = 0x0 }
++static const struct net_device_ops dpaa2_eth_ops = {
++ .ndo_open = dpaa2_eth_open,
++ .ndo_start_xmit = dpaa2_eth_tx,
++ .ndo_stop = dpaa2_eth_stop,
++ .ndo_init = dpaa2_eth_init,
++ .ndo_set_mac_address = dpaa2_eth_set_addr,
++ .ndo_get_stats64 = dpaa2_eth_get_stats,
++ .ndo_set_rx_mode = dpaa2_eth_set_rx_mode,
++ .ndo_set_features = dpaa2_eth_set_features,
++ .ndo_do_ioctl = dpaa2_eth_ioctl,
++ .ndo_xdp = dpaa2_eth_xdp,
+};
-+MODULE_DEVICE_TABLE(fslmc, dpaa2_eth_match_id_table);
+
-+static struct fsl_mc_driver dpaa2_eth_driver = {
-+ .driver = {
-+ .name = KBUILD_MODNAME,
-+ .owner = THIS_MODULE,
-+ },
-+ .probe = dpaa2_eth_probe,
-+ .remove = dpaa2_eth_remove,
-+ .match_id_table = dpaa2_eth_match_id_table
-+};
++static void cdan_cb(struct dpaa2_io_notification_ctx *ctx)
++{
++ struct dpaa2_eth_channel *ch;
+
-+static int __init dpaa2_eth_driver_init(void)
++ ch = container_of(ctx, struct dpaa2_eth_channel, nctx);
++
++ /* Update NAPI statistics */
++ ch->stats.cdan++;
++
++ napi_schedule_irqoff(&ch->napi);
++}
++
++/* Allocate and configure a DPCON object */
++static struct fsl_mc_device *setup_dpcon(struct dpaa2_eth_priv *priv)
+{
++ struct fsl_mc_device *dpcon;
++ struct device *dev = priv->net_dev->dev.parent;
++ struct dpcon_attr attrs;
+ int err;
+
-+ dpaa2_eth_dbg_init();
-+ err = fsl_mc_driver_register(&dpaa2_eth_driver);
++ err = fsl_mc_object_allocate(to_fsl_mc_device(dev),
++ FSL_MC_POOL_DPCON, &dpcon);
+ if (err) {
-+ dpaa2_eth_dbg_exit();
-+ return err;
++ dev_info(dev, "Not enough DPCONs, will go on as-is\n");
++ return NULL;
+ }
+
-+ return 0;
++ err = dpcon_open(priv->mc_io, 0, dpcon->obj_desc.id, &dpcon->mc_handle);
++ if (err) {
++ dev_err(dev, "dpcon_open() failed\n");
++ goto free;
++ }
++
++ err = dpcon_reset(priv->mc_io, 0, dpcon->mc_handle);
++ if (err) {
++ dev_err(dev, "dpcon_reset() failed\n");
++ goto close;
++ }
++
++ err = dpcon_get_attributes(priv->mc_io, 0, dpcon->mc_handle, &attrs);
++ if (err) {
++ dev_err(dev, "dpcon_get_attributes() failed\n");
++ goto close;
++ }
++
++ err = dpcon_enable(priv->mc_io, 0, dpcon->mc_handle);
++ if (err) {
++ dev_err(dev, "dpcon_enable() failed\n");
++ goto close;
++ }
++
++ return dpcon;
++
++close:
++ dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
++free:
++ fsl_mc_object_free(dpcon);
++
++ return NULL;
+}
+
-+static void __exit dpaa2_eth_driver_exit(void)
++static void free_dpcon(struct dpaa2_eth_priv *priv,
++ struct fsl_mc_device *dpcon)
+{
-+ dpaa2_eth_dbg_exit();
-+ fsl_mc_driver_unregister(&dpaa2_eth_driver);
++ dpcon_disable(priv->mc_io, 0, dpcon->mc_handle);
++ dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
++ fsl_mc_object_free(dpcon);
+}
+
-+module_init(dpaa2_eth_driver_init);
-+module_exit(dpaa2_eth_driver_exit);
---- /dev/null
-+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h
-@@ -0,0 +1,460 @@
-+/* Copyright 2014-2015 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
++static struct dpaa2_eth_channel *
++alloc_channel(struct dpaa2_eth_priv *priv)
++{
++ struct dpaa2_eth_channel *channel;
++ struct dpcon_attr attr;
++ struct device *dev = priv->net_dev->dev.parent;
++ int err;
+
-+#ifndef __DPAA2_ETH_H
-+#define __DPAA2_ETH_H
++ channel = kzalloc(sizeof(*channel), GFP_KERNEL);
++ if (!channel)
++ return NULL;
+
-+#include <linux/atomic.h>
-+#include <linux/netdevice.h>
-+#include <linux/if_vlan.h>
-+#include "../../fsl-mc/include/dpaa2-io.h"
-+#include "dpni.h"
-+#include "net.h"
++ channel->dpcon = setup_dpcon(priv);
++ if (!channel->dpcon)
++ goto err_setup;
+
-+#include "dpaa2-eth-debugfs.h"
++ err = dpcon_get_attributes(priv->mc_io, 0, channel->dpcon->mc_handle,
++ &attr);
++ if (err) {
++ dev_err(dev, "dpcon_get_attributes() failed\n");
++ goto err_get_attr;
++ }
+
-+#define DPAA2_ETH_STORE_SIZE 16
++ channel->dpcon_id = attr.id;
++ channel->ch_id = attr.qbman_ch_id;
++ channel->priv = priv;
+
-+/* We set a max threshold for how many Tx confirmations we should process
-+ * on a NAPI poll call, they take less processing time.
-+ */
-+#define TX_CONF_PER_NAPI_POLL 256
++ return channel;
+
-+/* Maximum number of scatter-gather entries in an ingress frame,
-+ * considering the maximum receive frame size is 64K
-+ */
-+#define DPAA2_ETH_MAX_SG_ENTRIES ((64 * 1024) / DPAA2_ETH_RX_BUF_SIZE)
++err_get_attr:
++ free_dpcon(priv, channel->dpcon);
++err_setup:
++ kfree(channel);
++ return NULL;
++}
+
-+/* Maximum acceptable MTU value. It is in direct relation with the hardware
-+ * enforced Max Frame Length (currently 10k).
-+ */
-+#define DPAA2_ETH_MFL (10 * 1024)
-+#define DPAA2_ETH_MAX_MTU (DPAA2_ETH_MFL - VLAN_ETH_HLEN)
-+/* Convert L3 MTU to L2 MFL */
-+#define DPAA2_ETH_L2_MAX_FRM(mtu) ((mtu) + VLAN_ETH_HLEN)
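++/* Free a channel, releasing its DPCON and dropping any attached XDP program
++ * reference
++ */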
++static void free_channel(struct dpaa2_eth_priv *priv,
++ struct dpaa2_eth_channel *channel)
++{
++ struct bpf_prog *prog;
+
-+/* Maximum burst size value for Tx shaping */
-+#define DPAA2_ETH_MAX_BURST_SIZE 0xF7FF
++ free_dpcon(priv, channel->dpcon);
+
-+/* Maximum number of buffers that can be acquired/released through a single
-+ * QBMan command
-+ */
-+#define DPAA2_ETH_BUFS_PER_CMD 7
++ prog = READ_ONCE(channel->xdp_prog);
++ if (prog)
++ bpf_prog_put(prog);
+
-+/* Set the taildrop threshold (in bytes) to allow the enqueue of several jumbo
-+ * frames in the Rx queues (length of the current frame is not
-+ * taken into account when making the taildrop decision)
-+ */
-+#define DPAA2_ETH_TAILDROP_THRESH (64 * 1024)
++ kfree(channel);
++}
+
-+/* Buffer quota per queue. Must be large enough such that for minimum sized
-+ * frames taildrop kicks in before the bpool gets depleted, so we compute
-+ * how many 64B frames fit inside the taildrop threshold and add a margin
-+ * to accommodate the buffer refill delay.
++/* DPIO setup: allocate and configure QBMan channels, setup core affinity
++ * and register data availability notifications
+ */
-+#define DPAA2_ETH_MAX_FRAMES_PER_QUEUE (DPAA2_ETH_TAILDROP_THRESH / 64)
-+#define DPAA2_ETH_NUM_BUFS_TD (DPAA2_ETH_MAX_FRAMES_PER_QUEUE + 256)
-+#define DPAA2_ETH_REFILL_THRESH_TD \
-+ (DPAA2_ETH_NUM_BUFS_TD - DPAA2_ETH_BUFS_PER_CMD)
++static int setup_dpio(struct dpaa2_eth_priv *priv)
++{
++ struct dpaa2_io_notification_ctx *nctx;
++ struct dpaa2_eth_channel *channel;
++ struct dpcon_notification_cfg dpcon_notif_cfg;
++ struct device *dev = priv->net_dev->dev.parent;
++ int i, err;
+
-+/* Buffer quota per queue to use when flow control is active. */
-+#define DPAA2_ETH_NUM_BUFS_FC 256
++ /* We want the ability to spread ingress traffic (RX, TX conf) to as
++ * many cores as possible, so we need one channel for each core
++	 * (unless there are fewer queues than cores, in which case the extra
++ * channels would be wasted).
++ * Allocate one channel per core and register it to the core's
++ * affine DPIO. If not enough channels are available for all cores
++ * or if some cores don't have an affine DPIO, there will be no
++ * ingress frame processing on those cores.
++ */
++ cpumask_clear(&priv->dpio_cpumask);
++ for_each_online_cpu(i) {
++ /* Try to allocate a channel */
++ channel = alloc_channel(priv);
++ if (!channel) {
++ dev_info(dev,
++ "No affine channel for cpu %d and above\n", i);
++ err = -ENODEV;
++ goto err_alloc_ch;
++ }
+
-+/* Hardware requires alignment for ingress/egress buffer addresses
-+ * and ingress buffer lengths.
-+ */
-+#define DPAA2_ETH_RX_BUF_SIZE 2048
-+#define DPAA2_ETH_TX_BUF_ALIGN 64
-+#define DPAA2_ETH_RX_BUF_ALIGN 64
-+#define DPAA2_ETH_RX_BUF_ALIGN_V1 256
-+#define DPAA2_ETH_NEEDED_HEADROOM(p_priv) \
-+ ((p_priv)->tx_data_offset + DPAA2_ETH_TX_BUF_ALIGN)
++ priv->channel[priv->num_channels] = channel;
+
-+/* rx_extra_head prevents reallocations in L3 processing. */
-+#define DPAA2_ETH_SKB_SIZE \
-+ (DPAA2_ETH_RX_BUF_SIZE + \
-+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
++ nctx = &channel->nctx;
++ nctx->is_cdan = 1;
++ nctx->cb = cdan_cb;
++ nctx->id = channel->ch_id;
++ nctx->desired_cpu = i;
+
-+/* Hardware only sees DPAA2_ETH_RX_BUF_SIZE, but we need to allocate ingress
-+ * buffers large enough to allow building an skb around them and also account
-+ * for alignment restrictions.
-+ */
-+#define DPAA2_ETH_BUF_RAW_SIZE(p_priv) \
-+ (DPAA2_ETH_SKB_SIZE + \
-+ (p_priv)->rx_buf_align)
++ /* Register the new context */
++ channel->dpio = dpaa2_io_service_select(i);
++ err = dpaa2_io_service_register(channel->dpio, nctx);
++ if (err) {
++ dev_dbg(dev, "No affine DPIO for cpu %d\n", i);
++ /* If no affine DPIO for this core, there's probably
++ * none available for next cores either. Signal we want
++ * to retry later, in case the DPIO devices weren't
++ * probed yet.
++ */
++ err = -EPROBE_DEFER;
++ goto err_service_reg;
++ }
+
-+/* PTP nominal frequency 1GHz */
-+#define DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS 1
++ /* Register DPCON notification with MC */
++ dpcon_notif_cfg.dpio_id = nctx->dpio_id;
++ dpcon_notif_cfg.priority = 0;
++ dpcon_notif_cfg.user_ctx = nctx->qman64;
++ err = dpcon_set_notification(priv->mc_io, 0,
++ channel->dpcon->mc_handle,
++ &dpcon_notif_cfg);
++ if (err) {
++			dev_err(dev, "dpcon_set_notification() failed\n");
++ goto err_set_cdan;
++ }
+
-+/* Leave enough extra space in the headroom to make sure the skb is
-+ * not realloc'd in forwarding scenarios.
-+ */
-+#define DPAA2_ETH_RX_HEAD_ROOM 192
++ /* If we managed to allocate a channel and also found an affine
++ * DPIO for this core, add it to the final mask
++ */
++ cpumask_set_cpu(i, &priv->dpio_cpumask);
++ priv->num_channels++;
+
-+/* We are accommodating a skb backpointer and some S/G info
-+ * in the frame's software annotation. The hardware
-+ * options are either 0 or 64, so we choose the latter.
-+ */
-+#define DPAA2_ETH_SWA_SIZE 64
++ /* Stop if we already have enough channels to accommodate all
++ * RX and TX conf queues
++ */
++ if (priv->num_channels == dpaa2_eth_queue_count(priv))
++ break;
++ }
+
-+/* Must keep this struct smaller than DPAA2_ETH_SWA_SIZE */
-+struct dpaa2_eth_swa {
-+ struct sk_buff *skb;
-+ struct scatterlist *scl;
-+ int num_sg;
-+ int num_dma_bufs;
-+};
++ return 0;
+
-+/* Annotation valid bits in FD FRC */
-+#define DPAA2_FD_FRC_FASV 0x8000
-+#define DPAA2_FD_FRC_FAEADV 0x4000
-+#define DPAA2_FD_FRC_FAPRV 0x2000
-+#define DPAA2_FD_FRC_FAIADV 0x1000
-+#define DPAA2_FD_FRC_FASWOV 0x0800
-+#define DPAA2_FD_FRC_FAICFDV 0x0400
++err_set_cdan:
++ dpaa2_io_service_deregister(channel->dpio, nctx);
++err_service_reg:
++ free_channel(priv, channel);
++err_alloc_ch:
++ if (cpumask_empty(&priv->dpio_cpumask)) {
++ dev_err(dev, "No cpu with an affine DPIO/DPCON\n");
++ return err;
++ }
+
-+#define DPAA2_FD_RX_ERR_MASK (FD_CTRL_SBE | FD_CTRL_FAERR)
-+#define DPAA2_FD_TX_ERR_MASK (FD_CTRL_UFD | \
-+ FD_CTRL_SBE | \
-+ FD_CTRL_FSE | \
-+ FD_CTRL_FAERR)
++ dev_info(dev, "Cores %*pbl available for processing ingress traffic\n",
++ cpumask_pr_args(&priv->dpio_cpumask));
+
-+/* Annotation bits in FD CTRL */
-+#define DPAA2_FD_CTRL_ASAL 0x00020000 /* ASAL = 128 */
++ return 0;
++}
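++
++/* For reference: cdan_cb(), registered above, is defined earlier in this
++ * file. A minimal sketch of what such a data availability callback
++ * typically does, assuming the notification context is embedded in the
++ * channel structure (as it is here) and the channel carries a NAPI
++ * instance and a CDAN counter:
++ *
++ *	static void cdan_cb(struct dpaa2_io_notification_ctx *ctx)
++ *	{
++ *		struct dpaa2_eth_channel *ch;
++ *
++ *		ch = container_of(ctx, struct dpaa2_eth_channel, nctx);
++ *		ch->stats.cdan++;
++ *		napi_schedule_irqoff(&ch->napi);
++ *	}
++ */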
+
-+/* Size of hardware annotation area based on the current buffer layout
-+ * configuration
-+ */
-+#define DPAA2_ETH_RX_HWA_SIZE 64
-+#define DPAA2_ETH_TX_HWA_SIZE 128
++static void free_dpio(struct dpaa2_eth_priv *priv)
++{
++ int i;
++ struct dpaa2_eth_channel *ch;
+
-+/* Frame annotation status */
-+struct dpaa2_fas {
-+ u8 reserved;
-+ u8 ppid;
-+ __le16 ifpid;
-+ __le32 status;
-+} __packed;
++ /* deregister CDAN notifications and free channels */
++ for (i = 0; i < priv->num_channels; i++) {
++ ch = priv->channel[i];
++ dpaa2_io_service_deregister(ch->dpio, &ch->nctx);
++ free_channel(priv, ch);
++ }
++}
+
-+/* Frame annotation status word is located in the first 8 bytes
-+ * of the buffer's hardware annotation area
-+ */
-+#define DPAA2_FAS_OFFSET 0
-+#define DPAA2_FAS_SIZE (sizeof(struct dpaa2_fas))
++static struct dpaa2_eth_channel *get_affine_channel(struct dpaa2_eth_priv *priv,
++ int cpu)
++{
++ struct device *dev = priv->net_dev->dev.parent;
++ int i;
+
-+/* Timestamp is located in the next 8 bytes of the buffer's
-+ * hardware annotation area
-+ */
-+#define DPAA2_TS_OFFSET 0x8
++ for (i = 0; i < priv->num_channels; i++)
++ if (priv->channel[i]->nctx.desired_cpu == cpu)
++ return priv->channel[i];
+
-+/* Frame annotation egress action descriptor */
-+#define DPAA2_FAEAD_OFFSET 0x58
++ /* We should never get here. Issue a warning and return
++ * the first channel, because it's still better than nothing
++ */
++ dev_warn(dev, "No affine channel found for cpu %d\n", cpu);
+
-+struct dpaa2_faead {
-+ __le32 conf_fqid;
-+ __le32 ctrl;
-+};
++ return priv->channel[0];
++}
+
-+#define DPAA2_FAEAD_A2V 0x20000000
-+#define DPAA2_FAEAD_UPDV 0x00001000
-+#define DPAA2_FAEAD_UPD 0x00000010
++static void set_fq_affinity(struct dpaa2_eth_priv *priv)
++{
++ struct device *dev = priv->net_dev->dev.parent;
++ struct cpumask xps_mask;
++ struct dpaa2_eth_fq *fq;
++ int rx_cpu, txc_cpu;
++ int i, err;
+
-+/* accessors for the hardware annotation fields that we use */
-+#define dpaa2_eth_get_hwa(buf_addr) \
-+ ((void *)(buf_addr) + DPAA2_ETH_SWA_SIZE)
++ /* For each FQ, pick one channel/CPU to deliver frames to.
++ * This may well change at runtime, either through irqbalance or
++ * through direct user intervention.
++ */
++ rx_cpu = txc_cpu = cpumask_first(&priv->dpio_cpumask);
+
-+#define dpaa2_eth_get_fas(buf_addr) \
-+ (struct dpaa2_fas *)(dpaa2_eth_get_hwa(buf_addr) + DPAA2_FAS_OFFSET)
++ for (i = 0; i < priv->num_fqs; i++) {
++ fq = &priv->fq[i];
++ switch (fq->type) {
++ case DPAA2_RX_FQ:
++ case DPAA2_RX_ERR_FQ:
++ fq->target_cpu = rx_cpu;
++ rx_cpu = cpumask_next(rx_cpu, &priv->dpio_cpumask);
++ if (rx_cpu >= nr_cpu_ids)
++ rx_cpu = cpumask_first(&priv->dpio_cpumask);
++ break;
++ case DPAA2_TX_CONF_FQ:
++ fq->target_cpu = txc_cpu;
+
-+#define dpaa2_eth_get_ts(buf_addr) \
-+ (u64 *)(dpaa2_eth_get_hwa(buf_addr) + DPAA2_TS_OFFSET)
++			/* Tell the stack to affine the Tx queue associated
++			 * with this confirmation queue to txc_cpu
++			 */
++ cpumask_clear(&xps_mask);
++ cpumask_set_cpu(txc_cpu, &xps_mask);
++ err = netif_set_xps_queue(priv->net_dev, &xps_mask,
++ fq->flowid);
++ if (err)
++ dev_info_once(dev, "Error setting XPS queue\n");
+
-+#define dpaa2_eth_get_faead(buf_addr) \
-+ (struct dpaa2_faead *)(dpaa2_eth_get_hwa(buf_addr) + DPAA2_FAEAD_OFFSET)
++ txc_cpu = cpumask_next(txc_cpu, &priv->dpio_cpumask);
++ if (txc_cpu >= nr_cpu_ids)
++ txc_cpu = cpumask_first(&priv->dpio_cpumask);
++ break;
++ default:
++ dev_err(dev, "Unknown FQ type: %d\n", fq->type);
++ }
++ fq->channel = get_affine_channel(priv, fq->target_cpu);
++ }
++}
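++
++/* The per-queue Tx affinity programmed via netif_set_xps_queue() above can
++ * be inspected, and overridden by the user, at runtime through
++ * /sys/class/net/<iface>/queues/tx-<N>/xps_cpus.
++ */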
+
-+/* Error and status bits in the frame annotation status word */
-+/* Debug frame, otherwise supposed to be discarded */
-+#define DPAA2_FAS_DISC 0x80000000
-+/* MACSEC frame */
-+#define DPAA2_FAS_MS 0x40000000
-+#define DPAA2_FAS_PTP 0x08000000
-+/* Ethernet multicast frame */
-+#define DPAA2_FAS_MC 0x04000000
-+/* Ethernet broadcast frame */
-+#define DPAA2_FAS_BC 0x02000000
-+#define DPAA2_FAS_KSE 0x00040000
-+#define DPAA2_FAS_EOFHE 0x00020000
-+#define DPAA2_FAS_MNLE 0x00010000
-+#define DPAA2_FAS_TIDE 0x00008000
-+#define DPAA2_FAS_PIEE 0x00004000
-+/* Frame length error */
-+#define DPAA2_FAS_FLE 0x00002000
-+/* Frame physical error */
-+#define DPAA2_FAS_FPE 0x00001000
-+#define DPAA2_FAS_PTE 0x00000080
-+#define DPAA2_FAS_ISP 0x00000040
-+#define DPAA2_FAS_PHE 0x00000020
-+#define DPAA2_FAS_BLE 0x00000010
-+/* L3 csum validation performed */
-+#define DPAA2_FAS_L3CV 0x00000008
-+/* L3 csum error */
-+#define DPAA2_FAS_L3CE 0x00000004
-+/* L4 csum validation performed */
-+#define DPAA2_FAS_L4CV 0x00000002
-+/* L4 csum error */
-+#define DPAA2_FAS_L4CE 0x00000001
-+/* Possible errors on the ingress path */
-+#define DPAA2_FAS_RX_ERR_MASK ((DPAA2_FAS_KSE) | \
-+ (DPAA2_FAS_EOFHE) | \
-+ (DPAA2_FAS_MNLE) | \
-+ (DPAA2_FAS_TIDE) | \
-+ (DPAA2_FAS_PIEE) | \
-+ (DPAA2_FAS_FLE) | \
-+ (DPAA2_FAS_FPE) | \
-+ (DPAA2_FAS_PTE) | \
-+ (DPAA2_FAS_ISP) | \
-+ (DPAA2_FAS_PHE) | \
-+ (DPAA2_FAS_BLE) | \
-+ (DPAA2_FAS_L3CE) | \
-+ (DPAA2_FAS_L4CE))
-+/* Tx errors */
-+#define DPAA2_FAS_TX_ERR_MASK ((DPAA2_FAS_KSE) | \
-+ (DPAA2_FAS_EOFHE) | \
-+ (DPAA2_FAS_MNLE) | \
-+ (DPAA2_FAS_TIDE))
++static void setup_fqs(struct dpaa2_eth_priv *priv)
++{
++ int i, j;
+
-+/* Time in milliseconds between link state updates */
-+#define DPAA2_ETH_LINK_STATE_REFRESH 1000
++ /* We have one TxConf FQ per Tx flow.
++ * The number of Tx and Rx queues is the same.
++ * Tx queues come first in the fq array.
++ */
++ for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
++ priv->fq[priv->num_fqs].type = DPAA2_TX_CONF_FQ;
++ priv->fq[priv->num_fqs].consume = dpaa2_eth_tx_conf;
++ priv->fq[priv->num_fqs++].flowid = (u16)i;
++ }
+
-+/* Number of times to retry a frame enqueue before giving up.
-+ * Value determined empirically, in order to minimize the number
-+ * of frames dropped on Tx
-+ */
-+#define DPAA2_ETH_ENQUEUE_RETRIES 10
++ for (i = 0; i < dpaa2_eth_tc_count(priv); i++)
++ for (j = 0; j < dpaa2_eth_queue_count(priv); j++) {
++ priv->fq[priv->num_fqs].type = DPAA2_RX_FQ;
++ priv->fq[priv->num_fqs].consume = dpaa2_eth_rx;
++ priv->fq[priv->num_fqs].tc = (u8)i;
++ priv->fq[priv->num_fqs++].flowid = (u16)j;
++ }
+
-+/* Tx congestion entry & exit thresholds, in number of bytes.
-+ * We allow a maximum of 512KB worth of frames pending processing on the Tx
-+ * queues of an interface
-+ */
-+#define DPAA2_ETH_TX_CONG_ENTRY_THRESH (512 * 1024)
-+#define DPAA2_ETH_TX_CONG_EXIT_THRESH (DPAA2_ETH_TX_CONG_ENTRY_THRESH * 9/10)
++#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
++ /* We have exactly one Rx error queue per DPNI */
++ priv->fq[priv->num_fqs].type = DPAA2_RX_ERR_FQ;
++ priv->fq[priv->num_fqs++].consume = dpaa2_eth_rx_err;
++#endif
+
-+/* Driver statistics, other than those in struct rtnl_link_stats64.
-+ * These are usually collected per-CPU and aggregated by ethtool.
-+ */
-+struct dpaa2_eth_drv_stats {
-+ __u64 tx_conf_frames;
-+ __u64 tx_conf_bytes;
-+ __u64 tx_sg_frames;
-+ __u64 tx_sg_bytes;
-+ __u64 rx_sg_frames;
-+ __u64 rx_sg_bytes;
-+ /* Enqueues retried due to portal busy */
-+ __u64 tx_portal_busy;
-+};
++ /* For each FQ, decide on which core to process incoming frames */
++ set_fq_affinity(priv);
++}
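++
++/* Resulting fq[] layout, with Q = dpaa2_eth_queue_count() and
++ * T = dpaa2_eth_tc_count() (sketch; the Rx error entry exists only when
++ * CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE is set):
++ *
++ *	[0 .. Q-1]		Tx conf FQs, flowid 0..Q-1
++ *	[Q .. Q + T*Q - 1]	Rx FQs, ordered by tc, then by flowid
++ *	[Q + T*Q]		Rx error FQ (optional)
++ */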
+
-+/* Per-FQ statistics */
-+struct dpaa2_eth_fq_stats {
-+ /* Number of frames received on this queue */
-+ __u64 frames;
-+ /* Number of times this queue entered congestion */
-+ __u64 congestion_entry;
-+};
++/* Allocate and configure one buffer pool for each interface */
++static int setup_dpbp(struct dpaa2_eth_priv *priv)
++{
++ int err;
++ struct fsl_mc_device *dpbp_dev;
++ struct device *dev = priv->net_dev->dev.parent;
++ struct dpbp_attr dpbp_attrs;
+
-+/* Per-channel statistics */
-+struct dpaa2_eth_ch_stats {
-+ /* Volatile dequeues retried due to portal busy */
-+ __u64 dequeue_portal_busy;
-+ /* Number of CDANs; useful to estimate avg NAPI len */
-+ __u64 cdan;
-+ /* Number of frames received on queues from this channel */
-+ __u64 frames;
-+ /* Pull errors */
-+ __u64 pull_err;
-+};
++ err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP,
++ &dpbp_dev);
++ if (err) {
++ dev_err(dev, "DPBP device allocation failed\n");
++ return err;
++ }
+
-+/* Maximum number of queues associated with a DPNI */
-+#define DPAA2_ETH_MAX_RX_QUEUES 16
-+#define DPAA2_ETH_MAX_TX_QUEUES NR_CPUS
-+#define DPAA2_ETH_MAX_RX_ERR_QUEUES 1
-+#define DPAA2_ETH_MAX_QUEUES (DPAA2_ETH_MAX_RX_QUEUES + \
-+ DPAA2_ETH_MAX_TX_QUEUES + \
-+ DPAA2_ETH_MAX_RX_ERR_QUEUES)
++ priv->dpbp_dev = dpbp_dev;
+
-+#define DPAA2_ETH_MAX_DPCONS NR_CPUS
++ err = dpbp_open(priv->mc_io, 0, priv->dpbp_dev->obj_desc.id,
++ &dpbp_dev->mc_handle);
++ if (err) {
++ dev_err(dev, "dpbp_open() failed\n");
++ goto err_open;
++ }
+
-+enum dpaa2_eth_fq_type {
-+ DPAA2_RX_FQ = 0,
-+ DPAA2_TX_CONF_FQ,
-+ DPAA2_RX_ERR_FQ
-+};
++ err = dpbp_reset(priv->mc_io, 0, dpbp_dev->mc_handle);
++ if (err) {
++ dev_err(dev, "dpbp_reset() failed\n");
++ goto err_reset;
++ }
+
-+struct dpaa2_eth_priv;
++ err = dpbp_enable(priv->mc_io, 0, dpbp_dev->mc_handle);
++ if (err) {
++ dev_err(dev, "dpbp_enable() failed\n");
++ goto err_enable;
++ }
+
-+struct dpaa2_eth_fq {
-+ u32 fqid;
-+ u32 tx_qdbin;
-+ u16 flowid;
-+ int target_cpu;
-+ struct dpaa2_eth_channel *channel;
-+ enum dpaa2_eth_fq_type type;
++ err = dpbp_get_attributes(priv->mc_io, 0, dpbp_dev->mc_handle,
++ &dpbp_attrs);
++ if (err) {
++ dev_err(dev, "dpbp_get_attributes() failed\n");
++ goto err_get_attr;
++ }
++ priv->bpid = dpbp_attrs.bpid;
+
-+ void (*consume)(struct dpaa2_eth_priv *,
-+ struct dpaa2_eth_channel *,
-+ const struct dpaa2_fd *,
-+ struct napi_struct *,
-+ u16 queue_id);
-+ struct dpaa2_eth_fq_stats stats;
-+};
++ /* By default we start with flow control enabled */
++ priv->max_bufs_per_ch = DPAA2_ETH_NUM_BUFS_FC / priv->num_channels;
+
-+struct dpaa2_eth_channel {
-+ struct dpaa2_io_notification_ctx nctx;
-+ struct fsl_mc_device *dpcon;
-+ int dpcon_id;
-+ int ch_id;
-+ int dpio_id;
-+ struct napi_struct napi;
-+ struct dpaa2_io_store *store;
-+ struct dpaa2_eth_priv *priv;
-+ int buf_count;
-+ struct dpaa2_eth_ch_stats stats;
-+};
++ return 0;
+
-+struct dpaa2_eth_cls_rule {
-+ struct ethtool_rx_flow_spec fs;
-+ bool in_use;
-+};
++err_get_attr:
++ dpbp_disable(priv->mc_io, 0, dpbp_dev->mc_handle);
++err_enable:
++err_reset:
++ dpbp_close(priv->mc_io, 0, dpbp_dev->mc_handle);
++err_open:
++ fsl_mc_object_free(dpbp_dev);
+
-+struct dpaa2_eth_hash_fields {
-+ u64 rxnfc_field;
-+ enum net_prot cls_prot;
-+ int cls_field;
-+ int offset;
-+ int size;
-+};
++ return err;
++}
+
-+/* Driver private data */
-+struct dpaa2_eth_priv {
-+ struct net_device *net_dev;
++static void free_dpbp(struct dpaa2_eth_priv *priv)
++{
++ drain_pool(priv);
++ dpbp_disable(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
++ dpbp_close(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
++ fsl_mc_object_free(priv->dpbp_dev);
++}
+
-+ /* Standard statistics */
-+ struct rtnl_link_stats64 __percpu *percpu_stats;
-+ /* Extra stats, in addition to the ones known by the kernel */
-+ struct dpaa2_eth_drv_stats __percpu *percpu_extras;
-+ struct iommu_domain *iommu_domain;
++static int setup_tx_congestion(struct dpaa2_eth_priv *priv)
++{
++ struct dpni_congestion_notification_cfg notif_cfg = {0};
++ struct device *dev = priv->net_dev->dev.parent;
++ int err;
+
-+ bool ts_tx_en; /* Tx timestamping enabled */
-+ bool ts_rx_en; /* Rx timestamping enabled */
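++	/* Over-allocate by the alignment amount so the congestion state
++	 * change notification (CSCN) area can be aligned with PTR_ALIGN
++	 * below; the unaligned pointer is kept only for kfree()
++	 */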
++ priv->cscn_unaligned = kzalloc(DPAA2_CSCN_SIZE + DPAA2_CSCN_ALIGN,
++ GFP_KERNEL);
+
-+ u16 tx_data_offset;
-+ u16 rx_buf_align;
++ if (!priv->cscn_unaligned)
++ return -ENOMEM;
+
-+ u16 bpid;
-+ u16 tx_qdid;
++ priv->cscn_mem = PTR_ALIGN(priv->cscn_unaligned, DPAA2_CSCN_ALIGN);
++ priv->cscn_dma = dma_map_single(dev, priv->cscn_mem, DPAA2_CSCN_SIZE,
++ DMA_FROM_DEVICE);
++ if (dma_mapping_error(dev, priv->cscn_dma)) {
++ dev_err(dev, "Error mapping CSCN memory area\n");
++ err = -ENOMEM;
++ goto err_dma_map;
++ }
+
-+ int tx_pause_frames;
-+ int num_bufs;
-+ int refill_thresh;
++ notif_cfg.units = DPNI_CONGESTION_UNIT_BYTES;
++ notif_cfg.threshold_entry = DPAA2_ETH_TX_CONG_ENTRY_THRESH;
++ notif_cfg.threshold_exit = DPAA2_ETH_TX_CONG_EXIT_THRESH;
++	notif_cfg.message_ctx = (u64)(uintptr_t)priv;
++ notif_cfg.message_iova = priv->cscn_dma;
++ notif_cfg.notification_mode = DPNI_CONG_OPT_WRITE_MEM_ON_ENTER |
++ DPNI_CONG_OPT_WRITE_MEM_ON_EXIT |
++ DPNI_CONG_OPT_COHERENT_WRITE;
++ err = dpni_set_congestion_notification(priv->mc_io, 0, priv->mc_token,
++					       DPNI_QUEUE_TX, 0, &notif_cfg);
++ if (err) {
++ dev_err(dev, "dpni_set_congestion_notification failed\n");
++ goto err_set_cong;
++ }
+
-+ /* Tx congestion notifications are written here */
-+ void *cscn_mem;
-+ void *cscn_unaligned;
-+ dma_addr_t cscn_dma;
++ return 0;
+
-+ u8 num_fqs;
-+ /* Tx queues are at the beginning of the array */
-+ struct dpaa2_eth_fq fq[DPAA2_ETH_MAX_QUEUES];
++err_set_cong:
++ dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
++err_dma_map:
++ kfree(priv->cscn_unaligned);
+
-+ u8 num_channels;
-+ struct dpaa2_eth_channel *channel[DPAA2_ETH_MAX_DPCONS];
++ return err;
++}
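++
++/* Once the CSCN area is mapped, the hot path can test Tx congestion with a
++ * plain memory read. Roughly (a sketch, assuming the
++ * dpaa2_cscn_state_congested() helper used elsewhere in the driver):
++ *
++ *	dma_sync_single_for_cpu(dev, priv->cscn_dma, DPAA2_CSCN_SIZE,
++ *				DMA_FROM_DEVICE);
++ *	if (dpaa2_cscn_state_congested(priv->cscn_mem))
++ *		netif_tx_stop_all_queues(priv->net_dev);
++ */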
+
-+ int dpni_id;
-+ struct dpni_attr dpni_attrs;
-+ struct fsl_mc_device *dpbp_dev;
++/* Configure the DPNI object this interface is associated with */
++static int setup_dpni(struct fsl_mc_device *ls_dev)
++{
++ struct device *dev = &ls_dev->dev;
++ struct dpaa2_eth_priv *priv;
++ struct net_device *net_dev;
++ struct dpni_link_cfg cfg = {0};
++ int err;
+
-+ struct fsl_mc_io *mc_io;
-+ /* SysFS-controlled affinity mask for TxConf FQs */
-+ struct cpumask txconf_cpumask;
-+ /* Cores which have an affine DPIO/DPCON.
-+ * This is the cpu set on which Rx frames are processed;
-+ * Tx confirmation frames are processed on a subset of this,
-+ * depending on user settings.
-+ */
-+ struct cpumask dpio_cpumask;
++ net_dev = dev_get_drvdata(dev);
++ priv = netdev_priv(net_dev);
+
-+ u16 mc_token;
++ /* get a handle for the DPNI object */
++ err = dpni_open(priv->mc_io, 0, ls_dev->obj_desc.id, &priv->mc_token);
++ if (err) {
++ dev_err(dev, "dpni_open() failed\n");
++ return err;
++ }
+
-+ struct dpni_link_state link_state;
-+ bool do_link_poll;
-+ struct task_struct *poll_thread;
++ /* Check if we can work with this DPNI object */
++ err = dpni_get_api_version(priv->mc_io, 0, &priv->dpni_ver_major,
++ &priv->dpni_ver_minor);
++ if (err) {
++ dev_err(dev, "dpni_get_api_version() failed\n");
++ goto close;
++ }
++ if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_VER_MAJOR, DPNI_VER_MINOR) < 0) {
++ dev_err(dev, "DPNI version %u.%u not supported, need >= %u.%u\n",
++ priv->dpni_ver_major, priv->dpni_ver_minor,
++ DPNI_VER_MAJOR, DPNI_VER_MINOR);
++ err = -ENOTSUPP;
++ goto close;
++ }
+
-+ struct dpaa2_eth_hash_fields *hash_fields;
-+ u8 num_hash_fields;
-+ /* enabled ethtool hashing bits */
-+ u64 rx_flow_hash;
++ ls_dev->mc_io = priv->mc_io;
++ ls_dev->mc_handle = priv->mc_token;
+
-+#ifdef CONFIG_FSL_DPAA2_ETH_DEBUGFS
-+ struct dpaa2_debugfs dbg;
-+#endif
++ err = dpni_reset(priv->mc_io, 0, priv->mc_token);
++ if (err) {
++ dev_err(dev, "dpni_reset() failed\n");
++ goto close;
++ }
+
-+ /* array of classification rules */
-+ struct dpaa2_eth_cls_rule *cls_rule;
++ err = dpni_get_attributes(priv->mc_io, 0, priv->mc_token,
++ &priv->dpni_attrs);
++ if (err) {
++ dev_err(dev, "dpni_get_attributes() failed (err=%d)\n", err);
++ goto close;
++ }
+
-+ struct dpni_tx_shaping_cfg shaping_cfg;
-+};
++ err = set_buffer_layout(priv);
++ if (err)
++ goto close;
+
-+#define dpaa2_eth_hash_enabled(priv) \
-+ ((priv)->dpni_attrs.num_queues > 1)
++ /* Enable congestion notifications for Tx queues */
++ err = setup_tx_congestion(priv);
++ if (err)
++ goto close;
+
-+#define dpaa2_eth_fs_enabled(priv) \
-+ (!((priv)->dpni_attrs.options & DPNI_OPT_NO_FS))
++	/* Allocate classification rule space */
++	priv->cls_rule = kcalloc(dpaa2_eth_fs_count(priv),
++				 sizeof(*priv->cls_rule), GFP_KERNEL);
++	if (!priv->cls_rule) {
++		err = -ENOMEM;
++		goto close;
++	}
+
-+#define dpaa2_eth_fs_mask_enabled(priv) \
-+ ((priv)->dpni_attrs.options & DPNI_OPT_HAS_KEY_MASKING)
++ /* Enable flow control */
++ cfg.options = DPNI_LINK_OPT_AUTONEG | DPNI_LINK_OPT_PAUSE;
++ priv->tx_pause_frames = true;
++ err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg);
++ if (err) {
++ dev_err(dev, "dpni_set_link_cfg() failed\n");
++ goto cls_free;
++ }
+
-+#define dpaa2_eth_fs_count(priv) \
-+ ((priv)->dpni_attrs.fs_entries)
++ return 0;
+
-+/* size of DMA memory used to pass configuration to classifier, in bytes */
-+#define DPAA2_CLASSIFIER_DMA_SIZE 256
++cls_free:
++ kfree(priv->cls_rule);
++close:
++ dpni_close(priv->mc_io, 0, priv->mc_token);
+
-+extern const struct ethtool_ops dpaa2_ethtool_ops;
-+extern const char dpaa2_eth_drv_version[];
++ return err;
++}
+
-+static inline int dpaa2_eth_queue_count(struct dpaa2_eth_priv *priv)
++static void free_dpni(struct dpaa2_eth_priv *priv)
+{
-+ return priv->dpni_attrs.num_queues;
-+}
++ struct device *dev = priv->net_dev->dev.parent;
++ int err;
+
-+void check_cls_support(struct dpaa2_eth_priv *priv);
++ err = dpni_reset(priv->mc_io, 0, priv->mc_token);
++ if (err)
++ netdev_warn(priv->net_dev, "dpni_reset() failed (err %d)\n",
++ err);
+
-+int setup_fqs_taildrop(struct dpaa2_eth_priv *priv, bool enable);
-+#endif /* __DPAA2_H */
---- /dev/null
-+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c
-@@ -0,0 +1,856 @@
-+/* Copyright 2014-2015 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
++ dpni_close(priv->mc_io, 0, priv->mc_token);
+
-+#include "dpni.h" /* DPNI_LINK_OPT_* */
-+#include "dpaa2-eth.h"
++ kfree(priv->cls_rule);
+
-+/* To be kept in sync with dpni_statistics */
-+static char dpaa2_ethtool_stats[][ETH_GSTRING_LEN] = {
-+ "rx frames",
-+ "rx bytes",
-+ "rx mcast frames",
-+ "rx mcast bytes",
-+ "rx bcast frames",
-+ "rx bcast bytes",
-+ "tx frames",
-+ "tx bytes",
-+ "tx mcast frames",
-+ "tx mcast bytes",
-+ "tx bcast frames",
-+ "tx bcast bytes",
-+ "rx filtered frames",
-+ "rx discarded frames",
-+ "rx nobuffer discards",
-+ "tx discarded frames",
-+ "tx confirmed frames",
-+};
++ dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
++ kfree(priv->cscn_unaligned);
++}
+
-+#define DPAA2_ETH_NUM_STATS ARRAY_SIZE(dpaa2_ethtool_stats)
++static int setup_rx_flow(struct dpaa2_eth_priv *priv,
++ struct dpaa2_eth_fq *fq)
++{
++ struct device *dev = priv->net_dev->dev.parent;
++ struct dpni_queue queue;
++ struct dpni_queue_id qid;
++ int err;
+
-+/* To be kept in sync with 'struct dpaa2_eth_drv_stats' */
-+static char dpaa2_ethtool_extras[][ETH_GSTRING_LEN] = {
-+ /* per-cpu stats */
++ err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
++ DPNI_QUEUE_RX, fq->tc, fq->flowid, &queue, &qid);
++ if (err) {
++ dev_err(dev, "dpni_get_queue(RX) failed\n");
++ return err;
++ }
+
-+ "tx conf frames",
-+ "tx conf bytes",
-+ "tx sg frames",
-+ "tx sg bytes",
-+ "rx sg frames",
-+ "rx sg bytes",
-+ /* how many times we had to retry the enqueue command */
-+ "enqueue portal busy",
++ fq->fqid = qid.fqid;
+
-+ /* Channel stats */
-+ /* How many times we had to retry the volatile dequeue command */
-+ "dequeue portal busy",
-+ "channel pull errors",
-+ /* Number of notifications received */
-+ "cdan",
-+ "tx congestion state",
-+#ifdef CONFIG_FSL_QBMAN_DEBUG
-+ /* FQ stats */
-+ "rx pending frames",
-+ "rx pending bytes",
-+ "tx conf pending frames",
-+ "tx conf pending bytes",
-+ "buffer count"
-+#endif
-+};
++ queue.destination.id = fq->channel->dpcon_id;
++ queue.destination.type = DPNI_DEST_DPCON;
++ queue.destination.priority = 1;
++ queue.user_context = (u64)(uintptr_t)fq;
++ err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
++ DPNI_QUEUE_RX, fq->tc, fq->flowid,
++ DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
++ &queue);
++ if (err) {
++ dev_err(dev, "dpni_set_queue(RX) failed\n");
++ return err;
++ }
+
-+#define DPAA2_ETH_NUM_EXTRA_STATS ARRAY_SIZE(dpaa2_ethtool_extras)
++ return 0;
++}
+
-+static void dpaa2_eth_get_drvinfo(struct net_device *net_dev,
-+ struct ethtool_drvinfo *drvinfo)
++static int set_queue_taildrop(struct dpaa2_eth_priv *priv,
++ struct dpni_taildrop *td)
+{
-+ strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
-+ strlcpy(drvinfo->version, dpaa2_eth_drv_version,
-+ sizeof(drvinfo->version));
-+ strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
-+ strlcpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent),
-+ sizeof(drvinfo->bus_info));
++ struct device *dev = priv->net_dev->dev.parent;
++ int i, err;
++
++ for (i = 0; i < priv->num_fqs; i++) {
++ if (priv->fq[i].type != DPAA2_RX_FQ)
++ continue;
++
++ err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
++ DPNI_CP_QUEUE, DPNI_QUEUE_RX,
++ priv->fq[i].tc, priv->fq[i].flowid,
++ td);
++ if (err) {
++ dev_err(dev, "dpni_set_taildrop() failed (%d)\n", err);
++ return err;
++ }
++
++		dev_dbg(dev, "%s taildrop for Rx queue id %d tc %d\n",
++			(td->enable ? "Enabled" : "Disabled"),
++			priv->fq[i].flowid, priv->fq[i].tc);
++ }
++
++ return 0;
+}
+
-+static int dpaa2_eth_get_settings(struct net_device *net_dev,
-+ struct ethtool_cmd *cmd)
++static int set_group_taildrop(struct dpaa2_eth_priv *priv,
++ struct dpni_taildrop *td)
+{
-+ struct dpni_link_state state = {0};
-+ int err = 0;
-+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
++ struct device *dev = priv->net_dev->dev.parent;
++ struct dpni_taildrop disable_td, *tc_td;
++ int i, err;
+
-+ err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
-+ if (err) {
-+ netdev_err(net_dev, "ERROR %d getting link state", err);
-+ goto out;
-+ }
++ memset(&disable_td, 0, sizeof(struct dpni_taildrop));
++ for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
++ if (td->enable && dpaa2_eth_is_pfc_enabled(priv, i))
++ /* Do not set taildrop thresholds for PFC-enabled
++ * traffic classes. We will enable congestion
++ * notifications for them.
++ */
++ tc_td = &disable_td;
++ else
++ tc_td = td;
+
-+ /* At the moment, we have no way of interrogating the DPMAC
-+ * from the DPNI side - and for that matter there may exist
-+ * no DPMAC at all. So for now we just don't report anything
-+ * beyond the DPNI attributes.
-+ */
-+ if (state.options & DPNI_LINK_OPT_AUTONEG)
-+ cmd->autoneg = AUTONEG_ENABLE;
-+ if (!(state.options & DPNI_LINK_OPT_HALF_DUPLEX))
-+ cmd->duplex = DUPLEX_FULL;
-+ ethtool_cmd_speed_set(cmd, state.rate);
++ err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
++ DPNI_CP_GROUP, DPNI_QUEUE_RX,
++ i, 0, tc_td);
++ if (err) {
++ dev_err(dev, "dpni_set_taildrop() failed (%d)\n", err);
++ return err;
++ }
+
-+out:
-+ return err;
++		dev_dbg(dev, "%s taildrop for Rx group tc %d\n",
++			(tc_td->enable ? "Enabled" : "Disabled"), i);
++ }
++
++ return 0;
+}
+
-+static int dpaa2_eth_set_settings(struct net_device *net_dev,
-+ struct ethtool_cmd *cmd)
++/* Enable/disable Rx FQ taildrop
++ *
++ * Rx FQ taildrop is mutually exclusive with flow control, so it only gets
++ * disabled when FC is active. Depending on the FC status, we need to compute
++ * the maximum number of buffers in the pool differently, so use the
++ * opportunity to update max number of buffers as well.
++ */
++int set_rx_taildrop(struct dpaa2_eth_priv *priv)
+{
-+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
-+ struct dpni_link_state state = {0};
-+ struct dpni_link_cfg cfg = {0};
++ enum dpaa2_eth_td_cfg cfg = dpaa2_eth_get_td_type(priv);
++ struct dpni_taildrop td_queue, td_group;
+ int err = 0;
+
-+ netdev_dbg(net_dev, "Setting link parameters...");
-+
-+ /* Need to interrogate on link state to get flow control params */
-+ err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
-+ if (err) {
-+ netdev_err(net_dev, "ERROR %d getting link state", err);
-+ goto out;
++ switch (cfg) {
++ case DPAA2_ETH_TD_NONE:
++ memset(&td_queue, 0, sizeof(struct dpni_taildrop));
++ memset(&td_group, 0, sizeof(struct dpni_taildrop));
++ priv->max_bufs_per_ch = DPAA2_ETH_NUM_BUFS_FC /
++ priv->num_channels;
++ break;
++ case DPAA2_ETH_TD_QUEUE:
++ memset(&td_group, 0, sizeof(struct dpni_taildrop));
++ td_queue.enable = 1;
++ td_queue.units = DPNI_CONGESTION_UNIT_BYTES;
++ td_queue.threshold = DPAA2_ETH_TAILDROP_THRESH /
++ dpaa2_eth_tc_count(priv);
++ priv->max_bufs_per_ch = DPAA2_ETH_NUM_BUFS_PER_CH;
++ break;
++ case DPAA2_ETH_TD_GROUP:
++ memset(&td_queue, 0, sizeof(struct dpni_taildrop));
++ td_group.enable = 1;
++ td_group.units = DPNI_CONGESTION_UNIT_FRAMES;
++ td_group.threshold = NAPI_POLL_WEIGHT *
++ dpaa2_eth_queue_count(priv);
++ priv->max_bufs_per_ch = NAPI_POLL_WEIGHT *
++ dpaa2_eth_tc_count(priv);
++ break;
++ default:
++ break;
+ }
+
-+ cfg.options = state.options;
-+ cfg.rate = ethtool_cmd_speed(cmd);
-+ if (cmd->autoneg == AUTONEG_ENABLE)
-+ cfg.options |= DPNI_LINK_OPT_AUTONEG;
-+ else
-+ cfg.options &= ~DPNI_LINK_OPT_AUTONEG;
-+ if (cmd->duplex == DUPLEX_HALF)
-+ cfg.options |= DPNI_LINK_OPT_HALF_DUPLEX;
-+ else
-+ cfg.options &= ~DPNI_LINK_OPT_HALF_DUPLEX;
++ err = set_queue_taildrop(priv, &td_queue);
++ if (err)
++ return err;
+
-+ err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg);
++ err = set_group_taildrop(priv, &td_group);
+ if (err)
-+ /* ethtool will be loud enough if we return an error; no point
-+ * in putting our own error message on the console by default
-+ */
-+ netdev_dbg(net_dev, "ERROR %d setting link cfg", err);
++ return err;
+
-+out:
-+ return err;
++ priv->refill_thresh = DPAA2_ETH_REFILL_THRESH(priv);
++
++ return 0;
+}
+
-+static void dpaa2_eth_get_pauseparam(struct net_device *net_dev,
-+ struct ethtool_pauseparam *pause)
++static int setup_tx_flow(struct dpaa2_eth_priv *priv,
++ struct dpaa2_eth_fq *fq)
+{
-+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
-+ struct dpni_link_state state = {0};
++ struct device *dev = priv->net_dev->dev.parent;
++ struct dpni_queue queue;
++ struct dpni_queue_id qid;
+ int err;
+
-+ err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
-+ if (err)
-+ netdev_dbg(net_dev, "ERROR %d getting link state", err);
-+
-+ /* for now, pause frames autonegotiation is not separate */
-+ pause->autoneg = !!(state.options & DPNI_LINK_OPT_AUTONEG);
-+ pause->rx_pause = !!(state.options & DPNI_LINK_OPT_PAUSE);
-+ pause->tx_pause = pause->rx_pause ^
-+ !!(state.options & DPNI_LINK_OPT_ASYM_PAUSE);
-+}
++ err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
++ DPNI_QUEUE_TX, 0, fq->flowid, &queue, &qid);
++ if (err) {
++ dev_err(dev, "dpni_get_queue(TX) failed\n");
++ return err;
++ }
+
-+static int dpaa2_eth_set_pauseparam(struct net_device *net_dev,
-+ struct ethtool_pauseparam *pause)
-+{
-+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
-+ struct dpni_link_state state = {0};
-+ struct dpni_link_cfg cfg = {0};
-+ u32 current_tx_pause;
-+ int err = 0;
++ fq->tx_qdbin = qid.qdbin;
+
-+ err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
++ err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
++ DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
++ &queue, &qid);
+ if (err) {
-+ netdev_dbg(net_dev, "ERROR %d getting link state", err);
-+ goto out;
++ dev_err(dev, "dpni_get_queue(TX_CONF) failed\n");
++ return err;
+ }
+
-+ cfg.rate = state.rate;
-+ cfg.options = state.options;
-+ current_tx_pause = !!(cfg.options & DPNI_LINK_OPT_PAUSE) ^
-+ !!(cfg.options & DPNI_LINK_OPT_ASYM_PAUSE);
++ fq->fqid = qid.fqid;
+
-+ if (pause->autoneg != !!(state.options & DPNI_LINK_OPT_AUTONEG))
-+ netdev_warn(net_dev,
-+ "WARN: Can't change pause frames autoneg separately\n");
++ queue.destination.id = fq->channel->dpcon_id;
++ queue.destination.type = DPNI_DEST_DPCON;
++ queue.destination.priority = 0;
++ queue.user_context = (u64)(uintptr_t)fq;
++ err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
++ DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
++ DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
++ &queue);
++ if (err) {
++ dev_err(dev, "dpni_set_queue(TX_CONF) failed\n");
++ return err;
++ }
+
-+ if (pause->rx_pause)
-+ cfg.options |= DPNI_LINK_OPT_PAUSE;
-+ else
-+ cfg.options &= ~DPNI_LINK_OPT_PAUSE;
++ return 0;
++}
+
-+ if (pause->rx_pause ^ pause->tx_pause)
-+ cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
-+ else
-+ cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
++#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
++static int setup_rx_err_flow(struct dpaa2_eth_priv *priv,
++ struct dpaa2_eth_fq *fq)
++{
++ struct device *dev = priv->net_dev->dev.parent;
++ struct dpni_queue q = { { 0 } };
++ struct dpni_queue_id qid;
++ u8 q_opt = DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST;
++ int err;
+
-+ err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg);
++ err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
++ DPNI_QUEUE_RX_ERR, 0, 0, &q, &qid);
+ if (err) {
-+ /* ethtool will be loud enough if we return an error; no point
-+ * in putting our own error message on the console by default
-+ */
-+ netdev_dbg(net_dev, "ERROR %d setting link cfg", err);
-+ goto out;
++ dev_err(dev, "dpni_get_queue() failed (%d)\n", err);
++ return err;
+ }
+
-+ /* Enable / disable taildrops if Tx pause frames have changed */
-+ if (current_tx_pause == pause->tx_pause)
-+ goto out;
++ fq->fqid = qid.fqid;
+
-+ err = setup_fqs_taildrop(priv, !pause->tx_pause);
-+ if (err)
-+ netdev_dbg(net_dev, "ERROR %d configuring taildrop", err);
++ q.destination.id = fq->channel->dpcon_id;
++ q.destination.type = DPNI_DEST_DPCON;
++ q.destination.priority = 1;
++	q.user_context = (u64)(uintptr_t)fq;
++ err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
++ DPNI_QUEUE_RX_ERR, 0, 0, q_opt, &q);
++ if (err) {
++ dev_err(dev, "dpni_set_queue() failed (%d)\n", err);
++ return err;
++ }
+
-+ priv->tx_pause_frames = pause->tx_pause;
-+out:
-+ return err;
++ return 0;
+}
++#endif
+
-+static void dpaa2_eth_get_strings(struct net_device *netdev, u32 stringset,
-+ u8 *data)
++/* default hash key fields */
++static struct dpaa2_eth_dist_fields default_dist_fields[] = {
++ {
++ /* L2 header */
++ .rxnfc_field = RXH_L2DA,
++ .cls_prot = NET_PROT_ETH,
++ .cls_field = NH_FLD_ETH_DA,
++ .id = DPAA2_ETH_DIST_ETHDST,
++ .size = 6,
++ }, {
++ .cls_prot = NET_PROT_ETH,
++ .cls_field = NH_FLD_ETH_SA,
++ .id = DPAA2_ETH_DIST_ETHSRC,
++ .size = 6,
++ }, {
++ /* This is the last ethertype field parsed:
++ * depending on frame format, it can be the MAC ethertype
++ * or the VLAN etype.
++ */
++ .cls_prot = NET_PROT_ETH,
++ .cls_field = NH_FLD_ETH_TYPE,
++ .id = DPAA2_ETH_DIST_ETHTYPE,
++ .size = 2,
++ }, {
++ /* VLAN header */
++ .rxnfc_field = RXH_VLAN,
++ .cls_prot = NET_PROT_VLAN,
++ .cls_field = NH_FLD_VLAN_TCI,
++ .id = DPAA2_ETH_DIST_VLAN,
++ .size = 2,
++ }, {
++ /* IP header */
++ .rxnfc_field = RXH_IP_SRC,
++ .cls_prot = NET_PROT_IP,
++ .cls_field = NH_FLD_IP_SRC,
++ .id = DPAA2_ETH_DIST_IPSRC,
++ .size = 4,
++ }, {
++ .rxnfc_field = RXH_IP_DST,
++ .cls_prot = NET_PROT_IP,
++ .cls_field = NH_FLD_IP_DST,
++ .id = DPAA2_ETH_DIST_IPDST,
++ .size = 4,
++ }, {
++ .rxnfc_field = RXH_L3_PROTO,
++ .cls_prot = NET_PROT_IP,
++ .cls_field = NH_FLD_IP_PROTO,
++ .id = DPAA2_ETH_DIST_IPPROTO,
++ .size = 1,
++ }, {
++ /* Using UDP ports, this is functionally equivalent to raw
++ * byte pairs from L4 header.
++ */
++ .rxnfc_field = RXH_L4_B_0_1,
++ .cls_prot = NET_PROT_UDP,
++ .cls_field = NH_FLD_UDP_PORT_SRC,
++ .id = DPAA2_ETH_DIST_L4SRC,
++ .size = 2,
++ }, {
++ .rxnfc_field = RXH_L4_B_2_3,
++ .cls_prot = NET_PROT_UDP,
++ .cls_field = NH_FLD_UDP_PORT_DST,
++ .id = DPAA2_ETH_DIST_L4DST,
++ .size = 2,
++ },
++};
++
++static int legacy_config_dist_key(struct dpaa2_eth_priv *priv,
++ dma_addr_t key_iova)
+{
-+ u8 *p = data;
-+ int i;
++ struct device *dev = priv->net_dev->dev.parent;
++ struct dpni_rx_tc_dist_cfg dist_cfg;
++ int i, err;
+
-+ switch (stringset) {
-+ case ETH_SS_STATS:
-+ for (i = 0; i < DPAA2_ETH_NUM_STATS; i++) {
-+ strlcpy(p, dpaa2_ethtool_stats[i], ETH_GSTRING_LEN);
-+ p += ETH_GSTRING_LEN;
-+ }
-+ for (i = 0; i < DPAA2_ETH_NUM_EXTRA_STATS; i++) {
-+ strlcpy(p, dpaa2_ethtool_extras[i], ETH_GSTRING_LEN);
-+ p += ETH_GSTRING_LEN;
-+ }
-+ break;
++ /* In legacy mode, we can't configure flow steering independently */
++ if (!dpaa2_eth_hash_enabled(priv))
++ return -EOPNOTSUPP;
++
++ memset(&dist_cfg, 0, sizeof(dist_cfg));
++
++ dist_cfg.key_cfg_iova = key_iova;
++ dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
++ if (dpaa2_eth_fs_enabled(priv)) {
++ dist_cfg.dist_mode = DPNI_DIST_MODE_FS;
++ dist_cfg.fs_cfg.miss_action = DPNI_FS_MISS_HASH;
++ } else {
++ dist_cfg.dist_mode = DPNI_DIST_MODE_HASH;
+ }
-+}
+
-+static int dpaa2_eth_get_sset_count(struct net_device *net_dev, int sset)
-+{
-+ switch (sset) {
-+ case ETH_SS_STATS: /* ethtool_get_stats(), ethtool_get_drvinfo() */
-+ return DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS;
-+ default:
-+ return -EOPNOTSUPP;
++ for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
++ err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token, i,
++ &dist_cfg);
++ if (err) {
++ dev_err(dev, "dpni_set_rx_tc_dist failed\n");
++ return err;
++ }
+ }
++
++ return 0;
+}
+
-+/** Fill in hardware counters, as returned by MC.
-+ */
-+static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
-+ struct ethtool_stats *stats,
-+ u64 *data)
++static int config_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key_iova)
+{
-+ int i = 0; /* Current index in the data array */
-+ int j = 0, k, err;
-+ union dpni_statistics dpni_stats;
++ struct device *dev = priv->net_dev->dev.parent;
++ struct dpni_rx_dist_cfg dist_cfg;
++ int i, err;
+
-+#ifdef CONFIG_FSL_QBMAN_DEBUG
-+ u32 fcnt, bcnt;
-+ u32 fcnt_rx_total = 0, fcnt_tx_total = 0;
-+ u32 bcnt_rx_total = 0, bcnt_tx_total = 0;
-+ u32 buf_cnt;
-+#endif
-+ u64 cdan = 0;
-+ u64 portal_busy = 0, pull_err = 0;
-+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
-+ struct dpaa2_eth_drv_stats *extras;
-+ struct dpaa2_eth_ch_stats *ch_stats;
++ if (!dpaa2_eth_hash_enabled(priv))
++ return -EOPNOTSUPP;
+
-+ memset(data, 0,
-+ sizeof(u64) * (DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS));
++ memset(&dist_cfg, 0, sizeof(dist_cfg));
+
-+ /* Print standard counters, from DPNI statistics */
-+ for (j = 0; j <= 2; j++) {
-+ err = dpni_get_statistics(priv->mc_io, 0, priv->mc_token,
-+ j, &dpni_stats);
-+ if (err != 0)
-+ netdev_warn(net_dev, "Err %d getting DPNI stats page %d",
-+ err, j);
++ dist_cfg.key_cfg_iova = key_iova;
++ dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
++ dist_cfg.enable = true;
+
-+ switch (j) {
-+ case 0:
-+ *(data + i++) = dpni_stats.page_0.ingress_all_frames;
-+ *(data + i++) = dpni_stats.page_0.ingress_all_bytes;
-+ *(data + i++) = dpni_stats.page_0.ingress_multicast_frames;
-+ *(data + i++) = dpni_stats.page_0.ingress_multicast_bytes;
-+ *(data + i++) = dpni_stats.page_0.ingress_broadcast_frames;
-+ *(data + i++) = dpni_stats.page_0.ingress_broadcast_bytes;
-+ break;
-+ case 1:
-+ *(data + i++) = dpni_stats.page_1.egress_all_frames;
-+ *(data + i++) = dpni_stats.page_1.egress_all_bytes;
-+ *(data + i++) = dpni_stats.page_1.egress_multicast_frames;
-+ *(data + i++) = dpni_stats.page_1.egress_multicast_bytes;
-+ *(data + i++) = dpni_stats.page_1.egress_broadcast_frames;
-+ *(data + i++) = dpni_stats.page_1.egress_broadcast_bytes;
-+ break;
-+ case 2:
-+ *(data + i++) = dpni_stats.page_2.ingress_filtered_frames;
-+ *(data + i++) = dpni_stats.page_2.ingress_discarded_frames;
-+ *(data + i++) = dpni_stats.page_2.ingress_nobuffer_discards;
-+ *(data + i++) = dpni_stats.page_2.egress_discarded_frames;
-+ *(data + i++) = dpni_stats.page_2.egress_confirmed_frames;
-+ break;
-+ default:
-+ break;
++ for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
++ dist_cfg.tc = i;
++
++ err = dpni_set_rx_hash_dist(priv->mc_io, 0,
++ priv->mc_token, &dist_cfg);
++ if (err) {
++ dev_err(dev, "dpni_set_rx_hash_dist failed\n");
++ return err;
+ }
+ }
+
-+ /* Print per-cpu extra stats */
-+ for_each_online_cpu(k) {
-+ extras = per_cpu_ptr(priv->percpu_extras, k);
-+ for (j = 0; j < sizeof(*extras) / sizeof(__u64); j++)
-+ *((__u64 *)data + i + j) += *((__u64 *)extras + j);
-+ }
++ return 0;
++}
+
-+ i += j;
++static int config_fs_key(struct dpaa2_eth_priv *priv, dma_addr_t key_iova)
++{
++ struct device *dev = priv->net_dev->dev.parent;
++ struct dpni_rx_dist_cfg dist_cfg;
++ int i, err;
+
-+ /* We may be using fewer DPIOs than actual CPUs */
-+ for (j = 0; j < priv->num_channels; j++) {
-+ ch_stats = &priv->channel[j]->stats;
-+ cdan += ch_stats->cdan;
-+ portal_busy += ch_stats->dequeue_portal_busy;
-+ pull_err += ch_stats->pull_err;
-+ }
++ if (!dpaa2_eth_fs_enabled(priv))
++ return -EOPNOTSUPP;
+
-+ *(data + i++) = portal_busy;
-+ *(data + i++) = pull_err;
-+ *(data + i++) = cdan;
++ memset(&dist_cfg, 0, sizeof(dist_cfg));
+
-+ *(data + i++) = dpaa2_cscn_state_congested(priv->cscn_mem);
++ dist_cfg.key_cfg_iova = key_iova;
++ dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
++ dist_cfg.enable = true;
+
-+#ifdef CONFIG_FSL_QBMAN_DEBUG
-+ for (j = 0; j < priv->num_fqs; j++) {
-+ /* Print FQ instantaneous counts */
-+ err = dpaa2_io_query_fq_count(NULL, priv->fq[j].fqid,
-+ &fcnt, &bcnt);
-+ if (err) {
-+ netdev_warn(net_dev, "FQ query error %d", err);
-+ return;
-+ }
++ for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
++ dist_cfg.tc = i;
+
-+ if (priv->fq[j].type == DPAA2_TX_CONF_FQ) {
-+ fcnt_tx_total += fcnt;
-+ bcnt_tx_total += bcnt;
-+ } else {
-+ fcnt_rx_total += fcnt;
-+ bcnt_rx_total += bcnt;
++ err = dpni_set_rx_fs_dist(priv->mc_io, 0,
++ priv->mc_token, &dist_cfg);
++ if (err) {
++ dev_err(dev, "dpni_set_rx_fs_dist failed\n");
++ return err;
+ }
+ }
+
-+ *(data + i++) = fcnt_rx_total;
-+ *(data + i++) = bcnt_rx_total;
-+ *(data + i++) = fcnt_tx_total;
-+ *(data + i++) = bcnt_tx_total;
-+
-+ err = dpaa2_io_query_bp_count(NULL, priv->bpid, &buf_cnt);
-+ if (err) {
-+ netdev_warn(net_dev, "Buffer count query error %d\n", err);
-+ return;
-+ }
-+ *(data + i++) = buf_cnt;
-+#endif
++ return 0;
+}
+
-+static int cls_key_off(struct dpaa2_eth_priv *priv, int prot, int field)
++int dpaa2_eth_set_dist_key(struct dpaa2_eth_priv *priv,
++ enum dpaa2_eth_rx_dist type, u32 key_fields)
+{
-+ int i, off = 0;
++ struct device *dev = priv->net_dev->dev.parent;
++ struct dpkg_profile_cfg cls_cfg;
++ struct dpkg_extract *key;
++ u32 hash_fields = 0;
++ dma_addr_t key_iova;
++ u8 *key_mem;
++ int i, err;
+
-+ for (i = 0; i < priv->num_hash_fields; i++) {
-+ if (priv->hash_fields[i].cls_prot == prot &&
-+ priv->hash_fields[i].cls_field == field)
-+ return off;
-+ off += priv->hash_fields[i].size;
-+ }
++ memset(&cls_cfg, 0, sizeof(cls_cfg));
+
-+ return -1;
-+}
++ for (i = 0; i < priv->num_dist_fields; i++) {
++ if (!(key_fields & priv->dist_fields[i].id))
++ continue;
+
-+static u8 cls_key_size(struct dpaa2_eth_priv *priv)
-+{
-+ u8 i, size = 0;
++ key = &cls_cfg.extracts[cls_cfg.num_extracts];
++ key->type = DPKG_EXTRACT_FROM_HDR;
++ key->extract.from_hdr.prot = priv->dist_fields[i].cls_prot;
++ key->extract.from_hdr.type = DPKG_FULL_FIELD;
++ key->extract.from_hdr.field = priv->dist_fields[i].cls_field;
++ cls_cfg.num_extracts++;
+
-+ for (i = 0; i < priv->num_hash_fields; i++)
-+ size += priv->hash_fields[i].size;
++ hash_fields |= priv->dist_fields[i].rxnfc_field;
++ }
+
-+ return size;
-+}
++ key_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL);
++ if (!key_mem)
++ return -ENOMEM;
+
-+void check_cls_support(struct dpaa2_eth_priv *priv)
-+{
-+ u8 key_size = cls_key_size(priv);
-+ struct device *dev = priv->net_dev->dev.parent;
++ err = dpni_prepare_key_cfg(&cls_cfg, key_mem);
++ if (err) {
++ dev_err(dev, "dpni_prepare_key_cfg error %d\n", err);
++ goto free_key;
++ }
+
-+ if (dpaa2_eth_hash_enabled(priv)) {
-+ if (priv->dpni_attrs.fs_key_size < key_size) {
-+ dev_info(dev, "max_dist_key_size = %d, expected %d. Hashing and steering are disabled\n",
-+ priv->dpni_attrs.fs_key_size,
-+ key_size);
-+ goto disable_fs;
-+ }
-+ if (priv->num_hash_fields > DPKG_MAX_NUM_OF_EXTRACTS) {
-+ dev_info(dev, "Too many key fields (max = %d). Hashing and steering are disabled\n",
-+ DPKG_MAX_NUM_OF_EXTRACTS);
-+ goto disable_fs;
-+ }
++ key_iova = dma_map_single(dev, key_mem, DPAA2_CLASSIFIER_DMA_SIZE,
++ DMA_TO_DEVICE);
++ if (dma_mapping_error(dev, key_iova)) {
++ dev_err(dev, "DMA mapping failed\n");
++ err = -ENOMEM;
++ goto free_key;
+ }
+
-+ if (dpaa2_eth_fs_enabled(priv)) {
-+ if (!dpaa2_eth_hash_enabled(priv)) {
-+ dev_info(dev, "Insufficient queues. Steering is disabled\n");
-+ goto disable_fs;
-+ }
++ switch (type) {
++ case DPAA2_ETH_RX_DIST_LEGACY:
++ err = legacy_config_dist_key(priv, key_iova);
++ break;
++ case DPAA2_ETH_RX_DIST_HASH:
++ err = config_hash_key(priv, key_iova);
++ break;
++ case DPAA2_ETH_RX_DIST_FS:
++ err = config_fs_key(priv, key_iova);
++ break;
++ default:
++ err = -EINVAL;
++ break;
++ }
+
-+ if (!dpaa2_eth_fs_mask_enabled(priv)) {
-+ dev_info(dev, "Key masks not supported. Steering is disabled\n");
-+ goto disable_fs;
-+ }
++ dma_unmap_single(dev, key_iova, DPAA2_CLASSIFIER_DMA_SIZE,
++ DMA_TO_DEVICE);
++ if (err) {
++ if (err != -EOPNOTSUPP)
++ dev_err(dev, "Distribution key config failed\n");
++ goto free_key;
+ }
+
-+ return;
++ if (type != DPAA2_ETH_RX_DIST_FS)
++ priv->rx_hash_fields = hash_fields;
+
-+disable_fs:
-+ priv->dpni_attrs.options |= DPNI_OPT_NO_FS;
-+ priv->dpni_attrs.options &= ~DPNI_OPT_HAS_KEY_MASKING;
++free_key:
++ kfree(key_mem);
++ return err;
+}
+
-+static int prep_l4_rule(struct dpaa2_eth_priv *priv,
-+ struct ethtool_tcpip4_spec *l4_value,
-+ struct ethtool_tcpip4_spec *l4_mask,
-+ void *key, void *mask, u8 l4_proto)
++/* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs,
++ * frame queues and channels
++ */
++static int bind_dpni(struct dpaa2_eth_priv *priv)
+{
-+ int offset;
++ struct net_device *net_dev = priv->net_dev;
++ struct device *dev = net_dev->dev.parent;
++ struct dpni_pools_cfg pools_params;
++ struct dpni_error_cfg err_cfg;
++ int err = 0;
++ int i;
+
-+ if (l4_mask->tos) {
-+ netdev_err(priv->net_dev, "ToS is not supported for IPv4 L4\n");
-+ return -EOPNOTSUPP;
++ pools_params.num_dpbp = 1;
++ pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id;
++ pools_params.pools[0].backup_pool = 0;
++ pools_params.pools[0].priority_mask = 0xff;
++ pools_params.pools[0].buffer_size = DPAA2_ETH_RX_BUF_SIZE;
++ err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params);
++ if (err) {
++ dev_err(dev, "dpni_set_pools() failed\n");
++ return err;
+ }
+
-+ if (l4_mask->ip4src) {
-+ offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_SRC);
-+ *(u32 *)(key + offset) = l4_value->ip4src;
-+ *(u32 *)(mask + offset) = l4_mask->ip4src;
-+ }
++ /* Verify classification options and disable hashing and/or
++ * flow steering support in case of invalid configuration values
++ */
++ priv->dist_fields = default_dist_fields;
++ priv->num_dist_fields = ARRAY_SIZE(default_dist_fields);
++ check_cls_support(priv);
+
-+ if (l4_mask->ip4dst) {
-+ offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_DST);
-+ *(u32 *)(key + offset) = l4_value->ip4dst;
-+ *(u32 *)(mask + offset) = l4_mask->ip4dst;
++	/* Have the interface implicitly distribute traffic based on
++	 * a static hash key. Also configure the flow steering key, if
++	 * supported. Errors here are not blocking, so just let the called
++	 * function print its error message and move along.
++	 */
++ if (dpaa2_eth_has_legacy_dist(priv)) {
++ dpaa2_eth_set_dist_key(priv, DPAA2_ETH_RX_DIST_LEGACY,
++ DPAA2_ETH_DIST_ALL);
++ } else {
++ dpaa2_eth_set_dist_key(priv, DPAA2_ETH_RX_DIST_HASH,
++ DPAA2_ETH_DIST_DEFAULT_HASH);
++ dpaa2_eth_set_dist_key(priv, DPAA2_ETH_RX_DIST_FS,
++ DPAA2_ETH_DIST_ALL);
+ }
+
-+ if (l4_mask->psrc) {
-+ offset = cls_key_off(priv, NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
-+ *(u32 *)(key + offset) = l4_value->psrc;
-+ *(u32 *)(mask + offset) = l4_mask->psrc;
++ /* Configure handling of error frames */
++ err_cfg.errors = DPAA2_FAS_RX_ERR_MASK;
++ err_cfg.set_frame_annotation = 1;
++#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
++ err_cfg.error_action = DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE;
++#else
++ err_cfg.error_action = DPNI_ERROR_ACTION_DISCARD;
++#endif
++ err = dpni_set_errors_behavior(priv->mc_io, 0, priv->mc_token,
++ &err_cfg);
++ if (err) {
++ dev_err(dev, "dpni_set_errors_behavior failed\n");
++ return err;
+ }
+
-+ if (l4_mask->pdst) {
-+ offset = cls_key_off(priv, NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
-+ *(u32 *)(key + offset) = l4_value->pdst;
-+ *(u32 *)(mask + offset) = l4_mask->pdst;
++ /* Configure Rx and Tx conf queues to generate CDANs */
++ for (i = 0; i < priv->num_fqs; i++) {
++ switch (priv->fq[i].type) {
++ case DPAA2_RX_FQ:
++ err = setup_rx_flow(priv, &priv->fq[i]);
++ break;
++ case DPAA2_TX_CONF_FQ:
++ err = setup_tx_flow(priv, &priv->fq[i]);
++ break;
++#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
++ case DPAA2_RX_ERR_FQ:
++ err = setup_rx_err_flow(priv, &priv->fq[i]);
++ break;
++#endif
++ default:
++ dev_err(dev, "Invalid FQ type %d\n", priv->fq[i].type);
++ return -EINVAL;
++ }
++ if (err)
++ return err;
+ }
+
-+ /* Only apply the rule for the user-specified L4 protocol
-+ * and if ethertype matches IPv4
-+ */
-+ offset = cls_key_off(priv, NET_PROT_ETH, NH_FLD_ETH_TYPE);
-+ *(u16 *)(key + offset) = htons(ETH_P_IP);
-+ *(u16 *)(mask + offset) = 0xFFFF;
-+
-+ offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_PROTO);
-+ *(u8 *)(key + offset) = l4_proto;
-+ *(u8 *)(mask + offset) = 0xFF;
-+
-+ /* TODO: check IP version */
++ err = dpni_get_qdid(priv->mc_io, 0, priv->mc_token,
++ DPNI_QUEUE_TX, &priv->tx_qdid);
++ if (err) {
++ dev_err(dev, "dpni_get_qdid() failed\n");
++ return err;
++ }
+
+ return 0;
+}
+
-+static int prep_eth_rule(struct dpaa2_eth_priv *priv,
-+ struct ethhdr *eth_value, struct ethhdr *eth_mask,
-+ void *key, void *mask)
++/* Allocate rings for storing incoming frame descriptors */
++static int alloc_rings(struct dpaa2_eth_priv *priv)
+{
-+ int offset;
++ struct net_device *net_dev = priv->net_dev;
++ struct device *dev = net_dev->dev.parent;
++ int i;
+
-+ if (eth_mask->h_proto) {
-+ netdev_err(priv->net_dev, "Ethertype is not supported!\n");
-+ return -EOPNOTSUPP;
++ for (i = 0; i < priv->num_channels; i++) {
++ priv->channel[i]->store =
++ dpaa2_io_store_create(DPAA2_ETH_STORE_SIZE, dev);
++ if (!priv->channel[i]->store) {
++ netdev_err(net_dev, "dpaa2_io_store_create() failed\n");
++ goto err_ring;
++ }
+ }
+
-+ if (!is_zero_ether_addr(eth_mask->h_source)) {
-+ offset = cls_key_off(priv, NET_PROT_ETH, NH_FLD_ETH_SA);
-+ ether_addr_copy(key + offset, eth_value->h_source);
-+ ether_addr_copy(mask + offset, eth_mask->h_source);
-+ }
++ return 0;
+
-+ if (!is_zero_ether_addr(eth_mask->h_dest)) {
-+ offset = cls_key_off(priv, NET_PROT_ETH, NH_FLD_ETH_DA);
-+ ether_addr_copy(key + offset, eth_value->h_dest);
-+ ether_addr_copy(mask + offset, eth_mask->h_dest);
++err_ring:
++ for (i = 0; i < priv->num_channels; i++) {
++ if (!priv->channel[i]->store)
++ break;
++ dpaa2_io_store_destroy(priv->channel[i]->store);
+ }
+
-+ return 0;
++ return -ENOMEM;
+}
+
-+static int prep_user_ip_rule(struct dpaa2_eth_priv *priv,
-+ struct ethtool_usrip4_spec *uip_value,
-+ struct ethtool_usrip4_spec *uip_mask,
-+ void *key, void *mask)
++static void free_rings(struct dpaa2_eth_priv *priv)
+{
-+ int offset;
-+
-+ if (uip_mask->tos)
-+ return -EOPNOTSUPP;
++ int i;
+
-+ if (uip_mask->ip4src) {
-+ offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_SRC);
-+ *(u32 *)(key + offset) = uip_value->ip4src;
-+ *(u32 *)(mask + offset) = uip_mask->ip4src;
-+ }
++ for (i = 0; i < priv->num_channels; i++)
++ dpaa2_io_store_destroy(priv->channel[i]->store);
++}
+
-+ if (uip_mask->ip4dst) {
-+ offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_DST);
-+ *(u32 *)(key + offset) = uip_value->ip4dst;
-+ *(u32 *)(mask + offset) = uip_mask->ip4dst;
-+ }
++static int set_mac_addr(struct dpaa2_eth_priv *priv)
++{
++ struct net_device *net_dev = priv->net_dev;
++ struct device *dev = net_dev->dev.parent;
++ u8 mac_addr[ETH_ALEN], dpni_mac_addr[ETH_ALEN];
++ int err;
+
-+ if (uip_mask->proto) {
-+ offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_PROTO);
-+ *(u32 *)(key + offset) = uip_value->proto;
-+ *(u32 *)(mask + offset) = uip_mask->proto;
++ /* Get firmware address, if any */
++ err = dpni_get_port_mac_addr(priv->mc_io, 0, priv->mc_token, mac_addr);
++ if (err) {
++ dev_err(dev, "dpni_get_port_mac_addr() failed\n");
++ return err;
+ }
-+ if (uip_mask->l4_4_bytes) {
-+ offset = cls_key_off(priv, NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
-+ *(u16 *)(key + offset) = uip_value->l4_4_bytes << 16;
-+ *(u16 *)(mask + offset) = uip_mask->l4_4_bytes << 16;
+
-+ offset = cls_key_off(priv, NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
-+ *(u16 *)(key + offset) = uip_value->l4_4_bytes & 0xFFFF;
-+ *(u16 *)(mask + offset) = uip_mask->l4_4_bytes & 0xFFFF;
++	/* Get the DPNI's primary MAC address, if any */
++ err = dpni_get_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
++ dpni_mac_addr);
++ if (err) {
++ dev_err(dev, "dpni_get_primary_mac_addr() failed\n");
++ return err;
+ }
+
-+ /* Ethertype must be IP */
-+ offset = cls_key_off(priv, NET_PROT_ETH, NH_FLD_ETH_TYPE);
-+ *(u16 *)(key + offset) = htons(ETH_P_IP);
-+ *(u16 *)(mask + offset) = 0xFFFF;
-+
-+ return 0;
-+}
++ /* First check if firmware has any address configured by bootloader */
++ if (!is_zero_ether_addr(mac_addr)) {
++ /* If the DPMAC addr != DPNI addr, update it */
++ if (!ether_addr_equal(mac_addr, dpni_mac_addr)) {
++ err = dpni_set_primary_mac_addr(priv->mc_io, 0,
++ priv->mc_token,
++ mac_addr);
++ if (err) {
++ dev_err(dev, "dpni_set_primary_mac_addr() failed\n");
++ return err;
++ }
++ }
++ memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
++ } else if (is_zero_ether_addr(dpni_mac_addr)) {
++ /* No MAC address configured, fill in net_dev->dev_addr
++ * with a random one
++ */
++ eth_hw_addr_random(net_dev);
++ dev_dbg_once(dev, "device(s) have all-zero hwaddr, replaced with random\n");
+
-+static int prep_ext_rule(struct dpaa2_eth_priv *priv,
-+ struct ethtool_flow_ext *ext_value,
-+ struct ethtool_flow_ext *ext_mask,
-+ void *key, void *mask)
-+{
-+ int offset;
++ err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
++ net_dev->dev_addr);
++ if (err) {
++ dev_err(dev, "dpni_set_primary_mac_addr() failed\n");
++ return err;
++ }
+
-+ if (ext_mask->vlan_etype)
-+ return -EOPNOTSUPP;
-+
-+ if (ext_mask->vlan_tci) {
-+ offset = cls_key_off(priv, NET_PROT_VLAN, NH_FLD_VLAN_TCI);
-+ *(u16 *)(key + offset) = ext_value->vlan_tci;
-+ *(u16 *)(mask + offset) = ext_mask->vlan_tci;
-+ }
-+
-+ return 0;
-+}
-+
-+static int prep_mac_ext_rule(struct dpaa2_eth_priv *priv,
-+ struct ethtool_flow_ext *ext_value,
-+ struct ethtool_flow_ext *ext_mask,
-+ void *key, void *mask)
-+{
-+ int offset;
-+
-+ if (!is_zero_ether_addr(ext_mask->h_dest)) {
-+ offset = cls_key_off(priv, NET_PROT_ETH, NH_FLD_ETH_DA);
-+ ether_addr_copy(key + offset, ext_value->h_dest);
-+ ether_addr_copy(mask + offset, ext_mask->h_dest);
++ /* Override NET_ADDR_RANDOM set by eth_hw_addr_random(); for all
++ * practical purposes, this will be our "permanent" MAC address,
++ * at least until the next reboot. This move will also permit
++ * register_netdevice() to properly fill in net_dev->perm_addr.
++ */
++ net_dev->addr_assign_type = NET_ADDR_PERM;
++ } else {
++ /* NET_ADDR_PERM is default, all we have to do is
++ * fill in the device addr.
++ */
++ memcpy(net_dev->dev_addr, dpni_mac_addr, net_dev->addr_len);
+ }
+
+ return 0;
+}
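set_mac_addr() resolves the device address with a fixed precedence: a bootloader-provisioned port (DPMAC) address wins, then whatever is already programmed on the DPNI, and a random address is generated only when both are zero. A minimal standalone sketch of just that decision tree (the helper names are illustrative, not driver API):

#include <assert.h>
#include <stdbool.h>
#include <string.h>

#define ETH_ALEN 6

static bool addr_is_zero(const unsigned char *a)
{
	static const unsigned char zero[ETH_ALEN];

	return memcmp(a, zero, ETH_ALEN) == 0;
}

/* 0 = use firmware (port) address, 1 = keep DPNI address, 2 = randomize */
static int pick_addr_source(const unsigned char *port_addr,
			    const unsigned char *dpni_addr)
{
	if (!addr_is_zero(port_addr))
		return 0;	/* bootloader-provisioned address wins */
	if (addr_is_zero(dpni_addr))
		return 2;	/* nothing configured anywhere: randomize */
	return 1;		/* keep the address already on the DPNI */
}

int main(void)
{
	unsigned char fw[ETH_ALEN] = { 0 }, dpni[ETH_ALEN] = { 0 };

	assert(pick_addr_source(fw, dpni) == 2);
	dpni[5] = 1;
	assert(pick_addr_source(fw, dpni) == 1);
	fw[0] = 2;
	assert(pick_addr_source(fw, dpni) == 0);
	return 0;
}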
+
-+static int prep_cls_rule(struct net_device *net_dev,
-+ struct ethtool_rx_flow_spec *fs,
-+ void *key)
++static int netdev_init(struct net_device *net_dev)
+{
++ struct device *dev = net_dev->dev.parent;
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
-+ const u8 key_size = cls_key_size(priv);
-+ void *msk = key + key_size;
++ u8 bcast_addr[ETH_ALEN];
++ u8 num_queues;
+ int err;
+
-+ memset(key, 0, key_size * 2);
++ net_dev->netdev_ops = &dpaa2_eth_ops;
+
-+ switch (fs->flow_type & 0xff) {
-+ case TCP_V4_FLOW:
-+ err = prep_l4_rule(priv, &fs->h_u.tcp_ip4_spec,
-+ &fs->m_u.tcp_ip4_spec, key, msk,
-+ IPPROTO_TCP);
-+ break;
-+ case UDP_V4_FLOW:
-+ err = prep_l4_rule(priv, &fs->h_u.udp_ip4_spec,
-+ &fs->m_u.udp_ip4_spec, key, msk,
-+ IPPROTO_UDP);
-+ break;
-+ case SCTP_V4_FLOW:
-+ err = prep_l4_rule(priv, &fs->h_u.sctp_ip4_spec,
-+ &fs->m_u.sctp_ip4_spec, key, msk,
-+ IPPROTO_SCTP);
-+ break;
-+ case ETHER_FLOW:
-+ err = prep_eth_rule(priv, &fs->h_u.ether_spec,
-+ &fs->m_u.ether_spec, key, msk);
-+ break;
-+ case IP_USER_FLOW:
-+ err = prep_user_ip_rule(priv, &fs->h_u.usr_ip4_spec,
-+ &fs->m_u.usr_ip4_spec, key, msk);
-+ break;
-+ default:
-+ /* TODO: AH, ESP */
-+ return -EOPNOTSUPP;
-+ }
++ err = set_mac_addr(priv);
+ if (err)
+ return err;
+
-+ if (fs->flow_type & FLOW_EXT) {
-+ err = prep_ext_rule(priv, &fs->h_ext, &fs->m_ext, key, msk);
-+ if (err)
-+ return err;
++ /* Explicitly add the broadcast address to the MAC filtering table */
++ eth_broadcast_addr(bcast_addr);
++ err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, bcast_addr);
++ if (err) {
++ dev_err(dev, "dpni_add_mac_addr() failed\n");
++ return err;
+ }
+
-+ if (fs->flow_type & FLOW_MAC_EXT) {
-+ err = prep_mac_ext_rule(priv, &fs->h_ext, &fs->m_ext, key, msk);
-+ if (err)
-+ return err;
++ /* Set MTU upper limit; lower limit is default (68B) */
++ net_dev->max_mtu = DPAA2_ETH_MAX_MTU;
++ err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token,
++ (u16)DPAA2_ETH_MFL);
++ if (err) {
++ dev_err(dev, "dpni_set_max_frame_length() failed\n");
++ return err;
++ }
++
++ /* Set actual number of queues in the net device */
++ num_queues = dpaa2_eth_queue_count(priv);
++ err = netif_set_real_num_tx_queues(net_dev, num_queues);
++ if (err) {
++ dev_err(dev, "netif_set_real_num_tx_queues() failed\n");
++ return err;
++ }
++ err = netif_set_real_num_rx_queues(net_dev, num_queues);
++ if (err) {
++ dev_err(dev, "netif_set_real_num_rx_queues() failed\n");
++ return err;
++ }
++
++ /* Our .ndo_init will be called from within register_netdev() */
++ err = register_netdev(net_dev);
++ if (err < 0) {
++ dev_err(dev, "register_netdev() failed\n");
++ return err;
+ }
+
+ return 0;
+}
+
-+static int del_cls(struct net_device *net_dev, int location);
-+
-+static int do_cls(struct net_device *net_dev,
-+ struct ethtool_rx_flow_spec *fs,
-+ bool add)
++static int poll_link_state(void *arg)
+{
-+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
-+ struct device *dev = net_dev->dev.parent;
-+ const int rule_cnt = dpaa2_eth_fs_count(priv);
-+ struct dpni_rule_cfg rule_cfg;
-+ struct dpni_fs_action_cfg fs_act = { 0 };
-+ void *dma_mem;
-+ int err = 0;
++ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)arg;
++ int err;
+
-+ if (!dpaa2_eth_fs_enabled(priv)) {
-+ netdev_err(net_dev, "dev does not support steering!\n");
-+ return -EOPNOTSUPP;
++ while (!kthread_should_stop()) {
++ err = link_state_update(priv);
++ if (unlikely(err))
++ return err;
++
++ msleep(DPAA2_ETH_LINK_STATE_REFRESH);
+ }
+
-+ if ((fs->ring_cookie != RX_CLS_FLOW_DISC &&
-+ fs->ring_cookie >= dpaa2_eth_queue_count(priv)) ||
-+ fs->location >= rule_cnt)
-+ return -EINVAL;
++ return 0;
++}
+
-+ /* When adding a new rule, check if the location is available,
-+ * and if not, free the existing table entry before inserting
-+ * the new one
-+ */
-+ if (add && (priv->cls_rule[fs->location].in_use == true))
-+ del_cls(net_dev, fs->location);
++static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
++{
++ u32 status = ~0;
++ struct device *dev = (struct device *)arg;
++ struct fsl_mc_device *dpni_dev = to_fsl_mc_device(dev);
++ struct net_device *net_dev = dev_get_drvdata(dev);
++ int err;
+
-+ memset(&rule_cfg, 0, sizeof(rule_cfg));
-+ rule_cfg.key_size = cls_key_size(priv);
++ err = dpni_get_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle,
++ DPNI_IRQ_INDEX, &status);
++ if (unlikely(err)) {
++ netdev_err(net_dev, "Can't get irq status (err %d)\n", err);
++ return IRQ_HANDLED;
++ }
+
-+ /* allocate twice the key size, for the actual key and for mask */
-+ dma_mem = kzalloc(rule_cfg.key_size * 2, GFP_DMA | GFP_KERNEL);
-+ if (!dma_mem)
-+ return -ENOMEM;
++ if (status & DPNI_IRQ_EVENT_LINK_CHANGED)
++ link_state_update(netdev_priv(net_dev));
+
-+ err = prep_cls_rule(net_dev, fs, dma_mem);
-+ if (err)
-+ goto err_free_mem;
++ return IRQ_HANDLED;
++}
+
-+ rule_cfg.key_iova = dma_map_single(dev, dma_mem,
-+ rule_cfg.key_size * 2,
-+ DMA_TO_DEVICE);
++static int setup_irqs(struct fsl_mc_device *ls_dev)
++{
++ int err = 0;
++ struct fsl_mc_device_irq *irq;
+
-+ rule_cfg.mask_iova = rule_cfg.key_iova + rule_cfg.key_size;
++ err = fsl_mc_allocate_irqs(ls_dev);
++ if (err) {
++ dev_err(&ls_dev->dev, "MC irqs allocation failed\n");
++ return err;
++ }
+
-+ if (fs->ring_cookie == RX_CLS_FLOW_DISC)
-+ fs_act.options |= DPNI_FS_OPT_DISCARD;
-+ else
-+ fs_act.flow_id = fs->ring_cookie;
++ irq = ls_dev->irqs[0];
++ err = devm_request_threaded_irq(&ls_dev->dev, irq->msi_desc->irq,
++ NULL, dpni_irq0_handler_thread,
++ IRQF_NO_SUSPEND | IRQF_ONESHOT,
++ dev_name(&ls_dev->dev), &ls_dev->dev);
++ if (err < 0) {
++ dev_err(&ls_dev->dev, "devm_request_threaded_irq(): %d\n", err);
++ goto free_mc_irq;
++ }
+
-+ if (add)
-+ err = dpni_add_fs_entry(priv->mc_io, 0, priv->mc_token,
-+ 0, fs->location, &rule_cfg, &fs_act);
-+ else
-+ err = dpni_remove_fs_entry(priv->mc_io, 0, priv->mc_token,
-+ 0, &rule_cfg);
++ err = dpni_set_irq_mask(ls_dev->mc_io, 0, ls_dev->mc_handle,
++ DPNI_IRQ_INDEX, DPNI_IRQ_EVENT_LINK_CHANGED);
++ if (err < 0) {
++ dev_err(&ls_dev->dev, "dpni_set_irq_mask(): %d\n", err);
++ goto free_irq;
++ }
+
-+ dma_unmap_single(dev, rule_cfg.key_iova,
-+ rule_cfg.key_size * 2, DMA_TO_DEVICE);
++ err = dpni_set_irq_enable(ls_dev->mc_io, 0, ls_dev->mc_handle,
++ DPNI_IRQ_INDEX, 1);
++ if (err < 0) {
++ dev_err(&ls_dev->dev, "dpni_set_irq_enable(): %d\n", err);
++ goto free_irq;
++ }
+
-+ if (err)
-+ netdev_err(net_dev, "dpaa2_add/remove_cls() error %d\n", err);
++ return 0;
+
-+err_free_mem:
-+ kfree(dma_mem);
++free_irq:
++ devm_free_irq(&ls_dev->dev, irq->msi_desc->irq, &ls_dev->dev);
++free_mc_irq:
++ fsl_mc_free_irqs(ls_dev);
+
+ return err;
+}
+
-+static int add_cls(struct net_device *net_dev,
-+ struct ethtool_rx_flow_spec *fs)
++static void add_ch_napi(struct dpaa2_eth_priv *priv)
+{
-+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
-+ int err;
-+
-+ err = do_cls(net_dev, fs, true);
-+ if (err)
-+ return err;
-+
-+ priv->cls_rule[fs->location].in_use = true;
-+ priv->cls_rule[fs->location].fs = *fs;
++ int i;
++ struct dpaa2_eth_channel *ch;
+
-+ return 0;
++ for (i = 0; i < priv->num_channels; i++) {
++ ch = priv->channel[i];
++ /* NAPI weight *MUST* be a multiple of DPAA2_ETH_STORE_SIZE */
++ netif_napi_add(priv->net_dev, &ch->napi, dpaa2_eth_poll,
++ NAPI_POLL_WEIGHT);
++ }
+}
+
-+static int del_cls(struct net_device *net_dev, int location)
++static void del_ch_napi(struct dpaa2_eth_priv *priv)
+{
-+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
-+ int err;
++ int i;
++ struct dpaa2_eth_channel *ch;
+
-+ err = do_cls(net_dev, &priv->cls_rule[location].fs, false);
-+ if (err)
-+ return err;
++ for (i = 0; i < priv->num_channels; i++) {
++ ch = priv->channel[i];
++ netif_napi_del(&ch->napi);
++ }
++}
+
-+ priv->cls_rule[location].in_use = false;
++/* SysFS support */
++static ssize_t dpaa2_eth_show_tx_shaping(struct device *dev,
++ struct device_attribute *attr,
++ char *buf)
++{
++ struct dpaa2_eth_priv *priv = netdev_priv(to_net_dev(dev));
++ /* No MC API for getting the shaping config. We're stateful. */
++ struct dpni_tx_shaping_cfg *scfg = &priv->shaping_cfg;
+
-+ return 0;
++ return sprintf(buf, "%u %hu\n", scfg->rate_limit, scfg->max_burst_size);
+}
+
-+static int dpaa2_eth_set_rxnfc(struct net_device *net_dev,
-+ struct ethtool_rxnfc *rxnfc)
++static ssize_t dpaa2_eth_write_tx_shaping(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf,
++ size_t count)
+{
-+ int err = 0;
++ int err, items;
++ struct dpaa2_eth_priv *priv = netdev_priv(to_net_dev(dev));
++ struct dpni_tx_shaping_cfg scfg, ercfg = { 0 };
+
-+ switch (rxnfc->cmd) {
-+ case ETHTOOL_SRXCLSRLINS:
-+ err = add_cls(net_dev, &rxnfc->fs);
-+ break;
++ items = sscanf(buf, "%u %hu", &scfg.rate_limit, &scfg.max_burst_size);
++ if (items != 2) {
++ pr_err("Expected format: \"rate_limit(Mbps) max_burst_size(bytes)\"\n");
++ return -EINVAL;
++ }
++ /* Size restriction as per MC API documentation */
++ if (scfg.max_burst_size > DPAA2_ETH_MAX_BURST_SIZE) {
++ pr_err("max_burst_size must be <= %d\n",
++ DPAA2_ETH_MAX_BURST_SIZE);
++ return -EINVAL;
++ }
+
-+ case ETHTOOL_SRXCLSRLDEL:
-+ err = del_cls(net_dev, rxnfc->fs.location);
-+ break;
++ err = dpni_set_tx_shaping(priv->mc_io, 0, priv->mc_token, &scfg,
++ &ercfg, 0);
++ if (err) {
++ dev_err(dev, "dpni_set_tx_shaping() failed\n");
++ return -EPERM;
++ }
++ /* If successful, save the current configuration for future inquiries */
++ priv->shaping_cfg = scfg;
+
-+ default:
-+ err = -EOPNOTSUPP;
++ return count;
++}
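The attribute takes two space-separated integers: the rate limit in Mbps and the maximum burst size in bytes (capped at DPAA2_ETH_MAX_BURST_SIZE). A userspace sketch of driving it; the sysfs path is an assumption based on the device_create_file() call below and an interface named eth0:

#include <stdio.h>

int main(void)
{
	/* 1000 Mbps rate limit, 61439-byte max burst (within 0xF7FF) */
	FILE *f = fopen("/sys/class/net/eth0/tx_shaping", "w");

	if (!f) {
		perror("tx_shaping");
		return 1;
	}
	fprintf(f, "1000 61439\n");
	return fclose(f) ? 1 : 0;
}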
++
++static struct device_attribute dpaa2_eth_attrs[] = {
++ __ATTR(tx_shaping,
++ 0600,
++ dpaa2_eth_show_tx_shaping,
++ dpaa2_eth_write_tx_shaping),
++};
++
++static void dpaa2_eth_sysfs_init(struct device *dev)
++{
++ int i, err;
++
++ for (i = 0; i < ARRAY_SIZE(dpaa2_eth_attrs); i++) {
++ err = device_create_file(dev, &dpaa2_eth_attrs[i]);
++ if (err) {
++ dev_err(dev, "ERROR creating sysfs file\n");
++ goto undo;
++ }
+ }
++ return;
+
-+ return err;
++undo:
++ while (i > 0)
++ device_remove_file(dev, &dpaa2_eth_attrs[--i]);
+}
+
-+static int dpaa2_eth_get_rxnfc(struct net_device *net_dev,
-+ struct ethtool_rxnfc *rxnfc, u32 *rule_locs)
++static void dpaa2_eth_sysfs_remove(struct device *dev)
+{
-+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
-+ const int rule_cnt = dpaa2_eth_fs_count(priv);
-+ int i, j;
++ int i;
+
-+ switch (rxnfc->cmd) {
-+ case ETHTOOL_GRXFH:
-+ /* we purposely ignore cmd->flow_type, because the hashing key
-+ * is the same (and fixed) for all protocols
-+ */
-+ rxnfc->data = priv->rx_flow_hash;
-+ break;
++ for (i = 0; i < ARRAY_SIZE(dpaa2_eth_attrs); i++)
++ device_remove_file(dev, &dpaa2_eth_attrs[i]);
++}
+
-+ case ETHTOOL_GRXRINGS:
-+ rxnfc->data = dpaa2_eth_queue_count(priv);
-+ break;
++#ifdef CONFIG_FSL_DPAA2_ETH_DCB
++static int dpaa2_eth_dcbnl_ieee_getpfc(struct net_device *net_dev,
++ struct ieee_pfc *pfc)
++{
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
++ struct dpni_congestion_notification_cfg notification_cfg;
++ struct dpni_link_state state;
++ int err, i;
+
-+ case ETHTOOL_GRXCLSRLCNT:
-+ for (i = 0, rxnfc->rule_cnt = 0; i < rule_cnt; i++)
-+ if (priv->cls_rule[i].in_use)
-+ rxnfc->rule_cnt++;
-+ rxnfc->data = rule_cnt;
-+ break;
++ priv->pfc.pfc_cap = dpaa2_eth_tc_count(priv);
+
-+ case ETHTOOL_GRXCLSRULE:
-+ if (!priv->cls_rule[rxnfc->fs.location].in_use)
-+ return -EINVAL;
++ err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
++ if (err) {
++ netdev_err(net_dev, "ERROR %d getting link state", err);
++ return err;
++ }
+
-+ rxnfc->fs = priv->cls_rule[rxnfc->fs.location].fs;
-+ break;
++ if (!(state.options & DPNI_LINK_OPT_PFC_PAUSE))
++ return 0;
+
-+ case ETHTOOL_GRXCLSRLALL:
-+ for (i = 0, j = 0; i < rule_cnt; i++) {
-+ if (!priv->cls_rule[i].in_use)
-+ continue;
-+ if (j == rxnfc->rule_cnt)
-+ return -EMSGSIZE;
-+ rule_locs[j++] = i;
++ priv->pfc.pfc_en = 0;
++ for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
++ err = dpni_get_congestion_notification(priv->mc_io, 0,
++ priv->mc_token,
++ DPNI_QUEUE_RX,
++ i, &notification_cfg);
++ if (err) {
++ netdev_err(net_dev, "Error %d getting congestion notif",
++ err);
++ return err;
+ }
-+ rxnfc->rule_cnt = j;
-+ rxnfc->data = rule_cnt;
-+ break;
+
-+ default:
-+ return -EOPNOTSUPP;
++ if (notification_cfg.threshold_entry)
++ priv->pfc.pfc_en |= 1 << i;
+ }
+
++ memcpy(pfc, &priv->pfc, sizeof(priv->pfc));
++
+ return 0;
+}
+
-+const struct ethtool_ops dpaa2_ethtool_ops = {
-+ .get_drvinfo = dpaa2_eth_get_drvinfo,
-+ .get_link = ethtool_op_get_link,
-+ .get_settings = dpaa2_eth_get_settings,
-+ .set_settings = dpaa2_eth_set_settings,
-+ .get_pauseparam = dpaa2_eth_get_pauseparam,
-+ .set_pauseparam = dpaa2_eth_set_pauseparam,
-+ .get_sset_count = dpaa2_eth_get_sset_count,
-+ .get_ethtool_stats = dpaa2_eth_get_ethtool_stats,
-+ .get_strings = dpaa2_eth_get_strings,
-+ .get_rxnfc = dpaa2_eth_get_rxnfc,
-+ .set_rxnfc = dpaa2_eth_set_rxnfc,
-+};
---- /dev/null
-+++ b/drivers/staging/fsl-dpaa2/ethernet/dpkg.h
-@@ -0,0 +1,176 @@
-+/* Copyright 2013-2015 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ */
-+#ifndef __FSL_DPKG_H_
-+#define __FSL_DPKG_H_
++/* Configure ingress classification based on VLAN PCP */
++static int set_vlan_qos(struct dpaa2_eth_priv *priv)
++{
++ struct device *dev = priv->net_dev->dev.parent;
++ struct dpkg_profile_cfg kg_cfg = {0};
++ struct dpni_qos_tbl_cfg qos_cfg = {0};
++ struct dpni_rule_cfg key_params;
++ u8 *params_iova, *key, *mask = NULL;
++ /* We only need the trailing 16 bits, without the TPID */
++ u8 key_size = VLAN_HLEN / 2;
++ int err = 0, i, j = 0;
++
++ if (priv->vlan_clsf_set)
++ return 0;
+
-+#include <linux/types.h>
-+#include "net.h"
++ params_iova = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL);
++ if (!params_iova)
++ return -ENOMEM;
+
-+/* Data Path Key Generator API
-+ * Contains initialization APIs and runtime APIs for the Key Generator
-+ */
++ kg_cfg.num_extracts = 1;
++ kg_cfg.extracts[0].type = DPKG_EXTRACT_FROM_HDR;
++ kg_cfg.extracts[0].extract.from_hdr.prot = NET_PROT_VLAN;
++ kg_cfg.extracts[0].extract.from_hdr.type = DPKG_FULL_FIELD;
++ kg_cfg.extracts[0].extract.from_hdr.field = NH_FLD_VLAN_TCI;
+
-+/** Key Generator properties */
++ err = dpni_prepare_key_cfg(&kg_cfg, params_iova);
++ if (err) {
++ dev_err(dev, "dpkg_prepare_key_cfg failed: %d\n", err);
++ goto out_free;
++ }
++
++ /* Set QoS table */
++ qos_cfg.default_tc = 0;
++ qos_cfg.discard_on_miss = 0;
++ qos_cfg.key_cfg_iova = dma_map_single(dev, params_iova,
++ DPAA2_CLASSIFIER_DMA_SIZE,
++ DMA_TO_DEVICE);
++ if (dma_mapping_error(dev, qos_cfg.key_cfg_iova)) {
++ dev_err(dev, "%s: DMA mapping failed\n", __func__);
++ err = -ENOMEM;
++ goto out_free;
++ }
++ err = dpni_set_qos_table(priv->mc_io, 0, priv->mc_token, &qos_cfg);
++ dma_unmap_single(dev, qos_cfg.key_cfg_iova,
++ DPAA2_CLASSIFIER_DMA_SIZE, DMA_TO_DEVICE);
+
-+/**
-+ * Number of masks per key extraction
-+ */
-+#define DPKG_NUM_OF_MASKS 4
-+/**
-+ * Number of extractions per key profile
-+ */
-+#define DPKG_MAX_NUM_OF_EXTRACTS 10
++ if (err) {
++ dev_err(dev, "dpni_set_qos_table failed: %d\n", err);
++ goto out_free;
++ }
+
-+/**
-+ * enum dpkg_extract_from_hdr_type - Selecting extraction by header types
-+ * @DPKG_FROM_HDR: Extract selected bytes from header, by offset
-+ * @DPKG_FROM_FIELD: Extract selected bytes from header, by offset from field
-+ * @DPKG_FULL_FIELD: Extract a full field
-+ */
-+enum dpkg_extract_from_hdr_type {
-+ DPKG_FROM_HDR = 0,
-+ DPKG_FROM_FIELD = 1,
-+ DPKG_FULL_FIELD = 2
-+};
++ key_params.key_size = key_size;
+
-+/**
-+ * enum dpkg_extract_type - Enumeration for selecting extraction type
-+ * @DPKG_EXTRACT_FROM_HDR: Extract from the header
-+ * @DPKG_EXTRACT_FROM_DATA: Extract from data not in specific header
-+ * @DPKG_EXTRACT_FROM_PARSE: Extract from parser-result;
-+ * e.g. can be used to extract header existence;
-+ * please refer to 'Parse Result definition' section in the parser BG
-+ */
-+enum dpkg_extract_type {
-+ DPKG_EXTRACT_FROM_HDR = 0,
-+ DPKG_EXTRACT_FROM_DATA = 1,
-+ DPKG_EXTRACT_FROM_PARSE = 3
-+};
++ if (dpaa2_eth_fs_mask_enabled(priv)) {
++ mask = kzalloc(key_size, GFP_KERNEL);
++ if (!mask) {
++ err = -ENOMEM;
++ goto out_free;
++ }
+
-+/**
-+ * struct dpkg_mask - A structure for defining a single extraction mask
-+ * @mask: Byte mask for the extracted content
-+ * @offset: Offset within the extracted content
-+ */
-+struct dpkg_mask {
-+ u8 mask;
-+ u8 offset;
-+};
++ *(__be16 *)mask = cpu_to_be16(VLAN_PRIO_MASK);
+
-+/**
-+ * struct dpkg_extract - A structure for defining a single extraction
-+ * @type: Determines how the union below is interpreted:
-+ * DPKG_EXTRACT_FROM_HDR: selects 'from_hdr';
-+ * DPKG_EXTRACT_FROM_DATA: selects 'from_data';
-+ * DPKG_EXTRACT_FROM_PARSE: selects 'from_parse'
-+ * @extract: Selects extraction method
-+ * @num_of_byte_masks: Defines the number of valid entries in the array below;
-+ * This is also the number of bytes to be used as masks
-+ * @masks: Masks parameters
-+ */
-+struct dpkg_extract {
-+ enum dpkg_extract_type type;
-+ /**
-+ * union extract - Selects extraction method
-+ * @from_hdr - Used when 'type = DPKG_EXTRACT_FROM_HDR'
-+ * @from_data - Used when 'type = DPKG_EXTRACT_FROM_DATA'
-+ * @from_parse - Used when 'type = DPKG_EXTRACT_FROM_PARSE'
-+ */
-+ union {
-+ /**
-+ * struct from_hdr - Used when 'type = DPKG_EXTRACT_FROM_HDR'
-+ * @prot: Any of the supported headers
-+ * @type: Defines the type of header extraction:
-+ * DPKG_FROM_HDR: use size & offset below;
-+ * DPKG_FROM_FIELD: use field, size and offset below;
-+ * DPKG_FULL_FIELD: use field below
-+ * @field: One of the supported fields (NH_FLD_)
-+ *
-+ * @size: Size in bytes
-+ * @offset: Byte offset
-+ * @hdr_index: Clear for cases not listed below;
-+ * Used for protocols that may have more than a single
-+ * header, 0 indicates an outer header;
-+ * Supported protocols (possible values):
-+ * NET_PROT_VLAN (0, HDR_INDEX_LAST);
-+ * NET_PROT_MPLS (0, 1, HDR_INDEX_LAST);
-+ * NET_PROT_IP(0, HDR_INDEX_LAST);
-+ * NET_PROT_IPv4(0, HDR_INDEX_LAST);
-+ * NET_PROT_IPv6(0, HDR_INDEX_LAST);
-+ */
++ key_params.mask_iova = dma_map_single(dev, mask, key_size,
++ DMA_TO_DEVICE);
++ if (dma_mapping_error(dev, key_params.mask_iova)) {
++ dev_err(dev, "DMA mapping failed %s\n", __func__);
++ err = -ENOMEM;
++ goto out_free_mask;
++ }
++ } else {
++ key_params.mask_iova = 0;
++ }
+
-+ struct {
-+ enum net_prot prot;
-+ enum dpkg_extract_from_hdr_type type;
-+ u32 field;
-+ u8 size;
-+ u8 offset;
-+ u8 hdr_index;
-+ } from_hdr;
-+ /**
-+ * struct from_data - Used when 'type = DPKG_EXTRACT_FROM_DATA'
-+ * @size: Size in bytes
-+ * @offset: Byte offset
-+ */
-+ struct {
-+ u8 size;
-+ u8 offset;
-+ } from_data;
++ key = kzalloc(key_size, GFP_KERNEL);
++ if (!key) {
++ err = -ENOMEM;
++ goto out_cleanup_mask;
++ }
+
-+ /**
-+ * struct from_parse - Used when
-+ * 'type = DPKG_EXTRACT_FROM_PARSE'
-+ * @size: Size in bytes
-+ * @offset: Byte offset
-+ */
-+ struct {
-+ u8 size;
-+ u8 offset;
-+ } from_parse;
-+ } extract;
-+
-+ u8 num_of_byte_masks;
-+ struct dpkg_mask masks[DPKG_NUM_OF_MASKS];
-+};
++ key_params.key_iova = dma_map_single(dev, key, key_size,
++ DMA_TO_DEVICE);
++ if (dma_mapping_error(dev, key_params.key_iova)) {
++ dev_err(dev, "%s: DMA mapping failed\n", __func__);
++ err = -ENOMEM;
++ goto out_free_key;
++ }
+
-+/**
-+ * struct dpkg_profile_cfg - A structure for defining a full Key Generation
-+ * profile (rule)
-+ * @num_extracts: Defines the number of valid entries in the array below
-+ * @extracts: Array of required extractions
-+ */
-+struct dpkg_profile_cfg {
-+ u8 num_extracts;
-+ struct dpkg_extract extracts[DPKG_MAX_NUM_OF_EXTRACTS];
-+};
++ for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
++ *(__be16 *)key = cpu_to_be16(i << VLAN_PRIO_SHIFT);
+
-+#endif /* __FSL_DPKG_H_ */
---- /dev/null
-+++ b/drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h
-@@ -0,0 +1,600 @@
-+/* Copyright 2013-2016 Freescale Semiconductor Inc.
-+ * Copyright 2016 NXP
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ */
-+#ifndef _FSL_DPNI_CMD_H
-+#define _FSL_DPNI_CMD_H
++ dma_sync_single_for_device(dev, key_params.key_iova,
++ key_size, DMA_TO_DEVICE);
+
-+/* DPNI Version */
-+#define DPNI_VER_MAJOR 7
-+#define DPNI_VER_MINOR 0
-+#define DPNI_CMD_BASE_VERSION 1
-+#define DPNI_CMD_ID_OFFSET 4
++ err = dpni_add_qos_entry(priv->mc_io, 0, priv->mc_token,
++ &key_params, i, j++);
++ if (err) {
++ dev_err(dev, "dpni_add_qos_entry failed: %d\n", err);
++ goto out_remove;
++ }
++ }
+
-+#define DPNI_CMD(id) (((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_BASE_VERSION)
++ priv->vlan_clsf_set = true;
++ dev_dbg(dev, "Vlan PCP QoS classification set\n");
++ goto out_cleanup;
+
-+#define DPNI_CMDID_OPEN DPNI_CMD(0x801)
-+#define DPNI_CMDID_CLOSE DPNI_CMD(0x800)
-+#define DPNI_CMDID_CREATE DPNI_CMD(0x901)
-+#define DPNI_CMDID_DESTROY DPNI_CMD(0x900)
-+#define DPNI_CMDID_GET_API_VERSION DPNI_CMD(0xa01)
++out_remove:
++ for (j = 0; j < i; j++) {
++ *(__be16 *)key = cpu_to_be16(j << VLAN_PRIO_SHIFT);
+
-+#define DPNI_CMDID_ENABLE DPNI_CMD(0x002)
-+#define DPNI_CMDID_DISABLE DPNI_CMD(0x003)
-+#define DPNI_CMDID_GET_ATTR DPNI_CMD(0x004)
-+#define DPNI_CMDID_RESET DPNI_CMD(0x005)
-+#define DPNI_CMDID_IS_ENABLED DPNI_CMD(0x006)
++ dma_sync_single_for_device(dev, key_params.key_iova, key_size,
++ DMA_TO_DEVICE);
+
-+#define DPNI_CMDID_SET_IRQ DPNI_CMD(0x010)
-+#define DPNI_CMDID_GET_IRQ DPNI_CMD(0x011)
-+#define DPNI_CMDID_SET_IRQ_ENABLE DPNI_CMD(0x012)
-+#define DPNI_CMDID_GET_IRQ_ENABLE DPNI_CMD(0x013)
-+#define DPNI_CMDID_SET_IRQ_MASK DPNI_CMD(0x014)
-+#define DPNI_CMDID_GET_IRQ_MASK DPNI_CMD(0x015)
-+#define DPNI_CMDID_GET_IRQ_STATUS DPNI_CMD(0x016)
-+#define DPNI_CMDID_CLEAR_IRQ_STATUS DPNI_CMD(0x017)
++ err = dpni_remove_qos_entry(priv->mc_io, 0, priv->mc_token,
++ &key_params);
++ if (err)
++ dev_err(dev, "dpni_remove_qos_entry failed: %d\n", err);
++ }
++
++out_cleanup:
++ dma_unmap_single(dev, key_params.key_iova, key_size, DMA_TO_DEVICE);
++out_free_key:
++ kfree(key);
++out_cleanup_mask:
++ if (key_params.mask_iova)
++ dma_unmap_single(dev, key_params.mask_iova, key_size,
++ DMA_TO_DEVICE);
++out_free_mask:
++ kfree(mask);
++out_free:
++ kfree(params_iova);
++ return err;
++}
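Each QoS table entry above keys on the 16-bit VLAN TCI with only the three PCP bits left unmasked, so traffic class i matches any frame whose priority field is i, regardless of VID or DEI. A standalone check of that bit math, with the standard <linux/if_vlan.h> constants inlined:

#include <assert.h>
#include <stdint.h>

#define VLAN_PRIO_MASK	0xe000	/* top 3 TCI bits: the PCP field */
#define VLAN_PRIO_SHIFT	13

/* With key = (i << VLAN_PRIO_SHIFT) and mask = VLAN_PRIO_MASK, the lookup
 * reduces to extracting the PCP bits from the TCI.
 */
static unsigned int tc_for_tci(uint16_t tci)
{
	return (tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
}

int main(void)
{
	assert(tc_for_tci(0x0000) == 0);	/* PCP 0, VID 0 */
	assert(tc_for_tci(0x6123) == 3);	/* PCP 3, any VID/DEI */
	assert(tc_for_tci(0xe001) == 7);	/* PCP 7 */
	return 0;
}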
+
-+#define DPNI_CMDID_SET_POOLS DPNI_CMD(0x200)
-+#define DPNI_CMDID_SET_ERRORS_BEHAVIOR DPNI_CMD(0x20B)
++static int dpaa2_eth_dcbnl_ieee_setpfc(struct net_device *net_dev,
++ struct ieee_pfc *pfc)
++{
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
++ struct dpni_congestion_notification_cfg notification_cfg = {0};
++ struct dpni_link_state state = {0};
++ struct dpni_link_cfg cfg = {0};
++ struct ieee_pfc old_pfc;
++ int err = 0, i;
+
-+#define DPNI_CMDID_GET_QDID DPNI_CMD(0x210)
-+#define DPNI_CMDID_GET_TX_DATA_OFFSET DPNI_CMD(0x212)
-+#define DPNI_CMDID_GET_LINK_STATE DPNI_CMD(0x215)
-+#define DPNI_CMDID_SET_MAX_FRAME_LENGTH DPNI_CMD(0x216)
-+#define DPNI_CMDID_GET_MAX_FRAME_LENGTH DPNI_CMD(0x217)
-+#define DPNI_CMDID_SET_LINK_CFG DPNI_CMD(0x21A)
-+#define DPNI_CMDID_SET_TX_SHAPING DPNI_CMD(0x21B)
++ if (dpaa2_eth_tc_count(priv) == 1) {
++ netdev_dbg(net_dev, "DPNI has 1 TC, PFC configuration N/A\n");
++ return 0;
++ }
+
-+#define DPNI_CMDID_SET_MCAST_PROMISC DPNI_CMD(0x220)
-+#define DPNI_CMDID_GET_MCAST_PROMISC DPNI_CMD(0x221)
-+#define DPNI_CMDID_SET_UNICAST_PROMISC DPNI_CMD(0x222)
-+#define DPNI_CMDID_GET_UNICAST_PROMISC DPNI_CMD(0x223)
-+#define DPNI_CMDID_SET_PRIM_MAC DPNI_CMD(0x224)
-+#define DPNI_CMDID_GET_PRIM_MAC DPNI_CMD(0x225)
-+#define DPNI_CMDID_ADD_MAC_ADDR DPNI_CMD(0x226)
-+#define DPNI_CMDID_REMOVE_MAC_ADDR DPNI_CMD(0x227)
-+#define DPNI_CMDID_CLR_MAC_FILTERS DPNI_CMD(0x228)
++ /* Zero out pfc_enabled prios greater than tc_count */
++ pfc->pfc_en &= (1 << dpaa2_eth_tc_count(priv)) - 1;
+
-+#define DPNI_CMDID_SET_RX_TC_DIST DPNI_CMD(0x235)
++ if (priv->pfc.pfc_en == pfc->pfc_en)
++ /* Same enabled mask, nothing to be done */
++ return 0;
+
-+#define DPNI_CMDID_ADD_FS_ENT DPNI_CMD(0x244)
-+#define DPNI_CMDID_REMOVE_FS_ENT DPNI_CMD(0x245)
-+#define DPNI_CMDID_CLR_FS_ENT DPNI_CMD(0x246)
++ err = set_vlan_qos(priv);
++ if (err)
++ return err;
+
-+#define DPNI_CMDID_GET_STATISTICS DPNI_CMD(0x25D)
-+#define DPNI_CMDID_RESET_STATISTICS DPNI_CMD(0x25E)
-+#define DPNI_CMDID_GET_QUEUE DPNI_CMD(0x25F)
-+#define DPNI_CMDID_SET_QUEUE DPNI_CMD(0x260)
-+#define DPNI_CMDID_GET_TAILDROP DPNI_CMD(0x261)
-+#define DPNI_CMDID_SET_TAILDROP DPNI_CMD(0x262)
++ err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
++ if (err) {
++ netdev_err(net_dev, "ERROR %d getting link state", err);
++ return err;
++ }
+
-+#define DPNI_CMDID_GET_PORT_MAC_ADDR DPNI_CMD(0x263)
++ cfg.rate = state.rate;
++ cfg.options = state.options;
++ if (pfc->pfc_en)
++ cfg.options |= DPNI_LINK_OPT_PFC_PAUSE;
++ else
++ cfg.options &= ~DPNI_LINK_OPT_PFC_PAUSE;
+
-+#define DPNI_CMDID_GET_BUFFER_LAYOUT DPNI_CMD(0x264)
-+#define DPNI_CMDID_SET_BUFFER_LAYOUT DPNI_CMD(0x265)
++ err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg);
++ if (err) {
++ netdev_err(net_dev, "ERROR %d setting link cfg", err);
++ return err;
++ }
+
-+#define DPNI_CMDID_SET_TX_CONFIRMATION_MODE DPNI_CMD(0x266)
-+#define DPNI_CMDID_SET_CONGESTION_NOTIFICATION DPNI_CMD(0x267)
-+#define DPNI_CMDID_GET_CONGESTION_NOTIFICATION DPNI_CMD(0x268)
-+#define DPNI_CMDID_SET_EARLY_DROP DPNI_CMD(0x269)
-+#define DPNI_CMDID_GET_EARLY_DROP DPNI_CMD(0x26A)
-+#define DPNI_CMDID_GET_OFFLOAD DPNI_CMD(0x26B)
-+#define DPNI_CMDID_SET_OFFLOAD DPNI_CMD(0x26C)
++ memcpy(&old_pfc, &priv->pfc, sizeof(priv->pfc));
++ memcpy(&priv->pfc, pfc, sizeof(priv->pfc));
+
-+/* Macros for accessing command fields smaller than 1 byte */
-+#define DPNI_MASK(field) \
-+ GENMASK(DPNI_##field##_SHIFT + DPNI_##field##_SIZE - 1, \
-+ DPNI_##field##_SHIFT)
++ err = set_rx_taildrop(priv);
++ if (err)
++ goto out_restore_config;
++
++ /* configure congestion notifications */
++ notification_cfg.notification_mode = DPNI_CONG_OPT_FLOW_CONTROL;
++ notification_cfg.units = DPNI_CONGESTION_UNIT_FRAMES;
++ notification_cfg.message_iova = 0ULL;
++ notification_cfg.message_ctx = 0ULL;
++
++ for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
++ if (dpaa2_eth_is_pfc_enabled(priv, i)) {
++ notification_cfg.threshold_entry = NAPI_POLL_WEIGHT;
++ notification_cfg.threshold_exit = NAPI_POLL_WEIGHT / 2;
++ } else {
++ notification_cfg.threshold_entry = 0;
++ notification_cfg.threshold_exit = 0;
++ }
+
-+#define dpni_set_field(var, field, val) \
-+ ((var) |= (((val) << DPNI_##field##_SHIFT) & DPNI_MASK(field)))
-+#define dpni_get_field(var, field) \
-+ (((var) & DPNI_MASK(field)) >> DPNI_##field##_SHIFT)
++ err = dpni_set_congestion_notification(priv->mc_io, 0,
++ priv->mc_token,
++ DPNI_QUEUE_RX,
++ i, &notification_cfg);
++ if (err) {
++ netdev_err(net_dev, "Error %d setting congestion notif",
++ err);
++ goto out_restore_config;
++ }
+
-+struct dpni_cmd_open {
-+ __le32 dpni_id;
-+};
++ netdev_dbg(net_dev, "%s congestion notifications for tc %d\n",
++ (notification_cfg.threshold_entry ?
++ "Enabled" : "Disabled"), i);
++ }
+
-+#define DPNI_BACKUP_POOL(val, order) (((val) & 0x1) << (order))
-+struct dpni_cmd_set_pools {
-+ /* cmd word 0 */
-+ u8 num_dpbp;
-+ u8 backup_pool_mask;
-+ __le16 pad;
-+ /* cmd word 0..4 */
-+ __le32 dpbp_id[DPNI_MAX_DPBP];
-+ /* cmd word 4..6 */
-+ __le16 buffer_size[DPNI_MAX_DPBP];
-+};
++ return 0;
+
-+/* The enable indication is always the least significant bit */
-+#define DPNI_ENABLE_SHIFT 0
-+#define DPNI_ENABLE_SIZE 1
++out_restore_config:
++ memcpy(&priv->pfc, &old_pfc, sizeof(priv->pfc));
++ return err;
++}
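The DPNI_MASK()/dpni_set_field()/dpni_get_field() helpers in the dpni-cmd.h hunk above pack sub-byte command fields by deriving a GENMASK from per-field SHIFT/SIZE defines. A standalone model, exercised on the ERROR_ACTION (4-bit) and FRAME_ANN (1-bit) fields that share one flags byte in struct dpni_cmd_set_errors_behavior further down; the 32-bit GENMASK here stands in for the kernel's:

#include <assert.h>

#define GENMASK(h, l) (((~0u) << (l)) & (~0u >> (31 - (h))))

#define DPNI_ERROR_ACTION_SHIFT	0
#define DPNI_ERROR_ACTION_SIZE	4
#define DPNI_FRAME_ANN_SHIFT	4
#define DPNI_FRAME_ANN_SIZE	1

#define DPNI_MASK(field) \
	GENMASK(DPNI_##field##_SHIFT + DPNI_##field##_SIZE - 1, \
		DPNI_##field##_SHIFT)
#define dpni_set_field(var, field, val) \
	((var) |= (((val) << DPNI_##field##_SHIFT) & DPNI_MASK(field)))
#define dpni_get_field(var, field) \
	(((var) & DPNI_MASK(field)) >> DPNI_##field##_SHIFT)

int main(void)
{
	unsigned int flags = 0;

	dpni_set_field(flags, ERROR_ACTION, 0x3);
	dpni_set_field(flags, FRAME_ANN, 1);
	assert(flags == 0x13);
	assert(dpni_get_field(flags, ERROR_ACTION) == 0x3);
	assert(dpni_get_field(flags, FRAME_ANN) == 1);
	return 0;
}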
+
-+struct dpni_rsp_is_enabled {
-+ u8 enabled;
-+};
++static u8 dpaa2_eth_dcbnl_getdcbx(struct net_device *net_dev)
++{
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+
-+struct dpni_rsp_get_irq {
-+ /* response word 0 */
-+ __le32 irq_val;
-+ __le32 pad;
-+ /* response word 1 */
-+ __le64 irq_addr;
-+ /* response word 2 */
-+ __le32 irq_num;
-+ __le32 type;
-+};
++ return priv->dcbx_mode;
++}
+
-+struct dpni_cmd_set_irq_enable {
-+ u8 enable;
-+ u8 pad[3];
-+ u8 irq_index;
-+};
++static u8 dpaa2_eth_dcbnl_setdcbx(struct net_device *net_dev, u8 mode)
++{
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+
-+struct dpni_cmd_get_irq_enable {
-+ __le32 pad;
-+ u8 irq_index;
-+};
++ priv->dcbx_mode = mode;
++ return 0;
++}
+
-+struct dpni_rsp_get_irq_enable {
-+ u8 enabled;
-+};
++static u8 dpaa2_eth_dcbnl_getcap(struct net_device *net_dev, int capid, u8 *cap)
++{
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+
-+struct dpni_cmd_set_irq_mask {
-+ __le32 mask;
-+ u8 irq_index;
-+};
++ switch (capid) {
++ case DCB_CAP_ATTR_PFC:
++ *cap = true;
++ break;
++ case DCB_CAP_ATTR_PFC_TCS:
++ /* bitmap where each bit represents a number of traffic
++ * classes the device can be configured to use for Priority
++ * Flow Control
++ */
++ *cap = 1 << (dpaa2_eth_tc_count(priv) - 1);
++ break;
++ case DCB_CAP_ATTR_DCBX:
++ *cap = priv->dcbx_mode;
++ break;
++ default:
++ *cap = false;
++ break;
++ }
+
-+struct dpni_cmd_get_irq_mask {
-+ __le32 pad;
-+ u8 irq_index;
-+};
++ return 0;
++}
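As the comment in the PFC_TCS branch notes, the capability is a bitmap in which bit n-1 advertises support for n traffic classes, which is what 1 << (dpaa2_eth_tc_count(priv) - 1) encodes. A tiny standalone check:

#include <assert.h>

static unsigned char pfc_tcs_cap(unsigned int tc_count)
{
	return (unsigned char)(1u << (tc_count - 1));	/* tc_count in 1..8 */
}

int main(void)
{
	assert(pfc_tcs_cap(1) == 0x01);
	assert(pfc_tcs_cap(8) == 0x80);
	return 0;
}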
+
-+struct dpni_rsp_get_irq_mask {
-+ __le32 mask;
++const struct dcbnl_rtnl_ops dpaa2_eth_dcbnl_ops = {
++ .ieee_getpfc = dpaa2_eth_dcbnl_ieee_getpfc,
++ .ieee_setpfc = dpaa2_eth_dcbnl_ieee_setpfc,
++ .getdcbx = dpaa2_eth_dcbnl_getdcbx,
++ .setdcbx = dpaa2_eth_dcbnl_setdcbx,
++ .getcap = dpaa2_eth_dcbnl_getcap,
+};
++#endif
+
-+struct dpni_cmd_get_irq_status {
-+ __le32 status;
-+ u8 irq_index;
-+};
++static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
++{
++ struct device *dev;
++ struct net_device *net_dev = NULL;
++ struct dpaa2_eth_priv *priv = NULL;
++ int err = 0;
+
-+struct dpni_rsp_get_irq_status {
-+ __le32 status;
-+};
++ dev = &dpni_dev->dev;
+
-+struct dpni_cmd_clear_irq_status {
-+ __le32 status;
-+ u8 irq_index;
-+};
++ /* Net device */
++ net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA2_ETH_MAX_TX_QUEUES);
++ if (!net_dev) {
++ dev_err(dev, "alloc_etherdev_mq() failed\n");
++ return -ENOMEM;
++ }
+
-+struct dpni_rsp_get_attr {
-+ /* response word 0 */
-+ __le32 options;
-+ u8 num_queues;
-+ u8 num_tcs;
-+ u8 mac_filter_entries;
-+ u8 pad0;
-+ /* response word 1 */
-+ u8 vlan_filter_entries;
-+ u8 pad1;
-+ u8 qos_entries;
-+ u8 pad2;
-+ __le16 fs_entries;
-+ __le16 pad3;
-+ /* response word 2 */
-+ u8 qos_key_size;
-+ u8 fs_key_size;
-+ __le16 wriop_version;
-+};
++ SET_NETDEV_DEV(net_dev, dev);
++ dev_set_drvdata(dev, net_dev);
+
-+#define DPNI_ERROR_ACTION_SHIFT 0
-+#define DPNI_ERROR_ACTION_SIZE 4
-+#define DPNI_FRAME_ANN_SHIFT 4
-+#define DPNI_FRAME_ANN_SIZE 1
++ priv = netdev_priv(net_dev);
++ priv->net_dev = net_dev;
+
-+struct dpni_cmd_set_errors_behavior {
-+ __le32 errors;
-+ /* from least significant bit: error_action:4, set_frame_annotation:1 */
-+ u8 flags;
-+};
++ priv->iommu_domain = iommu_get_domain_for_dev(dev);
+
-+/* There are 3 separate commands for configuring Rx, Tx and Tx confirmation
-+ * buffer layouts, but they all share the same parameters.
-+ * If one of the functions changes, the structure below needs to be split.
-+ */
++ /* Obtain an MC portal */
++ err = fsl_mc_portal_allocate(dpni_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
++ &priv->mc_io);
++ if (err) {
++ if (err == -ENXIO)
++ err = -EPROBE_DEFER;
++ else
++ dev_err(dev, "MC portal allocation failed\n");
++ goto err_portal_alloc;
++ }
+
-+#define DPNI_PASS_TS_SHIFT 0
-+#define DPNI_PASS_TS_SIZE 1
-+#define DPNI_PASS_PR_SHIFT 1
-+#define DPNI_PASS_PR_SIZE 1
-+#define DPNI_PASS_FS_SHIFT 2
-+#define DPNI_PASS_FS_SIZE 1
++ /* MC objects initialization and configuration */
++ err = setup_dpni(dpni_dev);
++ if (err)
++ goto err_dpni_setup;
+
-+struct dpni_cmd_get_buffer_layout {
-+ u8 qtype;
-+};
++ err = setup_dpio(priv);
++ if (err)
++ goto err_dpio_setup;
+
-+struct dpni_rsp_get_buffer_layout {
-+ /* response word 0 */
-+ u8 pad0[6];
-+ /* from LSB: pass_timestamp:1, parser_result:1, frame_status:1 */
-+ u8 flags;
-+ u8 pad1;
-+ /* response word 1 */
-+ __le16 private_data_size;
-+ __le16 data_align;
-+ __le16 head_room;
-+ __le16 tail_room;
-+};
++ setup_fqs(priv);
+
-+struct dpni_cmd_set_buffer_layout {
-+ /* cmd word 0 */
-+ u8 qtype;
-+ u8 pad0[3];
-+ __le16 options;
-+ /* from LSB: pass_timestamp:1, parser_result:1, frame_status:1 */
-+ u8 flags;
-+ u8 pad1;
-+ /* cmd word 1 */
-+ __le16 private_data_size;
-+ __le16 data_align;
-+ __le16 head_room;
-+ __le16 tail_room;
-+};
++ err = setup_dpbp(priv);
++ if (err)
++ goto err_dpbp_setup;
+
-+struct dpni_cmd_set_offload {
-+ u8 pad[3];
-+ u8 dpni_offload;
-+ __le32 config;
-+};
++ err = bind_dpni(priv);
++ if (err)
++ goto err_bind;
+
-+struct dpni_cmd_get_offload {
-+ u8 pad[3];
-+ u8 dpni_offload;
-+};
++ /* Percpu statistics */
++ priv->percpu_stats = alloc_percpu(*priv->percpu_stats);
++ if (!priv->percpu_stats) {
++ dev_err(dev, "alloc_percpu(percpu_stats) failed\n");
++ err = -ENOMEM;
++ goto err_alloc_percpu_stats;
++ }
++ priv->percpu_extras = alloc_percpu(*priv->percpu_extras);
++ if (!priv->percpu_extras) {
++ dev_err(dev, "alloc_percpu(percpu_extras) failed\n");
++ err = -ENOMEM;
++ goto err_alloc_percpu_extras;
++ }
+
-+struct dpni_rsp_get_offload {
-+ __le32 pad;
-+ __le32 config;
-+};
++ err = netdev_init(net_dev);
++ if (err)
++ goto err_netdev_init;
+
-+struct dpni_cmd_get_qdid {
-+ u8 qtype;
-+};
++ /* Configure checksum offload based on current interface flags */
++ err = set_rx_csum(priv, !!(net_dev->features & NETIF_F_RXCSUM));
++ if (err)
++ goto err_csum;
+
-+struct dpni_rsp_get_qdid {
-+ __le16 qdid;
-+};
++ err = set_tx_csum(priv, !!(net_dev->features &
++ (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)));
++ if (err)
++ goto err_csum;
+
-+struct dpni_rsp_get_tx_data_offset {
-+ __le16 data_offset;
-+};
++ err = alloc_rings(priv);
++ if (err)
++ goto err_alloc_rings;
+
-+struct dpni_cmd_get_statistics {
-+ u8 page_number;
-+};
++ net_dev->ethtool_ops = &dpaa2_ethtool_ops;
++#ifdef CONFIG_FSL_DPAA2_ETH_DCB
++ net_dev->dcbnl_ops = &dpaa2_eth_dcbnl_ops;
++ priv->dcbx_mode = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
++#endif
+
-+struct dpni_rsp_get_statistics {
-+ __le64 counter[DPNI_STATISTICS_CNT];
-+};
++ /* Add a NAPI context for each channel */
++ add_ch_napi(priv);
++ enable_ch_napi(priv);
+
-+struct dpni_cmd_set_link_cfg {
-+ /* cmd word 0 */
-+ __le64 pad0;
-+ /* cmd word 1 */
-+ __le32 rate;
-+ __le32 pad1;
-+ /* cmd word 2 */
-+ __le64 options;
-+};
++ err = setup_irqs(dpni_dev);
++ if (err) {
++ netdev_warn(net_dev, "Failed to set link interrupt, fall back to polling\n");
++ priv->poll_thread = kthread_run(poll_link_state, priv,
++ "%s_poll_link", net_dev->name);
++ if (IS_ERR(priv->poll_thread)) {
++ netdev_err(net_dev, "Error starting polling thread\n");
++ goto err_poll_thread;
++ }
++ priv->do_link_poll = true;
++ }
+
-+#define DPNI_LINK_STATE_SHIFT 0
-+#define DPNI_LINK_STATE_SIZE 1
++ dpaa2_eth_sysfs_init(&net_dev->dev);
++#ifdef CONFIG_FSL_DPAA2_ETH_DEBUGFS
++ dpaa2_dbg_add(priv);
++#endif
+
-+struct dpni_rsp_get_link_state {
-+ /* response word 0 */
-+ __le32 pad0;
-+ /* from LSB: up:1 */
-+ u8 flags;
-+ u8 pad1[3];
-+ /* response word 1 */
-+ __le32 rate;
-+ __le32 pad2;
-+ /* response word 2 */
-+ __le64 options;
-+};
++ dev_info(dev, "Probed interface %s\n", net_dev->name);
++ return 0;
+
-+struct dpni_cmd_set_tx_shaping {
-+ /* cmd word 0 */
-+ __le16 max_burst_size;
-+ __le16 pad0[3];
-+ /* cmd word 1 */
-+ __le32 rate_limit;
-+};
++err_poll_thread:
++ free_rings(priv);
++err_alloc_rings:
++err_csum:
++ unregister_netdev(net_dev);
++err_netdev_init:
++ free_percpu(priv->percpu_extras);
++err_alloc_percpu_extras:
++ free_percpu(priv->percpu_stats);
++err_alloc_percpu_stats:
++ disable_ch_napi(priv);
++ del_ch_napi(priv);
++err_bind:
++ free_dpbp(priv);
++err_dpbp_setup:
++ free_dpio(priv);
++err_dpio_setup:
++ free_dpni(priv);
++err_dpni_setup:
++ fsl_mc_portal_free(priv->mc_io);
++err_portal_alloc:
++ dev_set_drvdata(dev, NULL);
++ free_netdev(net_dev);
+
-+struct dpni_cmd_set_max_frame_length {
-+ __le16 max_frame_length;
-+};
++ return err;
++}
+
-+struct dpni_rsp_get_max_frame_length {
-+ __le16 max_frame_length;
-+};
++static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
++{
++ struct device *dev;
++ struct net_device *net_dev;
++ struct dpaa2_eth_priv *priv;
+
-+struct dpni_cmd_set_multicast_promisc {
-+ u8 enable;
-+};
++ dev = &ls_dev->dev;
++ net_dev = dev_get_drvdata(dev);
++ priv = netdev_priv(net_dev);
+
-+struct dpni_rsp_get_multicast_promisc {
-+ u8 enabled;
-+};
++#ifdef CONFIG_FSL_DPAA2_ETH_DEBUGFS
++ dpaa2_dbg_remove(priv);
++#endif
++ dpaa2_eth_sysfs_remove(&net_dev->dev);
+
-+struct dpni_cmd_set_unicast_promisc {
-+ u8 enable;
-+};
++ unregister_netdev(net_dev);
+
-+struct dpni_rsp_get_unicast_promisc {
-+ u8 enabled;
-+};
++ disable_ch_napi(priv);
++ del_ch_napi(priv);
+
-+struct dpni_cmd_set_primary_mac_addr {
-+ __le16 pad;
-+ u8 mac_addr[6];
-+};
++ if (priv->do_link_poll)
++ kthread_stop(priv->poll_thread);
++ else
++ fsl_mc_free_irqs(ls_dev);
+
-+struct dpni_rsp_get_primary_mac_addr {
-+ __le16 pad;
-+ u8 mac_addr[6];
-+};
++ free_rings(priv);
++ free_percpu(priv->percpu_stats);
++ free_percpu(priv->percpu_extras);
++ free_dpbp(priv);
++ free_dpio(priv);
++ free_dpni(priv);
+
-+struct dpni_rsp_get_port_mac_addr {
-+ __le16 pad;
-+ u8 mac_addr[6];
-+};
++ fsl_mc_portal_free(priv->mc_io);
+
-+struct dpni_cmd_add_mac_addr {
-+ __le16 pad;
-+ u8 mac_addr[6];
++ dev_set_drvdata(dev, NULL);
++ free_netdev(net_dev);
++
++ dev_dbg(net_dev->dev.parent, "Removed interface %s\n", net_dev->name);
++
++ return 0;
++}
++
++static const struct fsl_mc_device_id dpaa2_eth_match_id_table[] = {
++ {
++ .vendor = FSL_MC_VENDOR_FREESCALE,
++ .obj_type = "dpni",
++ },
++ { .vendor = 0x0 }
+};
++MODULE_DEVICE_TABLE(fslmc, dpaa2_eth_match_id_table);
+
-+struct dpni_cmd_remove_mac_addr {
-+ __le16 pad;
-+ u8 mac_addr[6];
++static struct fsl_mc_driver dpaa2_eth_driver = {
++ .driver = {
++ .name = KBUILD_MODNAME,
++ .owner = THIS_MODULE,
++ },
++ .probe = dpaa2_eth_probe,
++ .remove = dpaa2_eth_remove,
++ .match_id_table = dpaa2_eth_match_id_table
+};
+
-+#define DPNI_UNICAST_FILTERS_SHIFT 0
-+#define DPNI_UNICAST_FILTERS_SIZE 1
-+#define DPNI_MULTICAST_FILTERS_SHIFT 1
-+#define DPNI_MULTICAST_FILTERS_SIZE 1
++static int __init dpaa2_eth_driver_init(void)
++{
++ int err;
+
-+struct dpni_cmd_clear_mac_filters {
-+ /* from LSB: unicast:1, multicast:1 */
-+ u8 flags;
-+};
++ dpaa2_eth_dbg_init();
++ err = fsl_mc_driver_register(&dpaa2_eth_driver);
++ if (err)
++ goto out_debugfs_err;
+
-+#define DPNI_DIST_MODE_SHIFT 0
-+#define DPNI_DIST_MODE_SIZE 4
-+#define DPNI_MISS_ACTION_SHIFT 4
-+#define DPNI_MISS_ACTION_SIZE 4
++ err = dpaa2_ceetm_register();
++ if (err)
++ goto out_ceetm_err;
+
-+struct dpni_cmd_set_rx_tc_dist {
-+ /* cmd word 0 */
-+ __le16 dist_size;
-+ u8 tc_id;
-+ /* from LSB: dist_mode:4, miss_action:4 */
-+ u8 flags;
-+ __le16 pad0;
-+ __le16 default_flow_id;
-+ /* cmd word 1..5 */
-+ __le64 pad1[5];
-+ /* cmd word 6 */
-+ __le64 key_cfg_iova;
-+};
++ return 0;
+
-+/* dpni_set_rx_tc_dist extension (structure of the DMA-able memory at
-+ * key_cfg_iova)
-+ */
-+struct dpni_mask_cfg {
-+ u8 mask;
-+ u8 offset;
-+};
++out_ceetm_err:
++ fsl_mc_driver_unregister(&dpaa2_eth_driver);
++out_debugfs_err:
++ dpaa2_eth_dbg_exit();
++ return err;
++}
+
-+#define DPNI_EFH_TYPE_SHIFT 0
-+#define DPNI_EFH_TYPE_SIZE 4
-+#define DPNI_EXTRACT_TYPE_SHIFT 0
-+#define DPNI_EXTRACT_TYPE_SIZE 4
++static void __exit dpaa2_eth_driver_exit(void)
++{
++ dpaa2_ceetm_unregister();
++ fsl_mc_driver_unregister(&dpaa2_eth_driver);
++ dpaa2_eth_dbg_exit();
++}
+
-+struct dpni_dist_extract {
-+ /* word 0 */
-+ u8 prot;
-+ /* EFH type stored in the 4 least significant bits */
-+ u8 efh_type;
-+ u8 size;
-+ u8 offset;
-+ __le32 field;
-+ /* word 1 */
-+ u8 hdr_index;
-+ u8 constant;
-+ u8 num_of_repeats;
-+ u8 num_of_byte_masks;
-+ /* Extraction type is stored in the 4 LSBs */
-+ u8 extract_type;
-+ u8 pad[3];
-+ /* word 2 */
-+ struct dpni_mask_cfg masks[4];
-+};
++module_init(dpaa2_eth_driver_init);
++module_exit(dpaa2_eth_driver_exit);
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h
+@@ -0,0 +1,601 @@
++/* Copyright 2014-2016 Freescale Semiconductor Inc.
++ * Copyright 2016 NXP
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
+
-+struct dpni_ext_set_rx_tc_dist {
-+ /* extension word 0 */
-+ u8 num_extracts;
-+ u8 pad[7];
-+ /* words 1..25 */
-+ struct dpni_dist_extract extracts[DPKG_MAX_NUM_OF_EXTRACTS];
-+};
++#ifndef __DPAA2_ETH_H
++#define __DPAA2_ETH_H
+
-+struct dpni_cmd_get_queue {
-+ u8 qtype;
-+ u8 tc;
-+ u8 index;
-+};
++#include <linux/dcbnl.h>
++#include <linux/netdevice.h>
++#include <linux/if_vlan.h>
+
-+#define DPNI_DEST_TYPE_SHIFT 0
-+#define DPNI_DEST_TYPE_SIZE 4
-+#define DPNI_STASH_CTRL_SHIFT 6
-+#define DPNI_STASH_CTRL_SIZE 1
-+#define DPNI_HOLD_ACTIVE_SHIFT 7
-+#define DPNI_HOLD_ACTIVE_SIZE 1
++#include "../../fsl-mc/include/dpaa2-io.h"
++#include "../../fsl-mc/include/dpaa2-fd.h"
++#include "dpni.h"
++#include "dpni-cmd.h"
+
-+struct dpni_rsp_get_queue {
-+ /* response word 0 */
-+ __le64 pad0;
-+ /* response word 1 */
-+ __le32 dest_id;
-+ __le16 pad1;
-+ u8 dest_prio;
-+ /* From LSB: dest_type:4, pad:2, flc_stash_ctrl:1, hold_active:1 */
-+ u8 flags;
-+ /* response word 2 */
-+ __le64 flc;
-+ /* response word 3 */
-+ __le64 user_context;
-+ /* response word 4 */
-+ __le32 fqid;
-+ __le16 qdbin;
-+};
++#include "dpaa2-eth-trace.h"
++#include "dpaa2-eth-debugfs.h"
+
-+struct dpni_cmd_set_queue {
-+ /* cmd word 0 */
-+ u8 qtype;
-+ u8 tc;
-+ u8 index;
-+ u8 options;
-+ __le32 pad0;
-+ /* cmd word 1 */
-+ __le32 dest_id;
-+ __le16 pad1;
-+ u8 dest_prio;
-+ u8 flags;
-+ /* cmd word 2 */
-+ __le64 flc;
-+ /* cmd word 3 */
-+ __le64 user_context;
-+};
++#define DPAA2_WRIOP_VERSION(x, y, z) ((x) << 10 | (y) << 5 | (z) << 0)
+
-+struct dpni_cmd_add_fs_entry {
-+ /* cmd word 0 */
-+ u16 options;
-+ u8 tc_id;
-+ u8 key_size;
-+ u16 index;
-+ u16 flow_id;
-+ /* cmd word 1 */
-+ u64 key_iova;
-+ /* cmd word 2 */
-+ u64 mask_iova;
-+ /* cmd word 3 */
-+ u64 flc;
-+};
++#define DPAA2_ETH_STORE_SIZE 16
+
-+struct dpni_cmd_remove_fs_entry {
-+ /* cmd word 0 */
-+ __le16 pad0;
-+ u8 tc_id;
-+ u8 key_size;
-+ __le32 pad1;
-+ /* cmd word 1 */
-+ u64 key_iova;
-+ /* cmd word 2 */
-+ u64 mask_iova;
-+};
-+
-+struct dpni_cmd_set_taildrop {
-+ /* cmd word 0 */
-+ u8 congestion_point;
-+ u8 qtype;
-+ u8 tc;
-+ u8 index;
-+ __le32 pad0;
-+ /* cmd word 1 */
-+ /* Only least significant bit is relevant */
-+ u8 enable;
-+ u8 pad1;
-+ u8 units;
-+ u8 pad2;
-+ __le32 threshold;
-+};
++/* Max number of Tx confirmations we process in a single NAPI poll call.
++ * Since Tx confirmations take less processing time than Rx frames, this
++ * threshold can sit above the NAPI Rx budget.
++ */
++#define TX_CONF_PER_NAPI_POLL 256
+
-+struct dpni_cmd_get_taildrop {
-+ u8 congestion_point;
-+ u8 qtype;
-+ u8 tc;
-+ u8 index;
-+};
++/* Maximum number of scatter-gather entries in an ingress frame,
++ * considering the maximum receive frame size is 64K
++ */
++#define DPAA2_ETH_MAX_SG_ENTRIES ((64 * 1024) / DPAA2_ETH_RX_BUF_SIZE)
+
-+struct dpni_rsp_get_taildrop {
-+ /* cmd word 0 */
-+ __le64 pad0;
-+ /* cmd word 1 */
-+ /* only least significant bit is relevant */
-+ u8 enable;
-+ u8 pad1;
-+ u8 units;
-+ u8 pad2;
-+ __le32 threshold;
-+};
++/* Maximum acceptable MTU value. It is in direct relation with the hardware
++ * enforced Max Frame Length (currently 10k).
++ */
++#define DPAA2_ETH_MFL (10 * 1024)
++#define DPAA2_ETH_MAX_MTU (DPAA2_ETH_MFL - VLAN_ETH_HLEN)
++/* Convert L3 MTU to L2 MFL */
++#define DPAA2_ETH_L2_MAX_FRM(mtu) ((mtu) + VLAN_ETH_HLEN)
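A quick standalone check of the conversion: with the 10 KiB hardware frame length cap and an 18-byte VLAN Ethernet header (two MAC addresses, VLAN tag, EtherType), the largest advertisable L3 MTU comes out to 10222 bytes:

#include <assert.h>

#define VLAN_ETH_HLEN		18
#define DPAA2_ETH_MFL		(10 * 1024)
#define DPAA2_ETH_MAX_MTU	(DPAA2_ETH_MFL - VLAN_ETH_HLEN)
#define DPAA2_ETH_L2_MAX_FRM(mtu)	((mtu) + VLAN_ETH_HLEN)

int main(void)
{
	assert(DPAA2_ETH_MAX_MTU == 10222);
	assert(DPAA2_ETH_L2_MAX_FRM(DPAA2_ETH_MAX_MTU) == DPAA2_ETH_MFL);
	return 0;
}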
+
-+#define DPNI_DEST_TYPE_SHIFT 0
-+#define DPNI_DEST_TYPE_SIZE 4
-+#define DPNI_CONG_UNITS_SHIFT 4
-+#define DPNI_CONG_UNITS_SIZE 2
++/* Maximum burst size value for Tx shaping */
++#define DPAA2_ETH_MAX_BURST_SIZE 0xF7FF
+
-+struct dpni_cmd_set_congestion_notification {
-+ /* cmd word 0 */
-+ u8 qtype;
-+ u8 tc;
-+ u8 pad[6];
-+ /* cmd word 1 */
-+ u32 dest_id;
-+ u16 notification_mode;
-+ u8 dest_priority;
-+ /* from LSB: dest_type: 4 units:2 */
-+ u8 type_units;
-+ /* cmd word 2 */
-+ u64 message_iova;
-+ /* cmd word 3 */
-+ u64 message_ctx;
-+ /* cmd word 4 */
-+ u32 threshold_entry;
-+ u32 threshold_exit;
-+};
++/* Maximum number of buffers that can be acquired/released through a single
++ * QBMan command
++ */
++#define DPAA2_ETH_BUFS_PER_CMD 7
+
-+#endif /* _FSL_DPNI_CMD_H */
---- /dev/null
-+++ b/drivers/staging/fsl-dpaa2/ethernet/dpni.c
-@@ -0,0 +1,1770 @@
-+/* Copyright 2013-2016 Freescale Semiconductor Inc.
-+ * Copyright 2016 NXP
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
++/* Set the taildrop threshold (in bytes) to allow the enqueue of several jumbo
++ * frames in the Rx queues (length of the current frame is not
++ * taken into account when making the taildrop decision)
+ */
-+#include "../../fsl-mc/include/mc-sys.h"
-+#include "../../fsl-mc/include/mc-cmd.h"
-+#include "dpni.h"
-+#include "dpni-cmd.h"
++#define DPAA2_ETH_TAILDROP_THRESH (64 * 1024)
+
-+/**
-+ * dpni_prepare_key_cfg() - prepare extract parameters
-+ * @cfg: defining a full Key Generation profile (rule)
-+ * @key_cfg_buf: Zeroed 256 bytes of memory before mapping it to DMA
-+ *
-+ * This function has to be called before the following functions:
-+ * - dpni_set_rx_tc_dist()
-+ * - dpni_set_qos_table()
++/* Buffer quota per queue. Must be large enough such that for minimum sized
++ * frames taildrop kicks in before the bpool gets depleted, so we compute
++ * how many 64B frames fit inside the taildrop threshold and add a margin
++ * to accommodate the buffer refill delay.
+ */
-+int dpni_prepare_key_cfg(const struct dpkg_profile_cfg *cfg, u8 *key_cfg_buf)
-+{
-+ int i, j;
-+ struct dpni_ext_set_rx_tc_dist *dpni_ext;
-+ struct dpni_dist_extract *extr;
++#define DPAA2_ETH_MAX_FRAMES_PER_QUEUE (DPAA2_ETH_TAILDROP_THRESH / 64)
++#define DPAA2_ETH_NUM_BUFS_PER_CH (DPAA2_ETH_MAX_FRAMES_PER_QUEUE + 256)
++#define DPAA2_ETH_REFILL_THRESH(priv) \
++ ((priv)->max_bufs_per_ch - DPAA2_ETH_BUFS_PER_CMD)
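The arithmetic behind the per-channel quota, worked out standalone: 64 KiB of taildrop budget holds at most 1024 minimum-sized (64 B) frames, and the 256-buffer margin absorbs the refill delay, for 1280 buffers per channel:

#include <assert.h>

#define DPAA2_ETH_TAILDROP_THRESH	(64 * 1024)
#define DPAA2_ETH_MAX_FRAMES_PER_QUEUE	(DPAA2_ETH_TAILDROP_THRESH / 64)
#define DPAA2_ETH_NUM_BUFS_PER_CH	(DPAA2_ETH_MAX_FRAMES_PER_QUEUE + 256)

int main(void)
{
	assert(DPAA2_ETH_MAX_FRAMES_PER_QUEUE == 1024);
	assert(DPAA2_ETH_NUM_BUFS_PER_CH == 1280);
	return 0;
}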
+
-+ if (cfg->num_extracts > DPKG_MAX_NUM_OF_EXTRACTS)
-+ return -EINVAL;
++/* Global buffer quota in case flow control is enabled */
++#define DPAA2_ETH_NUM_BUFS_FC 256
+
-+ dpni_ext = (struct dpni_ext_set_rx_tc_dist *)key_cfg_buf;
-+ dpni_ext->num_extracts = cfg->num_extracts;
++/* Hardware requires alignment for ingress/egress buffer addresses */
++#define DPAA2_ETH_TX_BUF_ALIGN 64
+
-+ for (i = 0; i < cfg->num_extracts; i++) {
-+ extr = &dpni_ext->extracts[i];
++/* Due to a limitation in WRIOP 1.0.0, the RX buffer data must be aligned
++ * to 256B. For newer revisions, the requirement is only for 64B alignment
++ */
++#define DPAA2_ETH_RX_BUF_ALIGN_REV1 256
++#define DPAA2_ETH_RX_BUF_ALIGN 64
+
-+ switch (cfg->extracts[i].type) {
-+ case DPKG_EXTRACT_FROM_HDR:
-+ extr->prot = cfg->extracts[i].extract.from_hdr.prot;
-+ dpni_set_field(extr->efh_type, EFH_TYPE,
-+ cfg->extracts[i].extract.from_hdr.type);
-+ extr->size = cfg->extracts[i].extract.from_hdr.size;
-+ extr->offset = cfg->extracts[i].extract.from_hdr.offset;
-+ extr->field = cpu_to_le32(
-+ cfg->extracts[i].extract.from_hdr.field);
-+ extr->hdr_index =
-+ cfg->extracts[i].extract.from_hdr.hdr_index;
-+ break;
-+ case DPKG_EXTRACT_FROM_DATA:
-+ extr->size = cfg->extracts[i].extract.from_data.size;
-+ extr->offset =
-+ cfg->extracts[i].extract.from_data.offset;
-+ break;
-+ case DPKG_EXTRACT_FROM_PARSE:
-+ extr->size = cfg->extracts[i].extract.from_parse.size;
-+ extr->offset =
-+ cfg->extracts[i].extract.from_parse.offset;
-+ break;
-+ default:
-+ return -EINVAL;
-+ }
++#define DPAA2_ETH_RX_BUF_SIZE 2048
++#define DPAA2_ETH_SKB_SIZE \
++ (DPAA2_ETH_RX_BUF_SIZE + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+
-+ extr->num_of_byte_masks = cfg->extracts[i].num_of_byte_masks;
-+ dpni_set_field(extr->extract_type, EXTRACT_TYPE,
-+ cfg->extracts[i].type);
++/* PTP nominal frequency 1GHz */
++#define DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS 1
+
-+ for (j = 0; j < DPKG_NUM_OF_MASKS; j++) {
-+ extr->masks[j].mask = cfg->extracts[i].masks[j].mask;
-+ extr->masks[j].offset =
-+ cfg->extracts[i].masks[j].offset;
-+ }
-+ }
++/* Hardware annotation area in RX/TX buffers */
++#define DPAA2_ETH_RX_HWA_SIZE 64
++#define DPAA2_ETH_TX_HWA_SIZE 128
+
-+ return 0;
-+}
++/* We are accommodating a skb backpointer and some S/G info
++ * in the frame's software annotation. The hardware
++ * options are either 0 or 64, so we choose the latter.
++ */
++#define DPAA2_ETH_SWA_SIZE 64
+
-+/**
-+ * dpni_open() - Open a control session for the specified object
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @dpni_id: DPNI unique ID
-+ * @token: Returned token; use in subsequent API calls
-+ *
-+ * This function can be used to open a control session for an
-+ * already created object; an object may have been declared in
-+ * the DPL or by calling the dpni_create() function.
-+ * This function returns a unique authentication token,
-+ * associated with the specific object ID and the specific MC
-+ * portal; this token must be used in all subsequent commands for
-+ * this specific object.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
++/* We store different information in the software annotation area of a Tx
++ * frame, depending on what type of frame it is.
+ */
-+int dpni_open(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ int dpni_id,
-+ u16 *token)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpni_cmd_open *cmd_params;
++enum dpaa2_eth_swa_type {
++ DPAA2_ETH_SWA_SINGLE,
++ DPAA2_ETH_SWA_SG,
++};
+
-+ int err;
++/* Must keep this struct smaller than DPAA2_ETH_SWA_SIZE */
++struct dpaa2_eth_swa {
++ enum dpaa2_eth_swa_type type;
++ union {
++ struct {
++ struct sk_buff *skb;
++ } single;
++ struct {
++ struct sk_buff *skb;
++ struct scatterlist *scl;
++ int num_sg;
++ int sgt_size;
++ } sg;
++ };
++};
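++/* As a sketch (not necessarily checked elsewhere in this patch), the
++ * size constraint above could be enforced at build time with:
++ *
++ *	BUILD_BUG_ON(sizeof(struct dpaa2_eth_swa) > DPAA2_ETH_SWA_SIZE);
++ */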
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_OPEN,
-+ cmd_flags,
-+ 0);
-+ cmd_params = (struct dpni_cmd_open *)cmd.params;
-+ cmd_params->dpni_id = cpu_to_le32(dpni_id);
++/* Annotation valid bits in FD FRC */
++#define DPAA2_FD_FRC_FASV 0x8000
++#define DPAA2_FD_FRC_FAEADV 0x4000
++#define DPAA2_FD_FRC_FAPRV 0x2000
++#define DPAA2_FD_FRC_FAIADV 0x1000
++#define DPAA2_FD_FRC_FASWOV 0x0800
++#define DPAA2_FD_FRC_FAICFDV 0x0400
+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
++#define DPAA2_FD_RX_ERR_MASK (FD_CTRL_SBE | FD_CTRL_FAERR)
++#define DPAA2_FD_TX_ERR_MASK (FD_CTRL_UFD | \
++ FD_CTRL_SBE | \
++ FD_CTRL_FSE | \
++ FD_CTRL_FAERR)
+
-+ /* retrieve response parameters */
-+ *token = mc_cmd_hdr_read_token(&cmd);
++/* Annotation bits in FD CTRL */
++#define DPAA2_FD_CTRL_ASAL 0x00020000 /* ASAL = 128 */
+
-+ return 0;
-+}
++/* Frame annotation status */
++struct dpaa2_fas {
++ u8 reserved;
++ u8 ppid;
++ __le16 ifpid;
++ __le32 status;
++};
+
-+/**
-+ * dpni_close() - Close the control session of the object
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ *
-+ * After this function is called, no further operations are
-+ * allowed on the object without opening a new control session.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
++/* The frame annotation status word is located in the first 8 bytes
++ * of the buffer's hardware annotation area.
+ */
-+int dpni_close(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token)
-+{
-+ struct mc_command cmd = { 0 };
++#define DPAA2_FAS_OFFSET 0
++#define DPAA2_FAS_SIZE (sizeof(struct dpaa2_fas))
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLOSE,
-+ cmd_flags,
-+ token);
++/* Timestamp is located in the next 8 bytes of the buffer's
++ * hardware annotation area
++ */
++#define DPAA2_TS_OFFSET 0x8
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
++/* Frame annotation egress action descriptor */
++#define DPAA2_FAEAD_OFFSET 0x58
+
-+/**
-+ * dpni_set_pools() - Set buffer pools configuration
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @cfg: Buffer pools configuration
-+ *
-+ * mandatory for DPNI operation
-+ * warning:Allowed only when DPNI is disabled
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_set_pools(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ const struct dpni_pools_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpni_cmd_set_pools *cmd_params;
-+ int i;
++struct dpaa2_faead {
++ __le32 conf_fqid;
++ __le32 ctrl;
++};
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_POOLS,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpni_cmd_set_pools *)cmd.params;
-+ cmd_params->num_dpbp = cfg->num_dpbp;
-+ for (i = 0; i < DPNI_MAX_DPBP; i++) {
-+ cmd_params->dpbp_id[i] = cpu_to_le32(cfg->pools[i].dpbp_id);
-+ cmd_params->buffer_size[i] =
-+ cpu_to_le16(cfg->pools[i].buffer_size);
-+ cmd_params->backup_pool_mask |=
-+ DPNI_BACKUP_POOL(cfg->pools[i].backup_pool, i);
-+ }
++#define DPAA2_FAEAD_A2V 0x20000000
++#define DPAA2_FAEAD_A4V 0x08000000
++#define DPAA2_FAEAD_UPDV 0x00001000
++#define DPAA2_FAEAD_EBDDV 0x00002000
++#define DPAA2_FAEAD_UPD 0x00000010
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
++/* Accessors for the hardware annotation fields that we use */
++static inline void *dpaa2_get_hwa(void *buf_addr, bool swa)
++{
++ return buf_addr + (swa ? DPAA2_ETH_SWA_SIZE : 0);
+}
+
-+/**
-+ * dpni_enable() - Enable the DPNI, allow sending and receiving frames.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_enable(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token)
++static inline struct dpaa2_fas *dpaa2_get_fas(void *buf_addr, bool swa)
+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ENABLE,
-+ cmd_flags,
-+ token);
++ return dpaa2_get_hwa(buf_addr, swa) + DPAA2_FAS_OFFSET;
++}
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
++static inline u64 *dpaa2_get_ts(void *buf_addr, bool swa)
++{
++ return dpaa2_get_hwa(buf_addr, swa) + DPAA2_TS_OFFSET;
+}
+
-+/**
-+ * dpni_disable() - Disable the DPNI, stop sending and receiving frames.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_disable(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token)
++static inline struct dpaa2_faead *dpaa2_get_faead(void *buf_addr, bool swa)
+{
-+ struct mc_command cmd = { 0 };
++ return dpaa2_get_hwa(buf_addr, swa) + DPAA2_FAEAD_OFFSET;
++}
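++/* Illustrative use on the Rx path, where no software annotation
++ * precedes the hardware annotation (a sketch, not the actual hot-path
++ * code):
++ *
++ *	struct dpaa2_fas *fas = dpaa2_get_fas(vaddr, false);
++ *
++ *	if (le32_to_cpu(fas->status) & DPAA2_FAS_RX_ERR_MASK)
++ *		... treat as an errored frame ...
++ */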
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_DISABLE,
-+ cmd_flags,
-+ token);
++/* Error and status bits in the frame annotation status word */
++/* Debug frame, otherwise supposed to be discarded */
++#define DPAA2_FAS_DISC 0x80000000
++/* MACSEC frame */
++#define DPAA2_FAS_MS 0x40000000
++#define DPAA2_FAS_PTP 0x08000000
++/* Ethernet multicast frame */
++#define DPAA2_FAS_MC 0x04000000
++/* Ethernet broadcast frame */
++#define DPAA2_FAS_BC 0x02000000
++#define DPAA2_FAS_KSE 0x00040000
++#define DPAA2_FAS_EOFHE 0x00020000
++#define DPAA2_FAS_MNLE 0x00010000
++#define DPAA2_FAS_TIDE 0x00008000
++#define DPAA2_FAS_PIEE 0x00004000
++/* Frame length error */
++#define DPAA2_FAS_FLE 0x00002000
++/* Frame physical error */
++#define DPAA2_FAS_FPE 0x00001000
++#define DPAA2_FAS_PTE 0x00000080
++#define DPAA2_FAS_ISP 0x00000040
++#define DPAA2_FAS_PHE 0x00000020
++#define DPAA2_FAS_BLE 0x00000010
++/* L3 csum validation performed */
++#define DPAA2_FAS_L3CV 0x00000008
++/* L3 csum error */
++#define DPAA2_FAS_L3CE 0x00000004
++/* L4 csum validation performed */
++#define DPAA2_FAS_L4CV 0x00000002
++/* L4 csum error */
++#define DPAA2_FAS_L4CE 0x00000001
++/* Possible errors on the ingress path */
++#define DPAA2_FAS_RX_ERR_MASK (DPAA2_FAS_KSE | \
++ DPAA2_FAS_EOFHE | \
++ DPAA2_FAS_MNLE | \
++ DPAA2_FAS_TIDE | \
++ DPAA2_FAS_PIEE | \
++ DPAA2_FAS_FLE | \
++ DPAA2_FAS_FPE | \
++ DPAA2_FAS_PTE | \
++ DPAA2_FAS_ISP | \
++ DPAA2_FAS_PHE | \
++ DPAA2_FAS_BLE | \
++ DPAA2_FAS_L3CE | \
++ DPAA2_FAS_L4CE)
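++/* Rx frames whose FAS status word intersects the mask above are treated
++ * as errored: they are accounted as rx_errors and dropped rather than
++ * delivered up the stack (the handling lives in dpaa2-eth.c).
++ */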
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
++/* Time in milliseconds between link state updates */
++#define DPAA2_ETH_LINK_STATE_REFRESH 1000
+
-+/**
-+ * dpni_is_enabled() - Check if the DPNI is enabled.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @en: Returns '1' if object is enabled; '0' otherwise
-+ *
-+ * Return: '0' on Success; Error code otherwise.
++/* Number of times to retry a frame enqueue before giving up.
++ * Value determined empirically, in order to minimize the number
++ * of frames dropped on Tx
+ */
-+int dpni_is_enabled(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ int *en)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpni_rsp_is_enabled *rsp_params;
-+ int err;
++#define DPAA2_ETH_ENQUEUE_RETRIES 10
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_IS_ENABLED,
-+ cmd_flags,
-+ token);
++/* Tx congestion entry & exit thresholds, in number of bytes.
++ * We allow a maximum of 512KB worth of frames pending processing on the Tx
++ * queues of an interface
++ */
++#define DPAA2_ETH_TX_CONG_ENTRY_THRESH (512 * 1024)
++#define DPAA2_ETH_TX_CONG_EXIT_THRESH \
++ (DPAA2_ETH_TX_CONG_ENTRY_THRESH * 9 / 10)
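++/* The 10% gap between the entry and exit thresholds provides hysteresis,
++ * so the congestion state does not flap when the queue occupancy
++ * fluctuates around a single threshold.
++ */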
+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
++/* Driver statistics, other than those in struct rtnl_link_stats64.
++ * These are usually collected per-CPU and aggregated by ethtool.
++ */
++struct dpaa2_eth_drv_stats {
++ __u64 tx_conf_frames;
++ __u64 tx_conf_bytes;
++ __u64 tx_sg_frames;
++ __u64 tx_sg_bytes;
++ __u64 tx_reallocs;
++ __u64 rx_sg_frames;
++ __u64 rx_sg_bytes;
++ /* Enqueues retried due to portal busy */
++ __u64 tx_portal_busy;
++};
+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpni_rsp_is_enabled *)cmd.params;
-+ *en = dpni_get_field(rsp_params->enabled, ENABLE);
++/* Per-FQ statistics */
++struct dpaa2_eth_fq_stats {
++ /* Number of frames received on this queue */
++ __u64 frames;
++ /* Number of times this queue entered congestion */
++ __u64 congestion_entry;
++};
+
-+ return 0;
-+}
++/* Per-channel statistics */
++struct dpaa2_eth_ch_stats {
++ /* Volatile dequeues retried due to portal busy */
++ __u64 dequeue_portal_busy;
++ /* Number of CDANs; useful to estimate avg NAPI len */
++ __u64 cdan;
++ /* Number of frames received on queues from this channel */
++ __u64 frames;
++ /* Pull errors */
++ __u64 pull_err;
++};
+
-+/**
-+ * dpni_reset() - Reset the DPNI, returns the object to initial state.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_reset(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token)
-+{
-+ struct mc_command cmd = { 0 };
++#define DPAA2_ETH_MAX_TCS 8
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_RESET,
-+ cmd_flags,
-+ token);
++/* Maximum number of queues associated with a DPNI */
++#define DPAA2_ETH_MAX_RX_QUEUES (DPNI_MAX_DIST_SIZE * DPAA2_ETH_MAX_TCS)
++#define DPAA2_ETH_MAX_TX_QUEUES DPNI_MAX_SENDERS
++#define DPAA2_ETH_MAX_RX_ERR_QUEUES 1
++#define DPAA2_ETH_MAX_QUEUES (DPAA2_ETH_MAX_RX_QUEUES + \
++ DPAA2_ETH_MAX_TX_QUEUES + \
++ DPAA2_ETH_MAX_RX_ERR_QUEUES)
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
++#define DPAA2_ETH_MAX_DPCONS 16
+
-+/**
-+ * dpni_set_irq_enable() - Set overall interrupt state.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @irq_index: The interrupt index to configure
-+ * @en: Interrupt state: - enable = 1, disable = 0
-+ *
-+ * Allows GPP software to control when interrupts are generated.
-+ * Each interrupt can have up to 32 causes. The enable/disable control's the
-+ * overall interrupt state. if the interrupt is disabled no causes will cause
-+ * an interrupt.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_set_irq_enable(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 irq_index,
-+ u8 en)
++enum dpaa2_eth_fq_type {
++ DPAA2_RX_FQ = 0,
++ DPAA2_TX_CONF_FQ,
++ DPAA2_RX_ERR_FQ
++};
++
++struct dpaa2_eth_priv;
++
++struct dpaa2_eth_fq {
++ u32 fqid;
++ u32 tx_qdbin;
++ u16 flowid;
++ u8 tc;
++ int target_cpu;
++ struct dpaa2_eth_channel *channel;
++ enum dpaa2_eth_fq_type type;
++
++ void (*consume)(struct dpaa2_eth_priv *,
++ struct dpaa2_eth_channel *,
++ const struct dpaa2_fd *,
++ struct napi_struct *,
++ u16 queue_id);
++ struct dpaa2_eth_fq_stats stats;
++};
++
++struct dpaa2_eth_channel {
++ struct dpaa2_io_notification_ctx nctx;
++ struct fsl_mc_device *dpcon;
++ int dpcon_id;
++ int ch_id;
++ struct napi_struct napi;
++ struct dpaa2_io *dpio;
++ struct dpaa2_io_store *store;
++ struct dpaa2_eth_priv *priv;
++ int buf_count;
++ struct dpaa2_eth_ch_stats stats;
++ struct bpf_prog *xdp_prog;
++ u64 rel_buf_array[DPAA2_ETH_BUFS_PER_CMD];
++ u8 rel_buf_cnt;
++};
++
++struct dpaa2_eth_cls_rule {
++ struct ethtool_rx_flow_spec fs;
++ bool in_use;
++};
++
++struct dpaa2_eth_dist_fields {
++ u64 rxnfc_field;
++ enum net_prot cls_prot;
++ int cls_field;
++ int offset;
++ int size;
++ u32 id;
++};
++
++/* Driver private data */
++struct dpaa2_eth_priv {
++ struct net_device *net_dev;
++ /* Standard statistics */
++ struct rtnl_link_stats64 __percpu *percpu_stats;
++ /* Extra stats, in addition to the ones known by the kernel */
++ struct dpaa2_eth_drv_stats __percpu *percpu_extras;
++ bool ts_tx_en; /* Tx timestamping enabled */
++ bool ts_rx_en; /* Rx timestamping enabled */
++ u16 tx_data_offset;
++ u16 bpid;
++ u16 tx_qdid;
++ u16 rx_buf_align;
++ struct iommu_domain *iommu_domain;
++ int max_bufs_per_ch;
++ int refill_thresh;
++ bool has_xdp_prog;
++
++ void *cscn_mem; /* Tx congestion notifications are written here */
++ void *cscn_unaligned;
++ dma_addr_t cscn_dma;
++
++ u8 num_fqs;
++ struct dpaa2_eth_fq fq[DPAA2_ETH_MAX_QUEUES];
++
++ u8 num_channels;
++ struct dpaa2_eth_channel *channel[DPAA2_ETH_MAX_DPCONS];
++
++ struct dpni_attr dpni_attrs;
++ u16 dpni_ver_major;
++ u16 dpni_ver_minor;
++ struct fsl_mc_device *dpbp_dev;
++
++ struct fsl_mc_io *mc_io;
++ /* Cores which have an affine DPIO/DPCON.
++ * This is the set of CPUs on which Rx and Tx conf frames are processed.
++ */
++ struct cpumask dpio_cpumask;
++
++ u16 mc_token;
++
++ struct dpni_link_state link_state;
++ bool do_link_poll;
++ struct task_struct *poll_thread;
++
++ /* Rx distribution (hash and flow steering) header fields
++ * supported by the driver
++ */
++ struct dpaa2_eth_dist_fields *dist_fields;
++ u8 num_dist_fields;
++ /* enabled ethtool hashing bits */
++ u64 rx_hash_fields;
++#ifdef CONFIG_FSL_DPAA2_ETH_DEBUGFS
++ struct dpaa2_debugfs dbg;
++#endif
++ /* array of classification rules */
++ struct dpaa2_eth_cls_rule *cls_rule;
++ struct dpni_tx_shaping_cfg shaping_cfg;
++
++ u8 dcbx_mode;
++ struct ieee_pfc pfc;
++ bool vlan_clsf_set;
++ bool tx_pause_frames;
++
++ bool ceetm_en;
++};
++
++enum dpaa2_eth_rx_dist {
++ DPAA2_ETH_RX_DIST_HASH,
++ DPAA2_ETH_RX_DIST_FS,
++ DPAA2_ETH_RX_DIST_LEGACY
++};
++
++/* Supported Rx distribution field ids */
++#define DPAA2_ETH_DIST_ETHSRC BIT(0)
++#define DPAA2_ETH_DIST_ETHDST BIT(1)
++#define DPAA2_ETH_DIST_ETHTYPE BIT(2)
++#define DPAA2_ETH_DIST_VLAN BIT(3)
++#define DPAA2_ETH_DIST_IPSRC BIT(4)
++#define DPAA2_ETH_DIST_IPDST BIT(5)
++#define DPAA2_ETH_DIST_IPPROTO BIT(6)
++#define DPAA2_ETH_DIST_L4SRC BIT(7)
++#define DPAA2_ETH_DIST_L4DST BIT(8)
++#define DPAA2_ETH_DIST_ALL (~0U)
++
++/* Default Rx hash key */
++#define DPAA2_ETH_DIST_DEFAULT_HASH \
++ (DPAA2_ETH_DIST_IPPROTO | \
++ DPAA2_ETH_DIST_IPSRC | DPAA2_ETH_DIST_IPDST | \
++ DPAA2_ETH_DIST_L4SRC | DPAA2_ETH_DIST_L4DST)
++
++#define dpaa2_eth_hash_enabled(priv) \
++ ((priv)->dpni_attrs.num_queues > 1)
++
++#define dpaa2_eth_fs_enabled(priv) \
++ (!((priv)->dpni_attrs.options & DPNI_OPT_NO_FS))
++
++#define dpaa2_eth_fs_mask_enabled(priv) \
++ ((priv)->dpni_attrs.options & DPNI_OPT_HAS_KEY_MASKING)
++
++#define dpaa2_eth_fs_count(priv) \
++ ((priv)->dpni_attrs.fs_entries)
++
++/* Required by struct dpni_rx_tc_dist_cfg::key_cfg_iova */
++#define DPAA2_CLASSIFIER_DMA_SIZE 256
++
++extern const struct ethtool_ops dpaa2_ethtool_ops;
++extern const char dpaa2_eth_drv_version[];
++
++static inline int dpaa2_eth_cmp_dpni_ver(struct dpaa2_eth_priv *priv,
++ u16 ver_major, u16 ver_minor)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpni_cmd_set_irq_enable *cmd_params;
++ if (priv->dpni_ver_major == ver_major)
++ return priv->dpni_ver_minor - ver_minor;
++ return priv->dpni_ver_major - ver_major;
++}
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_IRQ_ENABLE,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpni_cmd_set_irq_enable *)cmd.params;
-+ dpni_set_field(cmd_params->enable, ENABLE, en);
-+ cmd_params->irq_index = irq_index;
++#define DPNI_DIST_KEY_VER_MAJOR 7
++#define DPNI_DIST_KEY_VER_MINOR 5
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
++static inline bool dpaa2_eth_has_legacy_dist(struct dpaa2_eth_priv *priv)
++{
++ return (dpaa2_eth_cmp_dpni_ver(priv, DPNI_DIST_KEY_VER_MAJOR,
++ DPNI_DIST_KEY_VER_MINOR) < 0);
+}
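++/* E.g. a DPNI reporting API version 7.4 takes the legacy distribution
++ * setup path, while 7.5 and newer use the current key configuration
++ * commands.
++ */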
+
-+/**
-+ * dpni_get_irq_enable() - Get overall interrupt state
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @irq_index: The interrupt index to configure
-+ * @en: Returned interrupt state - enable = 1, disable = 0
-+ *
-+ * Return: '0' on Success; Error code otherwise.
++/* The hardware only sees DPAA2_ETH_RX_BUF_SIZE, but the skb built around
++ * the buffer also needs space for its shared info struct, and we must
++ * allocate enough extra to accommodate the hardware alignment restrictions.
+ */
-+int dpni_get_irq_enable(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 irq_index,
-+ u8 *en)
++static inline unsigned int dpaa2_eth_buf_raw_size(struct dpaa2_eth_priv *priv)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpni_cmd_get_irq_enable *cmd_params;
-+ struct dpni_rsp_get_irq_enable *rsp_params;
++ return DPAA2_ETH_SKB_SIZE + priv->rx_buf_align;
++}
+
-+ int err;
++/* Total headroom needed by the hardware in Tx frame buffers */
++static inline unsigned int
++dpaa2_eth_needed_headroom(struct dpaa2_eth_priv *priv, struct sk_buff *skb)
++{
++ unsigned int headroom = DPAA2_ETH_SWA_SIZE;
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_ENABLE,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpni_cmd_get_irq_enable *)cmd.params;
-+ cmd_params->irq_index = irq_index;
++ /* If we don't have an skb (e.g. XDP buffer), we only need space for
++ * the software annotation area
++ */
++ if (!skb)
++ return headroom;
+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
++ /* For non-linear skbs we have no headroom requirement, as we build a
++ * SG frame with a newly allocated SGT buffer
++ */
++ if (skb_is_nonlinear(skb))
++ return 0;
+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpni_rsp_get_irq_enable *)cmd.params;
-+ *en = dpni_get_field(rsp_params->enabled, ENABLE);
++ /* If we have Tx timestamping, we need the 128B hardware annotation */
++ if (priv->ts_tx_en && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
++ headroom += DPAA2_ETH_TX_HWA_SIZE;
+
-+ return 0;
++ return headroom;
+}
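++/* For example: a linear skb with a hardware Tx timestamp requested needs
++ * 64B (software annotation) + 128B (hardware annotation) = 192B of
++ * headroom, while a nonlinear skb needs none, since its SGT buffer is
++ * allocated separately.
++ */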
+
-+/**
-+ * dpni_set_irq_mask() - Set interrupt mask.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @irq_index: The interrupt index to configure
-+ * @mask: event mask to trigger interrupt;
-+ * each bit:
-+ * 0 = ignore event
-+ * 1 = consider event for asserting IRQ
-+ *
-+ * Every interrupt can have up to 32 causes and the interrupt model supports
-+ * masking/unmasking each cause independently
-+ *
-+ * Return: '0' on Success; Error code otherwise.
++/* Extra headroom space requested from the hardware, in order to make sure
++ * there's no reallocation in forwarding scenarios. We must reserve enough
++ * space to accommodate the maximum required Tx offset and alignment
++ * in the ingress frame buffer.
+ */
-+int dpni_set_irq_mask(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 irq_index,
-+ u32 mask)
++static inline unsigned int dpaa2_eth_rx_headroom(struct dpaa2_eth_priv *priv)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpni_cmd_set_irq_mask *cmd_params;
++ return priv->tx_data_offset + DPAA2_ETH_TX_BUF_ALIGN -
++ DPAA2_ETH_RX_HWA_SIZE;
++}
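++/* Note that with the constants above this currently reduces to
++ * priv->tx_data_offset, since the 64B Tx alignment margin and the 64B
++ * Rx hardware annotation cancel out.
++ */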
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_IRQ_MASK,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpni_cmd_set_irq_mask *)cmd.params;
-+ cmd_params->mask = cpu_to_le32(mask);
-+ cmd_params->irq_index = irq_index;
++static inline int dpaa2_eth_queue_count(struct dpaa2_eth_priv *priv)
++{
++ return priv->dpni_attrs.num_queues;
++}
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
++static inline int dpaa2_eth_tc_count(struct dpaa2_eth_priv *priv)
++{
++ return priv->dpni_attrs.num_tcs;
+}
+
-+/**
-+ * dpni_get_irq_mask() - Get interrupt mask.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @irq_index: The interrupt index to configure
-+ * @mask: Returned event mask to trigger interrupt
-+ *
-+ * Every interrupt can have up to 32 causes and the interrupt model supports
-+ * masking/unmasking each cause independently
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_get_irq_mask(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 irq_index,
-+ u32 *mask)
++static inline bool dpaa2_eth_is_pfc_enabled(struct dpaa2_eth_priv *priv,
++ int traffic_class)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpni_cmd_get_irq_mask *cmd_params;
-+ struct dpni_rsp_get_irq_mask *rsp_params;
-+ int err;
++ return priv->pfc.pfc_en & (1 << traffic_class);
++}
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_MASK,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpni_cmd_get_irq_mask *)cmd.params;
-+ cmd_params->irq_index = irq_index;
++enum dpaa2_eth_td_cfg {
++ DPAA2_ETH_TD_NONE,
++ DPAA2_ETH_TD_QUEUE,
++ DPAA2_ETH_TD_GROUP
++};
+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
++static inline enum dpaa2_eth_td_cfg
++dpaa2_eth_get_td_type(struct dpaa2_eth_priv *priv)
++{
++ bool pfc_enabled = !!(priv->pfc.pfc_en);
+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpni_rsp_get_irq_mask *)cmd.params;
-+ *mask = le32_to_cpu(rsp_params->mask);
++ if (pfc_enabled)
++ return DPAA2_ETH_TD_GROUP;
++ else if (priv->tx_pause_frames)
++ return DPAA2_ETH_TD_NONE;
++ else
++ return DPAA2_ETH_TD_QUEUE;
++}
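++/* In short: PFC enabled => per-group taildrop (congestion is managed per
++ * traffic class); only Tx pause frames enabled => no taildrop (flow
++ * control handles it); neither => plain per-queue taildrop.
++ */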
+
-+ return 0;
++static inline int dpaa2_eth_ch_count(struct dpaa2_eth_priv *priv)
++{
++ return 1;
+}
+
-+/**
-+ * dpni_get_irq_status() - Get the current status of any pending interrupts.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @irq_index: The interrupt index to configure
-+ * @status: Returned interrupts status - one bit per cause:
-+ * 0 = no interrupt pending
-+ * 1 = interrupt pending
++void check_cls_support(struct dpaa2_eth_priv *priv);
++
++int set_rx_taildrop(struct dpaa2_eth_priv *priv);
++
++int dpaa2_eth_set_dist_key(struct dpaa2_eth_priv *priv,
++ enum dpaa2_eth_rx_dist type, u32 key_fields);
++
++#endif /* __DPAA2_H */
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c
+@@ -0,0 +1,878 @@
++/* Copyright 2014-2016 Freescale Semiconductor Inc.
++ * Copyright 2016-2017 NXP
+ *
-+ * Return: '0' on Success; Error code otherwise.
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
-+int dpni_get_irq_status(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 irq_index,
-+ u32 *status)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpni_cmd_get_irq_status *cmd_params;
-+ struct dpni_rsp_get_irq_status *rsp_params;
-+ int err;
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_STATUS,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpni_cmd_get_irq_status *)cmd.params;
-+ cmd_params->status = cpu_to_le32(*status);
-+ cmd_params->irq_index = irq_index;
++#include "dpni.h" /* DPNI_LINK_OPT_* */
++#include "dpaa2-eth.h"
+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
++/* To be kept in sync with DPNI statistics */
++static char dpaa2_ethtool_stats[][ETH_GSTRING_LEN] = {
++ "[hw] rx frames",
++ "[hw] rx bytes",
++ "[hw] rx mcast frames",
++ "[hw] rx mcast bytes",
++ "[hw] rx bcast frames",
++ "[hw] rx bcast bytes",
++ "[hw] tx frames",
++ "[hw] tx bytes",
++ "[hw] tx mcast frames",
++ "[hw] tx mcast bytes",
++ "[hw] tx bcast frames",
++ "[hw] tx bcast bytes",
++ "[hw] rx filtered frames",
++ "[hw] rx discarded frames",
++ "[hw] rx nobuffer discards",
++ "[hw] tx discarded frames",
++ "[hw] tx confirmed frames",
++};
+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpni_rsp_get_irq_status *)cmd.params;
-+ *status = le32_to_cpu(rsp_params->status);
++#define DPAA2_ETH_NUM_STATS ARRAY_SIZE(dpaa2_ethtool_stats)
+
-+ return 0;
-+}
++static char dpaa2_ethtool_extras[][ETH_GSTRING_LEN] = {
++ /* per-cpu stats */
++ "[drv] tx conf frames",
++ "[drv] tx conf bytes",
++ "[drv] tx sg frames",
++ "[drv] tx sg bytes",
++ "[drv] tx realloc frames",
++ "[drv] rx sg frames",
++ "[drv] rx sg bytes",
++ "[drv] enqueue portal busy",
++ /* Channel stats */
++ "[drv] dequeue portal busy",
++ "[drv] channel pull errors",
++ "[drv] cdan",
++ "[drv] tx congestion state",
++#ifdef CONFIG_FSL_QBMAN_DEBUG
++ /* FQ stats */
++ "rx pending frames",
++ "rx pending bytes",
++ "tx conf pending frames",
++ "tx conf pending bytes",
++ "buffer count"
++#endif
++};
+
-+/**
-+ * dpni_clear_irq_status() - Clear a pending interrupt's status
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @irq_index: The interrupt index to configure
-+ * @status: bits to clear (W1C) - one bit per cause:
-+ * 0 = don't change
-+ * 1 = clear status bit
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_clear_irq_status(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 irq_index,
-+ u32 status)
++#define DPAA2_ETH_NUM_EXTRA_STATS ARRAY_SIZE(dpaa2_ethtool_extras)
++
++static void dpaa2_eth_get_drvinfo(struct net_device *net_dev,
++ struct ethtool_drvinfo *drvinfo)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpni_cmd_clear_irq_status *cmd_params;
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLEAR_IRQ_STATUS,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpni_cmd_clear_irq_status *)cmd.params;
-+ cmd_params->irq_index = irq_index;
-+ cmd_params->status = cpu_to_le32(status);
++ strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
++ strlcpy(drvinfo->version, dpaa2_eth_drv_version,
++ sizeof(drvinfo->version));
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
++ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
++ "%u.%u", priv->dpni_ver_major, priv->dpni_ver_minor);
++
++ strlcpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent),
++ sizeof(drvinfo->bus_info));
+}
+
-+/**
-+ * dpni_get_attributes() - Retrieve DPNI attributes.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @attr: Object's attributes
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_get_attributes(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ struct dpni_attr *attr)
++static int
++dpaa2_eth_get_link_ksettings(struct net_device *net_dev,
++ struct ethtool_link_ksettings *link_settings)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpni_rsp_get_attr *rsp_params;
-+
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_ATTR,
-+ cmd_flags,
-+ token);
++ struct dpni_link_state state = {0};
++ int err = 0;
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
++ err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
++ if (err) {
++ netdev_err(net_dev, "ERROR %d getting link state\n", err);
++ goto out;
++ }
+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpni_rsp_get_attr *)cmd.params;
-+ attr->options = le32_to_cpu(rsp_params->options);
-+ attr->num_queues = rsp_params->num_queues;
-+ attr->num_tcs = rsp_params->num_tcs;
-+ attr->mac_filter_entries = rsp_params->mac_filter_entries;
-+ attr->vlan_filter_entries = rsp_params->vlan_filter_entries;
-+ attr->qos_entries = rsp_params->qos_entries;
-+ attr->fs_entries = le16_to_cpu(rsp_params->fs_entries);
-+ attr->qos_key_size = rsp_params->qos_key_size;
-+ attr->fs_key_size = rsp_params->fs_key_size;
-+ attr->wriop_version = le16_to_cpu(rsp_params->wriop_version);
++ /* At the moment, we have no way of interrogating the DPMAC
++ * from the DPNI side - and for that matter there may exist
++ * no DPMAC at all. So for now we just don't report anything
++ * beyond the DPNI attributes.
++ */
++ if (state.options & DPNI_LINK_OPT_AUTONEG)
++ link_settings->base.autoneg = AUTONEG_ENABLE;
++ if (!(state.options & DPNI_LINK_OPT_HALF_DUPLEX))
++ link_settings->base.duplex = DUPLEX_FULL;
++ link_settings->base.speed = state.rate;
+
-+ return 0;
++out:
++ return err;
+}
+
-+/**
-+ * dpni_set_errors_behavior() - Set errors behavior
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @cfg: Errors configuration
-+ *
-+ * this function may be called numerous times with different
-+ * error masks
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_set_errors_behavior(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ struct dpni_error_cfg *cfg)
++#define DPNI_DYNAMIC_LINK_SET_VER_MAJOR 7
++#define DPNI_DYNAMIC_LINK_SET_VER_MINOR 1
++static int
++dpaa2_eth_set_link_ksettings(struct net_device *net_dev,
++ const struct ethtool_link_ksettings *link_settings)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpni_cmd_set_errors_behavior *cmd_params;
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
++ struct dpni_link_state state = {0};
++ struct dpni_link_cfg cfg = {0};
++ int err = 0;
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_ERRORS_BEHAVIOR,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpni_cmd_set_errors_behavior *)cmd.params;
-+ cmd_params->errors = cpu_to_le32(cfg->errors);
-+ dpni_set_field(cmd_params->flags, ERROR_ACTION, cfg->error_action);
-+ dpni_set_field(cmd_params->flags, FRAME_ANN, cfg->set_frame_annotation);
++ /* If using an older MC version, the DPNI must be down
++ * before its link settings can be changed, so let the user
++ * know about it.
++ */
++ if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_DYNAMIC_LINK_SET_VER_MAJOR,
++ DPNI_DYNAMIC_LINK_SET_VER_MINOR) < 0) {
++ if (netif_running(net_dev)) {
++ netdev_info(net_dev, "Interface must be brought down first.\n");
++ return -EACCES;
++ }
++ }
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
++ /* Need to interrogate link state to get flow control params */
++ err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
++ if (err) {
++ netdev_err(net_dev, "Error getting link state\n");
++ goto out;
++ }
++
++ cfg.options = state.options;
++ cfg.rate = link_settings->base.speed;
++ if (link_settings->base.autoneg == AUTONEG_ENABLE)
++ cfg.options |= DPNI_LINK_OPT_AUTONEG;
++ else
++ cfg.options &= ~DPNI_LINK_OPT_AUTONEG;
++ if (link_settings->base.duplex == DUPLEX_HALF)
++ cfg.options |= DPNI_LINK_OPT_HALF_DUPLEX;
++ else
++ cfg.options &= ~DPNI_LINK_OPT_HALF_DUPLEX;
++
++ err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg);
++ if (err)
++ /* ethtool will be loud enough if we return an error; no point
++ * in putting our own error message on the console by default
++ */
++ netdev_dbg(net_dev, "ERROR %d setting link cfg\n", err);
++
++out:
++ return err;
+}
+
-+/**
-+ * dpni_get_buffer_layout() - Retrieve buffer layout attributes.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @qtype: Type of queue to retrieve configuration for
-+ * @layout: Returns buffer layout attributes
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_get_buffer_layout(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ enum dpni_queue_type qtype,
-+ struct dpni_buffer_layout *layout)
++static void dpaa2_eth_get_pauseparam(struct net_device *net_dev,
++ struct ethtool_pauseparam *pause)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpni_cmd_get_buffer_layout *cmd_params;
-+ struct dpni_rsp_get_buffer_layout *rsp_params;
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
++ struct dpni_link_state state = {0};
+ int err;
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_BUFFER_LAYOUT,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpni_cmd_get_buffer_layout *)cmd.params;
-+ cmd_params->qtype = qtype;
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
++ err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpni_rsp_get_buffer_layout *)cmd.params;
-+ layout->pass_timestamp = dpni_get_field(rsp_params->flags, PASS_TS);
-+ layout->pass_parser_result = dpni_get_field(rsp_params->flags, PASS_PR);
-+ layout->pass_frame_status = dpni_get_field(rsp_params->flags, PASS_FS);
-+ layout->private_data_size = le16_to_cpu(rsp_params->private_data_size);
-+ layout->data_align = le16_to_cpu(rsp_params->data_align);
-+ layout->data_head_room = le16_to_cpu(rsp_params->head_room);
-+ layout->data_tail_room = le16_to_cpu(rsp_params->tail_room);
++ netdev_dbg(net_dev, "Error getting link state\n");
+
-+ return 0;
++ /* Report general port autonegotiation status */
++ pause->autoneg = !!(state.options & DPNI_LINK_OPT_AUTONEG);
++ pause->rx_pause = !!(state.options & DPNI_LINK_OPT_PAUSE);
++ pause->tx_pause = pause->rx_pause ^
++ !!(state.options & DPNI_LINK_OPT_ASYM_PAUSE);
+}
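++/* The (PAUSE, ASYM_PAUSE) option bits above encode four pause settings:
++ * (1, 0) = rx + tx pause, (1, 1) = rx pause only, (0, 1) = tx pause only
++ * and (0, 0) = no pause; hence the XOR used to derive tx_pause.
++ */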
+
-+/**
-+ * dpni_set_buffer_layout() - Set buffer layout configuration.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @qtype: Type of queue this configuration applies to
-+ * @layout: Buffer layout configuration
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ *
-+ * @warning Allowed only when DPNI is disabled
-+ */
-+int dpni_set_buffer_layout(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ enum dpni_queue_type qtype,
-+ const struct dpni_buffer_layout *layout)
++static int dpaa2_eth_set_pauseparam(struct net_device *net_dev,
++ struct ethtool_pauseparam *pause)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpni_cmd_set_buffer_layout *cmd_params;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_BUFFER_LAYOUT,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpni_cmd_set_buffer_layout *)cmd.params;
-+ cmd_params->qtype = qtype;
-+ cmd_params->options = cpu_to_le16(layout->options);
-+ dpni_set_field(cmd_params->flags, PASS_TS, layout->pass_timestamp);
-+ dpni_set_field(cmd_params->flags, PASS_PR, layout->pass_parser_result);
-+ dpni_set_field(cmd_params->flags, PASS_FS, layout->pass_frame_status);
-+ cmd_params->private_data_size = cpu_to_le16(layout->private_data_size);
-+ cmd_params->data_align = cpu_to_le16(layout->data_align);
-+ cmd_params->head_room = cpu_to_le16(layout->data_head_room);
-+ cmd_params->tail_room = cpu_to_le16(layout->data_tail_room);
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
++ struct dpni_link_state state = {0};
++ struct dpni_link_cfg cfg = {0};
++ u32 current_tx_pause;
++ int err = 0;
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
++ err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
++ if (err) {
++ netdev_dbg(net_dev, "Error getting link state\n");
++ goto out;
++ }
+
-+/**
-+ * dpni_set_offload() - Set DPNI offload configuration.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @type: Type of DPNI offload
-+ * @config: Offload configuration.
-+ * For checksum offloads, non-zero value enables the offload
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ *
-+ * @warning Allowed only when DPNI is disabled
-+ */
++ cfg.rate = state.rate;
++ cfg.options = state.options;
++ current_tx_pause = !!(cfg.options & DPNI_LINK_OPT_PAUSE) ^
++ !!(cfg.options & DPNI_LINK_OPT_ASYM_PAUSE);
+
-+int dpni_set_offload(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ enum dpni_offload type,
-+ u32 config)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpni_cmd_set_offload *cmd_params;
++ /* We don't support changing pause frame autonegotiation separately
++ * from general port autoneg
++ */
++ if (pause->autoneg != !!(state.options & DPNI_LINK_OPT_AUTONEG))
++ netdev_warn(net_dev,
++ "Cannot change pause frame autoneg separately\n");
+
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_OFFLOAD,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpni_cmd_set_offload *)cmd.params;
-+ cmd_params->dpni_offload = type;
-+ cmd_params->config = cpu_to_le32(config);
++ if (pause->rx_pause)
++ cfg.options |= DPNI_LINK_OPT_PAUSE;
++ else
++ cfg.options &= ~DPNI_LINK_OPT_PAUSE;
+
-+ return mc_send_command(mc_io, &cmd);
-+}
++ if (pause->rx_pause ^ pause->tx_pause)
++ cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
++ else
++ cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
+
-+int dpni_get_offload(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ enum dpni_offload type,
-+ u32 *config)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpni_cmd_get_offload *cmd_params;
-+ struct dpni_rsp_get_offload *rsp_params;
-+ int err;
++ err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg);
++ if (err) {
++ netdev_dbg(net_dev, "Error setting link\n");
++ goto out;
++ }
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_OFFLOAD,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpni_cmd_get_offload *)cmd.params;
-+ cmd_params->dpni_offload = type;
++ /* Enable/disable Rx FQ taildrop if Tx pause frames have changed */
++ if (current_tx_pause == pause->tx_pause)
++ goto out;
+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
++ priv->tx_pause_frames = pause->tx_pause;
++ err = set_rx_taildrop(priv);
+ if (err)
-+ return err;
++ netdev_dbg(net_dev, "Error configuring taildrop\n");
+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpni_rsp_get_offload *)cmd.params;
-+ *config = le32_to_cpu(rsp_params->config);
-+
-+ return 0;
++out:
++ return err;
+}
+
-+/**
-+ * dpni_get_qdid() - Get the Queuing Destination ID (QDID) that should be used
-+ * for enqueue operations
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @qtype: Type of queue to receive QDID for
-+ * @qdid: Returned virtual QDID value that should be used as an argument
-+ * in all enqueue operations
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_get_qdid(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ enum dpni_queue_type qtype,
-+ u16 *qdid)
++static void dpaa2_eth_get_strings(struct net_device *netdev, u32 stringset,
++ u8 *data)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpni_cmd_get_qdid *cmd_params;
-+ struct dpni_rsp_get_qdid *rsp_params;
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_QDID,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpni_cmd_get_qdid *)cmd.params;
-+ cmd_params->qtype = qtype;
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpni_rsp_get_qdid *)cmd.params;
-+ *qdid = le16_to_cpu(rsp_params->qdid);
++ u8 *p = data;
++ int i;
+
-+ return 0;
++ switch (stringset) {
++ case ETH_SS_STATS:
++ for (i = 0; i < DPAA2_ETH_NUM_STATS; i++) {
++ strlcpy(p, dpaa2_ethtool_stats[i], ETH_GSTRING_LEN);
++ p += ETH_GSTRING_LEN;
++ }
++ for (i = 0; i < DPAA2_ETH_NUM_EXTRA_STATS; i++) {
++ strlcpy(p, dpaa2_ethtool_extras[i], ETH_GSTRING_LEN);
++ p += ETH_GSTRING_LEN;
++ }
++ break;
++ }
+}
+
-+/**
-+ * dpni_get_tx_data_offset() - Get the Tx data offset (from start of buffer)
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @data_offset: Tx data offset (from start of buffer)
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_get_tx_data_offset(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 *data_offset)
++static int dpaa2_eth_get_sset_count(struct net_device *net_dev, int sset)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpni_rsp_get_tx_data_offset *rsp_params;
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TX_DATA_OFFSET,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpni_rsp_get_tx_data_offset *)cmd.params;
-+ *data_offset = le16_to_cpu(rsp_params->data_offset);
-+
-+ return 0;
++ switch (sset) {
++ case ETH_SS_STATS: /* ethtool_get_stats(), ethtool_get_drvinfo() */
++ return DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS;
++ default:
++ return -EOPNOTSUPP;
++ }
+}
+
-+/**
-+ * dpni_set_link_cfg() - set the link configuration.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @cfg: Link configuration
-+ *
-+ * Return: '0' on Success; Error code otherwise.
++/* Fill in hardware counters, as returned by the MC
+ */
-+int dpni_set_link_cfg(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ const struct dpni_link_cfg *cfg)
++static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
++ struct ethtool_stats *stats,
++ u64 *data)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpni_cmd_set_link_cfg *cmd_params;
++ int i = 0;
++ int j, k, err;
++ int num_cnt;
++ union dpni_statistics dpni_stats;
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_LINK_CFG,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpni_cmd_set_link_cfg *)cmd.params;
-+ cmd_params->rate = cpu_to_le32(cfg->rate);
-+ cmd_params->options = cpu_to_le64(cfg->options);
++#ifdef CONFIG_FSL_QBMAN_DEBUG
++ u32 fcnt, bcnt;
++ u32 fcnt_rx_total = 0, fcnt_tx_total = 0;
++ u32 bcnt_rx_total = 0, bcnt_tx_total = 0;
++ u32 buf_cnt;
++#endif
++ u64 cdan = 0;
++ u64 portal_busy = 0, pull_err = 0;
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
++ struct dpaa2_eth_drv_stats *extras;
++ struct dpaa2_eth_ch_stats *ch_stats;
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
++ memset(data, 0,
++ sizeof(u64) * (DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS));
+
-+/**
-+ * dpni_get_link_state() - Return the link state (either up or down)
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @state: Returned link state;
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_get_link_state(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ struct dpni_link_state *state)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpni_rsp_get_link_state *rsp_params;
-+ int err;
++ /* Gather standard counters from the DPNI statistics pages */
++ for (j = 0; j <= 2; j++) {
++ err = dpni_get_statistics(priv->mc_io, 0, priv->mc_token,
++ j, 0, &dpni_stats);
++ if (err != 0)
++ netdev_warn(net_dev, "dpni_get_stats(%d) failed\n", j);
++ switch (j) {
++ case 0:
++ num_cnt = sizeof(dpni_stats.page_0) / sizeof(u64);
++ break;
++ case 1:
++ num_cnt = sizeof(dpni_stats.page_1) / sizeof(u64);
++ break;
++ case 2:
++ num_cnt = sizeof(dpni_stats.page_2) / sizeof(u64);
++ break;
++ }
++ for (k = 0; k < num_cnt; k++)
++ *(data + i++) = dpni_stats.raw.counter[k];
++ }
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_LINK_STATE,
-+ cmd_flags,
-+ token);
++ /* Gather per-CPU extra stats */
++ for_each_online_cpu(k) {
++ extras = per_cpu_ptr(priv->percpu_extras, k);
++ for (j = 0; j < sizeof(*extras) / sizeof(__u64); j++)
++ *((__u64 *)data + i + j) += *((__u64 *)extras + j);
++ }
++ i += j;
+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
++ for (j = 0; j < priv->num_channels; j++) {
++ ch_stats = &priv->channel[j]->stats;
++ cdan += ch_stats->cdan;
++ portal_busy += ch_stats->dequeue_portal_busy;
++ pull_err += ch_stats->pull_err;
++ }
+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpni_rsp_get_link_state *)cmd.params;
-+ state->up = dpni_get_field(rsp_params->flags, LINK_STATE);
-+ state->rate = le32_to_cpu(rsp_params->rate);
-+ state->options = le64_to_cpu(rsp_params->options);
++ *(data + i++) = portal_busy;
++ *(data + i++) = pull_err;
++ *(data + i++) = cdan;
+
-+ return 0;
++ *(data + i++) = dpaa2_cscn_state_congested(priv->cscn_mem);
++
++#ifdef CONFIG_FSL_QBMAN_DEBUG
++ for (j = 0; j < priv->num_fqs; j++) {
++ /* Gather FQ instantaneous counts */
++ err = dpaa2_io_query_fq_count(NULL, priv->fq[j].fqid,
++ &fcnt, &bcnt);
++ if (err) {
++ netdev_warn(net_dev, "FQ query error %d", err);
++ return;
++ }
++
++ if (priv->fq[j].type == DPAA2_TX_CONF_FQ) {
++ fcnt_tx_total += fcnt;
++ bcnt_tx_total += bcnt;
++ } else {
++ fcnt_rx_total += fcnt;
++ bcnt_rx_total += bcnt;
++ }
++ }
++
++ *(data + i++) = fcnt_rx_total;
++ *(data + i++) = bcnt_rx_total;
++ *(data + i++) = fcnt_tx_total;
++ *(data + i++) = bcnt_tx_total;
++
++ err = dpaa2_io_query_bp_count(NULL, priv->bpid, &buf_cnt);
++ if (err) {
++ netdev_warn(net_dev, "Buffer count query error %d\n", err);
++ return;
++ }
++ *(data + i++) = buf_cnt;
++#endif
+}
+
-+/**
-+ * dpni_set_tx_shaping() - Set the transmit shaping
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @tx_shaper: tx shaping configuration
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_set_tx_shaping(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ const struct dpni_tx_shaping_cfg *tx_shaper)
++static int cls_key_off(struct dpaa2_eth_priv *priv, int prot, int field)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpni_cmd_set_tx_shaping *cmd_params;
++ int i, off = 0;
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_SHAPING,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpni_cmd_set_tx_shaping *)cmd.params;
-+ cmd_params->max_burst_size = cpu_to_le16(tx_shaper->max_burst_size);
-+ cmd_params->rate_limit = cpu_to_le32(tx_shaper->rate_limit);
++ for (i = 0; i < priv->num_dist_fields; i++) {
++ if (priv->dist_fields[i].cls_prot == prot &&
++ priv->dist_fields[i].cls_field == field)
++ return off;
++ off += priv->dist_fields[i].size;
++ }
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
++ return -1;
+}
+
-+/**
-+ * dpni_set_max_frame_length() - Set the maximum received frame length.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @max_frame_length: Maximum received frame length (in
-+ * bytes); frame is discarded if its
-+ * length exceeds this value
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_set_max_frame_length(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 max_frame_length)
++static u8 cls_key_size(struct dpaa2_eth_priv *priv)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpni_cmd_set_max_frame_length *cmd_params;
++ u8 i, size = 0;
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_MAX_FRAME_LENGTH,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpni_cmd_set_max_frame_length *)cmd.params;
-+ cmd_params->max_frame_length = cpu_to_le16(max_frame_length);
++ for (i = 0; i < priv->num_dist_fields; i++)
++ size += priv->dist_fields[i].size;
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
++ return size;
+}
+
-+/**
-+ * dpni_get_max_frame_length() - Get the maximum received frame length.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @max_frame_length: Maximum received frame length (in
-+ * bytes); frame is discarded if its
-+ * length exceeds this value
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_get_max_frame_length(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 *max_frame_length)
++void check_cls_support(struct dpaa2_eth_priv *priv)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpni_rsp_get_max_frame_length *rsp_params;
-+ int err;
++ u8 key_size = cls_key_size(priv);
++ struct device *dev = priv->net_dev->dev.parent;
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_MAX_FRAME_LENGTH,
-+ cmd_flags,
-+ token);
++ if (dpaa2_eth_hash_enabled(priv)) {
++ if (priv->dpni_attrs.fs_key_size < key_size) {
++ dev_info(dev, "max_dist_key_size = %d, expected %d. Hashing and steering are disabled\n",
++ priv->dpni_attrs.fs_key_size,
++ key_size);
++ goto disable_fs;
++ }
++ if (priv->num_dist_fields > DPKG_MAX_NUM_OF_EXTRACTS) {
++ dev_info(dev, "Too many key fields (max = %d). Hashing and steering are disabled\n",
++ DPKG_MAX_NUM_OF_EXTRACTS);
++ goto disable_fs;
++ }
++ }
+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
++ if (dpaa2_eth_fs_enabled(priv)) {
++ if (!dpaa2_eth_hash_enabled(priv)) {
++ dev_info(dev, "Insufficient queues. Steering is disabled\n");
++ goto disable_fs;
++ }
+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpni_rsp_get_max_frame_length *)cmd.params;
-+ *max_frame_length = le16_to_cpu(rsp_params->max_frame_length);
++ if (!dpaa2_eth_fs_mask_enabled(priv)) {
++ dev_info(dev, "Key masks not supported. Steering is disabled\n");
++ goto disable_fs;
++ }
++ }
+
-+ return 0;
++ return;
++
++disable_fs:
++ priv->dpni_attrs.options |= DPNI_OPT_NO_FS;
++ priv->dpni_attrs.options &= ~DPNI_OPT_HAS_KEY_MASKING;
+}
+
-+/**
-+ * dpni_set_multicast_promisc() - Enable/disable multicast promiscuous mode
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @en: Set to '1' to enable; '0' to disable
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_set_multicast_promisc(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ int en)
++static int prep_l4_rule(struct dpaa2_eth_priv *priv,
++ struct ethtool_tcpip4_spec *l4_value,
++ struct ethtool_tcpip4_spec *l4_mask,
++ void *key, void *mask, u8 l4_proto)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpni_cmd_set_multicast_promisc *cmd_params;
++ int offset;
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_MCAST_PROMISC,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpni_cmd_set_multicast_promisc *)cmd.params;
-+ dpni_set_field(cmd_params->enable, ENABLE, en);
++ if (l4_mask->tos) {
++ netdev_err(priv->net_dev, "ToS is not supported for IPv4 L4\n");
++ return -EOPNOTSUPP;
++ }
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
++ if (l4_mask->ip4src) {
++ offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_SRC);
++ *(u32 *)(key + offset) = l4_value->ip4src;
++ *(u32 *)(mask + offset) = l4_mask->ip4src;
++ }
+
-+/**
-+ * dpni_get_multicast_promisc() - Get multicast promiscuous mode
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @en: Returns '1' if enabled; '0' otherwise
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_get_multicast_promisc(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ int *en)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpni_rsp_get_multicast_promisc *rsp_params;
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_MCAST_PROMISC,
-+ cmd_flags,
-+ token);
++ if (l4_mask->ip4dst) {
++ offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_DST);
++ *(u32 *)(key + offset) = l4_value->ip4dst;
++ *(u32 *)(mask + offset) = l4_mask->ip4dst;
++ }
+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
++ if (l4_mask->psrc) {
++ offset = cls_key_off(priv, NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
++ *(u32 *)(key + offset) = l4_value->psrc;
++ *(u32 *)(mask + offset) = l4_mask->psrc;
++ }
+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpni_rsp_get_multicast_promisc *)cmd.params;
-+ *en = dpni_get_field(rsp_params->enabled, ENABLE);
++ if (l4_mask->pdst) {
++ offset = cls_key_off(priv, NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
++ *(u32 *)(key + offset) = l4_value->pdst;
++ *(u32 *)(mask + offset) = l4_mask->pdst;
++ }
+
-+ return 0;
-+}
++ /* Apply the rule only for the user-specified L4 protocol,
++ * and only if the ethertype matches IPv4.
++ */
++ offset = cls_key_off(priv, NET_PROT_ETH, NH_FLD_ETH_TYPE);
++ *(u16 *)(key + offset) = htons(ETH_P_IP);
++ *(u16 *)(mask + offset) = 0xFFFF;
+
-+/**
-+ * dpni_set_unicast_promisc() - Enable/disable unicast promiscuous mode
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @en: Set to '1' to enable; '0' to disable
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_set_unicast_promisc(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ int en)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpni_cmd_set_unicast_promisc *cmd_params;
++ offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_PROTO);
++ *(u8 *)(key + offset) = l4_proto;
++ *(u8 *)(mask + offset) = 0xFF;
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_UNICAST_PROMISC,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpni_cmd_set_unicast_promisc *)cmd.params;
-+ dpni_set_field(cmd_params->enable, ENABLE, en);
++ /* TODO: check IP version */
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
++ return 0;
+}
+
-+/**
-+ * dpni_get_unicast_promisc() - Get unicast promiscuous mode
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @en: Returns '1' if enabled; '0' otherwise
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_get_unicast_promisc(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ int *en)
++static int prep_eth_rule(struct dpaa2_eth_priv *priv,
++ struct ethhdr *eth_value, struct ethhdr *eth_mask,
++ void *key, void *mask)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpni_rsp_get_unicast_promisc *rsp_params;
-+ int err;
++ int offset;
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_UNICAST_PROMISC,
-+ cmd_flags,
-+ token);
++ if (eth_mask->h_proto) {
++ netdev_err(priv->net_dev, "Ethertype is not supported!\n");
++ return -EOPNOTSUPP;
++ }
+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
++ if (!is_zero_ether_addr(eth_mask->h_source)) {
++ offset = cls_key_off(priv, NET_PROT_ETH, NH_FLD_ETH_SA);
++ ether_addr_copy(key + offset, eth_value->h_source);
++ ether_addr_copy(mask + offset, eth_mask->h_source);
++ }
+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpni_rsp_get_unicast_promisc *)cmd.params;
-+ *en = dpni_get_field(rsp_params->enabled, ENABLE);
++ if (!is_zero_ether_addr(eth_mask->h_dest)) {
++ offset = cls_key_off(priv, NET_PROT_ETH, NH_FLD_ETH_DA);
++ ether_addr_copy(key + offset, eth_value->h_dest);
++ ether_addr_copy(mask + offset, eth_mask->h_dest);
++ }
+
+ return 0;
+}
+
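++/* Illustrative example (not part of the driver): an ETHER_FLOW rule such
++ * as "ethtool -N <iface> flow-type ether dst 00:04:9f:00:00:01 action 1"
++ * sets only eth_mask->h_dest, so prep_eth_rule() copies just the
++ * destination MAC into the key/mask pair; a mask on the ethertype is
++ * rejected above.
++ */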
-+/**
-+ * dpni_set_primary_mac_addr() - Set the primary MAC address
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @mac_addr: MAC address to set as primary address
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_set_primary_mac_addr(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ const u8 mac_addr[6])
++static int prep_user_ip_rule(struct dpaa2_eth_priv *priv,
++ struct ethtool_usrip4_spec *uip_value,
++ struct ethtool_usrip4_spec *uip_mask,
++ void *key, void *mask)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpni_cmd_set_primary_mac_addr *cmd_params;
-+ int i;
++ int offset;
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_PRIM_MAC,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpni_cmd_set_primary_mac_addr *)cmd.params;
-+ for (i = 0; i < 6; i++)
-+ cmd_params->mac_addr[i] = mac_addr[5 - i];
++ if (uip_mask->tos)
++ return -EOPNOTSUPP;
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
++ if (uip_mask->ip4src) {
++ offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_SRC);
++ *(u32 *)(key + offset) = uip_value->ip4src;
++ *(u32 *)(mask + offset) = uip_mask->ip4src;
++ }
+
-+/**
-+ * dpni_get_primary_mac_addr() - Get the primary MAC address
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @mac_addr: Returned MAC address
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_get_primary_mac_addr(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 mac_addr[6])
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpni_rsp_get_primary_mac_addr *rsp_params;
-+ int i, err;
++ if (uip_mask->ip4dst) {
++ offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_DST);
++ *(u32 *)(key + offset) = uip_value->ip4dst;
++ *(u32 *)(mask + offset) = uip_mask->ip4dst;
++ }
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_PRIM_MAC,
-+ cmd_flags,
-+ token);
++ if (uip_mask->proto) {
++ offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_PROTO);
++ *(u8 *)(key + offset) = uip_value->proto;
++ *(u8 *)(mask + offset) = uip_mask->proto;
++ }
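++
++ /* The ethtool user-IP spec packs both L4 ports into l4_4_bytes as a
++ * big-endian word: source port in the upper 16 bits, destination
++ * port in the lower. Convert to host order before splitting, then
++ * store each half back in network order, since the key bytes are
++ * compared against the packet as-is.
++ */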
++ if (uip_mask->l4_4_bytes) {
++ offset = cls_key_off(priv, NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
++ *(u16 *)(key + offset) = htons(ntohl(uip_value->l4_4_bytes) >> 16);
++ *(u16 *)(mask + offset) = htons(ntohl(uip_mask->l4_4_bytes) >> 16);
+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
++ offset = cls_key_off(priv, NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
++ *(u16 *)(key + offset) = htons(ntohl(uip_value->l4_4_bytes) & 0xFFFF);
++ *(u16 *)(mask + offset) = htons(ntohl(uip_mask->l4_4_bytes) & 0xFFFF);
++ }
+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpni_rsp_get_primary_mac_addr *)cmd.params;
-+ for (i = 0; i < 6; i++)
-+ mac_addr[5 - i] = rsp_params->mac_addr[i];
++ /* Ethertype must be IP */
++ offset = cls_key_off(priv, NET_PROT_ETH, NH_FLD_ETH_TYPE);
++ *(u16 *)(key + offset) = htons(ETH_P_IP);
++ *(u16 *)(mask + offset) = 0xFFFF;
+
+ return 0;
+}
+
-+/**
-+ * dpni_get_port_mac_addr() - Retrieve MAC address associated to the physical
-+ * port the DPNI is attached to
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @mac_addr: MAC address of the physical port, if any, otherwise 0
-+ *
-+ * The primary MAC address is not cleared by this operation.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_get_port_mac_addr(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 mac_addr[6])
++static int prep_ext_rule(struct dpaa2_eth_priv *priv,
++ struct ethtool_flow_ext *ext_value,
++ struct ethtool_flow_ext *ext_mask,
++ void *key, void *mask)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpni_rsp_get_port_mac_addr *rsp_params;
-+ int i, err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_PORT_MAC_ADDR,
-+ cmd_flags,
-+ token);
++ int offset;
+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
++ if (ext_mask->vlan_etype)
++ return -EOPNOTSUPP;
+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpni_rsp_get_port_mac_addr *)cmd.params;
-+ for (i = 0; i < 6; i++)
-+ mac_addr[5 - i] = rsp_params->mac_addr[i];
++ if (ext_mask->vlan_tci) {
++ offset = cls_key_off(priv, NET_PROT_VLAN, NH_FLD_VLAN_TCI);
++ *(u16 *)(key + offset) = ext_value->vlan_tci;
++ *(u16 *)(mask + offset) = ext_mask->vlan_tci;
++ }
+
+ return 0;
+}
+
-+/**
-+ * dpni_add_mac_addr() - Add MAC address filter
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @mac_addr: MAC address to add
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_add_mac_addr(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ const u8 mac_addr[6])
++static int prep_mac_ext_rule(struct dpaa2_eth_priv *priv,
++ struct ethtool_flow_ext *ext_value,
++ struct ethtool_flow_ext *ext_mask,
++ void *key, void *mask)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpni_cmd_add_mac_addr *cmd_params;
-+ int i;
++ int offset;
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_MAC_ADDR,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpni_cmd_add_mac_addr *)cmd.params;
-+ for (i = 0; i < 6; i++)
-+ cmd_params->mac_addr[i] = mac_addr[5 - i];
++ if (!is_zero_ether_addr(ext_mask->h_dest)) {
++ offset = cls_key_off(priv, NET_PROT_ETH, NH_FLD_ETH_DA);
++ ether_addr_copy(key + offset, ext_value->h_dest);
++ ether_addr_copy(mask + offset, ext_mask->h_dest);
++ }
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
++ return 0;
+}
+
-+/**
-+ * dpni_remove_mac_addr() - Remove MAC address filter
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @mac_addr: MAC address to remove
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_remove_mac_addr(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ const u8 mac_addr[6])
++static int prep_cls_rule(struct net_device *net_dev,
++ struct ethtool_rx_flow_spec *fs,
++ void *key)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpni_cmd_remove_mac_addr *cmd_params;
-+ int i;
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
++ const u8 key_size = cls_key_size(priv);
++ void *msk = key + key_size;
++ int err;
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_MAC_ADDR,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpni_cmd_remove_mac_addr *)cmd.params;
-+ for (i = 0; i < 6; i++)
-+ cmd_params->mac_addr[i] = mac_addr[5 - i];
++ memset(key, 0, key_size * 2);
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
++ switch (fs->flow_type & 0xff) {
++ case TCP_V4_FLOW:
++ err = prep_l4_rule(priv, &fs->h_u.tcp_ip4_spec,
++ &fs->m_u.tcp_ip4_spec, key, msk,
++ IPPROTO_TCP);
++ break;
++ case UDP_V4_FLOW:
++ err = prep_l4_rule(priv, &fs->h_u.udp_ip4_spec,
++ &fs->m_u.udp_ip4_spec, key, msk,
++ IPPROTO_UDP);
++ break;
++ case SCTP_V4_FLOW:
++ err = prep_l4_rule(priv, &fs->h_u.sctp_ip4_spec,
++ &fs->m_u.sctp_ip4_spec, key, msk,
++ IPPROTO_SCTP);
++ break;
++ case ETHER_FLOW:
++ err = prep_eth_rule(priv, &fs->h_u.ether_spec,
++ &fs->m_u.ether_spec, key, msk);
++ break;
++ case IP_USER_FLOW:
++ err = prep_user_ip_rule(priv, &fs->h_u.usr_ip4_spec,
++ &fs->m_u.usr_ip4_spec, key, msk);
++ break;
++ default:
++ /* TODO: AH, ESP */
++ return -EOPNOTSUPP;
++ }
++ if (err)
++ return err;
+
-+/**
-+ * dpni_clear_mac_filters() - Clear all unicast and/or multicast MAC filters
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @unicast: Set to '1' to clear unicast addresses
-+ * @multicast: Set to '1' to clear multicast addresses
-+ *
-+ * The primary MAC address is not cleared by this operation.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_clear_mac_filters(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ int unicast,
-+ int multicast)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpni_cmd_clear_mac_filters *cmd_params;
++ if (fs->flow_type & FLOW_EXT) {
++ err = prep_ext_rule(priv, &fs->h_ext, &fs->m_ext, key, msk);
++ if (err)
++ return err;
++ }
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLR_MAC_FILTERS,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpni_cmd_clear_mac_filters *)cmd.params;
-+ dpni_set_field(cmd_params->flags, UNICAST_FILTERS, unicast);
-+ dpni_set_field(cmd_params->flags, MULTICAST_FILTERS, multicast);
++ if (fs->flow_type & FLOW_MAC_EXT) {
++ err = prep_mac_ext_rule(priv, &fs->h_ext, &fs->m_ext, key, msk);
++ if (err)
++ return err;
++ }
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
++ return 0;
+}
+
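++/* Layout of the buffer filled in by prep_cls_rule(), assuming the caller
++ * allocated 2 * cls_key_size(priv) bytes, as do_cls() below does:
++ *
++ * +-------------------+-------------------+
++ * | key | mask |
++ * +-------------------+-------------------+
++ * 0 key_size 2 * key_size
++ *
++ * Both halves follow the same key composition, so a field's
++ * cls_key_off() offset is valid in either half.
++ */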
-+/**
-+ * dpni_set_rx_tc_dist() - Set Rx traffic class distribution configuration
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @tc_id: Traffic class selection (0-7)
-+ * @cfg: Traffic class distribution configuration
-+ *
-+ * warning: if 'dist_mode != DPNI_DIST_MODE_NONE', call dpni_prepare_key_cfg()
-+ * first to prepare the key_cfg_iova parameter
-+ *
-+ * Return: '0' on Success; error code otherwise.
-+ */
-+int dpni_set_rx_tc_dist(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 tc_id,
-+ const struct dpni_rx_tc_dist_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpni_cmd_set_rx_tc_dist *cmd_params;
++static int del_cls(struct net_device *net_dev, int location);
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_TC_DIST,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpni_cmd_set_rx_tc_dist *)cmd.params;
-+ cmd_params->dist_size = cpu_to_le16(cfg->dist_size);
-+ cmd_params->tc_id = tc_id;
-+ dpni_set_field(cmd_params->flags, DIST_MODE, cfg->dist_mode);
-+ dpni_set_field(cmd_params->flags, MISS_ACTION, cfg->fs_cfg.miss_action);
-+ cmd_params->default_flow_id = cpu_to_le16(cfg->fs_cfg.default_flow_id);
-+ cmd_params->key_cfg_iova = cpu_to_le64(cfg->key_cfg_iova);
++static int do_cls(struct net_device *net_dev,
++ struct ethtool_rx_flow_spec *fs,
++ bool add)
++{
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
++ struct device *dev = net_dev->dev.parent;
++ const int rule_cnt = dpaa2_eth_fs_count(priv);
++ struct dpni_rule_cfg rule_cfg;
++ struct dpni_fs_action_cfg fs_act = { 0 };
++ void *dma_mem;
++ int err = 0, tc;
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
++ if (!dpaa2_eth_fs_enabled(priv)) {
++ netdev_err(net_dev, "dev does not support steering!\n");
++ return -EOPNOTSUPP;
++ }
+
-+/**
-+ * dpni_add_fs_entry() - Add Flow Steering entry for a specific traffic class
-+ * (to select a flow ID)
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @tc_id: Traffic class selection (0-7)
-+ * @index: Location in the QoS table where to insert the entry.
-+ * Only relevant if MASKING is enabled for QoS
-+ * classification on this DPNI, it is ignored for exact match.
-+ * @cfg: Flow steering rule to add
-+ * @action: Action to be taken as result of a classification hit
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_add_fs_entry(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 tc_id,
-+ u16 index,
-+ const struct dpni_rule_cfg *cfg,
-+ const struct dpni_fs_action_cfg *action)
-+{
-+ struct dpni_cmd_add_fs_entry *cmd_params;
-+ struct mc_command cmd = { 0 };
++ if ((fs->ring_cookie != RX_CLS_FLOW_DISC &&
++ fs->ring_cookie >= dpaa2_eth_queue_count(priv)) ||
++ fs->location >= rule_cnt)
++ return -EINVAL;
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_FS_ENT,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpni_cmd_add_fs_entry *)cmd.params;
-+ cmd_params->tc_id = tc_id;
-+ cmd_params->key_size = cfg->key_size;
-+ cmd_params->index = cpu_to_le16(index);
-+ cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
-+ cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova);
-+ cmd_params->options = cpu_to_le16(action->options);
-+ cmd_params->flow_id = cpu_to_le16(action->flow_id);
-+ cmd_params->flc = cpu_to_le64(action->flc);
++ /* When adding a new rule, check if the location is available
++ * and, if not, free the existing table entry before inserting
++ * the new one
++ */
++ if (add && priv->cls_rule[fs->location].in_use)
++ del_cls(net_dev, fs->location);
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
++ memset(&rule_cfg, 0, sizeof(rule_cfg));
++ rule_cfg.key_size = cls_key_size(priv);
+
-+/**
-+ * dpni_remove_fs_entry() - Remove Flow Steering entry from a specific
-+ * traffic class
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @tc_id: Traffic class selection (0-7)
-+ * @cfg: Flow steering rule to remove
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_remove_fs_entry(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 tc_id,
-+ const struct dpni_rule_cfg *cfg)
-+{
-+ struct dpni_cmd_remove_fs_entry *cmd_params;
-+ struct mc_command cmd = { 0 };
++ /* allocate twice the key size, for the actual key and for the mask */
++ dma_mem = kzalloc(rule_cfg.key_size * 2, GFP_DMA | GFP_KERNEL);
++ if (!dma_mem)
++ return -ENOMEM;
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_FS_ENT,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpni_cmd_remove_fs_entry *)cmd.params;
-+ cmd_params->tc_id = tc_id;
-+ cmd_params->key_size = cfg->key_size;
-+ cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
-+ cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova);
++ err = prep_cls_rule(net_dev, fs, dma_mem);
++ if (err)
++ goto err_free_mem;
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
++ rule_cfg.key_iova = dma_map_single(dev, dma_mem,
++ rule_cfg.key_size * 2,
++ DMA_TO_DEVICE);
++ if (dma_mapping_error(dev, rule_cfg.key_iova)) {
++ err = -ENOMEM;
++ goto err_free_mem;
++ }
+
-+/**
-+ * dpni_set_congestion_notification() - Set traffic class congestion
-+ * notification configuration
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @qtype: Type of queue - Rx, Tx and Tx confirm types are supported
-+ * @tc_id: Traffic class selection (0-7)
-+ * @cfg: congestion notification configuration
-+ *
-+ * Return: '0' on Success; error code otherwise.
-+ */
-+int dpni_set_congestion_notification(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ enum dpni_queue_type qtype,
-+ u8 tc_id,
-+ const struct dpni_congestion_notification_cfg *cfg)
-+{
-+ struct dpni_cmd_set_congestion_notification *cmd_params;
-+ struct mc_command cmd = { 0 };
++ rule_cfg.mask_iova = rule_cfg.key_iova + rule_cfg.key_size;
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(
-+ DPNI_CMDID_SET_CONGESTION_NOTIFICATION,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpni_cmd_set_congestion_notification *)cmd.params;
-+ cmd_params->qtype = qtype;
-+ cmd_params->tc = tc_id;
-+ cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
-+ cmd_params->notification_mode = cpu_to_le16(cfg->notification_mode);
-+ cmd_params->dest_priority = cfg->dest_cfg.priority;
-+ dpni_set_field(cmd_params->type_units, DEST_TYPE,
-+ cfg->dest_cfg.dest_type);
-+ dpni_set_field(cmd_params->type_units, CONG_UNITS, cfg->units);
-+ cmd_params->message_iova = cpu_to_le64(cfg->message_iova);
-+ cmd_params->message_ctx = cpu_to_le64(cfg->message_ctx);
-+ cmd_params->threshold_entry = cpu_to_le32(cfg->threshold_entry);
-+ cmd_params->threshold_exit = cpu_to_le32(cfg->threshold_exit);
++ if (fs->ring_cookie == RX_CLS_FLOW_DISC)
++ fs_act.options |= DPNI_FS_OPT_DISCARD;
++ else
++ fs_act.flow_id = fs->ring_cookie;
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
++ for (tc = 0; tc < dpaa2_eth_tc_count(priv); tc++) {
++ if (add)
++ err = dpni_add_fs_entry(priv->mc_io, 0, priv->mc_token,
++ tc, fs->location, &rule_cfg,
++ &fs_act);
++ else
++ err = dpni_remove_fs_entry(priv->mc_io, 0,
++ priv->mc_token, tc,
++ &rule_cfg);
+
-+/**
-+ * dpni_set_queue() - Set queue parameters
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @qtype: Type of queue - all queue types are supported, although
-+ * the command is ignored for Tx
-+ * @tc: Traffic class, in range 0 to NUM_TCS - 1
-+ * @index: Selects the specific queue out of the set allocated for the
-+ * same TC. Value must be in range 0 to NUM_QUEUES - 1
-+ * @options: A combination of DPNI_QUEUE_OPT_ values that control what
-+ * configuration options are set on the queue
-+ * @queue: Queue structure
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_set_queue(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ enum dpni_queue_type qtype,
-+ u8 tc,
-+ u8 index,
-+ u8 options,
-+ const struct dpni_queue *queue)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpni_cmd_set_queue *cmd_params;
++ if (err)
++ break;
++ }
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_QUEUE,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpni_cmd_set_queue *)cmd.params;
-+ cmd_params->qtype = qtype;
-+ cmd_params->tc = tc;
-+ cmd_params->index = index;
-+ cmd_params->options = options;
-+ cmd_params->dest_id = cpu_to_le32(queue->destination.id);
-+ cmd_params->dest_prio = queue->destination.priority;
-+ dpni_set_field(cmd_params->flags, DEST_TYPE, queue->destination.type);
-+ dpni_set_field(cmd_params->flags, STASH_CTRL, queue->flc.stash_control);
-+ dpni_set_field(cmd_params->flags, HOLD_ACTIVE,
-+ queue->destination.hold_active);
-+ cmd_params->flc = cpu_to_le64(queue->flc.value);
-+ cmd_params->user_context = cpu_to_le64(queue->user_context);
++ dma_unmap_single(dev, rule_cfg.key_iova,
++ rule_cfg.key_size * 2, DMA_TO_DEVICE);
+
-+ /* send command to mc */
-+ return mc_send_command(mc_io, &cmd);
++ if (err)
++ netdev_err(net_dev, "dpaa2_add/remove_cls() error %d\n", err);
++
++err_free_mem:
++ kfree(dma_mem);
++
++ return err;
+}
+
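++/* Note: the flow steering tables are per traffic class, so do_cls() above
++ * mirrors every insert/delete across all dpaa2_eth_tc_count(priv) classes,
++ * keeping the single rule table that ethtool sees consistent no matter
++ * which TC ingress traffic lands in.
++ */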
-+/**
-+ * dpni_get_queue() - Get queue parameters
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @qtype: Type of queue - all queue types are supported
-+ * @tc: Traffic class, in range 0 to NUM_TCS - 1
-+ * @index: Selects the specific queue out of the set allocated for the
-+ * same TC. Value must be in range 0 to NUM_QUEUES - 1
-+ * @queue: Queue configuration structure
-+ * @qid: Queue identification
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_get_queue(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ enum dpni_queue_type qtype,
-+ u8 tc,
-+ u8 index,
-+ struct dpni_queue *queue,
-+ struct dpni_queue_id *qid)
++static int add_cls(struct net_device *net_dev,
++ struct ethtool_rx_flow_spec *fs)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpni_cmd_get_queue *cmd_params;
-+ struct dpni_rsp_get_queue *rsp_params;
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ int err;
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_QUEUE,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpni_cmd_get_queue *)cmd.params;
-+ cmd_params->qtype = qtype;
-+ cmd_params->tc = tc;
-+ cmd_params->index = index;
-+
-+ /* send command to mc */
-+ err = mc_send_command(mc_io, &cmd);
++ err = do_cls(net_dev, fs, true);
+ if (err)
+ return err;
+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpni_rsp_get_queue *)cmd.params;
-+ queue->destination.id = le32_to_cpu(rsp_params->dest_id);
-+ queue->destination.priority = rsp_params->dest_prio;
-+ queue->destination.type = dpni_get_field(rsp_params->flags,
-+ DEST_TYPE);
-+ queue->flc.stash_control = dpni_get_field(rsp_params->flags,
-+ STASH_CTRL);
-+ queue->destination.hold_active = dpni_get_field(rsp_params->flags,
-+ HOLD_ACTIVE);
-+ queue->flc.value = le64_to_cpu(rsp_params->flc);
-+ queue->user_context = le64_to_cpu(rsp_params->user_context);
-+ qid->fqid = le32_to_cpu(rsp_params->fqid);
-+ qid->qdbin = le16_to_cpu(rsp_params->qdbin);
++ priv->cls_rule[fs->location].in_use = true;
++ priv->cls_rule[fs->location].fs = *fs;
+
+ return 0;
+}
+
-+/**
-+ * dpni_get_statistics() - Get DPNI statistics
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @page: Selects the statistics page to retrieve, see
-+ * DPNI_GET_STATISTICS output. Pages are numbered 0 to 2.
-+ * @stat: Structure containing the statistics
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_get_statistics(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 page,
-+ union dpni_statistics *stat)
++static int del_cls(struct net_device *net_dev, int location)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpni_cmd_get_statistics *cmd_params;
-+ struct dpni_rsp_get_statistics *rsp_params;
-+ int i, err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_STATISTICS,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpni_cmd_get_statistics *)cmd.params;
-+ cmd_params->page_number = page;
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
++ int err;
+
-+ /* send command to mc */
-+ err = mc_send_command(mc_io, &cmd);
++ err = do_cls(net_dev, &priv->cls_rule[location].fs, false);
+ if (err)
+ return err;
+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpni_rsp_get_statistics *)cmd.params;
-+ for (i = 0; i < DPNI_STATISTICS_CNT; i++)
-+ stat->raw.counter[i] = le64_to_cpu(rsp_params->counter[i]);
++ priv->cls_rule[location].in_use = false;
+
+ return 0;
+}
+
-+/**
-+ * dpni_reset_statistics() - Clears DPNI statistics
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_reset_statistics(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token)
++static int set_hash(struct net_device *net_dev, u64 data)
+{
-+ struct mc_command cmd = { 0 };
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
++ u32 key = 0;
++ int i;
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_RESET_STATISTICS,
-+ cmd_flags,
-+ token);
++ if (data & RXH_DISCARD)
++ return -EOPNOTSUPP;
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
++ for (i = 0; i < priv->num_dist_fields; i++)
++ if (priv->dist_fields[i].rxnfc_field & data)
++ key |= priv->dist_fields[i].id;
++
++ return dpaa2_eth_set_dist_key(priv, DPAA2_ETH_RX_DIST_HASH, key);
+}
+
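++/* Illustrative example (not part of the driver): "ethtool -N <iface>
++ * rx-flow-hash udp4 sdfn" requests hashing on src/dst IP address and
++ * src/dst port; set_hash() above translates those RXH_* bits into the
++ * matching dist_fields ids and reprograms the Rx hash key accordingly.
++ */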
-+/**
-+ * dpni_set_taildrop() - Set taildrop per queue or TC
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @cg_point: Congestion point
-+ * @q_type: Queue type on which the taildrop is configured.
-+ * Only Rx queues are supported for now
-+ * @tc: Traffic class to apply this taildrop to
-+ * @q_index: Index of the queue if the DPNI supports multiple queues for
-+ * traffic distribution. Ignored if CONGESTION_POINT is not 0.
-+ * @taildrop: Taildrop structure
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_set_taildrop(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ enum dpni_congestion_point cg_point,
-+ enum dpni_queue_type qtype,
-+ u8 tc,
-+ u8 index,
-+ struct dpni_taildrop *taildrop)
++static int dpaa2_eth_set_rxnfc(struct net_device *net_dev,
++ struct ethtool_rxnfc *rxnfc)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpni_cmd_set_taildrop *cmd_params;
++ int err = 0;
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TAILDROP,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpni_cmd_set_taildrop *)cmd.params;
-+ cmd_params->congestion_point = cg_point;
-+ cmd_params->qtype = qtype;
-+ cmd_params->tc = tc;
-+ cmd_params->index = index;
-+ dpni_set_field(cmd_params->enable, ENABLE, taildrop->enable);
-+ cmd_params->units = taildrop->units;
-+ cmd_params->threshold = cpu_to_le32(taildrop->threshold);
++ switch (rxnfc->cmd) {
++ case ETHTOOL_SRXCLSRLINS:
++ err = add_cls(net_dev, &rxnfc->fs);
++ break;
++ case ETHTOOL_SRXCLSRLDEL:
++ err = del_cls(net_dev, rxnfc->fs.location);
++ break;
++ case ETHTOOL_SRXFH:
++ err = set_hash(net_dev, rxnfc->data);
++ break;
++ default:
++ err = -EOPNOTSUPP;
++ }
+
-+ /* send command to mc */
-+ return mc_send_command(mc_io, &cmd);
++ return err;
+}
+
-+/**
-+ * dpni_get_taildrop() - Get taildrop information
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @cg_point: Congestion point
-+ * @q_type: Queue type on which the taildrop is configured.
-+ * Only Rx queues are supported for now
-+ * @tc: Traffic class to apply this taildrop to
-+ * @q_index: Index of the queue if the DPNI supports multiple queues for
-+ * traffic distribution. Ignored if CONGESTION_POINT is not 0.
-+ * @taildrop: Taildrop structure
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_get_taildrop(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ enum dpni_congestion_point cg_point,
-+ enum dpni_queue_type qtype,
-+ u8 tc,
-+ u8 index,
-+ struct dpni_taildrop *taildrop)
++static int dpaa2_eth_get_rxnfc(struct net_device *net_dev,
++ struct ethtool_rxnfc *rxnfc, u32 *rule_locs)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpni_cmd_get_taildrop *cmd_params;
-+ struct dpni_rsp_get_taildrop *rsp_params;
-+ int err;
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
++ const int rule_cnt = dpaa2_eth_fs_count(priv);
++ int i, j;
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TAILDROP,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpni_cmd_get_taildrop *)cmd.params;
-+ cmd_params->congestion_point = cg_point;
-+ cmd_params->qtype = qtype;
-+ cmd_params->tc = tc;
-+ cmd_params->index = index;
++ switch (rxnfc->cmd) {
++ case ETHTOOL_GRXFH:
++ /* we purposely ignore rxnfc->flow_type for now, because the
++ * classifier only supports a single set of fields for all
++ * protocols
++ */
++ rxnfc->data = priv->rx_hash_fields;
++ break;
++
++ case ETHTOOL_GRXRINGS:
++ rxnfc->data = dpaa2_eth_queue_count(priv);
++ break;
+
-+ /* send command to mc */
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
++ case ETHTOOL_GRXCLSRLCNT:
++ for (i = 0, rxnfc->rule_cnt = 0; i < rule_cnt; i++)
++ if (priv->cls_rule[i].in_use)
++ rxnfc->rule_cnt++;
++ rxnfc->data = rule_cnt;
++ break;
+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpni_rsp_get_taildrop *)cmd.params;
-+ taildrop->enable = dpni_get_field(rsp_params->enable, ENABLE);
-+ taildrop->units = rsp_params->units;
-+ taildrop->threshold = le32_to_cpu(rsp_params->threshold);
++ case ETHTOOL_GRXCLSRULE:
++ if (!priv->cls_rule[rxnfc->fs.location].in_use)
++ return -EINVAL;
++
++ rxnfc->fs = priv->cls_rule[rxnfc->fs.location].fs;
++ break;
++
++ case ETHTOOL_GRXCLSRLALL:
++ for (i = 0, j = 0; i < rule_cnt; i++) {
++ if (!priv->cls_rule[i].in_use)
++ continue;
++ if (j == rxnfc->rule_cnt)
++ return -EMSGSIZE;
++ rule_locs[j++] = i;
++ }
++ rxnfc->rule_cnt = j;
++ rxnfc->data = rule_cnt;
++ break;
++
++ default:
++ return -EOPNOTSUPP;
++ }
+
+ return 0;
+}
++
++const struct ethtool_ops dpaa2_ethtool_ops = {
++ .get_drvinfo = dpaa2_eth_get_drvinfo,
++ .get_link = ethtool_op_get_link,
++ .get_link_ksettings = dpaa2_eth_get_link_ksettings,
++ .set_link_ksettings = dpaa2_eth_set_link_ksettings,
++ .get_pauseparam = dpaa2_eth_get_pauseparam,
++ .set_pauseparam = dpaa2_eth_set_pauseparam,
++ .get_sset_count = dpaa2_eth_get_sset_count,
++ .get_ethtool_stats = dpaa2_eth_get_ethtool_stats,
++ .get_strings = dpaa2_eth_get_strings,
++ .get_rxnfc = dpaa2_eth_get_rxnfc,
++ .set_rxnfc = dpaa2_eth_set_rxnfc,
++};
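++
++/* A minimal sketch (not from this patch) of how the ops table above is
++ * wired up; the dpaa2-eth netdev setup code assigns it during probe:
++ *
++ * net_dev->ethtool_ops = &dpaa2_ethtool_ops;
++ *
++ * after which the ethtool core dispatches the ETHTOOL_* requests to the
++ * handlers listed here.
++ */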
--- /dev/null
-+++ b/drivers/staging/fsl-dpaa2/ethernet/dpni.h
-@@ -0,0 +1,989 @@
-+/* Copyright 2013-2016 Freescale Semiconductor Inc.
-+ * Copyright 2016 NXP
++++ b/drivers/staging/fsl-dpaa2/ethernet/dpkg.h
+@@ -0,0 +1,176 @@
++/* Copyright 2013-2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
-+#ifndef __FSL_DPNI_H
-+#define __FSL_DPNI_H
-+
-+#include "dpkg.h"
++#ifndef __FSL_DPKG_H_
++#define __FSL_DPKG_H_
+
-+struct fsl_mc_io;
++#include <linux/types.h>
++#include "net.h"
+
-+/**
-+ * Data Path Network Interface API
-+ * Contains initialization APIs and runtime control APIs for DPNI
++/* Data Path Key Generator API
++ * Contains initialization APIs and runtime APIs for the Key Generator
+ */
+
-+/** General DPNI macros */
++/** Key Generator properties */
+
+/**
-+ * Maximum number of traffic classes
++ * Number of masks per key extraction
+ */
-+#define DPNI_MAX_TC 8
++#define DPKG_NUM_OF_MASKS 4
+/**
-+ * Maximum number of buffer pools per DPNI
++ * Number of extractions per key profile
+ */
-+#define DPNI_MAX_DPBP 8
++#define DPKG_MAX_NUM_OF_EXTRACTS 10
+
+/**
-+ * All traffic classes considered; see dpni_set_queue()
-+ */
-+#define DPNI_ALL_TCS (u8)(-1)
-+/**
-+ * All flows within traffic class considered; see dpni_set_queue()
-+ */
-+#define DPNI_ALL_TC_FLOWS (u16)(-1)
-+/**
-+ * Generate new flow ID; see dpni_set_queue()
++ * enum dpkg_extract_from_hdr_type - Selecting extraction by header types
++ * @DPKG_FROM_HDR: Extract selected bytes from header, by offset
++ * @DPKG_FROM_FIELD: Extract selected bytes from header, by offset from field
++ * @DPKG_FULL_FIELD: Extract a full field
+ */
-+#define DPNI_NEW_FLOW_ID (u16)(-1)
++enum dpkg_extract_from_hdr_type {
++ DPKG_FROM_HDR = 0,
++ DPKG_FROM_FIELD = 1,
++ DPKG_FULL_FIELD = 2
++};
+
+/**
-+ * Tx traffic is always released to a buffer pool on transmit, there are no
-+ * resources allocated to have the frames confirmed back to the source after
-+ * transmission.
-+ */
-+#define DPNI_OPT_TX_FRM_RELEASE 0x000001
-+/**
-+ * Disables support for MAC address filtering for addresses other than primary
-+ * MAC address. This affects both unicast and multicast. Promiscuous mode can
-+ * still be enabled/disabled for both unicast and multicast. If promiscuous mode
-+ * is disabled, only traffic matching the primary MAC address will be accepted.
-+ */
-+#define DPNI_OPT_NO_MAC_FILTER 0x000002
-+/**
-+ * Allocate policers for this DPNI. They can be used to rate-limit traffic per
-+ * traffic class (TC) basis.
-+ */
-+#define DPNI_OPT_HAS_POLICING 0x000004
-+/**
-+ * Congestion can be managed in several ways, allowing the buffer pool to
-+ * deplete on ingress, taildrop on each queue or use congestion groups for sets
-+ * of queues. If set, it configures a single congestion groups across all TCs.
-+ * If reset, a congestion group is allocated for each TC. Only relevant if the
-+ * DPNI has multiple traffic classes.
-+ */
-+#define DPNI_OPT_SHARED_CONGESTION 0x000008
-+/**
-+ * Enables TCAM for Flow Steering and QoS look-ups. If not specified, all
-+ * look-ups are exact match. Note that TCAM is not available on LS1088 and its
-+ * variants. Setting this bit on these SoCs will trigger an error.
++ * enum dpkg_extract_type - Enumeration for selecting extraction type
++ * @DPKG_EXTRACT_FROM_HDR: Extract from the header
++ * @DPKG_EXTRACT_FROM_DATA: Extract from data not in specific header
++ * @DPKG_EXTRACT_FROM_PARSE: Extract from parser-result;
++ * e.g. can be used to extract header existence;
++ * please refer to 'Parse Result definition' section in the parser BG
+ */
-+#define DPNI_OPT_HAS_KEY_MASKING 0x000010
++enum dpkg_extract_type {
++ DPKG_EXTRACT_FROM_HDR = 0,
++ DPKG_EXTRACT_FROM_DATA = 1,
++ DPKG_EXTRACT_FROM_PARSE = 3
++};
++
+/**
-+ * Disables the flow steering table.
++ * struct dpkg_mask - A structure for defining a single extraction mask
++ * @mask: Byte mask for the extracted content
++ * @offset: Offset within the extracted content
+ */
-+#define DPNI_OPT_NO_FS 0x000020
-+
-+int dpni_open(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ int dpni_id,
-+ u16 *token);
-+
-+int dpni_close(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token);
++struct dpkg_mask {
++ u8 mask;
++ u8 offset;
++};
+
+/**
-+ * struct dpni_pools_cfg - Structure representing buffer pools configuration
-+ * @num_dpbp: Number of DPBPs
-+ * @pools: Array of buffer pools parameters; The number of valid entries
-+ * must match 'num_dpbp' value
++ * struct dpkg_extract - A structure for defining a single extraction
++ * @type: Determines how the union below is interpreted:
++ * DPKG_EXTRACT_FROM_HDR: selects 'from_hdr';
++ * DPKG_EXTRACT_FROM_DATA: selects 'from_data';
++ * DPKG_EXTRACT_FROM_PARSE: selects 'from_parse'
++ * @extract: Selects extraction method
++ * @num_of_byte_masks: Defines the number of valid entries in the array below;
++ * This is also the number of bytes to be used as masks
++ * @masks: Masks parameters
+ */
-+struct dpni_pools_cfg {
-+ u8 num_dpbp;
++struct dpkg_extract {
++ enum dpkg_extract_type type;
+ /**
-+ * struct pools - Buffer pools parameters
-+ * @dpbp_id: DPBP object ID
-+ * @buffer_size: Buffer size
-+ * @backup_pool: Backup pool
++ * union extract - Selects extraction method
++ * @from_hdr - Used when 'type = DPKG_EXTRACT_FROM_HDR'
++ * @from_data - Used when 'type = DPKG_EXTRACT_FROM_DATA'
++ * @from_parse - Used when 'type = DPKG_EXTRACT_FROM_PARSE'
+ */
-+ struct {
-+ int dpbp_id;
-+ u16 buffer_size;
-+ int backup_pool;
-+ } pools[DPNI_MAX_DPBP];
-+};
-+
-+int dpni_set_pools(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ const struct dpni_pools_cfg *cfg);
-+
-+int dpni_enable(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token);
++ union {
++ /**
++ * struct from_hdr - Used when 'type = DPKG_EXTRACT_FROM_HDR'
++ * @prot: Any of the supported headers
++ * @type: Defines the type of header extraction:
++ * DPKG_FROM_HDR: use size & offset below;
++ * DPKG_FROM_FIELD: use field, size and offset below;
++ * DPKG_FULL_FIELD: use field below
++ * @field: One of the supported fields (NH_FLD_)
++ *
++ * @size: Size in bytes
++ * @offset: Byte offset
++ * @hdr_index: Clear for cases not listed below;
++ * Used for protocols that may have more than a single
++ * header, 0 indicates an outer header;
++ * Supported protocols (possible values):
++ * NET_PROT_VLAN (0, HDR_INDEX_LAST);
++ * NET_PROT_MPLS (0, 1, HDR_INDEX_LAST);
++ * NET_PROT_IP(0, HDR_INDEX_LAST);
++ * NET_PROT_IPv4(0, HDR_INDEX_LAST);
++ * NET_PROT_IPv6(0, HDR_INDEX_LAST);
++ */
+
-+int dpni_disable(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token);
++ struct {
++ enum net_prot prot;
++ enum dpkg_extract_from_hdr_type type;
++ u32 field;
++ u8 size;
++ u8 offset;
++ u8 hdr_index;
++ } from_hdr;
++ /**
++ * struct from_data - Used when 'type = DPKG_EXTRACT_FROM_DATA'
++ * @size: Size in bytes
++ * @offset: Byte offset
++ */
++ struct {
++ u8 size;
++ u8 offset;
++ } from_data;
+
-+int dpni_is_enabled(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ int *en);
++ /**
++ * struct from_parse - Used when
++ * 'type = DPKG_EXTRACT_FROM_PARSE'
++ * @size: Size in bytes
++ * @offset: Byte offset
++ */
++ struct {
++ u8 size;
++ u8 offset;
++ } from_parse;
++ } extract;
+
-+int dpni_reset(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token);
++ u8 num_of_byte_masks;
++ struct dpkg_mask masks[DPKG_NUM_OF_MASKS];
++};
+
+/**
-+ * DPNI IRQ Index and Events
++ * struct dpkg_profile_cfg - A structure for defining a full Key Generation
++ * profile (rule)
++ * @num_extracts: Defines the number of valid entries in the array below
++ * @extracts: Array of required extractions
+ */
++struct dpkg_profile_cfg {
++ u8 num_extracts;
++ struct dpkg_extract extracts[DPKG_MAX_NUM_OF_EXTRACTS];
++};
+
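++/* A minimal usage sketch (illustrative only): composing a profile that
++ * extracts the full IPv4 source address field:
++ *
++ * struct dpkg_profile_cfg cfg = { 0 };
++ *
++ * cfg.num_extracts = 1;
++ * cfg.extracts[0].type = DPKG_EXTRACT_FROM_HDR;
++ * cfg.extracts[0].extract.from_hdr.prot = NET_PROT_IP;
++ * cfg.extracts[0].extract.from_hdr.type = DPKG_FULL_FIELD;
++ * cfg.extracts[0].extract.from_hdr.field = NH_FLD_IP_SRC;
++ *
++ * The filled-in structure is then serialized into a DMA-able buffer
++ * (e.g. by dpni_prepare_key_cfg()) before being handed to the MC firmware.
++ */
++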
-+/**
-+ * IRQ index
-+ */
-+#define DPNI_IRQ_INDEX 0
-+/**
-+ * IRQ event - indicates a change in link state
++#endif /* __FSL_DPKG_H_ */
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h
+@@ -0,0 +1,719 @@
++/* Copyright 2013-2016 Freescale Semiconductor Inc.
++ * Copyright 2016 NXP
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
+ */
-+#define DPNI_IRQ_EVENT_LINK_CHANGED 0x00000001
++#ifndef _FSL_DPNI_CMD_H
++#define _FSL_DPNI_CMD_H
+
-+int dpni_set_irq_enable(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 irq_index,
-+ u8 en);
++#include "dpni.h"
+
-+int dpni_get_irq_enable(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 irq_index,
-+ u8 *en);
++/* DPNI Version */
++#define DPNI_VER_MAJOR 7
++#define DPNI_VER_MINOR 0
++#define DPNI_CMD_BASE_VERSION 1
++#define DPNI_CMD_2ND_VERSION 2
++#define DPNI_CMD_ID_OFFSET 4
+
-+int dpni_set_irq_mask(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 irq_index,
-+ u32 mask);
++#define DPNI_CMD(id) (((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_BASE_VERSION)
++#define DPNI_CMD_V2(id) (((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_2ND_VERSION)
+
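++/* Worked example (illustrative): DPNI_CMDID_OPEN below is DPNI_CMD(0x801)
++ * == (0x801 << 4) | 1 == 0x8011, i.e. the command id in the upper bits
++ * and the command ABI version in the low nibble.
++ */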
-+int dpni_get_irq_mask(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 irq_index,
-+ u32 *mask);
++#define DPNI_CMDID_OPEN DPNI_CMD(0x801)
++#define DPNI_CMDID_CLOSE DPNI_CMD(0x800)
++#define DPNI_CMDID_CREATE DPNI_CMD(0x901)
++#define DPNI_CMDID_DESTROY DPNI_CMD(0x900)
++#define DPNI_CMDID_GET_API_VERSION DPNI_CMD(0xa01)
+
-+int dpni_get_irq_status(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 irq_index,
-+ u32 *status);
++#define DPNI_CMDID_ENABLE DPNI_CMD(0x002)
++#define DPNI_CMDID_DISABLE DPNI_CMD(0x003)
++#define DPNI_CMDID_GET_ATTR DPNI_CMD(0x004)
++#define DPNI_CMDID_RESET DPNI_CMD(0x005)
++#define DPNI_CMDID_IS_ENABLED DPNI_CMD(0x006)
+
-+int dpni_clear_irq_status(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 irq_index,
-+ u32 status);
++#define DPNI_CMDID_SET_IRQ DPNI_CMD(0x010)
++#define DPNI_CMDID_GET_IRQ DPNI_CMD(0x011)
++#define DPNI_CMDID_SET_IRQ_ENABLE DPNI_CMD(0x012)
++#define DPNI_CMDID_GET_IRQ_ENABLE DPNI_CMD(0x013)
++#define DPNI_CMDID_SET_IRQ_MASK DPNI_CMD(0x014)
++#define DPNI_CMDID_GET_IRQ_MASK DPNI_CMD(0x015)
++#define DPNI_CMDID_GET_IRQ_STATUS DPNI_CMD(0x016)
++#define DPNI_CMDID_CLEAR_IRQ_STATUS DPNI_CMD(0x017)
+
-+/**
-+ * struct dpni_attr - Structure representing DPNI attributes
-+ * @options: Any combination of the following options:
-+ * DPNI_OPT_TX_FRM_RELEASE
-+ * DPNI_OPT_NO_MAC_FILTER
-+ * DPNI_OPT_HAS_POLICING
-+ * DPNI_OPT_SHARED_CONGESTION
-+ * DPNI_OPT_HAS_KEY_MASKING
-+ * DPNI_OPT_NO_FS
-+ * @num_queues: Number of Tx and Rx queues used for traffic distribution.
-+ * @num_tcs: Number of traffic classes (TCs), reserved for the DPNI.
-+ * @mac_filter_entries: Number of entries in the MAC address filtering table.
-+ * @vlan_filter_entries: Number of entries in the VLAN address filtering table.
-+ * @qos_entries: Number of entries in the QoS classification table.
-+ * @fs_entries: Number of entries in the flow steering table.
-+ * @qos_key_size: Size, in bytes, of the QoS look-up key. Defining a key larger
-+ * than this when adding QoS entries will result in an error.
-+ * @fs_key_size: Size, in bytes, of the flow steering look-up key. Defining a
-+ * key larger than this when composing the hash + FS key will
-+ * result in an error.
-+ * @wriop_version: Version of WRIOP HW block. The 3 version values are stored
-+ * on 6, 5, 5 bits respectively.
-+ */
-+struct dpni_attr {
-+ u32 options;
-+ u8 num_queues;
-+ u8 num_tcs;
-+ u8 mac_filter_entries;
-+ u8 vlan_filter_entries;
-+ u8 qos_entries;
-+ u16 fs_entries;
-+ u8 qos_key_size;
-+ u8 fs_key_size;
-+ u16 wriop_version;
-+};
++#define DPNI_CMDID_SET_POOLS DPNI_CMD_V2(0x200)
++#define DPNI_CMDID_SET_ERRORS_BEHAVIOR DPNI_CMD(0x20B)
+
-+int dpni_get_attributes(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ struct dpni_attr *attr);
++#define DPNI_CMDID_GET_QDID DPNI_CMD(0x210)
++#define DPNI_CMDID_GET_TX_DATA_OFFSET DPNI_CMD(0x212)
++#define DPNI_CMDID_GET_LINK_STATE DPNI_CMD(0x215)
++#define DPNI_CMDID_SET_MAX_FRAME_LENGTH DPNI_CMD(0x216)
++#define DPNI_CMDID_GET_MAX_FRAME_LENGTH DPNI_CMD(0x217)
++#define DPNI_CMDID_SET_LINK_CFG DPNI_CMD(0x21A)
++#define DPNI_CMDID_SET_TX_SHAPING DPNI_CMD_V2(0x21B)
+
-+/**
-+ * DPNI errors
-+ */
++#define DPNI_CMDID_SET_MCAST_PROMISC DPNI_CMD(0x220)
++#define DPNI_CMDID_GET_MCAST_PROMISC DPNI_CMD(0x221)
++#define DPNI_CMDID_SET_UNICAST_PROMISC DPNI_CMD(0x222)
++#define DPNI_CMDID_GET_UNICAST_PROMISC DPNI_CMD(0x223)
++#define DPNI_CMDID_SET_PRIM_MAC DPNI_CMD(0x224)
++#define DPNI_CMDID_GET_PRIM_MAC DPNI_CMD(0x225)
++#define DPNI_CMDID_ADD_MAC_ADDR DPNI_CMD(0x226)
++#define DPNI_CMDID_REMOVE_MAC_ADDR DPNI_CMD(0x227)
++#define DPNI_CMDID_CLR_MAC_FILTERS DPNI_CMD(0x228)
+
-+/**
-+ * Extract out of frame header error
-+ */
-+#define DPNI_ERROR_EOFHE 0x00020000
-+/**
-+ * Frame length error
-+ */
-+#define DPNI_ERROR_FLE 0x00002000
-+/**
-+ * Frame physical error
-+ */
-+#define DPNI_ERROR_FPE 0x00001000
-+/**
-+ * Parsing header error
-+ */
-+#define DPNI_ERROR_PHE 0x00000020
-+/**
-+ * Parser L3 checksum error
-+ */
-+#define DPNI_ERROR_L3CE 0x00000004
-+/**
-+ * Parser L3 checksum error
-+ */
-+#define DPNI_ERROR_L4CE 0x00000001
++#define DPNI_CMDID_SET_RX_TC_DIST DPNI_CMD(0x235)
+
-+/**
-+ * enum dpni_error_action - Defines DPNI behavior for errors
-+ * @DPNI_ERROR_ACTION_DISCARD: Discard the frame
-+ * @DPNI_ERROR_ACTION_CONTINUE: Continue with the normal flow
-+ * @DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE: Send the frame to the error queue
-+ */
-+enum dpni_error_action {
-+ DPNI_ERROR_ACTION_DISCARD = 0,
-+ DPNI_ERROR_ACTION_CONTINUE = 1,
-+ DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE = 2
++#define DPNI_CMDID_SET_QOS_TBL DPNI_CMD(0x240)
++#define DPNI_CMDID_ADD_QOS_ENT DPNI_CMD(0x241)
++#define DPNI_CMDID_REMOVE_QOS_ENT DPNI_CMD(0x242)
++#define DPNI_CMDID_ADD_FS_ENT DPNI_CMD(0x244)
++#define DPNI_CMDID_REMOVE_FS_ENT DPNI_CMD(0x245)
++#define DPNI_CMDID_CLR_FS_ENT DPNI_CMD(0x246)
++
++#define DPNI_CMDID_SET_TX_PRIORITIES DPNI_CMD_V2(0x250)
++#define DPNI_CMDID_GET_STATISTICS DPNI_CMD_V2(0x25D)
++#define DPNI_CMDID_RESET_STATISTICS DPNI_CMD(0x25E)
++#define DPNI_CMDID_GET_QUEUE DPNI_CMD(0x25F)
++#define DPNI_CMDID_SET_QUEUE DPNI_CMD(0x260)
++#define DPNI_CMDID_GET_TAILDROP DPNI_CMD(0x261)
++#define DPNI_CMDID_SET_TAILDROP DPNI_CMD(0x262)
++
++#define DPNI_CMDID_GET_PORT_MAC_ADDR DPNI_CMD(0x263)
++
++#define DPNI_CMDID_GET_BUFFER_LAYOUT DPNI_CMD(0x264)
++#define DPNI_CMDID_SET_BUFFER_LAYOUT DPNI_CMD(0x265)
++
++#define DPNI_CMDID_SET_TX_CONFIRMATION_MODE DPNI_CMD(0x266)
++#define DPNI_CMDID_SET_CONGESTION_NOTIFICATION DPNI_CMD(0x267)
++#define DPNI_CMDID_GET_CONGESTION_NOTIFICATION DPNI_CMD(0x268)
++#define DPNI_CMDID_SET_EARLY_DROP DPNI_CMD(0x269)
++#define DPNI_CMDID_GET_EARLY_DROP DPNI_CMD(0x26A)
++#define DPNI_CMDID_GET_OFFLOAD DPNI_CMD(0x26B)
++#define DPNI_CMDID_SET_OFFLOAD DPNI_CMD(0x26C)
++
++#define DPNI_CMDID_SET_RX_FS_DIST DPNI_CMD(0x273)
++#define DPNI_CMDID_SET_RX_HASH_DIST DPNI_CMD(0x274)
++
++/* Macros for accessing command fields smaller than 1 byte */
++#define DPNI_MASK(field) \
++ GENMASK(DPNI_##field##_SHIFT + DPNI_##field##_SIZE - 1, \
++ DPNI_##field##_SHIFT)
++
++#define dpni_set_field(var, field, val) \
++ ((var) |= (((val) << DPNI_##field##_SHIFT) & DPNI_MASK(field)))
++#define dpni_get_field(var, field) \
++ (((var) & DPNI_MASK(field)) >> DPNI_##field##_SHIFT)
++
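++/* Worked example (illustrative): with DPNI_ENABLE_SHIFT 0 and
++ * DPNI_ENABLE_SIZE 1 (defined further below), DPNI_MASK(ENABLE) expands to
++ * GENMASK(0, 0) == 0x1, so dpni_set_field(var, ENABLE, 1) reduces to
++ * var |= 0x1 and dpni_get_field(var, ENABLE) to (var & 0x1).
++ */
++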
++struct dpni_cmd_open {
++ __le32 dpni_id;
+};
+
-+/**
-+ * struct dpni_error_cfg - Structure representing DPNI errors treatment
-+ * @errors: Errors mask; use 'DPNI_ERROR__<X>
-+ * @error_action: The desired action for the errors mask
-+ * @set_frame_annotation: Set to '1' to mark the errors in frame annotation
-+ * status (FAS); relevant only for the non-discard action
-+ */
-+struct dpni_error_cfg {
-+ u32 errors;
-+ enum dpni_error_action error_action;
-+ int set_frame_annotation;
++#define DPNI_BACKUP_POOL(val, order) (((val) & 0x1) << (order))
++struct dpni_cmd_set_pools {
++ u8 num_dpbp;
++ u8 backup_pool_mask;
++ __le16 pad;
++ struct {
++ __le16 dpbp_id;
++ u8 priority_mask;
++ u8 pad;
++ } pool[DPNI_MAX_DPBP];
++ __le16 buffer_size[DPNI_MAX_DPBP];
+};
+
-+int dpni_set_errors_behavior(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ struct dpni_error_cfg *cfg);
++/* The enable indication is always the least significant bit */
++#define DPNI_ENABLE_SHIFT 0
++#define DPNI_ENABLE_SIZE 1
+
-+/**
-+ * DPNI buffer layout modification options
-+ */
++struct dpni_rsp_is_enabled {
++ u8 enabled;
++};
+
-+/**
-+ * Select to modify the time-stamp setting
-+ */
-+#define DPNI_BUF_LAYOUT_OPT_TIMESTAMP 0x00000001
-+/**
-+ * Select to modify the parser-result setting; not applicable for Tx
-+ */
-+#define DPNI_BUF_LAYOUT_OPT_PARSER_RESULT 0x00000002
-+/**
-+ * Select to modify the frame-status setting
-+ */
-+#define DPNI_BUF_LAYOUT_OPT_FRAME_STATUS 0x00000004
-+/**
-+ * Select to modify the private-data-size setting
-+ */
-+#define DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE 0x00000008
-+/**
-+ * Select to modify the data-alignment setting
-+ */
-+#define DPNI_BUF_LAYOUT_OPT_DATA_ALIGN 0x00000010
-+/**
-+ * Select to modify the data-head-room setting
-+ */
-+#define DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM 0x00000020
-+/**
-+ * Select to modify the data-tail-room setting
-+ */
-+#define DPNI_BUF_LAYOUT_OPT_DATA_TAIL_ROOM 0x00000040
++struct dpni_rsp_get_irq {
++ /* response word 0 */
++ __le32 irq_val;
++ __le32 pad;
++ /* response word 1 */
++ __le64 irq_addr;
++ /* response word 2 */
++ __le32 irq_num;
++ __le32 type;
++};
+
-+/**
-+ * struct dpni_buffer_layout - Structure representing DPNI buffer layout
-+ * @options: Flags representing the suggested modifications to the buffer
-+ * layout; Use any combination of 'DPNI_BUF_LAYOUT_OPT_<X>' flags
-+ * @pass_timestamp: Pass timestamp value
-+ * @pass_parser_result: Pass parser results
-+ * @pass_frame_status: Pass frame status
-+ * @private_data_size: Size kept for private data (in bytes)
-+ * @data_align: Data alignment
-+ * @data_head_room: Data head room
-+ * @data_tail_room: Data tail room
-+ */
-+struct dpni_buffer_layout {
-+ u32 options;
-+ int pass_timestamp;
-+ int pass_parser_result;
-+ int pass_frame_status;
-+ u16 private_data_size;
-+ u16 data_align;
-+ u16 data_head_room;
-+ u16 data_tail_room;
++struct dpni_cmd_set_irq_enable {
++ u8 enable;
++ u8 pad[3];
++ u8 irq_index;
+};
+
-+/**
-+ * enum dpni_queue_type - Identifies a type of queue targeted by the command
-+ * @DPNI_QUEUE_RX: Rx queue
-+ * @DPNI_QUEUE_TX: Tx queue
-+ * @DPNI_QUEUE_TX_CONFIRM: Tx confirmation queue
-+ * @DPNI_QUEUE_RX_ERR: Rx error queue
-+ */enum dpni_queue_type {
-+ DPNI_QUEUE_RX,
-+ DPNI_QUEUE_TX,
-+ DPNI_QUEUE_TX_CONFIRM,
-+ DPNI_QUEUE_RX_ERR,
++struct dpni_cmd_get_irq_enable {
++ __le32 pad;
++ u8 irq_index;
+};
+
-+int dpni_get_buffer_layout(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ enum dpni_queue_type qtype,
-+ struct dpni_buffer_layout *layout);
++struct dpni_rsp_get_irq_enable {
++ u8 enabled;
++};
+
-+int dpni_set_buffer_layout(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ enum dpni_queue_type qtype,
-+ const struct dpni_buffer_layout *layout);
++struct dpni_cmd_set_irq_mask {
++ __le32 mask;
++ u8 irq_index;
++};
+
-+/**
-+ * enum dpni_offload - Identifies a type of offload targeted by the command
-+ * @DPNI_OFF_RX_L3_CSUM: Rx L3 checksum validation
-+ * @DPNI_OFF_RX_L4_CSUM: Rx L4 checksum validation
-+ * @DPNI_OFF_TX_L3_CSUM: Tx L3 checksum generation
-+ * @DPNI_OFF_TX_L4_CSUM: Tx L4 checksum generation
-+ */
-+enum dpni_offload {
-+ DPNI_OFF_RX_L3_CSUM,
-+ DPNI_OFF_RX_L4_CSUM,
-+ DPNI_OFF_TX_L3_CSUM,
-+ DPNI_OFF_TX_L4_CSUM,
++struct dpni_cmd_get_irq_mask {
++ __le32 pad;
++ u8 irq_index;
+};
+
-+int dpni_set_offload(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ enum dpni_offload type,
-+ u32 config);
++struct dpni_rsp_get_irq_mask {
++ __le32 mask;
++};
+
-+int dpni_get_offload(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ enum dpni_offload type,
-+ u32 *config);
++struct dpni_cmd_get_irq_status {
++ __le32 status;
++ u8 irq_index;
++};
+
-+int dpni_get_qdid(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ enum dpni_queue_type qtype,
-+ u16 *qdid);
++struct dpni_rsp_get_irq_status {
++ __le32 status;
++};
+
-+int dpni_get_tx_data_offset(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 *data_offset);
++struct dpni_cmd_clear_irq_status {
++ __le32 status;
++ u8 irq_index;
++};
+
-+#define DPNI_STATISTICS_CNT 7
++struct dpni_rsp_get_attr {
++ /* response word 0 */
++ __le32 options;
++ u8 num_queues;
++ u8 num_tcs;
++ u8 mac_filter_entries;
++ u8 pad0;
++ /* response word 1 */
++ u8 vlan_filter_entries;
++ u8 pad1;
++ u8 qos_entries;
++ u8 pad2;
++ __le16 fs_entries;
++ __le16 pad3;
++ /* response word 2 */
++ u8 qos_key_size;
++ u8 fs_key_size;
++ __le16 wriop_version;
++};
+
-+union dpni_statistics {
-+ /**
-+ * struct page_0 - Page_0 statistics structure
-+ * @ingress_all_frames: Ingress frame count
-+ * @ingress_all_bytes: Ingress byte count
-+ * @ingress_multicast_frames: Ingress multicast frame count
-+ * @ingress_multicast_bytes: Ingress multicast byte count
-+ * @ingress_broadcast_frames: Ingress broadcast frame count
-+ * @ingress_broadcast_bytes: Ingress broadcast byte count
-+ */
-+ struct {
-+ u64 ingress_all_frames;
-+ u64 ingress_all_bytes;
-+ u64 ingress_multicast_frames;
-+ u64 ingress_multicast_bytes;
-+ u64 ingress_broadcast_frames;
-+ u64 ingress_broadcast_bytes;
-+ } page_0;
-+ /**
-+ * struct page_1 - Page_1 statistics structure
-+ * @egress_all_frames: Egress frame count
-+ * @egress_all_bytes: Egress byte count
-+ * @egress_multicast_frames: Egress multicast frame count
-+ * @egress_multicast_bytes: Egress multicast byte count
-+ * @egress_broadcast_frames: Egress broadcast frame count
-+ * @egress_broadcast_bytes: Egress broadcast byte count
-+ */
-+ struct {
-+ u64 egress_all_frames;
-+ u64 egress_all_bytes;
-+ u64 egress_multicast_frames;
-+ u64 egress_multicast_bytes;
-+ u64 egress_broadcast_frames;
-+ u64 egress_broadcast_bytes;
-+ } page_1;
-+ /**
-+ * struct page_2 - Page_2 statistics structure
-+ * @ingress_filtered_frames: Ingress filtered frame count
-+ * @ingress_discarded_frames: Ingress discarded frame count
-+ * @ingress_nobuffer_discards: Ingress discarded frame count
-+ * due to lack of buffers
-+ * @egress_discarded_frames: Egress discarded frame count
-+ * @egress_confirmed_frames: Egress confirmed frame count
-+ */
-+ struct {
-+ u64 ingress_filtered_frames;
-+ u64 ingress_discarded_frames;
-+ u64 ingress_nobuffer_discards;
-+ u64 egress_discarded_frames;
-+ u64 egress_confirmed_frames;
-+ } page_2;
-+ /**
-+ * struct raw - raw statistics structure
-+ */
-+ struct {
-+ u64 counter[DPNI_STATISTICS_CNT];
-+ } raw;
++#define DPNI_ERROR_ACTION_SHIFT 0
++#define DPNI_ERROR_ACTION_SIZE 4
++#define DPNI_FRAME_ANN_SHIFT 4
++#define DPNI_FRAME_ANN_SIZE 1
++
++struct dpni_cmd_set_errors_behavior {
++ __le32 errors;
++ /* from least significant bit: error_action:4, set_frame_annotation:1 */
++ u8 flags;
+};
+
-+int dpni_get_statistics(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 page,
-+ union dpni_statistics *stat);
++/* There are 3 separate commands for configuring Rx, Tx and Tx confirmation
++ * buffer layouts, but they all share the same parameters.
++ * If one of the commands changes, the structure below needs to be split.
++ */
+
-+int dpni_reset_statistics(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token);
++#define DPNI_PASS_TS_SHIFT 0
++#define DPNI_PASS_TS_SIZE 1
++#define DPNI_PASS_PR_SHIFT 1
++#define DPNI_PASS_PR_SIZE 1
++#define DPNI_PASS_FS_SHIFT 2
++#define DPNI_PASS_FS_SIZE 1
+
-+/**
-+ * Enable auto-negotiation
-+ */
-+#define DPNI_LINK_OPT_AUTONEG 0x0000000000000001ULL
-+/**
-+ * Enable half-duplex mode
-+ */
-+#define DPNI_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL
-+/**
-+ * Enable pause frames
-+ */
-+#define DPNI_LINK_OPT_PAUSE 0x0000000000000004ULL
-+/**
-+ * Enable a-symmetric pause frames
-+ */
-+#define DPNI_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL
++struct dpni_cmd_get_buffer_layout {
++ u8 qtype;
++};
+
-+/**
-+ * struct - Structure representing DPNI link configuration
-+ * @rate: Rate
-+ * @options: Mask of available options; use 'DPNI_LINK_OPT_<X>' values
-+ */
-+struct dpni_link_cfg {
-+ u32 rate;
-+ u64 options;
++struct dpni_rsp_get_buffer_layout {
++ /* response word 0 */
++ u8 pad0[6];
++ /* from LSB: pass_timestamp:1, parser_result:1, frame_status:1 */
++ u8 flags;
++ u8 pad1;
++ /* response word 1 */
++ __le16 private_data_size;
++ __le16 data_align;
++ __le16 head_room;
++ __le16 tail_room;
+};
+
-+int dpni_set_link_cfg(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ const struct dpni_link_cfg *cfg);
++struct dpni_cmd_set_buffer_layout {
++ /* cmd word 0 */
++ u8 qtype;
++ u8 pad0[3];
++ __le16 options;
++ /* from LSB: pass_timestamp:1, parser_result:1, frame_status:1 */
++ u8 flags;
++ u8 pad1;
++ /* cmd word 1 */
++ __le16 private_data_size;
++ __le16 data_align;
++ __le16 head_room;
++ __le16 tail_room;
++};
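++
++/* Illustrative sketch (not part of this patch's logic): the flags byte
++ * above is packed with the dpni_set_field() helper and the SHIFT/SIZE
++ * pairs defined before these structures, roughly as done in
++ * dpni_set_buffer_layout():
++ *
++ *   cmd_params = (struct dpni_cmd_set_buffer_layout *)cmd.params;
++ *   dpni_set_field(cmd_params->flags, PASS_TS, layout->pass_timestamp);
++ *   dpni_set_field(cmd_params->flags, PASS_PR, layout->pass_parser_result);
++ *   dpni_set_field(cmd_params->flags, PASS_FS, layout->pass_frame_status);
++ *
++ * 'layout' is assumed to be a struct dpni_buffer_layout from dpni.h.
++ */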
+
-+/**
-+ * struct dpni_link_state - Structure representing DPNI link state
-+ * @rate: Rate
-+ * @options: Mask of available options; use 'DPNI_LINK_OPT_<X>' values
-+ * @up: Link state; '0' for down, '1' for up
-+ */
-+struct dpni_link_state {
-+ u32 rate;
-+ u64 options;
-+ int up;
++struct dpni_cmd_set_offload {
++ u8 pad[3];
++ u8 dpni_offload;
++ __le32 config;
+};
+
-+int dpni_get_link_state(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ struct dpni_link_state *state);
++struct dpni_cmd_get_offload {
++ u8 pad[3];
++ u8 dpni_offload;
++};
+
-+/**
-+ * struct dpni_tx_shaping_cfg - Structure representing DPNI tx shaping configuration
-+ * @rate_limit: rate in Mbps
-+ * @max_burst_size: burst size in bytes (up to 64KB)
-+ */
-+struct dpni_tx_shaping_cfg {
-+ u32 rate_limit;
-+ u16 max_burst_size;
++struct dpni_rsp_get_offload {
++ __le32 pad;
++ __le32 config;
+};
+
-+int dpni_set_tx_shaping(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ const struct dpni_tx_shaping_cfg *tx_shaper);
++struct dpni_cmd_get_qdid {
++ u8 qtype;
++};
+
-+int dpni_set_max_frame_length(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 max_frame_length);
++struct dpni_rsp_get_qdid {
++ __le16 qdid;
++};
+
-+int dpni_get_max_frame_length(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 *max_frame_length);
++struct dpni_rsp_get_tx_data_offset {
++ __le16 data_offset;
++};
+
-+int dpni_set_multicast_promisc(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ int en);
++struct dpni_cmd_get_statistics {
++ u8 page_number;
++ u8 param;
++};
+
-+int dpni_get_multicast_promisc(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ int *en);
++struct dpni_rsp_get_statistics {
++ __le64 counter[DPNI_STATISTICS_CNT];
++};
+
-+int dpni_set_unicast_promisc(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ int en);
++struct dpni_cmd_set_link_cfg {
++ /* cmd word 0 */
++ __le64 pad0;
++ /* cmd word 1 */
++ __le32 rate;
++ __le32 pad1;
++ /* cmd word 2 */
++ __le64 options;
++};
+
-+int dpni_get_unicast_promisc(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ int *en);
++#define DPNI_LINK_STATE_SHIFT 0
++#define DPNI_LINK_STATE_SIZE 1
+
-+int dpni_set_primary_mac_addr(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ const u8 mac_addr[6]);
++struct dpni_rsp_get_link_state {
++ /* response word 0 */
++ __le32 pad0;
++ /* from LSB: up:1 */
++ u8 flags;
++ u8 pad1[3];
++ /* response word 1 */
++ __le32 rate;
++ __le32 pad2;
++ /* response word 2 */
++ __le64 options;
++};
+
-+int dpni_get_primary_mac_addr(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 mac_addr[6]);
++#define DPNI_COUPLED_SHIFT 0
++#define DPNI_COUPLED_SIZE 1
+
-+int dpni_get_port_mac_addr(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 mac_addr[6]);
++struct dpni_cmd_set_tx_shaping {
++ /* cmd word 0 */
++ __le16 tx_cr_max_burst_size;
++ __le16 tx_er_max_burst_size;
++ __le32 pad;
++ /* cmd word 1 */
++ __le32 tx_cr_rate_limit;
++ __le32 tx_er_rate_limit;
++ /* cmd word 2 */
++ /* from LSB: coupled:1 */
++ u8 coupled;
++};
+
-+int dpni_add_mac_addr(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ const u8 mac_addr[6]);
++struct dpni_cmd_set_max_frame_length {
++ __le16 max_frame_length;
++};
+
-+int dpni_remove_mac_addr(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ const u8 mac_addr[6]);
-+
-+int dpni_clear_mac_filters(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ int unicast,
-+ int multicast);
++struct dpni_rsp_get_max_frame_length {
++ __le16 max_frame_length;
++};
+
-+/**
-+ * enum dpni_dist_mode - DPNI distribution mode
-+ * @DPNI_DIST_MODE_NONE: No distribution
-+ * @DPNI_DIST_MODE_HASH: Use hash distribution; only relevant if
-+ * the 'DPNI_OPT_DIST_HASH' option was set at DPNI creation
-+ * @DPNI_DIST_MODE_FS: Use explicit flow steering; only relevant if
-+ * the 'DPNI_OPT_DIST_FS' option was set at DPNI creation
-+ */
-+enum dpni_dist_mode {
-+ DPNI_DIST_MODE_NONE = 0,
-+ DPNI_DIST_MODE_HASH = 1,
-+ DPNI_DIST_MODE_FS = 2
++struct dpni_cmd_set_multicast_promisc {
++ u8 enable;
+};
+
-+/**
-+ * enum dpni_fs_miss_action - DPNI Flow Steering miss action
-+ * @DPNI_FS_MISS_DROP: In case of no-match, drop the frame
-+ * @DPNI_FS_MISS_EXPLICIT_FLOWID: In case of no-match, use explicit flow-id
-+ * @DPNI_FS_MISS_HASH: In case of no-match, distribute using hash
-+ */
-+enum dpni_fs_miss_action {
-+ DPNI_FS_MISS_DROP = 0,
-+ DPNI_FS_MISS_EXPLICIT_FLOWID = 1,
-+ DPNI_FS_MISS_HASH = 2
++struct dpni_rsp_get_multicast_promisc {
++ u8 enabled;
+};
+
-+/**
-+ * struct dpni_fs_tbl_cfg - Flow Steering table configuration
-+ * @miss_action: Miss action selection
-+ * @default_flow_id: Used when 'miss_action = DPNI_FS_MISS_EXPLICIT_FLOWID'
-+ */
-+struct dpni_fs_tbl_cfg {
-+ enum dpni_fs_miss_action miss_action;
-+ u16 default_flow_id;
++struct dpni_cmd_set_unicast_promisc {
++ u8 enable;
+};
+
-+int dpni_prepare_key_cfg(const struct dpkg_profile_cfg *cfg,
-+ u8 *key_cfg_buf);
++struct dpni_rsp_get_unicast_promisc {
++ u8 enabled;
++};
+
-+/**
-+ * struct dpni_rx_tc_dist_cfg - Rx traffic class distribution configuration
-+ * @dist_size: Set the distribution size;
-+ * supported values: 1,2,3,4,6,7,8,12,14,16,24,28,32,48,56,64,96,
-+ * 112,128,192,224,256,384,448,512,768,896,1024
-+ * @dist_mode: Distribution mode
-+ * @key_cfg_iova: I/O virtual address of 256 bytes DMA-able memory filled with
-+ * the extractions to be used for the distribution key by calling
-+ * dpni_prepare_key_cfg() relevant only when
-+ * 'dist_mode != DPNI_DIST_MODE_NONE', otherwise it can be '0'
-+ * @fs_cfg: Flow Steering table configuration; only relevant if
-+ * 'dist_mode = DPNI_DIST_MODE_FS'
-+ */
-+struct dpni_rx_tc_dist_cfg {
-+ u16 dist_size;
-+ enum dpni_dist_mode dist_mode;
-+ u64 key_cfg_iova;
-+ struct dpni_fs_tbl_cfg fs_cfg;
++struct dpni_cmd_set_primary_mac_addr {
++ __le16 pad;
++ u8 mac_addr[6];
+};
+
-+int dpni_set_rx_tc_dist(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 tc_id,
-+ const struct dpni_rx_tc_dist_cfg *cfg);
++struct dpni_rsp_get_primary_mac_addr {
++ __le16 pad;
++ u8 mac_addr[6];
++};
+
-+/**
-+ * enum dpni_dest - DPNI destination types
-+ * @DPNI_DEST_NONE: Unassigned destination; The queue is set in parked mode and
-+ * does not generate FQDAN notifications; user is expected to
-+ * dequeue from the queue based on polling or other user-defined
-+ * method
-+ * @DPNI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN
-+ * notifications to the specified DPIO; user is expected to dequeue
-+ * from the queue only after notification is received
-+ * @DPNI_DEST_DPCON: The queue is set in schedule mode and does not generate
-+ * FQDAN notifications, but is connected to the specified DPCON
-+ * object; user is expected to dequeue from the DPCON channel
-+ */
-+enum dpni_dest {
-+ DPNI_DEST_NONE = 0,
-+ DPNI_DEST_DPIO = 1,
-+ DPNI_DEST_DPCON = 2
++struct dpni_rsp_get_port_mac_addr {
++ __le16 pad;
++ u8 mac_addr[6];
+};
+
-+/**
-+ * struct dpni_queue - Queue structure
-+ * @user_context: User data, presented to the user along with any frames from
-+ * this queue. Not relevant for Tx queues.
-+ */
-+struct dpni_queue {
-+/**
-+ * struct destination - Destination structure
-+ * @id: ID of the destination, only relevant if DEST_TYPE is > 0.
-+ * Identifies either a DPIO or a DPCON object. Not relevant for
-+ * Tx queues.
-+ * @type: May be one of the following:
-+ * 0 - No destination, queue can be manually queried, but will not
-+ * push traffic or notifications to a DPIO;
-+ * 1 - The destination is a DPIO. When traffic becomes available in
-+ * the queue a FQDAN (FQ data available notification) will be
-+ * generated to selected DPIO;
-+ * 2 - The destination is a DPCON. The queue is associated with a
-+ * DPCON object for the purpose of scheduling between multiple
-+ * queues. The DPCON may be independently configured to
-+ * generate notifications. Not relevant for Tx queues.
-+ * @hold_active: Hold active, maintains a queue scheduled for longer
-+ * in a DPIO during dequeue to reduce spread of traffic.
-+ * Only relevant if queues are not affined to a single DPIO.
-+ */
-+ struct {
-+ u16 id;
-+ enum dpni_dest type;
-+ char hold_active;
-+ u8 priority;
-+ } destination;
-+ u64 user_context;
-+ struct {
-+ u64 value;
-+ char stash_control;
-+ } flc;
++struct dpni_cmd_add_mac_addr {
++ __le16 pad;
++ u8 mac_addr[6];
+};
+
-+/**
-+ * struct dpni_queue_id - Queue identification, used for enqueue commands
-+ * or queue control
-+ * @fqid: FQID used for enqueueing to and/or configuration of this specific FQ
-+ * @qdbin: Queueing bin, used to enqueue using QDID, DQBIN, QPRI. Only relevant
-+ * for Tx queues.
-+ */
-+struct dpni_queue_id {
-+ u32 fqid;
-+ u16 qdbin;
++struct dpni_cmd_remove_mac_addr {
++ __le16 pad;
++ u8 mac_addr[6];
+};
+
-+/**
-+ * Set User Context
-+ */
-+#define DPNI_QUEUE_OPT_USER_CTX 0x00000001
-+#define DPNI_QUEUE_OPT_DEST 0x00000002
-+#define DPNI_QUEUE_OPT_FLC 0x00000004
-+#define DPNI_QUEUE_OPT_HOLD_ACTIVE 0x00000008
++#define DPNI_UNICAST_FILTERS_SHIFT 0
++#define DPNI_UNICAST_FILTERS_SIZE 1
++#define DPNI_MULTICAST_FILTERS_SHIFT 1
++#define DPNI_MULTICAST_FILTERS_SIZE 1
+
-+int dpni_set_queue(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ enum dpni_queue_type qtype,
-+ u8 tc,
-+ u8 index,
-+ u8 options,
-+ const struct dpni_queue *queue);
++struct dpni_cmd_clear_mac_filters {
++ /* from LSB: unicast:1, multicast:1 */
++ u8 flags;
++};
+
-+int dpni_get_queue(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ enum dpni_queue_type qtype,
-+ u8 tc,
-+ u8 index,
-+ struct dpni_queue *queue,
-+ struct dpni_queue_id *qid);
++#define DPNI_SEPARATE_GRP_SHIFT 0
++#define DPNI_SEPARATE_GRP_SIZE 1
++#define DPNI_MODE_1_SHIFT 0
++#define DPNI_MODE_1_SIZE 4
++#define DPNI_MODE_2_SHIFT 4
++#define DPNI_MODE_2_SIZE 4
+
-+/**
-+ * enum dpni_congestion_unit - DPNI congestion units
-+ * @DPNI_CONGESTION_UNIT_BYTES: bytes units
-+ * @DPNI_CONGESTION_UNIT_FRAMES: frames units
-+ */
-+enum dpni_congestion_unit {
-+ DPNI_CONGESTION_UNIT_BYTES = 0,
-+ DPNI_CONGESTION_UNIT_FRAMES
++struct dpni_cmd_set_tx_priorities {
++ __le16 flags;
++ u8 prio_group_A;
++ u8 prio_group_B;
++ __le32 pad0;
++ u8 modes[4];
++ __le32 pad1;
++ __le64 pad2;
++ __le16 delta_bandwidth[8];
+};
+
-+/**
-+ * enum dpni_congestion_point - Structure representing congestion point
-+ * @DPNI_CP_QUEUE: Set taildrop per queue, identified by QUEUE_TYPE, TC and
-+ * QUEUE_INDEX
-+ * @DPNI_CP_GROUP: Set taildrop per queue group. Depending on options used to
-+ * define the DPNI this can be either per TC (default) or per
-+ * interface (DPNI_OPT_SHARED_CONGESTION set at DPNI create).
-+ * QUEUE_INDEX is ignored if this type is used.
-+ */
-+enum dpni_congestion_point {
-+ DPNI_CP_QUEUE,
-+ DPNI_CP_GROUP,
++#define DPNI_DIST_MODE_SHIFT 0
++#define DPNI_DIST_MODE_SIZE 4
++#define DPNI_MISS_ACTION_SHIFT 4
++#define DPNI_MISS_ACTION_SIZE 4
++
++struct dpni_cmd_set_rx_tc_dist {
++ /* cmd word 0 */
++ __le16 dist_size;
++ u8 tc_id;
++ /* from LSB: dist_mode:4, miss_action:4 */
++ u8 flags;
++ __le16 pad0;
++ __le16 default_flow_id;
++ /* cmd word 1..5 */
++ __le64 pad1[5];
++ /* cmd word 6 */
++ __le64 key_cfg_iova;
+};
+
-+/**
-+ * struct dpni_dest_cfg - Structure representing DPNI destination parameters
-+ * @dest_type: Destination type
-+ * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type
-+ * @priority: Priority selection within the DPIO or DPCON channel; valid values
-+ * are 0-1 or 0-7, depending on the number of priorities in that
-+ * channel; not relevant for 'DPNI_DEST_NONE' option
++/* dpni_set_rx_tc_dist extension (structure of the DMA-able memory at
++ * key_cfg_iova)
+ */
-+struct dpni_dest_cfg {
-+ enum dpni_dest dest_type;
-+ int dest_id;
-+ u8 priority;
++struct dpni_mask_cfg {
++ u8 mask;
++ u8 offset;
+};
+
-+/* DPNI congestion options */
++#define DPNI_EFH_TYPE_SHIFT 0
++#define DPNI_EFH_TYPE_SIZE 4
++#define DPNI_EXTRACT_TYPE_SHIFT 0
++#define DPNI_EXTRACT_TYPE_SIZE 4
+
-+/**
-+ * CSCN message is written to message_iova once entering a
-+ * congestion state (see 'threshold_entry')
-+ */
-+#define DPNI_CONG_OPT_WRITE_MEM_ON_ENTER 0x00000001
-+/**
-+ * CSCN message is written to message_iova once exiting a
-+ * congestion state (see 'threshold_exit')
-+ */
-+#define DPNI_CONG_OPT_WRITE_MEM_ON_EXIT 0x00000002
-+/**
-+ * CSCN write will attempt to allocate into a cache (coherent write);
-+ * valid only if 'DPNI_CONG_OPT_WRITE_MEM_<X>' is selected
-+ */
-+#define DPNI_CONG_OPT_COHERENT_WRITE 0x00000004
-+/**
-+ * if 'dest_cfg.dest_type != DPNI_DEST_NONE' CSCN message is sent to
-+ * DPIO/DPCON's WQ channel once entering a congestion state
-+ * (see 'threshold_entry')
-+ */
-+#define DPNI_CONG_OPT_NOTIFY_DEST_ON_ENTER 0x00000008
-+/**
-+ * if 'dest_cfg.dest_type != DPNI_DEST_NONE' CSCN message is sent to
-+ * DPIO/DPCON's WQ channel once exiting a congestion state
-+ * (see 'threshold_exit')
-+ */
-+#define DPNI_CONG_OPT_NOTIFY_DEST_ON_EXIT 0x00000010
-+/**
-+ * if 'dest_cfg.dest_type != DPNI_DEST_NONE' when the CSCN is written to the
-+ * sw-portal's DQRR, the DQRI interrupt is asserted immediately (if enabled)
-+ */
-+#define DPNI_CONG_OPT_INTR_COALESCING_DISABLED 0x00000020
++struct dpni_dist_extract {
++ /* word 0 */
++ u8 prot;
++ /* EFH type stored in the 4 least significant bits */
++ u8 efh_type;
++ u8 size;
++ u8 offset;
++ __le32 field;
++ /* word 1 */
++ u8 hdr_index;
++ u8 constant;
++ u8 num_of_repeats;
++ u8 num_of_byte_masks;
++ /* Extraction type is stored in the 4 LSBs */
++ u8 extract_type;
++ u8 pad[3];
++ /* word 2 */
++ struct dpni_mask_cfg masks[4];
++};
+
-+/**
-+ * struct dpni_congestion_notification_cfg - congestion notification
-+ * configuration
-+ * @units: units type
-+ * @threshold_entry: above this threshold we enter a congestion state.
-+ * set it to '0' to disable it
-+ * @threshold_exit: below this threshold we exit the congestion state.
-+ * @message_ctx: The context that will be part of the CSCN message
-+ * @message_iova: I/O virtual address (must be in DMA-able memory),
-+ * must be 16B aligned; valid only if 'DPNI_CONG_OPT_WRITE_MEM_<X>' is
-+ * contained in 'options'
-+ * @dest_cfg: CSCN can be send to either DPIO or DPCON WQ channel
-+ * @notification_mode: Mask of available options; use 'DPNI_CONG_OPT_<X>' values
-+ */
++struct dpni_ext_set_rx_tc_dist {
++ /* extension word 0 */
++ u8 num_extracts;
++ u8 pad[7];
++ /* words 1..25 */
++ struct dpni_dist_extract extracts[DPKG_MAX_NUM_OF_EXTRACTS];
++};
+
-+struct dpni_congestion_notification_cfg {
-+ enum dpni_congestion_unit units;
-+ u32 threshold_entry;
-+ u32 threshold_exit;
-+ u64 message_ctx;
-+ u64 message_iova;
-+ struct dpni_dest_cfg dest_cfg;
-+ u16 notification_mode;
++struct dpni_cmd_get_queue {
++ u8 qtype;
++ u8 tc;
++ u8 index;
+};
+
-+int dpni_set_congestion_notification(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ enum dpni_queue_type qtype,
-+ u8 tc_id,
-+ const struct dpni_congestion_notification_cfg *cfg);
++#define DPNI_DEST_TYPE_SHIFT 0
++#define DPNI_DEST_TYPE_SIZE 4
++#define DPNI_STASH_CTRL_SHIFT 6
++#define DPNI_STASH_CTRL_SIZE 1
++#define DPNI_HOLD_ACTIVE_SHIFT 7
++#define DPNI_HOLD_ACTIVE_SIZE 1
+
-+/**
-+ * struct dpni_taildrop - Structure representing the taildrop
-+ * @enable: Indicates whether the taildrop is active or not.
-+ * @units: Indicates the unit of THRESHOLD. Queue taildrop only supports
-+ * byte units, this field is ignored and assumed = 0 if
-+ * CONGESTION_POINT is 0.
-+ * @threshold: Threshold value, in units identified by UNITS field. Value 0
-+ * cannot be used as a valid taildrop threshold, THRESHOLD must
-+ * be > 0 if the taildrop is enabled.
-+ */
-+struct dpni_taildrop {
-+ char enable;
-+ enum dpni_congestion_unit units;
-+ u32 threshold;
++struct dpni_rsp_get_queue {
++ /* response word 0 */
++ __le64 pad0;
++ /* response word 1 */
++ __le32 dest_id;
++ __le16 pad1;
++ u8 dest_prio;
++ /* From LSB: dest_type:4, pad:2, flc_stash_ctrl:1, hold_active:1 */
++ u8 flags;
++ /* response word 2 */
++ __le64 flc;
++ /* response word 3 */
++ __le64 user_context;
++ /* response word 4 */
++ __le32 fqid;
++ __le16 qdbin;
+};
+
-+int dpni_set_taildrop(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ enum dpni_congestion_point cg_point,
-+ enum dpni_queue_type q_type,
-+ u8 tc,
-+ u8 q_index,
-+ struct dpni_taildrop *taildrop);
++struct dpni_cmd_set_queue {
++ /* cmd word 0 */
++ u8 qtype;
++ u8 tc;
++ u8 index;
++ u8 options;
++ __le32 pad0;
++ /* cmd word 1 */
++ __le32 dest_id;
++ __le16 pad1;
++ u8 dest_prio;
++ u8 flags;
++ /* cmd word 2 */
++ __le64 flc;
++ /* cmd word 3 */
++ __le64 user_context;
++};
+
-+int dpni_get_taildrop(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ enum dpni_congestion_point cg_point,
-+ enum dpni_queue_type q_type,
-+ u8 tc,
-+ u8 q_index,
-+ struct dpni_taildrop *taildrop);
++#define DPNI_DISCARD_ON_MISS_SHIFT 0
++#define DPNI_DISCARD_ON_MISS_SIZE 1
+
-+/**
-+ * struct dpni_rule_cfg - Rule configuration for table lookup
-+ * @key_iova: I/O virtual address of the key (must be in DMA-able memory)
-+ * @mask_iova: I/O virtual address of the mask (must be in DMA-able memory)
-+ * @key_size: key and mask size (in bytes)
-+ */
-+struct dpni_rule_cfg {
-+ u64 key_iova;
-+ u64 mask_iova;
-+ u8 key_size;
++struct dpni_cmd_set_qos_table {
++ __le32 pad;
++ u8 default_tc;
++ /* only the LSB */
++ u8 discard_on_miss;
++ __le16 pad1[21];
++ __le64 key_cfg_iova;
+};
+
-+/**
-+ * Discard matching traffic. If set, this takes precedence over any other
-+ * configuration and matching traffic is always discarded.
-+ */
-+#define DPNI_FS_OPT_DISCARD 0x1
++struct dpni_cmd_add_qos_entry {
++ __le16 pad;
++ u8 tc_id;
++ u8 key_size;
++ __le16 index;
++ __le16 pad2;
++ __le64 key_iova;
++ __le64 mask_iova;
++};
+
-+/**
-+ * Set FLC value. If set, flc member of struct dpni_fs_action_cfg is used to
-+ * override the FLC value set per queue.
-+ * For more details check the Frame Descriptor section in the hardware
-+ * documentation.
-+ */
-+#define DPNI_FS_OPT_SET_FLC 0x2
++struct dpni_cmd_remove_qos_entry {
++ u8 pad1[3];
++ u8 key_size;
++ __le32 pad2;
++ __le64 key_iova;
++ __le64 mask_iova;
++};
+
-+/*
-+ * Indicates whether the 6 least significant bits of FLC are used for stash
-+ * control. If set, the 6 least significant bits in value are interpreted as
-+ * follows:
-+ * - bits 0-1: indicates the number of 64 byte units of context that are
-+ * stashed. FLC value is interpreted as a memory address in this case,
-+ * excluding the 6 LS bits.
-+ * - bits 2-3: indicates the number of 64 byte units of frame annotation
-+ * to be stashed. Annotation is placed at FD[ADDR].
-+ * - bits 4-5: indicates the number of 64 byte units of frame data to be
-+ * stashed. Frame data is placed at FD[ADDR] + FD[OFFSET].
-+ * This flag is ignored if DPNI_FS_OPT_SET_FLC is not specified.
-+ */
-+#define DPNI_FS_OPT_SET_STASH_CONTROL 0x4
++struct dpni_cmd_add_fs_entry {
++ /* cmd word 0 */
++ __le16 options;
++ u8 tc_id;
++ u8 key_size;
++ __le16 index;
++ __le16 flow_id;
++ /* cmd word 1 */
++ __le64 key_iova;
++ /* cmd word 2 */
++ __le64 mask_iova;
++ /* cmd word 3 */
++ __le64 flc;
++};
+
-+/**
-+ * struct dpni_fs_action_cfg - Action configuration for table look-up
-+ * @flc: FLC value for traffic matching this rule. Please check the Frame
-+ * Descriptor section in the hardware documentation for more information.
-+ * @flow_id: Identifies the Rx queue used for matching traffic. Supported
-+ * values are in range 0 to num_queue-1.
-+ * @options: Any combination of DPNI_FS_OPT_ values.
-+ */
-+struct dpni_fs_action_cfg {
-+ u64 flc;
-+ u16 flow_id;
-+ u16 options;
++struct dpni_cmd_remove_fs_entry {
++ /* cmd word 0 */
++ __le16 pad0;
++ u8 tc_id;
++ u8 key_size;
++ __le32 pad1;
++ /* cmd word 1 */
++ __le64 key_iova;
++ /* cmd word 2 */
++ __le64 mask_iova;
+};
+
-+int dpni_add_fs_entry(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 tc_id,
-+ u16 index,
-+ const struct dpni_rule_cfg *cfg,
-+ const struct dpni_fs_action_cfg *action);
++struct dpni_cmd_set_taildrop {
++ /* cmd word 0 */
++ u8 congestion_point;
++ u8 qtype;
++ u8 tc;
++ u8 index;
++ __le32 pad0;
++ /* cmd word 1 */
++ /* Only least significant bit is relevant */
++ u8 enable;
++ u8 pad1;
++ u8 units;
++ u8 pad2;
++ __le32 threshold;
++};
+
-+int dpni_remove_fs_entry(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 tc_id,
-+ const struct dpni_rule_cfg *cfg);
++struct dpni_cmd_get_taildrop {
++ u8 congestion_point;
++ u8 qtype;
++ u8 tc;
++ u8 index;
++};
+
-+#endif /* __FSL_DPNI_H */
---- /dev/null
-+++ b/drivers/staging/fsl-dpaa2/ethernet/net.h
-@@ -0,0 +1,480 @@
-+/* Copyright 2013-2015 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++struct dpni_rsp_get_taildrop {
++ /* cmd word 0 */
++ __le64 pad0;
++ /* cmd word 1 */
++ /* only least significant bit is relevant */
++ u8 enable;
++ u8 pad1;
++ u8 units;
++ u8 pad2;
++ __le32 threshold;
++};
++
++struct dpni_rsp_get_api_version {
++ __le16 major;
++ __le16 minor;
++};
++
++#define DPNI_DEST_TYPE_SHIFT 0
++#define DPNI_DEST_TYPE_SIZE 4
++#define DPNI_CONG_UNITS_SHIFT 4
++#define DPNI_CONG_UNITS_SIZE 2
++
++struct dpni_cmd_set_congestion_notification {
++ /* cmd word 0 */
++ u8 qtype;
++ u8 tc;
++ u8 pad[6];
++ /* cmd word 1 */
++ __le32 dest_id;
++ __le16 notification_mode;
++ u8 dest_priority;
++ /* from LSB: dest_type:4, units:2 */
++ u8 type_units;
++ /* cmd word 2 */
++ __le64 message_iova;
++ /* cmd word 3 */
++ __le64 message_ctx;
++ /* cmd word 4 */
++ __le32 threshold_entry;
++ __le32 threshold_exit;
++};
++
++struct dpni_cmd_get_congestion_notification {
++ /* cmd word 0 */
++ u8 qtype;
++ u8 tc;
++};
++
++struct dpni_rsp_get_congestion_notification {
++ /* cmd word 0 */
++ __le64 pad;
++ /* cmd word 1 */
++ __le32 dest_id;
++ __le16 notification_mode;
++ u8 dest_priority;
++ /* from LSB: dest_type:4, units:2 */
++ u8 type_units;
++ /* cmd word 2 */
++ __le64 message_iova;
++ /* cmd word 3 */
++ __le64 message_ctx;
++ /* cmd word 4 */
++ __le32 threshold_entry;
++ __le32 threshold_exit;
++};
++
++#define DPNI_RX_FS_DIST_ENABLE_SHIFT 0
++#define DPNI_RX_FS_DIST_ENABLE_SIZE 1
++struct dpni_cmd_set_rx_fs_dist {
++ __le16 dist_size;
++ u8 enable;
++ u8 tc;
++ __le16 miss_flow_id;
++ __le16 pad;
++ __le64 key_cfg_iova;
++};
++
++#define DPNI_RX_HASH_DIST_ENABLE_SHIFT 0
++#define DPNI_RX_HASH_DIST_ENABLE_SIZE 1
++struct dpni_cmd_set_rx_hash_dist {
++ __le16 dist_size;
++ u8 enable;
++ u8 tc;
++ __le32 pad;
++ __le64 key_cfg_iova;
++};
++
++#endif /* _FSL_DPNI_CMD_H */
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/ethernet/dpni.c
+@@ -0,0 +1,2112 @@
++/* Copyright 2013-2016 Freescale Semiconductor Inc.
++ * Copyright 2016 NXP
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
-+#ifndef __FSL_NET_H
-+#define __FSL_NET_H
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/fsl/mc.h>
++#include "dpni.h"
++#include "dpni-cmd.h"
+
-+#define LAST_HDR_INDEX 0xFFFFFFFF
++/**
++ * dpni_prepare_key_cfg() - prepare the extract parameters
++ * @cfg: Key Generation profile (rule) definition
++ * @key_cfg_buf: Zeroed 256-byte memory buffer, filled here and later
++ *   mapped to DMA
++ *
++ * This function has to be called before the following functions:
++ * - dpni_set_rx_tc_dist()
++ * - dpni_set_qos_table()
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_prepare_key_cfg(const struct dpkg_profile_cfg *cfg, u8 *key_cfg_buf)
++{
++ int i, j;
++ struct dpni_ext_set_rx_tc_dist *dpni_ext;
++ struct dpni_dist_extract *extr;
+
-+/*****************************************************************************/
-+/* Protocol fields */
-+/*****************************************************************************/
++ if (cfg->num_extracts > DPKG_MAX_NUM_OF_EXTRACTS)
++ return -EINVAL;
+
-+/************************* Ethernet fields *********************************/
-+#define NH_FLD_ETH_DA (1)
-+#define NH_FLD_ETH_SA (NH_FLD_ETH_DA << 1)
-+#define NH_FLD_ETH_LENGTH (NH_FLD_ETH_DA << 2)
-+#define NH_FLD_ETH_TYPE (NH_FLD_ETH_DA << 3)
-+#define NH_FLD_ETH_FINAL_CKSUM (NH_FLD_ETH_DA << 4)
-+#define NH_FLD_ETH_PADDING (NH_FLD_ETH_DA << 5)
-+#define NH_FLD_ETH_ALL_FIELDS ((NH_FLD_ETH_DA << 6) - 1)
++ dpni_ext = (struct dpni_ext_set_rx_tc_dist *)key_cfg_buf;
++ dpni_ext->num_extracts = cfg->num_extracts;
+
-+#define NH_FLD_ETH_ADDR_SIZE 6
++ for (i = 0; i < cfg->num_extracts; i++) {
++ extr = &dpni_ext->extracts[i];
+
-+/*************************** VLAN fields ***********************************/
-+#define NH_FLD_VLAN_VPRI (1)
-+#define NH_FLD_VLAN_CFI (NH_FLD_VLAN_VPRI << 1)
-+#define NH_FLD_VLAN_VID (NH_FLD_VLAN_VPRI << 2)
-+#define NH_FLD_VLAN_LENGTH (NH_FLD_VLAN_VPRI << 3)
-+#define NH_FLD_VLAN_TYPE (NH_FLD_VLAN_VPRI << 4)
-+#define NH_FLD_VLAN_ALL_FIELDS ((NH_FLD_VLAN_VPRI << 5) - 1)
++ switch (cfg->extracts[i].type) {
++ case DPKG_EXTRACT_FROM_HDR:
++ extr->prot = cfg->extracts[i].extract.from_hdr.prot;
++ dpni_set_field(extr->efh_type, EFH_TYPE,
++ cfg->extracts[i].extract.from_hdr.type);
++ extr->size = cfg->extracts[i].extract.from_hdr.size;
++ extr->offset = cfg->extracts[i].extract.from_hdr.offset;
++ extr->field = cpu_to_le32(
++ cfg->extracts[i].extract.from_hdr.field);
++ extr->hdr_index =
++ cfg->extracts[i].extract.from_hdr.hdr_index;
++ break;
++ case DPKG_EXTRACT_FROM_DATA:
++ extr->size = cfg->extracts[i].extract.from_data.size;
++ extr->offset =
++ cfg->extracts[i].extract.from_data.offset;
++ break;
++ case DPKG_EXTRACT_FROM_PARSE:
++ extr->size = cfg->extracts[i].extract.from_parse.size;
++ extr->offset =
++ cfg->extracts[i].extract.from_parse.offset;
++ break;
++ default:
++ return -EINVAL;
++ }
+
-+#define NH_FLD_VLAN_TCI (NH_FLD_VLAN_VPRI | \
-+ NH_FLD_VLAN_CFI | \
-+ NH_FLD_VLAN_VID)
++ extr->num_of_byte_masks = cfg->extracts[i].num_of_byte_masks;
++ dpni_set_field(extr->extract_type, EXTRACT_TYPE,
++ cfg->extracts[i].type);
+
-+/************************ IP (generic) fields ******************************/
-+#define NH_FLD_IP_VER (1)
-+#define NH_FLD_IP_DSCP (NH_FLD_IP_VER << 2)
-+#define NH_FLD_IP_ECN (NH_FLD_IP_VER << 3)
-+#define NH_FLD_IP_PROTO (NH_FLD_IP_VER << 4)
-+#define NH_FLD_IP_SRC (NH_FLD_IP_VER << 5)
-+#define NH_FLD_IP_DST (NH_FLD_IP_VER << 6)
-+#define NH_FLD_IP_TOS_TC (NH_FLD_IP_VER << 7)
-+#define NH_FLD_IP_ID (NH_FLD_IP_VER << 8)
-+#define NH_FLD_IP_ALL_FIELDS ((NH_FLD_IP_VER << 9) - 1)
++ for (j = 0; j < DPKG_NUM_OF_MASKS; j++) {
++ extr->masks[j].mask = cfg->extracts[i].masks[j].mask;
++ extr->masks[j].offset =
++ cfg->extracts[i].masks[j].offset;
++ }
++ }
+
-+#define NH_FLD_IP_PROTO_SIZE 1
++ return 0;
++}
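++
++/* Usage sketch (illustrative; 'dev', 'cfg' and the error handling are
++ * assumptions standing in for driver code, not part of this patch):
++ *
++ *   u8 *key_buf = kzalloc(256, GFP_KERNEL);
++ *   dma_addr_t key_iova;
++ *   int err;
++ *
++ *   err = dpni_prepare_key_cfg(&cfg, key_buf);
++ *   if (!err) {
++ *           key_iova = dma_map_single(dev, key_buf, 256, DMA_TO_DEVICE);
++ *           ... pass 'key_iova' as key_cfg_iova to dpni_set_rx_tc_dist() ...
++ *   }
++ */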
+
-+/***************************** IPV4 fields *********************************/
-+#define NH_FLD_IPV4_VER (1)
-+#define NH_FLD_IPV4_HDR_LEN (NH_FLD_IPV4_VER << 1)
-+#define NH_FLD_IPV4_TOS (NH_FLD_IPV4_VER << 2)
-+#define NH_FLD_IPV4_TOTAL_LEN (NH_FLD_IPV4_VER << 3)
-+#define NH_FLD_IPV4_ID (NH_FLD_IPV4_VER << 4)
-+#define NH_FLD_IPV4_FLAG_D (NH_FLD_IPV4_VER << 5)
-+#define NH_FLD_IPV4_FLAG_M (NH_FLD_IPV4_VER << 6)
-+#define NH_FLD_IPV4_OFFSET (NH_FLD_IPV4_VER << 7)
-+#define NH_FLD_IPV4_TTL (NH_FLD_IPV4_VER << 8)
-+#define NH_FLD_IPV4_PROTO (NH_FLD_IPV4_VER << 9)
-+#define NH_FLD_IPV4_CKSUM (NH_FLD_IPV4_VER << 10)
-+#define NH_FLD_IPV4_SRC_IP (NH_FLD_IPV4_VER << 11)
-+#define NH_FLD_IPV4_DST_IP (NH_FLD_IPV4_VER << 12)
-+#define NH_FLD_IPV4_OPTS (NH_FLD_IPV4_VER << 13)
-+#define NH_FLD_IPV4_OPTS_COUNT (NH_FLD_IPV4_VER << 14)
-+#define NH_FLD_IPV4_ALL_FIELDS ((NH_FLD_IPV4_VER << 15) - 1)
++/**
++ * dpni_open() - Open a control session for the specified object
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @dpni_id: DPNI unique ID
++ * @token: Returned token; use in subsequent API calls
++ *
++ * This function can be used to open a control session for an
++ * already created object; an object may have been declared in
++ * the DPL or by calling the dpni_create() function.
++ * This function returns a unique authentication token,
++ * associated with the specific object ID and the specific MC
++ * portal; this token must be used in all subsequent commands for
++ * this specific object.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_open(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ int dpni_id,
++ u16 *token)
++{
++ struct fsl_mc_command cmd = { 0 };
++ struct dpni_cmd_open *cmd_params;
+
-+#define NH_FLD_IPV4_ADDR_SIZE 4
-+#define NH_FLD_IPV4_PROTO_SIZE 1
++ int err;
+
-+/***************************** IPV6 fields *********************************/
-+#define NH_FLD_IPV6_VER (1)
-+#define NH_FLD_IPV6_TC (NH_FLD_IPV6_VER << 1)
-+#define NH_FLD_IPV6_SRC_IP (NH_FLD_IPV6_VER << 2)
-+#define NH_FLD_IPV6_DST_IP (NH_FLD_IPV6_VER << 3)
-+#define NH_FLD_IPV6_NEXT_HDR (NH_FLD_IPV6_VER << 4)
-+#define NH_FLD_IPV6_FL (NH_FLD_IPV6_VER << 5)
-+#define NH_FLD_IPV6_HOP_LIMIT (NH_FLD_IPV6_VER << 6)
-+#define NH_FLD_IPV6_ID (NH_FLD_IPV6_VER << 7)
-+#define NH_FLD_IPV6_ALL_FIELDS ((NH_FLD_IPV6_VER << 8) - 1)
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_OPEN,
++ cmd_flags,
++ 0);
++ cmd_params = (struct dpni_cmd_open *)cmd.params;
++ cmd_params->dpni_id = cpu_to_le32(dpni_id);
+
-+#define NH_FLD_IPV6_ADDR_SIZE 16
-+#define NH_FLD_IPV6_NEXT_HDR_SIZE 1
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
+
-+/***************************** ICMP fields *********************************/
-+#define NH_FLD_ICMP_TYPE (1)
-+#define NH_FLD_ICMP_CODE (NH_FLD_ICMP_TYPE << 1)
-+#define NH_FLD_ICMP_CKSUM (NH_FLD_ICMP_TYPE << 2)
-+#define NH_FLD_ICMP_ID (NH_FLD_ICMP_TYPE << 3)
-+#define NH_FLD_ICMP_SQ_NUM (NH_FLD_ICMP_TYPE << 4)
-+#define NH_FLD_ICMP_ALL_FIELDS ((NH_FLD_ICMP_TYPE << 5) - 1)
++ /* retrieve response parameters */
++ *token = mc_cmd_hdr_read_token(&cmd);
+
-+#define NH_FLD_ICMP_CODE_SIZE 1
-+#define NH_FLD_ICMP_TYPE_SIZE 1
++ return 0;
++}
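++
++/* Typical pairing with dpni_close() below (sketch; 'priv' and 'dpni_id'
++ * are assumptions standing in for driver state):
++ *
++ *   u16 token;
++ *   int err;
++ *
++ *   err = dpni_open(priv->mc_io, 0, dpni_id, &token);
++ *   if (err)
++ *           return err;
++ *   ... issue dpni_*() commands using 'token' ...
++ *   dpni_close(priv->mc_io, 0, token);
++ */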
+
-+/***************************** IGMP fields *********************************/
-+#define NH_FLD_IGMP_VERSION (1)
-+#define NH_FLD_IGMP_TYPE (NH_FLD_IGMP_VERSION << 1)
-+#define NH_FLD_IGMP_CKSUM (NH_FLD_IGMP_VERSION << 2)
-+#define NH_FLD_IGMP_DATA (NH_FLD_IGMP_VERSION << 3)
-+#define NH_FLD_IGMP_ALL_FIELDS ((NH_FLD_IGMP_VERSION << 4) - 1)
++/**
++ * dpni_close() - Close the control session of the object
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ *
++ * After this function is called, no further operations are
++ * allowed on the object without opening a new control session.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_close(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token)
++{
++ struct fsl_mc_command cmd = { 0 };
+
-+/***************************** TCP fields **********************************/
-+#define NH_FLD_TCP_PORT_SRC (1)
-+#define NH_FLD_TCP_PORT_DST (NH_FLD_TCP_PORT_SRC << 1)
-+#define NH_FLD_TCP_SEQ (NH_FLD_TCP_PORT_SRC << 2)
-+#define NH_FLD_TCP_ACK (NH_FLD_TCP_PORT_SRC << 3)
-+#define NH_FLD_TCP_OFFSET (NH_FLD_TCP_PORT_SRC << 4)
-+#define NH_FLD_TCP_FLAGS (NH_FLD_TCP_PORT_SRC << 5)
-+#define NH_FLD_TCP_WINDOW (NH_FLD_TCP_PORT_SRC << 6)
-+#define NH_FLD_TCP_CKSUM (NH_FLD_TCP_PORT_SRC << 7)
-+#define NH_FLD_TCP_URGPTR (NH_FLD_TCP_PORT_SRC << 8)
-+#define NH_FLD_TCP_OPTS (NH_FLD_TCP_PORT_SRC << 9)
-+#define NH_FLD_TCP_OPTS_COUNT (NH_FLD_TCP_PORT_SRC << 10)
-+#define NH_FLD_TCP_ALL_FIELDS ((NH_FLD_TCP_PORT_SRC << 11) - 1)
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLOSE,
++ cmd_flags,
++ token);
+
-+#define NH_FLD_TCP_PORT_SIZE 2
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
+
-+/***************************** UDP fields **********************************/
-+#define NH_FLD_UDP_PORT_SRC (1)
-+#define NH_FLD_UDP_PORT_DST (NH_FLD_UDP_PORT_SRC << 1)
-+#define NH_FLD_UDP_LEN (NH_FLD_UDP_PORT_SRC << 2)
-+#define NH_FLD_UDP_CKSUM (NH_FLD_UDP_PORT_SRC << 3)
-+#define NH_FLD_UDP_ALL_FIELDS ((NH_FLD_UDP_PORT_SRC << 4) - 1)
++/**
++ * dpni_set_pools() - Set buffer pools configuration
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @cfg: Buffer pools configuration
++ *
++ * This configuration is mandatory for DPNI operation.
++ * Warning: allowed only while the DPNI is disabled.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_pools(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ const struct dpni_pools_cfg *cfg)
++{
++ struct fsl_mc_command cmd = { 0 };
++ struct dpni_cmd_set_pools *cmd_params;
++ int i;
+
-+#define NH_FLD_UDP_PORT_SIZE 2
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_POOLS,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_set_pools *)cmd.params;
++ cmd_params->num_dpbp = cfg->num_dpbp;
++ for (i = 0; i < DPNI_MAX_DPBP; i++) {
++ cmd_params->pool[i].dpbp_id =
++ cpu_to_le16(cfg->pools[i].dpbp_id);
++ cmd_params->pool[i].priority_mask =
++ cfg->pools[i].priority_mask;
++ cmd_params->buffer_size[i] =
++ cpu_to_le16(cfg->pools[i].buffer_size);
++ cmd_params->backup_pool_mask |=
++ DPNI_BACKUP_POOL(cfg->pools[i].backup_pool, i);
++ }
+
-+/*************************** UDP-lite fields *******************************/
-+#define NH_FLD_UDP_LITE_PORT_SRC (1)
-+#define NH_FLD_UDP_LITE_PORT_DST (NH_FLD_UDP_LITE_PORT_SRC << 1)
-+#define NH_FLD_UDP_LITE_ALL_FIELDS \
-+ ((NH_FLD_UDP_LITE_PORT_SRC << 2) - 1)
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
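++
++/* Single-pool configuration sketch (illustrative values; 'dpbp_attrs',
++ * 'rx_buf_size' and 'priv' are assumptions from the driver side):
++ *
++ *   struct dpni_pools_cfg pools_params = { 0 };
++ *   int err;
++ *
++ *   pools_params.num_dpbp = 1;
++ *   pools_params.pools[0].dpbp_id = dpbp_attrs.id;
++ *   pools_params.pools[0].buffer_size = rx_buf_size;
++ *   err = dpni_set_pools(priv->mc_io, 0, priv->token, &pools_params);
++ */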
+
-+#define NH_FLD_UDP_LITE_PORT_SIZE 2
++/**
++ * dpni_enable() - Enable the DPNI, allow sending and receiving frames.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_enable(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token)
++{
++ struct fsl_mc_command cmd = { 0 };
+
-+/*************************** UDP-encap-ESP fields **************************/
-+#define NH_FLD_UDP_ENC_ESP_PORT_SRC (1)
-+#define NH_FLD_UDP_ENC_ESP_PORT_DST (NH_FLD_UDP_ENC_ESP_PORT_SRC << 1)
-+#define NH_FLD_UDP_ENC_ESP_LEN (NH_FLD_UDP_ENC_ESP_PORT_SRC << 2)
-+#define NH_FLD_UDP_ENC_ESP_CKSUM (NH_FLD_UDP_ENC_ESP_PORT_SRC << 3)
-+#define NH_FLD_UDP_ENC_ESP_SPI (NH_FLD_UDP_ENC_ESP_PORT_SRC << 4)
-+#define NH_FLD_UDP_ENC_ESP_SEQUENCE_NUM (NH_FLD_UDP_ENC_ESP_PORT_SRC << 5)
-+#define NH_FLD_UDP_ENC_ESP_ALL_FIELDS \
-+ ((NH_FLD_UDP_ENC_ESP_PORT_SRC << 6) - 1)
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ENABLE,
++ cmd_flags,
++ token);
+
-+#define NH_FLD_UDP_ENC_ESP_PORT_SIZE 2
-+#define NH_FLD_UDP_ENC_ESP_SPI_SIZE 4
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
+
-+/***************************** SCTP fields *********************************/
-+#define NH_FLD_SCTP_PORT_SRC (1)
-+#define NH_FLD_SCTP_PORT_DST (NH_FLD_SCTP_PORT_SRC << 1)
-+#define NH_FLD_SCTP_VER_TAG (NH_FLD_SCTP_PORT_SRC << 2)
-+#define NH_FLD_SCTP_CKSUM (NH_FLD_SCTP_PORT_SRC << 3)
-+#define NH_FLD_SCTP_ALL_FIELDS ((NH_FLD_SCTP_PORT_SRC << 4) - 1)
++/**
++ * dpni_disable() - Disable the DPNI, stop sending and receiving frames.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_disable(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token)
++{
++ struct fsl_mc_command cmd = { 0 };
+
-+#define NH_FLD_SCTP_PORT_SIZE 2
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_DISABLE,
++ cmd_flags,
++ token);
+
-+/***************************** DCCP fields *********************************/
-+#define NH_FLD_DCCP_PORT_SRC (1)
-+#define NH_FLD_DCCP_PORT_DST (NH_FLD_DCCP_PORT_SRC << 1)
-+#define NH_FLD_DCCP_ALL_FIELDS ((NH_FLD_DCCP_PORT_SRC << 2) - 1)
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
+
-+#define NH_FLD_DCCP_PORT_SIZE 2
++/**
++ * dpni_is_enabled() - Check if the DPNI is enabled.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @en: Returns '1' if object is enabled; '0' otherwise
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_is_enabled(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ int *en)
++{
++ struct fsl_mc_command cmd = { 0 };
++ struct dpni_rsp_is_enabled *rsp_params;
++ int err;
+
-+/***************************** IPHC fields *********************************/
-+#define NH_FLD_IPHC_CID (1)
-+#define NH_FLD_IPHC_CID_TYPE (NH_FLD_IPHC_CID << 1)
-+#define NH_FLD_IPHC_HCINDEX (NH_FLD_IPHC_CID << 2)
-+#define NH_FLD_IPHC_GEN (NH_FLD_IPHC_CID << 3)
-+#define NH_FLD_IPHC_D_BIT (NH_FLD_IPHC_CID << 4)
-+#define NH_FLD_IPHC_ALL_FIELDS ((NH_FLD_IPHC_CID << 5) - 1)
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_IS_ENABLED,
++ cmd_flags,
++ token);
+
-+/***************************** SCTP fields *********************************/
-+#define NH_FLD_SCTP_CHUNK_DATA_TYPE (1)
-+#define NH_FLD_SCTP_CHUNK_DATA_FLAGS (NH_FLD_SCTP_CHUNK_DATA_TYPE << 1)
-+#define NH_FLD_SCTP_CHUNK_DATA_LENGTH (NH_FLD_SCTP_CHUNK_DATA_TYPE << 2)
-+#define NH_FLD_SCTP_CHUNK_DATA_TSN (NH_FLD_SCTP_CHUNK_DATA_TYPE << 3)
-+#define NH_FLD_SCTP_CHUNK_DATA_STREAM_ID (NH_FLD_SCTP_CHUNK_DATA_TYPE << 4)
-+#define NH_FLD_SCTP_CHUNK_DATA_STREAM_SQN (NH_FLD_SCTP_CHUNK_DATA_TYPE << 5)
-+#define NH_FLD_SCTP_CHUNK_DATA_PAYLOAD_PID (NH_FLD_SCTP_CHUNK_DATA_TYPE << 6)
-+#define NH_FLD_SCTP_CHUNK_DATA_UNORDERED (NH_FLD_SCTP_CHUNK_DATA_TYPE << 7)
-+#define NH_FLD_SCTP_CHUNK_DATA_BEGGINING (NH_FLD_SCTP_CHUNK_DATA_TYPE << 8)
-+#define NH_FLD_SCTP_CHUNK_DATA_END (NH_FLD_SCTP_CHUNK_DATA_TYPE << 9)
-+#define NH_FLD_SCTP_CHUNK_DATA_ALL_FIELDS \
-+ ((NH_FLD_SCTP_CHUNK_DATA_TYPE << 10) - 1)
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
+
-+/*************************** L2TPV2 fields *********************************/
-+#define NH_FLD_L2TPV2_TYPE_BIT (1)
-+#define NH_FLD_L2TPV2_LENGTH_BIT (NH_FLD_L2TPV2_TYPE_BIT << 1)
-+#define NH_FLD_L2TPV2_SEQUENCE_BIT (NH_FLD_L2TPV2_TYPE_BIT << 2)
-+#define NH_FLD_L2TPV2_OFFSET_BIT (NH_FLD_L2TPV2_TYPE_BIT << 3)
-+#define NH_FLD_L2TPV2_PRIORITY_BIT (NH_FLD_L2TPV2_TYPE_BIT << 4)
-+#define NH_FLD_L2TPV2_VERSION (NH_FLD_L2TPV2_TYPE_BIT << 5)
-+#define NH_FLD_L2TPV2_LEN (NH_FLD_L2TPV2_TYPE_BIT << 6)
-+#define NH_FLD_L2TPV2_TUNNEL_ID (NH_FLD_L2TPV2_TYPE_BIT << 7)
-+#define NH_FLD_L2TPV2_SESSION_ID (NH_FLD_L2TPV2_TYPE_BIT << 8)
-+#define NH_FLD_L2TPV2_NS (NH_FLD_L2TPV2_TYPE_BIT << 9)
-+#define NH_FLD_L2TPV2_NR (NH_FLD_L2TPV2_TYPE_BIT << 10)
-+#define NH_FLD_L2TPV2_OFFSET_SIZE (NH_FLD_L2TPV2_TYPE_BIT << 11)
-+#define NH_FLD_L2TPV2_FIRST_BYTE (NH_FLD_L2TPV2_TYPE_BIT << 12)
-+#define NH_FLD_L2TPV2_ALL_FIELDS \
-+ ((NH_FLD_L2TPV2_TYPE_BIT << 13) - 1)
++ /* retrieve response parameters */
++ rsp_params = (struct dpni_rsp_is_enabled *)cmd.params;
++ *en = dpni_get_field(rsp_params->enabled, ENABLE);
+
-+/*************************** L2TPV3 fields *********************************/
-+#define NH_FLD_L2TPV3_CTRL_TYPE_BIT (1)
-+#define NH_FLD_L2TPV3_CTRL_LENGTH_BIT (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 1)
-+#define NH_FLD_L2TPV3_CTRL_SEQUENCE_BIT (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 2)
-+#define NH_FLD_L2TPV3_CTRL_VERSION (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 3)
-+#define NH_FLD_L2TPV3_CTRL_LENGTH (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 4)
-+#define NH_FLD_L2TPV3_CTRL_CONTROL (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 5)
-+#define NH_FLD_L2TPV3_CTRL_SENT (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 6)
-+#define NH_FLD_L2TPV3_CTRL_RECV (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 7)
-+#define NH_FLD_L2TPV3_CTRL_FIRST_BYTE (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 8)
-+#define NH_FLD_L2TPV3_CTRL_ALL_FIELDS \
-+ ((NH_FLD_L2TPV3_CTRL_TYPE_BIT << 9) - 1)
++ return 0;
++}
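++
++/* Sketch of a bounded poll on the enable state (the retry policy is an
++ * assumption, not prescribed by the MC API):
++ *
++ *   int en = 0, retries = 10;
++ *   int err;
++ *
++ *   while (retries--) {
++ *           err = dpni_is_enabled(priv->mc_io, 0, priv->token, &en);
++ *           if (err || en)
++ *                   break;
++ *           msleep(100);
++ *   }
++ */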
+
-+#define NH_FLD_L2TPV3_SESS_TYPE_BIT (1)
-+#define NH_FLD_L2TPV3_SESS_VERSION (NH_FLD_L2TPV3_SESS_TYPE_BIT << 1)
-+#define NH_FLD_L2TPV3_SESS_ID (NH_FLD_L2TPV3_SESS_TYPE_BIT << 2)
-+#define NH_FLD_L2TPV3_SESS_COOKIE (NH_FLD_L2TPV3_SESS_TYPE_BIT << 3)
-+#define NH_FLD_L2TPV3_SESS_ALL_FIELDS \
-+ ((NH_FLD_L2TPV3_SESS_TYPE_BIT << 4) - 1)
++/**
++ * dpni_reset() - Reset the DPNI, returns the object to initial state.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_reset(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token)
++{
++ struct fsl_mc_command cmd = { 0 };
+
-+/**************************** PPP fields ***********************************/
-+#define NH_FLD_PPP_PID (1)
-+#define NH_FLD_PPP_COMPRESSED (NH_FLD_PPP_PID << 1)
-+#define NH_FLD_PPP_ALL_FIELDS ((NH_FLD_PPP_PID << 2) - 1)
-+
-+/************************** PPPoE fields ***********************************/
-+#define NH_FLD_PPPOE_VER (1)
-+#define NH_FLD_PPPOE_TYPE (NH_FLD_PPPOE_VER << 1)
-+#define NH_FLD_PPPOE_CODE (NH_FLD_PPPOE_VER << 2)
-+#define NH_FLD_PPPOE_SID (NH_FLD_PPPOE_VER << 3)
-+#define NH_FLD_PPPOE_LEN (NH_FLD_PPPOE_VER << 4)
-+#define NH_FLD_PPPOE_SESSION (NH_FLD_PPPOE_VER << 5)
-+#define NH_FLD_PPPOE_PID (NH_FLD_PPPOE_VER << 6)
-+#define NH_FLD_PPPOE_ALL_FIELDS ((NH_FLD_PPPOE_VER << 7) - 1)
-+
-+/************************* PPP-Mux fields **********************************/
-+#define NH_FLD_PPPMUX_PID (1)
-+#define NH_FLD_PPPMUX_CKSUM (NH_FLD_PPPMUX_PID << 1)
-+#define NH_FLD_PPPMUX_COMPRESSED (NH_FLD_PPPMUX_PID << 2)
-+#define NH_FLD_PPPMUX_ALL_FIELDS ((NH_FLD_PPPMUX_PID << 3) - 1)
-+
-+/*********************** PPP-Mux sub-frame fields **************************/
-+#define NH_FLD_PPPMUX_SUBFRM_PFF (1)
-+#define NH_FLD_PPPMUX_SUBFRM_LXT (NH_FLD_PPPMUX_SUBFRM_PFF << 1)
-+#define NH_FLD_PPPMUX_SUBFRM_LEN (NH_FLD_PPPMUX_SUBFRM_PFF << 2)
-+#define NH_FLD_PPPMUX_SUBFRM_PID (NH_FLD_PPPMUX_SUBFRM_PFF << 3)
-+#define NH_FLD_PPPMUX_SUBFRM_USE_PID (NH_FLD_PPPMUX_SUBFRM_PFF << 4)
-+#define NH_FLD_PPPMUX_SUBFRM_ALL_FIELDS \
-+ ((NH_FLD_PPPMUX_SUBFRM_PFF << 5) - 1)
-+
-+/*************************** LLC fields ************************************/
-+#define NH_FLD_LLC_DSAP (1)
-+#define NH_FLD_LLC_SSAP (NH_FLD_LLC_DSAP << 1)
-+#define NH_FLD_LLC_CTRL (NH_FLD_LLC_DSAP << 2)
-+#define NH_FLD_LLC_ALL_FIELDS ((NH_FLD_LLC_DSAP << 3) - 1)
-+
-+/*************************** NLPID fields **********************************/
-+#define NH_FLD_NLPID_NLPID (1)
-+#define NH_FLD_NLPID_ALL_FIELDS ((NH_FLD_NLPID_NLPID << 1) - 1)
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_RESET,
++ cmd_flags,
++ token);
+
-+/*************************** SNAP fields ***********************************/
-+#define NH_FLD_SNAP_OUI (1)
-+#define NH_FLD_SNAP_PID (NH_FLD_SNAP_OUI << 1)
-+#define NH_FLD_SNAP_ALL_FIELDS ((NH_FLD_SNAP_OUI << 2) - 1)
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
+
-+/*************************** LLC SNAP fields *******************************/
-+#define NH_FLD_LLC_SNAP_TYPE (1)
-+#define NH_FLD_LLC_SNAP_ALL_FIELDS ((NH_FLD_LLC_SNAP_TYPE << 1) - 1)
++/**
++ * dpni_set_irq_enable() - Set overall interrupt state.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @irq_index: The interrupt index to configure
++ * @en: Interrupt state - enable = 1, disable = 0
++ *
++ * Allows GPP software to control when interrupts are generated.
++ * Each interrupt can have up to 32 causes. The enable/disable setting
++ * controls the overall interrupt state: if the interrupt is disabled,
++ * none of the causes will generate an interrupt.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_irq_enable(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ u8 en)
++{
++ struct fsl_mc_command cmd = { 0 };
++ struct dpni_cmd_set_irq_enable *cmd_params;
+
-+#define NH_FLD_ARP_HTYPE (1)
-+#define NH_FLD_ARP_PTYPE (NH_FLD_ARP_HTYPE << 1)
-+#define NH_FLD_ARP_HLEN (NH_FLD_ARP_HTYPE << 2)
-+#define NH_FLD_ARP_PLEN (NH_FLD_ARP_HTYPE << 3)
-+#define NH_FLD_ARP_OPER (NH_FLD_ARP_HTYPE << 4)
-+#define NH_FLD_ARP_SHA (NH_FLD_ARP_HTYPE << 5)
-+#define NH_FLD_ARP_SPA (NH_FLD_ARP_HTYPE << 6)
-+#define NH_FLD_ARP_THA (NH_FLD_ARP_HTYPE << 7)
-+#define NH_FLD_ARP_TPA (NH_FLD_ARP_HTYPE << 8)
-+#define NH_FLD_ARP_ALL_FIELDS ((NH_FLD_ARP_HTYPE << 9) - 1)
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_IRQ_ENABLE,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_set_irq_enable *)cmd.params;
++ dpni_set_field(cmd_params->enable, ENABLE, en);
++ cmd_params->irq_index = irq_index;
+
-+/*************************** RFC2684 fields ********************************/
-+#define NH_FLD_RFC2684_LLC (1)
-+#define NH_FLD_RFC2684_NLPID (NH_FLD_RFC2684_LLC << 1)
-+#define NH_FLD_RFC2684_OUI (NH_FLD_RFC2684_LLC << 2)
-+#define NH_FLD_RFC2684_PID (NH_FLD_RFC2684_LLC << 3)
-+#define NH_FLD_RFC2684_VPN_OUI (NH_FLD_RFC2684_LLC << 4)
-+#define NH_FLD_RFC2684_VPN_IDX (NH_FLD_RFC2684_LLC << 5)
-+#define NH_FLD_RFC2684_ALL_FIELDS ((NH_FLD_RFC2684_LLC << 6) - 1)
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
+
-+/*************************** User defined fields ***************************/
-+#define NH_FLD_USER_DEFINED_SRCPORT (1)
-+#define NH_FLD_USER_DEFINED_PCDID (NH_FLD_USER_DEFINED_SRCPORT << 1)
-+#define NH_FLD_USER_DEFINED_ALL_FIELDS \
-+ ((NH_FLD_USER_DEFINED_SRCPORT << 2) - 1)
++/**
++ * dpni_get_irq_enable() - Get overall interrupt state
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @irq_index: The interrupt index to configure
++ * @en: Returned interrupt state - enable = 1, disable = 0
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_get_irq_enable(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ u8 *en)
++{
++ struct fsl_mc_command cmd = { 0 };
++ struct dpni_cmd_get_irq_enable *cmd_params;
++ struct dpni_rsp_get_irq_enable *rsp_params;
+
-+/*************************** Payload fields ********************************/
-+#define NH_FLD_PAYLOAD_BUFFER (1)
-+#define NH_FLD_PAYLOAD_SIZE (NH_FLD_PAYLOAD_BUFFER << 1)
-+#define NH_FLD_MAX_FRM_SIZE (NH_FLD_PAYLOAD_BUFFER << 2)
-+#define NH_FLD_MIN_FRM_SIZE (NH_FLD_PAYLOAD_BUFFER << 3)
-+#define NH_FLD_PAYLOAD_TYPE (NH_FLD_PAYLOAD_BUFFER << 4)
-+#define NH_FLD_FRAME_SIZE (NH_FLD_PAYLOAD_BUFFER << 5)
-+#define NH_FLD_PAYLOAD_ALL_FIELDS ((NH_FLD_PAYLOAD_BUFFER << 6) - 1)
++ int err;
+
-+/*************************** GRE fields ************************************/
-+#define NH_FLD_GRE_TYPE (1)
-+#define NH_FLD_GRE_ALL_FIELDS ((NH_FLD_GRE_TYPE << 1) - 1)
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_ENABLE,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_get_irq_enable *)cmd.params;
++ cmd_params->irq_index = irq_index;
+
-+/*************************** MINENCAP fields *******************************/
-+#define NH_FLD_MINENCAP_SRC_IP (1)
-+#define NH_FLD_MINENCAP_DST_IP (NH_FLD_MINENCAP_SRC_IP << 1)
-+#define NH_FLD_MINENCAP_TYPE (NH_FLD_MINENCAP_SRC_IP << 2)
-+#define NH_FLD_MINENCAP_ALL_FIELDS \
-+ ((NH_FLD_MINENCAP_SRC_IP << 3) - 1)
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
+
-+/*************************** IPSEC AH fields *******************************/
-+#define NH_FLD_IPSEC_AH_SPI (1)
-+#define NH_FLD_IPSEC_AH_NH (NH_FLD_IPSEC_AH_SPI << 1)
-+#define NH_FLD_IPSEC_AH_ALL_FIELDS ((NH_FLD_IPSEC_AH_SPI << 2) - 1)
++ /* retrieve response parameters */
++ rsp_params = (struct dpni_rsp_get_irq_enable *)cmd.params;
++ *en = dpni_get_field(rsp_params->enabled, ENABLE);
+
-+/*************************** IPSEC ESP fields ******************************/
-+#define NH_FLD_IPSEC_ESP_SPI (1)
-+#define NH_FLD_IPSEC_ESP_SEQUENCE_NUM (NH_FLD_IPSEC_ESP_SPI << 1)
-+#define NH_FLD_IPSEC_ESP_ALL_FIELDS ((NH_FLD_IPSEC_ESP_SPI << 2) - 1)
++ return 0;
++}
+
-+#define NH_FLD_IPSEC_ESP_SPI_SIZE 4
++/**
++ * dpni_set_irq_mask() - Set interrupt mask.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @irq_index: The interrupt index to configure
++ * @mask: event mask to trigger interrupt;
++ * each bit:
++ * 0 = ignore event
++ * 1 = consider event for asserting IRQ
++ *
++ * Every interrupt can have up to 32 causes and the interrupt model supports
++ * masking/unmasking each cause independently.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_irq_mask(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ u32 mask)
++{
++ struct fsl_mc_command cmd = { 0 };
++ struct dpni_cmd_set_irq_mask *cmd_params;
+
-+/*************************** MPLS fields ***********************************/
-+#define NH_FLD_MPLS_LABEL_STACK (1)
-+#define NH_FLD_MPLS_LABEL_STACK_ALL_FIELDS \
-+ ((NH_FLD_MPLS_LABEL_STACK << 1) - 1)
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_IRQ_MASK,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_set_irq_mask *)cmd.params;
++ cmd_params->mask = cpu_to_le32(mask);
++ cmd_params->irq_index = irq_index;
+
-+/*************************** MACSEC fields *********************************/
-+#define NH_FLD_MACSEC_SECTAG (1)
-+#define NH_FLD_MACSEC_ALL_FIELDS ((NH_FLD_MACSEC_SECTAG << 1) - 1)
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
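++
++/* Typical arming sequence combining mask and enable (sketch;
++ * DPNI_IRQ_INDEX and DPNI_IRQ_EVENT_LINK_CHANGED are dpni.h defines
++ * assumed from the driver side):
++ *
++ *   err = dpni_set_irq_mask(priv->mc_io, 0, priv->token,
++ *                           DPNI_IRQ_INDEX, DPNI_IRQ_EVENT_LINK_CHANGED);
++ *   if (!err)
++ *           err = dpni_set_irq_enable(priv->mc_io, 0, priv->token,
++ *                                     DPNI_IRQ_INDEX, 1);
++ */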
+
-+/*************************** GTP fields ************************************/
-+#define NH_FLD_GTP_TEID (1)
++/**
++ * dpni_get_irq_mask() - Get interrupt mask.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @irq_index: The interrupt index to configure
++ * @mask: Returned event mask to trigger interrupt
++ *
++ * Every interrupt can have up to 32 causes and the interrupt model supports
++ * masking/unmasking each cause independently.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_get_irq_mask(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ u32 *mask)
++{
++ struct fsl_mc_command cmd = { 0 };
++ struct dpni_cmd_get_irq_mask *cmd_params;
++ struct dpni_rsp_get_irq_mask *rsp_params;
++ int err;
+
-+/* Protocol options */
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_MASK,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_get_irq_mask *)cmd.params;
++ cmd_params->irq_index = irq_index;
+
-+/* Ethernet options */
-+#define NH_OPT_ETH_BROADCAST 1
-+#define NH_OPT_ETH_MULTICAST 2
-+#define NH_OPT_ETH_UNICAST 3
-+#define NH_OPT_ETH_BPDU 4
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
+
-+#define NH_ETH_IS_MULTICAST_ADDR(addr) (addr[0] & 0x01)
-+/* also applicable for broadcast */
++ /* retrieve response parameters */
++ rsp_params = (struct dpni_rsp_get_irq_mask *)cmd.params;
++ *mask = le32_to_cpu(rsp_params->mask);
+
-+/* VLAN options */
-+#define NH_OPT_VLAN_CFI 1
++ return 0;
++}
+
-+/* IPV4 options */
-+#define NH_OPT_IPV4_UNICAST 1
-+#define NH_OPT_IPV4_MULTICAST 2
-+#define NH_OPT_IPV4_BROADCAST 3
-+#define NH_OPT_IPV4_OPTION 4
-+#define NH_OPT_IPV4_FRAG 5
-+#define NH_OPT_IPV4_INITIAL_FRAG 6
++/**
++ * dpni_get_irq_status() - Get the current status of any pending interrupts.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @irq_index: The interrupt index to configure
++ * @status: Returned interrupts status - one bit per cause:
++ * 0 = no interrupt pending
++ * 1 = interrupt pending
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_get_irq_status(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ u32 *status)
++{
++ struct fsl_mc_command cmd = { 0 };
++ struct dpni_cmd_get_irq_status *cmd_params;
++ struct dpni_rsp_get_irq_status *rsp_params;
++ int err;
+
-+/* IPV6 options */
-+#define NH_OPT_IPV6_UNICAST 1
-+#define NH_OPT_IPV6_MULTICAST 2
-+#define NH_OPT_IPV6_OPTION 3
-+#define NH_OPT_IPV6_FRAG 4
-+#define NH_OPT_IPV6_INITIAL_FRAG 5
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_STATUS,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_get_irq_status *)cmd.params;
++ cmd_params->status = cpu_to_le32(*status);
++ cmd_params->irq_index = irq_index;
+
-+/* General IP options (may be used for any version) */
-+#define NH_OPT_IP_FRAG 1
-+#define NH_OPT_IP_INITIAL_FRAG 2
-+#define NH_OPT_IP_OPTION 3
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
+
-+/* Minenc. options */
-+#define NH_OPT_MINENCAP_SRC_ADDR_PRESENT 1
++ /* retrieve response parameters */
++ rsp_params = (struct dpni_rsp_get_irq_status *)cmd.params;
++ *status = le32_to_cpu(rsp_params->status);
+
-+/* GRE. options */
-+#define NH_OPT_GRE_ROUTING_PRESENT 1
++ return 0;
++}
+
-+/* TCP options */
-+#define NH_OPT_TCP_OPTIONS 1
-+#define NH_OPT_TCP_CONTROL_HIGH_BITS 2
-+#define NH_OPT_TCP_CONTROL_LOW_BITS 3
++/**
++ * dpni_clear_irq_status() - Clear a pending interrupt's status
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @irq_index: The interrupt index to configure
++ * @status: bits to clear (W1C) - one bit per cause:
++ * 0 = don't change
++ * 1 = clear status bit
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_clear_irq_status(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ u32 status)
++{
++ struct fsl_mc_command cmd = { 0 };
++ struct dpni_cmd_clear_irq_status *cmd_params;
+
-+/* CAPWAP options */
-+#define NH_OPT_CAPWAP_DTLS 1
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLEAR_IRQ_STATUS,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_clear_irq_status *)cmd.params;
++ cmd_params->irq_index = irq_index;
++ cmd_params->status = cpu_to_le32(status);
+
-+enum net_prot {
-+ NET_PROT_NONE = 0,
-+ NET_PROT_PAYLOAD,
-+ NET_PROT_ETH,
-+ NET_PROT_VLAN,
-+ NET_PROT_IPV4,
-+ NET_PROT_IPV6,
-+ NET_PROT_IP,
-+ NET_PROT_TCP,
-+ NET_PROT_UDP,
-+ NET_PROT_UDP_LITE,
-+ NET_PROT_IPHC,
-+ NET_PROT_SCTP,
-+ NET_PROT_SCTP_CHUNK_DATA,
-+ NET_PROT_PPPOE,
-+ NET_PROT_PPP,
-+ NET_PROT_PPPMUX,
-+ NET_PROT_PPPMUX_SUBFRM,
-+ NET_PROT_L2TPV2,
-+ NET_PROT_L2TPV3_CTRL,
-+ NET_PROT_L2TPV3_SESS,
-+ NET_PROT_LLC,
-+ NET_PROT_LLC_SNAP,
-+ NET_PROT_NLPID,
-+ NET_PROT_SNAP,
-+ NET_PROT_MPLS,
-+ NET_PROT_IPSEC_AH,
-+ NET_PROT_IPSEC_ESP,
-+ NET_PROT_UDP_ENC_ESP, /* RFC 3948 */
-+ NET_PROT_MACSEC,
-+ NET_PROT_GRE,
-+ NET_PROT_MINENCAP,
-+ NET_PROT_DCCP,
-+ NET_PROT_ICMP,
-+ NET_PROT_IGMP,
-+ NET_PROT_ARP,
-+ NET_PROT_CAPWAP_DATA,
-+ NET_PROT_CAPWAP_CTRL,
-+ NET_PROT_RFC2684,
-+ NET_PROT_ICMPV6,
-+ NET_PROT_FCOE,
-+ NET_PROT_FIP,
-+ NET_PROT_ISCSI,
-+ NET_PROT_GTP,
-+ NET_PROT_USER_DEFINED_L2,
-+ NET_PROT_USER_DEFINED_L3,
-+ NET_PROT_USER_DEFINED_L4,
-+ NET_PROT_USER_DEFINED_L5,
-+ NET_PROT_USER_DEFINED_SHIM1,
-+ NET_PROT_USER_DEFINED_SHIM2,
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
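++
++/*
++ * Example (illustrative sketch, not part of this patch): a typical handler
++ * reads the pending causes, services them, then acknowledges with a
++ * write-one-to-clear. 'priv', handle_link_change() and the
++ * DPNI_IRQ_EVENT_LINK_CHANGED cause bit are assumptions here.
++ *
++ *	u32 status = 0;
++ *	int err;
++ *
++ *	err = dpni_get_irq_status(priv->mc_io, 0, priv->token, 0, &status);
++ *	if (!err && (status & DPNI_IRQ_EVENT_LINK_CHANGED)) {
++ *		handle_link_change(priv);
++ *		dpni_clear_irq_status(priv->mc_io, 0, priv->token, 0,
++ *				      DPNI_IRQ_EVENT_LINK_CHANGED);
++ *	}
++ */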
+
-+ NET_PROT_DUMMY_LAST
-+};
++/**
++ * dpni_get_attributes() - Retrieve DPNI attributes.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @attr: Object's attributes
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_get_attributes(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ struct dpni_attr *attr)
++{
++ struct fsl_mc_command cmd = { 0 };
++ struct dpni_rsp_get_attr *rsp_params;
+
-+/*! IEEE8021.Q */
-+#define NH_IEEE8021Q_ETYPE 0x8100
-+#define NH_IEEE8021Q_HDR(etype, pcp, dei, vlan_id) \
-+ ((((u32)((etype) & 0xFFFF)) << 16) | \
-+ (((u32)((pcp) & 0x07)) << 13) | \
-+ (((u32)((dei) & 0x01)) << 12) | \
-+ (((u32)((vlan_id) & 0xFFF))))
++ int err;
+
-+#endif /* __FSL_NET_H */
---- /dev/null
-+++ b/drivers/staging/fsl-dpaa2/ethsw/Kconfig
-@@ -0,0 +1,6 @@
-+config FSL_DPAA2_ETHSW
-+ tristate "DPAA2 Ethernet Switch"
-+ depends on FSL_MC_BUS && FSL_DPAA2
-+ default y
-+ ---help---
-+ Prototype driver for DPAA2 Ethernet Switch.
---- /dev/null
-+++ b/drivers/staging/fsl-dpaa2/ethsw/Makefile
-@@ -0,0 +1,10 @@
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_ATTR,
++ cmd_flags,
++ token);
+
-+obj-$(CONFIG_FSL_DPAA2_ETHSW) += dpaa2-ethsw.o
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
+
-+dpaa2-ethsw-objs := switch.o dpsw.o
++ /* retrieve response parameters */
++ rsp_params = (struct dpni_rsp_get_attr *)cmd.params;
++ attr->options = le32_to_cpu(rsp_params->options);
++ attr->num_queues = rsp_params->num_queues;
++ attr->num_tcs = rsp_params->num_tcs;
++ attr->mac_filter_entries = rsp_params->mac_filter_entries;
++ attr->vlan_filter_entries = rsp_params->vlan_filter_entries;
++ attr->qos_entries = rsp_params->qos_entries;
++ attr->fs_entries = le16_to_cpu(rsp_params->fs_entries);
++ attr->qos_key_size = rsp_params->qos_key_size;
++ attr->fs_key_size = rsp_params->fs_key_size;
++ attr->wriop_version = le16_to_cpu(rsp_params->wriop_version);
+
-+all:
-+ make -C /lib/modules/$(shell uname -r)/build M=$(PWD) modules
++ return 0;
++}
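++
++/*
++ * Example (illustrative sketch, not part of this patch): probe code can
++ * size its queue bookkeeping from the returned attributes. 'priv' is a
++ * hypothetical driver-private structure.
++ *
++ *	struct dpni_attr attr;
++ *	int err;
++ *
++ *	err = dpni_get_attributes(priv->mc_io, 0, priv->token, &attr);
++ *	if (err)
++ *		return err;
++ *	priv->num_queues = attr.num_queues;
++ *	priv->num_tcs = attr.num_tcs;
++ */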
+
-+clean:
-+ make -C /lib/modules/$(shell uname -r)/build M=$(PWD) clean
---- /dev/null
-+++ b/drivers/staging/fsl-dpaa2/ethsw/dpsw-cmd.h
-@@ -0,0 +1,851 @@
-+/* Copyright 2013-2016 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
++/**
++ * dpni_set_errors_behavior() - Set errors behavior
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @cfg: Errors configuration
+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
++ * This function may be called numerous times with different
++ * error masks.
+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
++ * Return: '0' on Success; Error code otherwise.
+ */
-+#ifndef __FSL_DPSW_CMD_H
-+#define __FSL_DPSW_CMD_H
-+
-+/* DPSW Version */
-+#define DPSW_VER_MAJOR 8
-+#define DPSW_VER_MINOR 0
-+
-+#define DPSW_CMD_BASE_VERSION 1
-+#define DPSW_CMD_ID_OFFSET 4
++int dpni_set_errors_behavior(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ struct dpni_error_cfg *cfg)
++{
++ struct fsl_mc_command cmd = { 0 };
++ struct dpni_cmd_set_errors_behavior *cmd_params;
+
-+#define DPSW_CMD_ID(id) (((id) << DPSW_CMD_ID_OFFSET) | DPSW_CMD_BASE_VERSION)
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_ERRORS_BEHAVIOR,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_set_errors_behavior *)cmd.params;
++ cmd_params->errors = cpu_to_le32(cfg->errors);
++ dpni_set_field(cmd_params->flags, ERROR_ACTION, cfg->error_action);
++ dpni_set_field(cmd_params->flags, FRAME_ANN, cfg->set_frame_annotation);
+
-+/* Command IDs */
-+#define DPSW_CMDID_CLOSE DPSW_CMD_ID(0x800)
-+#define DPSW_CMDID_OPEN DPSW_CMD_ID(0x802)
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
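++
++/*
++ * Example (illustrative sketch, not part of this patch): discard frames
++ * carrying any of a set of hardware-detected errors and record the cause
++ * in the frame annotation status. The DPNI_ERROR_* masks and
++ * DPNI_ERROR_ACTION_DISCARD are assumed from dpni.h.
++ *
++ *	struct dpni_error_cfg err_cfg = {
++ *		.errors = DPNI_ERROR_EOFHE | DPNI_ERROR_FCS |
++ *			  DPNI_ERROR_PHE | DPNI_ERROR_L3CE | DPNI_ERROR_L4CE,
++ *		.error_action = DPNI_ERROR_ACTION_DISCARD,
++ *		.set_frame_annotation = 1,
++ *	};
++ *	int err;
++ *
++ *	err = dpni_set_errors_behavior(priv->mc_io, 0, priv->token, &err_cfg);
++ */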
+
-+#define DPSW_CMDID_GET_API_VERSION DPSW_CMD_ID(0xa02)
++/**
++ * dpni_get_buffer_layout() - Retrieve buffer layout attributes.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @qtype: Type of queue to retrieve configuration for
++ * @layout: Returns buffer layout attributes
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_get_buffer_layout(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ enum dpni_queue_type qtype,
++ struct dpni_buffer_layout *layout)
++{
++ struct fsl_mc_command cmd = { 0 };
++ struct dpni_cmd_get_buffer_layout *cmd_params;
++ struct dpni_rsp_get_buffer_layout *rsp_params;
++ int err;
+
-+#define DPSW_CMDID_ENABLE DPSW_CMD_ID(0x002)
-+#define DPSW_CMDID_DISABLE DPSW_CMD_ID(0x003)
-+#define DPSW_CMDID_GET_ATTR DPSW_CMD_ID(0x004)
-+#define DPSW_CMDID_RESET DPSW_CMD_ID(0x005)
-+#define DPSW_CMDID_IS_ENABLED DPSW_CMD_ID(0x006)
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_BUFFER_LAYOUT,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_get_buffer_layout *)cmd.params;
++ cmd_params->qtype = qtype;
+
-+#define DPSW_CMDID_SET_IRQ DPSW_CMD_ID(0x010)
-+#define DPSW_CMDID_GET_IRQ DPSW_CMD_ID(0x011)
-+#define DPSW_CMDID_SET_IRQ_ENABLE DPSW_CMD_ID(0x012)
-+#define DPSW_CMDID_GET_IRQ_ENABLE DPSW_CMD_ID(0x013)
-+#define DPSW_CMDID_SET_IRQ_MASK DPSW_CMD_ID(0x014)
-+#define DPSW_CMDID_GET_IRQ_MASK DPSW_CMD_ID(0x015)
-+#define DPSW_CMDID_GET_IRQ_STATUS DPSW_CMD_ID(0x016)
-+#define DPSW_CMDID_CLEAR_IRQ_STATUS DPSW_CMD_ID(0x017)
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
+
-+#define DPSW_CMDID_SET_REFLECTION_IF DPSW_CMD_ID(0x022)
++ /* retrieve response parameters */
++ rsp_params = (struct dpni_rsp_get_buffer_layout *)cmd.params;
++ layout->pass_timestamp = dpni_get_field(rsp_params->flags, PASS_TS);
++ layout->pass_parser_result = dpni_get_field(rsp_params->flags, PASS_PR);
++ layout->pass_frame_status = dpni_get_field(rsp_params->flags, PASS_FS);
++ layout->private_data_size = le16_to_cpu(rsp_params->private_data_size);
++ layout->data_align = le16_to_cpu(rsp_params->data_align);
++ layout->data_head_room = le16_to_cpu(rsp_params->head_room);
++ layout->data_tail_room = le16_to_cpu(rsp_params->tail_room);
+
-+#define DPSW_CMDID_ADD_CUSTOM_TPID DPSW_CMD_ID(0x024)
++ return 0;
++}
+
-+#define DPSW_CMDID_REMOVE_CUSTOM_TPID DPSW_CMD_ID(0x026)
++/**
++ * dpni_set_buffer_layout() - Set buffer layout configuration.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @qtype: Type of queue this configuration applies to
++ * @layout: Buffer layout configuration
++ *
++ * Return: '0' on Success; Error code otherwise.
++ *
++ * @warning Allowed only when DPNI is disabled
++ */
++int dpni_set_buffer_layout(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ enum dpni_queue_type qtype,
++ const struct dpni_buffer_layout *layout)
++{
++ struct fsl_mc_command cmd = { 0 };
++ struct dpni_cmd_set_buffer_layout *cmd_params;
+
-+#define DPSW_CMDID_IF_SET_TCI DPSW_CMD_ID(0x030)
-+#define DPSW_CMDID_IF_SET_STP DPSW_CMD_ID(0x031)
-+#define DPSW_CMDID_IF_SET_ACCEPTED_FRAMES DPSW_CMD_ID(0x032)
-+#define DPSW_CMDID_SET_IF_ACCEPT_ALL_VLAN DPSW_CMD_ID(0x033)
-+#define DPSW_CMDID_IF_GET_COUNTER DPSW_CMD_ID(0x034)
-+#define DPSW_CMDID_IF_SET_COUNTER DPSW_CMD_ID(0x035)
-+#define DPSW_CMDID_IF_SET_TX_SELECTION DPSW_CMD_ID(0x036)
-+#define DPSW_CMDID_IF_ADD_REFLECTION DPSW_CMD_ID(0x037)
-+#define DPSW_CMDID_IF_REMOVE_REFLECTION DPSW_CMD_ID(0x038)
-+#define DPSW_CMDID_IF_SET_FLOODING_METERING DPSW_CMD_ID(0x039)
-+#define DPSW_CMDID_IF_SET_METERING DPSW_CMD_ID(0x03A)
-+#define DPSW_CMDID_IF_SET_EARLY_DROP DPSW_CMD_ID(0x03B)
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_BUFFER_LAYOUT,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_set_buffer_layout *)cmd.params;
++ cmd_params->qtype = qtype;
++ cmd_params->options = cpu_to_le16(layout->options);
++ dpni_set_field(cmd_params->flags, PASS_TS, layout->pass_timestamp);
++ dpni_set_field(cmd_params->flags, PASS_PR, layout->pass_parser_result);
++ dpni_set_field(cmd_params->flags, PASS_FS, layout->pass_frame_status);
++ cmd_params->private_data_size = cpu_to_le16(layout->private_data_size);
++ cmd_params->data_align = cpu_to_le16(layout->data_align);
++ cmd_params->head_room = cpu_to_le16(layout->data_head_room);
++ cmd_params->tail_room = cpu_to_le16(layout->data_tail_room);
+
-+#define DPSW_CMDID_IF_ENABLE DPSW_CMD_ID(0x03D)
-+#define DPSW_CMDID_IF_DISABLE DPSW_CMD_ID(0x03E)
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
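++
++/*
++ * Example (illustrative sketch, not part of this patch): configure the Rx
++ * buffer layout while the DPNI is still disabled. The
++ * DPNI_BUF_LAYOUT_OPT_* flags and DPNI_QUEUE_RX are assumed from dpni.h.
++ *
++ *	struct dpni_buffer_layout layout = {
++ *		.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
++ *			   DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
++ *			   DPNI_BUF_LAYOUT_OPT_DATA_ALIGN,
++ *		.pass_frame_status = 1,
++ *		.pass_parser_result = 1,
++ *		.data_align = 64,
++ *	};
++ *	int err;
++ *
++ *	err = dpni_set_buffer_layout(priv->mc_io, 0, priv->token,
++ *				     DPNI_QUEUE_RX, &layout);
++ */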
+
-+#define DPSW_CMDID_IF_GET_ATTR DPSW_CMD_ID(0x042)
++/**
++ * dpni_set_offload() - Set DPNI offload configuration.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @type: Type of DPNI offload
++ * @config: Offload configuration.
++ * For checksum offloads, non-zero value enables the offload
++ *
++ * Return: '0' on Success; Error code otherwise.
++ *
++ * @warning Allowed only when DPNI is disabled
++ */
+
-+#define DPSW_CMDID_IF_SET_MAX_FRAME_LENGTH DPSW_CMD_ID(0x044)
-+#define DPSW_CMDID_IF_GET_MAX_FRAME_LENGTH DPSW_CMD_ID(0x045)
-+#define DPSW_CMDID_IF_GET_LINK_STATE DPSW_CMD_ID(0x046)
-+#define DPSW_CMDID_IF_SET_FLOODING DPSW_CMD_ID(0x047)
-+#define DPSW_CMDID_IF_SET_BROADCAST DPSW_CMD_ID(0x048)
-+#define DPSW_CMDID_IF_SET_MULTICAST DPSW_CMD_ID(0x049)
-+#define DPSW_CMDID_IF_GET_TCI DPSW_CMD_ID(0x04A)
++int dpni_set_offload(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ enum dpni_offload type,
++ u32 config)
++{
++ struct fsl_mc_command cmd = { 0 };
++ struct dpni_cmd_set_offload *cmd_params;
+
-+#define DPSW_CMDID_IF_SET_LINK_CFG DPSW_CMD_ID(0x04C)
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_OFFLOAD,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_set_offload *)cmd.params;
++ cmd_params->dpni_offload = type;
++ cmd_params->config = cpu_to_le32(config);
+
-+#define DPSW_CMDID_VLAN_ADD DPSW_CMD_ID(0x060)
-+#define DPSW_CMDID_VLAN_ADD_IF DPSW_CMD_ID(0x061)
-+#define DPSW_CMDID_VLAN_ADD_IF_UNTAGGED DPSW_CMD_ID(0x062)
-+#define DPSW_CMDID_VLAN_ADD_IF_FLOODING DPSW_CMD_ID(0x063)
-+#define DPSW_CMDID_VLAN_REMOVE_IF DPSW_CMD_ID(0x064)
-+#define DPSW_CMDID_VLAN_REMOVE_IF_UNTAGGED DPSW_CMD_ID(0x065)
-+#define DPSW_CMDID_VLAN_REMOVE_IF_FLOODING DPSW_CMD_ID(0x066)
-+#define DPSW_CMDID_VLAN_REMOVE DPSW_CMD_ID(0x067)
-+#define DPSW_CMDID_VLAN_GET_IF DPSW_CMD_ID(0x068)
-+#define DPSW_CMDID_VLAN_GET_IF_FLOODING DPSW_CMD_ID(0x069)
-+#define DPSW_CMDID_VLAN_GET_IF_UNTAGGED DPSW_CMD_ID(0x06A)
-+#define DPSW_CMDID_VLAN_GET_ATTRIBUTES DPSW_CMD_ID(0x06B)
-+
-+#define DPSW_CMDID_FDB_GET_MULTICAST DPSW_CMD_ID(0x080)
-+#define DPSW_CMDID_FDB_GET_UNICAST DPSW_CMD_ID(0x081)
-+#define DPSW_CMDID_FDB_ADD DPSW_CMD_ID(0x082)
-+#define DPSW_CMDID_FDB_REMOVE DPSW_CMD_ID(0x083)
-+#define DPSW_CMDID_FDB_ADD_UNICAST DPSW_CMD_ID(0x084)
-+#define DPSW_CMDID_FDB_REMOVE_UNICAST DPSW_CMD_ID(0x085)
-+#define DPSW_CMDID_FDB_ADD_MULTICAST DPSW_CMD_ID(0x086)
-+#define DPSW_CMDID_FDB_REMOVE_MULTICAST DPSW_CMD_ID(0x087)
-+#define DPSW_CMDID_FDB_SET_LEARNING_MODE DPSW_CMD_ID(0x088)
-+#define DPSW_CMDID_FDB_GET_ATTR DPSW_CMD_ID(0x089)
++ return mc_send_command(mc_io, &cmd);
++}
++
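++/**
++ * dpni_get_offload() - Get DPNI offload configuration.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @type: Type of DPNI offload
++ * @config: Returned offload configuration.
++ * For checksum offloads, a non-zero value indicates the
++ * offload is enabled
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */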
++int dpni_get_offload(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ enum dpni_offload type,
++ u32 *config)
++{
++ struct fsl_mc_command cmd = { 0 };
++ struct dpni_cmd_get_offload *cmd_params;
++ struct dpni_rsp_get_offload *rsp_params;
++ int err;
+
-+#define DPSW_CMDID_ACL_ADD DPSW_CMD_ID(0x090)
-+#define DPSW_CMDID_ACL_REMOVE DPSW_CMD_ID(0x091)
-+#define DPSW_CMDID_ACL_ADD_ENTRY DPSW_CMD_ID(0x092)
-+#define DPSW_CMDID_ACL_REMOVE_ENTRY DPSW_CMD_ID(0x093)
-+#define DPSW_CMDID_ACL_ADD_IF DPSW_CMD_ID(0x094)
-+#define DPSW_CMDID_ACL_REMOVE_IF DPSW_CMD_ID(0x095)
-+#define DPSW_CMDID_ACL_GET_ATTR DPSW_CMD_ID(0x096)
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_OFFLOAD,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_get_offload *)cmd.params;
++ cmd_params->dpni_offload = type;
+
-+#define DPSW_CMDID_CTRL_IF_GET_ATTR DPSW_CMD_ID(0x0A0)
-+#define DPSW_CMDID_CTRL_IF_SET_POOLS DPSW_CMD_ID(0x0A1)
-+#define DPSW_CMDID_CTRL_IF_ENABLE DPSW_CMD_ID(0x0A2)
-+#define DPSW_CMDID_CTRL_IF_DISABLE DPSW_CMD_ID(0x0A3)
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
+
-+/* Macros for accessing command fields smaller than 1byte */
-+#define DPSW_MASK(field) \
-+ GENMASK(DPSW_##field##_SHIFT + DPSW_##field##_SIZE - 1, \
-+ DPSW_##field##_SHIFT)
-+#define dpsw_set_field(var, field, val) \
-+ ((var) |= (((val) << DPSW_##field##_SHIFT) & DPSW_MASK(field)))
-+#define dpsw_get_field(var, field) \
-+ (((var) & DPSW_MASK(field)) >> DPSW_##field##_SHIFT)
-+#define dpsw_get_bit(var, bit) \
-+ (((var) >> (bit)) & GENMASK(0, 0))
++ /* retrieve response parameters */
++ rsp_params = (struct dpni_rsp_get_offload *)cmd.params;
++ *config = le32_to_cpu(rsp_params->config);
+
-+static inline u64 dpsw_set_bit(u64 var, unsigned int bit, u8 val)
-+{
-+ var |= (u64)val << bit & GENMASK(bit, bit);
-+ return var;
++ return 0;
+}
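++
++/*
++ * Example (illustrative sketch, not part of this patch): enable Rx L3/L4
++ * checksum validation while the DPNI is disabled. The DPNI_OFF_RX_L3_CSUM
++ * and DPNI_OFF_RX_L4_CSUM enum values are assumed from dpni.h.
++ *
++ *	int err;
++ *
++ *	err = dpni_set_offload(priv->mc_io, 0, priv->token,
++ *			       DPNI_OFF_RX_L3_CSUM, 1);
++ *	if (!err)
++ *		err = dpni_set_offload(priv->mc_io, 0, priv->token,
++ *				       DPNI_OFF_RX_L4_CSUM, 1);
++ */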
+
-+struct dpsw_cmd_open {
-+ __le32 dpsw_id;
-+};
-+
-+#define DPSW_COMPONENT_TYPE_SHIFT 0
-+#define DPSW_COMPONENT_TYPE_SIZE 4
++/**
++ * dpni_get_qdid() - Get the Queuing Destination ID (QDID) that should be used
++ * for enqueue operations
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @qtype: Type of queue to receive QDID for
++ * @qdid: Returned virtual QDID value that should be used as an argument
++ * in all enqueue operations
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_get_qdid(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ enum dpni_queue_type qtype,
++ u16 *qdid)
++{
++ struct fsl_mc_command cmd = { 0 };
++ struct dpni_cmd_get_qdid *cmd_params;
++ struct dpni_rsp_get_qdid *rsp_params;
++ int err;
+
-+struct dpsw_cmd_create {
-+ /* cmd word 0 */
-+ __le16 num_ifs;
-+ u8 max_fdbs;
-+ u8 max_meters_per_if;
-+ /* from LSB: only the first 4 bits */
-+ u8 component_type;
-+ u8 pad[3];
-+ /* cmd word 1 */
-+ __le16 max_vlans;
-+ __le16 max_fdb_entries;
-+ __le16 fdb_aging_time;
-+ __le16 max_fdb_mc_groups;
-+ /* cmd word 2 */
-+ __le64 options;
-+};
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_QDID,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_get_qdid *)cmd.params;
++ cmd_params->qtype = qtype;
+
-+struct dpsw_cmd_destroy {
-+ __le32 dpsw_id;
-+};
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
+
-+#define DPSW_ENABLE_SHIFT 0
-+#define DPSW_ENABLE_SIZE 1
++ /* retrieve response parameters */
++ rsp_params = (struct dpni_rsp_get_qdid *)cmd.params;
++ *qdid = le16_to_cpu(rsp_params->qdid);
+
-+struct dpsw_rsp_is_enabled {
-+ /* from LSB: enable:1 */
-+ u8 enabled;
-+};
++ return 0;
++}
+
-+struct dpsw_cmd_set_irq {
-+ /* cmd word 0 */
-+ u8 irq_index;
-+ u8 pad[3];
-+ __le32 irq_val;
-+ /* cmd word 1 */
-+ __le64 irq_addr;
-+ /* cmd word 2 */
-+ __le32 irq_num;
-+};
++/**
++ * dpni_get_tx_data_offset() - Get the Tx data offset (from start of buffer)
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @data_offset: Tx data offset (from start of buffer)
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_get_tx_data_offset(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 *data_offset)
++{
++ struct fsl_mc_command cmd = { 0 };
++ struct dpni_rsp_get_tx_data_offset *rsp_params;
++ int err;
+
-+struct dpsw_cmd_get_irq {
-+ __le32 pad;
-+ u8 irq_index;
-+};
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TX_DATA_OFFSET,
++ cmd_flags,
++ token);
+
-+struct dpsw_rsp_get_irq {
-+ /* cmd word 0 */
-+ __le32 irq_val;
-+ __le32 pad;
-+ /* cmd word 1 */
-+ __le64 irq_addr;
-+ /* cmd word 2 */
-+ __le32 irq_num;
-+ __le32 irq_type;
-+};
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
+
-+struct dpsw_cmd_set_irq_enable {
-+ u8 enable_state;
-+ u8 pad[3];
-+ u8 irq_index;
-+};
++ /* retrieve response parameters */
++ rsp_params = (struct dpni_rsp_get_tx_data_offset *)cmd.params;
++ *data_offset = le16_to_cpu(rsp_params->data_offset);
+
-+struct dpsw_cmd_get_irq_enable {
-+ __le32 pad;
-+ u8 irq_index;
-+};
++ return 0;
++}
+
-+struct dpsw_rsp_get_irq_enable {
-+ u8 enable_state;
-+};
++/**
++ * dpni_set_link_cfg() - set the link configuration.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @cfg: Link configuration
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_link_cfg(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ const struct dpni_link_cfg *cfg)
++{
++ struct fsl_mc_command cmd = { 0 };
++ struct dpni_cmd_set_link_cfg *cmd_params;
+
-+struct dpsw_cmd_set_irq_mask {
-+ __le32 mask;
-+ u8 irq_index;
-+};
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_LINK_CFG,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_set_link_cfg *)cmd.params;
++ cmd_params->rate = cpu_to_le32(cfg->rate);
++ cmd_params->options = cpu_to_le64(cfg->options);
+
-+struct dpsw_cmd_get_irq_mask {
-+ __le32 pad;
-+ u8 irq_index;
-+};
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
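++
++/*
++ * Example (illustrative sketch, not part of this patch): request
++ * autonegotiation through the link options. DPNI_LINK_OPT_AUTONEG is
++ * assumed from dpni.h; the rate unit is taken to be Mbps.
++ *
++ *	struct dpni_link_cfg cfg = {
++ *		.rate = 1000,
++ *		.options = DPNI_LINK_OPT_AUTONEG,
++ *	};
++ *	int err;
++ *
++ *	err = dpni_set_link_cfg(priv->mc_io, 0, priv->token, &cfg);
++ */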
+
-+struct dpsw_rsp_get_irq_mask {
-+ __le32 mask;
-+};
++/**
++ * dpni_get_link_state() - Return the link state (either up or down)
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @state: Returned link state;
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_get_link_state(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ struct dpni_link_state *state)
++{
++ struct fsl_mc_command cmd = { 0 };
++ struct dpni_rsp_get_link_state *rsp_params;
++ int err;
+
-+struct dpsw_cmd_get_irq_status {
-+ __le32 status;
-+ u8 irq_index;
-+};
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_LINK_STATE,
++ cmd_flags,
++ token);
+
-+struct dpsw_rsp_get_irq_status {
-+ __le32 status;
-+};
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
+
-+struct dpsw_cmd_clear_irq_status {
-+ __le32 status;
-+ u8 irq_index;
-+};
++ /* retrieve response parameters */
++ rsp_params = (struct dpni_rsp_get_link_state *)cmd.params;
++ state->up = dpni_get_field(rsp_params->flags, LINK_STATE);
++ state->rate = le32_to_cpu(rsp_params->rate);
++ state->options = le64_to_cpu(rsp_params->options);
+
-+#define DPSW_COMPONENT_TYPE_SHIFT 0
-+#define DPSW_COMPONENT_TYPE_SIZE 4
++ return 0;
++}
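++
++/*
++ * Example (illustrative sketch, not part of this patch): mirror the MC
++ * link state into the net_device carrier. 'priv' and 'net_dev' are
++ * hypothetical.
++ *
++ *	struct dpni_link_state state;
++ *	int err;
++ *
++ *	err = dpni_get_link_state(priv->mc_io, 0, priv->token, &state);
++ *	if (!err) {
++ *		if (state.up)
++ *			netif_carrier_on(net_dev);
++ *		else
++ *			netif_carrier_off(net_dev);
++ *	}
++ */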
+
-+struct dpsw_rsp_get_attr {
-+ /* cmd word 0 */
-+ __le16 num_ifs;
-+ u8 max_fdbs;
-+ u8 num_fdbs;
-+ __le16 max_vlans;
-+ __le16 num_vlans;
-+ /* cmd word 1 */
-+ __le16 max_fdb_entries;
-+ __le16 fdb_aging_time;
-+ __le32 dpsw_id;
-+ /* cmd word 2 */
-+ __le16 mem_size;
-+ __le16 max_fdb_mc_groups;
-+ u8 max_meters_per_if;
-+ /* from LSB only the first 4 bits */
-+ u8 component_type;
-+ __le16 pad;
-+ /* cmd word 3 */
-+ __le64 options;
-+};
++/**
++ * dpni_set_tx_shaping() - Set the transmit shaping
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @tx_cr_shaper: TX committed rate shaping configuration
++ * @tx_er_shaper: TX excess rate shaping configuration
++ * @coupled: Committed and excess rate shapers are coupled
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_tx_shaping(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ const struct dpni_tx_shaping_cfg *tx_cr_shaper,
++ const struct dpni_tx_shaping_cfg *tx_er_shaper,
++ int coupled)
++{
++ struct fsl_mc_command cmd = { 0 };
++ struct dpni_cmd_set_tx_shaping *cmd_params;
+
-+struct dpsw_cmd_set_reflection_if {
-+ __le16 if_id;
-+};
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_SHAPING,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_set_tx_shaping *)cmd.params;
++ cmd_params->tx_cr_max_burst_size =
++ cpu_to_le16(tx_cr_shaper->max_burst_size);
++ cmd_params->tx_er_max_burst_size =
++ cpu_to_le16(tx_er_shaper->max_burst_size);
++ cmd_params->tx_cr_rate_limit = cpu_to_le32(tx_cr_shaper->rate_limit);
++ cmd_params->tx_er_rate_limit = cpu_to_le32(tx_er_shaper->rate_limit);
++ dpni_set_field(cmd_params->coupled, COUPLED, coupled);
+
-+struct dpsw_cmd_if_set_flooding {
-+ __le16 if_id;
-+ /* from LSB: enable:1 */
-+ u8 enable;
-+};
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
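++
++/*
++ * Example (illustrative sketch, not part of this patch): cap egress to a
++ * 100 Mbps committed rate with a 32 KiB burst and no excess rate, with the
++ * two shapers left uncoupled. The Mbps unit is an assumption.
++ *
++ *	struct dpni_tx_shaping_cfg cr = {
++ *		.rate_limit = 100,
++ *		.max_burst_size = 0x8000,
++ *	};
++ *	struct dpni_tx_shaping_cfg er = { 0 };
++ *	int err;
++ *
++ *	err = dpni_set_tx_shaping(priv->mc_io, 0, priv->token, &cr, &er, 0);
++ */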
+
-+struct dpsw_cmd_if_set_broadcast {
-+ __le16 if_id;
-+ /* from LSB: enable:1 */
-+ u8 enable;
-+};
++/**
++ * dpni_set_max_frame_length() - Set the maximum received frame length.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @max_frame_length: Maximum received frame length (in
++ * bytes); frame is discarded if its
++ * length exceeds this value
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_max_frame_length(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 max_frame_length)
++{
++ struct fsl_mc_command cmd = { 0 };
++ struct dpni_cmd_set_max_frame_length *cmd_params;
+
-+struct dpsw_cmd_if_set_multicast {
-+ __le16 if_id;
-+ /* from LSB: enable:1 */
-+ u8 enable;
-+};
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_MAX_FRAME_LENGTH,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_set_max_frame_length *)cmd.params;
++ cmd_params->max_frame_length = cpu_to_le16(max_frame_length);
+
-+#define DPSW_VLAN_ID_SHIFT 0
-+#define DPSW_VLAN_ID_SIZE 12
-+#define DPSW_DEI_SHIFT 12
-+#define DPSW_DEI_SIZE 1
-+#define DPSW_PCP_SHIFT 13
-+#define DPSW_PCP_SIZE 3
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
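++
++/*
++ * Example (illustrative sketch, not part of this patch): derive the
++ * maximum frame length from an MTU update. Using VLAN_ETH_HLEN as the
++ * L2 overhead (Ethernet header plus one VLAN tag) is an assumption.
++ *
++ *	int err;
++ *
++ *	err = dpni_set_max_frame_length(priv->mc_io, 0, priv->token,
++ *					new_mtu + VLAN_ETH_HLEN);
++ */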
+
-+struct dpsw_cmd_if_set_tci {
-+ __le16 if_id;
-+ /* from LSB: VLAN_ID:12 DEI:1 PCP:3 */
-+ __le16 conf;
-+};
++/**
++ * dpni_get_max_frame_length() - Get the maximum received frame length.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @max_frame_length: Maximum received frame length (in
++ * bytes); frame is discarded if its
++ * length exceeds this value
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_get_max_frame_length(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 *max_frame_length)
++{
++ struct fsl_mc_command cmd = { 0 };
++ struct dpni_rsp_get_max_frame_length *rsp_params;
++ int err;
+
-+struct dpsw_cmd_if_get_tci {
-+ __le16 if_id;
-+};
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_MAX_FRAME_LENGTH,
++ cmd_flags,
++ token);
+
-+struct dpsw_rsp_if_get_tci {
-+ __le16 pad;
-+ __le16 vlan_id;
-+ u8 dei;
-+ u8 pcp;
-+};
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
+
-+#define DPSW_STATE_SHIFT 0
-+#define DPSW_STATE_SIZE 4
++ /* retrieve response parameters */
++ rsp_params = (struct dpni_rsp_get_max_frame_length *)cmd.params;
++ *max_frame_length = le16_to_cpu(rsp_params->max_frame_length);
+
-+struct dpsw_cmd_if_set_stp {
-+ __le16 if_id;
-+ __le16 vlan_id;
-+ /* only the first LSB 4 bits */
-+ u8 state;
-+};
++ return 0;
++}
+
-+#define DPSW_FRAME_TYPE_SHIFT 0
-+#define DPSW_FRAME_TYPE_SIZE 4
-+#define DPSW_UNACCEPTED_ACT_SHIFT 4
-+#define DPSW_UNACCEPTED_ACT_SIZE 4
++/**
++ * dpni_set_multicast_promisc() - Enable/disable multicast promiscuous mode
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @en: Set to '1' to enable; '0' to disable
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_multicast_promisc(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ int en)
++{
++ struct fsl_mc_command cmd = { 0 };
++ struct dpni_cmd_set_multicast_promisc *cmd_params;
+
-+struct dpsw_cmd_if_set_accepted_frames {
-+ __le16 if_id;
-+ /* from LSB: type:4 unaccepted_act:4 */
-+ u8 unaccepted;
-+};
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_MCAST_PROMISC,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_set_multicast_promisc *)cmd.params;
++ dpni_set_field(cmd_params->enable, ENABLE, en);
+
-+#define DPSW_ACCEPT_ALL_SHIFT 0
-+#define DPSW_ACCEPT_ALL_SIZE 1
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
+
-+struct dpsw_cmd_if_set_accept_all_vlan {
-+ __le16 if_id;
-+ /* only the least significant bit */
-+ u8 accept_all;
-+};
++/**
++ * dpni_get_multicast_promisc() - Get multicast promiscuous mode
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @en: Returns '1' if enabled; '0' otherwise
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_get_multicast_promisc(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ int *en)
++{
++ struct fsl_mc_command cmd = { 0 };
++ struct dpni_rsp_get_multicast_promisc *rsp_params;
++ int err;
+
-+#define DPSW_COUNTER_TYPE_SHIFT 0
-+#define DPSW_COUNTER_TYPE_SIZE 5
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_MCAST_PROMISC,
++ cmd_flags,
++ token);
+
-+struct dpsw_cmd_if_get_counter {
-+ __le16 if_id;
-+ /* from LSB: type:5 */
-+ u8 type;
-+};
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
+
-+struct dpsw_rsp_if_get_counter {
-+ __le64 pad;
-+ __le64 counter;
-+};
++ /* retrieve response parameters */
++ rsp_params = (struct dpni_rsp_get_multicast_promisc *)cmd.params;
++ *en = dpni_get_field(rsp_params->enabled, ENABLE);
+
-+struct dpsw_cmd_if_set_counter {
-+ /* cmd word 0 */
-+ __le16 if_id;
-+ /* from LSB: type:5 */
-+ u8 type;
-+ /* cmd word 1 */
-+ __le64 counter;
-+};
++ return 0;
++}
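++
++/*
++ * Example (illustrative sketch, not part of this patch): an
++ * ndo_set_rx_mode-style handler can map IFF_ALLMULTI onto the multicast
++ * promiscuous flag. 'priv' and 'net_dev' are hypothetical.
++ *
++ *	int en = !!(net_dev->flags & IFF_ALLMULTI);
++ *	int err;
++ *
++ *	err = dpni_set_multicast_promisc(priv->mc_io, 0, priv->token, en);
++ */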
+
-+#define DPSW_PRIORITY_SELECTOR_SHIFT 0
-+#define DPSW_PRIORITY_SELECTOR_SIZE 3
-+#define DPSW_SCHED_MODE_SHIFT 0
-+#define DPSW_SCHED_MODE_SIZE 4
++/**
++ * dpni_set_unicast_promisc() - Enable/disable unicast promiscuous mode
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @en: Set to '1' to enable; '0' to disable
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_unicast_promisc(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ int en)
++{
++ struct fsl_mc_command cmd = { 0 };
++ struct dpni_cmd_set_unicast_promisc *cmd_params;
+
-+struct dpsw_cmd_if_set_tx_selection {
-+ __le16 if_id;
-+ /* from LSB: priority_selector:3 */
-+ u8 priority_selector;
-+ u8 pad[5];
-+ u8 tc_id[8];
-+
-+ struct dpsw_tc_sched {
-+ __le16 delta_bandwidth;
-+ u8 mode;
-+ u8 pad;
-+ } tc_sched[8];
-+};
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_UNICAST_PROMISC,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_set_unicast_promisc *)cmd.params;
++ dpni_set_field(cmd_params->enable, ENABLE, en);
+
-+#define DPSW_FILTER_SHIFT 0
-+#define DPSW_FILTER_SIZE 2
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
+
-+struct dpsw_cmd_if_reflection {
-+ __le16 if_id;
-+ __le16 vlan_id;
-+ /* only 2 bits from the LSB */
-+ u8 filter;
-+};
++/**
++ * dpni_get_unicast_promisc() - Get unicast promiscuous mode
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @en: Returns '1' if enabled; '0' otherwise
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_get_unicast_promisc(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ int *en)
++{
++ struct fsl_mc_command cmd = { 0 };
++ struct dpni_rsp_get_unicast_promisc *rsp_params;
++ int err;
+
-+#define DPSW_MODE_SHIFT 0
-+#define DPSW_MODE_SIZE 4
-+#define DPSW_UNITS_SHIFT 4
-+#define DPSW_UNITS_SIZE 4
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_UNICAST_PROMISC,
++ cmd_flags,
++ token);
+
-+struct dpsw_cmd_if_set_flooding_metering {
-+ /* cmd word 0 */
-+ __le16 if_id;
-+ u8 pad;
-+ /* from LSB: mode:4 units:4 */
-+ u8 mode_units;
-+ __le32 cir;
-+ /* cmd word 1 */
-+ __le32 eir;
-+ __le32 cbs;
-+ /* cmd word 2 */
-+ __le32 ebs;
-+};
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
+
-+struct dpsw_cmd_if_set_metering {
-+ /* cmd word 0 */
-+ __le16 if_id;
-+ u8 tc_id;
-+ /* from LSB: mode:4 units:4 */
-+ u8 mode_units;
-+ __le32 cir;
-+ /* cmd word 1 */
-+ __le32 eir;
-+ __le32 cbs;
-+ /* cmd word 2 */
-+ __le32 ebs;
-+};
++ /* retrieve response parameters */
++ rsp_params = (struct dpni_rsp_get_unicast_promisc *)cmd.params;
++ *en = dpni_get_field(rsp_params->enabled, ENABLE);
+
-+#define DPSW_EARLY_DROP_MODE_SHIFT 0
-+#define DPSW_EARLY_DROP_MODE_SIZE 2
-+#define DPSW_EARLY_DROP_UNIT_SHIFT 2
-+#define DPSW_EARLY_DROP_UNIT_SIZE 2
++ return 0;
++}
+
-+struct dpsw_prep_early_drop {
-+ /* from LSB: mode:2 units:2 */
-+ u8 conf;
-+ u8 pad0[3];
-+ __le32 tail_drop_threshold;
-+ u8 green_drop_probability;
-+ u8 pad1[7];
-+ __le64 green_max_threshold;
-+ __le64 green_min_threshold;
-+ __le64 pad2;
-+ u8 yellow_drop_probability;
-+ u8 pad3[7];
-+ __le64 yellow_max_threshold;
-+ __le64 yellow_min_threshold;
-+};
++/**
++ * dpni_set_primary_mac_addr() - Set the primary MAC address
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @mac_addr: MAC address to set as primary address
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_primary_mac_addr(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ const u8 mac_addr[6])
++{
++ struct fsl_mc_command cmd = { 0 };
++ struct dpni_cmd_set_primary_mac_addr *cmd_params;
++ int i;
+
-+struct dpsw_cmd_if_set_early_drop {
-+ /* cmd word 0 */
-+ u8 pad0;
-+ u8 tc_id;
-+ __le16 if_id;
-+ __le32 pad1;
-+ /* cmd word 1 */
-+ __le64 early_drop_iova;
-+};
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_PRIM_MAC,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_set_primary_mac_addr *)cmd.params;
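++ /* the command format stores the MAC address in reversed byte order */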
++ for (i = 0; i < 6; i++)
++ cmd_params->mac_addr[i] = mac_addr[5 - i];
+
-+struct dpsw_cmd_custom_tpid {
-+ __le16 pad;
-+ __le16 tpid;
-+};
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
+
-+struct dpsw_cmd_if {
-+ __le16 if_id;
-+};
++/**
++ * dpni_get_primary_mac_addr() - Get the primary MAC address
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @mac_addr: Returned MAC address
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_get_primary_mac_addr(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 mac_addr[6])
++{
++ struct fsl_mc_command cmd = { 0 };
++ struct dpni_rsp_get_primary_mac_addr *rsp_params;
++ int i, err;
+
-+#define DPSW_ADMIT_UNTAGGED_SHIFT 0
-+#define DPSW_ADMIT_UNTAGGED_SIZE 4
-+#define DPSW_ENABLED_SHIFT 5
-+#define DPSW_ENABLED_SIZE 1
-+#define DPSW_ACCEPT_ALL_VLAN_SHIFT 6
-+#define DPSW_ACCEPT_ALL_VLAN_SIZE 1
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_PRIM_MAC,
++ cmd_flags,
++ token);
+
-+struct dpsw_rsp_if_get_attr {
-+ /* cmd word 0 */
-+ /* from LSB: admit_untagged:4 enabled:1 accept_all_vlan:1 */
-+ u8 conf;
-+ u8 pad1;
-+ u8 num_tcs;
-+ u8 pad2;
-+ __le16 qdid;
-+ /* cmd word 1 */
-+ __le32 options;
-+ __le32 pad3;
-+ /* cmd word 2 */
-+ __le32 rate;
-+};
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
+
-+struct dpsw_cmd_if_set_max_frame_length {
-+ __le16 if_id;
-+ __le16 frame_length;
-+};
++ /* retrieve response parameters */
++ rsp_params = (struct dpni_rsp_get_primary_mac_addr *)cmd.params;
++ for (i = 0; i < 6; i++)
++ mac_addr[5 - i] = rsp_params->mac_addr[i];
+
-+struct dpsw_cmd_if_get_max_frame_length {
-+ __le16 if_id;
-+};
++ return 0;
++}
+
-+struct dpsw_rsp_if_get_max_frame_length {
-+ __le16 pad;
-+ __le16 frame_length;
-+};
++/**
++ * dpni_get_port_mac_addr() - Retrieve the MAC address associated with the
++ * physical port the DPNI is attached to
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @mac_addr: MAC address of the physical port, if any, otherwise 0
++ *
++ * The primary MAC address is not cleared by this operation.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_get_port_mac_addr(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 mac_addr[6])
++{
++ struct fsl_mc_command cmd = { 0 };
++ struct dpni_rsp_get_port_mac_addr *rsp_params;
++ int i, err;
+
-+struct dpsw_cmd_if_set_link_cfg {
-+ /* cmd word 0 */
-+ __le16 if_id;
-+ u8 pad[6];
-+ /* cmd word 1 */
-+ __le32 rate;
-+ __le32 pad1;
-+ /* cmd word 2 */
-+ __le64 options;
-+};
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_PORT_MAC_ADDR,
++ cmd_flags,
++ token);
+
-+struct dpsw_cmd_if_get_link_state {
-+ __le16 if_id;
-+};
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
+
-+#define DPSW_UP_SHIFT 0
-+#define DPSW_UP_SIZE 1
++ /* retrieve response parameters */
++ rsp_params = (struct dpni_rsp_get_port_mac_addr *)cmd.params;
++ for (i = 0; i < 6; i++)
++ mac_addr[5 - i] = rsp_params->mac_addr[i];
+
-+struct dpsw_rsp_if_get_link_state {
-+ /* cmd word 0 */
-+ __le32 pad0;
-+ u8 up;
-+ u8 pad1[3];
-+ /* cmd word 1 */
-+ __le32 rate;
-+ __le32 pad2;
-+ /* cmd word 2 */
-+ __le64 options;
-+};
++ return 0;
++}
+
-+struct dpsw_vlan_add {
-+ __le16 fdb_id;
-+ __le16 vlan_id;
-+};
++/**
++ * dpni_add_mac_addr() - Add MAC address filter
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @mac_addr: MAC address to add
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_add_mac_addr(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ const u8 mac_addr[6])
++{
++ struct fsl_mc_command cmd = { 0 };
++ struct dpni_cmd_add_mac_addr *cmd_params;
++ int i;
+
-+struct dpsw_cmd_vlan_manage_if {
-+ /* cmd word 0 */
-+ __le16 pad0;
-+ __le16 vlan_id;
-+ __le32 pad1;
-+ /* cmd word 1 */
-+ __le64 if_id[4];
-+};
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_MAC_ADDR,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_add_mac_addr *)cmd.params;
++ for (i = 0; i < 6; i++)
++ cmd_params->mac_addr[i] = mac_addr[5 - i];
+
-+struct dpsw_cmd_vlan_remove {
-+ __le16 pad;
-+ __le16 vlan_id;
-+};
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
+
-+struct dpsw_cmd_vlan_get_attr {
-+ __le16 vlan_id;
-+};
++/**
++ * dpni_remove_mac_addr() - Remove MAC address filter
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @mac_addr: MAC address to remove
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_remove_mac_addr(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ const u8 mac_addr[6])
++{
++ struct fsl_mc_command cmd = { 0 };
++ struct dpni_cmd_remove_mac_addr *cmd_params;
++ int i;
+
-+struct dpsw_rsp_vlan_get_attr {
-+ /* cmd word 0 */
-+ __le64 pad;
-+ /* cmd word 1 */
-+ __le16 fdb_id;
-+ __le16 num_ifs;
-+ __le16 num_untagged_ifs;
-+ __le16 num_flooding_ifs;
-+};
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_MAC_ADDR,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_remove_mac_addr *)cmd.params;
++ for (i = 0; i < 6; i++)
++ cmd_params->mac_addr[i] = mac_addr[5 - i];
+
-+struct dpsw_cmd_vlan_get_if {
-+ __le16 vlan_id;
-+};
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
+
-+struct dpsw_rsp_vlan_get_if {
-+ /* cmd word 0 */
-+ __le16 pad0;
-+ __le16 num_ifs;
-+ u8 pad1[4];
-+ /* cmd word 1 */
-+ __le64 if_id[4];
-+};
++/**
++ * dpni_clear_mac_filters() - Clear all unicast and/or multicast MAC filters
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @unicast: Set to '1' to clear unicast addresses
++ * @multicast: Set to '1' to clear multicast addresses
++ *
++ * The primary MAC address is not cleared by this operation.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_clear_mac_filters(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ int unicast,
++ int multicast)
++{
++ struct fsl_mc_command cmd = { 0 };
++ struct dpni_cmd_clear_mac_filters *cmd_params;
+
-+struct dpsw_cmd_vlan_get_if_untagged {
-+ __le16 vlan_id;
-+};
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLR_MAC_FILTERS,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_clear_mac_filters *)cmd.params;
++ dpni_set_field(cmd_params->flags, UNICAST_FILTERS, unicast);
++ dpni_set_field(cmd_params->flags, MULTICAST_FILTERS, multicast);
+
-+struct dpsw_rsp_vlan_get_if_untagged {
-+ /* cmd word 0 */
-+ __le16 pad0;
-+ __le16 num_ifs;
-+ u8 pad1[4];
-+ /* cmd word 1 */
-+ __le64 if_id[4];
-+};
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
+
-+struct dpsw_cmd_vlan_get_if_flooding {
-+ __le16 vlan_id;
-+};
++/**
++ * dpni_set_tx_priorities() - Set transmission TC priority configuration
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @cfg: Transmission selection configuration
++ *
++ * warning: Allowed only when DPNI is disabled
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_tx_priorities(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ const struct dpni_tx_priorities_cfg *cfg)
++{
++ struct dpni_cmd_set_tx_priorities *cmd_params;
++ struct fsl_mc_command cmd = { 0 };
++ int i;
+
-+struct dpsw_rsp_vlan_get_if_flooding {
-+ /* cmd word 0 */
-+ __le16 pad0;
-+ __le16 num_ifs;
-+ u8 pad1[4];
-+ /* cmd word 1 */
-+ __le64 if_id[4];
-+};
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_PRIORITIES,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_set_tx_priorities *)cmd.params;
++ dpni_set_field(cmd_params->flags,
++ SEPARATE_GRP,
++ cfg->separate_groups);
++ cmd_params->prio_group_A = cfg->prio_group_A;
++ cmd_params->prio_group_B = cfg->prio_group_B;
++
++ for (i = 0; i + 1 < DPNI_MAX_TC; i += 2) {
++ dpni_set_field(cmd_params->modes[i / 2],
++ MODE_1,
++ cfg->tc_sched[i].mode);
++ dpni_set_field(cmd_params->modes[i / 2],
++ MODE_2,
++ cfg->tc_sched[i + 1].mode);
++ }
+
-+struct dpsw_cmd_fdb_add {
-+ __le32 pad;
-+ __le16 fdb_aging_time;
-+ __le16 num_fdb_entries;
-+};
++ for (i = 0; i < DPNI_MAX_TC; i++) {
++ cmd_params->delta_bandwidth[i] =
++ cpu_to_le16(cfg->tc_sched[i].delta_bandwidth);
++ }
+
-+struct dpsw_rsp_fdb_add {
-+ __le16 fdb_id;
-+};
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
+
-+struct dpsw_cmd_fdb_remove {
-+ __le16 fdb_id;
-+};
++/**
++ * dpni_set_rx_tc_dist() - Set Rx traffic class distribution configuration
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @tc_id: Traffic class selection (0-7)
++ * @cfg: Traffic class distribution configuration
++ *
++ * warning: if 'dist_mode != DPNI_DIST_MODE_NONE', call dpni_prepare_key_cfg()
++ * first to prepare the key_cfg_iova parameter
++ *
++ * Return: '0' on Success; error code otherwise.
++ */
++int dpni_set_rx_tc_dist(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 tc_id,
++ const struct dpni_rx_tc_dist_cfg *cfg)
++{
++ struct fsl_mc_command cmd = { 0 };
++ struct dpni_cmd_set_rx_tc_dist *cmd_params;
+
-+#define DPSW_ENTRY_TYPE_SHIFT 0
-+#define DPSW_ENTRY_TYPE_SIZE 4
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_TC_DIST,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_set_rx_tc_dist *)cmd.params;
++ cmd_params->dist_size = cpu_to_le16(cfg->dist_size);
++ cmd_params->tc_id = tc_id;
++ dpni_set_field(cmd_params->flags, DIST_MODE, cfg->dist_mode);
++ dpni_set_field(cmd_params->flags, MISS_ACTION, cfg->fs_cfg.miss_action);
++ cmd_params->default_flow_id = cpu_to_le16(cfg->fs_cfg.default_flow_id);
++ cmd_params->key_cfg_iova = cpu_to_le64(cfg->key_cfg_iova);
+
-+struct dpsw_cmd_fdb_add_unicast {
-+ /* cmd word 0 */
-+ __le16 fdb_id;
-+ u8 mac_addr[6];
-+ /* cmd word 1 */
-+ u8 if_egress;
-+ u8 pad;
-+ /* only the first 4 bits from LSB */
-+ u8 type;
-+};
-+
-+struct dpsw_cmd_fdb_get_unicast {
-+ __le16 fdb_id;
-+ u8 mac_addr[6];
-+};
-+
-+struct dpsw_rsp_fdb_get_unicast {
-+ __le64 pad;
-+ __le16 if_egress;
-+ /* only first 4 bits from LSB */
-+ u8 type;
-+};
-+
-+struct dpsw_cmd_fdb_remove_unicast {
-+ /* cmd word 0 */
-+ __le16 fdb_id;
-+ u8 mac_addr[6];
-+ /* cmd word 1 */
-+ __le16 if_egress;
-+ /* only the first 4 bits from LSB */
-+ u8 type;
-+};
-+
-+struct dpsw_cmd_fdb_add_multicast {
-+ /* cmd word 0 */
-+ __le16 fdb_id;
-+ __le16 num_ifs;
-+ /* only the first 4 bits from LSB */
-+ u8 type;
-+ u8 pad[3];
-+ /* cmd word 1 */
-+ u8 mac_addr[6];
-+ __le16 pad2;
-+ /* cmd word 2 */
-+ __le64 if_id[4];
-+};
-+
-+struct dpsw_cmd_fdb_get_multicast {
-+ __le16 fdb_id;
-+ u8 mac_addr[6];
-+};
-+
-+struct dpsw_rsp_fdb_get_multicast {
-+ /* cmd word 0 */
-+ __le64 pad0;
-+ /* cmd word 1 */
-+ __le16 num_ifs;
-+ /* only the first 4 bits from LSB */
-+ u8 type;
-+ u8 pad1[5];
-+ /* cmd word 2 */
-+ __le64 if_id[4];
-+};
-+
-+struct dpsw_cmd_fdb_remove_multicast {
-+ /* cmd word 0 */
-+ __le16 fdb_id;
-+ __le16 num_ifs;
-+ /* only the first 4 bits from LSB */
-+ u8 type;
-+ u8 pad[3];
-+ /* cmd word 1 */
-+ u8 mac_addr[6];
-+ __le16 pad2;
-+ /* cmd word 2 */
-+ __le64 if_id[4];
-+};
-+
-+#define DPSW_LEARNING_MODE_SHIFT 0
-+#define DPSW_LEARNING_MODE_SIZE 4
-+
-+struct dpsw_cmd_fdb_set_learning_mode {
-+ __le16 fdb_id;
-+ /* only the first 4 bits from LSB */
-+ u8 mode;
-+};
-+
-+struct dpsw_cmd_fdb_get_attr {
-+ __le16 fdb_id;
-+};
-+
-+struct dpsw_rsp_fdb_get_attr {
-+ /* cmd word 0 */
-+ __le16 pad;
-+ __le16 max_fdb_entries;
-+ __le16 fdb_aging_time;
-+ __le16 num_fdb_mc_groups;
-+ /* cmd word 1 */
-+ __le16 max_fdb_mc_groups;
-+ /* only the first 4 bits from LSB */
-+ u8 learning_mode;
-+};
-+
-+struct dpsw_cmd_acl_add {
-+ __le16 pad;
-+ __le16 max_entries;
-+};
-+
-+struct dpsw_rsp_acl_add {
-+ __le16 acl_id;
-+};
-+
-+struct dpsw_cmd_acl_remove {
-+ __le16 acl_id;
-+};
-+
-+struct dpsw_prep_acl_entry {
-+ u8 match_l2_dest_mac[6];
-+ __le16 match_l2_tpid;
-+
-+ u8 match_l2_source_mac[6];
-+ __le16 match_l2_vlan_id;
-+
-+ __le32 match_l3_dest_ip;
-+ __le32 match_l3_source_ip;
-+
-+ __le16 match_l4_dest_port;
-+ __le16 match_l4_source_port;
-+ __le16 match_l2_ether_type;
-+ u8 match_l2_pcp_dei;
-+ u8 match_l3_dscp;
-+
-+ u8 mask_l2_dest_mac[6];
-+ __le16 mask_l2_tpid;
-+
-+ u8 mask_l2_source_mac[6];
-+ __le16 mask_l2_vlan_id;
-+
-+ __le32 mask_l3_dest_ip;
-+ __le32 mask_l3_source_ip;
-+
-+ __le16 mask_l4_dest_port;
-+ __le16 mask_l4_source_port;
-+ __le16 mask_l2_ether_type;
-+ u8 mask_l2_pcp_dei;
-+ u8 mask_l3_dscp;
-+
-+ u8 match_l3_protocol;
-+ u8 mask_l3_protocol;
-+};
-+
-+#define DPSW_RESULT_ACTION_SHIFT 0
-+#define DPSW_RESULT_ACTION_SIZE 4
-+
-+struct dpsw_cmd_acl_entry {
-+ __le16 acl_id;
-+ __le16 result_if_id;
-+ __le32 precedence;
-+ /* from LSB only the first 4 bits */
-+ u8 result_action;
-+ u8 pad[7];
-+ __le64 pad2[4];
-+ __le64 key_iova;
-+};
-+
-+struct dpsw_cmd_acl_if {
-+ /* cmd word 0 */
-+ __le16 acl_id;
-+ __le16 num_ifs;
-+ __le32 pad;
-+ /* cmd word 1 */
-+ __le64 if_id[4];
-+};
-+
-+struct dpsw_cmd_acl_get_attr {
-+ __le16 acl_id;
-+};
-+
-+struct dpsw_rsp_acl_get_attr {
-+ /* cmd word 0 */
-+ __le64 pad;
-+ /* cmd word 1 */
-+ __le16 max_entries;
-+ __le16 num_entries;
-+ __le16 num_ifs;
-+};
-+
-+struct dpsw_rsp_ctrl_if_get_attr {
-+ /* cmd word 0 */
-+ __le64 pad;
-+ /* cmd word 1 */
-+ __le32 rx_fqid;
-+ __le32 rx_err_fqid;
-+ /* cmd word 2 */
-+ __le32 tx_err_conf_fqid;
-+};
-+
-+struct dpsw_cmd_ctrl_if_set_pools {
-+ u8 num_dpbp;
-+ /* from LSB: POOL0_BACKUP_POOL:1 ... POOL7_BACKUP_POOL */
-+ u8 backup_pool;
-+ __le16 pad;
-+ __le32 dpbp_id[8];
-+ __le16 buffer_size[8];
-+};
-+
-+struct dpsw_rsp_get_api_version {
-+ __le16 version_major;
-+ __le16 version_minor;
-+};
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
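++
++/*
++ * Example (illustrative sketch, not part of this patch): hash Rx traffic
++ * of TC 0 across 8 queues. The key composition buffer must already have
++ * been built (see the warning above) and DMA-mapped; 'key_iova' holds its
++ * I/O virtual address. DPNI_DIST_MODE_HASH is assumed from dpni.h.
++ *
++ *	struct dpni_rx_tc_dist_cfg dist_cfg = {
++ *		.dist_size = 8,
++ *		.dist_mode = DPNI_DIST_MODE_HASH,
++ *		.key_cfg_iova = key_iova,
++ *	};
++ *	int err;
++ *
++ *	err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->token, 0, &dist_cfg);
++ */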
+
-+#endif /* __FSL_DPSW_CMD_H */
---- /dev/null
-+++ b/drivers/staging/fsl-dpaa2/ethsw/dpsw.c
-@@ -0,0 +1,2762 @@
-+/* Copyright 2013-2015 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
++/**
++ * dpni_set_qos_table() - Set QoS mapping table
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @cfg: QoS table configuration
+ *
++ * This function and all QoS-related functions require that
++ * 'max_tcs > 1' was set at DPNI creation.
+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
++ * warning: Before calling this function, call dpkg_prepare_key_cfg() to
++ * prepare the key_cfg_iova parameter
+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
++ * Return: '0' on Success; Error code otherwise.
+ */
-+#include "../../fsl-mc/include/mc-sys.h"
-+#include "../../fsl-mc/include/mc-cmd.h"
-+#include "dpsw.h"
-+#include "dpsw-cmd.h"
-+
-+static void build_if_id_bitmap(__le64 *bmap,
-+ const u16 *id,
-+ const u16 num_ifs) {
-+ int i;
-+
-+ for (i = 0; (i < num_ifs) && (i < DPSW_MAX_IF); i++)
-+ bmap[id[i] / 64] = dpsw_set_bit(bmap[id[i] / 64],
-+ (id[i] % 64),
-+ 1);
-+}
-+
-+static void read_if_id_bitmap(u16 *if_id,
-+ u16 *num_ifs,
-+ __le64 *bmap) {
-+ int bitmap[DPSW_MAX_IF] = { 0 };
-+ int i, j = 0;
-+ int count = 0;
-+
-+ for (i = 0; i < DPSW_MAX_IF; i++) {
-+ bitmap[i] = dpsw_get_bit(le64_to_cpu(bmap[i / 64]),
-+ i % 64);
-+ count += bitmap[i];
-+ }
++int dpni_set_qos_table(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ const struct dpni_qos_tbl_cfg *cfg)
++{
++ struct dpni_cmd_set_qos_table *cmd_params;
++ struct fsl_mc_command cmd = { 0 };
+
-+ *num_ifs = (u16)count;
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_QOS_TBL,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_set_qos_table *)cmd.params;
++ cmd_params->default_tc = cfg->default_tc;
++ cmd_params->key_cfg_iova = cpu_to_le64(cfg->key_cfg_iova);
++ dpni_set_field(cmd_params->discard_on_miss,
++ ENABLE,
++ cfg->discard_on_miss);
+
-+ for (i = 0; (i < DPSW_MAX_IF) && (j < count); i++) {
-+ if (bitmap[i]) {
-+ if_id[j] = (u16)i;
-+ j++;
-+ }
-+ }
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
+}
+
+/**
-+ * dpsw_open() - Open a control session for the specified object
++ * dpni_add_qos_entry() - Add QoS mapping entry (to select a traffic class)
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @dpsw_id: DPSW unique ID
-+ * @token: Returned token; use in subsequent API calls
-+ *
-+ * This function can be used to open a control session for an
-+ * already created object; an object may have been declared in
-+ * the DPL or by calling the dpsw_create() function.
-+ * This function returns a unique authentication token,
-+ * associated with the specific object ID and the specific MC
-+ * portal; this token must be used in all subsequent commands for
-+ * this specific object
++ * @token: Token of DPNI object
++ * @cfg: QoS rule to add
++ * @tc_id: Traffic class selection (0-7)
++ * @index: Location in the QoS table at which to insert the entry.
++ * Only relevant if MASKING is enabled for QoS classification
++ * on this DPNI; it is ignored for exact match.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
-+int dpsw_open(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ int dpsw_id,
-+ u16 *token)
++int dpni_add_qos_entry(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ const struct dpni_rule_cfg *cfg,
++ u8 tc_id,
++ u16 index)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_open *cmd_params;
-+ int err;
++ struct dpni_cmd_add_qos_entry *cmd_params;
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_OPEN,
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_QOS_ENT,
+ cmd_flags,
-+ 0);
-+ cmd_params = (struct dpsw_cmd_open *)cmd.params;
-+ cmd_params->dpsw_id = cpu_to_le32(dpsw_id);
++ token);
++ cmd_params = (struct dpni_cmd_add_qos_entry *)cmd.params;
++ cmd_params->tc_id = tc_id;
++ cmd_params->key_size = cfg->key_size;
++ cmd_params->index = cpu_to_le16(index);
++ cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
++ cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova);
+
+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ *token = mc_cmd_hdr_read_token(&cmd);
-+
-+ return 0;
++ return mc_send_command(mc_io, &cmd);
+}
+
+/**
-+ * dpsw_close() - Close the control session of the object
++ * dpni_remove_qos_entry() - Remove QoS mapping entry
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ *
-+ * After this function is called, no further operations are
-+ * allowed on the object without opening a new control session.
++ * @token: Token of DPNI object
++ * @cfg: QoS rule to remove
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
-+int dpsw_close(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token)
++int dpni_remove_qos_entry(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ const struct dpni_rule_cfg *cfg)
+{
-+ struct mc_command cmd = { 0 };
++ struct dpni_cmd_remove_qos_entry *cmd_params;
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CLOSE,
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_QOS_ENT,
+ cmd_flags,
+ token);
++ cmd_params = (struct dpni_cmd_remove_qos_entry *)cmd.params;
++ cmd_params->key_size = cfg->key_size;
++ cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
++ cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
-+ * dpsw_enable() - Enable DPSW functionality
++ * dpni_add_fs_entry() - Add Flow Steering entry for a specific traffic class
++ * (to select a flow ID)
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
++ * @token: Token of DPNI object
++ * @tc_id: Traffic class selection (0-7)
++ * @index: Location in the FS table at which to insert the entry.
++ * Only relevant if MASKING is enabled for flow steering
++ * classification on this DPNI; it is ignored for exact match.
++ * @cfg: Flow steering rule to add
++ * @action: Action to be taken as result of a classification hit
+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
++ * Return: '0' on Success; Error code otherwise.
+ */
-+int dpsw_enable(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token)
++int dpni_add_fs_entry(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 tc_id,
++ u16 index,
++ const struct dpni_rule_cfg *cfg,
++ const struct dpni_fs_action_cfg *action)
+{
-+ struct mc_command cmd = { 0 };
++ struct dpni_cmd_add_fs_entry *cmd_params;
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ENABLE,
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_FS_ENT,
+ cmd_flags,
+ token);
++ cmd_params = (struct dpni_cmd_add_fs_entry *)cmd.params;
++ cmd_params->tc_id = tc_id;
++ cmd_params->key_size = cfg->key_size;
++ cmd_params->index = cpu_to_le16(index);
++ cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
++ cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova);
++ cmd_params->options = cpu_to_le16(action->options);
++ cmd_params->flow_id = cpu_to_le16(action->flow_id);
++ cmd_params->flc = cpu_to_le64(action->flc);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
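+
A companion sketch for flow steering, reusing the placeholder rule buffers from the QoS example above; it directs matching traffic within TC 0 to Rx queue 2 via struct dpni_fs_action_cfg (defined in dpni.h later in this patch):

	struct dpni_fs_action_cfg act = {
		.options = 0,	/* no FLC override, no discard */
		.flow_id = 2,	/* target Rx queue within the TC */
	};

	err = dpni_add_fs_entry(mc_io, 0, token, 0 /* tc_id */,
				0 /* index */, &rule, &act);
	if (err)
		dev_err(dev, "dpni_add_fs_entry() failed: %d\n", err);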
+
+/**
-+ * dpsw_disable() - Disable DPSW functionality
++ * dpni_remove_fs_entry() - Remove Flow Steering entry from a specific
++ * traffic class
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
++ * @token: Token of DPNI object
++ * @tc_id: Traffic class selection (0-7)
++ * @cfg: Flow steering rule to remove
+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
++ * Return: '0' on Success; Error code otherwise.
+ */
-+int dpsw_disable(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token)
++int dpni_remove_fs_entry(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 tc_id,
++ const struct dpni_rule_cfg *cfg)
+{
-+ struct mc_command cmd = { 0 };
++ struct dpni_cmd_remove_fs_entry *cmd_params;
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_DISABLE,
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_FS_ENT,
+ cmd_flags,
+ token);
++ cmd_params = (struct dpni_cmd_remove_fs_entry *)cmd.params;
++ cmd_params->tc_id = tc_id;
++ cmd_params->key_size = cfg->key_size;
++ cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
++ cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
-+ * dpsw_is_enabled() - Check if the DPSW is enabled
-+ *
++ * dpni_set_congestion_notification() - Set traffic class congestion
++ * notification configuration
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @en: Returns '1' if object is enabled; '0' otherwise
++ * @token: Token of DPNI object
++ * @qtype: Type of queue - Rx, Tx and Tx confirm types are supported
++ * @tc_id: Traffic class selection (0-7)
++ * @cfg: Congestion notification configuration
+ *
-+ * Return: '0' on Success; Error code otherwise
++ * Return: '0' on Success; error code otherwise.
+ */
-+int dpsw_is_enabled(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ int *en)
++int dpni_set_congestion_notification(
++ struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ enum dpni_queue_type qtype,
++ u8 tc_id,
++ const struct dpni_congestion_notification_cfg *cfg)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_rsp_is_enabled *cmd_rsp;
-+ int err;
++ struct dpni_cmd_set_congestion_notification *cmd_params;
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IS_ENABLED, cmd_flags,
-+ token);
++ cmd.header = mc_encode_cmd_header(
++ DPNI_CMDID_SET_CONGESTION_NOTIFICATION,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_set_congestion_notification *)cmd.params;
++ cmd_params->qtype = qtype;
++ cmd_params->tc = tc_id;
++ cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
++ cmd_params->notification_mode = cpu_to_le16(cfg->notification_mode);
++ cmd_params->dest_priority = cfg->dest_cfg.priority;
++ dpni_set_field(cmd_params->type_units, DEST_TYPE,
++ cfg->dest_cfg.dest_type);
++ dpni_set_field(cmd_params->type_units, CONG_UNITS, cfg->units);
++ cmd_params->message_iova = cpu_to_le64(cfg->message_iova);
++ cmd_params->message_ctx = cpu_to_le64(cfg->message_ctx);
++ cmd_params->threshold_entry = cpu_to_le32(cfg->threshold_entry);
++ cmd_params->threshold_exit = cpu_to_le32(cfg->threshold_exit);
+
+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ cmd_rsp = (struct dpsw_rsp_is_enabled *)cmd.params;
-+ *en = dpsw_get_field(cmd_rsp->enabled, ENABLE);
-+
-+ return 0;
++ return mc_send_command(mc_io, &cmd);
+}
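+
An illustrative sketch of enabling CSCN writes to memory for Rx TC 0; cscn_dma stands for a 16-byte-aligned DMA address provided by the caller and is not defined in this patch:

	struct dpni_congestion_notification_cfg cong = {
		.units             = DPNI_CONGESTION_UNIT_FRAMES,
		.threshold_entry   = 512,	/* enter congestion at 512 frames */
		.threshold_exit    = 384,	/* exit congestion at 384 frames */
		.message_iova      = cscn_dma,
		.message_ctx       = 0,
		.notification_mode = DPNI_CONG_OPT_WRITE_MEM_ON_ENTER |
				     DPNI_CONG_OPT_WRITE_MEM_ON_EXIT,
		.dest_cfg          = { .dest_type = DPNI_DEST_NONE },
	};

	err = dpni_set_congestion_notification(mc_io, 0, token,
					       DPNI_QUEUE_RX, 0, &cong);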
+
+/**
-+ * dpsw_reset() - Reset the DPSW, returns the object to initial state.
++ * dpni_get_congestion_notification() - Get traffic class congestion
++ * notification configuration
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
++ * @token: Token of DPNI object
++ * @qtype: Type of queue - Rx, Tx and Tx confirm types are supported
++ * @tc_id:	bits 7-4 contain the ceetm channel index (valid only for Tx);
++ *		bits 3-0 contain the traffic class.
++ *		Use the DPNI_BUILD_CH_TC() macro to build a correct value
++ *		for the tc_id parameter.
++ * @cfg: congestion notification configuration
+ *
-+ * Return: '0' on Success; Error code otherwise.
++ * Return: '0' on Success; error code otherwise.
+ */
-+int dpsw_reset(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token)
++int dpni_get_congestion_notification(
++ struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ enum dpni_queue_type qtype,
++ u8 tc_id,
++ struct dpni_congestion_notification_cfg *cfg)
+{
-+ struct mc_command cmd = { 0 };
++ struct dpni_rsp_get_congestion_notification *rsp_params;
++ struct dpni_cmd_get_congestion_notification *cmd_params;
++ struct fsl_mc_command cmd = { 0 };
++ int err;
+
+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_RESET,
-+ cmd_flags,
-+ token);
++ cmd.header = mc_encode_cmd_header(
++ DPNI_CMDID_GET_CONGESTION_NOTIFICATION,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_get_congestion_notification *)cmd.params;
++ cmd_params->qtype = qtype;
++ cmd_params->tc = tc_id;
+
+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ rsp_params = (struct dpni_rsp_get_congestion_notification *)cmd.params;
++ cfg->units = dpni_get_field(rsp_params->type_units, CONG_UNITS);
++ cfg->threshold_entry = le32_to_cpu(rsp_params->threshold_entry);
++ cfg->threshold_exit = le32_to_cpu(rsp_params->threshold_exit);
++ cfg->message_ctx = le64_to_cpu(rsp_params->message_ctx);
++ cfg->message_iova = le64_to_cpu(rsp_params->message_iova);
++ cfg->notification_mode = le16_to_cpu(rsp_params->notification_mode);
++ cfg->dest_cfg.dest_id = le32_to_cpu(rsp_params->dest_id);
++ cfg->dest_cfg.priority = rsp_params->dest_priority;
++ cfg->dest_cfg.dest_type = dpni_get_field(rsp_params->type_units,
++ DEST_TYPE);
++
++ return 0;
+}
+
+/**
-+ * dpsw_set_irq() - Set IRQ information for the DPSW to trigger an interrupt.
++ * dpni_set_queue() - Set queue parameters
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @irq_index: Identifies the interrupt index to configure
-+ * @irq_cfg: IRQ configuration
++ * @token: Token of DPNI object
++ * @qtype: Type of queue - all queue types are supported, although
++ * the command is ignored for Tx
++ * @tc: Traffic class, in range 0 to NUM_TCS - 1
++ * @index: Selects the specific queue out of the set allocated for the
++ * same TC. Value must be in range 0 to NUM_QUEUES - 1
++ * @options: A combination of DPNI_QUEUE_OPT_ values that control what
++ * configuration options are set on the queue
++ * @queue: Queue structure
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
-+int dpsw_set_irq(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 irq_index,
-+ struct dpsw_irq_cfg *irq_cfg)
++int dpni_set_queue(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ enum dpni_queue_type qtype,
++ u8 tc,
++ u8 index,
++ u8 options,
++ const struct dpni_queue *queue)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_set_irq *cmd_params;
++ struct fsl_mc_command cmd = { 0 };
++ struct dpni_cmd_set_queue *cmd_params;
+
+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_IRQ,
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_QUEUE,
+ cmd_flags,
+ token);
-+ cmd_params = (struct dpsw_cmd_set_irq *)cmd.params;
-+ cmd_params->irq_index = irq_index;
-+ cmd_params->irq_val = cpu_to_le32(irq_cfg->val);
-+ cmd_params->irq_addr = cpu_to_le64(irq_cfg->addr);
-+ cmd_params->irq_num = cpu_to_le32(irq_cfg->irq_num);
++ cmd_params = (struct dpni_cmd_set_queue *)cmd.params;
++ cmd_params->qtype = qtype;
++ cmd_params->tc = tc;
++ cmd_params->index = index;
++ cmd_params->options = options;
++ cmd_params->dest_id = cpu_to_le32(queue->destination.id);
++ cmd_params->dest_prio = queue->destination.priority;
++ dpni_set_field(cmd_params->flags, DEST_TYPE, queue->destination.type);
++ dpni_set_field(cmd_params->flags, STASH_CTRL, queue->flc.stash_control);
++ dpni_set_field(cmd_params->flags, HOLD_ACTIVE,
++ queue->destination.hold_active);
++ cmd_params->flc = cpu_to_le64(queue->flc.value);
++ cmd_params->user_context = cpu_to_le64(queue->user_context);
+
-+ /* send command to mc*/
++ /* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
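+
A minimal sketch of pointing an Rx queue at a DPCON channel; dpcon_id and my_fq are placeholders chosen by the caller:

	struct dpni_queue q = { 0 };

	q.destination.id       = dpcon_id;		/* DPCON object id */
	q.destination.type     = DPNI_DEST_DPCON;
	q.destination.priority = 0;
	q.user_context         = (u64)(uintptr_t)my_fq;	/* driver cookie */

	err = dpni_set_queue(mc_io, 0, token, DPNI_QUEUE_RX, 0 /* tc */,
			     0 /* index */,
			     DPNI_QUEUE_OPT_DEST | DPNI_QUEUE_OPT_USER_CTX,
			     &q);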
+
+/**
-+ * dpsw_get_irq() - Get IRQ information from the DPSW
-+ *
++ * dpni_get_queue() - Get queue parameters
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @irq_index: The interrupt index to configure
-+ * @type: Interrupt type: 0 represents message interrupt
-+ * type (both irq_addr and irq_val are valid)
-+ * @irq_cfg: IRQ attributes
++ * @token: Token of DPNI object
++ * @qtype: Type of queue - all queue types are supported
++ * @tc: Traffic class, in range 0 to NUM_TCS - 1
++ * @index: Selects the specific queue out of the set allocated for the
++ * same TC. Value must be in range 0 to NUM_QUEUES - 1
++ * @queue: Queue configuration structure
++ * @qid: Queue identification
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
-+int dpsw_get_irq(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 irq_index,
-+ int *type,
-+ struct dpsw_irq_cfg *irq_cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_get_irq *cmd_params;
-+ struct dpsw_rsp_get_irq *rsp_params;
++int dpni_get_queue(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ enum dpni_queue_type qtype,
++ u8 tc,
++ u8 index,
++ struct dpni_queue *queue,
++ struct dpni_queue_id *qid)
++{
++ struct fsl_mc_command cmd = { 0 };
++ struct dpni_cmd_get_queue *cmd_params;
++ struct dpni_rsp_get_queue *rsp_params;
+ int err;
+
+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_GET_IRQ,
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_QUEUE,
+ cmd_flags,
+ token);
-+ cmd_params = (struct dpsw_cmd_get_irq *)cmd.params;
-+ cmd_params->irq_index = irq_index;
++ cmd_params = (struct dpni_cmd_get_queue *)cmd.params;
++ cmd_params->qtype = qtype;
++ cmd_params->tc = tc;
++ cmd_params->index = index;
+
-+ /* send command to mc*/
++ /* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
-+ rsp_params = (struct dpsw_rsp_get_irq *)cmd.params;
-+ irq_cfg->addr = le64_to_cpu(rsp_params->irq_addr);
-+ irq_cfg->val = le32_to_cpu(rsp_params->irq_val);
-+ irq_cfg->irq_num = le32_to_cpu(rsp_params->irq_num);
-+ *type = le32_to_cpu(rsp_params->irq_type);
++ rsp_params = (struct dpni_rsp_get_queue *)cmd.params;
++ queue->destination.id = le32_to_cpu(rsp_params->dest_id);
++ queue->destination.priority = rsp_params->dest_prio;
++ queue->destination.type = dpni_get_field(rsp_params->flags,
++ DEST_TYPE);
++ queue->flc.stash_control = dpni_get_field(rsp_params->flags,
++ STASH_CTRL);
++ queue->destination.hold_active = dpni_get_field(rsp_params->flags,
++ HOLD_ACTIVE);
++ queue->flc.value = le64_to_cpu(rsp_params->flc);
++ queue->user_context = le64_to_cpu(rsp_params->user_context);
++ qid->fqid = le32_to_cpu(rsp_params->fqid);
++ qid->qdbin = le16_to_cpu(rsp_params->qdbin);
+
+ return 0;
+}
+
+/**
-+ * dpsw_set_irq_enable() - Set overall interrupt state.
++ * dpni_get_statistics() - Get DPNI statistics
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPCI object
-+ * @irq_index: The interrupt index to configure
-+ * @en: Interrupt state - enable = 1, disable = 0
-+ *
-+ * Allows GPP software to control when interrupts are generated.
-+ * Each interrupt can have up to 32 causes. The enable/disable control's the
-+ * overall interrupt state. if the interrupt is disabled no causes will cause
-+ * an interrupt
++ * @token: Token of DPNI object
++ * @page: Selects the statistics page to retrieve, see
++ *		DPNI_GET_STATISTICS output. Pages are numbered 0 to 3.
++ * @param:	Custom parameter for some pages, used to select a certain
++ *		statistic source, for example the TC.
++ * @stat:	Structure to receive the statistics
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
-+int dpsw_set_irq_enable(struct fsl_mc_io *mc_io,
++int dpni_get_statistics(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
-+ u8 irq_index,
-+ u8 en)
++ u8 page,
++ u8 param,
++ union dpni_statistics *stat)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_set_irq_enable *cmd_params;
++ struct fsl_mc_command cmd = { 0 };
++ struct dpni_cmd_get_statistics *cmd_params;
++ struct dpni_rsp_get_statistics *rsp_params;
++ int i, err;
+
+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_IRQ_ENABLE,
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_STATISTICS,
+ cmd_flags,
+ token);
-+ cmd_params = (struct dpsw_cmd_set_irq_enable *)cmd.params;
-+ dpsw_set_field(cmd_params->enable_state, ENABLE, en);
-+ cmd_params->irq_index = irq_index;
++ cmd_params = (struct dpni_cmd_get_statistics *)cmd.params;
++ cmd_params->page_number = page;
++ cmd_params->param = param;
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
++ /* send command to mc */
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ rsp_params = (struct dpni_rsp_get_statistics *)cmd.params;
++ for (i = 0; i < DPNI_STATISTICS_CNT; i++)
++ stat->raw.counter[i] = le64_to_cpu(rsp_params->counter[i]);
++
++ return 0;
+}
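+
To illustrate the paging scheme, a sketch that reads the page 0 (ingress) counters through the union defined in dpni.h later in this patch:

	union dpni_statistics stats;

	err = dpni_get_statistics(mc_io, 0, token, 0 /* page */,
				  0 /* param, unused for page 0 */, &stats);
	if (!err)
		pr_info("rx frames: %llu, rx bytes: %llu\n",
			(unsigned long long)stats.page_0.ingress_all_frames,
			(unsigned long long)stats.page_0.ingress_all_bytes);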
+
+/**
-+ * dpsw_set_irq_mask() - Set interrupt mask.
++ * dpni_reset_statistics() - Clears DPNI statistics
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_reset_statistics(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token)
++{
++ struct fsl_mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_RESET_STATISTICS,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpni_set_taildrop() - Set taildrop per queue or TC
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPCI object
-+ * @irq_index: The interrupt index to configure
-+ * @mask: Event mask to trigger interrupt;
-+ * each bit:
-+ * 0 = ignore event
-+ * 1 = consider event for asserting IRQ
-+ *
-+ * Every interrupt can have up to 32 causes and the interrupt model supports
-+ * masking/unmasking each cause independently
++ * @token: Token of DPNI object
++ * @cg_point: Congestion point
++ * @qtype:	Queue type on which the taildrop is configured.
++ *		Only Rx queues are supported for now
++ * @tc:	bits 7-4 contain the ceetm channel index (valid only for Tx);
++ *		bits 3-0 contain the traffic class.
++ *		Use the DPNI_BUILD_CH_TC() macro to build a correct value
++ *		for the tc parameter.
++ * @index:	Index of the queue if the DPNI supports multiple queues for
++ *		traffic distribution. Ignored if CONGESTION_POINT is not 0.
++ * @taildrop: Taildrop structure
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
-+int dpsw_set_irq_mask(struct fsl_mc_io *mc_io,
++int dpni_set_taildrop(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
-+ u8 irq_index,
-+ u32 mask)
++ enum dpni_congestion_point cg_point,
++ enum dpni_queue_type qtype,
++ u8 tc,
++ u8 index,
++ struct dpni_taildrop *taildrop)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_set_irq_mask *cmd_params;
++ struct fsl_mc_command cmd = { 0 };
++ struct dpni_cmd_set_taildrop *cmd_params;
+
+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_IRQ_MASK,
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TAILDROP,
+ cmd_flags,
+ token);
-+ cmd_params = (struct dpsw_cmd_set_irq_mask *)cmd.params;
-+ cmd_params->mask = cpu_to_le32(mask);
-+ cmd_params->irq_index = irq_index;
++ cmd_params = (struct dpni_cmd_set_taildrop *)cmd.params;
++ cmd_params->congestion_point = cg_point;
++ cmd_params->qtype = qtype;
++ cmd_params->tc = tc;
++ cmd_params->index = index;
++ dpni_set_field(cmd_params->enable, ENABLE, taildrop->enable);
++ cmd_params->units = taildrop->units;
++ cmd_params->threshold = cpu_to_le32(taildrop->threshold);
+
-+ /* send command to mc*/
++ /* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
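+
A hedged example of arming a per-queue, byte-based taildrop on Rx TC 0, queue 0:

	struct dpni_taildrop td = {
		.enable    = 1,
		.units     = DPNI_CONGESTION_UNIT_BYTES,
		.threshold = 64 * 1024,	/* drop beyond a 64 KiB backlog */
	};

	err = dpni_set_taildrop(mc_io, 0, token, DPNI_CP_QUEUE,
				DPNI_QUEUE_RX, 0 /* tc */, 0 /* index */, &td);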
+
+/**
-+ * dpsw_get_irq_status() - Get the current status of any pending interrupts
++ * dpni_get_taildrop() - Get taildrop information
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @irq_index: The interrupt index to configure
-+ * @status: Returned interrupts status - one bit per cause:
-+ * 0 = no interrupt pending
-+ * 1 = interrupt pending
++ * @token: Token of DPNI object
++ * @cg_point: Congestion point
++ * @qtype:	Queue type on which the taildrop is configured.
++ *		Only Rx queues are supported for now
++ * @tc:	bits 7-4 contain the ceetm channel index (valid only for Tx);
++ *		bits 3-0 contain the traffic class.
++ *		Use the DPNI_BUILD_CH_TC() macro to build a correct value
++ *		for the tc parameter.
++ * @index:	Index of the queue if the DPNI supports multiple queues for
++ *		traffic distribution. Ignored if CONGESTION_POINT is not 0.
++ * @taildrop: Taildrop structure
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
-+int dpsw_get_irq_status(struct fsl_mc_io *mc_io,
++int dpni_get_taildrop(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ enum dpni_congestion_point cg_point,
++ enum dpni_queue_type qtype,
++ u8 tc,
++ u8 index,
++ struct dpni_taildrop *taildrop)
++{
++ struct fsl_mc_command cmd = { 0 };
++ struct dpni_cmd_get_taildrop *cmd_params;
++ struct dpni_rsp_get_taildrop *rsp_params;
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TAILDROP,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_get_taildrop *)cmd.params;
++ cmd_params->congestion_point = cg_point;
++ cmd_params->qtype = qtype;
++ cmd_params->tc = tc;
++ cmd_params->index = index;
++
++ /* send command to mc */
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ rsp_params = (struct dpni_rsp_get_taildrop *)cmd.params;
++ taildrop->enable = dpni_get_field(rsp_params->enable, ENABLE);
++ taildrop->units = rsp_params->units;
++ taildrop->threshold = le32_to_cpu(rsp_params->threshold);
++
++ return 0;
++}
++
++/**
++ * dpni_get_api_version() - Get Data Path Network Interface API version
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @major_ver: Major version of data path network interface API
++ * @minor_ver: Minor version of data path network interface API
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_get_api_version(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 *major_ver,
++ u16 *minor_ver)
++{
++ struct dpni_rsp_get_api_version *rsp_params;
++ struct fsl_mc_command cmd = { 0 };
++ int err;
++
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_API_VERSION,
++ cmd_flags, 0);
++
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ rsp_params = (struct dpni_rsp_get_api_version *)cmd.params;
++ *major_ver = le16_to_cpu(rsp_params->major);
++ *minor_ver = le16_to_cpu(rsp_params->minor);
++
++ return 0;
++}
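+
Callers typically probe the firmware API version before relying on newer commands; a minimal sketch (the 7.x floor is an arbitrary example, not a requirement stated by this patch):

	u16 major, minor;

	err = dpni_get_api_version(mc_io, 0, &major, &minor);
	if (!err && major < 7)
		dev_warn(dev, "DPNI API %u.%u older than expected\n",
			 major, minor);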
++
++/**
++ * dpni_set_rx_fs_dist() - Set Rx traffic class FS distribution
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @cfg: Distribution configuration
++ * If FS was already enabled by a previous call, the classification
++ * key is changed but all existing table rules are kept. If the
++ * existing rules do not match the new key, the results are
++ * unpredictable; it is the user's responsibility to maintain key
++ * integrity.
++ * If cfg.enable is set to 1, the command creates a flow steering table
++ * and classifies packets according to it. Packets that miss all the
++ * table rules are classified according to the settings made in
++ * dpni_set_rx_hash_dist().
++ * If cfg.enable is set to 0, the command clears the flow steering
++ * table. Packets are then classified according to the settings made in
++ * dpni_set_rx_hash_dist().
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_rx_fs_dist(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
-+ u8 irq_index,
-+ u32 *status)
++ const struct dpni_rx_dist_cfg *cfg)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_get_irq_status *cmd_params;
-+ struct dpsw_rsp_get_irq_status *rsp_params;
-+ int err;
++ struct dpni_cmd_set_rx_fs_dist *cmd_params;
++ struct fsl_mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_FS_DIST,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_set_rx_fs_dist *)cmd.params;
++	cmd_params->dist_size = cpu_to_le16(cfg->dist_size);
++	dpni_set_field(cmd_params->enable, RX_FS_DIST_ENABLE, cfg->enable);
++	cmd_params->tc = cfg->tc;
++	cmd_params->miss_flow_id = cpu_to_le16(cfg->fs_miss_flow_id);
++	cmd_params->key_cfg_iova = cpu_to_le64(cfg->key_cfg_iova);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpni_set_rx_hash_dist() - Set Rx traffic class HASH distribution
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @cfg: Distribution configuration
++ * If cfg.enable is set to 1, packets are classified using a hash
++ * function based on the key described by the cfg.key_cfg_iova parameter.
++ * If cfg.enable is set to 0, packets are sent to the queue configured
++ * by the dpni_set_rx_dist_default_queue() call.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_rx_hash_dist(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ const struct dpni_rx_dist_cfg *cfg)
++{
++ struct dpni_cmd_set_rx_hash_dist *cmd_params;
++ struct fsl_mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_HASH_DIST,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_set_rx_hash_dist *)cmd.params;
++	cmd_params->dist_size = cpu_to_le16(cfg->dist_size);
++	dpni_set_field(cmd_params->enable, RX_FS_DIST_ENABLE, cfg->enable);
++	cmd_params->tc = cfg->tc;
++	cmd_params->key_cfg_iova = cpu_to_le64(cfg->key_cfg_iova);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
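+
Putting the distribution calls together, a sketch that enables hash distribution across 8 queues on TC 0; key_cfg_dma is an assumed DMA address of the 256-byte buffer prepared with dpni_prepare_key_cfg():

	struct dpni_rx_dist_cfg dist = {
		.dist_size    = 8,		/* spread across 8 queues */
		.key_cfg_iova = key_cfg_dma,
		.enable       = 1,
		.tc           = 0,
	};

	err = dpni_set_rx_hash_dist(mc_io, 0, token, &dist);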
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/ethernet/dpni.h
+@@ -0,0 +1,1172 @@
++/* Copyright 2013-2016 Freescale Semiconductor Inc.
++ * Copyright 2016 NXP
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef __FSL_DPNI_H
++#define __FSL_DPNI_H
++
++#include "dpkg.h"
++
++struct fsl_mc_io;
++
++/**
++ * Data Path Network Interface API
++ * Contains initialization APIs and runtime control APIs for DPNI
++ */
++
++/** General DPNI macros */
++
++/**
++ * Maximum number of traffic classes
++ */
++#define DPNI_MAX_TC 8
++/**
++ * Maximum number of buffer pools per DPNI
++ */
++#define DPNI_MAX_DPBP 8
++/**
++ * Maximum number of senders
++ */
++#define DPNI_MAX_SENDERS 16
++/**
++ * Maximum distribution size
++ */
++#define DPNI_MAX_DIST_SIZE 16
++
++/**
++ * All traffic classes considered; see dpni_set_queue()
++ */
++#define DPNI_ALL_TCS (u8)(-1)
++/**
++ * All flows within traffic class considered; see dpni_set_queue()
++ */
++#define DPNI_ALL_TC_FLOWS (u16)(-1)
++/**
++ * Generate new flow ID; see dpni_set_queue()
++ */
++#define DPNI_NEW_FLOW_ID (u16)(-1)
++
++/**
++ * Tx traffic is always released to a buffer pool on transmit, there are no
++ * resources allocated to have the frames confirmed back to the source after
++ * transmission.
++ */
++#define DPNI_OPT_TX_FRM_RELEASE 0x000001
++/**
++ * Disables support for MAC address filtering for addresses other than primary
++ * MAC address. This affects both unicast and multicast. Promiscuous mode can
++ * still be enabled/disabled for both unicast and multicast. If promiscuous mode
++ * is disabled, only traffic matching the primary MAC address will be accepted.
++ */
++#define DPNI_OPT_NO_MAC_FILTER 0x000002
++/**
++ * Allocate policers for this DPNI. They can be used to rate-limit traffic per
++ * traffic class (TC) basis.
++ */
++#define DPNI_OPT_HAS_POLICING 0x000004
++/**
++ * Congestion can be managed in several ways, allowing the buffer pool to
++ * deplete on ingress, taildrop on each queue or use congestion groups for sets
++ * of queues. If set, a single congestion group is used across all TCs.
++ * If reset, a congestion group is allocated for each TC. Only relevant if the
++ * DPNI has multiple traffic classes.
++ */
++#define DPNI_OPT_SHARED_CONGESTION 0x000008
++/**
++ * Enables TCAM for Flow Steering and QoS look-ups. If not specified, all
++ * look-ups are exact match. Note that TCAM is not available on LS1088 and its
++ * variants. Setting this bit on these SoCs will trigger an error.
++ */
++#define DPNI_OPT_HAS_KEY_MASKING 0x000010
++/**
++ * Disables the flow steering table.
++ */
++#define DPNI_OPT_NO_FS 0x000020
++
++int dpni_open(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ int dpni_id,
++ u16 *token);
++
++int dpni_close(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token);
++
++/**
++ * struct dpni_pools_cfg - Structure representing buffer pools configuration
++ * @num_dpbp: Number of DPBPs
++ * @pools: Array of buffer pools parameters; The number of valid entries
++ * must match 'num_dpbp' value
++ */
++struct dpni_pools_cfg {
++ u8 num_dpbp;
++ /**
++ * struct pools - Buffer pools parameters
++ * @dpbp_id: DPBP object ID
++ * @priority_mask: priorities served by DPBP
++ * @buffer_size: Buffer size
++ * @backup_pool: Backup pool
++ */
++ struct {
++ u16 dpbp_id;
++ u8 priority_mask;
++ u16 buffer_size;
++ u8 backup_pool;
++ } pools[DPNI_MAX_DPBP];
++};
++
++int dpni_set_pools(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ const struct dpni_pools_cfg *cfg);
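+
For illustration, a sketch attaching a single buffer pool to the DPNI; dpbp_id and the buffer size are placeholders chosen by the driver:

	struct dpni_pools_cfg pools = {
		.num_dpbp = 1,
		.pools[0] = {
			.dpbp_id     = dpbp_id,	/* placeholder DPBP id */
			.buffer_size = 2048,
			.backup_pool = 0,
		},
	};

	err = dpni_set_pools(mc_io, 0, token, &pools);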
++
++int dpni_enable(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token);
++
++int dpni_disable(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token);
++
++int dpni_is_enabled(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ int *en);
++
++int dpni_reset(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token);
++
++/**
++ * DPNI IRQ Index and Events
++ */
++
++/**
++ * IRQ index
++ */
++#define DPNI_IRQ_INDEX 0
++/**
++ * IRQ event - indicates a change in link state
++ */
++#define DPNI_IRQ_EVENT_LINK_CHANGED 0x00000001
++
++int dpni_set_irq_enable(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ u8 en);
++
++int dpni_get_irq_enable(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ u8 *en);
++
++int dpni_set_irq_mask(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ u32 mask);
++
++int dpni_get_irq_mask(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ u32 *mask);
++
++int dpni_get_irq_status(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ u32 *status);
++
++int dpni_clear_irq_status(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ u32 status);
++
++/**
++ * struct dpni_attr - Structure representing DPNI attributes
++ * @options: Any combination of the following options:
++ * DPNI_OPT_TX_FRM_RELEASE
++ * DPNI_OPT_NO_MAC_FILTER
++ * DPNI_OPT_HAS_POLICING
++ * DPNI_OPT_SHARED_CONGESTION
++ * DPNI_OPT_HAS_KEY_MASKING
++ * DPNI_OPT_NO_FS
++ * @num_queues: Number of Tx and Rx queues used for traffic distribution.
++ * @num_tcs: Number of traffic classes (TCs), reserved for the DPNI.
++ * @mac_filter_entries: Number of entries in the MAC address filtering table.
++ * @vlan_filter_entries: Number of entries in the VLAN address filtering table.
++ * @qos_entries: Number of entries in the QoS classification table.
++ * @fs_entries: Number of entries in the flow steering table.
++ * @qos_key_size: Size, in bytes, of the QoS look-up key. Defining a key larger
++ * than this when adding QoS entries will result in an error.
++ * @fs_key_size: Size, in bytes, of the flow steering look-up key. Defining a
++ * key larger than this when composing the hash + FS key will
++ * result in an error.
++ * @wriop_version: Version of the WRIOP HW block. The 3 version values are
++ *		stored in 6, 5 and 5 bits respectively.
++ */
++struct dpni_attr {
++ u32 options;
++ u8 num_queues;
++ u8 num_tcs;
++ u8 mac_filter_entries;
++ u8 vlan_filter_entries;
++ u8 qos_entries;
++ u16 fs_entries;
++ u8 qos_key_size;
++ u8 fs_key_size;
++ u16 wriop_version;
++};
++
++int dpni_get_attributes(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ struct dpni_attr *attr);
++
++/**
++ * DPNI errors
++ */
++
++/**
++ * Extract out of frame header error
++ */
++#define DPNI_ERROR_EOFHE 0x00020000
++/**
++ * Frame length error
++ */
++#define DPNI_ERROR_FLE 0x00002000
++/**
++ * Frame physical error
++ */
++#define DPNI_ERROR_FPE 0x00001000
++/**
++ * Parsing header error
++ */
++#define DPNI_ERROR_PHE 0x00000020
++/**
++ * Parser L3 checksum error
++ */
++#define DPNI_ERROR_L3CE 0x00000004
++/**
++ * Parser L4 checksum error
++ */
++#define DPNI_ERROR_L4CE 0x00000001
++
++/**
++ * enum dpni_error_action - Defines DPNI behavior for errors
++ * @DPNI_ERROR_ACTION_DISCARD: Discard the frame
++ * @DPNI_ERROR_ACTION_CONTINUE: Continue with the normal flow
++ * @DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE: Send the frame to the error queue
++ */
++enum dpni_error_action {
++ DPNI_ERROR_ACTION_DISCARD = 0,
++ DPNI_ERROR_ACTION_CONTINUE = 1,
++ DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE = 2
++};
++
++/**
++ * struct dpni_error_cfg - Structure representing DPNI errors treatment
++ * @errors: Errors mask; use 'DPNI_ERROR_<X>' values
++ * @error_action: The desired action for the errors mask
++ * @set_frame_annotation: Set to '1' to mark the errors in frame annotation
++ * status (FAS); relevant only for the non-discard action
++ */
++struct dpni_error_cfg {
++ u32 errors;
++ enum dpni_error_action error_action;
++ int set_frame_annotation;
++};
++
++int dpni_set_errors_behavior(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ struct dpni_error_cfg *cfg);
++
++/**
++ * DPNI buffer layout modification options
++ */
++
++/**
++ * Select to modify the time-stamp setting
++ */
++#define DPNI_BUF_LAYOUT_OPT_TIMESTAMP 0x00000001
++/**
++ * Select to modify the parser-result setting; not applicable for Tx
++ */
++#define DPNI_BUF_LAYOUT_OPT_PARSER_RESULT 0x00000002
++/**
++ * Select to modify the frame-status setting
++ */
++#define DPNI_BUF_LAYOUT_OPT_FRAME_STATUS 0x00000004
++/**
++ * Select to modify the private-data-size setting
++ */
++#define DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE 0x00000008
++/**
++ * Select to modify the data-alignment setting
++ */
++#define DPNI_BUF_LAYOUT_OPT_DATA_ALIGN 0x00000010
++/**
++ * Select to modify the data-head-room setting
++ */
++#define DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM 0x00000020
++/**
++ * Select to modify the data-tail-room setting
++ */
++#define DPNI_BUF_LAYOUT_OPT_DATA_TAIL_ROOM 0x00000040
++
++/**
++ * struct dpni_buffer_layout - Structure representing DPNI buffer layout
++ * @options: Flags representing the suggested modifications to the buffer
++ * layout; Use any combination of 'DPNI_BUF_LAYOUT_OPT_<X>' flags
++ * @pass_timestamp: Pass timestamp value
++ * @pass_parser_result: Pass parser results
++ * @pass_frame_status: Pass frame status
++ * @private_data_size: Size kept for private data (in bytes)
++ * @data_align: Data alignment
++ * @data_head_room: Data head room
++ * @data_tail_room: Data tail room
++ */
++struct dpni_buffer_layout {
++ u32 options;
++ int pass_timestamp;
++ int pass_parser_result;
++ int pass_frame_status;
++ u16 private_data_size;
++ u16 data_align;
++ u16 data_head_room;
++ u16 data_tail_room;
++};
++
++/**
++ * enum dpni_queue_type - Identifies a type of queue targeted by the command
++ * @DPNI_QUEUE_RX: Rx queue
++ * @DPNI_QUEUE_TX: Tx queue
++ * @DPNI_QUEUE_TX_CONFIRM: Tx confirmation queue
++ * @DPNI_QUEUE_RX_ERR: Rx error queue
++ */
++enum dpni_queue_type {
++ DPNI_QUEUE_RX,
++ DPNI_QUEUE_TX,
++ DPNI_QUEUE_TX_CONFIRM,
++ DPNI_QUEUE_RX_ERR,
++};
++
++int dpni_get_buffer_layout(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ enum dpni_queue_type qtype,
++ struct dpni_buffer_layout *layout);
++
++int dpni_set_buffer_layout(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ enum dpni_queue_type qtype,
++ const struct dpni_buffer_layout *layout);
++
++/**
++ * enum dpni_offload - Identifies a type of offload targeted by the command
++ * @DPNI_OFF_RX_L3_CSUM: Rx L3 checksum validation
++ * @DPNI_OFF_RX_L4_CSUM: Rx L4 checksum validation
++ * @DPNI_OFF_TX_L3_CSUM: Tx L3 checksum generation
++ * @DPNI_OFF_TX_L4_CSUM: Tx L4 checksum generation
++ */
++enum dpni_offload {
++ DPNI_OFF_RX_L3_CSUM,
++ DPNI_OFF_RX_L4_CSUM,
++ DPNI_OFF_TX_L3_CSUM,
++ DPNI_OFF_TX_L4_CSUM,
++};
++
++int dpni_set_offload(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ enum dpni_offload type,
++ u32 config);
++
++int dpni_get_offload(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ enum dpni_offload type,
++ u32 *config);
++
++int dpni_get_qdid(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ enum dpni_queue_type qtype,
++ u16 *qdid);
++
++int dpni_get_tx_data_offset(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 *data_offset);
++
++#define DPNI_STATISTICS_CNT 7
++
++union dpni_statistics {
++ /**
++ * struct page_0 - Page_0 statistics structure
++ * @ingress_all_frames: Ingress frame count
++ * @ingress_all_bytes: Ingress byte count
++ * @ingress_multicast_frames: Ingress multicast frame count
++ * @ingress_multicast_bytes: Ingress multicast byte count
++ * @ingress_broadcast_frames: Ingress broadcast frame count
++ * @ingress_broadcast_bytes: Ingress broadcast byte count
++ */
++ struct {
++ u64 ingress_all_frames;
++ u64 ingress_all_bytes;
++ u64 ingress_multicast_frames;
++ u64 ingress_multicast_bytes;
++ u64 ingress_broadcast_frames;
++ u64 ingress_broadcast_bytes;
++ } page_0;
++ /**
++ * struct page_1 - Page_1 statistics structure
++ * @egress_all_frames: Egress frame count
++ * @egress_all_bytes: Egress byte count
++ * @egress_multicast_frames: Egress multicast frame count
++ * @egress_multicast_bytes: Egress multicast byte count
++ * @egress_broadcast_frames: Egress broadcast frame count
++ * @egress_broadcast_bytes: Egress broadcast byte count
++ */
++ struct {
++ u64 egress_all_frames;
++ u64 egress_all_bytes;
++ u64 egress_multicast_frames;
++ u64 egress_multicast_bytes;
++ u64 egress_broadcast_frames;
++ u64 egress_broadcast_bytes;
++ } page_1;
++ /**
++ * struct page_2 - Page_2 statistics structure
++ * @ingress_filtered_frames: Ingress filtered frame count
++ * @ingress_discarded_frames: Ingress discarded frame count
++ * @ingress_nobuffer_discards: Ingress discarded frame count
++ * due to lack of buffers
++ * @egress_discarded_frames: Egress discarded frame count
++ * @egress_confirmed_frames: Egress confirmed frame count
++ */
++ struct {
++ u64 ingress_filtered_frames;
++ u64 ingress_discarded_frames;
++ u64 ingress_nobuffer_discards;
++ u64 egress_discarded_frames;
++ u64 egress_confirmed_frames;
++ } page_2;
++ /**
++ * struct page_3 - Page_3 statistics structure with values for the
++ * selected TC
++ * @ceetm_dequeue_bytes: Cumulative count of the number of bytes
++ * dequeued
++ * @ceetm_dequeue_frames: Cumulative count of the number of frames
++ * dequeued
++ * @ceetm_reject_bytes: Cumulative count of the number of bytes in all
++ * frames whose enqueue was rejected
++ * @ceetm_reject_frames: Cumulative count of all frame enqueues
++ * rejected
++ */
++ struct {
++ u64 ceetm_dequeue_bytes;
++ u64 ceetm_dequeue_frames;
++ u64 ceetm_reject_bytes;
++ u64 ceetm_reject_frames;
++ } page_3;
++ /**
++ * struct raw - raw statistics structure
++ */
++ struct {
++ u64 counter[DPNI_STATISTICS_CNT];
++ } raw;
++};
++
++int dpni_get_statistics(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 page,
++ u8 param,
++ union dpni_statistics *stat);
++
++int dpni_reset_statistics(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token);
++
++/**
++ * Enable auto-negotiation
++ */
++#define DPNI_LINK_OPT_AUTONEG 0x0000000000000001ULL
++/**
++ * Enable half-duplex mode
++ */
++#define DPNI_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL
++/**
++ * Enable pause frames
++ */
++#define DPNI_LINK_OPT_PAUSE 0x0000000000000004ULL
++/**
++ * Enable a-symmetric pause frames
++ */
++#define DPNI_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL
++/**
++ * Enable priority flow control pause frames
++ */
++#define DPNI_LINK_OPT_PFC_PAUSE 0x0000000000000010ULL
++
++/**
++ * struct dpni_link_cfg - Structure representing DPNI link configuration
++ * @rate: Rate
++ * @options: Mask of available options; use 'DPNI_LINK_OPT_<X>' values
++ */
++struct dpni_link_cfg {
++ u32 rate;
++ u64 options;
++};
++
++int dpni_set_link_cfg(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ const struct dpni_link_cfg *cfg);
++
++/**
++ * struct dpni_link_state - Structure representing DPNI link state
++ * @rate: Rate
++ * @options: Mask of available options; use 'DPNI_LINK_OPT_<X>' values
++ * @up: Link state; '0' for down, '1' for up
++ */
++struct dpni_link_state {
++ u32 rate;
++ u64 options;
++ int up;
++};
++
++int dpni_get_link_state(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ struct dpni_link_state *state);
++
++/**
++ * struct dpni_tx_shaping_cfg - Structure representing DPNI Tx shaping
++ *				configuration
++ * @rate_limit: rate in Mbps
++ * @max_burst_size: burst size in bytes (up to 64KB)
++ */
++struct dpni_tx_shaping_cfg {
++ u32 rate_limit;
++ u16 max_burst_size;
++};
++
++int dpni_set_tx_shaping(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ const struct dpni_tx_shaping_cfg *tx_cr_shaper,
++ const struct dpni_tx_shaping_cfg *tx_er_shaper,
++ int coupled);
++
++int dpni_set_max_frame_length(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 max_frame_length);
++
++int dpni_get_max_frame_length(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 *max_frame_length);
++
++int dpni_set_multicast_promisc(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ int en);
++
++int dpni_get_multicast_promisc(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ int *en);
++
++int dpni_set_unicast_promisc(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ int en);
++
++int dpni_get_unicast_promisc(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ int *en);
++
++int dpni_set_primary_mac_addr(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ const u8 mac_addr[6]);
++
++int dpni_get_primary_mac_addr(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 mac_addr[6]);
++
++int dpni_get_port_mac_addr(struct fsl_mc_io *mc_io,
++ u32 cm_flags,
++ u16 token,
++ u8 mac_addr[6]);
++
++int dpni_add_mac_addr(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ const u8 mac_addr[6]);
++
++int dpni_remove_mac_addr(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ const u8 mac_addr[6]);
++
++int dpni_clear_mac_filters(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ int unicast,
++ int multicast);
++
++/**
++ * enum dpni_dist_mode - DPNI distribution mode
++ * @DPNI_DIST_MODE_NONE: No distribution
++ * @DPNI_DIST_MODE_HASH: Use hash distribution; only relevant if
++ * the 'DPNI_OPT_DIST_HASH' option was set at DPNI creation
++ * @DPNI_DIST_MODE_FS: Use explicit flow steering; only relevant if
++ * the 'DPNI_OPT_DIST_FS' option was set at DPNI creation
++ */
++enum dpni_dist_mode {
++ DPNI_DIST_MODE_NONE = 0,
++ DPNI_DIST_MODE_HASH = 1,
++ DPNI_DIST_MODE_FS = 2
++};
++
++/**
++ * enum dpni_fs_miss_action - DPNI Flow Steering miss action
++ * @DPNI_FS_MISS_DROP: In case of no-match, drop the frame
++ * @DPNI_FS_MISS_EXPLICIT_FLOWID: In case of no-match, use explicit flow-id
++ * @DPNI_FS_MISS_HASH: In case of no-match, distribute using hash
++ */
++enum dpni_fs_miss_action {
++ DPNI_FS_MISS_DROP = 0,
++ DPNI_FS_MISS_EXPLICIT_FLOWID = 1,
++ DPNI_FS_MISS_HASH = 2
++};
++
++/**
++ * struct dpni_fs_tbl_cfg - Flow Steering table configuration
++ * @miss_action: Miss action selection
++ * @default_flow_id: Used when 'miss_action = DPNI_FS_MISS_EXPLICIT_FLOWID'
++ */
++struct dpni_fs_tbl_cfg {
++ enum dpni_fs_miss_action miss_action;
++ u16 default_flow_id;
++};
++
++int dpni_prepare_key_cfg(const struct dpkg_profile_cfg *cfg,
++ u8 *key_cfg_buf);
++
++/**
++ * struct dpni_qos_tbl_cfg - Structure representing QOS table configuration
++ * @key_cfg_iova: I/O virtual address of 256 bytes DMA-able memory filled with
++ *		key extractions to be used as the QoS criteria, prepared by
++ *		calling dpni_prepare_key_cfg()
++ * @discard_on_miss: Set to '1' to discard frames in case of no match (miss);
++ * '0' to use the 'default_tc' in such cases
++ * @default_tc: Used in case of no-match and 'discard_on_miss'= 0
++ */
++struct dpni_qos_tbl_cfg {
++ u64 key_cfg_iova;
++ int discard_on_miss;
++ u8 default_tc;
++};
++
++int dpni_set_qos_table(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ const struct dpni_qos_tbl_cfg *cfg);
++
++/**
++ * enum dpni_tx_schedule_mode - DPNI Tx scheduling mode
++ * @DPNI_TX_SCHED_STRICT_PRIORITY: strict priority
++ * @DPNI_TX_SCHED_WEIGHTED_A: weighted based scheduling in group A
++ * @DPNI_TX_SCHED_WEIGHTED_B: weighted based scheduling in group B
++ */
++enum dpni_tx_schedule_mode {
++ DPNI_TX_SCHED_STRICT_PRIORITY = 0,
++ DPNI_TX_SCHED_WEIGHTED_A,
++ DPNI_TX_SCHED_WEIGHTED_B,
++};
++
++/**
++ * struct dpni_tx_schedule_cfg - Structure representing Tx scheduling conf
++ * @mode: Scheduling mode
++ * @delta_bandwidth: Bandwidth represented in weights from 100 to 10000;
++ * not applicable for 'strict-priority' mode;
++ */
++struct dpni_tx_schedule_cfg {
++ enum dpni_tx_schedule_mode mode;
++ u16 delta_bandwidth;
++};
++
++/**
++ * struct dpni_tx_priorities_cfg - Structure representing transmission
++ * priorities for DPNI TCs
++ * @tc_sched: An array of traffic-classes
++ * @prio_group_A: Priority of group A
++ * @prio_group_B: Priority of group B
++ * @separate_groups: Treat A and B groups as separate
++ */
++struct dpni_tx_priorities_cfg {
++ struct dpni_tx_schedule_cfg tc_sched[DPNI_MAX_TC];
++ u8 prio_group_A;
++ u8 prio_group_B;
++ u8 separate_groups;
++};
++
++int dpni_set_tx_priorities(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ const struct dpni_tx_priorities_cfg *cfg);
++
++/**
++ * struct dpni_rx_tc_dist_cfg - Rx traffic class distribution configuration
++ * @dist_size: Set the distribution size;
++ * supported values: 1,2,3,4,6,7,8,12,14,16,24,28,32,48,56,64,96,
++ * 112,128,192,224,256,384,448,512,768,896,1024
++ * @dist_mode: Distribution mode
++ * @key_cfg_iova: I/O virtual address of 256 bytes DMA-able memory filled with
++ *		the extractions to be used for the distribution key, prepared
++ *		by calling dpni_prepare_key_cfg(); relevant only when
++ * 'dist_mode != DPNI_DIST_MODE_NONE', otherwise it can be '0'
++ * @fs_cfg: Flow Steering table configuration; only relevant if
++ * 'dist_mode = DPNI_DIST_MODE_FS'
++ */
++struct dpni_rx_tc_dist_cfg {
++ u16 dist_size;
++ enum dpni_dist_mode dist_mode;
++ u64 key_cfg_iova;
++ struct dpni_fs_tbl_cfg fs_cfg;
++};
++
++int dpni_set_rx_tc_dist(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 tc_id,
++ const struct dpni_rx_tc_dist_cfg *cfg);
++
++/**
++ * enum dpni_dest - DPNI destination types
++ * @DPNI_DEST_NONE: Unassigned destination; The queue is set in parked mode and
++ * does not generate FQDAN notifications; user is expected to
++ * dequeue from the queue based on polling or other user-defined
++ * method
++ * @DPNI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN
++ * notifications to the specified DPIO; user is expected to dequeue
++ * from the queue only after notification is received
++ * @DPNI_DEST_DPCON: The queue is set in schedule mode and does not generate
++ * FQDAN notifications, but is connected to the specified DPCON
++ * object; user is expected to dequeue from the DPCON channel
++ */
++enum dpni_dest {
++ DPNI_DEST_NONE = 0,
++ DPNI_DEST_DPIO = 1,
++ DPNI_DEST_DPCON = 2
++};
++
++/**
++ * struct dpni_queue - Queue structure
++ * @user_context: User data, presented to the user along with any frames from
++ * this queue. Not relevant for Tx queues.
++ */
++struct dpni_queue {
++/**
++ * struct destination - Destination structure
++ * @id: ID of the destination, only relevant if DEST_TYPE is > 0.
++ * Identifies either a DPIO or a DPCON object. Not relevant for
++ * Tx queues.
++ * @type: May be one of the following:
++ * 0 - No destination, queue can be manually queried, but will not
++ * push traffic or notifications to a DPIO;
++ * 1 - The destination is a DPIO. When traffic becomes available in
++ * the queue a FQDAN (FQ data available notification) will be
++ * generated to selected DPIO;
++ * 2 - The destination is a DPCON. The queue is associated with a
++ * DPCON object for the purpose of scheduling between multiple
++ * queues. The DPCON may be independently configured to
++ * generate notifications. Not relevant for Tx queues.
++ * @hold_active: Hold active, maintains a queue scheduled for longer
++ * in a DPIO during dequeue to reduce spread of traffic.
++ * Only relevant if queues are not affined to a single DPIO.
++ */
++ struct {
++ u16 id;
++ enum dpni_dest type;
++ char hold_active;
++ u8 priority;
++ } destination;
++ u64 user_context;
++ struct {
++ u64 value;
++ char stash_control;
++ } flc;
++};
++
++/**
++ * struct dpni_queue_id - Queue identification, used for enqueue commands
++ * or queue control
++ * @fqid: FQID used for enqueueing to and/or configuration of this specific FQ
++ * @qdbin: Queueing bin, used to enqueue using QDID, DQBIN, QPRI. Only relevant
++ * for Tx queues.
++ */
++struct dpni_queue_id {
++ u32 fqid;
++ u16 qdbin;
++};
++
++/**
++ * Set User Context
++ */
++#define DPNI_QUEUE_OPT_USER_CTX 0x00000001
++#define DPNI_QUEUE_OPT_DEST 0x00000002
++#define DPNI_QUEUE_OPT_FLC 0x00000004
++#define DPNI_QUEUE_OPT_HOLD_ACTIVE 0x00000008
++
++int dpni_set_queue(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ enum dpni_queue_type qtype,
++ u8 tc,
++ u8 index,
++ u8 options,
++ const struct dpni_queue *queue);
++
++int dpni_get_queue(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ enum dpni_queue_type qtype,
++ u8 tc,
++ u8 index,
++ struct dpni_queue *queue,
++ struct dpni_queue_id *qid);
++
++/**
++ * enum dpni_congestion_unit - DPNI congestion units
++ * @DPNI_CONGESTION_UNIT_BYTES: bytes units
++ * @DPNI_CONGESTION_UNIT_FRAMES: frames units
++ */
++enum dpni_congestion_unit {
++ DPNI_CONGESTION_UNIT_BYTES = 0,
++ DPNI_CONGESTION_UNIT_FRAMES
++};
++
++/**
++ * enum dpni_congestion_point - Structure representing congestion point
++ * @DPNI_CP_QUEUE: Set taildrop per queue, identified by QUEUE_TYPE, TC and
++ * QUEUE_INDEX
++ * @DPNI_CP_GROUP: Set taildrop per queue group. Depending on options used to
++ * define the DPNI this can be either per TC (default) or per
++ * interface (DPNI_OPT_SHARED_CONGESTION set at DPNI create).
++ * QUEUE_INDEX is ignored if this type is used.
++ */
++enum dpni_congestion_point {
++ DPNI_CP_QUEUE,
++ DPNI_CP_GROUP,
++};
++
++/**
++ * struct dpni_dest_cfg - Structure representing DPNI destination parameters
++ * @dest_type: Destination type
++ * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type
++ * @priority: Priority selection within the DPIO or DPCON channel; valid
++ * values are 0-1 or 0-7, depending on the number of priorities
++ * in that channel; not relevant for 'DPNI_DEST_NONE' option
++ */
++struct dpni_dest_cfg {
++ enum dpni_dest dest_type;
++ int dest_id;
++ u8 priority;
++};
++
++/* DPNI congestion options */
++
++/**
++ * CSCN message is written to message_iova once entering a
++ * congestion state (see 'threshold_entry')
++ */
++#define DPNI_CONG_OPT_WRITE_MEM_ON_ENTER 0x00000001
++/**
++ * CSCN message is written to message_iova once exiting a
++ * congestion state (see 'threshold_exit')
++ */
++#define DPNI_CONG_OPT_WRITE_MEM_ON_EXIT 0x00000002
++/**
++ * CSCN write will attempt to allocate into a cache (coherent write);
++ * valid only if 'DPNI_CONG_OPT_WRITE_MEM_<X>' is selected
++ */
++#define DPNI_CONG_OPT_COHERENT_WRITE 0x00000004
++/**
++ * if 'dest_cfg.dest_type != DPNI_DEST_NONE' CSCN message is sent to
++ * DPIO/DPCON's WQ channel once entering a congestion state
++ * (see 'threshold_entry')
++ */
++#define DPNI_CONG_OPT_NOTIFY_DEST_ON_ENTER 0x00000008
++/**
++ * if 'dest_cfg.dest_type != DPNI_DEST_NONE' CSCN message is sent to
++ * DPIO/DPCON's WQ channel once exiting a congestion state
++ * (see 'threshold_exit')
++ */
++#define DPNI_CONG_OPT_NOTIFY_DEST_ON_EXIT 0x00000010
++/**
++ * if 'dest_cfg.dest_type != DPNI_DEST_NONE' when the CSCN is written to the
++ * sw-portal's DQRR, the DQRI interrupt is asserted immediately (if enabled)
++ */
++#define DPNI_CONG_OPT_INTR_COALESCING_DISABLED 0x00000020
++/**
++ * This congestion will trigger flow control or priority flow control.
++ * This will have effect only if flow control is enabled with
++ * dpni_set_link_cfg().
++ */
++#define DPNI_CONG_OPT_FLOW_CONTROL 0x00000040
++
++/**
++ * struct dpni_congestion_notification_cfg - congestion notification
++ * configuration
++ * @units: Units type
++ * @threshold_entry: Above this threshold we enter a congestion state.
++ *	Set it to '0' to disable it
++ * @threshold_exit: Below this threshold we exit the congestion state
++ * @message_ctx: The context that will be part of the CSCN message
++ * @message_iova: I/O virtual address (must be in DMA-able memory),
++ *	must be 16B aligned; valid only if 'DPNI_CONG_OPT_WRITE_MEM_<X>'
++ *	is contained in 'options'
++ * @dest_cfg: CSCN can be sent to either the DPIO or the DPCON WQ channel
++ * @notification_mode: Mask of available options; use 'DPNI_CONG_OPT_<X>' values
++ */
++struct dpni_congestion_notification_cfg {
++ enum dpni_congestion_unit units;
++ u32 threshold_entry;
++ u32 threshold_exit;
++ u64 message_ctx;
++ u64 message_iova;
++ struct dpni_dest_cfg dest_cfg;
++ u16 notification_mode;
++};
++
++/**
++ * Compose the TC parameter for dpni_set_congestion_notification()
++ * and dpni_get_congestion_notification().
++ */
++#define DPNI_BUILD_CH_TC(ceetm_ch_idx, tc) \
++ ((((ceetm_ch_idx) & 0x0F) << 4) | ((tc) & 0x0F))
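+
For example, selecting traffic class 3 on ceetm channel 1 for a Tx congestion query would look like this (cfg is a caller-provided structure):

	struct dpni_congestion_notification_cfg cfg;
	u8 tc_sel = DPNI_BUILD_CH_TC(1, 3);	/* 0x13: channel 1, TC 3 */

	err = dpni_get_congestion_notification(mc_io, 0, token,
					       DPNI_QUEUE_TX, tc_sel, &cfg);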
++
++int dpni_set_congestion_notification(
++ struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ enum dpni_queue_type qtype,
++ u8 tc_id,
++ const struct dpni_congestion_notification_cfg *cfg);
++
++int dpni_get_congestion_notification(
++ struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ enum dpni_queue_type qtype,
++ u8 tc_id,
++ struct dpni_congestion_notification_cfg *cfg);
++
++/**
++ * struct dpni_taildrop - Structure representing the taildrop
++ * @enable: Indicates whether the taildrop is active or not.
++ * @units: Indicates the unit of THRESHOLD. Queue taildrop only supports
++ * byte units, this field is ignored and assumed = 0 if
++ * CONGESTION_POINT is 0.
++ * @threshold: Threshold value, in units identified by UNITS field. Value 0
++ * cannot be used as a valid taildrop threshold, THRESHOLD must
++ * be > 0 if the taildrop is enabled.
++ */
++struct dpni_taildrop {
++ char enable;
++ enum dpni_congestion_unit units;
++ u32 threshold;
++};
++
++int dpni_set_taildrop(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ enum dpni_congestion_point cg_point,
++ enum dpni_queue_type q_type,
++ u8 tc,
++ u8 q_index,
++ struct dpni_taildrop *taildrop);
++
++int dpni_get_taildrop(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ enum dpni_congestion_point cg_point,
++ enum dpni_queue_type q_type,
++ u8 tc,
++ u8 q_index,
++ struct dpni_taildrop *taildrop);
++
++/**
++ * struct dpni_rule_cfg - Rule configuration for table lookup
++ * @key_iova: I/O virtual address of the key (must be in DMA-able memory)
++ * @mask_iova: I/O virtual address of the mask (must be in DMA-able memory)
++ * @key_size: key and mask size (in bytes)
++ */
++struct dpni_rule_cfg {
++ u64 key_iova;
++ u64 mask_iova;
++ u8 key_size;
++};
++
++int dpni_get_api_version(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 *major_ver,
++ u16 *minor_ver);
++
++int dpni_add_qos_entry(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ const struct dpni_rule_cfg *cfg,
++ u8 tc_id,
++ u16 index);
++
++int dpni_remove_qos_entry(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ const struct dpni_rule_cfg *cfg);
++
++int dpni_clear_qos_table(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token);
++
++/**
++ * Discard matching traffic. If set, this takes precedence over any other
++ * configuration and matching traffic is always discarded.
++ */
++#define DPNI_FS_OPT_DISCARD            0x1
++
++/**
++ * Set FLC value. If set, flc member of struct dpni_fs_action_cfg is used to
++ * override the FLC value set per queue.
++ * For more details check the Frame Descriptor section in the hardware
++ * documentation.
++ */
++#define DPNI_FS_OPT_SET_FLC 0x2
++
++/**
++ * Indicates whether the 6 least significant bits of FLC are used for stash
++ * control. If set, the 6 least significant bits of the FLC value are
++ * interpreted as follows:
++ * - bits 0-1: indicates the number of 64 byte units of context that are
++ * stashed. FLC value is interpreted as a memory address in this case,
++ * excluding the 6 LS bits.
++ * - bits 2-3: indicates the number of 64 byte units of frame annotation
++ * to be stashed. Annotation is placed at FD[ADDR].
++ * - bits 4-5: indicates the number of 64 byte units of frame data to be
++ * stashed. Frame data is placed at FD[ADDR] + FD[OFFSET].
++ * This flag is ignored if DPNI_FS_OPT_SET_FLC is not specified.
++ */
++#define DPNI_FS_OPT_SET_STASH_CONTROL 0x4
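++
++/* Illustrative encoding (not part of the API): to stash one 64-byte unit of
++ * context and two 64-byte units of frame data, set bits 0-1 to 1 and bits
++ * 4-5 to 2, i.e. flc = (ctx_addr & ~0x3FULL) | (2 << 4) | 1, with both
++ * DPNI_FS_OPT_SET_FLC and DPNI_FS_OPT_SET_STASH_CONTROL in 'options'.
++ */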
++
++/**
++ * struct dpni_fs_action_cfg - Action configuration for table look-up
++ * @flc: FLC value for traffic matching this rule. Please check the
++ * Frame Descriptor section in the hardware documentation for
++ * more information.
++ * @flow_id: Identifies the Rx queue used for matching traffic. Supported
++ *	values are in the range 0 to num_queue - 1.
++ * @options: Any combination of DPNI_FS_OPT_ values.
++ */
++struct dpni_fs_action_cfg {
++ u64 flc;
++ u16 flow_id;
++ u16 options;
++};
++
++int dpni_add_fs_entry(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 tc_id,
++ u16 index,
++ const struct dpni_rule_cfg *cfg,
++ const struct dpni_fs_action_cfg *action);
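++
++/* Illustrative usage sketch; key_dma and mask_dma are hypothetical
++ * DMA-mapped buffers holding the lookup key and mask required by
++ * struct dpni_rule_cfg:
++ *
++ *	struct dpni_rule_cfg rule = {
++ *		.key_iova = key_dma,
++ *		.mask_iova = mask_dma,
++ *		.key_size = 4,
++ *	};
++ *	struct dpni_fs_action_cfg act = { .flow_id = 2, .options = 0 };
++ *	err = dpni_add_fs_entry(mc_io, 0, token, 0, 0, &rule, &act);
++ */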
++
++int dpni_remove_fs_entry(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 tc_id,
++ const struct dpni_rule_cfg *cfg);
++
++/**
++ * When used as the queue_idx argument to dpni_set_rx_dist_default_queue(),
++ * signals the DPNI to drop all unclassified frames.
++ */
++#define DPNI_FS_MISS_DROP ((uint16_t)-1)
++
++/**
++ * struct dpni_rx_dist_cfg - distribution configuration
++ * @dist_size: distribution size; supported values: 1,2,3,4,6,7,8,
++ * 12,14,16,24,28,32,48,56,64,96,112,128,192,224,256,384,448,
++ * 512,768,896,1024
++ * @key_cfg_iova: I/O virtual address of 256 bytes of DMA-able memory filled
++ *	with the extractions to be used for the distribution key, as
++ *	prepared by dpkg_prepare_key_cfg(); relevant only when enable != 0,
++ *	otherwise it can be '0'
++ * @enable: enable/disable the distribution.
++ * @tc: TC id for which distribution is set
++ * @fs_miss_flow_id: when a packet misses all rules in the flow steering
++ *	table and hash distribution is disabled, it is enqueued to this
++ *	queue id; use DPNI_FS_MISS_DROP to drop such frames. This field is
++ *	used only when flow steering distribution is enabled and hash
++ *	distribution is disabled
++ */
++struct dpni_rx_dist_cfg {
++ u16 dist_size;
++ u64 key_cfg_iova;
++ u8 enable;
++ u8 tc;
++ u16 fs_miss_flow_id;
++};
++
++int dpni_set_rx_fs_dist(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ const struct dpni_rx_dist_cfg *cfg);
++
++int dpni_set_rx_hash_dist(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ const struct dpni_rx_dist_cfg *cfg);
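++
++/* Illustrative usage sketch: enable an 8-queue flow steering distribution
++ * on TC 0 and drop unclassified frames (key_cfg_dma is a hypothetical
++ * buffer prepared with dpkg_prepare_key_cfg()):
++ *
++ *	struct dpni_rx_dist_cfg dist = {
++ *		.dist_size = 8,
++ *		.key_cfg_iova = key_cfg_dma,
++ *		.enable = 1,
++ *		.tc = 0,
++ *		.fs_miss_flow_id = DPNI_FS_MISS_DROP,
++ *	};
++ *	err = dpni_set_rx_fs_dist(mc_io, 0, token, &dist);
++ */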
++
++#endif /* __FSL_DPNI_H */
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/ethernet/net.h
+@@ -0,0 +1,480 @@
++/* Copyright 2013-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef __FSL_NET_H
++#define __FSL_NET_H
++
++#define LAST_HDR_INDEX 0xFFFFFFFF
++
++/*****************************************************************************/
++/* Protocol fields */
++/*****************************************************************************/
++
++/************************* Ethernet fields *********************************/
++#define NH_FLD_ETH_DA (1)
++#define NH_FLD_ETH_SA (NH_FLD_ETH_DA << 1)
++#define NH_FLD_ETH_LENGTH (NH_FLD_ETH_DA << 2)
++#define NH_FLD_ETH_TYPE (NH_FLD_ETH_DA << 3)
++#define NH_FLD_ETH_FINAL_CKSUM (NH_FLD_ETH_DA << 4)
++#define NH_FLD_ETH_PADDING (NH_FLD_ETH_DA << 5)
++#define NH_FLD_ETH_ALL_FIELDS ((NH_FLD_ETH_DA << 6) - 1)
++
++#define NH_FLD_ETH_ADDR_SIZE 6
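++
++/* Each NH_FLD_* group below follows the same pattern: fields are one-hot
++ * flags that can be OR-ed together (e.g. NH_FLD_ETH_DA | NH_FLD_ETH_SA
++ * selects both MAC addresses), and the *_ALL_FIELDS mask covers every flag
++ * in the group.
++ */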
++
++/*************************** VLAN fields ***********************************/
++#define NH_FLD_VLAN_VPRI (1)
++#define NH_FLD_VLAN_CFI (NH_FLD_VLAN_VPRI << 1)
++#define NH_FLD_VLAN_VID (NH_FLD_VLAN_VPRI << 2)
++#define NH_FLD_VLAN_LENGTH (NH_FLD_VLAN_VPRI << 3)
++#define NH_FLD_VLAN_TYPE (NH_FLD_VLAN_VPRI << 4)
++#define NH_FLD_VLAN_ALL_FIELDS ((NH_FLD_VLAN_VPRI << 5) - 1)
++
++#define NH_FLD_VLAN_TCI (NH_FLD_VLAN_VPRI | \
++ NH_FLD_VLAN_CFI | \
++ NH_FLD_VLAN_VID)
++
++/************************ IP (generic) fields ******************************/
++#define NH_FLD_IP_VER (1)
++#define NH_FLD_IP_DSCP (NH_FLD_IP_VER << 2)
++#define NH_FLD_IP_ECN (NH_FLD_IP_VER << 3)
++#define NH_FLD_IP_PROTO (NH_FLD_IP_VER << 4)
++#define NH_FLD_IP_SRC (NH_FLD_IP_VER << 5)
++#define NH_FLD_IP_DST (NH_FLD_IP_VER << 6)
++#define NH_FLD_IP_TOS_TC (NH_FLD_IP_VER << 7)
++#define NH_FLD_IP_ID (NH_FLD_IP_VER << 8)
++#define NH_FLD_IP_ALL_FIELDS ((NH_FLD_IP_VER << 9) - 1)
++
++#define NH_FLD_IP_PROTO_SIZE 1
++
++/***************************** IPV4 fields *********************************/
++#define NH_FLD_IPV4_VER (1)
++#define NH_FLD_IPV4_HDR_LEN (NH_FLD_IPV4_VER << 1)
++#define NH_FLD_IPV4_TOS (NH_FLD_IPV4_VER << 2)
++#define NH_FLD_IPV4_TOTAL_LEN (NH_FLD_IPV4_VER << 3)
++#define NH_FLD_IPV4_ID (NH_FLD_IPV4_VER << 4)
++#define NH_FLD_IPV4_FLAG_D (NH_FLD_IPV4_VER << 5)
++#define NH_FLD_IPV4_FLAG_M (NH_FLD_IPV4_VER << 6)
++#define NH_FLD_IPV4_OFFSET (NH_FLD_IPV4_VER << 7)
++#define NH_FLD_IPV4_TTL (NH_FLD_IPV4_VER << 8)
++#define NH_FLD_IPV4_PROTO (NH_FLD_IPV4_VER << 9)
++#define NH_FLD_IPV4_CKSUM (NH_FLD_IPV4_VER << 10)
++#define NH_FLD_IPV4_SRC_IP (NH_FLD_IPV4_VER << 11)
++#define NH_FLD_IPV4_DST_IP (NH_FLD_IPV4_VER << 12)
++#define NH_FLD_IPV4_OPTS (NH_FLD_IPV4_VER << 13)
++#define NH_FLD_IPV4_OPTS_COUNT (NH_FLD_IPV4_VER << 14)
++#define NH_FLD_IPV4_ALL_FIELDS ((NH_FLD_IPV4_VER << 15) - 1)
++
++#define NH_FLD_IPV4_ADDR_SIZE 4
++#define NH_FLD_IPV4_PROTO_SIZE 1
++
++/***************************** IPV6 fields *********************************/
++#define NH_FLD_IPV6_VER (1)
++#define NH_FLD_IPV6_TC (NH_FLD_IPV6_VER << 1)
++#define NH_FLD_IPV6_SRC_IP (NH_FLD_IPV6_VER << 2)
++#define NH_FLD_IPV6_DST_IP (NH_FLD_IPV6_VER << 3)
++#define NH_FLD_IPV6_NEXT_HDR (NH_FLD_IPV6_VER << 4)
++#define NH_FLD_IPV6_FL (NH_FLD_IPV6_VER << 5)
++#define NH_FLD_IPV6_HOP_LIMIT (NH_FLD_IPV6_VER << 6)
++#define NH_FLD_IPV6_ID (NH_FLD_IPV6_VER << 7)
++#define NH_FLD_IPV6_ALL_FIELDS ((NH_FLD_IPV6_VER << 8) - 1)
++
++#define NH_FLD_IPV6_ADDR_SIZE 16
++#define NH_FLD_IPV6_NEXT_HDR_SIZE 1
++
++/***************************** ICMP fields *********************************/
++#define NH_FLD_ICMP_TYPE (1)
++#define NH_FLD_ICMP_CODE (NH_FLD_ICMP_TYPE << 1)
++#define NH_FLD_ICMP_CKSUM (NH_FLD_ICMP_TYPE << 2)
++#define NH_FLD_ICMP_ID (NH_FLD_ICMP_TYPE << 3)
++#define NH_FLD_ICMP_SQ_NUM (NH_FLD_ICMP_TYPE << 4)
++#define NH_FLD_ICMP_ALL_FIELDS ((NH_FLD_ICMP_TYPE << 5) - 1)
++
++#define NH_FLD_ICMP_CODE_SIZE 1
++#define NH_FLD_ICMP_TYPE_SIZE 1
++
++/***************************** IGMP fields *********************************/
++#define NH_FLD_IGMP_VERSION (1)
++#define NH_FLD_IGMP_TYPE (NH_FLD_IGMP_VERSION << 1)
++#define NH_FLD_IGMP_CKSUM (NH_FLD_IGMP_VERSION << 2)
++#define NH_FLD_IGMP_DATA (NH_FLD_IGMP_VERSION << 3)
++#define NH_FLD_IGMP_ALL_FIELDS ((NH_FLD_IGMP_VERSION << 4) - 1)
++
++/***************************** TCP fields **********************************/
++#define NH_FLD_TCP_PORT_SRC (1)
++#define NH_FLD_TCP_PORT_DST (NH_FLD_TCP_PORT_SRC << 1)
++#define NH_FLD_TCP_SEQ (NH_FLD_TCP_PORT_SRC << 2)
++#define NH_FLD_TCP_ACK (NH_FLD_TCP_PORT_SRC << 3)
++#define NH_FLD_TCP_OFFSET (NH_FLD_TCP_PORT_SRC << 4)
++#define NH_FLD_TCP_FLAGS (NH_FLD_TCP_PORT_SRC << 5)
++#define NH_FLD_TCP_WINDOW (NH_FLD_TCP_PORT_SRC << 6)
++#define NH_FLD_TCP_CKSUM (NH_FLD_TCP_PORT_SRC << 7)
++#define NH_FLD_TCP_URGPTR (NH_FLD_TCP_PORT_SRC << 8)
++#define NH_FLD_TCP_OPTS (NH_FLD_TCP_PORT_SRC << 9)
++#define NH_FLD_TCP_OPTS_COUNT (NH_FLD_TCP_PORT_SRC << 10)
++#define NH_FLD_TCP_ALL_FIELDS ((NH_FLD_TCP_PORT_SRC << 11) - 1)
++
++#define NH_FLD_TCP_PORT_SIZE 2
++
++/***************************** UDP fields **********************************/
++#define NH_FLD_UDP_PORT_SRC (1)
++#define NH_FLD_UDP_PORT_DST (NH_FLD_UDP_PORT_SRC << 1)
++#define NH_FLD_UDP_LEN (NH_FLD_UDP_PORT_SRC << 2)
++#define NH_FLD_UDP_CKSUM (NH_FLD_UDP_PORT_SRC << 3)
++#define NH_FLD_UDP_ALL_FIELDS ((NH_FLD_UDP_PORT_SRC << 4) - 1)
++
++#define NH_FLD_UDP_PORT_SIZE 2
++
++/*************************** UDP-lite fields *******************************/
++#define NH_FLD_UDP_LITE_PORT_SRC (1)
++#define NH_FLD_UDP_LITE_PORT_DST (NH_FLD_UDP_LITE_PORT_SRC << 1)
++#define NH_FLD_UDP_LITE_ALL_FIELDS \
++ ((NH_FLD_UDP_LITE_PORT_SRC << 2) - 1)
++
++#define NH_FLD_UDP_LITE_PORT_SIZE 2
++
++/*************************** UDP-encap-ESP fields **************************/
++#define NH_FLD_UDP_ENC_ESP_PORT_SRC (1)
++#define NH_FLD_UDP_ENC_ESP_PORT_DST (NH_FLD_UDP_ENC_ESP_PORT_SRC << 1)
++#define NH_FLD_UDP_ENC_ESP_LEN (NH_FLD_UDP_ENC_ESP_PORT_SRC << 2)
++#define NH_FLD_UDP_ENC_ESP_CKSUM (NH_FLD_UDP_ENC_ESP_PORT_SRC << 3)
++#define NH_FLD_UDP_ENC_ESP_SPI (NH_FLD_UDP_ENC_ESP_PORT_SRC << 4)
++#define NH_FLD_UDP_ENC_ESP_SEQUENCE_NUM (NH_FLD_UDP_ENC_ESP_PORT_SRC << 5)
++#define NH_FLD_UDP_ENC_ESP_ALL_FIELDS \
++ ((NH_FLD_UDP_ENC_ESP_PORT_SRC << 6) - 1)
++
++#define NH_FLD_UDP_ENC_ESP_PORT_SIZE 2
++#define NH_FLD_UDP_ENC_ESP_SPI_SIZE 4
++
++/***************************** SCTP fields *********************************/
++#define NH_FLD_SCTP_PORT_SRC (1)
++#define NH_FLD_SCTP_PORT_DST (NH_FLD_SCTP_PORT_SRC << 1)
++#define NH_FLD_SCTP_VER_TAG (NH_FLD_SCTP_PORT_SRC << 2)
++#define NH_FLD_SCTP_CKSUM (NH_FLD_SCTP_PORT_SRC << 3)
++#define NH_FLD_SCTP_ALL_FIELDS ((NH_FLD_SCTP_PORT_SRC << 4) - 1)
++
++#define NH_FLD_SCTP_PORT_SIZE 2
++
++/***************************** DCCP fields *********************************/
++#define NH_FLD_DCCP_PORT_SRC (1)
++#define NH_FLD_DCCP_PORT_DST (NH_FLD_DCCP_PORT_SRC << 1)
++#define NH_FLD_DCCP_ALL_FIELDS ((NH_FLD_DCCP_PORT_SRC << 2) - 1)
++
++#define NH_FLD_DCCP_PORT_SIZE 2
++
++/***************************** IPHC fields *********************************/
++#define NH_FLD_IPHC_CID (1)
++#define NH_FLD_IPHC_CID_TYPE (NH_FLD_IPHC_CID << 1)
++#define NH_FLD_IPHC_HCINDEX (NH_FLD_IPHC_CID << 2)
++#define NH_FLD_IPHC_GEN (NH_FLD_IPHC_CID << 3)
++#define NH_FLD_IPHC_D_BIT (NH_FLD_IPHC_CID << 4)
++#define NH_FLD_IPHC_ALL_FIELDS ((NH_FLD_IPHC_CID << 5) - 1)
++
++/*********************** SCTP CHUNK DATA fields ****************************/
++#define NH_FLD_SCTP_CHUNK_DATA_TYPE (1)
++#define NH_FLD_SCTP_CHUNK_DATA_FLAGS (NH_FLD_SCTP_CHUNK_DATA_TYPE << 1)
++#define NH_FLD_SCTP_CHUNK_DATA_LENGTH (NH_FLD_SCTP_CHUNK_DATA_TYPE << 2)
++#define NH_FLD_SCTP_CHUNK_DATA_TSN (NH_FLD_SCTP_CHUNK_DATA_TYPE << 3)
++#define NH_FLD_SCTP_CHUNK_DATA_STREAM_ID (NH_FLD_SCTP_CHUNK_DATA_TYPE << 4)
++#define NH_FLD_SCTP_CHUNK_DATA_STREAM_SQN (NH_FLD_SCTP_CHUNK_DATA_TYPE << 5)
++#define NH_FLD_SCTP_CHUNK_DATA_PAYLOAD_PID (NH_FLD_SCTP_CHUNK_DATA_TYPE << 6)
++#define NH_FLD_SCTP_CHUNK_DATA_UNORDERED (NH_FLD_SCTP_CHUNK_DATA_TYPE << 7)
++#define NH_FLD_SCTP_CHUNK_DATA_BEGGINING (NH_FLD_SCTP_CHUNK_DATA_TYPE << 8)
++#define NH_FLD_SCTP_CHUNK_DATA_END (NH_FLD_SCTP_CHUNK_DATA_TYPE << 9)
++#define NH_FLD_SCTP_CHUNK_DATA_ALL_FIELDS \
++ ((NH_FLD_SCTP_CHUNK_DATA_TYPE << 10) - 1)
++
++/*************************** L2TPV2 fields *********************************/
++#define NH_FLD_L2TPV2_TYPE_BIT (1)
++#define NH_FLD_L2TPV2_LENGTH_BIT (NH_FLD_L2TPV2_TYPE_BIT << 1)
++#define NH_FLD_L2TPV2_SEQUENCE_BIT (NH_FLD_L2TPV2_TYPE_BIT << 2)
++#define NH_FLD_L2TPV2_OFFSET_BIT (NH_FLD_L2TPV2_TYPE_BIT << 3)
++#define NH_FLD_L2TPV2_PRIORITY_BIT (NH_FLD_L2TPV2_TYPE_BIT << 4)
++#define NH_FLD_L2TPV2_VERSION (NH_FLD_L2TPV2_TYPE_BIT << 5)
++#define NH_FLD_L2TPV2_LEN (NH_FLD_L2TPV2_TYPE_BIT << 6)
++#define NH_FLD_L2TPV2_TUNNEL_ID (NH_FLD_L2TPV2_TYPE_BIT << 7)
++#define NH_FLD_L2TPV2_SESSION_ID (NH_FLD_L2TPV2_TYPE_BIT << 8)
++#define NH_FLD_L2TPV2_NS (NH_FLD_L2TPV2_TYPE_BIT << 9)
++#define NH_FLD_L2TPV2_NR (NH_FLD_L2TPV2_TYPE_BIT << 10)
++#define NH_FLD_L2TPV2_OFFSET_SIZE (NH_FLD_L2TPV2_TYPE_BIT << 11)
++#define NH_FLD_L2TPV2_FIRST_BYTE (NH_FLD_L2TPV2_TYPE_BIT << 12)
++#define NH_FLD_L2TPV2_ALL_FIELDS \
++ ((NH_FLD_L2TPV2_TYPE_BIT << 13) - 1)
++
++/*************************** L2TPV3 fields *********************************/
++#define NH_FLD_L2TPV3_CTRL_TYPE_BIT (1)
++#define NH_FLD_L2TPV3_CTRL_LENGTH_BIT (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 1)
++#define NH_FLD_L2TPV3_CTRL_SEQUENCE_BIT (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 2)
++#define NH_FLD_L2TPV3_CTRL_VERSION (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 3)
++#define NH_FLD_L2TPV3_CTRL_LENGTH (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 4)
++#define NH_FLD_L2TPV3_CTRL_CONTROL (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 5)
++#define NH_FLD_L2TPV3_CTRL_SENT (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 6)
++#define NH_FLD_L2TPV3_CTRL_RECV (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 7)
++#define NH_FLD_L2TPV3_CTRL_FIRST_BYTE (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 8)
++#define NH_FLD_L2TPV3_CTRL_ALL_FIELDS \
++ ((NH_FLD_L2TPV3_CTRL_TYPE_BIT << 9) - 1)
++
++#define NH_FLD_L2TPV3_SESS_TYPE_BIT (1)
++#define NH_FLD_L2TPV3_SESS_VERSION (NH_FLD_L2TPV3_SESS_TYPE_BIT << 1)
++#define NH_FLD_L2TPV3_SESS_ID (NH_FLD_L2TPV3_SESS_TYPE_BIT << 2)
++#define NH_FLD_L2TPV3_SESS_COOKIE (NH_FLD_L2TPV3_SESS_TYPE_BIT << 3)
++#define NH_FLD_L2TPV3_SESS_ALL_FIELDS \
++ ((NH_FLD_L2TPV3_SESS_TYPE_BIT << 4) - 1)
++
++/**************************** PPP fields ***********************************/
++#define NH_FLD_PPP_PID (1)
++#define NH_FLD_PPP_COMPRESSED (NH_FLD_PPP_PID << 1)
++#define NH_FLD_PPP_ALL_FIELDS ((NH_FLD_PPP_PID << 2) - 1)
++
++/************************** PPPoE fields ***********************************/
++#define NH_FLD_PPPOE_VER (1)
++#define NH_FLD_PPPOE_TYPE (NH_FLD_PPPOE_VER << 1)
++#define NH_FLD_PPPOE_CODE (NH_FLD_PPPOE_VER << 2)
++#define NH_FLD_PPPOE_SID (NH_FLD_PPPOE_VER << 3)
++#define NH_FLD_PPPOE_LEN (NH_FLD_PPPOE_VER << 4)
++#define NH_FLD_PPPOE_SESSION (NH_FLD_PPPOE_VER << 5)
++#define NH_FLD_PPPOE_PID (NH_FLD_PPPOE_VER << 6)
++#define NH_FLD_PPPOE_ALL_FIELDS ((NH_FLD_PPPOE_VER << 7) - 1)
++
++/************************* PPP-Mux fields **********************************/
++#define NH_FLD_PPPMUX_PID (1)
++#define NH_FLD_PPPMUX_CKSUM (NH_FLD_PPPMUX_PID << 1)
++#define NH_FLD_PPPMUX_COMPRESSED (NH_FLD_PPPMUX_PID << 2)
++#define NH_FLD_PPPMUX_ALL_FIELDS ((NH_FLD_PPPMUX_PID << 3) - 1)
++
++/*********************** PPP-Mux sub-frame fields **************************/
++#define NH_FLD_PPPMUX_SUBFRM_PFF (1)
++#define NH_FLD_PPPMUX_SUBFRM_LXT (NH_FLD_PPPMUX_SUBFRM_PFF << 1)
++#define NH_FLD_PPPMUX_SUBFRM_LEN (NH_FLD_PPPMUX_SUBFRM_PFF << 2)
++#define NH_FLD_PPPMUX_SUBFRM_PID (NH_FLD_PPPMUX_SUBFRM_PFF << 3)
++#define NH_FLD_PPPMUX_SUBFRM_USE_PID (NH_FLD_PPPMUX_SUBFRM_PFF << 4)
++#define NH_FLD_PPPMUX_SUBFRM_ALL_FIELDS \
++ ((NH_FLD_PPPMUX_SUBFRM_PFF << 5) - 1)
++
++/*************************** LLC fields ************************************/
++#define NH_FLD_LLC_DSAP (1)
++#define NH_FLD_LLC_SSAP (NH_FLD_LLC_DSAP << 1)
++#define NH_FLD_LLC_CTRL (NH_FLD_LLC_DSAP << 2)
++#define NH_FLD_LLC_ALL_FIELDS ((NH_FLD_LLC_DSAP << 3) - 1)
++
++/*************************** NLPID fields **********************************/
++#define NH_FLD_NLPID_NLPID (1)
++#define NH_FLD_NLPID_ALL_FIELDS ((NH_FLD_NLPID_NLPID << 1) - 1)
++
++/*************************** SNAP fields ***********************************/
++#define NH_FLD_SNAP_OUI (1)
++#define NH_FLD_SNAP_PID (NH_FLD_SNAP_OUI << 1)
++#define NH_FLD_SNAP_ALL_FIELDS ((NH_FLD_SNAP_OUI << 2) - 1)
++
++/*************************** LLC SNAP fields *******************************/
++#define NH_FLD_LLC_SNAP_TYPE (1)
++#define NH_FLD_LLC_SNAP_ALL_FIELDS ((NH_FLD_LLC_SNAP_TYPE << 1) - 1)
++
++/**************************** ARP fields ***********************************/
++#define NH_FLD_ARP_HTYPE (1)
++#define NH_FLD_ARP_PTYPE (NH_FLD_ARP_HTYPE << 1)
++#define NH_FLD_ARP_HLEN (NH_FLD_ARP_HTYPE << 2)
++#define NH_FLD_ARP_PLEN (NH_FLD_ARP_HTYPE << 3)
++#define NH_FLD_ARP_OPER (NH_FLD_ARP_HTYPE << 4)
++#define NH_FLD_ARP_SHA (NH_FLD_ARP_HTYPE << 5)
++#define NH_FLD_ARP_SPA (NH_FLD_ARP_HTYPE << 6)
++#define NH_FLD_ARP_THA (NH_FLD_ARP_HTYPE << 7)
++#define NH_FLD_ARP_TPA (NH_FLD_ARP_HTYPE << 8)
++#define NH_FLD_ARP_ALL_FIELDS ((NH_FLD_ARP_HTYPE << 9) - 1)
++
++/*************************** RFC2684 fields ********************************/
++#define NH_FLD_RFC2684_LLC (1)
++#define NH_FLD_RFC2684_NLPID (NH_FLD_RFC2684_LLC << 1)
++#define NH_FLD_RFC2684_OUI (NH_FLD_RFC2684_LLC << 2)
++#define NH_FLD_RFC2684_PID (NH_FLD_RFC2684_LLC << 3)
++#define NH_FLD_RFC2684_VPN_OUI (NH_FLD_RFC2684_LLC << 4)
++#define NH_FLD_RFC2684_VPN_IDX (NH_FLD_RFC2684_LLC << 5)
++#define NH_FLD_RFC2684_ALL_FIELDS ((NH_FLD_RFC2684_LLC << 6) - 1)
++
++/*************************** User defined fields ***************************/
++#define NH_FLD_USER_DEFINED_SRCPORT (1)
++#define NH_FLD_USER_DEFINED_PCDID (NH_FLD_USER_DEFINED_SRCPORT << 1)
++#define NH_FLD_USER_DEFINED_ALL_FIELDS \
++ ((NH_FLD_USER_DEFINED_SRCPORT << 2) - 1)
++
++/*************************** Payload fields ********************************/
++#define NH_FLD_PAYLOAD_BUFFER (1)
++#define NH_FLD_PAYLOAD_SIZE (NH_FLD_PAYLOAD_BUFFER << 1)
++#define NH_FLD_MAX_FRM_SIZE (NH_FLD_PAYLOAD_BUFFER << 2)
++#define NH_FLD_MIN_FRM_SIZE (NH_FLD_PAYLOAD_BUFFER << 3)
++#define NH_FLD_PAYLOAD_TYPE (NH_FLD_PAYLOAD_BUFFER << 4)
++#define NH_FLD_FRAME_SIZE (NH_FLD_PAYLOAD_BUFFER << 5)
++#define NH_FLD_PAYLOAD_ALL_FIELDS ((NH_FLD_PAYLOAD_BUFFER << 6) - 1)
++
++/*************************** GRE fields ************************************/
++#define NH_FLD_GRE_TYPE (1)
++#define NH_FLD_GRE_ALL_FIELDS ((NH_FLD_GRE_TYPE << 1) - 1)
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_GET_IRQ_STATUS,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_get_irq_status *)cmd.params;
-+ cmd_params->status = cpu_to_le32(*status);
-+ cmd_params->irq_index = irq_index;
++/*************************** MINENCAP fields *******************************/
++#define NH_FLD_MINENCAP_SRC_IP (1)
++#define NH_FLD_MINENCAP_DST_IP (NH_FLD_MINENCAP_SRC_IP << 1)
++#define NH_FLD_MINENCAP_TYPE (NH_FLD_MINENCAP_SRC_IP << 2)
++#define NH_FLD_MINENCAP_ALL_FIELDS \
++ ((NH_FLD_MINENCAP_SRC_IP << 3) - 1)
+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
++/*************************** IPSEC AH fields *******************************/
++#define NH_FLD_IPSEC_AH_SPI (1)
++#define NH_FLD_IPSEC_AH_NH (NH_FLD_IPSEC_AH_SPI << 1)
++#define NH_FLD_IPSEC_AH_ALL_FIELDS ((NH_FLD_IPSEC_AH_SPI << 2) - 1)
+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpsw_rsp_get_irq_status *)cmd.params;
-+ *status = le32_to_cpu(rsp_params->status);
++/*************************** IPSEC ESP fields ******************************/
++#define NH_FLD_IPSEC_ESP_SPI (1)
++#define NH_FLD_IPSEC_ESP_SEQUENCE_NUM (NH_FLD_IPSEC_ESP_SPI << 1)
++#define NH_FLD_IPSEC_ESP_ALL_FIELDS ((NH_FLD_IPSEC_ESP_SPI << 2) - 1)
+
-+ return 0;
-+}
++#define NH_FLD_IPSEC_ESP_SPI_SIZE 4
+
-+/**
-+ * dpsw_clear_irq_status() - Clear a pending interrupt's status
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPCI object
-+ * @irq_index: The interrupt index to configure
-+ * @status: bits to clear (W1C) - one bit per cause:
-+ * 0 = don't change
-+ * 1 = clear status bit
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpsw_clear_irq_status(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 irq_index,
-+ u32 status)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_clear_irq_status *cmd_params;
++/*************************** MPLS fields ***********************************/
++#define NH_FLD_MPLS_LABEL_STACK (1)
++#define NH_FLD_MPLS_LABEL_STACK_ALL_FIELDS \
++ ((NH_FLD_MPLS_LABEL_STACK << 1) - 1)
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CLEAR_IRQ_STATUS,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_clear_irq_status *)cmd.params;
-+ cmd_params->status = cpu_to_le32(status);
-+ cmd_params->irq_index = irq_index;
++/*************************** MACSEC fields *********************************/
++#define NH_FLD_MACSEC_SECTAG (1)
++#define NH_FLD_MACSEC_ALL_FIELDS ((NH_FLD_MACSEC_SECTAG << 1) - 1)
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
++/*************************** GTP fields ************************************/
++#define NH_FLD_GTP_TEID (1)
+
-+/**
-+ * dpsw_get_attributes() - Retrieve DPSW attributes
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @attr: Returned DPSW attributes
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_get_attributes(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ struct dpsw_attr *attr)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_rsp_get_attr *rsp_params;
-+ int err;
++/* Protocol options */
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_GET_ATTR,
-+ cmd_flags,
-+ token);
++/* Ethernet options */
++#define NH_OPT_ETH_BROADCAST 1
++#define NH_OPT_ETH_MULTICAST 2
++#define NH_OPT_ETH_UNICAST 3
++#define NH_OPT_ETH_BPDU 4
+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
++#define NH_ETH_IS_MULTICAST_ADDR(addr) (addr[0] & 0x01)
++/* also applicable for broadcast */
+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpsw_rsp_get_attr *)cmd.params;
-+ attr->num_ifs = le16_to_cpu(rsp_params->num_ifs);
-+ attr->max_fdbs = rsp_params->max_fdbs;
-+ attr->num_fdbs = rsp_params->num_fdbs;
-+ attr->max_vlans = le16_to_cpu(rsp_params->max_vlans);
-+ attr->num_vlans = le16_to_cpu(rsp_params->num_vlans);
-+ attr->max_fdb_entries = le16_to_cpu(rsp_params->max_fdb_entries);
-+ attr->fdb_aging_time = le16_to_cpu(rsp_params->fdb_aging_time);
-+ attr->id = le32_to_cpu(rsp_params->dpsw_id);
-+ attr->mem_size = le16_to_cpu(rsp_params->mem_size);
-+ attr->max_fdb_mc_groups = le16_to_cpu(rsp_params->max_fdb_mc_groups);
-+ attr->max_meters_per_if = rsp_params->max_meters_per_if;
-+ attr->options = le64_to_cpu(rsp_params->options);
-+ attr->component_type = dpsw_get_field(rsp_params->component_type,
-+ COMPONENT_TYPE);
++/* VLAN options */
++#define NH_OPT_VLAN_CFI 1
+
-+ return 0;
-+}
++/* IPV4 options */
++#define NH_OPT_IPV4_UNICAST 1
++#define NH_OPT_IPV4_MULTICAST 2
++#define NH_OPT_IPV4_BROADCAST 3
++#define NH_OPT_IPV4_OPTION 4
++#define NH_OPT_IPV4_FRAG 5
++#define NH_OPT_IPV4_INITIAL_FRAG 6
+
-+/**
-+ * dpsw_set_reflection_if() - Set target interface for reflected interfaces.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @if_id: Interface Id
-+ *
-+ * Only one reflection receive interface is allowed per switch
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_set_reflection_if(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_set_reflection_if *cmd_params;
++/* IPV6 options */
++#define NH_OPT_IPV6_UNICAST 1
++#define NH_OPT_IPV6_MULTICAST 2
++#define NH_OPT_IPV6_OPTION 3
++#define NH_OPT_IPV6_FRAG 4
++#define NH_OPT_IPV6_INITIAL_FRAG 5
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_REFLECTION_IF,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_set_reflection_if *)cmd.params;
-+ cmd_params->if_id = cpu_to_le16(if_id);
++/* General IP options (may be used for any version) */
++#define NH_OPT_IP_FRAG 1
++#define NH_OPT_IP_INITIAL_FRAG 2
++#define NH_OPT_IP_OPTION 3
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
++/* Minenc. options */
++#define NH_OPT_MINENCAP_SRC_ADDR_PRESENT 1
+
-+/**
-+ * dpsw_if_set_link_cfg() - Set the link configuration.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @if_id: Interface id
-+ * @cfg: Link configuration
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpsw_if_set_link_cfg(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ struct dpsw_link_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_if_set_link_cfg *cmd_params;
++/* GRE options */
++#define NH_OPT_GRE_ROUTING_PRESENT 1
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_LINK_CFG,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_if_set_link_cfg *)cmd.params;
-+ cmd_params->if_id = cpu_to_le16(if_id);
-+ cmd_params->rate = cpu_to_le32(cfg->rate);
-+ cmd_params->options = cpu_to_le64(cfg->options);
++/* TCP options */
++#define NH_OPT_TCP_OPTIONS 1
++#define NH_OPT_TCP_CONTROL_HIGH_BITS 2
++#define NH_OPT_TCP_CONTROL_LOW_BITS 3
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
++/* CAPWAP options */
++#define NH_OPT_CAPWAP_DTLS 1
++
++enum net_prot {
++ NET_PROT_NONE = 0,
++ NET_PROT_PAYLOAD,
++ NET_PROT_ETH,
++ NET_PROT_VLAN,
++ NET_PROT_IPV4,
++ NET_PROT_IPV6,
++ NET_PROT_IP,
++ NET_PROT_TCP,
++ NET_PROT_UDP,
++ NET_PROT_UDP_LITE,
++ NET_PROT_IPHC,
++ NET_PROT_SCTP,
++ NET_PROT_SCTP_CHUNK_DATA,
++ NET_PROT_PPPOE,
++ NET_PROT_PPP,
++ NET_PROT_PPPMUX,
++ NET_PROT_PPPMUX_SUBFRM,
++ NET_PROT_L2TPV2,
++ NET_PROT_L2TPV3_CTRL,
++ NET_PROT_L2TPV3_SESS,
++ NET_PROT_LLC,
++ NET_PROT_LLC_SNAP,
++ NET_PROT_NLPID,
++ NET_PROT_SNAP,
++ NET_PROT_MPLS,
++ NET_PROT_IPSEC_AH,
++ NET_PROT_IPSEC_ESP,
++ NET_PROT_UDP_ENC_ESP, /* RFC 3948 */
++ NET_PROT_MACSEC,
++ NET_PROT_GRE,
++ NET_PROT_MINENCAP,
++ NET_PROT_DCCP,
++ NET_PROT_ICMP,
++ NET_PROT_IGMP,
++ NET_PROT_ARP,
++ NET_PROT_CAPWAP_DATA,
++ NET_PROT_CAPWAP_CTRL,
++ NET_PROT_RFC2684,
++ NET_PROT_ICMPV6,
++ NET_PROT_FCOE,
++ NET_PROT_FIP,
++ NET_PROT_ISCSI,
++ NET_PROT_GTP,
++ NET_PROT_USER_DEFINED_L2,
++ NET_PROT_USER_DEFINED_L3,
++ NET_PROT_USER_DEFINED_L4,
++ NET_PROT_USER_DEFINED_L5,
++ NET_PROT_USER_DEFINED_SHIM1,
++ NET_PROT_USER_DEFINED_SHIM2,
++
++ NET_PROT_DUMMY_LAST
++};
++
++/* IEEE 802.1Q */
++#define NH_IEEE8021Q_ETYPE 0x8100
++#define NH_IEEE8021Q_HDR(etype, pcp, dei, vlan_id) \
++ ((((u32)((etype) & 0xFFFF)) << 16) | \
++ (((u32)((pcp) & 0x07)) << 13) | \
++ (((u32)((dei) & 0x01)) << 12) | \
++ (((u32)((vlan_id) & 0xFFF))))
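++
++/* Example (illustrative): NH_IEEE8021Q_HDR(NH_IEEE8021Q_ETYPE, 5, 0, 100)
++ * builds 0x8100A064, i.e. TPID 0x8100, PCP 5, DEI 0, VLAN ID 100.
++ */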
+
-+/**
-+ * dpsw_if_get_link_state - Return the link state
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @if_id: Interface id
-+ * @state: Link state 1 - linkup, 0 - link down or disconnected
-+ *
-+ * @Return '0' on Success; Error code otherwise.
-+ */
-+int dpsw_if_get_link_state(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ struct dpsw_link_state *state)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_if_get_link_state *cmd_params;
-+ struct dpsw_rsp_if_get_link_state *rsp_params;
-+ int err;
++#endif /* __FSL_NET_H */
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/ethsw/Makefile
+@@ -0,0 +1,10 @@
++# SPDX-License-Identifier: GPL-2.0
++#
++# Makefile for the Freescale DPAA2 Ethernet Switch
++#
++# Copyright 2014-2017 Freescale Semiconductor, Inc.
++# Copyright 2017-2018 NXP
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_LINK_STATE,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_if_get_link_state *)cmd.params;
-+ cmd_params->if_id = cpu_to_le16(if_id);
++obj-$(CONFIG_FSL_DPAA2_ETHSW) += dpaa2-ethsw.o
+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
++dpaa2-ethsw-objs := ethsw.o ethsw-ethtool.o dpsw.o
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/ethsw/README
+@@ -0,0 +1,106 @@
++DPAA2 Ethernet Switch driver
++============================
+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpsw_rsp_if_get_link_state *)cmd.params;
-+ state->rate = le32_to_cpu(rsp_params->rate);
-+ state->options = le64_to_cpu(rsp_params->options);
-+ state->up = dpsw_get_field(rsp_params->up, UP);
++This file provides documentation for the DPAA2 Ethernet Switch driver.
+
-+ return 0;
-+}
+
-+/**
-+ * dpsw_if_set_flooding() - Enable Disable flooding for particular interface
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @if_id: Interface Identifier
-+ * @en: 1 - enable, 0 - disable
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_if_set_flooding(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ int en)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_if_set_flooding *cmd_params;
++Contents
++========
++ Supported Platforms
++ Architecture Overview
++ Creating an Ethernet Switch
++ Features
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_FLOODING,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_if_set_flooding *)cmd.params;
-+ cmd_params->if_id = cpu_to_le16(if_id);
-+ dpsw_set_field(cmd_params->enable, ENABLE, en);
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
++Supported Platforms
++===================
++This driver provides networking support for the Freescale LS2085A and
++LS2088A DPAA2 SoCs.
+
-+/**
-+ * dpsw_if_set_broadcast() - Enable/disable broadcast for particular interface
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @if_id: Interface Identifier
-+ * @en: 1 - enable, 0 - disable
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_if_set_broadcast(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ int en)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_if_set_broadcast *cmd_params;
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_BROADCAST,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_if_set_broadcast *)cmd.params;
-+ cmd_params->if_id = cpu_to_le16(if_id);
-+ dpsw_set_field(cmd_params->enable, ENABLE, en);
++Architecture Overview
++=====================
++The Ethernet Switch in the DPAA2 architecture consists of several hardware
++resources that provide the functionality. These are allocated and
++configured via the Management Complex (MC) portals. MC abstracts most of
++these resources as DPAA2 objects and exposes ABIs through which they can
++be configured and controlled.
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
++For a more detailed description of the DPAA2 architecture and its object
++abstractions see:
++ drivers/staging/fsl-mc/README.txt
+
-+/**
-+ * dpsw_if_set_multicast() - Enable/disable multicast for particular interface
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @if_id: Interface Identifier
-+ * @en: 1 - enable, 0 - disable
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_if_set_multicast(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ int en)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_if_set_multicast *cmd_params;
++The Ethernet Switch is built on top of a Datapath Switch (DPSW) object.
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_MULTICAST,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_if_set_multicast *)cmd.params;
-+ cmd_params->if_id = cpu_to_le16(if_id);
-+ dpsw_set_field(cmd_params->enable, ENABLE, en);
++Configuration interface:
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
++ ---------------------
++ | DPAA2 Switch driver |
++ ---------------------
++ .
++ .
++ ----------
++ | DPSW API |
++ ----------
++ . software
++ ================= . ==============
++ . hardware
++ ---------------------
++ | MC hardware portals |
++ ---------------------
++ .
++ .
++ ------
++ | DPSW |
++ ------
++
++The driver uses the switch device driver model and exposes each switch
++port as a network interface, which can be included in a bridge. Traffic
++switched between ports is offloaded into the hardware. The exposed network
++interfaces are not used for I/O; they are used only for configuration.
++This limitation will be addressed in the future.
++
++The DPSW can have ports connected to DPNIs or to PHYs via DPMACs.
++
++
++ [ethA] [ethB] [ethC] [ethD] [ethE] [ethF]
++ : : : : : :
++ : : : : : :
++[eth drv] [eth drv] [ ethsw drv ]
++ : : : : : : kernel
++========================================================================
++ : : : : : : hardware
++ [DPNI] [DPNI] [============= DPSW =================]
++ | | | | | |
++ | ---------- | [DPMAC] [DPMAC]
++ ------------------------------- | |
++ | |
++ [PHY] [PHY]
++
++For a more detailed description of the Ethernet switch device driver model
++see:
++ Documentation/networking/switchdev.txt
++
++Creating an Ethernet Switch
++===========================
++A device is created for the switch objects probed on the MC bus. Each DPSW
++has a number of properties which determine the configuration options and
++associated hardware resources.
++
++A DPSW object (and the other DPAA2 objects needed for a DPAA2 switch) can
++be added to a container on the MC bus in one of two ways: statically,
++through a Datapath Layout Binary file (DPL) that is parsed by MC at boot
++time; or created dynamically at runtime, via the DPAA2 objects APIs.
++
++Features
++========
++The driver configures the DPSW to perform hardware switching offload of
++unicast/multicast/broadcast (VLAN-tagged or untagged) traffic between its
++ports.
+
-+/**
-+ * dpsw_if_set_tci() - Set default VLAN Tag Control Information (TCI)
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @if_id: Interface Identifier
-+ * @cfg: Tag Control Information Configuration
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_if_set_tci(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ const struct dpsw_tci_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_if_set_tci *cmd_params;
++It allows configuration of hardware learning, flooding, multicast groups,
++port VLANs and STP state.
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_TCI,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_if_set_tci *)cmd.params;
-+ cmd_params->if_id = cpu_to_le16(if_id);
-+ dpsw_set_field(cmd_params->conf, VLAN_ID, cfg->vlan_id);
-+ dpsw_set_field(cmd_params->conf, DEI, cfg->dei);
-+ dpsw_set_field(cmd_params->conf, PCP, cfg->pcp);
-+ cmd_params->conf = cpu_to_le16(cmd_params->conf);
++Static entries can be added to and removed from the FDB.
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
++Hardware statistics for each port are provided through the ethtool -S option.
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/ethsw/TODO
+@@ -0,0 +1,14 @@
++* Add I/O capabilities on switch port netdevices. This will allow control
++  traffic to reach the CPU.
++* Add ACLs to redirect control traffic to the CPU.
++* Add support for displaying learned FDB entries.
++* MC firmware uprev; the DPAA2 objects used by the Ethernet Switch driver
++  need to be kept in sync with binary interface changes in MC.
++* Refine the README file.
++* Cleanup.
++
++NOTE: At least the first three of the above are required before getting the
++DPAA2 Ethernet Switch driver out of staging. Another requirement is that
++the fsl-mc bus driver be moved to drivers/bus and the dpio driver be moved
++to drivers/soc (this is required for I/O).
+
-+/**
-+ * dpsw_if_get_tci() - Get default VLAN Tag Control Information (TCI)
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @if_id: Interface Identifier
-+ * @cfg: Tag Control Information Configuration
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/ethsw/dpsw-cmd.h
+@@ -0,0 +1,359 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++/*
++ * Copyright 2013-2016 Freescale Semiconductor, Inc.
++ * Copyright 2017-2018 NXP
+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
-+int dpsw_if_get_tci(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ struct dpsw_tci_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_if_get_tci *cmd_params;
-+ struct dpsw_rsp_if_get_tci *rsp_params;
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_TCI,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_if_get_tci *)cmd.params;
-+ cmd_params->if_id = cpu_to_le16(if_id);
+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
++#ifndef __FSL_DPSW_CMD_H
++#define __FSL_DPSW_CMD_H
+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpsw_rsp_if_get_tci *)cmd.params;
-+ cfg->pcp = rsp_params->pcp;
-+ cfg->dei = rsp_params->dei;
-+ cfg->vlan_id = le16_to_cpu(rsp_params->vlan_id);
++/* DPSW Version */
++#define DPSW_VER_MAJOR 8
++#define DPSW_VER_MINOR 0
+
-+ return 0;
-+}
++#define DPSW_CMD_BASE_VERSION 1
++#define DPSW_CMD_ID_OFFSET 4
+
-+/**
-+ * dpsw_if_set_stp() - Function sets Spanning Tree Protocol (STP) state.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @if_id: Interface Identifier
-+ * @cfg: STP State configuration parameters
-+ *
-+ * The following STP states are supported -
-+ * blocking, listening, learning, forwarding and disabled.
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_if_set_stp(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ const struct dpsw_stp_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_if_set_stp *cmd_params;
++#define DPSW_CMD_ID(id) (((id) << DPSW_CMD_ID_OFFSET) | DPSW_CMD_BASE_VERSION)
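++
++/* Example (illustrative): DPSW_CMDID_OPEN below expands to
++ * (0x802 << 4) | 1 = 0x8021, i.e. the command id in the upper bits and the
++ * command version in the low nibble.
++ */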
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_STP,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_if_set_stp *)cmd.params;
-+ cmd_params->if_id = cpu_to_le16(if_id);
-+ cmd_params->vlan_id = cpu_to_le16(cfg->vlan_id);
-+ dpsw_set_field(cmd_params->state, STATE, cfg->state);
++/* Command IDs */
++#define DPSW_CMDID_CLOSE DPSW_CMD_ID(0x800)
++#define DPSW_CMDID_OPEN DPSW_CMD_ID(0x802)
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
++#define DPSW_CMDID_GET_API_VERSION DPSW_CMD_ID(0xa02)
+
-+/**
-+ * dpsw_if_set_accepted_frames()
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @if_id: Interface Identifier
-+ * @cfg: Frame types configuration
-+ *
-+ * When is admit_only_vlan_tagged- the device will discard untagged
-+ * frames or Priority-Tagged frames received on this interface.
-+ * When admit_only_untagged- untagged frames or Priority-Tagged
-+ * frames received on this interface will be accepted and assigned
-+ * to a VID based on the PVID and VID Set for this interface.
-+ * When admit_all - the device will accept VLAN tagged, untagged
-+ * and priority tagged frames.
-+ * The default is admit_all
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_if_set_accepted_frames(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ const struct dpsw_accepted_frames_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_if_set_accepted_frames *cmd_params;
++#define DPSW_CMDID_ENABLE DPSW_CMD_ID(0x002)
++#define DPSW_CMDID_DISABLE DPSW_CMD_ID(0x003)
++#define DPSW_CMDID_GET_ATTR DPSW_CMD_ID(0x004)
++#define DPSW_CMDID_RESET DPSW_CMD_ID(0x005)
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_ACCEPTED_FRAMES,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_if_set_accepted_frames *)cmd.params;
-+ cmd_params->if_id = cpu_to_le16(if_id);
-+ dpsw_set_field(cmd_params->unaccepted, FRAME_TYPE, cfg->type);
-+ dpsw_set_field(cmd_params->unaccepted, UNACCEPTED_ACT,
-+ cfg->unaccept_act);
++#define DPSW_CMDID_SET_IRQ_ENABLE DPSW_CMD_ID(0x012)
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
++#define DPSW_CMDID_SET_IRQ_MASK DPSW_CMD_ID(0x014)
+
-+/**
-+ * dpsw_if_set_accept_all_vlan()
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @if_id: Interface Identifier
-+ * @accept_all: Accept or drop frames having different VLAN
-+ *
-+ * When this is accept (FALSE), the device will discard incoming
-+ * frames for VLANs that do not include this interface in its
-+ * Member set. When accept (TRUE), the interface will accept all incoming frames
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_if_set_accept_all_vlan(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ int accept_all)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_if_set_accept_all_vlan *cmd_params;
++#define DPSW_CMDID_GET_IRQ_STATUS DPSW_CMD_ID(0x016)
++#define DPSW_CMDID_CLEAR_IRQ_STATUS DPSW_CMD_ID(0x017)
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_IF_ACCEPT_ALL_VLAN,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_if_set_accept_all_vlan *)cmd.params;
-+ cmd_params->if_id = cpu_to_le16(if_id);
-+ dpsw_set_field(cmd_params->accept_all, ACCEPT_ALL, accept_all);
++#define DPSW_CMDID_IF_SET_TCI DPSW_CMD_ID(0x030)
++#define DPSW_CMDID_IF_SET_STP DPSW_CMD_ID(0x031)
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
++#define DPSW_CMDID_IF_GET_COUNTER DPSW_CMD_ID(0x034)
+
-+/**
-+ * dpsw_if_get_counter() - Get specific counter of particular interface
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @if_id: Interface Identifier
-+ * @type: Counter type
-+ * @counter: return value
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_if_get_counter(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ enum dpsw_counter type,
-+ u64 *counter)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_if_get_counter *cmd_params;
-+ struct dpsw_rsp_if_get_counter *rsp_params;
-+ int err;
++#define DPSW_CMDID_IF_ENABLE DPSW_CMD_ID(0x03D)
++#define DPSW_CMDID_IF_DISABLE DPSW_CMD_ID(0x03E)
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_COUNTER,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_if_get_counter *)cmd.params;
-+ cmd_params->if_id = cpu_to_le16(if_id);
-+ dpsw_set_field(cmd_params->type, COUNTER_TYPE, type);
++#define DPSW_CMDID_IF_SET_MAX_FRAME_LENGTH DPSW_CMD_ID(0x044)
+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
++#define DPSW_CMDID_IF_GET_LINK_STATE DPSW_CMD_ID(0x046)
++#define DPSW_CMDID_IF_SET_FLOODING DPSW_CMD_ID(0x047)
++#define DPSW_CMDID_IF_SET_BROADCAST DPSW_CMD_ID(0x048)
+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpsw_rsp_if_get_counter *)cmd.params;
-+ *counter = le64_to_cpu(rsp_params->counter);
++#define DPSW_CMDID_IF_GET_TCI DPSW_CMD_ID(0x04A)
+
-+ return 0;
-+}
++#define DPSW_CMDID_IF_SET_LINK_CFG DPSW_CMD_ID(0x04C)
+
-+/**
-+ * dpsw_if_set_counter() - Set specific counter of particular interface
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @if_id: Interface Identifier
-+ * @type: Counter type
-+ * @counter: New counter value
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_if_set_counter(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ enum dpsw_counter type,
-+ u64 counter)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_if_set_counter *cmd_params;
++#define DPSW_CMDID_VLAN_ADD DPSW_CMD_ID(0x060)
++#define DPSW_CMDID_VLAN_ADD_IF DPSW_CMD_ID(0x061)
++#define DPSW_CMDID_VLAN_ADD_IF_UNTAGGED DPSW_CMD_ID(0x062)
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_COUNTER,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_if_set_counter *)cmd.params;
-+ cmd_params->if_id = cpu_to_le16(if_id);
-+ cmd_params->counter = cpu_to_le64(counter);
-+ dpsw_set_field(cmd_params->type, COUNTER_TYPE, type);
++#define DPSW_CMDID_VLAN_REMOVE_IF DPSW_CMD_ID(0x064)
++#define DPSW_CMDID_VLAN_REMOVE_IF_UNTAGGED DPSW_CMD_ID(0x065)
++#define DPSW_CMDID_VLAN_REMOVE_IF_FLOODING DPSW_CMD_ID(0x066)
++#define DPSW_CMDID_VLAN_REMOVE DPSW_CMD_ID(0x067)
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
++#define DPSW_CMDID_FDB_ADD_UNICAST DPSW_CMD_ID(0x084)
++#define DPSW_CMDID_FDB_REMOVE_UNICAST DPSW_CMD_ID(0x085)
++#define DPSW_CMDID_FDB_ADD_MULTICAST DPSW_CMD_ID(0x086)
++#define DPSW_CMDID_FDB_REMOVE_MULTICAST DPSW_CMD_ID(0x087)
++#define DPSW_CMDID_FDB_SET_LEARNING_MODE DPSW_CMD_ID(0x088)
+
-+/**
-+ * dpsw_if_set_tx_selection() - Function is used for mapping variety
-+ * of frame fields
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @if_id: Interface Identifier
-+ * @cfg: Traffic class mapping configuration
-+ *
-+ * Function is used for mapping variety of frame fields (DSCP, PCP)
-+ * to Traffic Class. Traffic class is a number
-+ * in the range from 0 to 7
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_if_set_tx_selection(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ const struct dpsw_tx_selection_cfg *cfg)
-+{
-+ struct dpsw_cmd_if_set_tx_selection *cmd_params;
-+ struct mc_command cmd = { 0 };
-+ int i;
++/* Macros for accessing command fields smaller than 1 byte */
++#define DPSW_MASK(field) \
++ GENMASK(DPSW_##field##_SHIFT + DPSW_##field##_SIZE - 1, \
++ DPSW_##field##_SHIFT)
++#define dpsw_set_field(var, field, val) \
++ ((var) |= (((val) << DPSW_##field##_SHIFT) & DPSW_MASK(field)))
++#define dpsw_get_field(var, field) \
++ (((var) & DPSW_MASK(field)) >> DPSW_##field##_SHIFT)
++#define dpsw_get_bit(var, bit) \
++ (((var) >> (bit)) & GENMASK(0, 0))
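++
++/* Example (illustrative): with DPSW_COMPONENT_TYPE_SHIFT/SIZE defined below
++ * as 0/4, dpsw_set_field(var, COMPONENT_TYPE, 2) ORs the value 2 into bits
++ * 3..0 of 'var', and dpsw_get_field(var, COMPONENT_TYPE) reads it back.
++ */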
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_TX_SELECTION,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_if_set_tx_selection *)cmd.params;
-+ cmd_params->if_id = cpu_to_le16(if_id);
-+ dpsw_set_field(cmd_params->priority_selector, PRIORITY_SELECTOR,
-+ cfg->priority_selector);
++struct dpsw_cmd_open {
++ __le32 dpsw_id;
++};
+
-+ for (i = 0; i < 8; i++) {
-+ cmd_params->tc_sched[i].delta_bandwidth =
-+ cpu_to_le16(cfg->tc_sched[i].delta_bandwidth);
-+ dpsw_set_field(cmd_params->tc_sched[i].mode, SCHED_MODE,
-+ cfg->tc_sched[i].mode);
-+ cmd_params->tc_id[i] = cfg->tc_id[i];
-+ }
++#define DPSW_COMPONENT_TYPE_SHIFT 0
++#define DPSW_COMPONENT_TYPE_SIZE 4
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
++struct dpsw_cmd_create {
++ /* cmd word 0 */
++ __le16 num_ifs;
++ u8 max_fdbs;
++ u8 max_meters_per_if;
++ /* from LSB: only the first 4 bits */
++ u8 component_type;
++ u8 pad[3];
++ /* cmd word 1 */
++ __le16 max_vlans;
++ __le16 max_fdb_entries;
++ __le16 fdb_aging_time;
++ __le16 max_fdb_mc_groups;
++ /* cmd word 2 */
++ __le64 options;
++};
+
-+/**
-+ * dpsw_if_add_reflection() - Identify interface to be reflected or mirrored
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @if_id: Interface Identifier
-+ * @cfg: Reflection configuration
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_if_add_reflection(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ const struct dpsw_reflection_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_if_reflection *cmd_params;
++struct dpsw_cmd_destroy {
++ __le32 dpsw_id;
++};
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_ADD_REFLECTION,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_if_reflection *)cmd.params;
-+ cmd_params->if_id = cpu_to_le16(if_id);
-+ cmd_params->vlan_id = cpu_to_le16(cfg->vlan_id);
-+ dpsw_set_field(cmd_params->filter, FILTER, cfg->filter);
++#define DPSW_ENABLE_SHIFT 0
++#define DPSW_ENABLE_SIZE 1
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
++struct dpsw_rsp_is_enabled {
++ /* from LSB: enable:1 */
++ u8 enabled;
++};
+
-+/**
-+ * dpsw_if_remove_reflection() - Remove interface to be reflected or mirrored
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @if_id: Interface Identifier
-+ * @cfg: Reflection configuration
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_if_remove_reflection(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ const struct dpsw_reflection_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_if_reflection *cmd_params;
++struct dpsw_cmd_set_irq_enable {
++ u8 enable_state;
++ u8 pad[3];
++ u8 irq_index;
++};
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_REMOVE_REFLECTION,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_if_reflection *)cmd.params;
-+ cmd_params->if_id = cpu_to_le16(if_id);
-+ cmd_params->vlan_id = cpu_to_le16(cfg->vlan_id);
-+ dpsw_set_field(cmd_params->filter, FILTER, cfg->filter);
++struct dpsw_cmd_get_irq_enable {
++ __le32 pad;
++ u8 irq_index;
++};
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
++struct dpsw_rsp_get_irq_enable {
++ u8 enable_state;
++};
+
-+/**
-+ * dpsw_if_set_flooding_metering() - Set flooding metering
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @if_id: Interface Identifier
-+ * @cfg: Metering parameters
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_if_set_flooding_metering(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ const struct dpsw_metering_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_if_set_flooding_metering *cmd_params;
++struct dpsw_cmd_set_irq_mask {
++ __le32 mask;
++ u8 irq_index;
++};
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_FLOODING_METERING,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_if_set_flooding_metering *)cmd.params;
-+ cmd_params->if_id = cpu_to_le16(if_id);
-+ dpsw_set_field(cmd_params->mode_units, MODE, cfg->mode);
-+ dpsw_set_field(cmd_params->mode_units, UNITS, cfg->units);
-+ cmd_params->cir = cpu_to_le32(cfg->cir);
-+ cmd_params->eir = cpu_to_le32(cfg->eir);
-+ cmd_params->cbs = cpu_to_le32(cfg->cbs);
-+ cmd_params->ebs = cpu_to_le32(cfg->ebs);
++struct dpsw_cmd_get_irq_mask {
++ __le32 pad;
++ u8 irq_index;
++};
++
++struct dpsw_rsp_get_irq_mask {
++ __le32 mask;
++};
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
++struct dpsw_cmd_get_irq_status {
++ __le32 status;
++ u8 irq_index;
++};
+
-+/**
-+ * dpsw_if_set_metering() - Set interface metering for flooding
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @if_id: Interface Identifier
-+ * @tc_id: Traffic class ID
-+ * @cfg: Metering parameters
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_if_set_metering(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ u8 tc_id,
-+ const struct dpsw_metering_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_if_set_metering *cmd_params;
++struct dpsw_rsp_get_irq_status {
++ __le32 status;
++};
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_METERING,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_if_set_metering *)cmd.params;
-+ cmd_params->if_id = cpu_to_le16(if_id);
-+ cmd_params->tc_id = tc_id;
-+ dpsw_set_field(cmd_params->mode_units, MODE, cfg->mode);
-+ dpsw_set_field(cmd_params->mode_units, UNITS, cfg->units);
-+ cmd_params->cir = cpu_to_le32(cfg->cir);
-+ cmd_params->eir = cpu_to_le32(cfg->eir);
-+ cmd_params->cbs = cpu_to_le32(cfg->cbs);
-+ cmd_params->ebs = cpu_to_le32(cfg->ebs);
++struct dpsw_cmd_clear_irq_status {
++ __le32 status;
++ u8 irq_index;
++};
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
+
-+/**
-+ * dpsw_prepare_early_drop() - Prepare an early drop for setting in to interface
-+ * @cfg: Early-drop configuration
-+ * @early_drop_buf: Zeroed 256 bytes of memory before mapping it to DMA
-+ *
-+ * This function has to be called before dpsw_if_tc_set_early_drop
-+ *
-+ */
-+void dpsw_prepare_early_drop(const struct dpsw_early_drop_cfg *cfg,
-+ u8 *early_drop_buf)
-+{
-+ struct dpsw_prep_early_drop *ext_params;
++struct dpsw_rsp_get_attr {
++ /* cmd word 0 */
++ __le16 num_ifs;
++ u8 max_fdbs;
++ u8 num_fdbs;
++ __le16 max_vlans;
++ __le16 num_vlans;
++ /* cmd word 1 */
++ __le16 max_fdb_entries;
++ __le16 fdb_aging_time;
++ __le32 dpsw_id;
++ /* cmd word 2 */
++ __le16 mem_size;
++ __le16 max_fdb_mc_groups;
++ u8 max_meters_per_if;
++ /* from LSB only the first 4 bits */
++ u8 component_type;
++ __le16 pad;
++ /* cmd word 3 */
++ __le64 options;
++};
+
-+ ext_params = (struct dpsw_prep_early_drop *)early_drop_buf;
-+ dpsw_set_field(ext_params->conf, EARLY_DROP_MODE, cfg->drop_mode);
-+ dpsw_set_field(ext_params->conf, EARLY_DROP_UNIT, cfg->units);
-+ ext_params->tail_drop_threshold = cpu_to_le32(cfg->tail_drop_threshold);
-+ ext_params->green_drop_probability = cfg->green.drop_probability;
-+ ext_params->green_max_threshold = cpu_to_le64(cfg->green.max_threshold);
-+ ext_params->green_min_threshold = cpu_to_le64(cfg->green.min_threshold);
-+ ext_params->yellow_drop_probability = cfg->yellow.drop_probability;
-+ ext_params->yellow_max_threshold =
-+ cpu_to_le64(cfg->yellow.max_threshold);
-+ ext_params->yellow_min_threshold =
-+ cpu_to_le64(cfg->yellow.min_threshold);
-+}
++struct dpsw_cmd_if_set_flooding {
++ __le16 if_id;
++ /* from LSB: enable:1 */
++ u8 enable;
++};
+
-+/**
-+ * dpsw_if_set_early_drop() - Set interface traffic class early-drop
-+ * configuration
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @if_id: Interface Identifier
-+ * @tc_id: Traffic class selection (0-7)
-+ * @early_drop_iova: I/O virtual address of 64 bytes;
-+ * Must be cacheline-aligned and DMA-able memory
-+ *
-+ * warning: Before calling this function, call dpsw_prepare_if_tc_early_drop()
-+ * to prepare the early_drop_iova parameter
-+ *
-+ * Return: '0' on Success; error code otherwise.
-+ */
-+int dpsw_if_set_early_drop(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ u8 tc_id,
-+ u64 early_drop_iova)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_if_set_early_drop *cmd_params;
++struct dpsw_cmd_if_set_broadcast {
++ __le16 if_id;
++ /* from LSB: enable:1 */
++ u8 enable;
++};
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_EARLY_DROP,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_if_set_early_drop *)cmd.params;
-+ cmd_params->tc_id = tc_id;
-+ cmd_params->if_id = cpu_to_le16(if_id);
-+ cmd_params->early_drop_iova = cpu_to_le64(early_drop_iova);
++#define DPSW_VLAN_ID_SHIFT 0
++#define DPSW_VLAN_ID_SIZE 12
++#define DPSW_DEI_SHIFT 12
++#define DPSW_DEI_SIZE 1
++#define DPSW_PCP_SHIFT 13
++#define DPSW_PCP_SIZE 3
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
++struct dpsw_cmd_if_set_tci {
++ __le16 if_id;
++ /* from LSB: VLAN_ID:12 DEI:1 PCP:3 */
++ __le16 conf;
++};
+
-+/**
-+ * dpsw_add_custom_tpid() - API Configures a distinct Ethernet type value
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @cfg: Tag Protocol identifier
-+ *
-+ * API Configures a distinct Ethernet type value (or TPID value)
-+ * to indicate a VLAN tag in addition to the common
-+ * TPID values 0x8100 and 0x88A8.
-+ * Two additional TPID's are supported
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_add_custom_tpid(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ const struct dpsw_custom_tpid_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_custom_tpid *cmd_params;
++struct dpsw_cmd_if_get_tci {
++ __le16 if_id;
++};
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ADD_CUSTOM_TPID,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_custom_tpid *)cmd.params;
-+ cmd_params->tpid = cpu_to_le16(cfg->tpid);
++struct dpsw_rsp_if_get_tci {
++ __le16 pad;
++ __le16 vlan_id;
++ u8 dei;
++ u8 pcp;
++};
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
++#define DPSW_STATE_SHIFT 0
++#define DPSW_STATE_SIZE 4
+
-+/**
-+ * dpsw_remove_custom_tpid - API removes a distinct Ethernet type value
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @cfg: Tag Protocol identifier
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_remove_custom_tpid(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ const struct dpsw_custom_tpid_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_custom_tpid *cmd_params;
++struct dpsw_cmd_if_set_stp {
++ __le16 if_id;
++ __le16 vlan_id;
++	/* only the first 4 bits from LSB */
++ u8 state;
++};
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_REMOVE_CUSTOM_TPID,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_custom_tpid *)cmd.params;
-+ cmd_params->tpid = cpu_to_le16(cfg->tpid);
++#define DPSW_COUNTER_TYPE_SHIFT 0
++#define DPSW_COUNTER_TYPE_SIZE 5
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
++struct dpsw_cmd_if_get_counter {
++ __le16 if_id;
++ /* from LSB: type:5 */
++ u8 type;
++};
+
-+/**
-+ * dpsw_if_enable() - Enable Interface
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @if_id: Interface Identifier
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_if_enable(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_if *cmd_params;
++struct dpsw_rsp_if_get_counter {
++ __le64 pad;
++ __le64 counter;
++};
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_ENABLE,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_if *)cmd.params;
-+ cmd_params->if_id = cpu_to_le16(if_id);
++struct dpsw_cmd_if {
++ __le16 if_id;
++};
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
++struct dpsw_cmd_if_set_max_frame_length {
++ __le16 if_id;
++ __le16 frame_length;
++};
+
-+/**
-+ * dpsw_if_disable() - Disable Interface
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @if_id: Interface Identifier
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_if_disable(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_if *cmd_params;
++struct dpsw_cmd_if_set_link_cfg {
++ /* cmd word 0 */
++ __le16 if_id;
++ u8 pad[6];
++ /* cmd word 1 */
++ __le32 rate;
++ __le32 pad1;
++ /* cmd word 2 */
++ __le64 options;
++};
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_DISABLE,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_if *)cmd.params;
-+ cmd_params->if_id = cpu_to_le16(if_id);
++struct dpsw_cmd_if_get_link_state {
++ __le16 if_id;
++};
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
++#define DPSW_UP_SHIFT 0
++#define DPSW_UP_SIZE 1
+
-+/**
-+ * dpsw_if_get_attributes() - Function obtains attributes of interface
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @if_id: Interface Identifier
-+ * @attr: Returned interface attributes
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_if_get_attributes(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ struct dpsw_if_attr *attr)
-+{
-+ struct dpsw_rsp_if_get_attr *rsp_params;
-+ struct dpsw_cmd_if *cmd_params;
-+ struct mc_command cmd = { 0 };
-+ int err;
++struct dpsw_rsp_if_get_link_state {
++ /* cmd word 0 */
++ __le32 pad0;
++ u8 up;
++ u8 pad1[3];
++ /* cmd word 1 */
++ __le32 rate;
++ __le32 pad2;
++ /* cmd word 2 */
++ __le64 options;
++};
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_ATTR,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_if *)cmd.params;
-+ cmd_params->if_id = cpu_to_le16(if_id);
++struct dpsw_vlan_add {
++ __le16 fdb_id;
++ __le16 vlan_id;
++};
+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
++struct dpsw_cmd_vlan_manage_if {
++ /* cmd word 0 */
++ __le16 pad0;
++ __le16 vlan_id;
++ __le32 pad1;
++ /* cmd word 1-4 */
++ __le64 if_id[4];
++};
+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpsw_rsp_if_get_attr *)cmd.params;
-+ attr->num_tcs = rsp_params->num_tcs;
-+ attr->rate = le32_to_cpu(rsp_params->rate);
-+ attr->options = le32_to_cpu(rsp_params->options);
-+ attr->enabled = dpsw_get_field(rsp_params->conf, ENABLED);
-+ attr->accept_all_vlan = dpsw_get_field(rsp_params->conf,
-+ ACCEPT_ALL_VLAN);
-+ attr->admit_untagged = dpsw_get_field(rsp_params->conf, ADMIT_UNTAGGED);
-+ attr->qdid = le16_to_cpu(rsp_params->qdid);
++struct dpsw_cmd_vlan_remove {
++ __le16 pad;
++ __le16 vlan_id;
++};
+
-+ return 0;
-+}
++struct dpsw_cmd_fdb_add {
++ __le32 pad;
++ __le16 fdb_aging_time;
++ __le16 num_fdb_entries;
++};
+
-+/**
-+ * dpsw_if_set_max_frame_length() - Set Maximum Receive frame length.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @if_id: Interface Identifier
-+ * @frame_length: Maximum Frame Length
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_if_set_max_frame_length(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ u16 frame_length)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_if_set_max_frame_length *cmd_params;
++struct dpsw_rsp_fdb_add {
++ __le16 fdb_id;
++};
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_MAX_FRAME_LENGTH,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_if_set_max_frame_length *)cmd.params;
-+ cmd_params->if_id = cpu_to_le16(if_id);
-+ cmd_params->frame_length = cpu_to_le16(frame_length);
++struct dpsw_cmd_fdb_remove {
++ __le16 fdb_id;
++};
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
++#define DPSW_ENTRY_TYPE_SHIFT 0
++#define DPSW_ENTRY_TYPE_SIZE 4
+
-+/**
-+ * dpsw_if_get_max_frame_length() - Get Maximum Receive frame length.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @if_id: Interface Identifier
-+ * @frame_length: Returned maximum Frame Length
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_if_get_max_frame_length(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ u16 *frame_length)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_if_get_max_frame_length *cmd_params;
-+ struct dpsw_rsp_if_get_max_frame_length *rsp_params;
-+ int err;
++struct dpsw_cmd_fdb_unicast_op {
++ /* cmd word 0 */
++ __le16 fdb_id;
++ u8 mac_addr[6];
++ /* cmd word 1 */
++ __le16 if_egress;
++ /* only the first 4 bits from LSB */
++ u8 type;
++};
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_MAX_FRAME_LENGTH,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_if_get_max_frame_length *)cmd.params;
-+ cmd_params->if_id = cpu_to_le16(if_id);
++struct dpsw_cmd_fdb_multicast_op {
++ /* cmd word 0 */
++ __le16 fdb_id;
++ __le16 num_ifs;
++ /* only the first 4 bits from LSB */
++ u8 type;
++ u8 pad[3];
++ /* cmd word 1 */
++ u8 mac_addr[6];
++ __le16 pad2;
++ /* cmd word 2-5 */
++ __le64 if_id[4];
++};
+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
++#define DPSW_LEARNING_MODE_SHIFT 0
++#define DPSW_LEARNING_MODE_SIZE 4
+
-+ rsp_params = (struct dpsw_rsp_if_get_max_frame_length *)cmd.params;
-+ *frame_length = le16_to_cpu(rsp_params->frame_length);
++struct dpsw_cmd_fdb_set_learning_mode {
++ __le16 fdb_id;
++ /* only the first 4 bits from LSB */
++ u8 mode;
++};
+
-+ return 0;
-+}
++struct dpsw_rsp_get_api_version {
++ __le16 version_major;
++ __le16 version_minor;
++};
+
-+/**
-+ * dpsw_vlan_add() - Adding new VLAN to DPSW.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @vlan_id: VLAN Identifier
-+ * @cfg: VLAN configuration
-+ *
-+ * Only VLAN ID and FDB ID are required parameters here.
-+ * 12 bit VLAN ID is defined in IEEE802.1Q.
-+ * Adding a duplicate VLAN ID is not allowed.
-+ * FDB ID can be shared across multiple VLANs. Shared learning
-+ * is obtained by calling dpsw_vlan_add for multiple VLAN IDs
-+ * with same fdb_id
++#endif /* __FSL_DPSW_CMD_H */
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/ethsw/dpsw.c
+@@ -0,0 +1,1165 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ * Copyright 2013-2016 Freescale Semiconductor, Inc.
++ * Copyright 2017-2018 NXP
+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
-+int dpsw_vlan_add(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 vlan_id,
-+ const struct dpsw_vlan_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_vlan_add *cmd_params;
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_ADD,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_vlan_add *)cmd.params;
-+ cmd_params->fdb_id = cpu_to_le16(cfg->fdb_id);
-+ cmd_params->vlan_id = cpu_to_le16(vlan_id);
++#include <linux/fsl/mc.h>
++#include "dpsw.h"
++#include "dpsw-cmd.h"
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
++static void build_if_id_bitmap(__le64 *bmap,
++ const u16 *id,
++ const u16 num_ifs)
++{
++ int i;
++
++ for (i = 0; (i < num_ifs) && (i < DPSW_MAX_IF); i++) {
++ if (id[i] < DPSW_MAX_IF)
++ bmap[id[i] / 64] |= cpu_to_le64(BIT_MASK(id[i] % 64));
++ }
+}
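++
++/*
++ * Illustrative sketch (not part of the MC API): how the helper above
++ * packs interface ids into the four 64-bit words consumed by the
++ * VLAN/FDB commands below. The ids used are assumptions for the example.
++ *
++ *	__le64 bmap[4] = { 0 };
++ *	u16 ifs[] = { 0, 5, 64 };
++ *
++ *	build_if_id_bitmap(bmap, ifs, ARRAY_SIZE(ifs));
++ *	bmap[0] now holds bits 0 and 5; bmap[1] holds bit 0 (interface 64)
++ */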
+
+/**
-+ * dpsw_vlan_add_if() - Adding a set of interfaces to an existing VLAN.
++ * dpsw_open() - Open a control session for the specified object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @vlan_id: VLAN Identifier
-+ * @cfg: Set of interfaces to add
++ * @dpsw_id: DPSW unique ID
++ * @token: Returned token; use in subsequent API calls
+ *
-+ * It adds only interfaces not belonging to this VLAN yet,
-+ * otherwise an error is generated and an entire command is
-+ * ignored. This function can be called numerous times always
-+ * providing required interfaces delta.
++ * This function can be used to open a control session for an
++ * already created object; an object may have been declared in
++ * the DPL or by calling the dpsw_create() function.
++ * This function returns a unique authentication token,
++ * associated with the specific object ID and the specific MC
++ * portal; this token must be used in all subsequent commands for
++ * this specific object
+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
++ * Return: '0' on Success; Error code otherwise.
+ */
-+int dpsw_vlan_add_if(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 vlan_id,
-+ const struct dpsw_vlan_if_cfg *cfg)
++int dpsw_open(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ int dpsw_id,
++ u16 *token)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_vlan_manage_if *cmd_params;
++ struct fsl_mc_command cmd = { 0 };
++ struct dpsw_cmd_open *cmd_params;
++ int err;
+
+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_ADD_IF,
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_OPEN,
+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params;
-+ cmd_params->vlan_id = cpu_to_le16(vlan_id);
-+ build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
++ 0);
++ cmd_params = (struct dpsw_cmd_open *)cmd.params;
++ cmd_params->dpsw_id = cpu_to_le32(dpsw_id);
+
+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ *token = mc_cmd_hdr_read_token(&cmd);
++
++ return 0;
+}
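++
++/*
++ * Usage sketch (illustrative only; 'mc_io' and 'dpsw_id' are assumed to
++ * come from the fsl-mc probe context): every other command in this file
++ * needs the token returned here.
++ *
++ *	u16 token;
++ *	int err;
++ *
++ *	err = dpsw_open(mc_io, 0, dpsw_id, &token);
++ *	if (err)
++ *		return err;
++ *	err = dpsw_enable(mc_io, 0, token);
++ *	(use the object through 'token' here)
++ *	dpsw_close(mc_io, 0, token);
++ */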
+
+/**
-+ * dpsw_vlan_add_if_untagged() - Defining a set of interfaces that should be
-+ * transmitted as untagged.
++ * dpsw_close() - Close the control session of the object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
-+ * @vlan_id: VLAN Identifier
-+ * @cfg: Set of interfaces that should be transmitted as untagged
+ *
-+ * These interfaces should already belong to this VLAN.
-+ * By default all interfaces are transmitted as tagged.
-+ * Providing un-existing interface or untagged interface that is
-+ * configured untagged already generates an error and the entire
-+ * command is ignored.
++ * After this function is called, no further operations are
++ * allowed on the object without opening a new control session.
+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
++ * Return: '0' on Success; Error code otherwise.
+ */
-+int dpsw_vlan_add_if_untagged(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 vlan_id,
-+ const struct dpsw_vlan_if_cfg *cfg)
++int dpsw_close(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_vlan_manage_if *cmd_params;
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_ADD_IF_UNTAGGED,
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CLOSE,
+ cmd_flags,
+ token);
-+ cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params;
-+ cmd_params->vlan_id = cpu_to_le16(vlan_id);
-+ build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
-+ * dpsw_vlan_add_if_flooding() - Define a set of interfaces that should be
-+ * included in flooding when frame with unknown destination
-+ * unicast MAC arrived.
++ * dpsw_enable() - Enable DPSW functionality
+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @vlan_id: VLAN Identifier
-+ * @cfg: Set of interfaces that should be used for flooding
-+ *
-+ * These interfaces should belong to this VLAN. By default all
-+ * interfaces are included into flooding list. Providing
-+ * un-existing interface or an interface that already in the
-+ * flooding list generates an error and the entire command is
-+ * ignored.
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
-+int dpsw_vlan_add_if_flooding(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 vlan_id,
-+ const struct dpsw_vlan_if_cfg *cfg)
++int dpsw_enable(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_vlan_manage_if *cmd_params;
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_ADD_IF_FLOODING,
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ENABLE,
+ cmd_flags,
+ token);
-+ cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params;
-+ cmd_params->vlan_id = cpu_to_le16(vlan_id);
-+ build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
-+ * dpsw_vlan_remove_if() - Remove interfaces from an existing VLAN.
++ * dpsw_disable() - Disable DPSW functionality
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
-+ * @vlan_id: VLAN Identifier
-+ * @cfg: Set of interfaces that should be removed
-+ *
-+ * Interfaces must belong to this VLAN, otherwise an error
-+ * is returned and an the command is ignored
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
-+int dpsw_vlan_remove_if(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 vlan_id,
-+ const struct dpsw_vlan_if_cfg *cfg)
++int dpsw_disable(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_vlan_manage_if *cmd_params;
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_REMOVE_IF,
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_DISABLE,
+ cmd_flags,
+ token);
-+ cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params;
-+ cmd_params->vlan_id = cpu_to_le16(vlan_id);
-+ build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
-+ * dpsw_vlan_remove_if_untagged() - Define a set of interfaces that should be
-+ * converted from transmitted as untagged to transmit as tagged.
++ * dpsw_reset() - Reset the DPSW, returning the object to its initial state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
-+ * @vlan_id: VLAN Identifier
-+ * @cfg: Set of interfaces that should be removed
+ *
-+ * Interfaces provided by API have to belong to this VLAN and
-+ * configured untagged, otherwise an error is returned and the
-+ * command is ignored
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
++ * Return: '0' on Success; Error code otherwise.
+ */
-+int dpsw_vlan_remove_if_untagged(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 vlan_id,
-+ const struct dpsw_vlan_if_cfg *cfg)
++int dpsw_reset(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_vlan_manage_if *cmd_params;
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_REMOVE_IF_UNTAGGED,
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_RESET,
+ cmd_flags,
+ token);
-+ cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params;
-+ cmd_params->vlan_id = cpu_to_le16(vlan_id);
-+ build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
-+ * dpsw_vlan_remove_if_flooding() - Define a set of interfaces that should be
-+ * removed from the flooding list.
++ * dpsw_set_irq_enable() - Set overall interrupt state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @vlan_id: VLAN Identifier
-+ * @cfg: Set of interfaces used for flooding
++ * @token:		Token of DPSW object
++ * @irq_index: The interrupt index to configure
++ * @en: Interrupt state - enable = 1, disable = 0
+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
++ * Allows GPP software to control when interrupts are generated.
++ * Each interrupt can have up to 32 causes. The enable/disable control
++ * determines the overall interrupt state: if the interrupt is disabled,
++ * none of the causes will raise an interrupt.
++ *
++ * Return: '0' on Success; Error code otherwise.
+ */
-+int dpsw_vlan_remove_if_flooding(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 vlan_id,
-+ const struct dpsw_vlan_if_cfg *cfg)
++int dpsw_set_irq_enable(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ u8 en)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_vlan_manage_if *cmd_params;
++ struct fsl_mc_command cmd = { 0 };
++ struct dpsw_cmd_set_irq_enable *cmd_params;
+
+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_REMOVE_IF_FLOODING,
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_IRQ_ENABLE,
+ cmd_flags,
+ token);
-+ cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params;
-+ cmd_params->vlan_id = cpu_to_le16(vlan_id);
-+ build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
++ cmd_params = (struct dpsw_cmd_set_irq_enable *)cmd.params;
++ dpsw_set_field(cmd_params->enable_state, ENABLE, en);
++ cmd_params->irq_index = irq_index;
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
-+ * dpsw_vlan_remove() - Remove an entire VLAN
++ * dpsw_set_irq_mask() - Set interrupt mask.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @vlan_id: VLAN Identifier
++ * @token:		Token of DPSW object
++ * @irq_index: The interrupt index to configure
++ * @mask: Event mask to trigger interrupt;
++ * each bit:
++ * 0 = ignore event
++ * 1 = consider event for asserting IRQ
+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
++ * Every interrupt can have up to 32 causes and the interrupt model supports
++ * masking/unmasking each cause independently
++ *
++ * Return: '0' on Success; Error code otherwise.
+ */
-+int dpsw_vlan_remove(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 vlan_id)
++int dpsw_set_irq_mask(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ u32 mask)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_vlan_remove *cmd_params;
++ struct fsl_mc_command cmd = { 0 };
++ struct dpsw_cmd_set_irq_mask *cmd_params;
+
+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_REMOVE,
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_IRQ_MASK,
+ cmd_flags,
+ token);
-+ cmd_params = (struct dpsw_cmd_vlan_remove *)cmd.params;
-+ cmd_params->vlan_id = cpu_to_le16(vlan_id);
++ cmd_params = (struct dpsw_cmd_set_irq_mask *)cmd.params;
++ cmd_params->mask = cpu_to_le32(mask);
++ cmd_params->irq_index = irq_index;
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
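++
++/*
++ * Sketch of a typical interrupt bring-up (irq_index 0 and the all-ones
++ * mask are assumptions): unmask every cause, then enable the interrupt
++ * as a whole.
++ *
++ *	err = dpsw_set_irq_mask(mc_io, 0, token, 0, 0xffffffff);
++ *	if (!err)
++ *		err = dpsw_set_irq_enable(mc_io, 0, token, 0, 1);
++ */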
+
+/**
-+ * dpsw_vlan_get_attributes() - Get VLAN attributes
++ * dpsw_get_irq_status() - Get the current status of any pending interrupts
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
-+ * @vlan_id: VLAN Identifier
-+ * @attr: Returned DPSW attributes
++ * @irq_index: The interrupt index to configure
++ * @status:	Returned interrupt status - one bit per cause:
++ * 0 = no interrupt pending
++ * 1 = interrupt pending
+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
++ * Return: '0' on Success; Error code otherwise.
+ */
-+int dpsw_vlan_get_attributes(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 vlan_id,
-+ struct dpsw_vlan_attr *attr)
++int dpsw_get_irq_status(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ u32 *status)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_vlan_get_attr *cmd_params;
-+ struct dpsw_rsp_vlan_get_attr *rsp_params;
++ struct fsl_mc_command cmd = { 0 };
++ struct dpsw_cmd_get_irq_status *cmd_params;
++ struct dpsw_rsp_get_irq_status *rsp_params;
+ int err;
+
+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_GET_ATTRIBUTES,
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_GET_IRQ_STATUS,
+ cmd_flags,
+ token);
-+ cmd_params = (struct dpsw_cmd_vlan_get_attr *)cmd.params;
-+ cmd_params->vlan_id = cpu_to_le16(vlan_id);
++ cmd_params = (struct dpsw_cmd_get_irq_status *)cmd.params;
++ cmd_params->status = cpu_to_le32(*status);
++ cmd_params->irq_index = irq_index;
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+	if (err)
+		return err;
+
+ /* retrieve response parameters */
-+ rsp_params = (struct dpsw_rsp_vlan_get_attr *)cmd.params;
-+ attr->fdb_id = le16_to_cpu(rsp_params->fdb_id);
-+ attr->num_ifs = le16_to_cpu(rsp_params->num_ifs);
-+ attr->num_untagged_ifs = le16_to_cpu(rsp_params->num_untagged_ifs);
-+ attr->num_flooding_ifs = le16_to_cpu(rsp_params->num_flooding_ifs);
++ rsp_params = (struct dpsw_rsp_get_irq_status *)cmd.params;
++ *status = le32_to_cpu(rsp_params->status);
+
+ return 0;
+}
+
+/**
-+ * dpsw_vlan_get_if() - Get interfaces belong to this VLAN
++ * dpsw_clear_irq_status() - Clear a pending interrupt's status
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @vlan_id: VLAN Identifier
-+ * @cfg: Returned set of interfaces belong to this VLAN
++ * @token:		Token of DPSW object
++ * @irq_index: The interrupt index to configure
++ * @status: bits to clear (W1C) - one bit per cause:
++ * 0 = don't change
++ * 1 = clear status bit
+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
++ * Return: '0' on Success; Error code otherwise.
+ */
-+int dpsw_vlan_get_if(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 vlan_id,
-+ struct dpsw_vlan_if_cfg *cfg)
++int dpsw_clear_irq_status(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ u32 status)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_vlan_get_if *cmd_params;
-+ struct dpsw_rsp_vlan_get_if *rsp_params;
-+ int err;
++ struct fsl_mc_command cmd = { 0 };
++ struct dpsw_cmd_clear_irq_status *cmd_params;
+
+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_GET_IF,
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CLEAR_IRQ_STATUS,
+ cmd_flags,
+ token);
-+ cmd_params = (struct dpsw_cmd_vlan_get_if *)cmd.params;
-+ cmd_params->vlan_id = cpu_to_le16(vlan_id);
++ cmd_params = (struct dpsw_cmd_clear_irq_status *)cmd.params;
++ cmd_params->status = cpu_to_le32(status);
++ cmd_params->irq_index = irq_index;
+
+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpsw_rsp_vlan_get_if *)cmd.params;
-+ cfg->num_ifs = le16_to_cpu(rsp_params->num_ifs);
-+ read_if_id_bitmap(cfg->if_id, &cfg->num_ifs, rsp_params->if_id);
-+
-+ return 0;
++ return mc_send_command(mc_io, &cmd);
+}
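++
++/*
++ * Sketch of the read-then-clear (W1C) pattern for pending causes;
++ * irq_index 0 is an assumption. Note that dpsw_get_irq_status() sends
++ * the initial '*status' value to the MC, so it must be zeroed first.
++ *
++ *	u32 status = 0;
++ *
++ *	err = dpsw_get_irq_status(mc_io, 0, token, 0, &status);
++ *	if (!err && status)
++ *		err = dpsw_clear_irq_status(mc_io, 0, token, 0, status);
++ */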
+
+/**
-+ * dpsw_vlan_get_if_flooding() - Get interfaces used in flooding for this VLAN
++ * dpsw_get_attributes() - Retrieve DPSW attributes
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
-+ * @vlan_id: VLAN Identifier
-+ * @cfg: Returned set of flooding interfaces
++ * @attr: Returned DPSW attributes
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
-+
-+int dpsw_vlan_get_if_flooding(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 vlan_id,
-+ struct dpsw_vlan_if_cfg *cfg)
++int dpsw_get_attributes(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ struct dpsw_attr *attr)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_vlan_get_if_flooding *cmd_params;
-+ struct dpsw_rsp_vlan_get_if_flooding *rsp_params;
++ struct fsl_mc_command cmd = { 0 };
++ struct dpsw_rsp_get_attr *rsp_params;
+ int err;
+
+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_GET_IF_FLOODING,
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_GET_ATTR,
+ cmd_flags,
+ token);
-+ cmd_params = (struct dpsw_cmd_vlan_get_if_flooding *)cmd.params;
-+ cmd_params->vlan_id = cpu_to_le16(vlan_id);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+	if (err)
+		return err;
+
+ /* retrieve response parameters */
-+ rsp_params = (struct dpsw_rsp_vlan_get_if_flooding *)cmd.params;
-+ cfg->num_ifs = le16_to_cpu(rsp_params->num_ifs);
-+ read_if_id_bitmap(cfg->if_id, &cfg->num_ifs, rsp_params->if_id);
++ rsp_params = (struct dpsw_rsp_get_attr *)cmd.params;
++ attr->num_ifs = le16_to_cpu(rsp_params->num_ifs);
++ attr->max_fdbs = rsp_params->max_fdbs;
++ attr->num_fdbs = rsp_params->num_fdbs;
++ attr->max_vlans = le16_to_cpu(rsp_params->max_vlans);
++ attr->num_vlans = le16_to_cpu(rsp_params->num_vlans);
++ attr->max_fdb_entries = le16_to_cpu(rsp_params->max_fdb_entries);
++ attr->fdb_aging_time = le16_to_cpu(rsp_params->fdb_aging_time);
++ attr->id = le32_to_cpu(rsp_params->dpsw_id);
++ attr->mem_size = le16_to_cpu(rsp_params->mem_size);
++ attr->max_fdb_mc_groups = le16_to_cpu(rsp_params->max_fdb_mc_groups);
++ attr->max_meters_per_if = rsp_params->max_meters_per_if;
++ attr->options = le64_to_cpu(rsp_params->options);
++ attr->component_type = dpsw_get_field(rsp_params->component_type,
++ COMPONENT_TYPE);
+
+ return 0;
+}
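++
++/*
++ * Sketch (assumed caller context, including 'dev'): read the attributes
++ * once after open to learn how many interfaces the object exposes.
++ *
++ *	struct dpsw_attr attr;
++ *
++ *	err = dpsw_get_attributes(mc_io, 0, token, &attr);
++ *	if (!err)
++ *		dev_info(dev, "dpsw.%d: %d interfaces, %d VLANs max\n",
++ *			 attr.id, attr.num_ifs, attr.max_vlans);
++ */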
+
+/**
-+ * dpsw_vlan_get_if_untagged() - Get interfaces that should be transmitted as
-+ * untagged
++ * dpsw_if_set_link_cfg() - Set the link configuration.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
-+ * @vlan_id: VLAN Identifier
-+ * @cfg: Returned set of untagged interfaces
++ * @if_id: Interface id
++ * @cfg: Link configuration
+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
++ * Return: '0' on Success; Error code otherwise.
+ */
-+int dpsw_vlan_get_if_untagged(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 vlan_id,
-+ struct dpsw_vlan_if_cfg *cfg)
++int dpsw_if_set_link_cfg(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 if_id,
++ struct dpsw_link_cfg *cfg)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_vlan_get_if_untagged *cmd_params;
-+ struct dpsw_rsp_vlan_get_if_untagged *rsp_params;
-+ int err;
++ struct fsl_mc_command cmd = { 0 };
++ struct dpsw_cmd_if_set_link_cfg *cmd_params;
+
+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_GET_IF_UNTAGGED,
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_LINK_CFG,
+ cmd_flags,
+ token);
-+ cmd_params = (struct dpsw_cmd_vlan_get_if_untagged *)cmd.params;
-+ cmd_params->vlan_id = cpu_to_le16(vlan_id);
++ cmd_params = (struct dpsw_cmd_if_set_link_cfg *)cmd.params;
++ cmd_params->if_id = cpu_to_le16(if_id);
++ cmd_params->rate = cpu_to_le32(cfg->rate);
++ cmd_params->options = cpu_to_le64(cfg->options);
+
+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpsw_rsp_vlan_get_if_untagged *)cmd.params;
-+ cfg->num_ifs = le16_to_cpu(rsp_params->num_ifs);
-+ read_if_id_bitmap(cfg->if_id, &cfg->num_ifs, rsp_params->if_id);
-+
-+ return 0;
++ return mc_send_command(mc_io, &cmd);
+}
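++
++/*
++ * Sketch (values are assumptions): requesting a fixed 1000 Mbps link on
++ * interface 'if_id'; autoneg would instead be requested through the
++ * DPSW_LINK_OPT_AUTONEG bit in 'options' (defined in dpsw.h).
++ *
++ *	struct dpsw_link_cfg lcfg = {
++ *		.rate = 1000,
++ *		.options = 0,
++ *	};
++ *
++ *	err = dpsw_if_set_link_cfg(mc_io, 0, token, if_id, &lcfg);
++ */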
+
+/**
-+ * dpsw_fdb_add() - Add FDB to switch and Returns handle to FDB table for
-+ * the reference
++ * dpsw_if_get_link_state() - Return the link state
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
-+ * @fdb_id: Returned Forwarding Database Identifier
-+ * @cfg: FDB Configuration
++ * @if_id: Interface id
++ * @state:	Returned link state: 1 - link up, 0 - link down or disconnected
+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
++ * Return: '0' on Success; Error code otherwise.
+ */
-+int dpsw_fdb_add(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 *fdb_id,
-+ const struct dpsw_fdb_cfg *cfg)
++int dpsw_if_get_link_state(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 if_id,
++ struct dpsw_link_state *state)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_fdb_add *cmd_params;
-+ struct dpsw_rsp_fdb_add *rsp_params;
++ struct fsl_mc_command cmd = { 0 };
++ struct dpsw_cmd_if_get_link_state *cmd_params;
++ struct dpsw_rsp_if_get_link_state *rsp_params;
+ int err;
+
+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_ADD,
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_LINK_STATE,
+ cmd_flags,
+ token);
-+ cmd_params = (struct dpsw_cmd_fdb_add *)cmd.params;
-+ cmd_params->fdb_aging_time = cpu_to_le16(cfg->fdb_aging_time);
-+ cmd_params->num_fdb_entries = cpu_to_le16(cfg->num_fdb_entries);
++ cmd_params = (struct dpsw_cmd_if_get_link_state *)cmd.params;
++ cmd_params->if_id = cpu_to_le16(if_id);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+	if (err)
+		return err;
+
+ /* retrieve response parameters */
-+ rsp_params = (struct dpsw_rsp_fdb_add *)cmd.params;
-+ *fdb_id = le16_to_cpu(rsp_params->fdb_id);
++ rsp_params = (struct dpsw_rsp_if_get_link_state *)cmd.params;
++ state->rate = le32_to_cpu(rsp_params->rate);
++ state->options = le64_to_cpu(rsp_params->options);
++ state->up = dpsw_get_field(rsp_params->up, UP);
+
+ return 0;
+}
+
+/**
-+ * dpsw_fdb_remove() - Remove FDB from switch
++ * dpsw_if_set_flooding() - Enable/disable flooding for a particular interface
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
-+ * @fdb_id: Forwarding Database Identifier
++ * @if_id: Interface Identifier
++ * @en: 1 - enable, 0 - disable
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
-+int dpsw_fdb_remove(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 fdb_id)
++int dpsw_if_set_flooding(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 if_id,
++ u8 en)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_fdb_remove *cmd_params;
++ struct fsl_mc_command cmd = { 0 };
++ struct dpsw_cmd_if_set_flooding *cmd_params;
+
+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_REMOVE,
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_FLOODING,
+ cmd_flags,
+ token);
-+ cmd_params = (struct dpsw_cmd_fdb_remove *)cmd.params;
-+ cmd_params->fdb_id = cpu_to_le16(fdb_id);
++ cmd_params = (struct dpsw_cmd_if_set_flooding *)cmd.params;
++ cmd_params->if_id = cpu_to_le16(if_id);
++ dpsw_set_field(cmd_params->enable, ENABLE, en);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
-+ * dpsw_fdb_add_unicast() - Function adds an unicast entry into MAC lookup table
++ * dpsw_if_set_broadcast() - Enable/disable broadcast for particular interface
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
-+ * @fdb_id: Forwarding Database Identifier
-+ * @cfg: Unicast entry configuration
++ * @if_id: Interface Identifier
++ * @en: 1 - enable, 0 - disable
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
-+int dpsw_fdb_add_unicast(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 fdb_id,
-+ const struct dpsw_fdb_unicast_cfg *cfg)
++int dpsw_if_set_broadcast(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 if_id,
++ u8 en)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_fdb_add_unicast *cmd_params;
-+ int i;
++ struct fsl_mc_command cmd = { 0 };
++ struct dpsw_cmd_if_set_broadcast *cmd_params;
+
+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_ADD_UNICAST,
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_BROADCAST,
+ cmd_flags,
+ token);
-+ cmd_params = (struct dpsw_cmd_fdb_add_unicast *)cmd.params;
-+ cmd_params->fdb_id = cpu_to_le16(fdb_id);
-+ cmd_params->if_egress = cpu_to_le16(cfg->if_egress);
-+ for (i = 0; i < 6; i++)
-+ cmd_params->mac_addr[i] = cfg->mac_addr[5 - i];
-+ dpsw_set_field(cmd_params->type, ENTRY_TYPE, cfg->type);
++ cmd_params = (struct dpsw_cmd_if_set_broadcast *)cmd.params;
++ cmd_params->if_id = cpu_to_le16(if_id);
++ dpsw_set_field(cmd_params->enable, ENABLE, en);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
-+ * dpsw_fdb_get_unicast() - Get unicast entry from MAC lookup table by
-+ * unicast Ethernet address
++ * dpsw_if_set_tci() - Set default VLAN Tag Control Information (TCI)
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
-+ * @fdb_id: Forwarding Database Identifier
-+ * @cfg: Returned unicast entry configuration
++ * @if_id: Interface Identifier
++ * @cfg: Tag Control Information Configuration
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
-+int dpsw_fdb_get_unicast(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 fdb_id,
-+ struct dpsw_fdb_unicast_cfg *cfg)
++int dpsw_if_set_tci(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 if_id,
++ const struct dpsw_tci_cfg *cfg)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_fdb_get_unicast *cmd_params;
-+ struct dpsw_rsp_fdb_get_unicast *rsp_params;
-+ int err, i;
++ struct fsl_mc_command cmd = { 0 };
++ struct dpsw_cmd_if_set_tci *cmd_params;
++ u16 tmp_conf = 0;
+
+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_GET_UNICAST,
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_TCI,
+ cmd_flags,
+ token);
-+ cmd_params = (struct dpsw_cmd_fdb_get_unicast *)cmd.params;
-+ cmd_params->fdb_id = cpu_to_le16(fdb_id);
-+ for (i = 0; i < 6; i++)
-+ cmd_params->mac_addr[i] = cfg->mac_addr[5 - i];
++ cmd_params = (struct dpsw_cmd_if_set_tci *)cmd.params;
++ cmd_params->if_id = cpu_to_le16(if_id);
++ dpsw_set_field(tmp_conf, VLAN_ID, cfg->vlan_id);
++ dpsw_set_field(tmp_conf, DEI, cfg->dei);
++ dpsw_set_field(tmp_conf, PCP, cfg->pcp);
++ cmd_params->conf = cpu_to_le16(tmp_conf);
+
+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpsw_rsp_fdb_get_unicast *)cmd.params;
-+ cfg->if_egress = le16_to_cpu(rsp_params->if_egress);
-+ cfg->type = dpsw_get_field(rsp_params->type, ENTRY_TYPE);
-+
-+ return 0;
++ return mc_send_command(mc_io, &cmd);
+}
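++
++/*
++ * Sketch (values are assumptions): a default TCI of VLAN 100, PCP 3,
++ * DEI 0 for interface 'if_id'. The helper packs the three fields into
++ * one 16-bit word as VLAN_ID:12 DEI:1 PCP:3 from the LSB.
++ *
++ *	struct dpsw_tci_cfg tci = {
++ *		.pcp = 3,
++ *		.dei = 0,
++ *		.vlan_id = 100,
++ *	};
++ *
++ *	err = dpsw_if_set_tci(mc_io, 0, token, if_id, &tci);
++ */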
+
+/**
-+ * dpsw_fdb_remove_unicast() - removes an entry from MAC lookup table
++ * dpsw_if_get_tci() - Get default VLAN Tag Control Information (TCI)
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
-+ * @fdb_id: Forwarding Database Identifier
-+ * @cfg: Unicast entry configuration
++ * @if_id: Interface Identifier
++ * @cfg: Tag Control Information Configuration
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
-+int dpsw_fdb_remove_unicast(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 fdb_id,
-+ const struct dpsw_fdb_unicast_cfg *cfg)
++int dpsw_if_get_tci(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 if_id,
++ struct dpsw_tci_cfg *cfg)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_fdb_remove_unicast *cmd_params;
-+ int i;
++ struct fsl_mc_command cmd = { 0 };
++ struct dpsw_cmd_if_get_tci *cmd_params;
++ struct dpsw_rsp_if_get_tci *rsp_params;
++ int err;
+
+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_REMOVE_UNICAST,
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_TCI,
+ cmd_flags,
+ token);
-+ cmd_params = (struct dpsw_cmd_fdb_remove_unicast *)cmd.params;
-+ cmd_params->fdb_id = cpu_to_le16(fdb_id);
-+ for (i = 0; i < 6; i++)
-+ cmd_params->mac_addr[i] = cfg->mac_addr[5 - i];
-+ cmd_params->if_egress = cpu_to_le16(cfg->if_egress);
-+ dpsw_set_field(cmd_params->type, ENTRY_TYPE, cfg->type);
++ cmd_params = (struct dpsw_cmd_if_get_tci *)cmd.params;
++ cmd_params->if_id = cpu_to_le16(if_id);
+
+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ rsp_params = (struct dpsw_rsp_if_get_tci *)cmd.params;
++ cfg->pcp = rsp_params->pcp;
++ cfg->dei = rsp_params->dei;
++ cfg->vlan_id = le16_to_cpu(rsp_params->vlan_id);
++
++ return 0;
+}
+
+/**
-+ * dpsw_fdb_add_multicast() - Add a set of egress interfaces to multi-cast group
++ * dpsw_if_set_stp() - Set Spanning Tree Protocol (STP) state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
-+ * @fdb_id: Forwarding Database Identifier
-+ * @cfg: Multicast entry configuration
++ * @if_id: Interface Identifier
++ * @cfg: STP State configuration parameters
+ *
-+ * If group doesn't exist, it will be created.
-+ * It adds only interfaces not belonging to this multicast group
-+ * yet, otherwise error will be generated and the command is
-+ * ignored.
-+ * This function may be called numerous times always providing
-+ * required interfaces delta.
++ * The following STP states are supported -
++ * blocking, listening, learning, forwarding and disabled.
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
-+int dpsw_fdb_add_multicast(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 fdb_id,
-+ const struct dpsw_fdb_multicast_cfg *cfg)
++int dpsw_if_set_stp(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 if_id,
++ const struct dpsw_stp_cfg *cfg)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_fdb_add_multicast *cmd_params;
-+ int i;
++ struct fsl_mc_command cmd = { 0 };
++ struct dpsw_cmd_if_set_stp *cmd_params;
+
+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_ADD_MULTICAST,
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_STP,
+ cmd_flags,
+ token);
-+ cmd_params = (struct dpsw_cmd_fdb_add_multicast *)cmd.params;
-+ cmd_params->fdb_id = cpu_to_le16(fdb_id);
-+ cmd_params->num_ifs = cpu_to_le16(cfg->num_ifs);
-+ dpsw_set_field(cmd_params->type, ENTRY_TYPE, cfg->type);
-+ build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
-+ for (i = 0; i < 6; i++)
-+ cmd_params->mac_addr[i] = cfg->mac_addr[5 - i];
++ cmd_params = (struct dpsw_cmd_if_set_stp *)cmd.params;
++ cmd_params->if_id = cpu_to_le16(if_id);
++ cmd_params->vlan_id = cpu_to_le16(cfg->vlan_id);
++ dpsw_set_field(cmd_params->state, STATE, cfg->state);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
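++
++/*
++ * Sketch (values are assumptions): moving interface 'if_id' to the
++ * forwarding state on VLAN 1, using one of the states from
++ * enum dpsw_stp_state in dpsw.h.
++ *
++ *	struct dpsw_stp_cfg stp_cfg = {
++ *		.vlan_id = 1,
++ *		.state = DPSW_STP_STATE_FORWARDING,
++ *	};
++ *
++ *	err = dpsw_if_set_stp(mc_io, 0, token, if_id, &stp_cfg);
++ */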
+
+/**
-+ * dpsw_fdb_get_multicast() - Reading multi-cast group by multi-cast Ethernet
-+ * address.
++ * dpsw_if_get_counter() - Get a specific counter of a particular interface
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
-+ * @fdb_id: Forwarding Database Identifier
-+ * @cfg: Returned multicast entry configuration
++ * @if_id: Interface Identifier
++ * @type: Counter type
++ * @counter:	Returned counter value
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
-+int dpsw_fdb_get_multicast(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 fdb_id,
-+ struct dpsw_fdb_multicast_cfg *cfg)
++int dpsw_if_get_counter(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 if_id,
++ enum dpsw_counter type,
++ u64 *counter)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_fdb_get_multicast *cmd_params;
-+ struct dpsw_rsp_fdb_get_multicast *rsp_params;
-+ int err, i;
++ struct fsl_mc_command cmd = { 0 };
++ struct dpsw_cmd_if_get_counter *cmd_params;
++ struct dpsw_rsp_if_get_counter *rsp_params;
++ int err;
+
+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_GET_MULTICAST,
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_COUNTER,
+ cmd_flags,
+ token);
-+ cmd_params = (struct dpsw_cmd_fdb_get_multicast *)cmd.params;
-+ cmd_params->fdb_id = cpu_to_le16(fdb_id);
-+ for (i = 0; i < 6; i++)
-+ cmd_params->mac_addr[i] = cfg->mac_addr[5 - i];
++ cmd_params = (struct dpsw_cmd_if_get_counter *)cmd.params;
++ cmd_params->if_id = cpu_to_le16(if_id);
++ dpsw_set_field(cmd_params->type, COUNTER_TYPE, type);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+	if (err)
+		return err;
+
+ /* retrieve response parameters */
-+ rsp_params = (struct dpsw_rsp_fdb_get_multicast *)cmd.params;
-+ cfg->num_ifs = le16_to_cpu(rsp_params->num_ifs);
-+ cfg->type = dpsw_get_field(rsp_params->type, ENTRY_TYPE);
-+ read_if_id_bitmap(cfg->if_id, &cfg->num_ifs, rsp_params->if_id);
++ rsp_params = (struct dpsw_rsp_if_get_counter *)cmd.params;
++ *counter = le64_to_cpu(rsp_params->counter);
+
+ return 0;
+}
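++
++/*
++ * Sketch (assumed context): reading the ingress frame count for one
++ * interface; DPSW_CNT_ING_FRAME is one of the enum dpsw_counter values
++ * from dpsw.h.
++ *
++ *	u64 frames = 0;
++ *
++ *	err = dpsw_if_get_counter(mc_io, 0, token, if_id,
++ *				  DPSW_CNT_ING_FRAME, &frames);
++ */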
+
+/**
-+ * dpsw_fdb_remove_multicast() - Removing interfaces from an existing multicast
-+ * group.
++ * dpsw_if_enable() - Enable Interface
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
-+ * @fdb_id: Forwarding Database Identifier
-+ * @cfg: Multicast entry configuration
-+ *
-+ * Interfaces provided by this API have to exist in the group,
-+ * otherwise an error will be returned and an entire command
-+ * ignored. If there is no interface left in the group,
-+ * an entire group is deleted
++ * @if_id: Interface Identifier
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
-+int dpsw_fdb_remove_multicast(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 fdb_id,
-+ const struct dpsw_fdb_multicast_cfg *cfg)
++int dpsw_if_enable(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 if_id)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_fdb_remove_multicast *cmd_params;
-+ int i;
++ struct fsl_mc_command cmd = { 0 };
++ struct dpsw_cmd_if *cmd_params;
+
+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_REMOVE_MULTICAST,
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_ENABLE,
+ cmd_flags,
+ token);
-+ cmd_params = (struct dpsw_cmd_fdb_remove_multicast *)cmd.params;
-+ cmd_params->fdb_id = cpu_to_le16(fdb_id);
-+ cmd_params->num_ifs = cpu_to_le16(cfg->num_ifs);
-+ dpsw_set_field(cmd_params->type, ENTRY_TYPE, cfg->type);
-+ build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
-+ for (i = 0; i < 6; i++)
-+ cmd_params->mac_addr[i] = cfg->mac_addr[5 - i];
++ cmd_params = (struct dpsw_cmd_if *)cmd.params;
++ cmd_params->if_id = cpu_to_le16(if_id);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
-+ * dpsw_fdb_set_learning_mode() - Define FDB learning mode
++ * dpsw_if_disable() - Disable Interface
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
-+ * @fdb_id: Forwarding Database Identifier
-+ * @mode: Learning mode
++ * @if_id: Interface Identifier
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
-+int dpsw_fdb_set_learning_mode(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 fdb_id,
-+ enum dpsw_fdb_learning_mode mode)
++int dpsw_if_disable(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 if_id)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_fdb_set_learning_mode *cmd_params;
++ struct fsl_mc_command cmd = { 0 };
++ struct dpsw_cmd_if *cmd_params;
+
+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_SET_LEARNING_MODE,
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_DISABLE,
+ cmd_flags,
+ token);
-+ cmd_params = (struct dpsw_cmd_fdb_set_learning_mode *)cmd.params;
-+ cmd_params->fdb_id = cpu_to_le16(fdb_id);
-+ dpsw_set_field(cmd_params->mode, LEARNING_MODE, mode);
++ cmd_params = (struct dpsw_cmd_if *)cmd.params;
++ cmd_params->if_id = cpu_to_le16(if_id);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
-+ * dpsw_fdb_get_attributes() - Get FDB attributes
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @fdb_id: Forwarding Database Identifier
-+ * @attr: Returned FDB attributes
++ * dpsw_if_set_max_frame_length() - Set Maximum Receive frame length.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @if_id: Interface Identifier
++ * @frame_length: Maximum Frame Length
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
-+int dpsw_fdb_get_attributes(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 fdb_id,
-+ struct dpsw_fdb_attr *attr)
++int dpsw_if_set_max_frame_length(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 if_id,
++ u16 frame_length)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_fdb_get_attr *cmd_params;
-+ struct dpsw_rsp_fdb_get_attr *rsp_params;
-+ int err;
++ struct fsl_mc_command cmd = { 0 };
++ struct dpsw_cmd_if_set_max_frame_length *cmd_params;
+
+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_GET_ATTR,
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_MAX_FRAME_LENGTH,
+ cmd_flags,
+ token);
-+ cmd_params = (struct dpsw_cmd_fdb_get_attr *)cmd.params;
-+ cmd_params->fdb_id = cpu_to_le16(fdb_id);
++ cmd_params = (struct dpsw_cmd_if_set_max_frame_length *)cmd.params;
++ cmd_params->if_id = cpu_to_le16(if_id);
++ cmd_params->frame_length = cpu_to_le16(frame_length);
+
+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpsw_rsp_fdb_get_attr *)cmd.params;
-+ attr->max_fdb_entries = le16_to_cpu(rsp_params->max_fdb_entries);
-+ attr->fdb_aging_time = le16_to_cpu(rsp_params->fdb_aging_time);
-+ attr->learning_mode = dpsw_get_field(rsp_params->learning_mode,
-+ LEARNING_MODE);
-+ attr->num_fdb_mc_groups = le16_to_cpu(rsp_params->num_fdb_mc_groups);
-+ attr->max_fdb_mc_groups = le16_to_cpu(rsp_params->max_fdb_mc_groups);
-+
-+ return 0;
++ return mc_send_command(mc_io, &cmd);
+}
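++
++/*
++ * Sketch (assumed values): sizing the maximum receive frame for a
++ * 1500-byte MTU plus the Ethernet header, one VLAN tag and the FCS.
++ *
++ *	err = dpsw_if_set_max_frame_length(mc_io, 0, token, if_id,
++ *					   1500 + ETH_HLEN + VLAN_HLEN +
++ *					   ETH_FCS_LEN);
++ */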
+
+/**
-+ * dpsw_acl_add() - Adds ACL to L2 switch.
++ * dpsw_vlan_add() - Add a new VLAN to the DPSW.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
-+ * @acl_id: Returned ACL ID, for the future reference
-+ * @cfg: ACL configuration
++ * @vlan_id: VLAN Identifier
++ * @cfg: VLAN configuration
+ *
-+ * Create Access Control List. Multiple ACLs can be created and
-+ * co-exist in L2 switch
++ * Only VLAN ID and FDB ID are required parameters here.
++ * 12 bit VLAN ID is defined in IEEE802.1Q.
++ * Adding a duplicate VLAN ID is not allowed.
++ * FDB ID can be shared across multiple VLANs. Shared learning
++ * is obtained by calling dpsw_vlan_add() for multiple VLAN IDs
++ * with the same fdb_id.
+ *
-+ * Return: '0' on Success; Error code otherwise.
++ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
-+int dpsw_acl_add(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 *acl_id,
-+ const struct dpsw_acl_cfg *cfg)
++int dpsw_vlan_add(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 vlan_id,
++ const struct dpsw_vlan_cfg *cfg)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_acl_add *cmd_params;
-+ struct dpsw_rsp_acl_add *rsp_params;
-+ int err;
++ struct fsl_mc_command cmd = { 0 };
++ struct dpsw_vlan_add *cmd_params;
+
+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_ADD,
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_ADD,
+ cmd_flags,
+ token);
-+ cmd_params = (struct dpsw_cmd_acl_add *)cmd.params;
-+ cmd_params->max_entries = cpu_to_le16(cfg->max_entries);
++ cmd_params = (struct dpsw_vlan_add *)cmd.params;
++ cmd_params->fdb_id = cpu_to_le16(cfg->fdb_id);
++ cmd_params->vlan_id = cpu_to_le16(vlan_id);
+
+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpsw_rsp_acl_add *)cmd.params;
-+ *acl_id = le16_to_cpu(rsp_params->acl_id);
-+
-+ return 0;
++ return mc_send_command(mc_io, &cmd);
+}
+
+/**
-+ * dpsw_acl_remove() - Removes ACL from L2 switch.
++ * dpsw_vlan_add_if() - Add a set of interfaces to an existing VLAN.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
-+ * @acl_id: ACL ID
++ * @vlan_id: VLAN Identifier
++ * @cfg: Set of interfaces to add
+ *
-+ * Return: '0' on Success; Error code otherwise.
++ * It adds only interfaces that do not yet belong to this VLAN;
++ * otherwise an error is generated and the entire command is
++ * ignored. This function can be called numerous times, each time
++ * providing only the delta of interfaces required.
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
-+int dpsw_acl_remove(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 acl_id)
++int dpsw_vlan_add_if(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 vlan_id,
++ const struct dpsw_vlan_if_cfg *cfg)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_acl_remove *cmd_params;
++ struct fsl_mc_command cmd = { 0 };
++ struct dpsw_cmd_vlan_manage_if *cmd_params;
+
+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_REMOVE,
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_ADD_IF,
+ cmd_flags,
+ token);
-+ cmd_params = (struct dpsw_cmd_acl_remove *)cmd.params;
-+ cmd_params->acl_id = cpu_to_le16(acl_id);
++ cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params;
++ cmd_params->vlan_id = cpu_to_le16(vlan_id);
++ build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
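++
++/*
++ * Sketch (ids are assumptions): create VLAN 2 backed by FDB 0, then add
++ * interfaces 0 and 1 to it; dpsw_vlan_add() must succeed before the
++ * interfaces can be added.
++ *
++ *	struct dpsw_vlan_cfg vcfg = { .fdb_id = 0 };
++ *	struct dpsw_vlan_if_cfg ifcfg = {
++ *		.num_ifs = 2,
++ *		.if_id = { 0, 1 },
++ *	};
++ *
++ *	err = dpsw_vlan_add(mc_io, 0, token, 2, &vcfg);
++ *	if (!err)
++ *		err = dpsw_vlan_add_if(mc_io, 0, token, 2, &ifcfg);
++ */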
+
+/**
-+ * dpsw_acl_prepare_entry_cfg() - Set an entry to ACL.
-+ * @key: Key
-+ * @entry_cfg_buf: Zeroed 256 bytes of memory before mapping it to DMA
-+ *
-+ * This function has to be called before adding or removing acl_entry
-+ *
-+ */
-+void dpsw_acl_prepare_entry_cfg(const struct dpsw_acl_key *key,
-+ u8 *entry_cfg_buf)
-+{
-+ struct dpsw_prep_acl_entry *ext_params;
-+ int i;
-+
-+ ext_params = (struct dpsw_prep_acl_entry *)entry_cfg_buf;
-+
-+ for (i = 0; i < 6; i++) {
-+ ext_params->match_l2_dest_mac[i] =
-+ key->match.l2_dest_mac[5 - i];
-+ ext_params->match_l2_source_mac[i] =
-+ key->match.l2_source_mac[5 - i];
-+ ext_params->mask_l2_dest_mac[i] =
-+ key->mask.l2_dest_mac[5 - i];
-+ ext_params->mask_l2_source_mac[i] =
-+ key->mask.l2_source_mac[5 - i];
-+ }
-+
-+ ext_params->match_l2_tpid = cpu_to_le16(key->match.l2_tpid);
-+ ext_params->match_l2_vlan_id = cpu_to_le16(key->match.l2_vlan_id);
-+ ext_params->match_l3_dest_ip = cpu_to_le32(key->match.l3_dest_ip);
-+ ext_params->match_l3_source_ip = cpu_to_le32(key->match.l3_source_ip);
-+ ext_params->match_l4_dest_port = cpu_to_le16(key->match.l4_dest_port);
-+ ext_params->match_l2_ether_type = cpu_to_le16(key->match.l2_ether_type);
-+ ext_params->match_l2_pcp_dei = key->match.l2_pcp_dei;
-+ ext_params->match_l3_dscp = key->match.l3_dscp;
-+ ext_params->match_l4_source_port =
-+ cpu_to_le16(key->match.l4_source_port);
-+
-+ ext_params->mask_l2_tpid = cpu_to_le16(key->mask.l2_tpid);
-+ ext_params->mask_l2_vlan_id = cpu_to_le16(key->mask.l2_vlan_id);
-+ ext_params->mask_l3_dest_ip = cpu_to_le32(key->mask.l3_dest_ip);
-+ ext_params->mask_l3_source_ip = cpu_to_le32(key->mask.l3_source_ip);
-+ ext_params->mask_l4_dest_port = cpu_to_le16(key->mask.l4_dest_port);
-+ ext_params->mask_l4_source_port = cpu_to_le16(key->mask.l4_source_port);
-+ ext_params->mask_l2_ether_type = cpu_to_le16(key->mask.l2_ether_type);
-+ ext_params->mask_l2_pcp_dei = key->mask.l2_pcp_dei;
-+ ext_params->mask_l3_dscp = key->mask.l3_dscp;
-+ ext_params->match_l3_protocol = key->match.l3_protocol;
-+ ext_params->mask_l3_protocol = key->mask.l3_protocol;
-+}
-+
-+/**
-+ * dpsw_acl_add_entry() - Adds an entry to ACL.
++ * dpsw_vlan_add_if_untagged() - Define a set of interfaces to be
++ * transmitted as untagged.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
-+ * @acl_id: ACL ID
-+ * @cfg: Entry configuration
++ * @vlan_id: VLAN Identifier
++ * @cfg: Set of interfaces that should be transmitted as untagged
+ *
-+ * warning: This function has to be called after dpsw_acl_set_entry_cfg()
++ * These interfaces should already belong to this VLAN.
++ * By default, all interfaces are transmitted as tagged.
++ * Providing a non-existent interface, or an interface that is
++ * already configured as untagged, generates an error and the
++ * entire command is ignored.
+ *
-+ * Return: '0' on Success; Error code otherwise.
++ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
-+int dpsw_acl_add_entry(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 acl_id,
-+ const struct dpsw_acl_entry_cfg *cfg)
++int dpsw_vlan_add_if_untagged(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 vlan_id,
++ const struct dpsw_vlan_if_cfg *cfg)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_acl_entry *cmd_params;
++ struct fsl_mc_command cmd = { 0 };
++ struct dpsw_cmd_vlan_manage_if *cmd_params;
+
+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_ADD_ENTRY,
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_ADD_IF_UNTAGGED,
+ cmd_flags,
+ token);
-+ cmd_params = (struct dpsw_cmd_acl_entry *)cmd.params;
-+ cmd_params->acl_id = cpu_to_le16(acl_id);
-+ cmd_params->result_if_id = cpu_to_le16(cfg->result.if_id);
-+ cmd_params->precedence = cpu_to_le32(cfg->precedence);
-+ dpsw_set_field(cmd_params->result_action, RESULT_ACTION,
-+ cfg->result.action);
-+ cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
++ cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params;
++ cmd_params->vlan_id = cpu_to_le16(vlan_id);
++ build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
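
Aside: because an interface must already be a VLAN member before it can
be marked untagged, the two calls pair up as in this sketch (same
hypothetical `vcfg` as above):

	err = dpsw_vlan_add_if(mc_io, 0, dpsw_token, 100, &vcfg);
	if (!err)
		err = dpsw_vlan_add_if_untagged(mc_io, 0, dpsw_token,
						100, &vcfg);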
+
+/**
-+ * dpsw_acl_remove_entry() - Removes an entry from ACL.
++ * dpsw_vlan_remove_if() - Remove interfaces from an existing VLAN.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
-+ * @acl_id: ACL ID
-+ * @cfg: Entry configuration
++ * @vlan_id: VLAN Identifier
++ * @cfg: Set of interfaces that should be removed
+ *
-+ * warning: This function has to be called after dpsw_acl_set_entry_cfg()
++ * Interfaces must belong to this VLAN, otherwise an error
++ * is returned and the command is ignored.
+ *
-+ * Return: '0' on Success; Error code otherwise.
++ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
-+int dpsw_acl_remove_entry(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 acl_id,
-+ const struct dpsw_acl_entry_cfg *cfg)
++int dpsw_vlan_remove_if(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 vlan_id,
++ const struct dpsw_vlan_if_cfg *cfg)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_acl_entry *cmd_params;
++ struct fsl_mc_command cmd = { 0 };
++ struct dpsw_cmd_vlan_manage_if *cmd_params;
+
+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_REMOVE_ENTRY,
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_REMOVE_IF,
+ cmd_flags,
+ token);
-+ cmd_params = (struct dpsw_cmd_acl_entry *)cmd.params;
-+ cmd_params->acl_id = cpu_to_le16(acl_id);
-+ cmd_params->result_if_id = cpu_to_le16(cfg->result.if_id);
-+ cmd_params->precedence = cpu_to_le32(cfg->precedence);
-+ dpsw_set_field(cmd_params->result_action, RESULT_ACTION,
-+ cfg->result.action);
-+ cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
++ cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params;
++ cmd_params->vlan_id = cpu_to_le16(vlan_id);
++ build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
-+ * dpsw_acl_add_if() - Associate interface/interfaces with ACL.
++ * dpsw_vlan_remove_if_untagged() - Define a set of interfaces that should be
++ * converted from transmitting as untagged to transmitting as tagged.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
-+ * @acl_id: ACL ID
-+ * @cfg: Interfaces list
++ * @vlan_id: VLAN Identifier
++ * @cfg: Set of interfaces to no longer transmit as untagged
+ *
-+ * Return: '0' on Success; Error code otherwise.
++ * Interfaces provided to this API have to belong to this VLAN
++ * and be configured as untagged; otherwise an error is returned
++ * and the command is ignored.
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
-+int dpsw_acl_add_if(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 acl_id,
-+ const struct dpsw_acl_if_cfg *cfg)
++int dpsw_vlan_remove_if_untagged(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 vlan_id,
++ const struct dpsw_vlan_if_cfg *cfg)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_acl_if *cmd_params;
++ struct fsl_mc_command cmd = { 0 };
++ struct dpsw_cmd_vlan_manage_if *cmd_params;
+
+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_ADD_IF,
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_REMOVE_IF_UNTAGGED,
+ cmd_flags,
+ token);
-+ cmd_params = (struct dpsw_cmd_acl_if *)cmd.params;
-+ cmd_params->acl_id = cpu_to_le16(acl_id);
-+ cmd_params->num_ifs = cpu_to_le16(cfg->num_ifs);
++ cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params;
++ cmd_params->vlan_id = cpu_to_le16(vlan_id);
+ build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
+
+ /* send command to mc*/
+	return mc_send_command(mc_io, &cmd);
+}
+
+/**
-+ * dpsw_acl_remove_if() - De-associate interface/interfaces from ACL.
++ * dpsw_vlan_remove() - Remove an entire VLAN
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
-+ * @acl_id: ACL ID
-+ * @cfg: Interfaces list
++ * @vlan_id: VLAN Identifier
+ *
-+ * Return: '0' on Success; Error code otherwise.
++ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
-+int dpsw_acl_remove_if(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 acl_id,
-+ const struct dpsw_acl_if_cfg *cfg)
++int dpsw_vlan_remove(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 vlan_id)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_acl_if *cmd_params;
++ struct fsl_mc_command cmd = { 0 };
++ struct dpsw_cmd_vlan_remove *cmd_params;
+
+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_REMOVE_IF,
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_REMOVE,
+ cmd_flags,
+ token);
-+ cmd_params = (struct dpsw_cmd_acl_if *)cmd.params;
-+ cmd_params->acl_id = cpu_to_le16(acl_id);
-+ cmd_params->num_ifs = cpu_to_le16(cfg->num_ifs);
-+ build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
++ cmd_params = (struct dpsw_cmd_vlan_remove *)cmd.params;
++ cmd_params->vlan_id = cpu_to_le16(vlan_id);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
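
Aside: a sketch of the reverse teardown order implied by the three
remove calls above (hypothetical `mc_io`/`dpsw_token`/`port_idx`):

	struct dpsw_vlan_if_cfg vcfg = {
		.num_ifs = 1,
		.if_id[0] = port_idx,	/* hypothetical interface index */
	};
	int err;

	/* untagged attribute first, then membership, then the VLAN */
	err = dpsw_vlan_remove_if_untagged(mc_io, 0, dpsw_token, 100, &vcfg);
	if (!err)
		err = dpsw_vlan_remove_if(mc_io, 0, dpsw_token, 100, &vcfg);
	if (!err)
		err = dpsw_vlan_remove(mc_io, 0, dpsw_token, 100);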
+
+/**
-+ * dpsw_acl_get_attributes() - Get specific counter of particular interface
++ * dpsw_fdb_add_unicast() - Add a unicast entry to the MAC lookup table
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
-+ * @acl_id: ACL Identifier
-+ * @attr: Returned ACL attributes
++ * @fdb_id: Forwarding Database Identifier
++ * @cfg: Unicast entry configuration
+ *
-+ * Return: '0' on Success; Error code otherwise.
++ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
-+int dpsw_acl_get_attributes(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 acl_id,
-+ struct dpsw_acl_attr *attr)
++int dpsw_fdb_add_unicast(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 fdb_id,
++ const struct dpsw_fdb_unicast_cfg *cfg)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_acl_get_attr *cmd_params;
-+ struct dpsw_rsp_acl_get_attr *rsp_params;
-+ int err;
++ struct fsl_mc_command cmd = { 0 };
++ struct dpsw_cmd_fdb_unicast_op *cmd_params;
++ int i;
+
+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_GET_ATTR,
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_ADD_UNICAST,
+ cmd_flags,
+ token);
-+ cmd_params = (struct dpsw_cmd_acl_get_attr *)cmd.params;
-+ cmd_params->acl_id = cpu_to_le16(acl_id);
++ cmd_params = (struct dpsw_cmd_fdb_unicast_op *)cmd.params;
++ cmd_params->fdb_id = cpu_to_le16(fdb_id);
++ cmd_params->if_egress = cpu_to_le16(cfg->if_egress);
++ for (i = 0; i < 6; i++)
++ cmd_params->mac_addr[i] = cfg->mac_addr[5 - i];
++ dpsw_set_field(cmd_params->type, ENTRY_TYPE, cfg->type);
+
+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpsw_rsp_acl_get_attr *)cmd.params;
-+ attr->max_entries = le16_to_cpu(rsp_params->max_entries);
-+ attr->num_entries = le16_to_cpu(rsp_params->num_entries);
-+ attr->num_ifs = le16_to_cpu(rsp_params->num_ifs);
-+
-+ return 0;
++ return mc_send_command(mc_io, &cmd);
+}
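
Aside: a sketch of pinning a static unicast entry. Note that the
binding reverses the MAC bytes into the MC command format (the loop
above), so callers pass the address in normal order:

	struct dpsw_fdb_unicast_cfg ucfg = {
		.type      = DPSW_FDB_ENTRY_STATIC,
		.if_egress = 2,	/* hypothetical egress interface */
		.mac_addr  = { 0x00, 0x04, 0x9f, 0x01, 0x02, 0x03 },
	};
	int err;

	err = dpsw_fdb_add_unicast(mc_io, 0, dpsw_token, 0, &ucfg);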
+
+/**
-+ * dpsw_ctrl_if_get_attributes() - Obtain control interface attributes
++ * dpsw_fdb_remove_unicast() - Remove an entry from the MAC lookup table
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
-+ * @attr: Returned control interface attributes
++ * @fdb_id: Forwarding Database Identifier
++ * @cfg: Unicast entry configuration
+ *
-+ * Return: '0' on Success; Error code otherwise.
++ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
-+int dpsw_ctrl_if_get_attributes(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ struct dpsw_ctrl_if_attr *attr)
++int dpsw_fdb_remove_unicast(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 fdb_id,
++ const struct dpsw_fdb_unicast_cfg *cfg)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_rsp_ctrl_if_get_attr *rsp_params;
-+ int err;
++ struct fsl_mc_command cmd = { 0 };
++ struct dpsw_cmd_fdb_unicast_op *cmd_params;
++ int i;
+
+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CTRL_IF_GET_ATTR,
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_REMOVE_UNICAST,
+ cmd_flags,
+ token);
++ cmd_params = (struct dpsw_cmd_fdb_unicast_op *)cmd.params;
++ cmd_params->fdb_id = cpu_to_le16(fdb_id);
++ for (i = 0; i < 6; i++)
++ cmd_params->mac_addr[i] = cfg->mac_addr[5 - i];
++ cmd_params->if_egress = cpu_to_le16(cfg->if_egress);
++ dpsw_set_field(cmd_params->type, ENTRY_TYPE, cfg->type);
+
+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpsw_rsp_ctrl_if_get_attr *)cmd.params;
-+ attr->rx_fqid = le32_to_cpu(rsp_params->rx_fqid);
-+ attr->rx_err_fqid = le32_to_cpu(rsp_params->rx_err_fqid);
-+ attr->tx_err_conf_fqid = le32_to_cpu(rsp_params->tx_err_conf_fqid);
-+
-+ return 0;
++ return mc_send_command(mc_io, &cmd);
+}
+
+/**
-+ * dpsw_ctrl_if_set_pools() - Set control interface buffer pools
++ * dpsw_fdb_add_multicast() - Add a set of egress interfaces to a multicast group
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
-+ * @cfg: Buffer pools configuration
++ * @fdb_id: Forwarding Database Identifier
++ * @cfg: Multicast entry configuration
+ *
-+ * Return: '0' on Success; Error code otherwise.
++ * If the group does not exist, it is created.
++ * It adds only interfaces that do not yet belong to this multicast
++ * group; otherwise an error is generated and the command is
++ * ignored.
++ * This function may be called repeatedly, each time providing
++ * only the delta of interfaces to add.
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
-+int dpsw_ctrl_if_set_pools(struct fsl_mc_io *mc_io,
++int dpsw_fdb_add_multicast(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
-+ const struct dpsw_ctrl_if_pools_cfg *pools)
++ u16 fdb_id,
++ const struct dpsw_fdb_multicast_cfg *cfg)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_ctrl_if_set_pools *cmd_params;
++ struct fsl_mc_command cmd = { 0 };
++ struct dpsw_cmd_fdb_multicast_op *cmd_params;
+ int i;
+
+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CTRL_IF_SET_POOLS,
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_ADD_MULTICAST,
+ cmd_flags,
+ token);
-+ cmd_params = (struct dpsw_cmd_ctrl_if_set_pools *)cmd.params;
-+ cmd_params->num_dpbp = pools->num_dpbp;
-+ for (i = 0; i < 8; i++) {
-+ cmd_params->backup_pool = dpsw_set_bit(cmd_params->backup_pool,
-+ i,
-+ pools->pools[i].backup_pool);
-+ cmd_params->buffer_size[i] =
-+ cpu_to_le16(pools->pools[i].buffer_size);
-+ cmd_params->dpbp_id[i] =
-+ cpu_to_le32(pools->pools[i].dpbp_id);
-+ }
++ cmd_params = (struct dpsw_cmd_fdb_multicast_op *)cmd.params;
++ cmd_params->fdb_id = cpu_to_le16(fdb_id);
++ cmd_params->num_ifs = cpu_to_le16(cfg->num_ifs);
++ dpsw_set_field(cmd_params->type, ENTRY_TYPE, cfg->type);
++ build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
++ for (i = 0; i < 6; i++)
++ cmd_params->mac_addr[i] = cfg->mac_addr[5 - i];
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
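
Aside: a sketch of building a multicast group incrementally; the group
is created on first use and later calls only list the delta
(hypothetical interface ids and group address):

	struct dpsw_fdb_multicast_cfg mcfg = {
		.type     = DPSW_FDB_ENTRY_STATIC,
		.num_ifs  = 2,
		.if_id    = { 1, 3 },	/* hypothetical member interfaces */
		.mac_addr = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb },
	};
	int err;

	err = dpsw_fdb_add_multicast(mc_io, 0, dpsw_token, 0, &mcfg);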
+
+/**
-+ * dpsw_ctrl_if_enable() - Enable control interface
++ * dpsw_fdb_remove_multicast() - Remove interfaces from an existing multicast
++ * group.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
++ * @fdb_id: Forwarding Database Identifier
++ * @cfg: Multicast entry configuration
+ *
-+ * Return: '0' on Success; Error code otherwise.
++ * Interfaces provided to this API have to exist in the group;
++ * otherwise an error is returned and the entire command is
++ * ignored. If no interface is left in the group,
++ * the entire group is deleted.
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
-+int dpsw_ctrl_if_enable(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token)
++int dpsw_fdb_remove_multicast(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 fdb_id,
++ const struct dpsw_fdb_multicast_cfg *cfg)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
++ struct dpsw_cmd_fdb_multicast_op *cmd_params;
++ int i;
+
+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CTRL_IF_ENABLE,
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_REMOVE_MULTICAST,
+ cmd_flags,
+ token);
++ cmd_params = (struct dpsw_cmd_fdb_multicast_op *)cmd.params;
++ cmd_params->fdb_id = cpu_to_le16(fdb_id);
++ cmd_params->num_ifs = cpu_to_le16(cfg->num_ifs);
++ dpsw_set_field(cmd_params->type, ENTRY_TYPE, cfg->type);
++ build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
++ for (i = 0; i < 6; i++)
++ cmd_params->mac_addr[i] = cfg->mac_addr[5 - i];
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
-+ * dpsw_ctrl_if_disable() - Function disables control interface
++ * dpsw_fdb_set_learning_mode() - Define FDB learning mode
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
++ * @fdb_id: Forwarding Database Identifier
++ * @mode: Learning mode
+ *
-+ * Return: '0' on Success; Error code otherwise.
++ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
-+int dpsw_ctrl_if_disable(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token)
++int dpsw_fdb_set_learning_mode(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 fdb_id,
++ enum dpsw_fdb_learning_mode mode)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
++ struct dpsw_cmd_fdb_set_learning_mode *cmd_params;
+
+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CTRL_IF_DISABLE,
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_SET_LEARNING_MODE,
+ cmd_flags,
+ token);
++ cmd_params = (struct dpsw_cmd_fdb_set_learning_mode *)cmd.params;
++ cmd_params->fdb_id = cpu_to_le16(fdb_id);
++ dpsw_set_field(cmd_params->mode, LEARNING_MODE, mode);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+ u16 *major_ver,
+ u16 *minor_ver)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_rsp_get_api_version *rsp_params;
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_GET_API_VERSION,
-+ cmd_flags,
-+ 0);
++ cmd_flags,
++ 0);
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+}
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/ethsw/dpsw.h
-@@ -0,0 +1,1269 @@
-+/* Copyright 2013-2015 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
+@@ -0,0 +1,592 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ * Copyright 2013-2016 Freescale Semiconductor, Inc.
++ * Copyright 2017-2018 NXP
+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
+ */
++
+#ifndef __FSL_DPSW_H
+#define __FSL_DPSW_H
+
+ } adv;
+};
+
-+int dpsw_create(struct fsl_mc_io *mc_io,
-+ u16 dprc_token,
-+ u32 cmd_flags,
-+ const struct dpsw_cfg *cfg,
-+ u32 *obj_id);
-+
-+int dpsw_destroy(struct fsl_mc_io *mc_io,
-+ u16 dprc_token,
-+ u32 cmd_flags,
-+ u32 object_id);
-+
+int dpsw_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+ u32 cmd_flags,
+ u16 token);
+
-+int dpsw_is_enabled(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ int *en);
-+
+int dpsw_reset(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+ int irq_num;
+};
+
-+int dpsw_set_irq(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 irq_index,
-+ struct dpsw_irq_cfg *irq_cfg);
-+
-+int dpsw_get_irq(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 irq_index,
-+ int *type,
-+ struct dpsw_irq_cfg *irq_cfg);
-+
+int dpsw_set_irq_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u8 en);
+
-+int dpsw_get_irq_enable(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 irq_index,
-+ u8 *en);
-+
+int dpsw_set_irq_mask(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 mask);
+
-+int dpsw_get_irq_mask(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 irq_index,
-+ u32 *mask);
-+
+int dpsw_get_irq_status(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 token,
+ struct dpsw_attr *attr);
+
-+int dpsw_set_reflection_if(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id);
-+
+/**
+ * enum dpsw_action - Action selection for special/control frames
+ * @DPSW_ACTION_DROP: Drop frame
+struct dpsw_link_state {
+ u32 rate;
+ u64 options;
-+ int up;
++ u8 up;
+};
+
+int dpsw_if_get_link_state(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 if_id,
-+ int en);
++ u8 en);
+
+int dpsw_if_set_broadcast(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 if_id,
-+ int en);
-+
-+int dpsw_if_set_multicast(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ int en);
++ u8 en);
+
+/**
-+ * struct dpsw_tci_cfg - Tag Contorl Information (TCI) configuration
++ * struct dpsw_tci_cfg - Tag Control Information (TCI) configuration
+ * @pcp: Priority Code Point (PCP): a 3-bit field which refers
+ * to the IEEE 802.1p priority
+ * @dei: Drop Eligible Indicator (DEI): a 1-bit field. May be used
+
+int dpsw_if_get_tci(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ struct dpsw_tci_cfg *cfg);
-+
-+/**
-+ * enum dpsw_stp_state - Spanning Tree Protocol (STP) states
-+ * @DPSW_STP_STATE_BLOCKING: Blocking state
-+ * @DPSW_STP_STATE_LISTENING: Listening state
-+ * @DPSW_STP_STATE_LEARNING: Learning state
-+ * @DPSW_STP_STATE_FORWARDING: Forwarding state
-+ *
-+ */
-+enum dpsw_stp_state {
-+ DPSW_STP_STATE_BLOCKING = 0,
-+ DPSW_STP_STATE_LISTENING = 1,
-+ DPSW_STP_STATE_LEARNING = 2,
-+ DPSW_STP_STATE_FORWARDING = 3
-+};
-+
-+/**
-+ * struct dpsw_stp_cfg - Spanning Tree Protocol (STP) Configuration
-+ * @vlan_id: VLAN ID STP state
-+ * @state: STP state
-+ */
-+struct dpsw_stp_cfg {
-+ u16 vlan_id;
-+ enum dpsw_stp_state state;
-+};
-+
-+int dpsw_if_set_stp(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ const struct dpsw_stp_cfg *cfg);
-+
-+/**
-+ * enum dpsw_accepted_frames - Types of frames to accept
-+ * @DPSW_ADMIT_ALL: The device accepts VLAN tagged, untagged and
-+ * priority tagged frames
-+ * @DPSW_ADMIT_ONLY_VLAN_TAGGED: The device discards untagged frames or
-+ * Priority-Tagged frames received on this interface.
-+ *
-+ */
-+enum dpsw_accepted_frames {
-+ DPSW_ADMIT_ALL = 1,
-+ DPSW_ADMIT_ONLY_VLAN_TAGGED = 3
-+};
-+
-+/**
-+ * struct dpsw_accepted_frames_cfg - Types of frames to accept configuration
-+ * @type: Defines ingress accepted frames
-+ * @unaccept_act: When a frame is not accepted, it may be discarded or
-+ * redirected to control interface depending on this mode
-+ */
-+struct dpsw_accepted_frames_cfg {
-+ enum dpsw_accepted_frames type;
-+ enum dpsw_action unaccept_act;
-+};
-+
-+int dpsw_if_set_accepted_frames(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ const struct dpsw_accepted_frames_cfg *cfg);
-+
-+int dpsw_if_set_accept_all_vlan(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ int accept_all);
-+
-+/**
-+ * enum dpsw_counter - Counters types
-+ * @DPSW_CNT_ING_FRAME: Counts ingress frames
-+ * @DPSW_CNT_ING_BYTE: Counts ingress bytes
-+ * @DPSW_CNT_ING_FLTR_FRAME: Counts filtered ingress frames
-+ * @DPSW_CNT_ING_FRAME_DISCARD: Counts discarded ingress frame
-+ * @DPSW_CNT_ING_MCAST_FRAME: Counts ingress multicast frames
-+ * @DPSW_CNT_ING_MCAST_BYTE: Counts ingress multicast bytes
-+ * @DPSW_CNT_ING_BCAST_FRAME: Counts ingress broadcast frames
-+ * @DPSW_CNT_ING_BCAST_BYTES: Counts ingress broadcast bytes
-+ * @DPSW_CNT_EGR_FRAME: Counts egress frames
-+ * @DPSW_CNT_EGR_BYTE: Counts eEgress bytes
-+ * @DPSW_CNT_EGR_FRAME_DISCARD: Counts discarded egress frames
-+ * @DPSW_CNT_EGR_STP_FRAME_DISCARD: Counts egress STP discarded frames
-+ */
-+enum dpsw_counter {
-+ DPSW_CNT_ING_FRAME = 0x0,
-+ DPSW_CNT_ING_BYTE = 0x1,
-+ DPSW_CNT_ING_FLTR_FRAME = 0x2,
-+ DPSW_CNT_ING_FRAME_DISCARD = 0x3,
-+ DPSW_CNT_ING_MCAST_FRAME = 0x4,
-+ DPSW_CNT_ING_MCAST_BYTE = 0x5,
-+ DPSW_CNT_ING_BCAST_FRAME = 0x6,
-+ DPSW_CNT_ING_BCAST_BYTES = 0x7,
-+ DPSW_CNT_EGR_FRAME = 0x8,
-+ DPSW_CNT_EGR_BYTE = 0x9,
-+ DPSW_CNT_EGR_FRAME_DISCARD = 0xa,
-+ DPSW_CNT_EGR_STP_FRAME_DISCARD = 0xb
-+};
-+
-+int dpsw_if_get_counter(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ enum dpsw_counter type,
-+ u64 *counter);
-+
-+int dpsw_if_set_counter(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ enum dpsw_counter type,
-+ u64 counter);
-+
-+/**
-+ * Maximum number of TC
-+ */
-+#define DPSW_MAX_TC 8
-+
-+/**
-+ * enum dpsw_priority_selector - User priority
-+ * @DPSW_UP_PCP: Priority Code Point (PCP): a 3-bit field which
-+ * refers to the IEEE 802.1p priority.
-+ * @DPSW_UP_DSCP: Differentiated services Code Point (DSCP): 6 bit
-+ * field from IP header
-+ *
-+ */
-+enum dpsw_priority_selector {
-+ DPSW_UP_PCP = 0,
-+ DPSW_UP_DSCP = 1
-+};
-+
-+/**
-+ * enum dpsw_schedule_mode - Traffic classes scheduling
-+ * @DPSW_SCHED_STRICT_PRIORITY: schedule strict priority
-+ * @DPSW_SCHED_WEIGHTED: schedule based on token bucket created algorithm
-+ */
-+enum dpsw_schedule_mode {
-+ DPSW_SCHED_STRICT_PRIORITY,
-+ DPSW_SCHED_WEIGHTED
-+};
-+
-+/**
-+ * struct dpsw_tx_schedule_cfg - traffic class configuration
-+ * @mode: Strict or weight-based scheduling
-+ * @delta_bandwidth: weighted Bandwidth in range from 100 to 10000
-+ */
-+struct dpsw_tx_schedule_cfg {
-+ enum dpsw_schedule_mode mode;
-+ u16 delta_bandwidth;
-+};
-+
-+/**
-+ * struct dpsw_tx_selection_cfg - Mapping user priority into traffic
-+ * class configuration
-+ * @priority_selector: Source for user priority regeneration
-+ * @tc_id: The Regenerated User priority that the incoming
-+ * User Priority is mapped to for this interface
-+ * @tc_sched: Traffic classes configuration
-+ */
-+struct dpsw_tx_selection_cfg {
-+ enum dpsw_priority_selector priority_selector;
-+ u8 tc_id[DPSW_MAX_PRIORITIES];
-+ struct dpsw_tx_schedule_cfg tc_sched[DPSW_MAX_TC];
-+};
-+
-+int dpsw_if_set_tx_selection(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ const struct dpsw_tx_selection_cfg *cfg);
-+
-+/**
-+ * enum dpsw_reflection_filter - Filter type for frames to reflect
-+ * @DPSW_REFLECTION_FILTER_INGRESS_ALL: Reflect all frames
-+ * @DPSW_REFLECTION_FILTER_INGRESS_VLAN: Reflect only frames belong to
-+ * particular VLAN defined by vid parameter
-+ *
-+ */
-+enum dpsw_reflection_filter {
-+ DPSW_REFLECTION_FILTER_INGRESS_ALL = 0,
-+ DPSW_REFLECTION_FILTER_INGRESS_VLAN = 1
-+};
-+
-+/**
-+ * struct dpsw_reflection_cfg - Structure representing reflection information
-+ * @filter: Filter type for frames to reflect
-+ * @vlan_id: Vlan Id to reflect; valid only when filter type is
-+ * DPSW_INGRESS_VLAN
-+ */
-+struct dpsw_reflection_cfg {
-+ enum dpsw_reflection_filter filter;
-+ u16 vlan_id;
-+};
-+
-+int dpsw_if_add_reflection(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ const struct dpsw_reflection_cfg *cfg);
-+
-+int dpsw_if_remove_reflection(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ const struct dpsw_reflection_cfg *cfg);
-+
-+/**
-+ * enum dpsw_metering_mode - Metering modes
-+ * @DPSW_METERING_MODE_NONE: metering disabled
-+ * @DPSW_METERING_MODE_RFC2698: RFC 2698
-+ * @DPSW_METERING_MODE_RFC4115: RFC 4115
-+ */
-+enum dpsw_metering_mode {
-+ DPSW_METERING_MODE_NONE = 0,
-+ DPSW_METERING_MODE_RFC2698,
-+ DPSW_METERING_MODE_RFC4115
-+};
-+
-+/**
-+ * enum dpsw_metering_unit - Metering count
-+ * @DPSW_METERING_UNIT_BYTES: count bytes
-+ * @DPSW_METERING_UNIT_FRAMES: count frames
-+ */
-+enum dpsw_metering_unit {
-+ DPSW_METERING_UNIT_BYTES = 0,
-+ DPSW_METERING_UNIT_FRAMES
-+};
-+
-+/**
-+ * struct dpsw_metering_cfg - Metering configuration
-+ * @mode: metering modes
-+ * @units: Bytes or frame units
-+ * @cir: Committed information rate (CIR) in Kbits/s
-+ * @eir: Peak information rate (PIR) Kbit/s rfc2698
-+ * Excess information rate (EIR) Kbit/s rfc4115
-+ * @cbs: Committed burst size (CBS) in bytes
-+ * @ebs: Peak burst size (PBS) in bytes for rfc2698
-+ * Excess bust size (EBS) in bytes rfc4115
-+ *
-+ */
-+struct dpsw_metering_cfg {
-+ enum dpsw_metering_mode mode;
-+ enum dpsw_metering_unit units;
-+ u32 cir;
-+ u32 eir;
-+ u32 cbs;
-+ u32 ebs;
-+};
-+
-+int dpsw_if_set_flooding_metering(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ const struct dpsw_metering_cfg *cfg);
-+
-+int dpsw_if_set_metering(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ u8 tc_id,
-+ const struct dpsw_metering_cfg *cfg);
++ u16 token,
++ u16 if_id,
++ struct dpsw_tci_cfg *cfg);
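
Aside: the TCI is also how the driver sets a port PVID (see
ethsw_port_add_vlan() later in this patch, which requires the port to
be down while changing it). A sketch with hypothetical `if_id`:

	struct dpsw_tci_cfg tci_cfg = {
		.pcp     = 0,
		.dei     = 0,
		.vlan_id = 100,	/* VLAN to use as PVID */
	};
	int err;

	err = dpsw_if_set_tci(mc_io, 0, dpsw_token, if_id, &tci_cfg);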
+
+/**
-+ * enum dpsw_early_drop_unit - DPSW early drop unit
-+ * @DPSW_EARLY_DROP_UNIT_BYTE: count bytes
-+ * @DPSW_EARLY_DROP_UNIT_FRAMES: count frames
++ * enum dpsw_stp_state - Spanning Tree Protocol (STP) states
++ * @DPSW_STP_STATE_DISABLED: Disabled state
++ * @DPSW_STP_STATE_BLOCKING: Blocking state (alias of DPSW_STP_STATE_DISABLED)
++ * @DPSW_STP_STATE_LISTENING: Listening state
++ * @DPSW_STP_STATE_LEARNING: Learning state
++ * @DPSW_STP_STATE_FORWARDING: Forwarding state
++ *
+ */
-+enum dpsw_early_drop_unit {
-+ DPSW_EARLY_DROP_UNIT_BYTE = 0,
-+ DPSW_EARLY_DROP_UNIT_FRAMES
++enum dpsw_stp_state {
++ DPSW_STP_STATE_DISABLED = 0,
++ DPSW_STP_STATE_LISTENING = 1,
++ DPSW_STP_STATE_LEARNING = 2,
++ DPSW_STP_STATE_FORWARDING = 3,
++ DPSW_STP_STATE_BLOCKING = 0
+};
+
+/**
-+ * enum dpsw_early_drop_mode - DPSW early drop mode
-+ * @DPSW_EARLY_DROP_MODE_NONE: early drop is disabled
-+ * @DPSW_EARLY_DROP_MODE_TAIL: early drop in taildrop mode
-+ * @DPSW_EARLY_DROP_MODE_WRED: early drop in WRED mode
++ * struct dpsw_stp_cfg - Spanning Tree Protocol (STP) Configuration
++ * @vlan_id: VLAN ID STP state
++ * @state: STP state
+ */
-+enum dpsw_early_drop_mode {
-+ DPSW_EARLY_DROP_MODE_NONE = 0,
-+ DPSW_EARLY_DROP_MODE_TAIL,
-+ DPSW_EARLY_DROP_MODE_WRED
++struct dpsw_stp_cfg {
++ u16 vlan_id;
++ enum dpsw_stp_state state;
+};
+
-+/**
-+ * struct dpsw_wred_cfg - WRED configuration
-+ * @max_threshold: maximum threshold that packets may be discarded. Above this
-+ * threshold all packets are discarded; must be less than 2^39;
-+ * approximated to be expressed as (x+256)*2^(y-1) due to HW
-+ * implementation.
-+ * @min_threshold: minimum threshold that packets may be discarded at
-+ * @drop_probability: probability that a packet will be discarded (1-100,
-+ * associated with the maximum threshold)
-+ */
-+struct dpsw_wred_cfg {
-+ u64 min_threshold;
-+ u64 max_threshold;
-+ u8 drop_probability;
-+};
++int dpsw_if_set_stp(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 if_id,
++ const struct dpsw_stp_cfg *cfg);
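
Aside: a sketch of moving a port to forwarding for one VLAN, roughly
what a bridge STP state change maps to (hypothetical `if_id`):

	struct dpsw_stp_cfg stp_cfg = {
		.vlan_id = 100,
		.state   = DPSW_STP_STATE_FORWARDING,
	};
	int err;

	err = dpsw_if_set_stp(mc_io, 0, dpsw_token, if_id, &stp_cfg);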
+
+/**
-+ * struct dpsw_early_drop_cfg - early-drop configuration
-+ * @drop_mode: drop mode
-+ * @units: count units
-+ * @yellow: WRED - 'yellow' configuration
-+ * @green: WRED - 'green' configuration
-+ * @tail_drop_threshold: tail drop threshold
++ * enum dpsw_accepted_frames - Types of frames to accept
++ * @DPSW_ADMIT_ALL: The device accepts VLAN tagged, untagged and
++ * priority tagged frames
++ * @DPSW_ADMIT_ONLY_VLAN_TAGGED: The device discards untagged frames or
++ * Priority-Tagged frames received on this interface.
++ *
+ */
-+struct dpsw_early_drop_cfg {
-+ enum dpsw_early_drop_mode drop_mode;
-+ enum dpsw_early_drop_unit units;
-+ struct dpsw_wred_cfg yellow;
-+ struct dpsw_wred_cfg green;
-+ u32 tail_drop_threshold;
++enum dpsw_accepted_frames {
++ DPSW_ADMIT_ALL = 1,
++ DPSW_ADMIT_ONLY_VLAN_TAGGED = 3
+};
+
-+void dpsw_prepare_early_drop(const struct dpsw_early_drop_cfg *cfg,
-+ u8 *early_drop_buf);
-+
-+int dpsw_if_set_early_drop(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ u8 tc_id,
-+ u64 early_drop_iova);
-+
+/**
-+ * struct dpsw_custom_tpid_cfg - Structure representing tag Protocol identifier
-+ * @tpid: An additional tag protocol identifier
++ * enum dpsw_counter - Counters types
++ * @DPSW_CNT_ING_FRAME: Counts ingress frames
++ * @DPSW_CNT_ING_BYTE: Counts ingress bytes
++ * @DPSW_CNT_ING_FLTR_FRAME: Counts filtered ingress frames
++ * @DPSW_CNT_ING_FRAME_DISCARD: Counts discarded ingress frame
++ * @DPSW_CNT_ING_MCAST_FRAME: Counts ingress multicast frames
++ * @DPSW_CNT_ING_MCAST_BYTE: Counts ingress multicast bytes
++ * @DPSW_CNT_ING_BCAST_FRAME: Counts ingress broadcast frames
++ * @DPSW_CNT_ING_BCAST_BYTES: Counts ingress broadcast bytes
++ * @DPSW_CNT_EGR_FRAME: Counts egress frames
++ * @DPSW_CNT_EGR_BYTE: Counts egress bytes
++ * @DPSW_CNT_EGR_FRAME_DISCARD: Counts discarded egress frames
++ * @DPSW_CNT_EGR_STP_FRAME_DISCARD: Counts egress STP discarded frames
+ */
-+struct dpsw_custom_tpid_cfg {
-+ u16 tpid;
++enum dpsw_counter {
++ DPSW_CNT_ING_FRAME = 0x0,
++ DPSW_CNT_ING_BYTE = 0x1,
++ DPSW_CNT_ING_FLTR_FRAME = 0x2,
++ DPSW_CNT_ING_FRAME_DISCARD = 0x3,
++ DPSW_CNT_ING_MCAST_FRAME = 0x4,
++ DPSW_CNT_ING_MCAST_BYTE = 0x5,
++ DPSW_CNT_ING_BCAST_FRAME = 0x6,
++ DPSW_CNT_ING_BCAST_BYTES = 0x7,
++ DPSW_CNT_EGR_FRAME = 0x8,
++ DPSW_CNT_EGR_BYTE = 0x9,
++ DPSW_CNT_EGR_FRAME_DISCARD = 0xa,
++ DPSW_CNT_EGR_STP_FRAME_DISCARD = 0xb
+};
+
-+int dpsw_add_custom_tpid(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ const struct dpsw_custom_tpid_cfg *cfg);
-+
-+int dpsw_remove_custom_tpid(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ const struct dpsw_custom_tpid_cfg *cfg);
++int dpsw_if_get_counter(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 if_id,
++ enum dpsw_counter type,
++ u64 *counter);
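
Aside: a sketch of reading one counter, the same call the ethtool
statistics at the end of this patch are built on (hypothetical
`if_id`):

	u64 frames;
	int err;

	err = dpsw_if_get_counter(mc_io, 0, dpsw_token, if_id,
				  DPSW_CNT_ING_FRAME, &frames);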
+
+int dpsw_if_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 if_id);
+
-+/**
-+ * struct dpsw_if_attr - Structure representing DPSW interface attributes
-+ * @num_tcs: Number of traffic classes
-+ * @rate: Transmit rate in bits per second
-+ * @options: Interface configuration options (bitmap)
-+ * @enabled: Indicates if interface is enabled
-+ * @accept_all_vlan: The device discards/accepts incoming frames
-+ * for VLANs that do not include this interface
-+ * @admit_untagged: When set to 'DPSW_ADMIT_ONLY_VLAN_TAGGED', the device
-+ * discards untagged frames or priority-tagged frames received on
-+ * this interface;
-+ * When set to 'DPSW_ADMIT_ALL', untagged frames or priority-
-+ * tagged frames received on this interface are accepted
-+ * @qdid: control frames transmit qdid
-+ */
-+struct dpsw_if_attr {
-+ u8 num_tcs;
-+ u32 rate;
-+ u32 options;
-+ int enabled;
-+ int accept_all_vlan;
-+ enum dpsw_accepted_frames admit_untagged;
-+ u16 qdid;
-+};
-+
-+int dpsw_if_get_attributes(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ struct dpsw_if_attr *attr);
-+
+int dpsw_if_set_max_frame_length(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 if_id,
+ u16 frame_length);
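
Aside: callers size the frame length from the MTU plus the VLAN
Ethernet header and FCS (the driver wraps this arithmetic in an
ETHSW_L2_MAX_FRM(mtu) macro). A sketch with hypothetical `mtu`:

	err = dpsw_if_set_max_frame_length(mc_io, 0, dpsw_token, if_id,
					   mtu + VLAN_ETH_HLEN + ETH_FCS_LEN);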
+
-+int dpsw_if_get_max_frame_length(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ u16 *frame_length);
-+
+/**
+ * struct dpsw_vlan_cfg - VLAN Configuration
+ * @fdb_id: Forwarding Data Base
+ u16 vlan_id,
+ const struct dpsw_vlan_if_cfg *cfg);
+
-+int dpsw_vlan_add_if_flooding(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 vlan_id,
-+ const struct dpsw_vlan_if_cfg *cfg);
-+
+int dpsw_vlan_remove_if(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 vlan_id,
+ const struct dpsw_vlan_if_cfg *cfg);
+
-+int dpsw_vlan_remove_if_flooding(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 vlan_id,
-+ const struct dpsw_vlan_if_cfg *cfg);
-+
+int dpsw_vlan_remove(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 vlan_id);
+
+/**
-+ * struct dpsw_vlan_attr - VLAN attributes
-+ * @fdb_id: Associated FDB ID
-+ * @num_ifs: Number of interfaces
-+ * @num_untagged_ifs: Number of untagged interfaces
-+ * @num_flooding_ifs: Number of flooding interfaces
-+ */
-+struct dpsw_vlan_attr {
-+ u16 fdb_id;
-+ u16 num_ifs;
-+ u16 num_untagged_ifs;
-+ u16 num_flooding_ifs;
-+};
-+
-+int dpsw_vlan_get_attributes(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 vlan_id,
-+ struct dpsw_vlan_attr *attr);
-+
-+int dpsw_vlan_get_if(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 vlan_id,
-+ struct dpsw_vlan_if_cfg *cfg);
-+
-+int dpsw_vlan_get_if_flooding(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 vlan_id,
-+ struct dpsw_vlan_if_cfg *cfg);
-+
-+int dpsw_vlan_get_if_untagged(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 vlan_id,
-+ struct dpsw_vlan_if_cfg *cfg);
-+
-+/**
-+ * struct dpsw_fdb_cfg - FDB Configuration
-+ * @num_fdb_entries: Number of FDB entries
-+ * @fdb_aging_time: Aging time in seconds
-+ */
-+struct dpsw_fdb_cfg {
-+ u16 num_fdb_entries;
-+ u16 fdb_aging_time;
-+};
-+
-+int dpsw_fdb_add(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 *fdb_id,
-+ const struct dpsw_fdb_cfg *cfg);
-+
-+int dpsw_fdb_remove(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 fdb_id);
-+
-+/**
+ * enum dpsw_fdb_entry_type - FDB Entry type - Static/Dynamic
+ * @DPSW_FDB_ENTRY_STATIC: Static entry
+ * @DPSW_FDB_ENTRY_DINAMIC: Dynamic entry
+ u16 fdb_id,
+ const struct dpsw_fdb_unicast_cfg *cfg);
+
-+int dpsw_fdb_get_unicast(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 fdb_id,
-+ struct dpsw_fdb_unicast_cfg *cfg);
-+
+int dpsw_fdb_remove_unicast(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 fdb_id,
+ const struct dpsw_fdb_multicast_cfg *cfg);
+
-+int dpsw_fdb_get_multicast(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 fdb_id,
-+ struct dpsw_fdb_multicast_cfg *cfg);
-+
+int dpsw_fdb_remove_multicast(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ */
+enum dpsw_fdb_learning_mode {
+ DPSW_FDB_LEARNING_MODE_DIS = 0,
-+ DPSW_FDB_LEARNING_MODE_HW = 1,
-+ DPSW_FDB_LEARNING_MODE_NON_SECURE = 2,
-+ DPSW_FDB_LEARNING_MODE_SECURE = 3
-+};
-+
-+int dpsw_fdb_set_learning_mode(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 fdb_id,
-+ enum dpsw_fdb_learning_mode mode);
-+
-+/**
-+ * struct dpsw_fdb_attr - FDB Attributes
-+ * @max_fdb_entries: Number of FDB entries
-+ * @fdb_aging_time: Aging time in seconds
-+ * @learning_mode: Learning mode
-+ * @num_fdb_mc_groups: Current number of multicast groups
-+ * @max_fdb_mc_groups: Maximum number of multicast groups
-+ */
-+struct dpsw_fdb_attr {
-+ u16 max_fdb_entries;
-+ u16 fdb_aging_time;
-+ enum dpsw_fdb_learning_mode learning_mode;
-+ u16 num_fdb_mc_groups;
-+ u16 max_fdb_mc_groups;
-+};
-+
-+int dpsw_fdb_get_attributes(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 fdb_id,
-+ struct dpsw_fdb_attr *attr);
-+
-+/**
-+ * struct dpsw_acl_cfg - ACL Configuration
-+ * @max_entries: Number of FDB entries
-+ */
-+struct dpsw_acl_cfg {
-+ u16 max_entries;
-+};
-+
-+/**
-+ * struct dpsw_acl_fields - ACL fields.
-+ * @l2_dest_mac: Destination MAC address: BPDU, Multicast, Broadcast, Unicast,
-+ * slow protocols, MVRP, STP
-+ * @l2_source_mac: Source MAC address
-+ * @l2_tpid: Layer 2 (Ethernet) protocol type, used to identify the following
-+ * protocols: MPLS, PTP, PFC, ARP, Jumbo frames, LLDP, IEEE802.1ae,
-+ * Q-in-Q, IPv4, IPv6, PPPoE
-+ * @l2_pcp_dei: indicate which protocol is encapsulated in the payload
-+ * @l2_vlan_id: layer 2 VLAN ID
-+ * @l2_ether_type: layer 2 Ethernet type
-+ * @l3_dscp: Layer 3 differentiated services code point
-+ * @l3_protocol: Tells the Network layer at the destination host, to which
-+ * Protocol this packet belongs to. The following protocol are
-+ * supported: ICMP, IGMP, IPv4 (encapsulation), TCP, IPv6
-+ * (encapsulation), GRE, PTP
-+ * @l3_source_ip: Source IPv4 IP
-+ * @l3_dest_ip: Destination IPv4 IP
-+ * @l4_source_port: Source TCP/UDP Port
-+ * @l4_dest_port: Destination TCP/UDP Port
-+ */
-+struct dpsw_acl_fields {
-+ u8 l2_dest_mac[6];
-+ u8 l2_source_mac[6];
-+ u16 l2_tpid;
-+ u8 l2_pcp_dei;
-+ u16 l2_vlan_id;
-+ u16 l2_ether_type;
-+ u8 l3_dscp;
-+ u8 l3_protocol;
-+ u32 l3_source_ip;
-+ u32 l3_dest_ip;
-+ u16 l4_source_port;
-+ u16 l4_dest_port;
-+};
-+
-+/**
-+ * struct dpsw_acl_key - ACL key
-+ * @match: Match fields
-+ * @mask: Mask: b'1 - valid, b'0 don't care
-+ */
-+struct dpsw_acl_key {
-+ struct dpsw_acl_fields match;
-+ struct dpsw_acl_fields mask;
-+};
-+
-+/**
-+ * enum dpsw_acl_action
-+ * @DPSW_ACL_ACTION_DROP: Drop frame
-+ * @DPSW_ACL_ACTION_REDIRECT: Redirect to certain port
-+ * @DPSW_ACL_ACTION_ACCEPT: Accept frame
-+ * @DPSW_ACL_ACTION_REDIRECT_TO_CTRL_IF: Redirect to control interface
-+ */
-+enum dpsw_acl_action {
-+ DPSW_ACL_ACTION_DROP,
-+ DPSW_ACL_ACTION_REDIRECT,
-+ DPSW_ACL_ACTION_ACCEPT,
-+ DPSW_ACL_ACTION_REDIRECT_TO_CTRL_IF
-+};
-+
-+/**
-+ * struct dpsw_acl_result - ACL action
-+ * @action: Action should be taken when ACL entry hit
-+ * @if_id: Interface IDs to redirect frame. Valid only if redirect selected for
-+ * action
-+ */
-+struct dpsw_acl_result {
-+ enum dpsw_acl_action action;
-+ u16 if_id;
-+};
-+
-+/**
-+ * struct dpsw_acl_entry_cfg - ACL entry
-+ * @key_iova: I/O virtual address of DMA-able memory filled with key after call
-+ * to dpsw_acl_prepare_entry_cfg()
-+ * @result: Required action when entry hit occurs
-+ * @precedence: Precedence inside ACL 0 is lowest; This priority can not change
-+ * during the lifetime of a Policy. It is user responsibility to
-+ * space the priorities according to consequent rule additions.
-+ */
-+struct dpsw_acl_entry_cfg {
-+ u64 key_iova;
-+ struct dpsw_acl_result result;
-+ int precedence;
-+};
-+
-+int dpsw_acl_add(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 *acl_id,
-+ const struct dpsw_acl_cfg *cfg);
-+
-+int dpsw_acl_remove(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 acl_id);
-+
-+void dpsw_acl_prepare_entry_cfg(const struct dpsw_acl_key *key,
-+ uint8_t *entry_cfg_buf);
-+
-+int dpsw_acl_add_entry(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 acl_id,
-+ const struct dpsw_acl_entry_cfg *cfg);
-+
-+int dpsw_acl_remove_entry(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 acl_id,
-+ const struct dpsw_acl_entry_cfg *cfg);
-+
-+/**
-+ * struct dpsw_acl_if_cfg - List of interfaces to Associate with ACL
-+ * @num_ifs: Number of interfaces
-+ * @if_id: List of interfaces
-+ */
-+struct dpsw_acl_if_cfg {
-+ u16 num_ifs;
-+ u16 if_id[DPSW_MAX_IF];
-+};
-+
-+int dpsw_acl_add_if(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 acl_id,
-+ const struct dpsw_acl_if_cfg *cfg);
-+
-+int dpsw_acl_remove_if(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 acl_id,
-+ const struct dpsw_acl_if_cfg *cfg);
-+
-+/**
-+ * struct dpsw_acl_attr - ACL Attributes
-+ * @max_entries: Max number of ACL entries
-+ * @num_entries: Number of used ACL entries
-+ * @num_ifs: Number of interfaces associated with ACL
-+ */
-+struct dpsw_acl_attr {
-+ u16 max_entries;
-+ u16 num_entries;
-+ u16 num_ifs;
-+};
-+
-+int dpsw_acl_get_attributes(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 acl_id,
-+ struct dpsw_acl_attr *attr);
-+/**
-+ * struct dpsw_ctrl_if_attr - Control interface attributes
-+ * @rx_fqid: Receive FQID
-+ * @rx_err_fqid: Receive error FQID
-+ * @tx_err_conf_fqid: Transmit error and confirmation FQID
-+ */
-+struct dpsw_ctrl_if_attr {
-+ u32 rx_fqid;
-+ u32 rx_err_fqid;
-+ u32 tx_err_conf_fqid;
-+};
-+
-+int dpsw_ctrl_if_get_attributes(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ struct dpsw_ctrl_if_attr *attr);
-+
-+/**
-+ * Maximum number of DPBP
-+ */
-+#define DPSW_MAX_DPBP 8
-+
-+/**
-+ * struct dpsw_ctrl_if_pools_cfg - Control interface buffer pools configuration
-+ * @num_dpbp: Number of DPBPs
-+ * @pools: Array of buffer pools parameters; The number of valid entries
-+ * must match 'num_dpbp' value
-+ */
-+struct dpsw_ctrl_if_pools_cfg {
-+ u8 num_dpbp;
-+ /**
-+ * struct pools - Buffer pools parameters
-+ * @dpbp_id: DPBP object ID
-+ * @buffer_size: Buffer size
-+ * @backup_pool: Backup pool
-+ */
-+ struct {
-+ int dpbp_id;
-+ u16 buffer_size;
-+ int backup_pool;
-+ } pools[DPSW_MAX_DPBP];
++ DPSW_FDB_LEARNING_MODE_HW = 1,
++ DPSW_FDB_LEARNING_MODE_NON_SECURE = 2,
++ DPSW_FDB_LEARNING_MODE_SECURE = 3
+};
+
-+int dpsw_ctrl_if_set_pools(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ const struct dpsw_ctrl_if_pools_cfg *cfg);
-+
-+int dpsw_ctrl_if_enable(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token);
++int dpsw_fdb_set_learning_mode(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 fdb_id,
++ enum dpsw_fdb_learning_mode mode);
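
Aside: a sketch of toggling hardware learning on the default FDB,
mirroring the driver's ethsw_set_learning() helper (hypothetical
`enable` flag):

	enum dpsw_fdb_learning_mode mode;
	int err;

	mode = enable ? DPSW_FDB_LEARNING_MODE_HW : DPSW_FDB_LEARNING_MODE_DIS;
	err = dpsw_fdb_set_learning_mode(mc_io, 0, dpsw_token, 0, mode);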
+
-+int dpsw_ctrl_if_disable(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token);
++/**
++ * struct dpsw_fdb_attr - FDB Attributes
++ * @max_fdb_entries: Number of FDB entries
++ * @fdb_aging_time: Aging time in seconds
++ * @learning_mode: Learning mode
++ * @num_fdb_mc_groups: Current number of multicast groups
++ * @max_fdb_mc_groups: Maximum number of multicast groups
++ */
++struct dpsw_fdb_attr {
++ u16 max_fdb_entries;
++ u16 fdb_aging_time;
++ enum dpsw_fdb_learning_mode learning_mode;
++ u16 num_fdb_mc_groups;
++ u16 max_fdb_mc_groups;
++};
+
+int dpsw_get_api_version(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+
+#endif /* __FSL_DPSW_H */
--- /dev/null
-+++ b/drivers/staging/fsl-dpaa2/ethsw/switch.c
-@@ -0,0 +1,1857 @@
-+/* Copyright 2014-2015 Freescale Semiconductor Inc.
++++ b/drivers/staging/fsl-dpaa2/ethsw/ethsw-ethtool.c
+@@ -0,0 +1,206 @@
++/* Copyright 2014-2016 Freescale Semiconductor Inc.
++ * Copyright 2017 NXP
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
-+#include <linux/module.h>
-+#include <linux/msi.h>
-+
-+#include <linux/netdevice.h>
-+#include <linux/etherdevice.h>
-+#include <linux/rtnetlink.h>
-+#include <linux/if_vlan.h>
-+
-+#include <uapi/linux/if_bridge.h>
-+#include <net/netlink.h>
-+
-+#include "../../fsl-mc/include/mc.h"
-+#include "dpsw.h"
-+#include "dpsw-cmd.h"
-+
-+static const char ethsw_drv_version[] = "0.1";
-+
-+/* Minimal supported DPSE version */
-+#define DPSW_MIN_VER_MAJOR 8
-+#define DPSW_MIN_VER_MINOR 0
-+
-+/* IRQ index */
-+#define DPSW_MAX_IRQ_NUM 2
-+
-+#define ETHSW_VLAN_MEMBER 1
-+#define ETHSW_VLAN_UNTAGGED 2
-+#define ETHSW_VLAN_PVID 4
-+#define ETHSW_VLAN_GLOBAL 8
-+
-+/* Maximum Frame Length supported by HW (currently 10k) */
-+#define DPAA2_MFL (10 * 1024)
-+#define ETHSW_MAX_FRAME_LENGTH (DPAA2_MFL - VLAN_ETH_HLEN - ETH_FCS_LEN)
-+#define ETHSW_L2_MAX_FRM(mtu) ((mtu) + VLAN_ETH_HLEN + ETH_FCS_LEN)
-+
-+struct ethsw_port_priv {
-+ struct net_device *netdev;
-+ struct list_head list;
-+ u16 port_index;
-+ struct ethsw_dev_priv *ethsw_priv;
-+ u8 stp_state;
-+
-+ char vlans[VLAN_VID_MASK + 1];
-+
-+};
-+
-+struct ethsw_dev_priv {
-+ struct net_device *netdev;
-+ struct fsl_mc_io *mc_io;
-+ u16 dpsw_handle;
-+ struct dpsw_attr sw_attr;
-+ int dev_id;
-+ /*TODO: redundant, we can use the slave dev list */
-+ struct list_head port_list;
-+
-+ bool flood;
-+ bool learning;
-+
-+ char vlans[VLAN_VID_MASK + 1];
-+};
-+
-+static int ethsw_port_stop(struct net_device *netdev);
-+static int ethsw_port_open(struct net_device *netdev);
-+
-+static inline void __get_priv(struct net_device *netdev,
-+ struct ethsw_dev_priv **priv,
-+ struct ethsw_port_priv **port_priv)
-+{
-+ struct ethsw_dev_priv *_priv = NULL;
-+ struct ethsw_port_priv *_port_priv = NULL;
-+
-+ if (netdev->flags & IFF_MASTER) {
-+ _priv = netdev_priv(netdev);
-+ } else {
-+ _port_priv = netdev_priv(netdev);
-+ _priv = _port_priv->ethsw_priv;
-+ }
-+
-+ if (priv)
-+ *priv = _priv;
-+ if (port_priv)
-+ *port_priv = _port_priv;
-+}
-+
-+/* -------------------------------------------------------------------------- */
-+/* ethsw netdevice ops */
-+
-+static netdev_tx_t ethsw_dropframe(struct sk_buff *skb, struct net_device *dev)
-+{
-+ /* we don't support I/O for now, drop the frame */
-+ dev_kfree_skb_any(skb);
-+ return NETDEV_TX_OK;
-+}
-+
-+static int ethsw_open(struct net_device *netdev)
-+{
-+ struct ethsw_dev_priv *priv = netdev_priv(netdev);
-+ struct list_head *pos;
-+ struct ethsw_port_priv *port_priv = NULL;
-+ int err;
-+
-+ err = dpsw_enable(priv->mc_io, 0, priv->dpsw_handle);
-+ if (err) {
-+ netdev_err(netdev, "dpsw_enable err %d\n", err);
-+ return err;
-+ }
-+
-+ list_for_each(pos, &priv->port_list) {
-+ port_priv = list_entry(pos, struct ethsw_port_priv, list);
-+ err = dev_open(port_priv->netdev);
-+ if (err)
-+ netdev_err(port_priv->netdev, "dev_open err %d\n", err);
-+ }
-+
-+ return 0;
-+}
-+
-+static int ethsw_stop(struct net_device *netdev)
-+{
-+ struct ethsw_dev_priv *priv = netdev_priv(netdev);
-+ struct list_head *pos;
-+ struct ethsw_port_priv *port_priv = NULL;
-+ int err;
-+
-+ err = dpsw_disable(priv->mc_io, 0, priv->dpsw_handle);
-+ if (err) {
-+ netdev_err(netdev, "dpsw_disable err %d\n", err);
-+ return err;
-+ }
-+
-+ list_for_each(pos, &priv->port_list) {
-+ port_priv = list_entry(pos, struct ethsw_port_priv, list);
-+ err = dev_close(port_priv->netdev);
-+ if (err)
-+ netdev_err(port_priv->netdev,
-+ "dev_close err %d\n", err);
-+ }
-+
-+ return 0;
-+}
-+
-+static int ethsw_add_vlan(struct net_device *netdev, u16 vid)
-+{
-+ struct ethsw_dev_priv *priv = netdev_priv(netdev);
-+ int err;
-+
-+ struct dpsw_vlan_cfg vcfg = {
-+ /* TODO: add support for VLAN private FDBs */
-+ .fdb_id = 0,
-+ };
-+ if (priv->vlans[vid]) {
-+ netdev_err(netdev, "VLAN already configured\n");
-+ return -EEXIST;
-+ }
-+
-+ err = dpsw_vlan_add(priv->mc_io, 0, priv->dpsw_handle, vid, &vcfg);
-+ if (err) {
-+ netdev_err(netdev, "dpsw_vlan_add err %d\n", err);
-+ return err;
-+ }
-+ priv->vlans[vid] = ETHSW_VLAN_MEMBER;
-+
-+ return 0;
-+}
-+
-+static int ethsw_port_add_vlan(struct net_device *netdev, u16 vid, u16 flags)
-+{
-+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
-+ struct ethsw_dev_priv *priv = port_priv->ethsw_priv;
-+ int err;
-+
-+ struct dpsw_vlan_if_cfg vcfg = {
-+ .num_ifs = 1,
-+ .if_id[0] = port_priv->port_index,
-+ };
-+
-+ if (port_priv->vlans[vid]) {
-+ netdev_err(netdev, "VLAN already configured\n");
-+ return -EEXIST;
-+ }
-+
-+ if (flags & BRIDGE_VLAN_INFO_PVID && netif_oper_up(netdev)) {
-+ netdev_err(netdev, "interface must be down to change PVID!\n");
-+ return -EBUSY;
-+ }
-+
-+ err = dpsw_vlan_add_if(priv->mc_io, 0, priv->dpsw_handle, vid, &vcfg);
-+ if (err) {
-+ netdev_err(netdev, "dpsw_vlan_add_if err %d\n", err);
-+ return err;
-+ }
-+ port_priv->vlans[vid] = ETHSW_VLAN_MEMBER;
-+
-+ if (flags & BRIDGE_VLAN_INFO_UNTAGGED) {
-+ err = dpsw_vlan_add_if_untagged(priv->mc_io, 0,
-+ priv->dpsw_handle, vid, &vcfg);
-+ if (err) {
-+ netdev_err(netdev, "dpsw_vlan_add_if_untagged err %d\n",
-+ err);
-+ return err;
-+ }
-+ port_priv->vlans[vid] |= ETHSW_VLAN_UNTAGGED;
-+ }
-+
-+ if (flags & BRIDGE_VLAN_INFO_PVID) {
-+ struct dpsw_tci_cfg tci_cfg = {
-+ /* TODO: at least add better defaults if these cannot
-+ * be configured
-+ */
-+ .pcp = 0,
-+ .dei = 0,
-+ .vlan_id = vid,
-+ };
-+
-+ err = dpsw_if_set_tci(priv->mc_io, 0, priv->dpsw_handle,
-+ port_priv->port_index, &tci_cfg);
-+ if (err) {
-+ netdev_err(netdev, "dpsw_if_set_tci err %d\n", err);
-+ return err;
-+ }
-+ port_priv->vlans[vid] |= ETHSW_VLAN_PVID;
-+ }
++#include "ethsw.h"
+
-+ return 0;
-+}
++static struct {
++ enum dpsw_counter id;
++ char name[ETH_GSTRING_LEN];
++} ethsw_ethtool_counters[] = {
++ {DPSW_CNT_ING_FRAME, "rx frames"},
++ {DPSW_CNT_ING_BYTE, "rx bytes"},
++ {DPSW_CNT_ING_FLTR_FRAME, "rx filtered frames"},
++ {DPSW_CNT_ING_FRAME_DISCARD, "rx discarded frames"},
++ {DPSW_CNT_ING_BCAST_FRAME, "rx b-cast frames"},
++ {DPSW_CNT_ING_BCAST_BYTES, "rx b-cast bytes"},
++ {DPSW_CNT_ING_MCAST_FRAME, "rx m-cast frames"},
++ {DPSW_CNT_ING_MCAST_BYTE, "rx m-cast bytes"},
++ {DPSW_CNT_EGR_FRAME, "tx frames"},
++ {DPSW_CNT_EGR_BYTE, "tx bytes"},
++ {DPSW_CNT_EGR_FRAME_DISCARD, "tx discarded frames"},
+
-+static const struct nla_policy ifla_br_policy[IFLA_MAX + 1] = {
-+ [IFLA_BRIDGE_FLAGS] = { .type = NLA_U16 },
-+ [IFLA_BRIDGE_MODE] = { .type = NLA_U16 },
-+ [IFLA_BRIDGE_VLAN_INFO] = { .type = NLA_BINARY,
-+ .len = sizeof(struct bridge_vlan_info), },
+};
+
-+static int ethsw_setlink_af_spec(struct net_device *netdev,
-+ struct nlattr **tb)
-+{
-+ struct bridge_vlan_info *vinfo;
-+ struct ethsw_dev_priv *priv = NULL;
-+ struct ethsw_port_priv *port_priv = NULL;
-+ int err = 0;
-+
-+ if (!tb[IFLA_BRIDGE_VLAN_INFO]) {
-+ netdev_err(netdev, "no VLAN INFO in nlmsg\n");
-+ return -EOPNOTSUPP;
-+ }
-+
-+ vinfo = nla_data(tb[IFLA_BRIDGE_VLAN_INFO]);
-+
-+ if (!vinfo->vid || vinfo->vid > VLAN_VID_MASK)
-+ return -EINVAL;
-+
-+ __get_priv(netdev, &priv, &port_priv);
-+
-+ if (!port_priv || !priv->vlans[vinfo->vid]) {
-+ /* command targets switch device or this is a new VLAN */
-+ err = ethsw_add_vlan(priv->netdev, vinfo->vid);
-+ if (err)
-+ return err;
-+
-+ /* command targets switch device; mark it*/
-+ if (!port_priv)
-+ priv->vlans[vinfo->vid] |= ETHSW_VLAN_GLOBAL;
-+ }
-+
-+ if (port_priv) {
-+ /* command targets switch port */
-+ err = ethsw_port_add_vlan(netdev, vinfo->vid, vinfo->flags);
-+ if (err)
-+ return err;
-+ }
-+
-+ return 0;
-+}
-+
-+static const struct nla_policy ifla_brport_policy[IFLA_BRPORT_MAX + 1] = {
-+ [IFLA_BRPORT_STATE] = { .type = NLA_U8 },
-+ [IFLA_BRPORT_COST] = { .type = NLA_U32 },
-+ [IFLA_BRPORT_PRIORITY] = { .type = NLA_U16 },
-+ [IFLA_BRPORT_MODE] = { .type = NLA_U8 },
-+ [IFLA_BRPORT_GUARD] = { .type = NLA_U8 },
-+ [IFLA_BRPORT_PROTECT] = { .type = NLA_U8 },
-+ [IFLA_BRPORT_LEARNING] = { .type = NLA_U8 },
-+ [IFLA_BRPORT_UNICAST_FLOOD] = { .type = NLA_U8 },
-+};
++#define ETHSW_NUM_COUNTERS ARRAY_SIZE(ethsw_ethtool_counters)
+
-+static int ethsw_set_learning(struct net_device *netdev, u8 flag)
++static void ethsw_get_drvinfo(struct net_device *netdev,
++ struct ethtool_drvinfo *drvinfo)
+{
-+ struct ethsw_dev_priv *priv = netdev_priv(netdev);
-+ enum dpsw_fdb_learning_mode learn_mode;
-+ int err;
-+
-+ if (flag)
-+ learn_mode = DPSW_FDB_LEARNING_MODE_HW;
-+ else
-+ learn_mode = DPSW_FDB_LEARNING_MODE_DIS;
++ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
++ u16 version_major, version_minor;
++ int err;
+
-+ err = dpsw_fdb_set_learning_mode(priv->mc_io, 0, priv->dpsw_handle,
-+ 0, learn_mode);
-+ if (err) {
-+ netdev_err(netdev, "dpsw_fdb_set_learning_mode err %d\n", err);
-+ return err;
-+ }
-+ priv->learning = !!flag;
++ strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
+
-+ return 0;
++ err = dpsw_get_api_version(port_priv->ethsw_data->mc_io, 0,
++ &version_major,
++ &version_minor);
++ if (err)
++ strlcpy(drvinfo->fw_version, "N/A",
++ sizeof(drvinfo->fw_version));
++ else
++ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
++ "%u.%u", version_major, version_minor);
++
++ strlcpy(drvinfo->bus_info, dev_name(netdev->dev.parent->parent),
++ sizeof(drvinfo->bus_info));
+}
+
-+static int ethsw_port_set_flood(struct net_device *netdev, u8 flag)
++static int
++ethsw_get_link_ksettings(struct net_device *netdev,
++ struct ethtool_link_ksettings *link_ksettings)
+{
-+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
-+ struct ethsw_dev_priv *priv = port_priv->ethsw_priv;
-+ int err;
++ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
++ struct dpsw_link_state state = {0};
++ int err = 0;
+
-+ err = dpsw_if_set_flooding(priv->mc_io, 0, priv->dpsw_handle,
-+ port_priv->port_index, (int)flag);
++ err = dpsw_if_get_link_state(port_priv->ethsw_data->mc_io, 0,
++ port_priv->ethsw_data->dpsw_handle,
++ port_priv->idx,
++ &state);
+ if (err) {
-+ netdev_err(netdev, "dpsw_fdb_set_learning_mode err %d\n", err);
-+ return err;
++ netdev_err(netdev, "ERROR %d getting link state", err);
++ goto out;
+ }
-+ priv->flood = !!flag;
+
-+ return 0;
++ /* At the moment, we have no way of interrogating the DPMAC
++ * from the DPSW side or there may not exist a DPMAC at all.
++ * Report only autoneg state, duplex and speed.
++ */
++ if (state.options & DPSW_LINK_OPT_AUTONEG)
++ link_ksettings->base.autoneg = AUTONEG_ENABLE;
++ if (!(state.options & DPSW_LINK_OPT_HALF_DUPLEX))
++ link_ksettings->base.duplex = DUPLEX_FULL;
++ link_ksettings->base.speed = state.rate;
++
++out:
++ return err;
+}
+
-+static int ethsw_port_set_state(struct net_device *netdev, u8 state)
++static int
++ethsw_set_link_ksettings(struct net_device *netdev,
++ const struct ethtool_link_ksettings *link_ksettings)
+{
-+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
-+ struct ethsw_dev_priv *priv = port_priv->ethsw_priv;
-+ u8 old_state = port_priv->stp_state;
-+ int err;
-+
-+ struct dpsw_stp_cfg stp_cfg = {
-+ .vlan_id = 1,
-+ .state = state,
-+ };
-+ /* TODO: check port state, interface may be down */
-+
-+ if (state > BR_STATE_BLOCKING)
-+ return -EINVAL;
-+
-+ if (state == port_priv->stp_state)
-+ return 0;
++ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
++ struct dpsw_link_cfg cfg = {0};
++ int err = 0;
+
-+ if (state == BR_STATE_DISABLED) {
-+ port_priv->stp_state = state;
++ netdev_dbg(netdev, "Setting link parameters...");
+
-+ err = ethsw_port_stop(netdev);
-+ if (err)
-+ goto error;
-+ } else {
-+ err = dpsw_if_set_stp(priv->mc_io, 0, priv->dpsw_handle,
-+ port_priv->port_index, &stp_cfg);
-+ if (err) {
-+ netdev_err(netdev, "dpsw_if_set_stp err %d\n", err);
-+ return err;
-+ }
++ /* Due to a temporary MC limitation, the DPSW port must be down
++ * in order to change link settings; let the user know about it.
++ */
++ if (netif_running(netdev)) {
++ netdev_info(netdev, "Sorry, interface must be brought down first.\n");
++ return -EACCES;
++ }
+
-+ port_priv->stp_state = state;
++ cfg.rate = link_ksettings->base.speed;
++ if (link_ksettings->base.autoneg == AUTONEG_ENABLE)
++ cfg.options |= DPSW_LINK_OPT_AUTONEG;
++ else
++ cfg.options &= ~DPSW_LINK_OPT_AUTONEG;
++ if (link_ksettings->base.duplex == DUPLEX_HALF)
++ cfg.options |= DPSW_LINK_OPT_HALF_DUPLEX;
++ else
++ cfg.options &= ~DPSW_LINK_OPT_HALF_DUPLEX;
+
-+ if (old_state == BR_STATE_DISABLED) {
-+ err = ethsw_port_open(netdev);
-+ if (err)
-+ goto error;
-+ }
-+ }
++ err = dpsw_if_set_link_cfg(port_priv->ethsw_data->mc_io, 0,
++ port_priv->ethsw_data->dpsw_handle,
++ port_priv->idx,
++ &cfg);
++ if (err)
++ /* ethtool will be loud enough if we return an error; no point
++ * in putting our own error message on the console by default
++ */
++ netdev_dbg(netdev, "ERROR %d setting link cfg", err);
+
-+ return 0;
-+error:
-+ port_priv->stp_state = old_state;
+ return err;
+}
+
-+static int ethsw_setlink_protinfo(struct net_device *netdev,
-+ struct nlattr **tb)
++static int ethsw_ethtool_get_sset_count(struct net_device *dev, int sset)
+{
-+ struct ethsw_dev_priv *priv;
-+ struct ethsw_port_priv *port_priv = NULL;
-+ int err = 0;
-+
-+ __get_priv(netdev, &priv, &port_priv);
-+
-+ if (tb[IFLA_BRPORT_LEARNING]) {
-+ u8 flag = nla_get_u8(tb[IFLA_BRPORT_LEARNING]);
-+
-+ if (port_priv)
-+ netdev_warn(netdev,
-+ "learning set on whole switch dev\n");
++ switch (sset) {
++ case ETH_SS_STATS:
++ return ETHSW_NUM_COUNTERS;
++ default:
++ return -EOPNOTSUPP;
++ }
++}
+
-+ err = ethsw_set_learning(priv->netdev, flag);
-+ if (err)
-+ return err;
++static void ethsw_ethtool_get_strings(struct net_device *netdev,
++ u32 stringset, u8 *data)
++{
++ int i;
+
-+ } else if (tb[IFLA_BRPORT_UNICAST_FLOOD] && port_priv) {
-+ u8 flag = nla_get_u8(tb[IFLA_BRPORT_UNICAST_FLOOD]);
++ switch (stringset) {
++ case ETH_SS_STATS:
++ for (i = 0; i < ETHSW_NUM_COUNTERS; i++)
++ memcpy(data + i * ETH_GSTRING_LEN,
++ ethsw_ethtool_counters[i].name, ETH_GSTRING_LEN);
++ break;
++ }
++}
+
-+ err = ethsw_port_set_flood(port_priv->netdev, flag);
-+ if (err)
-+ return err;
++static void ethsw_ethtool_get_stats(struct net_device *netdev,
++ struct ethtool_stats *stats,
++ u64 *data)
++{
++ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
++ int i, err;
+
-+ } else if (tb[IFLA_BRPORT_STATE] && port_priv) {
-+ u8 state = nla_get_u8(tb[IFLA_BRPORT_STATE]);
++ memset(data, 0,
++ sizeof(u64) * ETHSW_NUM_COUNTERS);
+
-+ err = ethsw_port_set_state(port_priv->netdev, state);
++ for (i = 0; i < ETHSW_NUM_COUNTERS; i++) {
++ err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
++ port_priv->ethsw_data->dpsw_handle,
++ port_priv->idx,
++ ethsw_ethtool_counters[i].id,
++ &data[i]);
+ if (err)
-+ return err;
-+
-+ } else {
-+ return -EOPNOTSUPP;
++ netdev_err(netdev, "dpsw_if_get_counter[%s] err %d\n",
++ ethsw_ethtool_counters[i].name, err);
+ }
-+
-+ return 0;
+}
+
-+static int ethsw_setlink(struct net_device *netdev,
-+ struct nlmsghdr *nlh,
-+ u16 flags)
-+{
-+ struct nlattr *attr;
-+ struct nlattr *tb[(IFLA_BRIDGE_MAX > IFLA_BRPORT_MAX) ?
-+ IFLA_BRIDGE_MAX : IFLA_BRPORT_MAX + 1];
-+ int err = 0;
++const struct ethtool_ops ethsw_port_ethtool_ops = {
++ .get_drvinfo = ethsw_get_drvinfo,
++ .get_link = ethtool_op_get_link,
++ .get_link_ksettings = ethsw_get_link_ksettings,
++ .set_link_ksettings = ethsw_set_link_ksettings,
++ .get_strings = ethsw_ethtool_get_strings,
++ .get_ethtool_stats = ethsw_ethtool_get_stats,
++ .get_sset_count = ethsw_ethtool_get_sset_count,
++};
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/ethsw/ethsw.c
+@@ -0,0 +1,1438 @@
++/* Copyright 2014-2016 Freescale Semiconductor Inc.
++ * Copyright 2017 NXP
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
+
-+ attr = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
-+ if (attr) {
-+ err = nla_parse_nested(tb, IFLA_BRIDGE_MAX, attr,
-+ ifla_br_policy);
-+ if (err) {
-+ netdev_err(netdev,
-+ "nla_parse_nested for br_policy err %d\n",
-+ err);
-+ return err;
-+ }
++#include <linux/module.h>
+
-+ err = ethsw_setlink_af_spec(netdev, tb);
-+ return err;
-+ }
++#include <linux/interrupt.h>
++#include <linux/msi.h>
++#include <linux/kthread.h>
++#include <linux/workqueue.h>
+
-+ attr = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_PROTINFO);
-+ if (attr) {
-+ err = nla_parse_nested(tb, IFLA_BRPORT_MAX, attr,
-+ ifla_brport_policy);
-+ if (err) {
-+ netdev_err(netdev,
-+ "nla_parse_nested for brport_policy err %d\n",
-+ err);
-+ return err;
-+ }
++#include <linux/fsl/mc.h>
+
-+ err = ethsw_setlink_protinfo(netdev, tb);
-+ return err;
-+ }
++#include "ethsw.h"
+
-+ netdev_err(netdev, "nlmsg_find_attr found no AF_SPEC/PROTINFO\n");
-+ return -EOPNOTSUPP;
-+}
++static struct workqueue_struct *ethsw_owq;
++
++/* Minimal supported DPSW version */
++#define DPSW_MIN_VER_MAJOR 8
++#define DPSW_MIN_VER_MINOR 0
+
-+static int __nla_put_netdev(struct sk_buff *skb, struct net_device *netdev,
-+ struct ethsw_dev_priv *priv)
++#define DEFAULT_VLAN_ID 1
++
++static int ethsw_add_vlan(struct ethsw_core *ethsw, u16 vid)
+{
-+ u8 operstate = netif_running(netdev) ? netdev->operstate : IF_OPER_DOWN;
-+ int iflink;
+ int err;
+
-+ err = nla_put_string(skb, IFLA_IFNAME, netdev->name);
-+ if (err)
-+ goto nla_put_err;
-+ err = nla_put_u32(skb, IFLA_MASTER, priv->netdev->ifindex);
-+ if (err)
-+ goto nla_put_err;
-+ err = nla_put_u32(skb, IFLA_MTU, netdev->mtu);
-+ if (err)
-+ goto nla_put_err;
-+ err = nla_put_u8(skb, IFLA_OPERSTATE, operstate);
-+ if (err)
-+ goto nla_put_err;
-+ if (netdev->addr_len) {
-+ err = nla_put(skb, IFLA_ADDRESS, netdev->addr_len,
-+ netdev->dev_addr);
-+ if (err)
-+ goto nla_put_err;
++ struct dpsw_vlan_cfg vcfg = {
++ .fdb_id = 0,
++ };
++
++ if (ethsw->vlans[vid]) {
++ dev_err(ethsw->dev, "VLAN already configured\n");
++ return -EEXIST;
+ }
+
-+ iflink = dev_get_iflink(netdev);
-+ if (netdev->ifindex != iflink) {
-+ err = nla_put_u32(skb, IFLA_LINK, iflink);
-+ if (err)
-+ goto nla_put_err;
++ err = dpsw_vlan_add(ethsw->mc_io, 0,
++ ethsw->dpsw_handle, vid, &vcfg);
++ if (err) {
++ dev_err(ethsw->dev, "dpsw_vlan_add err %d\n", err);
++ return err;
+ }
++ ethsw->vlans[vid] = ETHSW_VLAN_MEMBER;
+
+ return 0;
-+
-+nla_put_err:
-+ netdev_err(netdev, "nla_put_ err %d\n", err);
-+ return err;
+}
+
-+static int __nla_put_port(struct sk_buff *skb, struct net_device *netdev,
-+ struct ethsw_port_priv *port_priv)
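++/* Set the port PVID. The interface must be down while the TCI is
++ * changed, so an operationally up port is disabled and re-enabled
++ * around the dpsw_if_set_tci() call.
++ */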
++static int ethsw_port_set_pvid(struct ethsw_port_priv *port_priv, u16 pvid)
+{
-+ struct nlattr *nest;
-+ int err;
-+
-+ u8 stp_state = port_priv->stp_state;
++ struct ethsw_core *ethsw = port_priv->ethsw_data;
++ struct net_device *netdev = port_priv->netdev;
++ struct dpsw_tci_cfg tci_cfg = { 0 };
++ bool is_oper;
++ int err, ret;
+
-+ if (port_priv->stp_state == DPSW_STP_STATE_BLOCKING)
-+ stp_state = BR_STATE_BLOCKING;
-+
-+ nest = nla_nest_start(skb, IFLA_PROTINFO | NLA_F_NESTED);
-+ if (!nest) {
-+ netdev_err(netdev, "nla_nest_start failed\n");
-+ return -ENOMEM;
++ err = dpsw_if_get_tci(ethsw->mc_io, 0, ethsw->dpsw_handle,
++ port_priv->idx, &tci_cfg);
++ if (err) {
++ netdev_err(netdev, "dpsw_if_get_tci err %d\n", err);
++ return err;
+ }
+
-+ err = nla_put_u8(skb, IFLA_BRPORT_STATE, stp_state);
-+ if (err)
-+ goto nla_put_err;
-+ err = nla_put_u16(skb, IFLA_BRPORT_PRIORITY, 0);
-+ if (err)
-+ goto nla_put_err;
-+ err = nla_put_u32(skb, IFLA_BRPORT_COST, 0);
-+ if (err)
-+ goto nla_put_err;
-+ err = nla_put_u8(skb, IFLA_BRPORT_MODE, 0);
-+ if (err)
-+ goto nla_put_err;
-+ err = nla_put_u8(skb, IFLA_BRPORT_GUARD, 0);
-+ if (err)
-+ goto nla_put_err;
-+ err = nla_put_u8(skb, IFLA_BRPORT_PROTECT, 0);
-+ if (err)
-+ goto nla_put_err;
-+ err = nla_put_u8(skb, IFLA_BRPORT_FAST_LEAVE, 0);
-+ if (err)
-+ goto nla_put_err;
-+ err = nla_put_u8(skb, IFLA_BRPORT_LEARNING,
-+ port_priv->ethsw_priv->learning);
-+ if (err)
-+ goto nla_put_err;
-+ err = nla_put_u8(skb, IFLA_BRPORT_UNICAST_FLOOD,
-+ port_priv->ethsw_priv->flood);
-+ if (err)
-+ goto nla_put_err;
-+ nla_nest_end(skb, nest);
++ tci_cfg.vlan_id = pvid;
+
-+ return 0;
++ /* Interface needs to be down to change PVID */
++ is_oper = netif_oper_up(netdev);
++ if (is_oper) {
++ err = dpsw_if_disable(ethsw->mc_io, 0,
++ ethsw->dpsw_handle,
++ port_priv->idx);
++ if (err) {
++ netdev_err(netdev, "dpsw_if_disable err %d\n", err);
++ return err;
++ }
++ }
++
++ err = dpsw_if_set_tci(ethsw->mc_io, 0, ethsw->dpsw_handle,
++ port_priv->idx, &tci_cfg);
++ if (err) {
++ netdev_err(netdev, "dpsw_if_set_tci err %d\n", err);
++ goto set_tci_error;
++ }
++
++ /* Delete previous PVID info and mark the new one */
++ port_priv->vlans[port_priv->pvid] &= ~ETHSW_VLAN_PVID;
++ port_priv->vlans[pvid] |= ETHSW_VLAN_PVID;
++ port_priv->pvid = pvid;
++
++set_tci_error:
++ if (is_oper) {
++ ret = dpsw_if_enable(ethsw->mc_io, 0,
++ ethsw->dpsw_handle,
++ port_priv->idx);
++ if (ret) {
++ netdev_err(netdev, "dpsw_if_enable err %d\n", ret);
++ return ret;
++ }
++ }
+
-+nla_put_err:
-+ netdev_err(netdev, "nla_put_ err %d\n", err);
-+ nla_nest_cancel(skb, nest);
+ return err;
+}
+
-+static int __nla_put_vlan(struct sk_buff *skb, struct net_device *netdev,
-+ struct ethsw_dev_priv *priv,
-+ struct ethsw_port_priv *port_priv)
++static int ethsw_port_add_vlan(struct ethsw_port_priv *port_priv,
++ u16 vid, u16 flags)
+{
-+ struct nlattr *nest;
-+ struct bridge_vlan_info vinfo;
-+ const char *vlans;
-+ u16 i;
++ struct ethsw_core *ethsw = port_priv->ethsw_data;
++ struct net_device *netdev = port_priv->netdev;
++ struct dpsw_vlan_if_cfg vcfg;
+ int err;
+
-+ nest = nla_nest_start(skb, IFLA_AF_SPEC);
-+ if (!nest) {
-+ netdev_err(netdev, "nla_nest_start failed");
-+ return -ENOMEM;
++ if (port_priv->vlans[vid]) {
++ netdev_warn(netdev, "VLAN %d already configured\n", vid);
++ return -EEXIST;
+ }
+
-+ if (port_priv)
-+ vlans = port_priv->vlans;
-+ else
-+ vlans = priv->vlans;
-+
-+ for (i = 0; i < VLAN_VID_MASK + 1; i++) {
-+ vinfo.flags = 0;
-+ vinfo.vid = i;
-+
-+ if (vlans[i] & ETHSW_VLAN_UNTAGGED)
-+ vinfo.flags |= BRIDGE_VLAN_INFO_UNTAGGED;
++ vcfg.num_ifs = 1;
++ vcfg.if_id[0] = port_priv->idx;
++ err = dpsw_vlan_add_if(ethsw->mc_io, 0, ethsw->dpsw_handle, vid, &vcfg);
++ if (err) {
++ netdev_err(netdev, "dpsw_vlan_add_if err %d\n", err);
++ return err;
++ }
+
-+ if (vlans[i] & ETHSW_VLAN_PVID)
-+ vinfo.flags |= BRIDGE_VLAN_INFO_PVID;
++ port_priv->vlans[vid] = ETHSW_VLAN_MEMBER;
+
-+ if (vlans[i] & ETHSW_VLAN_MEMBER) {
-+ err = nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
-+ sizeof(vinfo), &vinfo);
-+ if (err)
-+ goto nla_put_err;
++ if (flags & BRIDGE_VLAN_INFO_UNTAGGED) {
++ err = dpsw_vlan_add_if_untagged(ethsw->mc_io, 0,
++ ethsw->dpsw_handle,
++ vid, &vcfg);
++ if (err) {
++ netdev_err(netdev,
++ "dpsw_vlan_add_if_untagged err %d\n", err);
++ return err;
+ }
++ port_priv->vlans[vid] |= ETHSW_VLAN_UNTAGGED;
+ }
+
-+ nla_nest_end(skb, nest);
++ if (flags & BRIDGE_VLAN_INFO_PVID) {
++ err = ethsw_port_set_pvid(port_priv, vid);
++ if (err)
++ return err;
++ }
+
+ return 0;
-+nla_put_err:
-+ netdev_err(netdev, "nla_put_ err %d\n", err);
-+ nla_nest_cancel(skb, nest);
-+ return err;
+}
+
-+static int ethsw_getlink(struct sk_buff *skb, u32 pid, u32 seq,
-+ struct net_device *netdev, u32 filter_mask,
-+ int nlflags)
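++/* FDB learning mode is a switch-wide setting on DPSW, not a per-port one */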
++static int ethsw_set_learning(struct ethsw_core *ethsw, u8 flag)
+{
-+ struct ethsw_dev_priv *priv;
-+ struct ethsw_port_priv *port_priv = NULL;
-+ struct ifinfomsg *hdr;
-+ struct nlmsghdr *nlh;
++ enum dpsw_fdb_learning_mode learn_mode;
+ int err;
+
-+ __get_priv(netdev, &priv, &port_priv);
++ if (flag)
++ learn_mode = DPSW_FDB_LEARNING_MODE_HW;
++ else
++ learn_mode = DPSW_FDB_LEARNING_MODE_DIS;
+
-+ nlh = nlmsg_put(skb, pid, seq, RTM_NEWLINK, sizeof(*hdr), NLM_F_MULTI);
-+ if (!nlh)
-+ return -EMSGSIZE;
++ err = dpsw_fdb_set_learning_mode(ethsw->mc_io, 0, ethsw->dpsw_handle, 0,
++ learn_mode);
++ if (err) {
++ dev_err(ethsw->dev, "dpsw_fdb_set_learning_mode err %d\n", err);
++ return err;
++ }
++ ethsw->learning = !!flag;
+
-+ hdr = nlmsg_data(nlh);
-+ memset(hdr, 0, sizeof(*hdr));
-+ hdr->ifi_family = AF_BRIDGE;
-+ hdr->ifi_type = netdev->type;
-+ hdr->ifi_index = netdev->ifindex;
-+ hdr->ifi_flags = dev_get_flags(netdev);
++ return 0;
++}
+
-+ err = __nla_put_netdev(skb, netdev, priv);
-+ if (err)
-+ goto nla_put_err;
++static int ethsw_port_set_flood(struct ethsw_port_priv *port_priv, u8 flag)
++{
++ int err;
+
-+ if (port_priv) {
-+ err = __nla_put_port(skb, netdev, port_priv);
-+ if (err)
-+ goto nla_put_err;
++ err = dpsw_if_set_flooding(port_priv->ethsw_data->mc_io, 0,
++ port_priv->ethsw_data->dpsw_handle,
++ port_priv->idx, flag);
++ if (err) {
++ netdev_err(port_priv->netdev,
++ "dpsw_fdb_set_learning_mode err %d\n", err);
++ return err;
+ }
++ port_priv->flood = !!flag;
+
-+ /* Check if the VID information is requested */
-+ if (filter_mask & RTEXT_FILTER_BRVLAN) {
-+ err = __nla_put_vlan(skb, netdev, priv, port_priv);
-+ if (err)
-+ goto nla_put_err;
++ return 0;
++}
++
++static int ethsw_port_set_stp_state(struct ethsw_port_priv *port_priv, u8 state)
++{
++ struct dpsw_stp_cfg stp_cfg = {
++ .vlan_id = DEFAULT_VLAN_ID,
++ .state = state,
++ };
++ int err;
++
++ if (!netif_oper_up(port_priv->netdev) || state == port_priv->stp_state)
++ return 0; /* Nothing to do */
++
++ err = dpsw_if_set_stp(port_priv->ethsw_data->mc_io, 0,
++ port_priv->ethsw_data->dpsw_handle,
++ port_priv->idx, &stp_cfg);
++ if (err) {
++ netdev_err(port_priv->netdev,
++ "dpsw_if_set_stp err %d\n", err);
++ return err;
+ }
+
-+ nlmsg_end(skb, nlh);
-+ return skb->len;
++ port_priv->stp_state = state;
+
-+nla_put_err:
-+ nlmsg_cancel(skb, nlh);
-+ return -EMSGSIZE;
++ return 0;
+}
+
-+static int ethsw_dellink_switch(struct ethsw_dev_priv *priv, u16 vid)
++static int ethsw_dellink_switch(struct ethsw_core *ethsw, u16 vid)
+{
-+ struct list_head *pos;
-+ struct ethsw_port_priv *ppriv_local = NULL;
-+ int err = 0;
++ struct ethsw_port_priv *ppriv_local = NULL;
++ int i, err;
+
-+ if (!priv->vlans[vid])
++ if (!ethsw->vlans[vid])
+ return -ENOENT;
+
-+ err = dpsw_vlan_remove(priv->mc_io, 0, priv->dpsw_handle, vid);
++ err = dpsw_vlan_remove(ethsw->mc_io, 0, ethsw->dpsw_handle, vid);
+ if (err) {
-+ netdev_err(priv->netdev, "dpsw_vlan_remove err %d\n", err);
++ dev_err(ethsw->dev, "dpsw_vlan_remove err %d\n", err);
+ return err;
+ }
-+ priv->vlans[vid] = 0;
++ ethsw->vlans[vid] = 0;
+
-+ list_for_each(pos, &priv->port_list) {
-+ ppriv_local = list_entry(pos, struct ethsw_port_priv,
-+ list);
++ for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
++ ppriv_local = ethsw->ports[i];
+ ppriv_local->vlans[vid] = 0;
+ }
+
+ return 0;
+}
+
-+static int ethsw_dellink_port(struct ethsw_dev_priv *priv,
-+ struct ethsw_port_priv *port_priv,
-+ u16 vid)
++static int ethsw_port_fdb_add_uc(struct ethsw_port_priv *port_priv,
++ const unsigned char *addr)
+{
-+ struct list_head *pos;
-+ struct ethsw_port_priv *ppriv_local = NULL;
-+ struct dpsw_vlan_if_cfg vcfg = {
-+ .num_ifs = 1,
-+ .if_id[0] = port_priv->port_index,
-+ };
-+ unsigned int count = 0;
-+ int err = 0;
++ struct dpsw_fdb_unicast_cfg entry = {0};
++ int err;
+
-+ if (!port_priv->vlans[vid])
-+ return -ENOENT;
++ entry.if_egress = port_priv->idx;
++ entry.type = DPSW_FDB_ENTRY_STATIC;
++ ether_addr_copy(entry.mac_addr, addr);
+
-+ /* VLAN will be deleted from switch if global flag is not set
-+ * and is configured on only one port
-+ */
-+ if (!(priv->vlans[vid] & ETHSW_VLAN_GLOBAL)) {
-+ list_for_each(pos, &priv->port_list) {
-+ ppriv_local = list_entry(pos, struct ethsw_port_priv,
-+ list);
-+ if (ppriv_local->vlans[vid] & ETHSW_VLAN_MEMBER)
-+ count++;
-+ }
++ err = dpsw_fdb_add_unicast(port_priv->ethsw_data->mc_io, 0,
++ port_priv->ethsw_data->dpsw_handle,
++ 0, &entry);
++ if (err)
++ netdev_err(port_priv->netdev,
++ "dpsw_fdb_add_unicast err %d\n", err);
++ return err;
++}
+
-+ if (count == 1)
-+ return ethsw_dellink_switch(priv, vid);
-+ }
++static int ethsw_port_fdb_del_uc(struct ethsw_port_priv *port_priv,
++ const unsigned char *addr)
++{
++ struct dpsw_fdb_unicast_cfg entry = {0};
++ int err;
+
-+ err = dpsw_vlan_remove_if(priv->mc_io, 0, priv->dpsw_handle,
-+ vid, &vcfg);
-+ if (err) {
-+ netdev_err(priv->netdev, "dpsw_vlan_remove_if err %d\n", err);
-+ return err;
-+ }
-+ port_priv->vlans[vid] = 0;
-+ return 0;
++ entry.if_egress = port_priv->idx;
++ entry.type = DPSW_FDB_ENTRY_STATIC;
++ ether_addr_copy(entry.mac_addr, addr);
++
++ err = dpsw_fdb_remove_unicast(port_priv->ethsw_data->mc_io, 0,
++ port_priv->ethsw_data->dpsw_handle,
++ 0, &entry);
++ /* Silently ignore repeated del commands for the same entry */
++ if (err && err != -ENXIO)
++ netdev_err(port_priv->netdev,
++ "dpsw_fdb_remove_unicast err %d\n", err);
++ return err;
+}
+
-+static int ethsw_dellink(struct net_device *netdev,
-+ struct nlmsghdr *nlh,
-+ u16 flags)
++static int ethsw_port_fdb_add_mc(struct ethsw_port_priv *port_priv,
++ const unsigned char *addr)
+{
-+ struct nlattr *tb[IFLA_BRIDGE_MAX + 1];
-+ struct nlattr *spec;
-+ struct bridge_vlan_info *vinfo;
-+ struct ethsw_dev_priv *priv;
-+ struct ethsw_port_priv *port_priv = NULL;
-+ int err = 0;
++ struct dpsw_fdb_multicast_cfg entry = {0};
++ int err;
+
-+ spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
-+ if (!spec)
-+ return 0;
++ ether_addr_copy(entry.mac_addr, addr);
++ entry.type = DPSW_FDB_ENTRY_STATIC;
++ entry.num_ifs = 1;
++ entry.if_id[0] = port_priv->idx;
+
-+ err = nla_parse_nested(tb, IFLA_BRIDGE_MAX, spec, ifla_br_policy);
++ err = dpsw_fdb_add_multicast(port_priv->ethsw_data->mc_io, 0,
++ port_priv->ethsw_data->dpsw_handle,
++ 0, &entry);
++ /* Silently ignore repeated add commands for the same entry */
++ if (err && err != -ENXIO)
++ netdev_err(port_priv->netdev, "dpsw_fdb_add_multicast err %d\n",
++ err);
++ return err;
++}
++
++static int ethsw_port_fdb_del_mc(struct ethsw_port_priv *port_priv,
++ const unsigned char *addr)
++{
++ struct dpsw_fdb_multicast_cfg entry = {0};
++ int err;
++
++ ether_addr_copy(entry.mac_addr, addr);
++ entry.type = DPSW_FDB_ENTRY_STATIC;
++ entry.num_ifs = 1;
++ entry.if_id[0] = port_priv->idx;
++
++ err = dpsw_fdb_remove_multicast(port_priv->ethsw_data->mc_io, 0,
++ port_priv->ethsw_data->dpsw_handle,
++ 0, &entry);
++ /* Silently ignore repeated del commands for the same entry */
++ if (err && err != -ENAVAIL)
++ netdev_err(port_priv->netdev,
++ "dpsw_fdb_remove_multicast err %d\n", err);
++ return err;
++}
++
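++/* Build rtnl_link_stats64 from individual DPSW counters; filtered
++ * ingress frames are folded into rx_dropped.
++ */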
++static void port_get_stats(struct net_device *netdev,
++ struct rtnl_link_stats64 *stats)
++{
++ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
++ u64 tmp;
++ int err;
++
++ err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
++ port_priv->ethsw_data->dpsw_handle,
++ port_priv->idx,
++ DPSW_CNT_ING_FRAME, &stats->rx_packets);
+ if (err)
-+ return err;
++ goto error;
+
-+ if (!tb[IFLA_BRIDGE_VLAN_INFO])
-+ return -EOPNOTSUPP;
++ err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
++ port_priv->ethsw_data->dpsw_handle,
++ port_priv->idx,
++ DPSW_CNT_EGR_FRAME, &stats->tx_packets);
++ if (err)
++ goto error;
+
-+ vinfo = nla_data(tb[IFLA_BRIDGE_VLAN_INFO]);
++ err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
++ port_priv->ethsw_data->dpsw_handle,
++ port_priv->idx,
++ DPSW_CNT_ING_BYTE, &stats->rx_bytes);
++ if (err)
++ goto error;
+
-+ if (!vinfo->vid || vinfo->vid > VLAN_VID_MASK)
-+ return -EINVAL;
++ err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
++ port_priv->ethsw_data->dpsw_handle,
++ port_priv->idx,
++ DPSW_CNT_EGR_BYTE, &stats->tx_bytes);
++ if (err)
++ goto error;
+
-+ __get_priv(netdev, &priv, &port_priv);
++ err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
++ port_priv->ethsw_data->dpsw_handle,
++ port_priv->idx,
++ DPSW_CNT_ING_FRAME_DISCARD,
++ &stats->rx_dropped);
++ if (err)
++ goto error;
+
-+ /* decide if command targets switch device or port */
-+ if (!port_priv)
-+ err = ethsw_dellink_switch(priv, vinfo->vid);
-+ else
-+ err = ethsw_dellink_port(priv, port_priv, vinfo->vid);
++ err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
++ port_priv->ethsw_data->dpsw_handle,
++ port_priv->idx,
++ DPSW_CNT_ING_FLTR_FRAME,
++ &tmp);
++ if (err)
++ goto error;
++ stats->rx_dropped += tmp;
+
-+ return err;
++ err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
++ port_priv->ethsw_data->dpsw_handle,
++ port_priv->idx,
++ DPSW_CNT_EGR_FRAME_DISCARD,
++ &stats->tx_dropped);
++ if (err)
++ goto error;
++
++ return;
++
++error:
++ netdev_err(netdev, "dpsw_if_get_counter err %d\n", err);
+}
+
-+static const struct net_device_ops ethsw_ops = {
-+ .ndo_open = &ethsw_open,
-+ .ndo_stop = &ethsw_stop,
++static bool port_has_offload_stats(const struct net_device *netdev,
++ int attr_id)
++{
++ return (attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT);
++}
+
-+ .ndo_bridge_setlink = &ethsw_setlink,
-+ .ndo_bridge_getlink = &ethsw_getlink,
-+ .ndo_bridge_dellink = &ethsw_dellink,
++static int port_get_offload_stats(int attr_id,
++ const struct net_device *netdev,
++ void *sp)
++{
++ switch (attr_id) {
++ case IFLA_OFFLOAD_XSTATS_CPU_HIT:
++ port_get_stats((struct net_device *)netdev, sp);
++ return 0;
++ }
+
-+ .ndo_start_xmit = &ethsw_dropframe,
-+};
++ return -EINVAL;
++}
++
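++/* The MC firmware takes a maximum frame length rather than an MTU,
++ * so convert via ETHSW_L2_MAX_FRM(), which accounts for the L2
++ * header overhead.
++ */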
++static int port_change_mtu(struct net_device *netdev, int mtu)
++{
++ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
++ int err;
++
++ err = dpsw_if_set_max_frame_length(port_priv->ethsw_data->mc_io,
++ 0,
++ port_priv->ethsw_data->dpsw_handle,
++ port_priv->idx,
++ (u16)ETHSW_L2_MAX_FRM(mtu));
++ if (err) {
++ netdev_err(netdev,
++ "dpsw_if_set_max_frame_length() err %d\n", err);
++ return err;
++ }
+
-+/*--------------------------------------------------------------------------- */
-+/* switch port netdevice ops */
++ netdev->mtu = mtu;
++ return 0;
++}
+
-+static int _ethsw_port_carrier_state_sync(struct net_device *netdev)
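++/* Read the DPSW link state and propagate it to the netdev carrier,
++ * caching the last seen state to avoid redundant carrier updates.
++ */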
++static int port_carrier_state_sync(struct net_device *netdev)
+{
-+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
-+ struct dpsw_link_state state;
-+ int err;
++ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
++ struct dpsw_link_state state;
++ int err;
+
-+ err = dpsw_if_get_link_state(port_priv->ethsw_priv->mc_io, 0,
-+ port_priv->ethsw_priv->dpsw_handle,
-+ port_priv->port_index, &state);
-+ if (unlikely(err)) {
++ err = dpsw_if_get_link_state(port_priv->ethsw_data->mc_io, 0,
++ port_priv->ethsw_data->dpsw_handle,
++ port_priv->idx, &state);
++ if (err) {
+ netdev_err(netdev, "dpsw_if_get_link_state() err %d\n", err);
+ return err;
+ }
+
+ WARN_ONCE(state.up > 1, "Garbage read into link_state");
+
-+ if (state.up)
-+ netif_carrier_on(port_priv->netdev);
-+ else
-+ netif_carrier_off(port_priv->netdev);
-+
++ if (state.up != port_priv->link_state) {
++ if (state.up)
++ netif_carrier_on(netdev);
++ else
++ netif_carrier_off(netdev);
++ port_priv->link_state = state.up;
++ }
+ return 0;
+}
+
-+static int ethsw_port_open(struct net_device *netdev)
++static int port_open(struct net_device *netdev)
+{
-+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
-+ int err;
++ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
++ int err;
++
++ /* No need to allow Tx as control interface is disabled */
++ netif_tx_stop_all_queues(netdev);
+
-+ err = dpsw_if_enable(port_priv->ethsw_priv->mc_io, 0,
-+ port_priv->ethsw_priv->dpsw_handle,
-+ port_priv->port_index);
++ err = dpsw_if_enable(port_priv->ethsw_data->mc_io, 0,
++ port_priv->ethsw_data->dpsw_handle,
++ port_priv->idx);
+ if (err) {
+ netdev_err(netdev, "dpsw_if_enable err %d\n", err);
+ return err;
+ }
+
+ /* sync carrier state */
-+ err = _ethsw_port_carrier_state_sync(netdev);
++ err = port_carrier_state_sync(netdev);
+ if (err) {
-+ netdev_err(netdev, "_ethsw_port_carrier_state_sync err %d\n",
-+ err);
++ netdev_err(netdev,
++ "port_carrier_state_sync err %d\n", err);
+ goto err_carrier_sync;
+ }
+
+ return 0;
+
+err_carrier_sync:
-+ dpsw_if_disable(port_priv->ethsw_priv->mc_io, 0,
-+ port_priv->ethsw_priv->dpsw_handle,
-+ port_priv->port_index);
++ dpsw_if_disable(port_priv->ethsw_data->mc_io, 0,
++ port_priv->ethsw_data->dpsw_handle,
++ port_priv->idx);
+ return err;
+}
+
-+static int ethsw_port_stop(struct net_device *netdev)
++static int port_stop(struct net_device *netdev)
+{
-+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
-+ int err;
++ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
++ int err;
+
-+ err = dpsw_if_disable(port_priv->ethsw_priv->mc_io, 0,
-+ port_priv->ethsw_priv->dpsw_handle,
-+ port_priv->port_index);
++ err = dpsw_if_disable(port_priv->ethsw_data->mc_io, 0,
++ port_priv->ethsw_data->dpsw_handle,
++ port_priv->idx);
+ if (err) {
+ netdev_err(netdev, "dpsw_if_disable err %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
-+static int ethsw_port_fdb_add_uc(struct net_device *netdev,
-+ const unsigned char *addr)
-+{
-+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
-+ struct dpsw_fdb_unicast_cfg entry = {0};
-+ int err;
-+
-+ entry.if_egress = port_priv->port_index;
-+ entry.type = DPSW_FDB_ENTRY_STATIC;
-+ ether_addr_copy(entry.mac_addr, addr);
-+
-+ err = dpsw_fdb_add_unicast(port_priv->ethsw_priv->mc_io, 0,
-+ port_priv->ethsw_priv->dpsw_handle,
-+ 0, &entry);
-+ if (err)
-+ netdev_err(netdev, "dpsw_fdb_add_unicast err %d\n", err);
-+ return err;
-+}
-+
-+static int ethsw_port_fdb_del_uc(struct net_device *netdev,
-+ const unsigned char *addr)
++static netdev_tx_t port_dropframe(struct sk_buff *skb,
++ struct net_device *netdev)
+{
-+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
-+ struct dpsw_fdb_unicast_cfg entry = {0};
-+ int err;
-+
-+ entry.if_egress = port_priv->port_index;
-+ entry.type = DPSW_FDB_ENTRY_STATIC;
-+ ether_addr_copy(entry.mac_addr, addr);
++ /* we don't support I/O for now, drop the frame */
++ dev_kfree_skb_any(skb);
+
-+ err = dpsw_fdb_remove_unicast(port_priv->ethsw_priv->mc_io, 0,
-+ port_priv->ethsw_priv->dpsw_handle,
-+ 0, &entry);
-+ if (err)
-+ netdev_err(netdev, "dpsw_fdb_remove_unicast err %d\n", err);
-+ return err;
++ return NETDEV_TX_OK;
+}
+
-+static int ethsw_port_fdb_add_mc(struct net_device *netdev,
-+ const unsigned char *addr)
-+{
-+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
-+ struct dpsw_fdb_multicast_cfg entry = {0};
-+ int err;
-+
-+ ether_addr_copy(entry.mac_addr, addr);
-+ entry.type = DPSW_FDB_ENTRY_STATIC;
-+ entry.num_ifs = 1;
-+ entry.if_id[0] = port_priv->port_index;
++static const struct net_device_ops ethsw_port_ops = {
++ .ndo_open = port_open,
++ .ndo_stop = port_stop,
+
-+ err = dpsw_fdb_add_multicast(port_priv->ethsw_priv->mc_io, 0,
-+ port_priv->ethsw_priv->dpsw_handle,
-+ 0, &entry);
-+ if (err)
-+ netdev_err(netdev, "dpsw_fdb_add_multicast err %d\n", err);
-+ return err;
-+}
++ .ndo_set_mac_address = eth_mac_addr,
++ .ndo_change_mtu = port_change_mtu,
++ .ndo_has_offload_stats = port_has_offload_stats,
++ .ndo_get_offload_stats = port_get_offload_stats,
+
-+static int ethsw_port_fdb_del_mc(struct net_device *netdev,
-+ const unsigned char *addr)
++ .ndo_start_xmit = port_dropframe,
++};
++
++static void ethsw_links_state_update(struct ethsw_core *ethsw)
+{
-+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
-+ struct dpsw_fdb_multicast_cfg entry = {0};
-+ int err;
++ int i;
+
-+ ether_addr_copy(entry.mac_addr, addr);
-+ entry.type = DPSW_FDB_ENTRY_STATIC;
-+ entry.num_ifs = 1;
-+ entry.if_id[0] = port_priv->port_index;
++ for (i = 0; i < ethsw->sw_attr.num_ifs; i++)
++ port_carrier_state_sync(ethsw->ports[i]->netdev);
++}
+
-+ err = dpsw_fdb_remove_multicast(port_priv->ethsw_priv->mc_io, 0,
-+ port_priv->ethsw_priv->dpsw_handle,
-+ 0, &entry);
-+ if (err)
-+ netdev_err(netdev, "dpsw_fdb_remove_multicast err %d\n", err);
-+ return err;
++static irqreturn_t ethsw_irq0_handler(int irq_num, void *arg)
++{
++ return IRQ_WAKE_THREAD;
+}
+
-+static int _lookup_address(struct net_device *netdev, int is_uc,
-+ const unsigned char *addr)
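++/* Threaded half of the interrupt: reading and clearing the DPSW IRQ
++ * status involves MC commands, so it is done outside hard IRQ context.
++ */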
++static irqreturn_t ethsw_irq0_handler_thread(int irq_num, void *arg)
+{
-+ struct netdev_hw_addr *ha;
-+ struct netdev_hw_addr_list *list = (is_uc) ? &netdev->uc : &netdev->mc;
++ struct device *dev = (struct device *)arg;
++ struct ethsw_core *ethsw = dev_get_drvdata(dev);
+
-+ netif_addr_lock_bh(netdev);
-+ list_for_each_entry(ha, &list->list, list) {
-+ if (ether_addr_equal(ha->addr, addr)) {
-+ netif_addr_unlock_bh(netdev);
-+ return 1;
-+ }
++ /* Mask the events and the if_id reserved bits to be cleared on read */
++ u32 status = DPSW_IRQ_EVENT_LINK_CHANGED | 0xFFFF0000;
++ int err;
++
++ err = dpsw_get_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle,
++ DPSW_IRQ_INDEX_IF, &status);
++ if (err) {
++ dev_err(dev, "Can't get irq status (err %d)", err);
++
++ err = dpsw_clear_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle,
++ DPSW_IRQ_INDEX_IF, 0xFFFFFFFF);
++ if (err)
++ dev_err(dev, "Can't clear irq status (err %d)", err);
++ goto out;
+ }
-+ netif_addr_unlock_bh(netdev);
-+ return 0;
++
++ if (status & DPSW_IRQ_EVENT_LINK_CHANGED)
++ ethsw_links_state_update(ethsw);
++
++out:
++ return IRQ_HANDLED;
+}
+
-+static int ethsw_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
-+ struct net_device *netdev,
-+ const unsigned char *addr, u16 vid,
-+ u16 flags)
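++/* Allocate the MC interrupts, install a threaded handler for
++ * DPSW_IRQ_INDEX_IF and unmask only the link state change event.
++ */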
++static int ethsw_setup_irqs(struct fsl_mc_device *sw_dev)
+{
-+ struct list_head *pos;
-+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
-+ struct ethsw_dev_priv *priv = port_priv->ethsw_priv;
++ struct device *dev = &sw_dev->dev;
++ struct ethsw_core *ethsw = dev_get_drvdata(dev);
++ u32 mask = DPSW_IRQ_EVENT_LINK_CHANGED;
++ struct fsl_mc_device_irq *irq;
+ int err;
+
-+ /* TODO: add replace support when added to iproute bridge */
-+ if (!(flags & NLM_F_REQUEST)) {
-+ netdev_err(netdev,
-+ "ethsw_port_fdb_add unexpected flags value %08x\n",
-+ flags);
-+ return -EINVAL;
++ err = fsl_mc_allocate_irqs(sw_dev);
++ if (err) {
++ dev_err(dev, "MC irqs allocation failed\n");
++ return err;
+ }
+
-+ if (is_unicast_ether_addr(addr)) {
-+ /* if entry cannot be replaced, return error if exists */
-+ if (flags & NLM_F_EXCL || flags & NLM_F_APPEND) {
-+ list_for_each(pos, &priv->port_list) {
-+ port_priv = list_entry(pos,
-+ struct ethsw_port_priv,
-+ list);
-+ if (_lookup_address(port_priv->netdev,
-+ 1, addr))
-+ return -EEXIST;
-+ }
-+ }
-+
-+ err = ethsw_port_fdb_add_uc(netdev, addr);
-+ if (err) {
-+ netdev_err(netdev, "ethsw_port_fdb_add_uc err %d\n",
-+ err);
-+ return err;
-+ }
-+
-+ /* we might have replaced an existing entry for a different
-+ * switch port, make sure the address doesn't linger in any
-+ * port address list
-+ */
-+ list_for_each(pos, &priv->port_list) {
-+ port_priv = list_entry(pos, struct ethsw_port_priv,
-+ list);
-+ dev_uc_del(port_priv->netdev, addr);
-+ }
++ if (WARN_ON(sw_dev->obj_desc.irq_count != DPSW_IRQ_NUM)) {
++ err = -EINVAL;
++ goto free_irq;
++ }
+
-+ err = dev_uc_add(netdev, addr);
-+ if (err) {
-+ netdev_err(netdev, "dev_uc_add err %d\n", err);
-+ return err;
-+ }
-+ } else {
-+ struct dpsw_fdb_multicast_cfg entry = {
-+ .type = DPSW_FDB_ENTRY_STATIC,
-+ .num_ifs = 0,
-+ };
++ err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle,
++ DPSW_IRQ_INDEX_IF, 0);
++ if (err) {
++ dev_err(dev, "dpsw_set_irq_enable err %d\n", err);
++ goto free_irq;
++ }
+
-+ /* check if address is already set on this port */
-+ if (_lookup_address(netdev, 0, addr))
-+ return -EEXIST;
++ irq = sw_dev->irqs[DPSW_IRQ_INDEX_IF];
+
-+ /* check if the address exists on other port */
-+ ether_addr_copy(entry.mac_addr, addr);
-+ err = dpsw_fdb_get_multicast(priv->mc_io, 0, priv->dpsw_handle,
-+ 0, &entry);
-+ if (!err) {
-+ /* entry exists, can we replace it? */
-+ if (flags & NLM_F_EXCL)
-+ return -EEXIST;
-+ } else if (err != -ENAVAIL) {
-+ netdev_err(netdev, "dpsw_fdb_get_unicast err %d\n",
-+ err);
-+ return err;
-+ }
++ err = devm_request_threaded_irq(dev, irq->msi_desc->irq,
++ ethsw_irq0_handler,
++ ethsw_irq0_handler_thread,
++ IRQF_NO_SUSPEND | IRQF_ONESHOT,
++ dev_name(dev), dev);
++ if (err) {
++ dev_err(dev, "devm_request_threaded_irq(): %d", err);
++ goto free_irq;
++ }
+
-+ err = ethsw_port_fdb_add_mc(netdev, addr);
-+ if (err) {
-+ netdev_err(netdev, "ethsw_port_fdb_add_mc err %d\n",
-+ err);
-+ return err;
-+ }
++ err = dpsw_set_irq_mask(ethsw->mc_io, 0, ethsw->dpsw_handle,
++ DPSW_IRQ_INDEX_IF, mask);
++ if (err) {
++ dev_err(dev, "dpsw_set_irq_mask(): %d", err);
++ goto free_devm_irq;
++ }
+
-+ err = dev_mc_add(netdev, addr);
-+ if (err) {
-+ netdev_err(netdev, "dev_mc_add err %d\n", err);
-+ return err;
-+ }
++ err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle,
++ DPSW_IRQ_INDEX_IF, 1);
++ if (err) {
++ dev_err(dev, "dpsw_set_irq_enable(): %d", err);
++ goto free_devm_irq;
+ }
+
+ return 0;
++
++free_devm_irq:
++ devm_free_irq(dev, irq->msi_desc->irq, dev);
++free_irq:
++ fsl_mc_free_irqs(sw_dev);
++ return err;
+}
+
-+static int ethsw_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
-+ struct net_device *netdev,
-+ const unsigned char *addr, u16 vid)
++static void ethsw_teardown_irqs(struct fsl_mc_device *sw_dev)
+{
++ struct device *dev = &sw_dev->dev;
++ struct ethsw_core *ethsw = dev_get_drvdata(dev);
++ struct fsl_mc_device_irq *irq;
+ int err;
+
-+ if (is_unicast_ether_addr(addr)) {
-+ err = ethsw_port_fdb_del_uc(netdev, addr);
-+ if (err) {
-+ netdev_err(netdev, "ethsw_port_fdb_del_uc err %d\n",
-+ err);
-+ return err;
-+ }
++ irq = sw_dev->irqs[DPSW_IRQ_INDEX_IF];
++ err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle,
++ DPSW_IRQ_INDEX_IF, 0);
++ if (err)
++ dev_err(dev, "dpsw_set_irq_enable err %d\n", err);
+
-+ /* also delete if configured on port */
-+ err = dev_uc_del(netdev, addr);
-+ if (err && err != -ENOENT) {
-+ netdev_err(netdev, "dev_uc_del err %d\n", err);
-+ return err;
-+ }
-+ } else {
-+ if (!_lookup_address(netdev, 0, addr))
-+ return -ENOENT;
++ fsl_mc_free_irqs(sw_dev);
++}
+
-+ err = dev_mc_del(netdev, addr);
-+ if (err) {
-+ netdev_err(netdev, "dev_mc_del err %d\n", err);
-+ return err;
-+ }
++static int swdev_port_attr_get(struct net_device *netdev,
++ struct switchdev_attr *attr)
++{
++ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+
-+ err = ethsw_port_fdb_del_mc(netdev, addr);
-+ if (err) {
-+ netdev_err(netdev, "ethsw_port_fdb_del_mc err %d\n",
-+ err);
-+ return err;
-+ }
++ switch (attr->id) {
++ case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
++ attr->u.ppid.id_len = 1;
++ attr->u.ppid.id[0] = port_priv->ethsw_data->dev_id;
++ break;
++ case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
++ attr->u.brport_flags =
++ (port_priv->ethsw_data->learning ? BR_LEARNING : 0) |
++ (port_priv->flood ? BR_FLOOD : 0);
++ break;
++ case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS_SUPPORT:
++ attr->u.brport_flags_support = BR_LEARNING | BR_FLOOD;
++ break;
++ default:
++ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
-+void ethsw_port_get_stats(struct net_device *netdev,
-+ struct rtnl_link_stats64 *storage)
++static int port_attr_stp_state_set(struct net_device *netdev,
++ struct switchdev_trans *trans,
++ u8 state)
+{
-+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
-+ u64 tmp;
-+ int err;
-+
-+ err = dpsw_if_get_counter(port_priv->ethsw_priv->mc_io, 0,
-+ port_priv->ethsw_priv->dpsw_handle,
-+ port_priv->port_index,
-+ DPSW_CNT_ING_FRAME, &storage->rx_packets);
-+ if (err)
-+ goto error;
-+
-+ err = dpsw_if_get_counter(port_priv->ethsw_priv->mc_io, 0,
-+ port_priv->ethsw_priv->dpsw_handle,
-+ port_priv->port_index,
-+ DPSW_CNT_EGR_FRAME, &storage->tx_packets);
-+ if (err)
-+ goto error;
++ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+
-+ err = dpsw_if_get_counter(port_priv->ethsw_priv->mc_io, 0,
-+ port_priv->ethsw_priv->dpsw_handle,
-+ port_priv->port_index,
-+ DPSW_CNT_ING_BYTE, &storage->rx_bytes);
-+ if (err)
-+ goto error;
++ if (switchdev_trans_ph_prepare(trans))
++ return 0;
+
-+ err = dpsw_if_get_counter(port_priv->ethsw_priv->mc_io, 0,
-+ port_priv->ethsw_priv->dpsw_handle,
-+ port_priv->port_index,
-+ DPSW_CNT_EGR_BYTE, &storage->tx_bytes);
-+ if (err)
-+ goto error;
++ return ethsw_port_set_stp_state(port_priv, state);
++}
+
-+ err = dpsw_if_get_counter(port_priv->ethsw_priv->mc_io, 0,
-+ port_priv->ethsw_priv->dpsw_handle,
-+ port_priv->port_index,
-+ DPSW_CNT_ING_FRAME_DISCARD,
-+ &storage->rx_dropped);
-+ if (err)
-+ goto error;
++static int port_attr_br_flags_set(struct net_device *netdev,
++ struct switchdev_trans *trans,
++ unsigned long flags)
++{
++ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
++ int err = 0;
+
-+ err = dpsw_if_get_counter(port_priv->ethsw_priv->mc_io, 0,
-+ port_priv->ethsw_priv->dpsw_handle,
-+ port_priv->port_index,
-+ DPSW_CNT_ING_FLTR_FRAME,
-+ &tmp);
-+ if (err)
-+ goto error;
-+ storage->rx_dropped += tmp;
++ if (switchdev_trans_ph_prepare(trans))
++ return 0;
+
-+ err = dpsw_if_get_counter(port_priv->ethsw_priv->mc_io, 0,
-+ port_priv->ethsw_priv->dpsw_handle,
-+ port_priv->port_index,
-+ DPSW_CNT_EGR_FRAME_DISCARD,
-+ &storage->tx_dropped);
++ /* Learning is enabled per switch */
++ err = ethsw_set_learning(port_priv->ethsw_data, !!(flags & BR_LEARNING));
+ if (err)
-+ goto error;
++ goto exit;
+
-+ return;
++ err = ethsw_port_set_flood(port_priv, !!(flags & BR_FLOOD));
+
-+error:
-+ netdev_err(netdev, "dpsw_if_get_counter err %d\n", err);
++exit:
++ return err;
+}
+
-+static int ethsw_port_change_mtu(struct net_device *netdev, int mtu)
++static int swdev_port_attr_set(struct net_device *netdev,
++ const struct switchdev_attr *attr,
++ struct switchdev_trans *trans)
+{
-+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
-+ int err;
-+
-+ if (mtu < ETH_ZLEN || mtu > ETHSW_MAX_FRAME_LENGTH) {
-+ netdev_err(netdev, "Invalid MTU %d. Valid range is: %d..%d\n",
-+ mtu, ETH_ZLEN, ETHSW_MAX_FRAME_LENGTH);
-+ return -EINVAL;
-+ }
++ int err = 0;
+
-+ err = dpsw_if_set_max_frame_length(port_priv->ethsw_priv->mc_io,
-+ 0,
-+ port_priv->ethsw_priv->dpsw_handle,
-+ port_priv->port_index,
-+ (u16)ETHSW_L2_MAX_FRM(mtu));
-+ if (err) {
-+ netdev_err(netdev,
-+ "dpsw_if_set_max_frame_length() err %d\n", err);
-+ return err;
++ switch (attr->id) {
++ case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
++ err = port_attr_stp_state_set(netdev, trans,
++ attr->u.stp_state);
++ break;
++ case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
++ err = port_attr_br_flags_set(netdev, trans,
++ attr->u.brport_flags);
++ break;
++ case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
++ /* VLANs are supported by default */
++ break;
++ default:
++ err = -EOPNOTSUPP;
++ break;
+ }
+
-+ netdev->mtu = mtu;
-+ return 0;
++ return err;
+}
+
-+static const struct net_device_ops ethsw_port_ops = {
-+ .ndo_open = &ethsw_port_open,
-+ .ndo_stop = &ethsw_port_stop,
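++/* Add the port to each VLAN in the requested range, registering any
++ * VLAN not yet known to the switch as a new global VLAN first.
++ */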
++static int port_vlans_add(struct net_device *netdev,
++ const struct switchdev_obj_port_vlan *vlan,
++ struct switchdev_trans *trans)
++{
++ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
++ int vid, err;
+
-+ .ndo_fdb_add = &ethsw_port_fdb_add,
-+ .ndo_fdb_del = &ethsw_port_fdb_del,
-+ .ndo_fdb_dump = &ndo_dflt_fdb_dump,
++ if (switchdev_trans_ph_prepare(trans))
++ return 0;
+
-+ .ndo_get_stats64 = &ethsw_port_get_stats,
-+ .ndo_change_mtu = &ethsw_port_change_mtu,
++ for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
++ if (!port_priv->ethsw_data->vlans[vid]) {
++ /* this is a new VLAN */
++ err = ethsw_add_vlan(port_priv->ethsw_data, vid);
++ if (err)
++ return err;
+
-+ .ndo_start_xmit = &ethsw_dropframe,
-+};
++ port_priv->ethsw_data->vlans[vid] |= ETHSW_VLAN_GLOBAL;
++ }
++ err = ethsw_port_add_vlan(port_priv, vid, vlan->flags);
++ if (err)
++ break;
++ }
+
-+static void ethsw_get_drvinfo(struct net_device *netdev,
-+ struct ethtool_drvinfo *drvinfo)
++ return err;
++}
++
++static int swdev_port_obj_add(struct net_device *netdev,
++ const struct switchdev_obj *obj,
++ struct switchdev_trans *trans)
+{
-+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
-+ u16 version_major, version_minor;
+ int err;
+
-+ strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
-+ strlcpy(drvinfo->version, ethsw_drv_version, sizeof(drvinfo->version));
-+
-+ err = dpsw_get_api_version(port_priv->ethsw_priv->mc_io, 0,
-+ &version_major,
-+ &version_minor);
-+ if (err)
-+ strlcpy(drvinfo->fw_version, "N/A",
-+ sizeof(drvinfo->fw_version));
-+ else
-+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
-+ "%u.%u", version_major, version_minor);
++ switch (obj->id) {
++ case SWITCHDEV_OBJ_ID_PORT_VLAN:
++ err = port_vlans_add(netdev,
++ SWITCHDEV_OBJ_PORT_VLAN(obj),
++ trans);
++ break;
++ default:
++ err = -EOPNOTSUPP;
++ break;
++ }
+
-+ strlcpy(drvinfo->bus_info, dev_name(netdev->dev.parent->parent),
-+ sizeof(drvinfo->bus_info));
++ return err;
+}
+
-+static int ethsw_get_settings(struct net_device *netdev,
-+ struct ethtool_cmd *cmd)
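++/* Remove the port from a VLAN, dropping its PVID and untagged flags
++ * first; once no port is left a member, the VLAN is also removed from
++ * the switch itself.
++ */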
++static int ethsw_port_del_vlan(struct ethsw_port_priv *port_priv, u16 vid)
+{
-+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
-+ struct dpsw_link_state state = {0};
-+ int err = 0;
++ struct ethsw_core *ethsw = port_priv->ethsw_data;
++ struct net_device *netdev = port_priv->netdev;
++ struct dpsw_vlan_if_cfg vcfg;
++ int i, err;
+
-+ err = dpsw_if_get_link_state(port_priv->ethsw_priv->mc_io, 0,
-+ port_priv->ethsw_priv->dpsw_handle,
-+ port_priv->port_index,
-+ &state);
-+ if (err) {
-+ netdev_err(netdev, "ERROR %d getting link state", err);
-+ goto out;
++ if (!port_priv->vlans[vid])
++ return -ENOENT;
++
++ if (port_priv->vlans[vid] & ETHSW_VLAN_PVID) {
++ err = ethsw_port_set_pvid(port_priv, 0);
++ if (err)
++ return err;
++ }
++
++ vcfg.num_ifs = 1;
++ vcfg.if_id[0] = port_priv->idx;
++ if (port_priv->vlans[vid] & ETHSW_VLAN_UNTAGGED) {
++ err = dpsw_vlan_remove_if_untagged(ethsw->mc_io, 0,
++ ethsw->dpsw_handle,
++ vid, &vcfg);
++ if (err) {
++ netdev_err(netdev,
++ "dpsw_vlan_remove_if_untagged err %d\n",
++ err);
++ }
++ port_priv->vlans[vid] &= ~ETHSW_VLAN_UNTAGGED;
+ }
+
-+ /* At the moment, we have no way of interrogating the DPMAC
-+ * from the DPSW side or there may not exist a DPMAC at all.
-+ * Report only autoneg state, duplexity and speed.
-+ */
-+ if (state.options & DPSW_LINK_OPT_AUTONEG)
-+ cmd->autoneg = AUTONEG_ENABLE;
-+ if (!(state.options & DPSW_LINK_OPT_HALF_DUPLEX))
-+ cmd->autoneg = DUPLEX_FULL;
-+ ethtool_cmd_speed_set(cmd, state.rate);
++ if (port_priv->vlans[vid] & ETHSW_VLAN_MEMBER) {
++ err = dpsw_vlan_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
++ vid, &vcfg);
++ if (err) {
++ netdev_err(netdev,
++ "dpsw_vlan_remove_if err %d\n", err);
++ return err;
++ }
++ port_priv->vlans[vid] &= ~ETHSW_VLAN_MEMBER;
++
++ /* Delete VLAN from switch if it is no longer configured on
++ * any port
++ */
++ for (i = 0; i < ethsw->sw_attr.num_ifs; i++)
++ if (ethsw->ports[i]->vlans[vid] & ETHSW_VLAN_MEMBER)
++ return 0; /* Found a port member in VID */
++
++ ethsw->vlans[vid] &= ~ETHSW_VLAN_GLOBAL;
+
-+out:
-+ return err;
++ err = ethsw_dellink_switch(ethsw, vid);
++ if (err)
++ return err;
++ }
++
++ return 0;
+}
+
-+static int ethsw_set_settings(struct net_device *netdev,
-+ struct ethtool_cmd *cmd)
++static int port_vlans_del(struct net_device *netdev,
++ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
-+ struct dpsw_link_state state = {0};
-+ struct dpsw_link_cfg cfg = {0};
-+ int err = 0;
-+
-+ netdev_dbg(netdev, "Setting link parameters...");
-+
-+ err = dpsw_if_get_link_state(port_priv->ethsw_priv->mc_io, 0,
-+ port_priv->ethsw_priv->dpsw_handle,
-+ port_priv->port_index,
-+ &state);
-+ if (err) {
-+ netdev_err(netdev, "ERROR %d getting link state", err);
-+ goto out;
-+ }
++ int vid, err;
+
-+ /* Due to a temporary MC limitation, the DPSW port must be down
-+ * in order to be able to change link settings. Taking steps to let
-+ * the user know that.
-+ */
-+ if (netif_running(netdev)) {
-+ netdev_info(netdev,
-+ "Sorry, interface must be brought down first.\n");
-+ return -EACCES;
++ for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
++ err = ethsw_port_del_vlan(port_priv, vid);
++ if (err)
++ break;
+ }
+
-+ cfg.options = state.options;
-+ cfg.rate = ethtool_cmd_speed(cmd);
-+ if (cmd->autoneg == AUTONEG_ENABLE)
-+ cfg.options |= DPSW_LINK_OPT_AUTONEG;
-+ else
-+ cfg.options &= ~DPSW_LINK_OPT_AUTONEG;
-+ if (cmd->duplex == DUPLEX_HALF)
-+ cfg.options |= DPSW_LINK_OPT_HALF_DUPLEX;
-+ else
-+ cfg.options &= ~DPSW_LINK_OPT_HALF_DUPLEX;
++ return err;
++}
+
-+ err = dpsw_if_set_link_cfg(port_priv->ethsw_priv->mc_io, 0,
-+ port_priv->ethsw_priv->dpsw_handle,
-+ port_priv->port_index,
-+ &cfg);
-+ if (err)
-+ /* ethtool will be loud enough if we return an error; no point
-+ * in putting our own error message on the console by default
-+ */
-+ netdev_dbg(netdev, "ERROR %d setting link cfg", err);
++static int swdev_port_obj_del(struct net_device *netdev,
++ const struct switchdev_obj *obj)
++{
++ int err;
+
-+out:
++ switch (obj->id) {
++ case SWITCHDEV_OBJ_ID_PORT_VLAN:
++ err = port_vlans_del(netdev, SWITCHDEV_OBJ_PORT_VLAN(obj));
++ break;
++ default:
++ err = -EOPNOTSUPP;
++ break;
++ }
+ return err;
+}
+
-+static struct {
-+ enum dpsw_counter id;
-+ char name[ETH_GSTRING_LEN];
-+} ethsw_ethtool_counters[] = {
-+ {DPSW_CNT_ING_FRAME, "rx frames"},
-+ {DPSW_CNT_ING_BYTE, "rx bytes"},
-+ {DPSW_CNT_ING_FLTR_FRAME, "rx filtered frames"},
-+ {DPSW_CNT_ING_FRAME_DISCARD, "rx discarded frames"},
-+ {DPSW_CNT_ING_BCAST_FRAME, "rx b-cast frames"},
-+ {DPSW_CNT_ING_BCAST_BYTES, "rx b-cast bytes"},
-+ {DPSW_CNT_ING_MCAST_FRAME, "rx m-cast frames"},
-+ {DPSW_CNT_ING_MCAST_BYTE, "rx m-cast bytes"},
-+ {DPSW_CNT_EGR_FRAME, "tx frames"},
-+ {DPSW_CNT_EGR_BYTE, "tx bytes"},
-+ {DPSW_CNT_EGR_FRAME_DISCARD, "tx discarded frames"},
-+
++static const struct switchdev_ops ethsw_port_switchdev_ops = {
++ .switchdev_port_attr_get = swdev_port_attr_get,
++ .switchdev_port_attr_set = swdev_port_attr_set,
++ .switchdev_port_obj_add = swdev_port_obj_add,
++ .switchdev_port_obj_del = swdev_port_obj_del,
+};
+
-+static int ethsw_ethtool_get_sset_count(struct net_device *dev, int sset)
++/* For the moment, only flood setting needs to be updated */
++static int port_bridge_join(struct net_device *netdev)
+{
-+ switch (sset) {
-+ case ETH_SS_STATS:
-+ return ARRAY_SIZE(ethsw_ethtool_counters);
-+ default:
-+ return -EOPNOTSUPP;
-+ }
++ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
++
++ /* Enable flooding */
++ return ethsw_port_set_flood(port_priv, 1);
+}
+
-+static void ethsw_ethtool_get_strings(struct net_device *netdev,
-+ u32 stringset, u8 *data)
++static int port_bridge_leave(struct net_device *netdev)
+{
-+ u32 i;
++ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+
-+ switch (stringset) {
-+ case ETH_SS_STATS:
-+ for (i = 0; i < ARRAY_SIZE(ethsw_ethtool_counters); i++)
-+ memcpy(data + i * ETH_GSTRING_LEN,
-+ ethsw_ethtool_counters[i].name, ETH_GSTRING_LEN);
-+ break;
-+ }
++ /* Disable flooding */
++ return ethsw_port_set_flood(port_priv, 0);
+}
+
-+static void ethsw_ethtool_get_stats(struct net_device *netdev,
-+ struct ethtool_stats *stats,
-+ u64 *data)
++static int port_netdevice_event(struct notifier_block *unused,
++ unsigned long event, void *ptr)
+{
-+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
-+ u32 i;
-+ int err;
++ struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
++ struct netdev_notifier_changeupper_info *info = ptr;
++ struct net_device *upper_dev;
++ int err = 0;
+
-+ for (i = 0; i < ARRAY_SIZE(ethsw_ethtool_counters); i++) {
-+ err = dpsw_if_get_counter(port_priv->ethsw_priv->mc_io, 0,
-+ port_priv->ethsw_priv->dpsw_handle,
-+ port_priv->port_index,
-+ ethsw_ethtool_counters[i].id,
-+ &data[i]);
-+ if (err)
-+ netdev_err(netdev, "dpsw_if_get_counter[%s] err %d\n",
-+ ethsw_ethtool_counters[i].name, err);
++ if (netdev->netdev_ops != &ethsw_port_ops)
++ return NOTIFY_DONE;
++
++ /* Handle just upper dev link/unlink for the moment */
++ if (event == NETDEV_CHANGEUPPER) {
++ upper_dev = info->upper_dev;
++ if (netif_is_bridge_master(upper_dev)) {
++ if (info->linking)
++ err = port_bridge_join(netdev);
++ else
++ err = port_bridge_leave(netdev);
++ }
+ }
++
++ return notifier_from_errno(err);
+}
+
-+static const struct ethtool_ops ethsw_port_ethtool_ops = {
-+ .get_drvinfo = &ethsw_get_drvinfo,
-+ .get_link = &ethtool_op_get_link,
-+ .get_settings = &ethsw_get_settings,
-+ .set_settings = &ethsw_set_settings,
-+ .get_strings = &ethsw_ethtool_get_strings,
-+ .get_ethtool_stats = &ethsw_ethtool_get_stats,
-+ .get_sset_count = &ethsw_ethtool_get_sset_count,
++static struct notifier_block port_nb __read_mostly = {
++ .notifier_call = port_netdevice_event,
+};
+
-+/* -------------------------------------------------------------------------- */
-+/* ethsw driver functions */
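++/* FDB add/del notifications arrive in atomic context, while programming
++ * the hardware FDB requires MC commands that may sleep; defer the actual
++ * update to process context via the ethsw_owq workqueue.
++ */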
++struct ethsw_switchdev_event_work {
++ struct work_struct work;
++ struct switchdev_notifier_fdb_info fdb_info;
++ struct net_device *dev;
++ unsigned long event;
++};
+
-+static int ethsw_links_state_update(struct ethsw_dev_priv *priv)
++static void ethsw_switchdev_event_work(struct work_struct *work)
+{
-+ struct list_head *pos;
-+ struct ethsw_port_priv *port_priv;
-+ int err;
-+
-+ list_for_each(pos, &priv->port_list) {
-+ port_priv = list_entry(pos, struct ethsw_port_priv,
-+ list);
++ struct ethsw_switchdev_event_work *switchdev_work =
++ container_of(work, struct ethsw_switchdev_event_work, work);
++ struct net_device *dev = switchdev_work->dev;
++ struct switchdev_notifier_fdb_info *fdb_info;
++ struct ethsw_port_priv *port_priv;
+
-+ err = _ethsw_port_carrier_state_sync(port_priv->netdev);
-+ if (err)
-+ netdev_err(port_priv->netdev,
-+ "_ethsw_port_carrier_state_sync err %d\n",
-+ err);
++ rtnl_lock();
++ port_priv = netdev_priv(dev);
++ fdb_info = &switchdev_work->fdb_info;
++
++ switch (switchdev_work->event) {
++ case SWITCHDEV_FDB_ADD_TO_DEVICE:
++ if (is_unicast_ether_addr(fdb_info->addr))
++ ethsw_port_fdb_add_uc(netdev_priv(dev), fdb_info->addr);
++ else
++ ethsw_port_fdb_add_mc(netdev_priv(dev), fdb_info->addr);
++ break;
++ case SWITCHDEV_FDB_DEL_TO_DEVICE:
++ if (is_unicast_ether_addr(fdb_info->addr))
++ ethsw_port_fdb_del_uc(netdev_priv(dev), fdb_info->addr);
++ else
++ ethsw_port_fdb_del_mc(netdev_priv(dev), fdb_info->addr);
++ break;
+ }
+
-+ return 0;
++ rtnl_unlock();
++ kfree(switchdev_work->fdb_info.addr);
++ kfree(switchdev_work);
++ dev_put(dev);
+}
+
-+static irqreturn_t ethsw_irq0_handler(int irq_num, void *arg)
++/* Called under rcu_read_lock() */
++static int port_switchdev_event(struct notifier_block *unused,
++ unsigned long event, void *ptr)
+{
-+ return IRQ_WAKE_THREAD;
-+}
++ struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
++ struct ethsw_switchdev_event_work *switchdev_work;
++ struct switchdev_notifier_fdb_info *fdb_info = ptr;
+
-+static irqreturn_t _ethsw_irq0_handler_thread(int irq_num, void *arg)
-+{
-+ struct device *dev = (struct device *)arg;
-+ struct net_device *netdev = dev_get_drvdata(dev);
-+ struct ethsw_dev_priv *priv = netdev_priv(netdev);
++ switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
++ if (!switchdev_work)
++ return NOTIFY_BAD;
+
-+ struct fsl_mc_io *io = priv->mc_io;
-+ u16 token = priv->dpsw_handle;
-+ int irq_index = DPSW_IRQ_INDEX_IF;
++ INIT_WORK(&switchdev_work->work, ethsw_switchdev_event_work);
++ switchdev_work->dev = dev;
++ switchdev_work->event = event;
+
-+ /* Mask the events and the if_id reserved bits to be cleared on read */
-+ u32 status = DPSW_IRQ_EVENT_LINK_CHANGED | 0xFFFF0000;
-+ int err;
++ switch (event) {
++ case SWITCHDEV_FDB_ADD_TO_DEVICE:
++ case SWITCHDEV_FDB_DEL_TO_DEVICE:
++ memcpy(&switchdev_work->fdb_info, ptr,
++ sizeof(switchdev_work->fdb_info));
++ switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
++ if (!switchdev_work->fdb_info.addr)
++ goto err_addr_alloc;
+
-+ err = dpsw_get_irq_status(io, 0, token, irq_index, &status);
-+ if (unlikely(err)) {
-+ netdev_err(netdev, "Can't get irq status (err %d)", err);
++ ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
++ fdb_info->addr);
+
-+ err = dpsw_clear_irq_status(io, 0, token, irq_index,
-+ 0xFFFFFFFF);
-+ if (unlikely(err))
-+ netdev_err(netdev, "Can't clear irq status (err %d)",
-+ err);
-+ goto out;
++ /* Hold the device so it is not freed while the work is pending */
++ dev_hold(dev);
++ break;
++ default:
++ return NOTIFY_DONE;
+ }
+
-+ if (status & DPSW_IRQ_EVENT_LINK_CHANGED) {
-+ err = ethsw_links_state_update(priv);
-+ if (unlikely(err))
-+ goto out;
-+ }
++ queue_work(ethsw_owq, &switchdev_work->work);
+
-+out:
-+ return IRQ_HANDLED;
++ return NOTIFY_DONE;
++
++err_addr_alloc:
++ kfree(switchdev_work);
++ return NOTIFY_BAD;
+}
+
-+static int ethsw_setup_irqs(struct fsl_mc_device *sw_dev)
++static struct notifier_block port_switchdev_nb = {
++ .notifier_call = port_switchdev_event,
++};
++
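++/* Register the netdev and switchdev notifiers shared by all ports */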
++static int ethsw_register_notifier(struct device *dev)
+{
-+ struct device *dev = &sw_dev->dev;
-+ struct net_device *netdev = dev_get_drvdata(dev);
-+ struct ethsw_dev_priv *priv = netdev_priv(netdev);
-+ int err = 0;
-+ struct fsl_mc_device_irq *irq;
-+ const int irq_index = DPSW_IRQ_INDEX_IF;
-+ u32 mask = DPSW_IRQ_EVENT_LINK_CHANGED;
++ int err;
+
-+ err = fsl_mc_allocate_irqs(sw_dev);
-+ if (unlikely(err)) {
-+ dev_err(dev, "MC irqs allocation failed\n");
++ err = register_netdevice_notifier(&port_nb);
++ if (err) {
++ dev_err(dev, "Failed to register netdev notifier\n");
+ return err;
+ }
+
-+ if (WARN_ON(sw_dev->obj_desc.irq_count != DPSW_MAX_IRQ_NUM)) {
-+ err = -EINVAL;
-+ goto free_irq;
++ err = register_switchdev_notifier(&port_switchdev_nb);
++ if (err) {
++ dev_err(dev, "Failed to register switchdev notifier\n");
++ goto err_switchdev_nb;
+ }
+
-+ err = dpsw_set_irq_enable(priv->mc_io, 0, priv->dpsw_handle,
-+ irq_index, 0);
-+ if (unlikely(err)) {
-+ dev_err(dev, "dpsw_set_irq_enable err %d\n", err);
-+ goto free_irq;
-+ }
++ return 0;
+
-+ irq = sw_dev->irqs[irq_index];
++err_switchdev_nb:
++ unregister_netdevice_notifier(&port_nb);
++ return err;
++}
+
-+ err = devm_request_threaded_irq(dev, irq->msi_desc->irq,
-+ ethsw_irq0_handler,
-+ _ethsw_irq0_handler_thread,
-+ IRQF_NO_SUSPEND | IRQF_ONESHOT,
-+ dev_name(dev), dev);
-+ if (unlikely(err)) {
-+ dev_err(dev, "devm_request_threaded_irq(): %d", err);
-+ goto free_irq;
-+ }
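++/* Enable the DPSW object and bring up every port netdev */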
++static int ethsw_open(struct ethsw_core *ethsw)
++{
++ struct ethsw_port_priv *port_priv = NULL;
++ int i, err;
+
-+ err = dpsw_set_irq_mask(priv->mc_io, 0, priv->dpsw_handle,
-+ irq_index, mask);
-+ if (unlikely(err)) {
-+ dev_err(dev, "dpsw_set_irq_mask(): %d", err);
-+ goto free_devm_irq;
++ err = dpsw_enable(ethsw->mc_io, 0, ethsw->dpsw_handle);
++ if (err) {
++ dev_err(ethsw->dev, "dpsw_enable err %d\n", err);
++ return err;
+ }
+
-+ err = dpsw_set_irq_enable(priv->mc_io, 0, priv->dpsw_handle,
-+ irq_index, 1);
-+ if (unlikely(err)) {
-+ dev_err(dev, "dpsw_set_irq_enable(): %d", err);
-+ goto free_devm_irq;
++ for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
++ port_priv = ethsw->ports[i];
++ err = dev_open(port_priv->netdev);
++ if (err) {
++ netdev_err(port_priv->netdev, "dev_open err %d\n", err);
++ return err;
++ }
+ }
+
+ return 0;
-+
-+free_devm_irq:
-+ devm_free_irq(dev, irq->msi_desc->irq, dev);
-+free_irq:
-+ fsl_mc_free_irqs(sw_dev);
-+ return err;
+}
+
-+static void ethsw_teardown_irqs(struct fsl_mc_device *sw_dev)
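++/* Bring down every port netdev, then disable the DPSW object */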
++static int ethsw_stop(struct ethsw_core *ethsw)
+{
-+ struct device *dev = &sw_dev->dev;
-+ struct net_device *netdev = dev_get_drvdata(dev);
-+ struct ethsw_dev_priv *priv = netdev_priv(netdev);
++ struct ethsw_port_priv *port_priv = NULL;
++ int i, err;
+
-+ dpsw_set_irq_enable(priv->mc_io, 0, priv->dpsw_handle,
-+ DPSW_IRQ_INDEX_IF, 0);
-+ devm_free_irq(dev,
-+ sw_dev->irqs[DPSW_IRQ_INDEX_IF]->msi_desc->irq,
-+ dev);
-+ fsl_mc_free_irqs(sw_dev);
++ for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
++ port_priv = ethsw->ports[i];
++ dev_close(port_priv->netdev);
++ }
++
++ err = dpsw_disable(ethsw->mc_io, 0, ethsw->dpsw_handle);
++ if (err) {
++ dev_err(ethsw->dev, "dpsw_disable err %d\n", err);
++ return err;
++ }
++
++ return 0;
+}
+
-+static int __cold
-+ethsw_init(struct fsl_mc_device *sw_dev)
++static int ethsw_init(struct fsl_mc_device *sw_dev)
+{
-+ struct device *dev = &sw_dev->dev;
-+ struct ethsw_dev_priv *priv;
-+ struct net_device *netdev;
-+ int err = 0;
-+ u16 i;
-+ u16 version_major, version_minor;
-+ const struct dpsw_stp_cfg stp_cfg = {
-+ .vlan_id = 1,
-+ .state = DPSW_STP_STATE_FORWARDING,
-+ };
-+
-+ netdev = dev_get_drvdata(dev);
-+ priv = netdev_priv(netdev);
++ struct device *dev = &sw_dev->dev;
++ struct ethsw_core *ethsw = dev_get_drvdata(dev);
++ u16 version_major, version_minor, i;
++ struct dpsw_stp_cfg stp_cfg;
++ int err;
+
-+ priv->dev_id = sw_dev->obj_desc.id;
++ ethsw->dev_id = sw_dev->obj_desc.id;
+
-+ err = dpsw_open(priv->mc_io, 0, priv->dev_id, &priv->dpsw_handle);
++ err = dpsw_open(ethsw->mc_io, 0, ethsw->dev_id, &ethsw->dpsw_handle);
+ if (err) {
+ dev_err(dev, "dpsw_open err %d\n", err);
-+ goto err_exit;
-+ }
-+ if (!priv->dpsw_handle) {
-+ dev_err(dev, "dpsw_open returned null handle but no error\n");
-+ err = -EFAULT;
-+ goto err_exit;
++ return err;
+ }
+
-+ err = dpsw_get_attributes(priv->mc_io, 0, priv->dpsw_handle,
-+ &priv->sw_attr);
++ err = dpsw_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle,
++ &ethsw->sw_attr);
+ if (err) {
+ dev_err(dev, "dpsw_get_attributes err %d\n", err);
+ goto err_close;
+ }
+
-+ err = dpsw_get_api_version(priv->mc_io, 0,
++ err = dpsw_get_api_version(ethsw->mc_io, 0,
+ &version_major,
+ &version_minor);
+ if (err) {
+ goto err_close;
+ }
+
-+ err = dpsw_reset(priv->mc_io, 0, priv->dpsw_handle);
++ err = dpsw_reset(ethsw->mc_io, 0, ethsw->dpsw_handle);
+ if (err) {
+ dev_err(dev, "dpsw_reset err %d\n", err);
+ goto err_close;
+ }
+
-+ err = dpsw_fdb_set_learning_mode(priv->mc_io, 0, priv->dpsw_handle, 0,
++ err = dpsw_fdb_set_learning_mode(ethsw->mc_io, 0, ethsw->dpsw_handle, 0,
+ DPSW_FDB_LEARNING_MODE_HW);
+ if (err) {
+ dev_err(dev, "dpsw_fdb_set_learning_mode err %d\n", err);
+ goto err_close;
+ }
+
-+ for (i = 0; i < priv->sw_attr.num_ifs; i++) {
-+ err = dpsw_if_set_stp(priv->mc_io, 0, priv->dpsw_handle, i,
++ stp_cfg.vlan_id = DEFAULT_VLAN_ID;
++ stp_cfg.state = DPSW_STP_STATE_FORWARDING;
++
++ for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
++ err = dpsw_if_set_stp(ethsw->mc_io, 0, ethsw->dpsw_handle, i,
+ &stp_cfg);
+ if (err) {
+ dev_err(dev, "dpsw_if_set_stp err %d for port %d\n",
+ goto err_close;
+ }
+
-+ err = dpsw_if_set_broadcast(priv->mc_io, 0,
-+ priv->dpsw_handle, i, 1);
-+ if (err) {
-+ dev_err(dev,
-+ "dpsw_if_set_broadcast err %d for port %d\n",
-+ err, i);
-+ goto err_close;
-+ }
++ err = dpsw_if_set_broadcast(ethsw->mc_io, 0,
++ ethsw->dpsw_handle, i, 1);
++ if (err) {
++ dev_err(dev,
++ "dpsw_if_set_broadcast err %d for port %d\n",
++ err, i);
++ goto err_close;
++ }
++ }
++
++ ethsw_owq = alloc_ordered_workqueue("%s_ordered", WQ_MEM_RECLAIM,
++ "ethsw");
++ if (!ethsw_owq) {
++ err = -ENOMEM;
++ goto err_close;
++ }
++
++ err = ethsw_register_notifier(dev);
++ if (err)
++ goto err_destroy_ordered_workqueue;
++
++ return 0;
++
++err_destroy_ordered_workqueue:
++ destroy_workqueue(ethsw_owq);
++
++err_close:
++ dpsw_close(ethsw->mc_io, 0, ethsw->dpsw_handle);
++ return err;
++}
++
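++/* Per-port init: leave the default VLAN (membership is reconfigured
++ * at bridge join) and install the default multicast FDB entry.
++ */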
++static int ethsw_port_init(struct ethsw_port_priv *port_priv, u16 port)
++{
++ const char def_mcast[ETH_ALEN] = {0x01, 0x00, 0x5e, 0x00, 0x00, 0x01};
++ struct net_device *netdev = port_priv->netdev;
++ struct ethsw_core *ethsw = port_priv->ethsw_data;
++ struct dpsw_vlan_if_cfg vcfg;
++ int err;
++
++ /* Switch starts with all ports configured to VLAN 1. Need to
++ * remove this setting to allow configuration at bridge join
++ */
++ vcfg.num_ifs = 1;
++ vcfg.if_id[0] = port_priv->idx;
++
++ err = dpsw_vlan_remove_if_untagged(ethsw->mc_io, 0, ethsw->dpsw_handle,
++ DEFAULT_VLAN_ID, &vcfg);
++ if (err) {
++ netdev_err(netdev, "dpsw_vlan_remove_if_untagged err %d\n",
++ err);
++ return err;
++ }
++
++ err = ethsw_port_set_pvid(port_priv, 0);
++ if (err)
++ return err;
++
++ err = dpsw_vlan_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
++ DEFAULT_VLAN_ID, &vcfg);
++ if (err) {
++ netdev_err(netdev, "dpsw_vlan_remove_if err %d\n", err);
++ return err;
+ }
+
-+ return 0;
++ err = ethsw_port_fdb_add_mc(port_priv, def_mcast);
+
-+err_close:
-+ dpsw_close(priv->mc_io, 0, priv->dpsw_handle);
-+err_exit:
+ return err;
+}
+
-+static int __cold
-+ethsw_takedown(struct fsl_mc_device *sw_dev)
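++/* Undo ethsw_register_notifier() */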
++static void ethsw_unregister_notifier(struct device *dev)
+{
-+ struct device *dev = &sw_dev->dev;
-+ struct net_device *netdev;
-+ struct ethsw_dev_priv *priv;
-+ int err;
++ int err;
+
-+ netdev = dev_get_drvdata(dev);
-+ priv = netdev_priv(netdev);
++ err = unregister_switchdev_notifier(&port_switchdev_nb);
++ if (err)
++ dev_err(dev,
++ "Failed to unregister switchdev notifier (%d)\n", err);
+
-+ err = dpsw_close(priv->mc_io, 0, priv->dpsw_handle);
++ err = unregister_netdevice_notifier(&port_nb);
+ if (err)
-+ dev_warn(dev, "dpsw_close err %d\n", err);
++ dev_err(dev,
++ "Failed to unregister netdev notifier (%d)\n", err);
++}
+
-+ return 0;
++static void ethsw_takedown(struct fsl_mc_device *sw_dev)
++{
++ struct device *dev = &sw_dev->dev;
++ struct ethsw_core *ethsw = dev_get_drvdata(dev);
++ int err;
++
++ ethsw_unregister_notifier(dev);
++
++ err = dpsw_close(ethsw->mc_io, 0, ethsw->dpsw_handle);
++ if (err)
++ dev_warn(dev, "dpsw_close err %d\n", err);
+}
+
-+static int __cold
-+ethsw_remove(struct fsl_mc_device *sw_dev)
++static int ethsw_remove(struct fsl_mc_device *sw_dev)
+{
-+ struct device *dev;
-+ struct net_device *netdev;
-+ struct ethsw_dev_priv *priv;
-+ struct ethsw_port_priv *port_priv;
-+ struct list_head *pos;
++ struct ethsw_port_priv *port_priv;
++ struct ethsw_core *ethsw;
++ struct device *dev;
++ int i;
+
+ dev = &sw_dev->dev;
-+ netdev = dev_get_drvdata(dev);
-+ priv = netdev_priv(netdev);
++ ethsw = dev_get_drvdata(dev);
+
-+ list_for_each(pos, &priv->port_list) {
-+ port_priv = list_entry(pos, struct ethsw_port_priv, list);
++ ethsw_teardown_irqs(sw_dev);
+
-+ rtnl_lock();
-+ netdev_upper_dev_unlink(port_priv->netdev, netdev);
-+ rtnl_unlock();
++ destroy_workqueue(ethsw_owq);
++
++ rtnl_lock();
++ ethsw_stop(ethsw);
++ rtnl_unlock();
+
++ for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
++ port_priv = ethsw->ports[i];
+ unregister_netdev(port_priv->netdev);
+ free_netdev(port_priv->netdev);
+ }
-+
-+ ethsw_teardown_irqs(sw_dev);
-+
-+ unregister_netdev(netdev);
++ kfree(ethsw->ports);
+
+ ethsw_takedown(sw_dev);
-+ fsl_mc_portal_free(priv->mc_io);
++ fsl_mc_portal_free(ethsw->mc_io);
++
++ kfree(ethsw);
+
+ dev_set_drvdata(dev, NULL);
-+ free_netdev(netdev);
+
+ return 0;
+}
+
-+static int __cold
-+ethsw_probe(struct fsl_mc_device *sw_dev)
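++/* Allocate, initialize and register the netdev for one switch port */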
++static int ethsw_probe_port(struct ethsw_core *ethsw, u16 port_idx)
+{
-+ struct device *dev;
-+ struct net_device *netdev = NULL;
-+ struct ethsw_dev_priv *priv = NULL;
-+ int err = 0;
-+ u16 i;
-+ const char def_mcast[ETH_ALEN] = {
-+ 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01,
-+ };
-+ char port_name[IFNAMSIZ];
-+
-+ dev = &sw_dev->dev;
++ struct ethsw_port_priv *port_priv;
++ struct device *dev = ethsw->dev;
++ struct net_device *port_netdev;
++ int err;
+
-+ /* register switch device, it's for management only - no I/O */
-+ netdev = alloc_etherdev(sizeof(*priv));
-+ if (!netdev) {
++ port_netdev = alloc_etherdev(sizeof(struct ethsw_port_priv));
++ if (!port_netdev) {
+ dev_err(dev, "alloc_etherdev error\n");
+ return -ENOMEM;
+ }
-+ netdev->netdev_ops = &ethsw_ops;
+
-+ SET_NETDEV_DEV(netdev, dev);
-+ dev_set_drvdata(dev, netdev);
++ port_priv = netdev_priv(port_netdev);
++ port_priv->netdev = port_netdev;
++ port_priv->ethsw_data = ethsw;
+
-+ priv = netdev_priv(netdev);
-+ priv->netdev = netdev;
++ port_priv->idx = port_idx;
++ port_priv->stp_state = BR_STATE_FORWARDING;
+
-+ err = fsl_mc_portal_allocate(sw_dev, 0, &priv->mc_io);
-+ if (err) {
-+ dev_err(dev, "fsl_mc_portal_allocate err %d\n", err);
-+ goto err_free_netdev;
-+ }
-+ if (!priv->mc_io) {
-+ dev_err(dev, "fsl_mc_portal_allocate returned null handle but no error\n");
-+ err = -EFAULT;
-+ goto err_free_netdev;
-+ }
++ /* Flooding is implicitly enabled */
++ port_priv->flood = true;
+
-+ err = ethsw_init(sw_dev);
-+ if (err) {
-+ dev_err(dev, "switch init err %d\n", err);
-+ goto err_free_cmdport;
-+ }
++ SET_NETDEV_DEV(port_netdev, dev);
++ port_netdev->netdev_ops = &ethsw_port_ops;
++ port_netdev->ethtool_ops = &ethsw_port_ethtool_ops;
++ port_netdev->switchdev_ops = &ethsw_port_switchdev_ops;
+
-+ netdev->flags = netdev->flags | IFF_PROMISC | IFF_MASTER;
++ /* Set MTU limits */
++ port_netdev->min_mtu = ETH_MIN_MTU;
++ port_netdev->max_mtu = ETHSW_MAX_FRAME_LENGTH;
+
-+ /* TODO: should we hold rtnl_lock here? We can't register_netdev under
-+ * lock
-+ */
-+ dev_alloc_name(netdev, "sw%d");
-+ err = register_netdev(netdev);
++ err = register_netdev(port_netdev);
+ if (err < 0) {
+ dev_err(dev, "register_netdev error %d\n", err);
-+ goto err_takedown;
-+ }
-+ if (err)
-+ dev_info(dev, "register_netdev res %d\n", err);
-+
-+ /* VLAN 1 is implicitly configured on the switch */
-+ priv->vlans[1] = ETHSW_VLAN_MEMBER;
-+ /* Flooding, learning are implicitly enabled */
-+ priv->learning = true;
-+ priv->flood = true;
-+
-+ /* register switch ports */
-+ snprintf(port_name, IFNAMSIZ, "%sp%%d", netdev->name);
-+
-+ INIT_LIST_HEAD(&priv->port_list);
-+ for (i = 0; i < priv->sw_attr.num_ifs; i++) {
-+ struct net_device *port_netdev;
-+ struct ethsw_port_priv *port_priv;
-+
-+ port_netdev = alloc_etherdev(sizeof(struct ethsw_port_priv));
-+ if (!port_netdev) {
-+ dev_err(dev, "alloc_etherdev error\n");
-+ goto err_takedown;
++ free_netdev(port_netdev);
++ return err;
+ }
+
-+ port_priv = netdev_priv(port_netdev);
-+ port_priv->netdev = port_netdev;
-+ port_priv->ethsw_priv = priv;
++ ethsw->ports[port_idx] = port_priv;
+
-+ port_priv->port_index = i;
-+ port_priv->stp_state = BR_STATE_FORWARDING;
-+ /* VLAN 1 is configured by default on all switch ports */
-+ port_priv->vlans[1] = ETHSW_VLAN_MEMBER | ETHSW_VLAN_UNTAGGED |
-+ ETHSW_VLAN_PVID;
++ return ethsw_port_init(port_priv, port_idx);
++}
+
-+ SET_NETDEV_DEV(port_netdev, dev);
-+ port_netdev->netdev_ops = &ethsw_port_ops;
-+ port_netdev->ethtool_ops = &ethsw_port_ethtool_ops;
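++/* Driver entry point: sets up the MC portal and the DPSW object, then
++ * creates one netdev per switch port and enables the switch and IRQs.
++ */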
++static int ethsw_probe(struct fsl_mc_device *sw_dev)
++{
++ struct device *dev = &sw_dev->dev;
++ struct ethsw_core *ethsw;
++ int i, err;
+
-+ port_netdev->flags = port_netdev->flags |
-+ IFF_PROMISC | IFF_SLAVE;
++ /* Allocate switch core */
++ ethsw = kzalloc(sizeof(*ethsw), GFP_KERNEL);
+
-+ dev_alloc_name(port_netdev, port_name);
-+ err = register_netdev(port_netdev);
-+ if (err < 0) {
-+ dev_err(dev, "register_netdev error %d\n", err);
-+ free_netdev(port_netdev);
-+ goto err_takedown;
-+ }
++ if (!ethsw)
++ return -ENOMEM;
+
-+ rtnl_lock();
++ ethsw->dev = dev;
++ dev_set_drvdata(dev, ethsw);
+
-+ err = netdev_master_upper_dev_link(port_netdev, netdev,
-+ NULL, NULL);
-+ if (err) {
-+ dev_err(dev, "netdev_master_upper_dev_link error %d\n",
-+ err);
-+ unregister_netdev(port_netdev);
-+ free_netdev(port_netdev);
-+ rtnl_unlock();
-+ goto err_takedown;
-+ }
++ err = fsl_mc_portal_allocate(sw_dev, 0, &ethsw->mc_io);
++ if (err) {
++ dev_err(dev, "fsl_mc_portal_allocate err %d\n", err);
++ goto err_free_drvdata;
++ }
+
-+ rtmsg_ifinfo(RTM_NEWLINK, port_netdev, IFF_SLAVE, GFP_KERNEL);
++ err = ethsw_init(sw_dev);
++ if (err)
++ goto err_free_cmdport;
+
-+ rtnl_unlock();
++ /* DEFAULT_VLAN_ID is implicitly configured on the switch */
++ ethsw->vlans[DEFAULT_VLAN_ID] = ETHSW_VLAN_MEMBER;
+
-+ list_add(&port_priv->list, &priv->port_list);
++ /* Learning is implicitly enabled */
++ ethsw->learning = true;
++
++ ethsw->ports = kcalloc(ethsw->sw_attr.num_ifs, sizeof(*ethsw->ports),
++ GFP_KERNEL);
++ if (!ethsw->ports) {
++ err = -ENOMEM;
++ goto err_takedown;
++ }
+
-+ /* TODO: implmenet set_rm_mode instead of this */
-+ err = ethsw_port_fdb_add_mc(port_netdev, def_mcast);
++ for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
++ err = ethsw_probe_port(ethsw, i);
+ if (err)
-+ dev_warn(&netdev->dev,
-+ "ethsw_port_fdb_add_mc err %d\n", err);
++ goto err_free_ports;
+ }
+
-+ /* the switch starts up enabled */
++ /* Switch starts up enabled */
+ rtnl_lock();
-+ err = dev_open(netdev);
++ err = ethsw_open(ethsw);
+ rtnl_unlock();
+ if (err)
-+ dev_warn(dev, "dev_open err %d\n", err);
++ goto err_free_ports;
+
-+ /* setup irqs */
++ /* Setup IRQs */
+ err = ethsw_setup_irqs(sw_dev);
-+ if (unlikely(err)) {
-+ dev_warn(dev, "ethsw_setup_irqs err %d\n", err);
-+ goto err_takedown;
-+ }
++ if (err)
++ goto err_stop;
+
-+ dev_info(&netdev->dev,
-+ "probed %d port switch\n", priv->sw_attr.num_ifs);
++ dev_info(dev, "probed %d port switch\n", ethsw->sw_attr.num_ifs);
+ return 0;
+
++err_stop:
++ rtnl_lock();
++ ethsw_stop(ethsw);
++ rtnl_unlock();
++
++err_free_ports:
++ /* Cleanup registered ports only */
++ for (i--; i >= 0; i--) {
++ unregister_netdev(ethsw->ports[i]->netdev);
++ free_netdev(ethsw->ports[i]->netdev);
++ }
++ kfree(ethsw->ports);
++
+err_takedown:
-+ ethsw_remove(sw_dev);
++ ethsw_takedown(sw_dev);
++
+err_free_cmdport:
-+ fsl_mc_portal_free(priv->mc_io);
-+err_free_netdev:
++ fsl_mc_portal_free(ethsw->mc_io);
++
++err_free_drvdata:
++ kfree(ethsw);
+ dev_set_drvdata(dev, NULL);
-+ free_netdev(netdev);
+
+ return err;
+}
+ .vendor = FSL_MC_VENDOR_FREESCALE,
+ .obj_type = "dpsw",
+ },
-+ {}
++ { .vendor = 0x0 }
+};
++MODULE_DEVICE_TABLE(fslmc, ethsw_match_id_table);
+
+static struct fsl_mc_driver eth_sw_drv = {
+ .driver = {
-+ .name = KBUILD_MODNAME,
-+ .owner = THIS_MODULE,
++ .name = KBUILD_MODNAME,
++ .owner = THIS_MODULE,
+ },
-+ .probe = ethsw_probe,
-+ .remove = ethsw_remove,
-+ .match_id_table = ethsw_match_id_table,
++ .probe = ethsw_probe,
++ .remove = ethsw_remove,
++ .match_id_table = ethsw_match_id_table
+};
+
+module_fsl_mc_driver(eth_sw_drv);
+
-+MODULE_LICENSE("GPL");
-+MODULE_DESCRIPTION("DPAA2 Ethernet Switch Driver (prototype)");
++MODULE_LICENSE("Dual BSD/GPL");
++MODULE_DESCRIPTION("DPAA2 Ethernet Switch Driver");
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/ethsw/ethsw.h
+@@ -0,0 +1,90 @@
++/* Copyright 2014-2017 Freescale Semiconductor Inc.
++ * Copyright 2017 NXP
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef __ETHSW_H
++#define __ETHSW_H
++
++#include <linux/netdevice.h>
++#include <linux/etherdevice.h>
++#include <linux/rtnetlink.h>
++#include <linux/if_vlan.h>
++#include <uapi/linux/if_bridge.h>
++#include <net/switchdev.h>
++#include <linux/if_bridge.h>
++
++#include "dpsw.h"
++
++/* Number of IRQs supported */
++#define DPSW_IRQ_NUM 2
++
++#define ETHSW_VLAN_MEMBER 1
++#define ETHSW_VLAN_UNTAGGED 2
++#define ETHSW_VLAN_PVID 4
++#define ETHSW_VLAN_GLOBAL 8
++
++/* Maximum Frame Length supported by HW (currently 10k) */
++#define DPAA2_MFL (10 * 1024)
++#define ETHSW_MAX_FRAME_LENGTH (DPAA2_MFL - VLAN_ETH_HLEN - ETH_FCS_LEN)
++#define ETHSW_L2_MAX_FRM(mtu) ((mtu) + VLAN_ETH_HLEN + ETH_FCS_LEN)
++
++extern const struct ethtool_ops ethsw_port_ethtool_ops;
++
++struct ethsw_core;
++
++/* Per port private data */
++struct ethsw_port_priv {
++ struct net_device *netdev;
++ u16 idx;
++ struct ethsw_core *ethsw_data;
++ u8 link_state;
++ u8 stp_state;
++ bool flood;
++
++ u8 vlans[VLAN_VID_MASK + 1];
++ u16 pvid;
++};
++
++/* Switch data */
++struct ethsw_core {
++ struct device *dev;
++ struct fsl_mc_io *mc_io;
++ u16 dpsw_handle;
++ struct dpsw_attr sw_attr;
++ int dev_id;
++ struct ethsw_port_priv **ports;
++
++ u8 vlans[VLAN_VID_MASK + 1];
++ bool learning;
++};
++
++#endif /* __ETHSW_H */
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/evb/Kconfig
@@ -0,0 +1,7 @@
+#endif /* _FSL_DPDMUX_CMD_H */
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/evb/dpdmux.c
-@@ -0,0 +1,1112 @@
+@@ -0,0 +1,1111 @@
+/* Copyright 2013-2016 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
-+#include "../../fsl-mc/include/mc-sys.h"
-+#include "../../fsl-mc/include/mc-cmd.h"
++#include <linux/fsl/mc.h>
+#include "dpdmux.h"
+#include "dpdmux-cmd.h"
+
+ int dpdmux_id,
+ u16 *token)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpdmux_cmd_open *cmd_params;
+ int err;
+
+ u32 cmd_flags,
+ u16 token)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_CLOSE,
+ const struct dpdmux_cfg *cfg,
+ u32 *obj_id)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpdmux_cmd_create *cmd_params;
+ int err;
+
+ u32 cmd_flags,
+ u32 object_id)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpdmux_cmd_destroy *cmd_params;
+
+ /* prepare command */
+ u32 cmd_flags,
+ u16 token)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_ENABLE,
+ u32 cmd_flags,
+ u16 token)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_DISABLE,
+ u16 token,
+ int *en)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpdmux_rsp_is_enabled *rsp_params;
+ int err;
+
+ u32 cmd_flags,
+ u16 token)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_RESET,
+ u8 irq_index,
+ u8 en)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpdmux_cmd_set_irq_enable *cmd_params;
+
+ /* prepare command */
+ u8 irq_index,
+ u8 *en)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpdmux_cmd_get_irq_enable *cmd_params;
+ struct dpdmux_rsp_get_irq_enable *rsp_params;
+ int err;
+ u8 irq_index,
+ u32 mask)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpdmux_cmd_set_irq_mask *cmd_params;
+
+ /* prepare command */
+ u8 irq_index,
+ u32 *mask)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpdmux_cmd_get_irq_mask *cmd_params;
+ struct dpdmux_rsp_get_irq_mask *rsp_params;
+ int err;
+ u8 irq_index,
+ u32 *status)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpdmux_cmd_get_irq_status *cmd_params;
+ struct dpdmux_rsp_get_irq_status *rsp_params;
+ int err;
+ u8 irq_index,
+ u32 status)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpdmux_cmd_clear_irq_status *cmd_params;
+
+ /* prepare command */
+ u16 token,
+ struct dpdmux_attr *attr)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpdmux_rsp_get_attr *rsp_params;
+ int err;
+
+ u16 if_id)
+{
+ struct dpdmux_cmd_if *cmd_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_ENABLE,
+ u16 if_id)
+{
+ struct dpdmux_cmd_if *cmd_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_DISABLE,
+ u16 token,
+ u16 max_frame_length)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpdmux_cmd_set_max_frame_length *cmd_params;
+
+ /* prepare command */
+ u32 cmd_flags,
+ u16 token)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_UL_RESET_COUNTERS,
+ u16 if_id,
+ const struct dpdmux_accepted_frames *cfg)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpdmux_cmd_if_set_accepted_frames *cmd_params;
+
+ /* prepare command */
+ u16 if_id,
+ struct dpdmux_if_attr *attr)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpdmux_cmd_if *cmd_params;
+ struct dpdmux_rsp_if_get_attr *rsp_params;
+ int err;
+ u16 if_id,
+ const struct dpdmux_l2_rule *rule)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpdmux_cmd_if_l2_rule *cmd_params;
+
+ /* prepare command */
+ u16 if_id,
+ const struct dpdmux_l2_rule *rule)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpdmux_cmd_if_l2_rule *cmd_params;
+
+ /* prepare command */
+ enum dpdmux_counter_type counter_type,
+ u64 *counter)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpdmux_cmd_if_get_counter *cmd_params;
+ struct dpdmux_rsp_if_get_counter *rsp_params;
+ int err;
+ u16 if_id,
+ struct dpdmux_link_cfg *cfg)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpdmux_cmd_if_set_link_cfg *cmd_params;
+
+ /* prepare command */
+ u16 if_id,
+ struct dpdmux_link_state *state)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpdmux_cmd_if_get_link_state *cmd_params;
+ struct dpdmux_rsp_if_get_link_state *rsp_params;
+ int err;
+ u64 key_cfg_iova)
+{
+ struct dpdmux_set_custom_key *cmd_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_SET_CUSTOM_KEY,
+ struct dpdmux_cls_action *action)
+{
+ struct dpdmux_cmd_add_custom_cls_entry *cmd_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_ADD_CUSTOM_CLS_ENTRY,
+ struct dpdmux_rule_cfg *rule)
+{
+ struct dpdmux_cmd_remove_custom_cls_entry *cmd_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_REMOVE_CUSTOM_CLS_ENTRY,
+ u16 *major_ver,
+ u16 *minor_ver)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpdmux_rsp_get_api_version *rsp_params;
+ int err;
+
+#endif /* __FSL_DPDMUX_H */
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/evb/evb.c
-@@ -0,0 +1,1350 @@
+@@ -0,0 +1,1354 @@
+/* Copyright 2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+#include <uapi/linux/if_bridge.h>
+#include <net/netlink.h>
+
-+#include "../../fsl-mc/include/mc.h"
++#include <linux/fsl/mc.h>
+
+#include "dpdmux.h"
+#include "dpdmux-cmd.h"
+#define DPDMUX_MAX_IRQ_NUM 2
+
+/* MAX FRAME LENGTH (currently 10k) */
-+#define EVB_MAX_FRAME_LENGTH (10 * 1024)
-+/* MIN FRAME LENGTH (64 bytes + 4 bytes CRC) */
-+#define EVB_MIN_FRAME_LENGTH 68
++#define EVB_MAX_FRAME_LENGTH (10 * 1024)
++#define EVB_MAX_MTU (EVB_MAX_FRAME_LENGTH - VLAN_ETH_HLEN)
++#define EVB_MIN_MTU 68
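++/* The MTU excludes the VLAN Ethernet header, while the MC API takes a
++ * full frame length, so the change_mtu handler adds VLAN_ETH_HLEN.
++ */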
+
+struct evb_port_priv {
+ struct net_device *netdev;
+ if (port_priv->port_index > 0)
+ return -EPERM;
+
-+ if (mtu < EVB_MIN_FRAME_LENGTH || mtu > EVB_MAX_FRAME_LENGTH) {
-+ netdev_err(netdev, "Invalid MTU %d. Valid range is: %d..%d\n",
-+ mtu, EVB_MIN_FRAME_LENGTH, EVB_MAX_FRAME_LENGTH);
-+ return -EINVAL;
-+ }
-+
+ err = dpdmux_set_max_frame_length(evb_priv->mc_io,
+ 0,
+ evb_priv->mux_handle,
-+ (uint16_t)mtu);
++ (uint16_t)(mtu + VLAN_ETH_HLEN));
+
+ if (unlikely(err)) {
+ netdev_err(netdev, "dpdmux_ul_set_max_frame_length err %d\n",
+ return 0;
+}
+
-+void evb_port_get_stats(struct net_device *netdev,
++struct rtnl_link_stats64 *evb_port_get_stats(struct net_device *netdev,
+ struct rtnl_link_stats64 *storage)
+{
+ struct evb_port_priv *port_priv = netdev_priv(netdev);
+ if (unlikely(err))
+ goto error;
+
-+ return;
++ return storage;
+
+error:
+ netdev_err(netdev, "dpdmux_if_get_counter err %d\n", err);
++ return storage;
+}
+
+static const struct net_device_ops evb_port_ops = {
+
+ priv = netdev_priv(netdev);
+
-+ err = fsl_mc_portal_allocate(evb_dev, 0, &priv->mc_io);
-+ if (unlikely(err)) {
-+ dev_err(dev, "fsl_mc_portal_allocate err %d\n", err);
++ err = fsl_mc_portal_allocate(evb_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
++ &priv->mc_io);
++ if (err) {
++ if (err == -ENXIO)
++ err = -EPROBE_DEFER;
++ else
++ dev_err(dev, "fsl_mc_portal_allocate err %d\n", err);
+ goto err_free_netdev;
+ }
++
+ if (!priv->mc_io) {
+ dev_err(dev, "fsl_mc_portal_allocate returned null handle but no error\n");
+ err = -EFAULT;
+
+ list_add(&port_priv->list, &priv->port_list);
+ } else {
++ /* Set MTU limits only on uplink */
++ port_netdev->min_mtu = EVB_MIN_MTU;
++ port_netdev->max_mtu = EVB_MAX_MTU;
++
+ err = register_netdev(netdev);
+
+ if (err < 0) {
+#endif /* _FSL_DPMAC_CMD_H */
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/mac/dpmac.c
-@@ -0,0 +1,620 @@
+@@ -0,0 +1,619 @@
+/* Copyright 2013-2016 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
-+#include "../../fsl-mc/include/mc-sys.h"
-+#include "../../fsl-mc/include/mc-cmd.h"
++#include <linux/fsl/mc.h>
+#include "dpmac.h"
+#include "dpmac-cmd.h"
+
+ u16 *token)
+{
+ struct dpmac_cmd_open *cmd_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ u32 cmd_flags,
+ u16 token)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_CLOSE, cmd_flags,
+ u32 *obj_id)
+{
+ struct dpmac_cmd_create *cmd_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ u32 object_id)
+{
+ struct dpmac_cmd_destroy *cmd_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_DESTROY,
+ u8 en)
+{
+ struct dpmac_cmd_set_irq_enable *cmd_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_IRQ_ENABLE,
+{
+ struct dpmac_cmd_get_irq_enable *cmd_params;
+ struct dpmac_rsp_get_irq_enable *rsp_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ u32 mask)
+{
+ struct dpmac_cmd_set_irq_mask *cmd_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_IRQ_MASK,
+{
+ struct dpmac_cmd_get_irq_mask *cmd_params;
+ struct dpmac_rsp_get_irq_mask *rsp_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+{
+ struct dpmac_cmd_get_irq_status *cmd_params;
+ struct dpmac_rsp_get_irq_status *rsp_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ u32 status)
+{
+ struct dpmac_cmd_clear_irq_status *cmd_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_CLEAR_IRQ_STATUS,
+ struct dpmac_attr *attr)
+{
+ struct dpmac_rsp_get_attributes *rsp_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ struct dpmac_link_cfg *cfg)
+{
+ struct dpmac_rsp_get_link_cfg *rsp_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ int err = 0;
+
+ /* prepare command */
+ struct dpmac_link_state *link_state)
+{
+ struct dpmac_cmd_set_link_state *cmd_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_LINK_STATE,
+{
+ struct dpmac_cmd_get_counter *dpmac_cmd;
+ struct dpmac_rsp_get_counter *dpmac_rsp;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ int err = 0;
+
+ /* prepare command */
+ const u8 addr[6])
+{
+ struct dpmac_cmd_set_port_mac_addr *dpmac_cmd;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_PORT_MAC_ADDR,
+ u16 *minor_ver)
+{
+ struct dpmac_rsp_get_api_version *rsp_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_API_VERSION,
+#endif /* __FSL_DPMAC_H */
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/mac/mac.c
-@@ -0,0 +1,666 @@
+@@ -0,0 +1,673 @@
+/* Copyright 2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+#include <linux/phy.h>
+#include <linux/phy_fixed.h>
+
-+#include "../../fsl-mc/include/mc.h"
-+#include "../../fsl-mc/include/mc-sys.h"
++#include <linux/fsl/mc.h>
+
+#include "dpmac.h"
+#include "dpmac-cmd.h"
+ dev_err(&priv->mc_dev->dev, "dpmac_set_link_state: %d\n", err);
+}
+
-+#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS
-+static netdev_tx_t dpaa2_mac_drop_frame(struct sk_buff *skb,
-+ struct net_device *dev)
-+{
-+ /* we don't support I/O for now, drop the frame */
-+ dev_kfree_skb_any(skb);
-+ return NETDEV_TX_OK;
-+}
-+
+static int dpaa2_mac_open(struct net_device *netdev)
+{
+ /* start PHY state machine */
+ return 0;
+}
+
-+static int dpaa2_mac_get_settings(struct net_device *netdev,
-+ struct ethtool_cmd *cmd)
++#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS
++static netdev_tx_t dpaa2_mac_drop_frame(struct sk_buff *skb,
++ struct net_device *dev)
++{
++ /* we don't support I/O for now, drop the frame */
++ dev_kfree_skb_any(skb);
++ return NETDEV_TX_OK;
++}
++
++static int dpaa2_mac_get_link_ksettings(struct net_device *netdev,
++ struct ethtool_link_ksettings *ks)
+{
-+ return phy_ethtool_gset(netdev->phydev, cmd);
++ phy_ethtool_ksettings_get(netdev->phydev, ks);
++
++ return 0;
+}
+
-+static int dpaa2_mac_set_settings(struct net_device *netdev,
-+ struct ethtool_cmd *cmd)
++static int dpaa2_mac_set_link_ksettings(struct net_device *netdev,
++ const struct ethtool_link_ksettings *ks)
+{
-+ return phy_ethtool_sset(netdev->phydev, cmd);
++ return phy_ethtool_ksettings_set(netdev->phydev, ks);
+}
+
-+static void dpaa2_mac_get_stats(struct net_device *netdev,
++static struct rtnl_link_stats64 *dpaa2_mac_get_stats(struct net_device *netdev,
+ struct rtnl_link_stats64 *storage)
+{
+ struct dpaa2_mac_priv *priv = netdev_priv(netdev);
+ if (err)
+ goto error;
+
-+ return;
++ return storage;
+error:
+ netdev_err(netdev, "dpmac_get_counter err %d\n", err);
++ return storage;
+}
+
+static struct {
+}
+
+static const struct net_device_ops dpaa2_mac_ndo_ops = {
-+ .ndo_start_xmit = &dpaa2_mac_drop_frame,
+ .ndo_open = &dpaa2_mac_open,
+ .ndo_stop = &dpaa2_mac_stop,
++ .ndo_start_xmit = &dpaa2_mac_drop_frame,
+ .ndo_get_stats64 = &dpaa2_mac_get_stats,
+};
+
+static const struct ethtool_ops dpaa2_mac_ethtool_ops = {
-+ .get_settings = &dpaa2_mac_get_settings,
-+ .set_settings = &dpaa2_mac_set_settings,
++ .get_link_ksettings = &dpaa2_mac_get_link_ksettings,
++ .set_link_ksettings = &dpaa2_mac_set_link_ksettings,
+ .get_strings = &dpaa2_mac_get_strings,
+ .get_ethtool_stats = &dpaa2_mac_get_ethtool_stats,
+ .get_sset_count = &dpaa2_mac_get_sset_count,
+
+ dev_set_drvdata(dev, priv);
+
-+ err = fsl_mc_portal_allocate(mc_dev, 0, &mc_dev->mc_io);
++ /* We may need to issue MC commands while in atomic context */
++ err = fsl_mc_portal_allocate(mc_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
++ &mc_dev->mc_io);
+ if (err || !mc_dev->mc_io) {
-+ dev_err(dev, "fsl_mc_portal_allocate error: %d\n", err);
-+ err = -ENODEV;
++ dev_dbg(dev, "fsl_mc_portal_allocate error: %d\n", err);
++ err = -EPROBE_DEFER;
+ goto err_free_netdev;
+ }
+
+ }
+#endif /* CONFIG_FSL_DPAA2_MAC_NETDEVS */
+
-+ /* probe the PHY as a fixed-link if the link type declared in DPC
-+ * explicitly mandates this
++ /* connect to the PHY declared via phy-handle in the device tree,
++ * or fall back to a fixed link if none is defined
++ */
-+
+ phy_node = of_parse_phandle(dpmac_node, "phy-handle", 0);
+ if (!phy_node) {
+ goto probe_fixed_link;
+ dev_info(dev, "Registered fixed PHY.\n");
+ }
+
-+ /* start PHY state machine */
-+#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS
+ dpaa2_mac_open(netdev);
-+#else /* CONFIG_FSL_DPAA2_MAC_NETDEVS */
-+ phy_start(netdev->phydev);
-+#endif /* CONFIG_FSL_DPAA2_MAC_NETDEVS */
++
+ return 0;
+
+err_defer:
+{
+ struct device *dev = &mc_dev->dev;
+ struct dpaa2_mac_priv *priv = dev_get_drvdata(dev);
++ struct net_device *netdev = priv->netdev;
++
++ dpaa2_mac_stop(netdev);
++
++ if (phy_is_pseudo_fixed_link(netdev->phydev))
++ fixed_phy_unregister(netdev->phydev);
++ else
++ phy_disconnect(netdev->phydev);
++ netdev->phydev = NULL;
+
+#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS
+ unregister_netdev(priv->netdev);
+ free_netdev(priv->netdev);
+
+ dev_set_drvdata(dev, NULL);
-+ kfree(priv);
+
+ return 0;
+}
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
-+#include "../../fsl-mc/include/mc-sys.h"
-+#include "../../fsl-mc/include/mc-cmd.h"
++#include <linux/fsl/mc.h>
++
+#include "dprtc.h"
+#include "dprtc-cmd.h"
+
+ uint16_t *token)
+{
+ struct dprtc_cmd_open *cmd_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ uint32_t cmd_flags,
+ uint16_t token)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_CLOSE, cmd_flags,
+ const struct dprtc_cfg *cfg,
+ uint32_t *obj_id)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ (void)(cfg); /* unused */
+ uint32_t object_id)
+{
+ struct dprtc_cmd_destroy *cmd_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_DESTROY,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_ENABLE, cmd_flags,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_DISABLE,
+ int *en)
+{
+ struct dprtc_rsp_is_enabled *rsp_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ uint32_t cmd_flags,
+ uint16_t token)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_RESET,
+ uint8_t en)
+{
+ struct dprtc_cmd_set_irq_enable *cmd_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_IRQ_ENABLE,
+{
+ struct dprtc_rsp_get_irq_enable *rsp_params;
+ struct dprtc_cmd_get_irq *cmd_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ uint32_t mask)
+{
+ struct dprtc_cmd_set_irq_mask *cmd_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_IRQ_MASK,
+{
+ struct dprtc_rsp_get_irq_mask *rsp_params;
+ struct dprtc_cmd_get_irq *cmd_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+{
+ struct dprtc_cmd_get_irq_status *cmd_params;
+ struct dprtc_rsp_get_irq_status *rsp_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ uint32_t status)
+{
+ struct dprtc_cmd_clear_irq_status *cmd_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_CLEAR_IRQ_STATUS,
+ struct dprtc_attr *attr)
+{
+ struct dprtc_rsp_get_attributes *rsp_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ int64_t offset)
+{
+ struct dprtc_cmd_set_clock_offset *cmd_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_CLOCK_OFFSET,
+ uint32_t freq_compensation)
+{
+ struct dprtc_get_freq_compensation *cmd_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_FREQ_COMPENSATION,
+ uint32_t *freq_compensation)
+{
+ struct dprtc_get_freq_compensation *rsp_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ uint64_t *time)
+{
+ struct dprtc_time *rsp_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ uint64_t time)
+{
+ struct dprtc_time *cmd_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_TIME,
+ uint16_t token, uint64_t time)
+{
+ struct dprtc_time *cmd_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_ALARM,
+ uint16_t *minor_ver)
+{
+ struct dprtc_rsp_get_api_version *rsp_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_API_VERSION,
+#endif /* __FSL_DPRTC_H */
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/rtc/rtc.c
-@@ -0,0 +1,243 @@
+@@ -0,0 +1,242 @@
+/* Copyright 2013-2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+#include <linux/module.h>
+#include <linux/ptp_clock_kernel.h>
+
-+#include "../../fsl-mc/include/mc.h"
-+#include "../../fsl-mc/include/mc-sys.h"
++#include <linux/fsl/mc.h>
+
+#include "dprtc.h"
+#include "dprtc-cmd.h"
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("DPAA2 RTC (PTP 1588 clock) driver (prototype)");
+--- a/include/linux/filter.h
++++ b/include/linux/filter.h
+@@ -429,12 +429,15 @@ struct sk_filter {
+
+ struct bpf_skb_data_end {
+ struct qdisc_skb_cb qdisc_cb;
++ void *data_meta;
+ void *data_end;
+ };
+
+ struct xdp_buff {
+ void *data;
+ void *data_end;
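++ /* data_meta points to the metadata area ahead of data;
++ * data_hard_start to the start of the buffer headroom.
++ */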
++ void *data_meta;
++ void *data_hard_start;
+ };
+
+ /* compute the linear packet data range [data, data_end) which