1 From 936ce2452068cb0f6d48ca7d77d6b975802c19ae Mon Sep 17 00:00:00 2001
2 From: Ioana Radulescu <ruxandra.radulescu@nxp.com>
3 Date: Tue, 3 Sep 2019 14:13:32 +0300
4 Subject: [PATCH] dpaa2-eth: Add support for Rx traffic classes
6 The firmware reserves for each DPNI a number of RX frame queues
7 equal to the number of configured flows x number of configured traffic classes.
10 Current driver configuration directs all incoming traffic to
11 FQs corresponding to TC0, leaving all other priority levels unused.
13 Start adding support for multiple ingress traffic classes, by
14 configuring the FQs associated with all priority levels, not just
15 TC0. All settings that are per-TC, such as those related to
16 hashing and flow steering, are also updated.
18 Signed-off-by: Ioana Radulescu <ruxandra.radulescu@nxp.com>
20 .../ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c | 7 ++-
21 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c | 70 +++++++++++++++-------
22 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h | 4 +-
23 .../net/ethernet/freescale/dpaa2/dpaa2-ethtool.c | 19 ++++--
24 4 files changed, 68 insertions(+), 32 deletions(-)
26 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c
27 +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c
28 @@ -81,8 +81,8 @@ static int dpaa2_dbg_fqs_show(struct seq
31 seq_printf(file, "FQ stats for %s:\n", priv->net_dev->name);
32 - seq_printf(file, "%s%16s%16s%16s%16s\n",
33 - "VFQID", "CPU", "Type", "Frames", "Pending frames");
34 + seq_printf(file, "%s%16s%16s%16s%16s%16s\n",
35 + "VFQID", "CPU", "TC", "Type", "Frames", "Pending frames");
37 for (i = 0; i < priv->num_fqs; i++) {
39 @@ -90,9 +90,10 @@ static int dpaa2_dbg_fqs_show(struct seq
43 - seq_printf(file, "%5d%16d%16s%16llu%16u\n",
44 + seq_printf(file, "%5d%16d%16d%16s%16llu%16u\n",
51 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
52 +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
53 @@ -1231,6 +1231,7 @@ static void disable_ch_napi(struct dpaa2
54 static void dpaa2_eth_set_rx_taildrop(struct dpaa2_eth_priv *priv, bool enable)
56 struct dpni_taildrop td = {0};
57 + struct dpaa2_eth_fq *fq;
60 if (priv->rx_td_enabled == enable)
61 @@ -1240,11 +1241,12 @@ static void dpaa2_eth_set_rx_taildrop(st
62 td.threshold = DPAA2_ETH_TAILDROP_THRESH;
64 for (i = 0; i < priv->num_fqs; i++) {
65 - if (priv->fq[i].type != DPAA2_RX_FQ)
67 + if (fq->type != DPAA2_RX_FQ)
69 err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
70 - DPNI_CP_QUEUE, DPNI_QUEUE_RX, 0,
71 - priv->fq[i].flowid, &td);
72 + DPNI_CP_QUEUE, DPNI_QUEUE_RX,
73 + fq->tc, fq->flowid, &td);
75 netdev_err(priv->net_dev,
76 "dpni_set_taildrop() failed\n");
77 @@ -2338,7 +2340,7 @@ static void set_fq_affinity(struct dpaa2
79 static void setup_fqs(struct dpaa2_eth_priv *priv)
84 /* We have one TxConf FQ per Tx flow.
85 * The number of Tx and Rx queues is the same.
86 @@ -2350,10 +2352,13 @@ static void setup_fqs(struct dpaa2_eth_p
87 priv->fq[priv->num_fqs++].flowid = (u16)i;
90 - for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
91 - priv->fq[priv->num_fqs].type = DPAA2_RX_FQ;
92 - priv->fq[priv->num_fqs].consume = dpaa2_eth_rx;
93 - priv->fq[priv->num_fqs++].flowid = (u16)i;
94 + for (j = 0; j < dpaa2_eth_tc_count(priv); j++) {
95 + for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
96 + priv->fq[priv->num_fqs].type = DPAA2_RX_FQ;
97 + priv->fq[priv->num_fqs].consume = dpaa2_eth_rx;
98 + priv->fq[priv->num_fqs].tc = (u8)j;
99 + priv->fq[priv->num_fqs++].flowid = (u16)i;
103 /* For each FQ, decide on which core to process incoming frames */
104 @@ -2701,7 +2706,7 @@ static int setup_rx_flow(struct dpaa2_et
107 err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
108 - DPNI_QUEUE_RX, 0, fq->flowid, &queue, &qid);
109 + DPNI_QUEUE_RX, fq->tc, fq->flowid, &queue, &qid);
111 dev_err(dev, "dpni_get_queue(RX) failed\n");
113 @@ -2714,7 +2719,7 @@ static int setup_rx_flow(struct dpaa2_et
114 queue.destination.priority = 1;
115 queue.user_context = (u64)(uintptr_t)fq;
116 err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
117 - DPNI_QUEUE_RX, 0, fq->flowid,
118 + DPNI_QUEUE_RX, fq->tc, fq->flowid,
119 DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
122 @@ -2723,6 +2728,10 @@ static int setup_rx_flow(struct dpaa2_et
126 + /* only once for each channel */
130 err = xdp_rxq_info_reg(&fq->channel->xdp_rxq, priv->net_dev,
133 @@ -2860,7 +2869,7 @@ static int config_legacy_hash_key(struct
135 struct device *dev = priv->net_dev->dev.parent;
136 struct dpni_rx_tc_dist_cfg dist_cfg;
140 memset(&dist_cfg, 0, sizeof(dist_cfg));
142 @@ -2868,9 +2877,14 @@ static int config_legacy_hash_key(struct
143 dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
144 dist_cfg.dist_mode = DPNI_DIST_MODE_HASH;
146 - err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token, 0, &dist_cfg);
148 - dev_err(dev, "dpni_set_rx_tc_dist failed\n");
149 + for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
150 + err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token,
153 + dev_err(dev, "dpni_set_rx_tc_dist failed\n");
160 @@ -2880,7 +2894,7 @@ static int config_hash_key(struct dpaa2_
162 struct device *dev = priv->net_dev->dev.parent;
163 struct dpni_rx_dist_cfg dist_cfg;
167 memset(&dist_cfg, 0, sizeof(dist_cfg));
169 @@ -2888,9 +2902,15 @@ static int config_hash_key(struct dpaa2_
170 dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
173 - err = dpni_set_rx_hash_dist(priv->mc_io, 0, priv->mc_token, &dist_cfg);
175 - dev_err(dev, "dpni_set_rx_hash_dist failed\n");
176 + for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
178 + err = dpni_set_rx_hash_dist(priv->mc_io, 0, priv->mc_token,
181 + dev_err(dev, "dpni_set_rx_hash_dist failed\n");
188 @@ -2900,7 +2920,7 @@ static int config_cls_key(struct dpaa2_e
190 struct device *dev = priv->net_dev->dev.parent;
191 struct dpni_rx_dist_cfg dist_cfg;
195 memset(&dist_cfg, 0, sizeof(dist_cfg));
197 @@ -2908,9 +2928,15 @@ static int config_cls_key(struct dpaa2_e
198 dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
201 - err = dpni_set_rx_fs_dist(priv->mc_io, 0, priv->mc_token, &dist_cfg);
203 - dev_err(dev, "dpni_set_rx_fs_dist failed\n");
204 + for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
206 + err = dpni_set_rx_fs_dist(priv->mc_io, 0, priv->mc_token,
209 + dev_err(dev, "dpni_set_rx_fs_dist failed\n");
216 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
217 +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
218 @@ -291,7 +291,9 @@ struct dpaa2_eth_ch_stats {
220 /* Maximum number of queues associated with a DPNI */
221 #define DPAA2_ETH_MAX_TCS 8
222 -#define DPAA2_ETH_MAX_RX_QUEUES 16
223 +#define DPAA2_ETH_MAX_RX_QUEUES_PER_TC 16
224 +#define DPAA2_ETH_MAX_RX_QUEUES \
225 + (DPAA2_ETH_MAX_RX_QUEUES_PER_TC * DPAA2_ETH_MAX_TCS)
226 #define DPAA2_ETH_MAX_TX_QUEUES 16
227 #define DPAA2_ETH_MAX_QUEUES (DPAA2_ETH_MAX_RX_QUEUES + \
228 DPAA2_ETH_MAX_TX_QUEUES)
229 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
230 +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
231 @@ -502,7 +502,7 @@ static int do_cls_rule(struct net_device
238 if (fs->ring_cookie != RX_CLS_FLOW_DISC &&
239 fs->ring_cookie >= dpaa2_eth_queue_count(priv))
240 @@ -562,11 +562,18 @@ static int do_cls_rule(struct net_device
241 fs_act.options |= DPNI_FS_OPT_DISCARD;
243 fs_act.flow_id = fs->ring_cookie;
244 - err = dpni_add_fs_entry(priv->mc_io, 0, priv->mc_token, 0,
245 - fs->location, &rule_cfg, &fs_act);
247 - err = dpni_remove_fs_entry(priv->mc_io, 0, priv->mc_token, 0,
250 + for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
252 + err = dpni_add_fs_entry(priv->mc_io, 0, priv->mc_token,
253 + i, fs->location, &rule_cfg,
256 + err = dpni_remove_fs_entry(priv->mc_io, 0,
263 dma_unmap_single(dev, key_iova, rule_cfg.key_size * 2, DMA_TO_DEVICE);