kernel: update kernel 4.4 to version 4.4.110
[openwrt/openwrt.git] / target / linux / mvebu / patches-4.4 / 034-net-mvneta-Add-naive-RSS-support.patch
1 From: Gregory CLEMENT <gregory.clement@free-electrons.com>
2 Date: Wed, 9 Dec 2015 18:23:50 +0100
3 Subject: [PATCH] net: mvneta: Add naive RSS support
4
5 This patch adds the support for the RSS related ethtool
6 function. Currently it only uses one entry in the indirection table which
7 allows associating an mvneta interface to a given CPU.
8
9 Signed-off-by: Gregory CLEMENT <gregory.clement@free-electrons.com>
10 Tested-by: Marcin Wojtas <mw@semihalf.com>
11 Signed-off-by: David S. Miller <davem@davemloft.net>
12 ---
13
14 --- a/drivers/net/ethernet/marvell/mvneta.c
15 +++ b/drivers/net/ethernet/marvell/mvneta.c
16 @@ -261,6 +261,11 @@
17
18 #define MVNETA_TX_MTU_MAX 0x3ffff
19
20 +/* The RSS lookup table actually has 256 entries but we do not use
21 + * them yet
22 + */
23 +#define MVNETA_RSS_LU_TABLE_SIZE 1
24 +
25 /* TSO header size */
26 #define TSO_HEADER_SIZE 128
27
28 @@ -382,6 +387,8 @@ struct mvneta_port {
29 unsigned int use_inband_status:1;
30
31 u64 ethtool_stats[ARRAY_SIZE(mvneta_statistics)];
32 +
33 + u32 indir[MVNETA_RSS_LU_TABLE_SIZE];
34 };
35
36 /* The mvneta_tx_desc and mvneta_rx_desc structures describe the
37 @@ -1071,7 +1078,7 @@ static void mvneta_defaults_set(struct m
38 if ((rxq % max_cpu) == cpu)
39 rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);
40
41 - if (cpu == rxq_def)
42 + if (cpu == pp->rxq_def)
43 txq_map = MVNETA_CPU_TXQ_ACCESS_ALL_MASK;
44
45 mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map);
46 @@ -2512,6 +2519,18 @@ static void mvneta_percpu_unmask_interru
47 MVNETA_MISCINTR_INTR_MASK);
48 }
49
50 +static void mvneta_percpu_mask_interrupt(void *arg)
51 +{
52 + struct mvneta_port *pp = arg;
53 +
54 + /* All the queues are masked, but actually only the ones
55 + * mapped to this CPU will be masked
56 + */
57 + mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
58 + mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
59 + mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
60 +}
61 +
62 static void mvneta_start_dev(struct mvneta_port *pp)
63 {
64 unsigned int cpu;
65 @@ -3233,6 +3252,106 @@ static int mvneta_ethtool_get_sset_count
66 return -EOPNOTSUPP;
67 }
68
69 +static u32 mvneta_ethtool_get_rxfh_indir_size(struct net_device *dev)
70 +{
71 + return MVNETA_RSS_LU_TABLE_SIZE;
72 +}
73 +
74 +static int mvneta_ethtool_get_rxnfc(struct net_device *dev,
75 + struct ethtool_rxnfc *info,
76 + u32 *rules __always_unused)
77 +{
78 + switch (info->cmd) {
79 + case ETHTOOL_GRXRINGS:
80 + info->data = rxq_number;
81 + return 0;
82 + case ETHTOOL_GRXFH:
83 + return -EOPNOTSUPP;
84 + default:
85 + return -EOPNOTSUPP;
86 + }
87 +}
88 +
89 +static int mvneta_config_rss(struct mvneta_port *pp)
90 +{
91 + int cpu;
92 + u32 val;
93 +
94 + netif_tx_stop_all_queues(pp->dev);
95 +
96 + for_each_online_cpu(cpu)
97 + smp_call_function_single(cpu, mvneta_percpu_mask_interrupt,
98 + pp, true);
99 +
100 + /* We have to synchronise on the napi of each CPU */
101 + for_each_online_cpu(cpu) {
102 + struct mvneta_pcpu_port *pcpu_port =
103 + per_cpu_ptr(pp->ports, cpu);
104 +
105 + napi_synchronize(&pcpu_port->napi);
106 + napi_disable(&pcpu_port->napi);
107 + }
108 +
109 + pp->rxq_def = pp->indir[0];
110 +
111 + /* Update unicast mapping */
112 + mvneta_set_rx_mode(pp->dev);
113 +
114 + /* Update val of portCfg register accordingly with all RxQueue types */
115 + val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def);
116 + mvreg_write(pp, MVNETA_PORT_CONFIG, val);
117 +
118 + /* Update the elected CPU matching the new rxq_def */
119 + mvneta_percpu_elect(pp);
120 +
121 + /* We have to synchronise on the napi of each CPU */
122 + for_each_online_cpu(cpu) {
123 + struct mvneta_pcpu_port *pcpu_port =
124 + per_cpu_ptr(pp->ports, cpu);
125 +
126 + napi_enable(&pcpu_port->napi);
127 + }
128 +
129 + netif_tx_start_all_queues(pp->dev);
130 +
131 + return 0;
132 +}
133 +
134 +static int mvneta_ethtool_set_rxfh(struct net_device *dev, const u32 *indir,
135 + const u8 *key, const u8 hfunc)
136 +{
137 + struct mvneta_port *pp = netdev_priv(dev);
138 + /* We require at least one supported parameter to be changed
139 + * and no change in any of the unsupported parameters
140 + */
141 + if (key ||
142 + (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
143 + return -EOPNOTSUPP;
144 +
145 + if (!indir)
146 + return 0;
147 +
148 + memcpy(pp->indir, indir, MVNETA_RSS_LU_TABLE_SIZE * sizeof(u32));
149 +
150 + return mvneta_config_rss(pp);
151 +}
152 +
152 +
153 +static int mvneta_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
154 + u8 *hfunc)
155 +{
156 + struct mvneta_port *pp = netdev_priv(dev);
157 +
158 + if (hfunc)
159 + *hfunc = ETH_RSS_HASH_TOP;
160 +
161 + if (!indir)
162 + return 0;
163 +
164 + memcpy(indir, pp->indir, MVNETA_RSS_LU_TABLE_SIZE * sizeof(u32));
165 +
166 + return 0;
167 +}
168 +
169 static const struct net_device_ops mvneta_netdev_ops = {
170 .ndo_open = mvneta_open,
171 .ndo_stop = mvneta_stop,
172 @@ -3257,6 +3376,10 @@ const struct ethtool_ops mvneta_eth_tool
173 .get_strings = mvneta_ethtool_get_strings,
174 .get_ethtool_stats = mvneta_ethtool_get_stats,
175 .get_sset_count = mvneta_ethtool_get_sset_count,
176 + .get_rxfh_indir_size = mvneta_ethtool_get_rxfh_indir_size,
177 + .get_rxnfc = mvneta_ethtool_get_rxnfc,
178 + .get_rxfh = mvneta_ethtool_get_rxfh,
179 + .set_rxfh = mvneta_ethtool_set_rxfh,
180 };
181
182 /* Initialize hw */
183 @@ -3448,6 +3571,8 @@ static int mvneta_probe(struct platform_
184
185 pp->rxq_def = rxq_def;
186
187 + pp->indir[0] = rxq_def;
188 +
189 pp->clk = devm_clk_get(&pdev->dev, NULL);
190 if (IS_ERR(pp->clk)) {
191 err = PTR_ERR(pp->clk);