d0a8ee8cfe4ba7aa7cae575c0079410a2dc1957b
[openwrt/staging/dedeckeh.git] / target / linux / realtek / files-5.15 / drivers / net / dsa / rtl83xx / tc.c
1 // SPDX-License-Identifier: GPL-2.0-only
2
3 #include <net/dsa.h>
4 #include <linux/delay.h>
5 #include <linux/netdevice.h>
6 #include <net/flow_offload.h>
7 #include <linux/rhashtable.h>
8
9 #include <asm/mach-rtl838x/mach-rtl83xx.h>
10 #include "rtl83xx.h"
11 #include "rtl838x.h"
12
13 /*
14 * Parse the flow rule for the matching conditions
15 */
16 static int rtl83xx_parse_flow_rule(struct rtl838x_switch_priv *priv,
17 struct flow_rule *rule, struct rtl83xx_flow *flow)
18 {
19 struct flow_dissector *dissector = rule->match.dissector;
20
21 pr_debug("In %s\n", __func__);
22 /* KEY_CONTROL and KEY_BASIC are needed for forming a meaningful key */
23 if ((dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CONTROL)) == 0 ||
24 (dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_BASIC)) == 0) {
25 pr_err("Cannot form TC key: used_keys = 0x%x\n", dissector->used_keys);
26 return -EOPNOTSUPP;
27 }
28
29 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
30 struct flow_match_basic match;
31
32 pr_debug("%s: BASIC\n", __func__);
33 flow_rule_match_basic(rule, &match);
34 if (match.key->n_proto == htons(ETH_P_ARP))
35 flow->rule.frame_type = 0;
36 if (match.key->n_proto == htons(ETH_P_IP))
37 flow->rule.frame_type = 2;
38 if (match.key->n_proto == htons(ETH_P_IPV6))
39 flow->rule.frame_type = 3;
40 if ((match.key->n_proto == htons(ETH_P_ARP)) || flow->rule.frame_type)
41 flow->rule.frame_type_m = 3;
42 if (flow->rule.frame_type >= 2) {
43 if (match.key->ip_proto == IPPROTO_UDP)
44 flow->rule.frame_type_l4 = 0;
45 if (match.key->ip_proto == IPPROTO_TCP)
46 flow->rule.frame_type_l4 = 1;
47 if (match.key->ip_proto == IPPROTO_ICMP
48 || match.key->ip_proto ==IPPROTO_ICMPV6)
49 flow->rule.frame_type_l4 = 2;
50 if (match.key->ip_proto == IPPROTO_TCP)
51 flow->rule.frame_type_l4 = 3;
52 if ((match.key->ip_proto == IPPROTO_UDP) || flow->rule.frame_type_l4)
53 flow->rule.frame_type_l4_m = 7;
54 }
55 }
56
57 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
58 struct flow_match_eth_addrs match;
59
60 pr_debug("%s: ETH_ADDR\n", __func__);
61 flow_rule_match_eth_addrs(rule, &match);
62 ether_addr_copy(flow->rule.dmac, match.key->dst);
63 ether_addr_copy(flow->rule.dmac_m, match.mask->dst);
64 ether_addr_copy(flow->rule.smac, match.key->src);
65 ether_addr_copy(flow->rule.smac_m, match.mask->src);
66 }
67
68 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
69 struct flow_match_vlan match;
70
71 pr_debug("%s: VLAN\n", __func__);
72 flow_rule_match_vlan(rule, &match);
73 flow->rule.itag = match.key->vlan_id;
74 flow->rule.itag_m = match.mask->vlan_id;
75 // TODO: What about match.key->vlan_priority ?
76 }
77
78 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
79 struct flow_match_ipv4_addrs match;
80
81 pr_debug("%s: IPV4\n", __func__);
82 flow_rule_match_ipv4_addrs(rule, &match);
83 flow->rule.is_ipv6 = false;
84 flow->rule.dip = match.key->dst;
85 flow->rule.dip_m = match.mask->dst;
86 flow->rule.sip = match.key->src;
87 flow->rule.sip_m = match.mask->src;
88 } else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
89 struct flow_match_ipv6_addrs match;
90
91 pr_debug("%s: IPV6\n", __func__);
92 flow->rule.is_ipv6 = true;
93 flow_rule_match_ipv6_addrs(rule, &match);
94 flow->rule.dip6 = match.key->dst;
95 flow->rule.dip6_m = match.mask->dst;
96 flow->rule.sip6 = match.key->src;
97 flow->rule.sip6_m = match.mask->src;
98 }
99
100 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
101 struct flow_match_ports match;
102
103 pr_debug("%s: PORTS\n", __func__);
104 flow_rule_match_ports(rule, &match);
105 flow->rule.dport = match.key->dst;
106 flow->rule.dport_m = match.mask->dst;
107 flow->rule.sport = match.key->src;
108 flow->rule.sport_m = match.mask->src;
109 }
110
111 // TODO: ICMP
112 return 0;
113 }
114
115 static void rtl83xx_flow_bypass_all(struct rtl83xx_flow *flow)
116 {
117 flow->rule.bypass_sel = true;
118 flow->rule.bypass_all = true;
119 flow->rule.bypass_igr_stp = true;
120 flow->rule.bypass_ibc_sc = true;
121 }
122
123 static int rtl83xx_parse_fwd(struct rtl838x_switch_priv *priv,
124 const struct flow_action_entry *act, struct rtl83xx_flow *flow)
125 {
126 struct net_device *dev = act->dev;
127 int port;
128
129 port = rtl83xx_port_is_under(dev, priv);
130 if (port < 0) {
131 netdev_info(dev, "%s: not a DSA device.\n", __func__);
132 return -EINVAL;
133 }
134
135 flow->rule.fwd_sel = true;
136 flow->rule.fwd_data = port;
137 pr_debug("Using port index: %d\n", port);
138 rtl83xx_flow_bypass_all(flow);
139
140 return 0;
141 }
142
143 static int rtl83xx_add_flow(struct rtl838x_switch_priv *priv, struct flow_cls_offload *f,
144 struct rtl83xx_flow *flow)
145 {
146 struct flow_rule *rule = flow_cls_offload_flow_rule(f);
147 const struct flow_action_entry *act;
148 int i, err;
149
150 pr_debug("%s\n", __func__);
151
152 rtl83xx_parse_flow_rule(priv, rule, flow);
153
154 flow_action_for_each(i, act, &rule->action) {
155 switch (act->id) {
156 case FLOW_ACTION_DROP:
157 pr_debug("%s: DROP\n", __func__);
158 flow->rule.drop = true;
159 rtl83xx_flow_bypass_all(flow);
160 return 0;
161
162 case FLOW_ACTION_TRAP:
163 pr_debug("%s: TRAP\n", __func__);
164 flow->rule.fwd_data = priv->cpu_port;
165 flow->rule.fwd_act = PIE_ACT_REDIRECT_TO_PORT;
166 rtl83xx_flow_bypass_all(flow);
167 break;
168
169 case FLOW_ACTION_MANGLE:
170 pr_err("%s: FLOW_ACTION_MANGLE not supported\n", __func__);
171 return -EOPNOTSUPP;
172
173 case FLOW_ACTION_ADD:
174 pr_err("%s: FLOW_ACTION_ADD not supported\n", __func__);
175 return -EOPNOTSUPP;
176
177 case FLOW_ACTION_VLAN_PUSH:
178 pr_debug("%s: VLAN_PUSH\n", __func__);
179 // TODO: act->vlan.proto
180 flow->rule.ivid_act = PIE_ACT_VID_ASSIGN;
181 flow->rule.ivid_sel = true;
182 flow->rule.ivid_data = htons(act->vlan.vid);
183 flow->rule.ovid_act = PIE_ACT_VID_ASSIGN;
184 flow->rule.ovid_sel = true;
185 flow->rule.ovid_data = htons(act->vlan.vid);
186 flow->rule.fwd_mod_to_cpu = true;
187 break;
188
189 case FLOW_ACTION_VLAN_POP:
190 pr_debug("%s: VLAN_POP\n", __func__);
191 flow->rule.ivid_act = PIE_ACT_VID_ASSIGN;
192 flow->rule.ivid_data = 0;
193 flow->rule.ivid_sel = true;
194 flow->rule.ovid_act = PIE_ACT_VID_ASSIGN;
195 flow->rule.ovid_data = 0;
196 flow->rule.ovid_sel = true;
197 flow->rule.fwd_mod_to_cpu = true;
198 break;
199
200 case FLOW_ACTION_CSUM:
201 pr_err("%s: FLOW_ACTION_CSUM not supported\n", __func__);
202 return -EOPNOTSUPP;
203
204 case FLOW_ACTION_REDIRECT:
205 pr_debug("%s: REDIRECT\n", __func__);
206 err = rtl83xx_parse_fwd(priv, act, flow);
207 if (err)
208 return err;
209 flow->rule.fwd_act = PIE_ACT_REDIRECT_TO_PORT;
210 break;
211
212 case FLOW_ACTION_MIRRED:
213 pr_debug("%s: MIRRED\n", __func__);
214 err = rtl83xx_parse_fwd(priv, act, flow);
215 if (err)
216 return err;
217 flow->rule.fwd_act = PIE_ACT_COPY_TO_PORT;
218 break;
219
220 default:
221 pr_err("%s: Flow action not supported: %d\n", __func__, act->id);
222 return -EOPNOTSUPP;
223 }
224 }
225
226 return 0;
227 }
228
229 static const struct rhashtable_params tc_ht_params = {
230 .head_offset = offsetof(struct rtl83xx_flow, node),
231 .key_offset = offsetof(struct rtl83xx_flow, cookie),
232 .key_len = sizeof(((struct rtl83xx_flow *)0)->cookie),
233 .automatic_shrinking = true,
234 };
235
236 static int rtl83xx_configure_flower(struct rtl838x_switch_priv *priv,
237 struct flow_cls_offload *f)
238 {
239 struct rtl83xx_flow *flow;
240 int err = 0;
241
242 pr_debug("In %s\n", __func__);
243
244 rcu_read_lock();
245 pr_debug("Cookie %08lx\n", f->cookie);
246 flow = rhashtable_lookup(&priv->tc_ht, &f->cookie, tc_ht_params);
247 if (flow) {
248 pr_info("%s: Got flow\n", __func__);
249 err = -EEXIST;
250 goto rcu_unlock;
251 }
252
253 rcu_unlock:
254 rcu_read_unlock();
255 if (flow)
256 goto out;
257 pr_debug("%s: New flow\n", __func__);
258
259 flow = kzalloc(sizeof(*flow), GFP_KERNEL);
260 if (!flow) {
261 err = -ENOMEM;
262 goto out;
263 }
264
265 flow->cookie = f->cookie;
266 flow->priv = priv;
267
268 err = rhashtable_insert_fast(&priv->tc_ht, &flow->node, tc_ht_params);
269 if (err) {
270 pr_err("Could not insert add new rule\n");
271 goto out_free;
272 }
273
274 rtl83xx_add_flow(priv, f, flow); // TODO: check error
275
276 // Add log action to flow
277 flow->rule.packet_cntr = rtl83xx_packet_cntr_alloc(priv);
278 if (flow->rule.packet_cntr >= 0) {
279 pr_debug("Using packet counter %d\n", flow->rule.packet_cntr);
280 flow->rule.log_sel = true;
281 flow->rule.log_data = flow->rule.packet_cntr;
282 }
283
284 err = priv->r->pie_rule_add(priv, &flow->rule);
285 return err;
286
287 out_free:
288 kfree(flow);
289 out:
290 pr_err("%s: error %d\n", __func__, err);
291 return err;
292 }
293
/*
 * FLOW_CLS_DESTROY handler: remove an offloaded flow and its PIE rule.
 *
 * Returns 0 on success or -EINVAL when no flow with this cookie is known.
 */
static int rtl83xx_delete_flower(struct rtl838x_switch_priv *priv,
				 struct flow_cls_offload * cls_flower)
{
	struct rtl83xx_flow *flow;

	pr_debug("In %s\n", __func__);
	rcu_read_lock();
	flow = rhashtable_lookup_fast(&priv->tc_ht, &cls_flower->cookie, tc_ht_params);
	if (!flow) {
		rcu_read_unlock();
		return -EINVAL;
	}

	/* Remove the rule from hardware before unpublishing the flow */
	priv->r->pie_rule_rm(priv, &flow->rule);

	rhashtable_remove_fast(&priv->tc_ht, &flow->node, tc_ht_params);

	/* Defer the actual free until concurrent RCU readers are done */
	kfree_rcu(flow, rcu_head);

	rcu_read_unlock();
	return 0;
}
316
317 static int rtl83xx_stats_flower(struct rtl838x_switch_priv *priv,
318 struct flow_cls_offload * cls_flower)
319 {
320 struct rtl83xx_flow *flow;
321 unsigned long lastused = 0;
322 int total_packets, new_packets;
323
324 pr_debug("%s: \n", __func__);
325 flow = rhashtable_lookup_fast(&priv->tc_ht, &cls_flower->cookie, tc_ht_params);
326 if (!flow)
327 return -1;
328
329 if (flow->rule.packet_cntr >= 0) {
330 total_packets = priv->r->packet_cntr_read(flow->rule.packet_cntr);
331 pr_debug("Total packets: %d\n", total_packets);
332 new_packets = total_packets - flow->rule.last_packet_cnt;
333 flow->rule.last_packet_cnt = total_packets;
334 }
335
336 // TODO: We need a second PIE rule to count the bytes
337 flow_stats_update(&cls_flower->stats, 100 * new_packets, new_packets, 0, lastused,
338 FLOW_ACTION_HW_STATS_IMMEDIATE);
339 return 0;
340 }
341
342 static int rtl83xx_setup_tc_cls_flower(struct rtl838x_switch_priv *priv,
343 struct flow_cls_offload *cls_flower)
344 {
345 pr_debug("%s: %d\n", __func__, cls_flower->command);
346 switch (cls_flower->command) {
347 case FLOW_CLS_REPLACE:
348 return rtl83xx_configure_flower(priv, cls_flower);
349 case FLOW_CLS_DESTROY:
350 return rtl83xx_delete_flower(priv, cls_flower);
351 case FLOW_CLS_STATS:
352 return rtl83xx_stats_flower(priv, cls_flower);
353 default:
354 return -EOPNOTSUPP;
355 }
356 }
357
358
359 static int rtl83xx_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
360 void *cb_priv)
361 {
362 struct rtl838x_switch_priv *priv = cb_priv;
363
364 switch (type) {
365 case TC_SETUP_CLSFLOWER:
366 pr_debug("%s: TC_SETUP_CLSFLOWER\n", __func__);
367 return rtl83xx_setup_tc_cls_flower(priv, type_data);
368 default:
369 return -EOPNOTSUPP;
370 }
371 }
372
373 static LIST_HEAD(rtl83xx_block_cb_list);
374
375 int rtl83xx_setup_tc(struct net_device *dev, enum tc_setup_type type, void *type_data)
376 {
377 struct rtl838x_switch_priv *priv;
378 struct flow_block_offload *f = type_data;
379 static bool first_time = true;
380 int err;
381
382 pr_debug("%s: %d\n", __func__, type);
383
384 if(!netdev_uses_dsa(dev)) {
385 pr_err("%s: no DSA\n", __func__);
386 return 0;
387 }
388 priv = dev->dsa_ptr->ds->priv;
389
390 switch (type) {
391 case TC_SETUP_BLOCK:
392 if (first_time) {
393 first_time = false;
394 err = rhashtable_init(&priv->tc_ht, &tc_ht_params);
395 if (err)
396 pr_err("%s: Could not initialize hash table\n", __func__);
397 }
398
399 f->unlocked_driver_cb = true;
400 return flow_block_cb_setup_simple(type_data,
401 &rtl83xx_block_cb_list,
402 rtl83xx_setup_tc_block_cb,
403 priv, priv, true);
404 default:
405 return -EOPNOTSUPP;
406 }
407
408 return 0;
409 }