From: Felix Fietkau <nbd@nbd.name>
Date: Thu, 10 Dec 2020 12:19:18 +0100
Subject: [PATCH] net: ethernet: mtk_eth_soc: add flow offloading support

This adds support for offloading IPv4 routed flows, including SNAT/DNAT,
one VLAN, and DSA.
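
For illustration (not part of this patch), offload is driven through the
flowtable path: nftables marks a flow for offload, the kernel calls
ndo_setup_tc() with TC_SETUP_FT, and mtk_flow_offload_replace() commits
the flow to the PPE. A minimal setup could look like this (interface
names are examples):

  nft add table inet filter
  nft add flowtable inet filter ft \
    '{ hook ingress priority 0; devices = { lan0, wan }; flags offload; }'
  nft add chain inet filter forward \
    '{ type filter hook forward priority 0; }'
  nft add rule inet filter forward ip protocol { tcp, udp } flow add @ft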

Signed-off-by: Felix Fietkau <nbd@nbd.name>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
---
 create mode 100644 drivers/net/ethernet/mediatek/mtk_ppe_offload.c

--- a/drivers/net/ethernet/mediatek/Makefile
+++ b/drivers/net/ethernet/mediatek/Makefile
@@ -4,5 +4,5 @@
 #
 
 obj-$(CONFIG_NET_MEDIATEK_SOC) += mtk_eth.o
-mtk_eth-y := mtk_eth_soc.o mtk_sgmii.o mtk_eth_path.o mtk_ppe.o mtk_ppe_debugfs.o
+mtk_eth-y := mtk_eth_soc.o mtk_sgmii.o mtk_eth_path.o mtk_ppe.o mtk_ppe_debugfs.o mtk_ppe_offload.o
 obj-$(CONFIG_NET_MEDIATEK_STAR_EMAC) += mtk_star_emac.o
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -2896,6 +2896,7 @@ static const struct net_device_ops mtk_n
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	= mtk_poll_controller,
 #endif
+	.ndo_setup_tc		= mtk_eth_setup_tc,
 };
 
 static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
@@ -3161,6 +3162,10 @@ static int mtk_probe(struct platform_dev
 				   eth->base + MTK_ETH_PPE_BASE, 2);
 		if (err)
 			goto err_free_dev;
+
+		err = mtk_eth_offload_init(eth);
+		if (err)
+			goto err_free_dev;
 	}
 
 	for (i = 0; i < MTK_MAX_DEVS; i++) {
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -16,6 +16,7 @@
 #include <linux/refcount.h>
 #include <linux/phylink.h>
 #include <linux/dim.h>
+#include <linux/rhashtable.h>
 #include "mtk_ppe.h"
 
 #define MTK_QDMA_PAGE_SIZE	2048
@@ -41,7 +42,8 @@
 				 NETIF_F_HW_VLAN_CTAG_RX | \
 				 NETIF_F_SG | NETIF_F_TSO | \
 				 NETIF_F_TSO6 | \
-				 NETIF_F_IPV6_CSUM)
+				 NETIF_F_IPV6_CSUM |\
+				 NETIF_F_HW_TC)
 #define MTK_HW_FEATURES_MT7628	(NETIF_F_SG | NETIF_F_RXCSUM)
 #define NEXT_DESP_IDX(X, Y)	(((X) + 1) & ((Y) - 1))
 
@@ -929,6 +931,7 @@ struct mtk_eth {
 	int				ip_align;
 
 	struct mtk_ppe			ppe;
+	struct rhashtable		flow_table;
 };
 
 /* struct mtk_mac - the structure that holds the info about the MACs of the
@@ -973,4 +976,9 @@ int mtk_gmac_sgmii_path_setup(struct mtk
 int mtk_gmac_gephy_path_setup(struct mtk_eth *eth, int mac_id);
 int mtk_gmac_rgmii_path_setup(struct mtk_eth *eth, int mac_id);
 
+int mtk_eth_offload_init(struct mtk_eth *eth);
+int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type,
+		     void *type_data);
+
+
 #endif /* MTK_ETH_H */
--- /dev/null
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
@@ -0,0 +1,497 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 Felix Fietkau <nbd@nbd.name>
+ */
+
+#include <linux/if_ether.h>
+#include <linux/rhashtable.h>
+#include <linux/ip.h>
+#include <net/flow_offload.h>
+#include <net/pkt_cls.h>
+#include <net/dsa.h>
+#include "mtk_eth_soc.h"
+
+struct mtk_flow_data {
+	struct ethhdr eth;
+
+	union {
+		struct {
+			__be32 src_addr;
+			__be32 dst_addr;
+		} v4;
+	};
+
+	__be16 src_port;
+	__be16 dst_port;
+
+	struct {
+		u16 id;
+		__be16 proto;
+		u8 num;
+	} vlan;
+};
+
+struct mtk_flow_entry {
+	struct rhash_head node;
+	unsigned long cookie;
+	u16 hash;
+};
+
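+/* flows are indexed by the flow_cls_offload cookie */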
+static const struct rhashtable_params mtk_flow_ht_params = {
+	.head_offset = offsetof(struct mtk_flow_entry, node),
+	.key_offset = offsetof(struct mtk_flow_entry, cookie),
+	.key_len = sizeof(unsigned long),
+	.automatic_shrinking = true,
+};
+
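+/* current FE timestamp, same units as the FOE entry bind timestamp */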
+static u32
+mtk_eth_timestamp(struct mtk_eth *eth)
+{
+	return mtk_r32(eth, 0x0010) & MTK_FOE_IB1_BIND_TIMESTAMP;
+}
+
+static int
+mtk_flow_set_ipv4_addr(struct mtk_foe_entry *foe, struct mtk_flow_data *data,
+		       bool egress)
+{
+	return mtk_foe_entry_set_ipv4_tuple(foe, egress,
+					    data->v4.src_addr, data->src_port,
+					    data->v4.dst_addr, data->dst_port);
+}
+
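+/* apply an ethernet header pedit; depending on the mask, the 32-bit
+ * mangle word covers either a full 4-byte span or 16 bits of it
+ */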
+static void
+mtk_flow_offload_mangle_eth(const struct flow_action_entry *act, void *eth)
+{
+	void *dest = eth + act->mangle.offset;
+	const void *src = &act->mangle.val;
+
+	if (act->mangle.offset > 8)
+		return;
+
+	if (act->mangle.mask == 0xffff) {
+		src += 2;
+		dest += 2;
+	}
+
+	memcpy(dest, src, act->mangle.mask ? 2 : 4);
+}
+
+
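+/* map a TCP/UDP port pedit onto the tuple's source/destination port */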
+static int
+mtk_flow_mangle_ports(const struct flow_action_entry *act,
+		      struct mtk_flow_data *data)
+{
+	u32 val = ntohl(act->mangle.val);
+
+	switch (act->mangle.offset) {
+	case 0:
+		if (act->mangle.mask == ~htonl(0xffff))
+			data->dst_port = cpu_to_be16(val);
+		else
+			data->src_port = cpu_to_be16(val >> 16);
+		break;
+	case 2:
+		data->dst_port = cpu_to_be16(val);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+mtk_flow_mangle_ipv4(const struct flow_action_entry *act,
+		     struct mtk_flow_data *data)
+{
+	__be32 *dest;
+
+	switch (act->mangle.offset) {
+	case offsetof(struct iphdr, saddr):
+		dest = &data->v4.src_addr;
+		break;
+	case offsetof(struct iphdr, daddr):
+		dest = &data->v4.dst_addr;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	memcpy(dest, &act->mangle.val, sizeof(u32));
+
+	return 0;
+}
+
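+/* if @dev is a DSA port using the MediaTek tag protocol, return the switch
+ * port index and point @dev at the CPU port's master device instead
+ */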
+static int
+mtk_flow_get_dsa_port(struct net_device **dev)
+{
+#if IS_ENABLED(CONFIG_NET_DSA)
+	struct dsa_port *dp;
+
+	dp = dsa_port_from_netdev(*dev);
+	if (IS_ERR(dp))
+		return -ENODEV;
+
+	if (!dp->cpu_dp)
+		return -ENODEV;
+
+	if (!dp->cpu_dp->tag_ops)
+		return -ENODEV;
+
+	if (dp->cpu_dp->tag_ops->proto != DSA_TAG_PROTO_MTK)
+		return -ENODEV;
+
+	*dev = dp->cpu_dp->master;
+
+	return dp->index;
+#else
+	return -ENODEV;
+#endif
+}
+
+static int
+mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe,
+			   struct net_device *dev)
+{
+	int pse_port, dsa_port;
+
+	dsa_port = mtk_flow_get_dsa_port(&dev);
+	if (dsa_port >= 0)
+		mtk_foe_entry_set_dsa(foe, dsa_port);
+
+	if (dev == eth->netdev[0])
+		pse_port = 1;
+	else if (dev == eth->netdev[1])
+		pse_port = 2;
+	else
+		return -EOPNOTSUPP;
+
+	mtk_foe_entry_set_pse_port(foe, pse_port);
+
+	return 0;
+}
+
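+/* translate a flower rule into a FOE entry and commit it to the PPE;
+ * only fully specified IPv4 TCP/UDP flows with a redirect action are
+ * accepted, anything else stays on the software path
+ */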
+static int
+mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
+{
+	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+	struct flow_action_entry *act;
+	struct mtk_flow_data data = {};
+	struct mtk_foe_entry foe;
+	struct net_device *odev = NULL;
+	struct mtk_flow_entry *entry;
+	int offload_type = 0;
+	u16 addr_type = 0;
+	u32 timestamp;
+	u8 l4proto = 0;
+	int err = 0;
+	int hash;
+	int i;
+
+	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META)) {
+		struct flow_match_meta match;
+
+		flow_rule_match_meta(rule, &match);
+	} else {
+		return -EOPNOTSUPP;
+	}
+
+	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
+		struct flow_match_control match;
+
+		flow_rule_match_control(rule, &match);
+		addr_type = match.key->addr_type;
+	} else {
+		return -EOPNOTSUPP;
+	}
+
+	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
+		struct flow_match_basic match;
+
+		flow_rule_match_basic(rule, &match);
+		l4proto = match.key->ip_proto;
+	} else {
+		return -EOPNOTSUPP;
+	}
+
+	flow_action_for_each(i, act, &rule->action) {
+		switch (act->id) {
+		case FLOW_ACTION_MANGLE:
+			if (act->mangle.htype == FLOW_ACT_MANGLE_HDR_TYPE_ETH)
+				mtk_flow_offload_mangle_eth(act, &data.eth);
+			break;
+		case FLOW_ACTION_REDIRECT:
+			odev = act->dev;
+			break;
+		case FLOW_ACTION_CSUM:
+			break;
+		case FLOW_ACTION_VLAN_PUSH:
+			if (data.vlan.num == 1 ||
+			    act->vlan.proto != htons(ETH_P_8021Q))
+				return -EOPNOTSUPP;
+
+			data.vlan.id = act->vlan.vid;
+			data.vlan.proto = act->vlan.proto;
+			data.vlan.num++;
+			break;
+		default:
+			return -EOPNOTSUPP;
+		}
+	}
+
+	switch (addr_type) {
+	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
+		offload_type = MTK_PPE_PKT_TYPE_IPV4_HNAPT;
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	if (!is_valid_ether_addr(data.eth.h_source) ||
+	    !is_valid_ether_addr(data.eth.h_dest))
+		return -EINVAL;
+
+	err = mtk_foe_entry_prepare(&foe, offload_type, l4proto, 0,
+				    data.eth.h_source,
+				    data.eth.h_dest);
+	if (err)
+		return err;
+
+	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
+		struct flow_match_ports ports;
+
+		flow_rule_match_ports(rule, &ports);
+		data.src_port = ports.key->src;
+		data.dst_port = ports.key->dst;
+	} else {
+		return -EOPNOTSUPP;
+	}
+
+	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+		struct flow_match_ipv4_addrs addrs;
+
+		flow_rule_match_ipv4_addrs(rule, &addrs);
+
+		data.v4.src_addr = addrs.key->src;
+		data.v4.dst_addr = addrs.key->dst;
+
+		mtk_flow_set_ipv4_addr(&foe, &data, false);
+	}
+
+	flow_action_for_each(i, act, &rule->action) {
+		if (act->id != FLOW_ACTION_MANGLE)
+			continue;
+
+		switch (act->mangle.htype) {
+		case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
+		case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
+			err = mtk_flow_mangle_ports(act, &data);
+			break;
+		case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
+			err = mtk_flow_mangle_ipv4(act, &data);
+			break;
+		case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
+			/* handled earlier */
+			break;
+		default:
+			return -EOPNOTSUPP;
+		}
+
+		if (err)
+			return err;
+	}
+
+	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+		err = mtk_flow_set_ipv4_addr(&foe, &data, true);
+		if (err)
+			return err;
+	}
+
+	if (data.vlan.num == 1) {
+		if (data.vlan.proto != htons(ETH_P_8021Q))
+			return -EOPNOTSUPP;
+
+		mtk_foe_entry_set_vlan(&foe, data.vlan.id);
+	}
+
+	err = mtk_flow_set_output_device(eth, &foe, odev);
+	if (err)
+		return err;
+
+	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+	if (!entry)
+		return -ENOMEM;
+
+	entry->cookie = f->cookie;
+	timestamp = mtk_eth_timestamp(eth);
+	hash = mtk_foe_entry_commit(&eth->ppe, &foe, timestamp);
+	if (hash < 0) {
+		err = hash;
+		goto free;
+	}
+
+	entry->hash = hash;
+	err = rhashtable_insert_fast(&eth->flow_table, &entry->node,
+				     mtk_flow_ht_params);
+	if (err < 0)
+		goto clear_flow;
+
+	return 0;
+clear_flow:
+	mtk_foe_entry_clear(&eth->ppe, hash);
+free:
+	kfree(entry);
+	return err;
+}
+
+static int
+mtk_flow_offload_destroy(struct mtk_eth *eth, struct flow_cls_offload *f)
+{
+	struct mtk_flow_entry *entry;
+
+	entry = rhashtable_lookup(&eth->flow_table, &f->cookie,
+				  mtk_flow_ht_params);
+	if (!entry)
+		return -ENOENT;
+
+	mtk_foe_entry_clear(&eth->ppe, entry->hash);
+	rhashtable_remove_fast(&eth->flow_table, &entry->node,
+			       mtk_flow_ht_params);
+	kfree(entry);
+
+	return 0;
+}
+
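+/* the PPE renews an entry's bind timestamp while the flow sees traffic,
+ * so idle time is the gap between that and the current FE timestamp
+ */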
+static int
+mtk_flow_offload_stats(struct mtk_eth *eth, struct flow_cls_offload *f)
+{
+	struct mtk_flow_entry *entry;
+	int timestamp;
+	u32 idle;
+
+	entry = rhashtable_lookup(&eth->flow_table, &f->cookie,
+				  mtk_flow_ht_params);
+	if (!entry)
+		return -ENOENT;
+
+	timestamp = mtk_foe_entry_timestamp(&eth->ppe, entry->hash);
+	if (timestamp < 0)
+		return -ETIMEDOUT;
+
+	idle = mtk_eth_timestamp(eth) - timestamp;
+	f->stats.lastused = jiffies - idle * HZ;
+
+	return 0;
+}
+
+static int
+mtk_eth_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
+{
+	struct flow_cls_offload *cls = type_data;
+	struct net_device *dev = cb_priv;
+	struct mtk_mac *mac = netdev_priv(dev);
+	struct mtk_eth *eth = mac->hw;
+
+	if (!tc_can_offload(dev))
+		return -EOPNOTSUPP;
+
+	if (type != TC_SETUP_CLSFLOWER)
+		return -EOPNOTSUPP;
+
+	switch (cls->command) {
+	case FLOW_CLS_REPLACE:
+		return mtk_flow_offload_replace(eth, cls);
+	case FLOW_CLS_DESTROY:
+		return mtk_flow_offload_destroy(eth, cls);
+	case FLOW_CLS_STATS:
+		return mtk_flow_offload_stats(eth, cls);
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
+
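+/* all MACs funnel into the same PPE, so a single static block cb list is
+ * shared across devices
+ */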
+static int
+mtk_eth_setup_tc_block(struct net_device *dev, struct flow_block_offload *f)
+{
+	struct mtk_mac *mac = netdev_priv(dev);
+	struct mtk_eth *eth = mac->hw;
+	static LIST_HEAD(block_cb_list);
+	struct flow_block_cb *block_cb;
+	flow_setup_cb_t *cb;
+
+	if (!eth->ppe.foe_table)
+		return -EOPNOTSUPP;
+
+	if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+		return -EOPNOTSUPP;
+
+	cb = mtk_eth_setup_tc_block_cb;
+	f->driver_block_list = &block_cb_list;
+
+	switch (f->command) {
+	case FLOW_BLOCK_BIND:
+		block_cb = flow_block_cb_lookup(f->block, cb, dev);
+		if (block_cb) {
+			flow_block_cb_incref(block_cb);
+			return 0;
+		}
+		block_cb = flow_block_cb_alloc(cb, dev, dev, NULL);
+		if (IS_ERR(block_cb))
+			return PTR_ERR(block_cb);
+
+		flow_block_cb_add(block_cb, f);
+		list_add_tail(&block_cb->driver_list, &block_cb_list);
+		return 0;
+	case FLOW_BLOCK_UNBIND:
+		block_cb = flow_block_cb_lookup(f->block, cb, dev);
+		if (!block_cb)
+			return -ENOENT;
+
+		if (flow_block_cb_decref(block_cb)) {
+			flow_block_cb_remove(block_cb, f);
+			list_del(&block_cb->driver_list);
+		}
+		return 0;
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
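+/* only the flowtable offload path (TC_SETUP_FT) is supported */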
+int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type,
+		     void *type_data)
+{
+	if (type == TC_SETUP_FT)
+		return mtk_eth_setup_tc_block(dev, type_data);
+
+	return -EOPNOTSUPP;
+}
+
+int mtk_eth_offload_init(struct mtk_eth *eth)
+{
+	if (!eth->ppe.foe_table)
+		return 0;
+
+	return rhashtable_init(&eth->flow_table, &mtk_flow_ht_params);
561 +}