kernel: bump 5.10 to 5.10.118
[openwrt/openwrt.git] / target / linux / generic / hack-5.10 / 600-bridge_offload.patch
1 --- a/include/linux/if_bridge.h
2 +++ b/include/linux/if_bridge.h
3 @@ -57,6 +57,7 @@ struct br_ip_list {
4 #define BR_MRP_LOST_CONT BIT(18)
5 #define BR_MRP_LOST_IN_CONT BIT(19)
6 #define BR_BPDU_FILTER BIT(20)
7 +#define BR_OFFLOAD BIT(21)
8
9 #define BR_DEFAULT_AGEING_TIME (300 * HZ)
10
11 --- a/net/bridge/Makefile
12 +++ b/net/bridge/Makefile
13 @@ -5,7 +5,7 @@
14
15 obj-$(CONFIG_BRIDGE) += bridge.o
16
17 -bridge-y := br.o br_device.o br_fdb.o br_forward.o br_if.o br_input.o \
18 +bridge-y := br.o br_device.o br_fdb.o br_forward.o br_if.o br_input.o br_offload.o \
19 br_ioctl.o br_stp.o br_stp_bpdu.o \
20 br_stp_if.o br_stp_timer.o br_netlink.o \
21 br_netlink_tunnel.o br_arp_nd_proxy.o
22 --- a/net/bridge/br.c
23 +++ b/net/bridge/br.c
24 @@ -18,6 +18,7 @@
25 #include <net/switchdev.h>
26
27 #include "br_private.h"
28 +#include "br_private_offload.h"
29
30 /*
31 * Handle changes in state of network devices enslaved to a bridge.
32 @@ -332,6 +333,10 @@ static int __init br_init(void)
33 if (err)
34 goto err_out;
35
36 + err = br_offload_init();
37 + if (err)
38 + goto err_out0;
39 +
40 err = register_pernet_subsys(&br_net_ops);
41 if (err)
42 goto err_out1;
43 @@ -375,6 +380,8 @@ err_out3:
44 err_out2:
45 unregister_pernet_subsys(&br_net_ops);
46 err_out1:
47 + br_offload_fini();
48 +err_out0:
49 br_fdb_fini();
50 err_out:
51 stp_proto_unregister(&br_stp_proto);
52 @@ -396,6 +403,7 @@ static void __exit br_deinit(void)
53 #if IS_ENABLED(CONFIG_ATM_LANE)
54 br_fdb_test_addr_hook = NULL;
55 #endif
56 + br_offload_fini();
57 br_fdb_fini();
58 }
59
60 --- a/net/bridge/br_device.c
61 +++ b/net/bridge/br_device.c
62 @@ -529,6 +529,8 @@ void br_dev_setup(struct net_device *dev
63 br->bridge_hello_time = br->hello_time = 2 * HZ;
64 br->bridge_forward_delay = br->forward_delay = 15 * HZ;
65 br->bridge_ageing_time = br->ageing_time = BR_DEFAULT_AGEING_TIME;
66 + br->offload_cache_size = 128;
67 + br->offload_cache_reserved = 8;
68 dev->max_mtu = ETH_MAX_MTU;
69
70 br_netfilter_rtable_init(br);
71 --- a/net/bridge/br_fdb.c
72 +++ b/net/bridge/br_fdb.c
73 @@ -23,6 +23,7 @@
74 #include <net/switchdev.h>
75 #include <trace/events/bridge.h>
76 #include "br_private.h"
77 +#include "br_private_offload.h"
78
79 static const struct rhashtable_params br_fdb_rht_params = {
80 .head_offset = offsetof(struct net_bridge_fdb_entry, rhnode),
81 @@ -513,6 +514,8 @@ static struct net_bridge_fdb_entry *fdb_
82 fdb->key.vlan_id = vid;
83 fdb->flags = flags;
84 fdb->updated = fdb->used = jiffies;
85 + INIT_HLIST_HEAD(&fdb->offload_in);
86 + INIT_HLIST_HEAD(&fdb->offload_out);
87 if (rhashtable_lookup_insert_fast(&br->fdb_hash_tbl,
88 &fdb->rhnode,
89 br_fdb_rht_params)) {
90 @@ -734,6 +737,8 @@ static void fdb_notify(struct net_bridge
91 struct sk_buff *skb;
92 int err = -ENOBUFS;
93
94 + br_offload_fdb_update(fdb);
95 +
96 if (swdev_notify)
97 br_switchdev_fdb_notify(br, fdb, type);
98
99 --- a/net/bridge/br_forward.c
100 +++ b/net/bridge/br_forward.c
101 @@ -16,6 +16,7 @@
102 #include <linux/if_vlan.h>
103 #include <linux/netfilter_bridge.h>
104 #include "br_private.h"
105 +#include "br_private_offload.h"
106
107 /* Don't forward packets to originating port or forwarding disabled */
108 static inline int should_deliver(const struct net_bridge_port *p,
109 @@ -32,6 +33,8 @@ static inline int should_deliver(const s
110
111 int br_dev_queue_push_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
112 {
113 + br_offload_output(skb);
114 +
115 skb_push(skb, ETH_HLEN);
116 if (!is_skb_forwardable(skb->dev, skb))
117 goto drop;
118 --- a/net/bridge/br_if.c
119 +++ b/net/bridge/br_if.c
120 @@ -25,6 +25,7 @@
121 #include <net/net_namespace.h>
122
123 #include "br_private.h"
124 +#include "br_private_offload.h"
125
126 /*
127 * Determine initial path cost based on speed.
128 @@ -427,7 +428,7 @@ static struct net_bridge_port *new_nbp(s
129 p->path_cost = port_cost(dev);
130 p->priority = 0x8000 >> BR_PORT_BITS;
131 p->port_no = index;
132 - p->flags = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
133 + p->flags = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD | BR_OFFLOAD;
134 br_init_port(p);
135 br_set_state(p, BR_STATE_DISABLED);
136 br_stp_port_timer_init(p);
137 @@ -777,6 +778,9 @@ void br_port_flags_change(struct net_bri
138
139 if (mask & BR_NEIGH_SUPPRESS)
140 br_recalculate_neigh_suppress_enabled(br);
141 +
142 + if (mask & BR_OFFLOAD)
143 + br_offload_port_state(p);
144 }
145
146 bool br_port_flag_is_set(const struct net_device *dev, unsigned long flag)
147 --- a/net/bridge/br_input.c
148 +++ b/net/bridge/br_input.c
149 @@ -22,6 +22,7 @@
150 #include <linux/rculist.h>
151 #include "br_private.h"
152 #include "br_private_tunnel.h"
153 +#include "br_private_offload.h"
154
155 static int
156 br_netif_receive_skb(struct net *net, struct sock *sk, struct sk_buff *skb)
157 @@ -169,6 +170,7 @@ int br_handle_frame_finish(struct net *n
158 dst->used = now;
159 br_forward(dst->dst, skb, local_rcv, false);
160 } else {
161 + br_offload_skb_disable(skb);
162 if (!mcast_hit)
163 br_flood(br, skb, pkt_type, local_rcv, false);
164 else
165 @@ -287,6 +289,9 @@ static rx_handler_result_t br_handle_fra
166 memset(skb->cb, 0, sizeof(struct br_input_skb_cb));
167
168 p = br_port_get_rcu(skb->dev);
169 + if (br_offload_input(p, skb))
170 + return RX_HANDLER_CONSUMED;
171 +
172 if (p->flags & BR_VLAN_TUNNEL) {
173 if (br_handle_ingress_vlan_tunnel(skb, p,
174 nbp_vlan_group_rcu(p)))
175 --- /dev/null
176 +++ b/net/bridge/br_offload.c
177 @@ -0,0 +1,436 @@
178 +// SPDX-License-Identifier: GPL-2.0-only
179 +#include <linux/kernel.h>
180 +#include <linux/workqueue.h>
181 +#include "br_private.h"
182 +#include "br_private_offload.h"
183 +
184 +static DEFINE_SPINLOCK(offload_lock);
185 +
186 +struct bridge_flow_key {
187 + u8 dest[ETH_ALEN];
188 + u8 src[ETH_ALEN];
189 +#ifdef CONFIG_BRIDGE_VLAN_FILTERING
190 + u16 vlan_tag;
191 + bool vlan_present;
192 +#endif
193 +};
194 +
195 +struct bridge_flow {
196 + struct net_bridge_port *port;
197 + struct rhash_head node;
198 + struct bridge_flow_key key;
199 +#ifdef CONFIG_BRIDGE_VLAN_FILTERING
200 + bool vlan_out_present;
201 + u16 vlan_out;
202 +#endif
203 +
204 + unsigned long used;
205 + struct net_bridge_fdb_entry *fdb_in, *fdb_out;
206 + struct hlist_node fdb_list_in, fdb_list_out;
207 +
208 + struct rcu_head rcu;
209 +};
210 +
211 +static const struct rhashtable_params flow_params = {
212 + .automatic_shrinking = true,
213 + .head_offset = offsetof(struct bridge_flow, node),
214 + .key_len = sizeof(struct bridge_flow_key),
215 + .key_offset = offsetof(struct bridge_flow, key),
216 +};
217 +
218 +static struct kmem_cache *offload_cache __read_mostly;
219 +
220 +static void
221 +flow_rcu_free(struct rcu_head *head)
222 +{
223 + struct bridge_flow *flow;
224 +
225 + flow = container_of(head, struct bridge_flow, rcu);
226 + kmem_cache_free(offload_cache, flow);
227 +}
228 +
229 +static void
230 +__br_offload_flow_free(struct bridge_flow *flow)
231 +{
232 + flow->used = 0;
233 + hlist_del(&flow->fdb_list_in);
234 + hlist_del(&flow->fdb_list_out);
235 +
236 + call_rcu(&flow->rcu, flow_rcu_free);
237 +}
238 +
239 +static void
240 +br_offload_flow_free(struct bridge_flow *flow)
241 +{
242 + if (rhashtable_remove_fast(&flow->port->offload.rht, &flow->node,
243 + flow_params) != 0)
244 + return;
245 +
246 + __br_offload_flow_free(flow);
247 +}
248 +
249 +static bool
250 +br_offload_flow_fdb_refresh_time(struct bridge_flow *flow,
251 + struct net_bridge_fdb_entry *fdb)
252 +{
253 + if (!time_after(flow->used, fdb->updated))
254 + return false;
255 +
256 + fdb->updated = flow->used;
257 +
258 + return true;
259 +}
260 +
261 +
262 +static void
263 +br_offload_flow_refresh_time(struct bridge_flow *flow)
264 +{
265 + br_offload_flow_fdb_refresh_time(flow, flow->fdb_in);
266 + br_offload_flow_fdb_refresh_time(flow, flow->fdb_out);
267 +}
268 +
269 +static void
270 +br_offload_destroy_cb(void *ptr, void *arg)
271 +{
272 + struct bridge_flow *flow = ptr;
273 +
274 + __br_offload_flow_free(flow);
275 +}
276 +
277 +static bool
278 +br_offload_need_gc(struct net_bridge_port *p)
279 +{
280 + return (atomic_read(&p->offload.rht.nelems) +
281 + p->br->offload_cache_reserved) >= p->br->offload_cache_size;
282 +}
283 +
284 +static void
285 +br_offload_gc_work(struct work_struct *work)
286 +{
287 + struct rhashtable_iter hti;
288 + struct net_bridge_port *p;
289 + struct bridge_flow *gc_flow = NULL;
290 + struct bridge_flow *flow;
291 + unsigned long gc_used;
292 +
293 + p = container_of(work, struct net_bridge_port, offload.gc_work);
294 +
295 + if (!br_offload_need_gc(p))
296 + return;
297 +
298 + rhashtable_walk_enter(&p->offload.rht, &hti);
299 + rhashtable_walk_start(&hti);
300 + while ((flow = rhashtable_walk_next(&hti)) != NULL) {
301 + unsigned long used;
302 +
303 + if (IS_ERR(flow))
304 + continue;
305 +
306 + used = READ_ONCE(flow->used);
307 + if (!used)
308 + continue;
309 +
310 + if (gc_flow && !time_before(used, gc_used))
311 + continue;
312 +
313 + gc_flow = flow;
314 + gc_used = used;
315 + }
316 + rhashtable_walk_stop(&hti);
317 + rhashtable_walk_exit(&hti);
318 +
319 + if (!gc_flow)
320 + return;
321 +
322 + spin_lock_bh(&offload_lock);
323 + if (br_offload_need_gc(p) && gc_flow &&
324 + gc_flow->used == gc_used)
325 + br_offload_flow_free(gc_flow);
326 + if (p->offload.enabled && br_offload_need_gc(p))
327 + queue_work(system_long_wq, work);
328 + spin_unlock_bh(&offload_lock);
329 +
330 +}
331 +
332 +void br_offload_port_state(struct net_bridge_port *p)
333 +{
334 + struct net_bridge_port_offload *o = &p->offload;
335 + bool enabled = true;
336 + bool flush = false;
337 +
338 + if (p->state != BR_STATE_FORWARDING ||
339 + !(p->flags & BR_OFFLOAD))
340 + enabled = false;
341 +
342 + spin_lock_bh(&offload_lock);
343 + if (o->enabled == enabled)
344 + goto out;
345 +
346 + if (enabled) {
347 + if (!o->gc_work.func)
348 + INIT_WORK(&o->gc_work, br_offload_gc_work);
349 + rhashtable_init(&o->rht, &flow_params);
350 + } else {
351 + flush = true;
352 + rhashtable_free_and_destroy(&o->rht, br_offload_destroy_cb, o);
353 + }
354 +
355 + o->enabled = enabled;
356 +
357 +out:
358 + spin_unlock_bh(&offload_lock);
359 +
360 + if (flush)
361 + flush_work(&o->gc_work);
362 +}
363 +
364 +void br_offload_fdb_update(const struct net_bridge_fdb_entry *fdb)
365 +{
366 + struct bridge_flow *f;
367 + struct hlist_node *tmp;
368 +
369 + spin_lock_bh(&offload_lock);
370 +
371 + hlist_for_each_entry_safe(f, tmp, &fdb->offload_in, fdb_list_in)
372 + br_offload_flow_free(f);
373 +
374 + hlist_for_each_entry_safe(f, tmp, &fdb->offload_out, fdb_list_out)
375 + br_offload_flow_free(f);
376 +
377 + spin_unlock_bh(&offload_lock);
378 +}
379 +
380 +static void
381 +br_offload_prepare_key(struct net_bridge_port *p, struct bridge_flow_key *key,
382 + struct sk_buff *skb)
383 +{
384 + memset(key, 0, sizeof(*key));
385 + memcpy(key, eth_hdr(skb), 2 * ETH_ALEN);
386 +#ifdef CONFIG_BRIDGE_VLAN_FILTERING
387 + if (!br_opt_get(p->br, BROPT_VLAN_ENABLED))
388 + return;
389 +
390 + if (!skb_vlan_tag_present(skb) || skb->vlan_proto != p->br->vlan_proto)
391 + return;
392 +
393 + key->vlan_present = true;
394 + key->vlan_tag = skb_vlan_tag_get_id(skb);
395 +#endif
396 +}
397 +
398 +void br_offload_output(struct sk_buff *skb)
399 +{
400 + struct net_bridge_port_offload *o;
401 + struct br_input_skb_cb *cb = (struct br_input_skb_cb *)skb->cb;
402 + struct net_bridge_port *p, *inp;
403 + struct net_device *dev;
404 + struct net_bridge_fdb_entry *fdb_in, *fdb_out;
405 + struct net_bridge_vlan_group *vg;
406 + struct bridge_flow_key key;
407 + struct bridge_flow *flow;
408 + u16 vlan;
409 +
410 + if (!cb->offload)
411 + return;
412 +
413 + rcu_read_lock();
414 +
415 + p = br_port_get_rcu(skb->dev);
416 + if (!p)
417 + goto out;
418 +
419 + o = &p->offload;
420 + if (!o->enabled)
421 + goto out;
422 +
423 + if (atomic_read(&p->offload.rht.nelems) >= p->br->offload_cache_size)
424 + goto out;
425 +
426 + dev = dev_get_by_index_rcu(dev_net(p->br->dev), cb->input_ifindex);
427 + if (!dev)
428 + goto out;
429 +
430 + inp = br_port_get_rcu(dev);
431 + if (!inp)
432 + goto out;
433 +
434 + vg = nbp_vlan_group_rcu(inp);
435 + vlan = cb->input_vlan_present ? cb->input_vlan_tag : br_get_pvid(vg);
436 + fdb_in = br_fdb_find_rcu(p->br, eth_hdr(skb)->h_source, vlan);
437 + if (!fdb_in || !fdb_in->dst)
438 + goto out;
439 +
440 + vg = nbp_vlan_group_rcu(p);
441 + vlan = skb_vlan_tag_present(skb) ? skb_vlan_tag_get_id(skb) : br_get_pvid(vg);
442 + fdb_out = br_fdb_find_rcu(p->br, eth_hdr(skb)->h_dest, vlan);
443 + if (!fdb_out || !fdb_out->dst)
444 + goto out;
445 +
446 + br_offload_prepare_key(p, &key, skb);
447 +#ifdef CONFIG_BRIDGE_VLAN_FILTERING
448 + key.vlan_present = cb->input_vlan_present;
449 + key.vlan_tag = cb->input_vlan_tag;
450 +#endif
451 +
452 + if (!(flow = kmem_cache_alloc(offload_cache, GFP_ATOMIC))) goto out; /* GFP_ATOMIC can fail; avoid NULL deref below */
453 + flow->port = inp;
454 + memcpy(&flow->key, &key, sizeof(key));
455 +
456 +#ifdef CONFIG_BRIDGE_VLAN_FILTERING
457 + flow->vlan_out_present = skb_vlan_tag_present(skb);
458 + flow->vlan_out = skb_vlan_tag_get(skb);
459 +#endif
460 +
461 + flow->fdb_in = fdb_in;
462 + flow->fdb_out = fdb_out;
463 + flow->used = jiffies;
464 +
465 + spin_lock_bh(&offload_lock);
466 + if (!o->enabled ||
467 + atomic_read(&p->offload.rht.nelems) >= p->br->offload_cache_size ||
468 + rhashtable_insert_fast(&inp->offload.rht, &flow->node, flow_params)) {
469 + kmem_cache_free(offload_cache, flow);
470 + goto out_unlock;
471 + }
472 +
473 + hlist_add_head(&flow->fdb_list_in, &fdb_in->offload_in);
474 + hlist_add_head(&flow->fdb_list_out, &fdb_out->offload_out);
475 +
476 + if (br_offload_need_gc(p))
477 + queue_work(system_long_wq, &p->offload.gc_work);
478 +
479 +out_unlock:
480 + spin_unlock_bh(&offload_lock);
481 +
482 +out:
483 + rcu_read_unlock();
484 +}
485 +
486 +bool br_offload_input(struct net_bridge_port *p, struct sk_buff *skb)
487 +{
488 + struct net_bridge_port_offload *o = &p->offload;
489 + struct br_input_skb_cb *cb = (struct br_input_skb_cb *)skb->cb;
490 + struct bridge_flow_key key;
491 + struct net_bridge_port *dst;
492 + struct bridge_flow *flow;
493 + unsigned long now = jiffies;
494 + bool ret = false;
495 +
496 + if (skb->len < sizeof(key))
497 + return false;
498 +
499 + if (!o->enabled)
500 + return false;
501 +
502 + if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))
503 + return false;
504 +
505 + br_offload_prepare_key(p, &key, skb);
506 +
507 + rcu_read_lock();
508 + flow = rhashtable_lookup(&o->rht, &key, flow_params);
509 + if (!flow) {
510 + cb->offload = 1;
511 +#ifdef CONFIG_BRIDGE_VLAN_FILTERING
512 + cb->input_vlan_present = key.vlan_present != 0;
513 + cb->input_vlan_tag = key.vlan_tag;
514 +#endif
515 + cb->input_ifindex = p->dev->ifindex;
516 + goto out;
517 + }
518 +
519 + if (flow->fdb_in->dst != p)
520 + goto out;
521 +
522 + dst = flow->fdb_out->dst;
523 + if (!dst)
524 + goto out;
525 +
526 + ret = true;
527 +#ifdef CONFIG_BRIDGE_VLAN_FILTERING
528 + if (!flow->vlan_out_present && key.vlan_present) {
529 + __vlan_hwaccel_clear_tag(skb);
530 + } else if (flow->vlan_out_present) {
531 + if (skb_vlan_tag_present(skb) &&
532 + skb->vlan_proto != p->br->vlan_proto) {
533 + /* Protocol-mismatch, empty out vlan_tci for new tag */
534 + skb_push(skb, ETH_HLEN);
535 + skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
536 + skb_vlan_tag_get(skb));
537 + if (unlikely(!skb))
538 + goto out;
539 +
540 + skb_pull(skb, ETH_HLEN);
541 + skb_reset_mac_len(skb);
542 + }
543 +
544 + __vlan_hwaccel_put_tag(skb, p->br->vlan_proto,
545 + flow->vlan_out);
546 + }
547 +#endif
548 +
549 + skb->dev = dst->dev;
550 + skb_push(skb, ETH_HLEN);
551 +
552 + if (skb_warn_if_lro(skb) || !is_skb_forwardable(skb->dev, skb)) {
553 + kfree_skb(skb);
554 + goto out;
555 + }
556 +
557 + if (now - flow->used >= HZ) {
558 + flow->used = now;
559 + br_offload_flow_refresh_time(flow);
560 + }
561 +
562 + skb_forward_csum(skb);
563 + dev_queue_xmit(skb);
564 +
565 +out:
566 + rcu_read_unlock();
567 + return ret;
568 +}
569 +
570 +static void
571 +br_offload_check_gc(struct net_bridge *br)
572 +{
573 + struct net_bridge_port *p;
574 +
575 + spin_lock_bh(&br->lock);
576 + list_for_each_entry(p, &br->port_list, list)
577 + if (br_offload_need_gc(p))
578 + queue_work(system_long_wq, &p->offload.gc_work);
579 + spin_unlock_bh(&br->lock);
580 +}
581 +
582 +
583 +int br_offload_set_cache_size(struct net_bridge *br, unsigned long val)
584 +{
585 + br->offload_cache_size = val;
586 + br_offload_check_gc(br);
587 +
588 + return 0;
589 +}
590 +
591 +int br_offload_set_cache_reserved(struct net_bridge *br, unsigned long val)
592 +{
593 + br->offload_cache_reserved = val;
594 + br_offload_check_gc(br);
595 +
596 + return 0;
597 +}
598 +
599 +int __init br_offload_init(void)
600 +{
601 + offload_cache = kmem_cache_create("bridge_offload_cache",
602 + sizeof(struct bridge_flow),
603 + 0, SLAB_HWCACHE_ALIGN, NULL);
604 + if (!offload_cache)
605 + return -ENOMEM;
606 +
607 + return 0;
608 +}
609 +
610 +void br_offload_fini(void)
611 +{
612 + kmem_cache_destroy(offload_cache);
613 +}
614 --- a/net/bridge/br_private.h
615 +++ b/net/bridge/br_private.h
616 @@ -207,7 +207,13 @@ struct net_bridge_fdb_entry {
617 unsigned long updated ____cacheline_aligned_in_smp;
618 unsigned long used;
619
620 - struct rcu_head rcu;
621 + union {
622 + struct {
623 + struct hlist_head offload_in;
624 + struct hlist_head offload_out;
625 + };
626 + struct rcu_head rcu;
627 + };
628 };
629
630 #define MDB_PG_FLAGS_PERMANENT BIT(0)
631 @@ -280,6 +286,12 @@ struct net_bridge_mdb_entry {
632 struct rcu_head rcu;
633 };
634
635 +struct net_bridge_port_offload {
636 + struct rhashtable rht;
637 + struct work_struct gc_work;
638 + bool enabled;
639 +};
640 +
641 struct net_bridge_port {
642 struct net_bridge *br;
643 struct net_device *dev;
644 @@ -337,6 +349,7 @@ struct net_bridge_port {
645 u16 backup_redirected_cnt;
646
647 struct bridge_stp_xstats stp_xstats;
648 + struct net_bridge_port_offload offload;
649 };
650
651 #define kobj_to_brport(obj) container_of(obj, struct net_bridge_port, kobj)
652 @@ -475,6 +488,9 @@ struct net_bridge {
653 struct kobject *ifobj;
654 u32 auto_cnt;
655
656 + u32 offload_cache_size;
657 + u32 offload_cache_reserved;
658 +
659 #ifdef CONFIG_NET_SWITCHDEV
660 int offload_fwd_mark;
661 #endif
662 @@ -501,6 +517,10 @@ struct br_input_skb_cb {
663 #ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
664 u8 br_netfilter_broute:1;
665 #endif
666 + u8 offload:1;
667 + u8 input_vlan_present:1;
668 + u16 input_vlan_tag;
669 + int input_ifindex;
670
671 #ifdef CONFIG_NET_SWITCHDEV
672 int offload_fwd_mark;
673 --- /dev/null
674 +++ b/net/bridge/br_private_offload.h
675 @@ -0,0 +1,21 @@
676 +#ifndef __BR_OFFLOAD_H
677 +#define __BR_OFFLOAD_H
678 +
679 +bool br_offload_input(struct net_bridge_port *p, struct sk_buff *skb);
680 +void br_offload_output(struct sk_buff *skb);
681 +void br_offload_port_state(struct net_bridge_port *p);
682 +void br_offload_fdb_update(const struct net_bridge_fdb_entry *fdb);
683 +int br_offload_init(void);
684 +void br_offload_fini(void);
685 +int br_offload_set_cache_size(struct net_bridge *br, unsigned long val);
686 +int br_offload_set_cache_reserved(struct net_bridge *br, unsigned long val);
687 +
688 +static inline void br_offload_skb_disable(struct sk_buff *skb)
689 +{
690 + struct br_input_skb_cb *cb = (struct br_input_skb_cb *)skb->cb;
691 +
692 + if (cb->offload)
693 + cb->offload = 0;
694 +}
695 +
696 +#endif
697 --- a/net/bridge/br_stp.c
698 +++ b/net/bridge/br_stp.c
699 @@ -12,6 +12,7 @@
700
701 #include "br_private.h"
702 #include "br_private_stp.h"
703 +#include "br_private_offload.h"
704
705 /* since time values in bpdu are in jiffies and then scaled (1/256)
706 * before sending, make sure that is at least one STP tick.
707 @@ -52,6 +53,8 @@ void br_set_state(struct net_bridge_port
708 (unsigned int) p->port_no, p->dev->name,
709 br_port_state_names[p->state]);
710
711 + br_offload_port_state(p);
712 +
713 if (p->br->stp_enabled == BR_KERNEL_STP) {
714 switch (p->state) {
715 case BR_STATE_BLOCKING:
716 --- a/net/bridge/br_sysfs_br.c
717 +++ b/net/bridge/br_sysfs_br.c
718 @@ -18,6 +18,7 @@
719 #include <linux/sched/signal.h>
720
721 #include "br_private.h"
722 +#include "br_private_offload.h"
723
724 #define to_bridge(cd) ((struct net_bridge *)netdev_priv(to_net_dev(cd)))
725
726 @@ -842,6 +843,38 @@ static ssize_t vlan_stats_per_port_store
727 static DEVICE_ATTR_RW(vlan_stats_per_port);
728 #endif
729
730 +static ssize_t offload_cache_size_show(struct device *d,
731 + struct device_attribute *attr,
732 + char *buf)
733 +{
734 + struct net_bridge *br = to_bridge(d);
735 + return sprintf(buf, "%u\n", br->offload_cache_size);
736 +}
737 +
738 +static ssize_t offload_cache_size_store(struct device *d,
739 + struct device_attribute *attr,
740 + const char *buf, size_t len)
741 +{
742 + return store_bridge_parm(d, buf, len, br_offload_set_cache_size);
743 +}
744 +static DEVICE_ATTR_RW(offload_cache_size);
745 +
746 +static ssize_t offload_cache_reserved_show(struct device *d,
747 + struct device_attribute *attr,
748 + char *buf)
749 +{
750 + struct net_bridge *br = to_bridge(d);
751 + return sprintf(buf, "%u\n", br->offload_cache_reserved);
752 +}
753 +
754 +static ssize_t offload_cache_reserved_store(struct device *d,
755 + struct device_attribute *attr,
756 + const char *buf, size_t len)
757 +{
758 + return store_bridge_parm(d, buf, len, br_offload_set_cache_reserved);
759 +}
760 +static DEVICE_ATTR_RW(offload_cache_reserved);
761 +
762 static struct attribute *bridge_attrs[] = {
763 &dev_attr_forward_delay.attr,
764 &dev_attr_hello_time.attr,
765 @@ -896,6 +929,8 @@ static struct attribute *bridge_attrs[]
766 &dev_attr_vlan_stats_enabled.attr,
767 &dev_attr_vlan_stats_per_port.attr,
768 #endif
769 + &dev_attr_offload_cache_size.attr,
770 + &dev_attr_offload_cache_reserved.attr,
771 NULL
772 };
773
774 --- a/net/bridge/br_sysfs_if.c
775 +++ b/net/bridge/br_sysfs_if.c
776 @@ -234,6 +234,7 @@ BRPORT_ATTR_FLAG(broadcast_flood, BR_BCA
777 BRPORT_ATTR_FLAG(neigh_suppress, BR_NEIGH_SUPPRESS);
778 BRPORT_ATTR_FLAG(isolated, BR_ISOLATED);
779 BRPORT_ATTR_FLAG(bpdu_filter, BR_BPDU_FILTER);
780 +BRPORT_ATTR_FLAG(offload, BR_OFFLOAD);
781
782 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
783 static ssize_t show_multicast_router(struct net_bridge_port *p, char *buf)
784 @@ -288,6 +289,7 @@ static const struct brport_attribute *br
785 &brport_attr_isolated,
786 &brport_attr_bpdu_filter,
787 &brport_attr_backup_port,
788 + &brport_attr_offload,
789 NULL
790 };
791
792 --- a/net/bridge/br_vlan_tunnel.c
793 +++ b/net/bridge/br_vlan_tunnel.c
794 @@ -15,6 +15,7 @@
795
796 #include "br_private.h"
797 #include "br_private_tunnel.h"
798 +#include "br_private_offload.h"
799
800 static inline int br_vlan_tunid_cmp(struct rhashtable_compare_arg *arg,
801 const void *ptr)
802 @@ -180,6 +181,7 @@ int br_handle_ingress_vlan_tunnel(struct
803 skb_dst_drop(skb);
804
805 __vlan_hwaccel_put_tag(skb, p->br->vlan_proto, vlan->vid);
806 + br_offload_skb_disable(skb);
807
808 return 0;
809 }
810 @@ -203,6 +205,7 @@ int br_handle_egress_vlan_tunnel(struct
811 if (err)
812 return err;
813
814 + br_offload_skb_disable(skb);
815 tunnel_dst = rcu_dereference(vlan->tinfo.tunnel_dst);
816 if (tunnel_dst && dst_hold_safe(&tunnel_dst->dst))
817 skb_dst_set(skb, &tunnel_dst->dst);