ath25: switch default kernel to 5.15
[openwrt/openwrt.git] / target / linux / generic / hack-5.10 / 600-bridge_offload.patch
1 From: Felix Fietkau <nbd@nbd.name>
2 Subject: bridge: Add a fast path for the bridge code
3
4 This caches flows between MAC addresses on separate ports, including their VLAN
5 in order to bypass the normal bridge forwarding code.
6 In my test on MT7622, this reduces LAN->WLAN bridging CPU usage by 6-10%,
7 potentially even more on weaker platforms.
8
9 Submitted-by: Felix Fietkau <nbd@nbd.name>
10 ---
11 include/linux/if_bridge.h | 1 +
12 net/bridge/Makefile | 2 +-
13 net/bridge/br.c | 8 +++
14 net/bridge/br_device.c | 7 +++
15 net/bridge/br_fdb.c | 5 ++
16 net/bridge/br_forward.c | 3 ++
17 net/bridge/br_if.c | 7 ++-
18 net/bridge/br_input.c | 5 ++
19 net/bridge/br_offload.c | 436 +++
20 net/bridge/br_private.h | 22 ++++-
21 net/bridge/br_private_offload.h | 21 +++
22 net/bridge/br_stp.c | 3 +
23 net/bridge/br_sysfs_br.c | 35 ++++++
24 net/bridge/br_sysfs_if.c | 2 +
25 net/bridge/br_vlan_tunnel.c | 3 ++
26 15 files changed, 557 insertions(+), 3 deletions(-)
26 create mode 100644 net/bridge/br_offload.c
27 create mode 100644 net/bridge/br_private_offload.h
28
29 --- a/include/linux/if_bridge.h
30 +++ b/include/linux/if_bridge.h
31 @@ -57,6 +57,7 @@ struct br_ip_list {
32 #define BR_MRP_LOST_CONT BIT(18)
33 #define BR_MRP_LOST_IN_CONT BIT(19)
34 #define BR_BPDU_FILTER BIT(20)
35 +#define BR_OFFLOAD BIT(21)
36
37 #define BR_DEFAULT_AGEING_TIME (300 * HZ)
38
39 --- a/net/bridge/Makefile
40 +++ b/net/bridge/Makefile
41 @@ -5,7 +5,7 @@
42
43 obj-$(CONFIG_BRIDGE) += bridge.o
44
45 -bridge-y := br.o br_device.o br_fdb.o br_forward.o br_if.o br_input.o \
46 +bridge-y := br.o br_device.o br_fdb.o br_forward.o br_if.o br_input.o br_offload.o \
47 br_ioctl.o br_stp.o br_stp_bpdu.o \
48 br_stp_if.o br_stp_timer.o br_netlink.o \
49 br_netlink_tunnel.o br_arp_nd_proxy.o
50 --- a/net/bridge/br.c
51 +++ b/net/bridge/br.c
52 @@ -18,6 +18,7 @@
53 #include <net/switchdev.h>
54
55 #include "br_private.h"
56 +#include "br_private_offload.h"
57
58 /*
59 * Handle changes in state of network devices enslaved to a bridge.
60 @@ -332,6 +333,10 @@ static int __init br_init(void)
61 if (err)
62 goto err_out;
63
64 + err = br_offload_init();
65 + if (err)
66 + goto err_out0;
67 +
68 err = register_pernet_subsys(&br_net_ops);
69 if (err)
70 goto err_out1;
71 @@ -375,6 +380,8 @@ err_out3:
72 err_out2:
73 unregister_pernet_subsys(&br_net_ops);
74 err_out1:
75 + br_offload_fini();
76 +err_out0:
77 br_fdb_fini();
78 err_out:
79 stp_proto_unregister(&br_stp_proto);
80 @@ -396,6 +403,7 @@ static void __exit br_deinit(void)
81 #if IS_ENABLED(CONFIG_ATM_LANE)
82 br_fdb_test_addr_hook = NULL;
83 #endif
84 + br_offload_fini();
85 br_fdb_fini();
86 }
87
88 --- a/net/bridge/br_device.c
89 +++ b/net/bridge/br_device.c
90 @@ -529,6 +529,8 @@ void br_dev_setup(struct net_device *dev
91 br->bridge_hello_time = br->hello_time = 2 * HZ;
92 br->bridge_forward_delay = br->forward_delay = 15 * HZ;
93 br->bridge_ageing_time = br->ageing_time = BR_DEFAULT_AGEING_TIME;
94 + br->offload_cache_size = 128;
95 + br->offload_cache_reserved = 8;
96 dev->max_mtu = ETH_MAX_MTU;
97
98 br_netfilter_rtable_init(br);
99 --- a/net/bridge/br_fdb.c
100 +++ b/net/bridge/br_fdb.c
101 @@ -23,6 +23,7 @@
102 #include <net/switchdev.h>
103 #include <trace/events/bridge.h>
104 #include "br_private.h"
105 +#include "br_private_offload.h"
106
107 static const struct rhashtable_params br_fdb_rht_params = {
108 .head_offset = offsetof(struct net_bridge_fdb_entry, rhnode),
109 @@ -513,6 +514,8 @@ static struct net_bridge_fdb_entry *fdb_
110 fdb->key.vlan_id = vid;
111 fdb->flags = flags;
112 fdb->updated = fdb->used = jiffies;
113 + INIT_HLIST_HEAD(&fdb->offload_in);
114 + INIT_HLIST_HEAD(&fdb->offload_out);
115 if (rhashtable_lookup_insert_fast(&br->fdb_hash_tbl,
116 &fdb->rhnode,
117 br_fdb_rht_params)) {
118 @@ -734,6 +737,8 @@ static void fdb_notify(struct net_bridge
119 struct sk_buff *skb;
120 int err = -ENOBUFS;
121
122 + br_offload_fdb_update(fdb);
123 +
124 if (swdev_notify)
125 br_switchdev_fdb_notify(br, fdb, type);
126
127 --- a/net/bridge/br_forward.c
128 +++ b/net/bridge/br_forward.c
129 @@ -16,6 +16,7 @@
130 #include <linux/if_vlan.h>
131 #include <linux/netfilter_bridge.h>
132 #include "br_private.h"
133 +#include "br_private_offload.h"
134
135 /* Don't forward packets to originating port or forwarding disabled */
136 static inline int should_deliver(const struct net_bridge_port *p,
137 @@ -32,6 +33,8 @@ static inline int should_deliver(const s
138
139 int br_dev_queue_push_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
140 {
141 + br_offload_output(skb);
142 +
143 skb_push(skb, ETH_HLEN);
144 if (!is_skb_forwardable(skb->dev, skb))
145 goto drop;
146 --- a/net/bridge/br_if.c
147 +++ b/net/bridge/br_if.c
148 @@ -25,6 +25,7 @@
149 #include <net/net_namespace.h>
150
151 #include "br_private.h"
152 +#include "br_private_offload.h"
153
154 /*
155 * Determine initial path cost based on speed.
156 @@ -427,7 +428,7 @@ static struct net_bridge_port *new_nbp(s
157 p->path_cost = port_cost(dev);
158 p->priority = 0x8000 >> BR_PORT_BITS;
159 p->port_no = index;
160 - p->flags = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
161 + p->flags = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD | BR_OFFLOAD;
162 br_init_port(p);
163 br_set_state(p, BR_STATE_DISABLED);
164 br_stp_port_timer_init(p);
165 @@ -777,6 +778,9 @@ void br_port_flags_change(struct net_bri
166
167 if (mask & BR_NEIGH_SUPPRESS)
168 br_recalculate_neigh_suppress_enabled(br);
169 +
170 + if (mask & BR_OFFLOAD)
171 + br_offload_port_state(p);
172 }
173
174 bool br_port_flag_is_set(const struct net_device *dev, unsigned long flag)
175 --- a/net/bridge/br_input.c
176 +++ b/net/bridge/br_input.c
177 @@ -22,6 +22,7 @@
178 #include <linux/rculist.h>
179 #include "br_private.h"
180 #include "br_private_tunnel.h"
181 +#include "br_private_offload.h"
182
183 static int
184 br_netif_receive_skb(struct net *net, struct sock *sk, struct sk_buff *skb)
185 @@ -169,6 +170,7 @@ int br_handle_frame_finish(struct net *n
186 dst->used = now;
187 br_forward(dst->dst, skb, local_rcv, false);
188 } else {
189 + br_offload_skb_disable(skb);
190 if (!mcast_hit)
191 br_flood(br, skb, pkt_type, local_rcv, false);
192 else
193 @@ -287,6 +289,9 @@ static rx_handler_result_t br_handle_fra
194 memset(skb->cb, 0, sizeof(struct br_input_skb_cb));
195
196 p = br_port_get_rcu(skb->dev);
197 + if (br_offload_input(p, skb))
198 + return RX_HANDLER_CONSUMED;
199 +
200 if (p->flags & BR_VLAN_TUNNEL) {
201 if (br_handle_ingress_vlan_tunnel(skb, p,
202 nbp_vlan_group_rcu(p)))
203 --- /dev/null
204 +++ b/net/bridge/br_offload.c
205 @@ -0,0 +1,436 @@
206 +// SPDX-License-Identifier: GPL-2.0-only
207 +#include <linux/kernel.h>
208 +#include <linux/workqueue.h>
209 +#include "br_private.h"
210 +#include "br_private_offload.h"
211 +
212 +static DEFINE_SPINLOCK(offload_lock);
213 +
214 +struct bridge_flow_key {
215 + u8 dest[ETH_ALEN];
216 + u8 src[ETH_ALEN];
217 +#ifdef CONFIG_BRIDGE_VLAN_FILTERING
218 + u16 vlan_tag;
219 + bool vlan_present;
220 +#endif
221 +};
222 +
223 +struct bridge_flow {
224 + struct net_bridge_port *port;
225 + struct rhash_head node;
226 + struct bridge_flow_key key;
227 +#ifdef CONFIG_BRIDGE_VLAN_FILTERING
228 + bool vlan_out_present;
229 + u16 vlan_out;
230 +#endif
231 +
232 + unsigned long used;
233 + struct net_bridge_fdb_entry *fdb_in, *fdb_out;
234 + struct hlist_node fdb_list_in, fdb_list_out;
235 +
236 + struct rcu_head rcu;
237 +};
238 +
239 +static const struct rhashtable_params flow_params = {
240 + .automatic_shrinking = true,
241 + .head_offset = offsetof(struct bridge_flow, node),
242 + .key_len = sizeof(struct bridge_flow_key),
243 + .key_offset = offsetof(struct bridge_flow, key),
244 +};
245 +
246 +static struct kmem_cache *offload_cache __read_mostly;
247 +
248 +static void
249 +flow_rcu_free(struct rcu_head *head)
250 +{
251 + struct bridge_flow *flow;
252 +
253 + flow = container_of(head, struct bridge_flow, rcu);
254 + kmem_cache_free(offload_cache, flow);
255 +}
256 +
257 +static void
258 +__br_offload_flow_free(struct bridge_flow *flow)
259 +{
260 + flow->used = 0;
261 + hlist_del(&flow->fdb_list_in);
262 + hlist_del(&flow->fdb_list_out);
263 +
264 + call_rcu(&flow->rcu, flow_rcu_free);
265 +}
266 +
267 +static void
268 +br_offload_flow_free(struct bridge_flow *flow)
269 +{
270 + if (rhashtable_remove_fast(&flow->port->offload.rht, &flow->node,
271 + flow_params) != 0)
272 + return;
273 +
274 + __br_offload_flow_free(flow);
275 +}
276 +
277 +static bool
278 +br_offload_flow_fdb_refresh_time(struct bridge_flow *flow,
279 + struct net_bridge_fdb_entry *fdb)
280 +{
281 + if (!time_after(flow->used, fdb->updated))
282 + return false;
283 +
284 + fdb->updated = flow->used;
285 +
286 + return true;
287 +}
288 +
289 +
290 +static void
291 +br_offload_flow_refresh_time(struct bridge_flow *flow)
292 +{
293 + br_offload_flow_fdb_refresh_time(flow, flow->fdb_in);
294 + br_offload_flow_fdb_refresh_time(flow, flow->fdb_out);
295 +}
296 +
297 +static void
298 +br_offload_destroy_cb(void *ptr, void *arg)
299 +{
300 + struct bridge_flow *flow = ptr;
301 +
302 + __br_offload_flow_free(flow);
303 +}
304 +
305 +static bool
306 +br_offload_need_gc(struct net_bridge_port *p)
307 +{
308 + return (atomic_read(&p->offload.rht.nelems) +
309 + p->br->offload_cache_reserved) >= p->br->offload_cache_size;
310 +}
311 +
312 +static void
313 +br_offload_gc_work(struct work_struct *work)
314 +{
315 + struct rhashtable_iter hti;
316 + struct net_bridge_port *p;
317 + struct bridge_flow *gc_flow = NULL;
318 + struct bridge_flow *flow;
319 + unsigned long gc_used;
320 +
321 + p = container_of(work, struct net_bridge_port, offload.gc_work);
322 +
323 + if (!br_offload_need_gc(p))
324 + return;
325 +
326 + rhashtable_walk_enter(&p->offload.rht, &hti);
327 + rhashtable_walk_start(&hti);
328 + while ((flow = rhashtable_walk_next(&hti)) != NULL) {
329 + unsigned long used;
330 +
331 + if (IS_ERR(flow))
332 + continue;
333 +
334 + used = READ_ONCE(flow->used);
335 + if (!used)
336 + continue;
337 +
338 + if (gc_flow && !time_before(used, gc_used))
339 + continue;
340 +
341 + gc_flow = flow;
342 + gc_used = used;
343 + }
344 + rhashtable_walk_stop(&hti);
345 + rhashtable_walk_exit(&hti);
346 +
347 + if (!gc_flow)
348 + return;
349 +
350 + spin_lock_bh(&offload_lock);
351 + if (br_offload_need_gc(p) && gc_flow &&
352 + gc_flow->used == gc_used)
353 + br_offload_flow_free(gc_flow);
354 + if (p->offload.enabled && br_offload_need_gc(p))
355 + queue_work(system_long_wq, work);
356 + spin_unlock_bh(&offload_lock);
357 +
358 +}
359 +
360 +void br_offload_port_state(struct net_bridge_port *p) /* sync flow offload with port STP state and flags */
361 +{
362 + struct net_bridge_port_offload *o = &p->offload;
363 + bool enabled = true;
364 + bool flush = false;
365 +
366 + if (p->state != BR_STATE_FORWARDING ||
367 + !(p->flags & BR_OFFLOAD))
368 + enabled = false; /* offload only while forwarding with BR_OFFLOAD set */
369 +
370 + spin_lock_bh(&offload_lock);
371 + if (o->enabled == enabled)
372 + goto out; /* no state change */
373 +
374 + if (enabled) {
375 + if (!o->gc_work.func) /* first enable on this port: gc_work still zeroed */
376 + INIT_WORK(&o->gc_work, br_offload_gc_work);
377 + rhashtable_init(&o->rht, &flow_params);
378 + } else {
379 + flush = true;
380 + rhashtable_free_and_destroy(&o->rht, br_offload_destroy_cb, o); /* NOTE(review): may sleep (cancel_work_sync) under offload_lock - verify */
381 + }
382 +
383 + o->enabled = enabled;
384 +
385 +out:
386 + spin_unlock_bh(&offload_lock);
387 +
388 + if (flush)
389 + flush_work(&o->gc_work); /* wait for any gc run started before disable */
390 +}
391 +
392 +void br_offload_fdb_update(const struct net_bridge_fdb_entry *fdb)
393 +{
394 + struct bridge_flow *f;
395 + struct hlist_node *tmp;
396 +
397 + spin_lock_bh(&offload_lock);
398 +
399 + hlist_for_each_entry_safe(f, tmp, &fdb->offload_in, fdb_list_in)
400 + br_offload_flow_free(f);
401 +
402 + hlist_for_each_entry_safe(f, tmp, &fdb->offload_out, fdb_list_out)
403 + br_offload_flow_free(f);
404 +
405 + spin_unlock_bh(&offload_lock);
406 +}
407 +
408 +static void
409 +br_offload_prepare_key(struct net_bridge_port *p, struct bridge_flow_key *key,
410 + struct sk_buff *skb)
411 +{
412 + memset(key, 0, sizeof(*key));
413 + memcpy(key, eth_hdr(skb), 2 * ETH_ALEN);
414 +#ifdef CONFIG_BRIDGE_VLAN_FILTERING
415 + if (!br_opt_get(p->br, BROPT_VLAN_ENABLED))
416 + return;
417 +
418 + if (!skb_vlan_tag_present(skb) || skb->vlan_proto != p->br->vlan_proto)
419 + return;
420 +
421 + key->vlan_present = true;
422 + key->vlan_tag = skb_vlan_tag_get_id(skb);
423 +#endif
424 +}
425 +
426 +void br_offload_output(struct sk_buff *skb) /* learn a flow for an skb marked by br_offload_input() */
427 +{
428 + struct net_bridge_port_offload *o;
429 + struct br_input_skb_cb *cb = (struct br_input_skb_cb *)skb->cb;
430 + struct net_bridge_port *p, *inp;
431 + struct net_device *dev;
432 + struct net_bridge_fdb_entry *fdb_in, *fdb_out;
433 + struct net_bridge_vlan_group *vg;
434 + struct bridge_flow_key key;
435 + struct bridge_flow *flow;
436 + u16 vlan;
437 +
438 + if (!cb->offload) /* only skbs that missed the flow cache are marked */
439 + return;
440 +
441 + rcu_read_lock();
442 +
443 + p = br_port_get_rcu(skb->dev);
444 + if (!p)
445 + goto out;
446 +
447 + o = &p->offload;
448 + if (!o->enabled)
449 + goto out;
450 +
451 + if (atomic_read(&p->offload.rht.nelems) >= p->br->offload_cache_size)
452 + goto out;
453 +
454 + dev = dev_get_by_index_rcu(dev_net(p->br->dev), cb->input_ifindex);
455 + if (!dev)
456 + goto out;
457 +
458 + inp = br_port_get_rcu(dev);
459 + if (!inp)
460 + goto out;
461 +
462 + vg = nbp_vlan_group_rcu(inp);
463 + vlan = cb->input_vlan_present ? cb->input_vlan_tag : br_get_pvid(vg);
464 + fdb_in = br_fdb_find_rcu(p->br, eth_hdr(skb)->h_source, vlan);
465 + if (!fdb_in || !fdb_in->dst)
466 + goto out;
467 +
468 + vg = nbp_vlan_group_rcu(p);
469 + vlan = skb_vlan_tag_present(skb) ? skb_vlan_tag_get_id(skb) : br_get_pvid(vg);
470 + fdb_out = br_fdb_find_rcu(p->br, eth_hdr(skb)->h_dest, vlan);
471 + if (!fdb_out || !fdb_out->dst)
472 + goto out;
473 +
474 + br_offload_prepare_key(p, &key, skb);
475 +#ifdef CONFIG_BRIDGE_VLAN_FILTERING
476 + key.vlan_present = cb->input_vlan_present;
477 + key.vlan_tag = cb->input_vlan_tag;
478 +#endif
479 + /* GFP_ATOMIC allocation can fail under pressure: bail out instead of dereferencing NULL */
480 + flow = kmem_cache_alloc(offload_cache, GFP_ATOMIC);
481 + if (!flow)
482 + goto out;
483 + flow->port = inp;
484 + memcpy(&flow->key, &key, sizeof(key));
485 +#ifdef CONFIG_BRIDGE_VLAN_FILTERING
486 + flow->vlan_out_present = skb_vlan_tag_present(skb);
487 + flow->vlan_out = skb_vlan_tag_get(skb);
488 +#endif
489 + flow->fdb_in = fdb_in;
490 + flow->fdb_out = fdb_out;
491 + flow->used = jiffies;
492 +
493 + spin_lock_bh(&offload_lock);
494 + if (!o->enabled || /* re-check under lock */
495 + atomic_read(&p->offload.rht.nelems) >= p->br->offload_cache_size ||
496 + rhashtable_insert_fast(&inp->offload.rht, &flow->node, flow_params)) { /* NOTE(review): capacity checked on p but flow stored in inp's table - confirm intent */
497 + kmem_cache_free(offload_cache, flow);
498 + goto out_unlock;
499 + }
500 +
501 + hlist_add_head(&flow->fdb_list_in, &fdb_in->offload_in);
502 + hlist_add_head(&flow->fdb_list_out, &fdb_out->offload_out);
503 +
504 + if (br_offload_need_gc(p))
505 + queue_work(system_long_wq, &p->offload.gc_work);
506 +
507 +out_unlock:
508 + spin_unlock_bh(&offload_lock);
509 +
510 +out:
511 + rcu_read_unlock();
512 +}
513 +
514 +bool br_offload_input(struct net_bridge_port *p, struct sk_buff *skb) /* fast path: returns true if skb was consumed */
515 +{
516 + struct net_bridge_port_offload *o = &p->offload;
517 + struct br_input_skb_cb *cb = (struct br_input_skb_cb *)skb->cb;
518 + struct bridge_flow_key key;
519 + struct net_bridge_port *dst;
520 + struct bridge_flow *flow;
521 + unsigned long now = jiffies;
522 + bool ret = false;
523 +
524 + if (skb->len < sizeof(key)) /* too short for a full flow key */
525 + return false;
526 +
527 + if (!o->enabled)
528 + return false;
529 +
530 + if (is_multicast_ether_addr(eth_hdr(skb)->h_dest)) /* only unicast flows are cached */
531 + return false;
532 +
533 + br_offload_prepare_key(p, &key, skb);
534 +
535 + rcu_read_lock();
536 + flow = rhashtable_lookup(&o->rht, &key, flow_params);
537 + if (!flow) { /* miss: mark skb so br_offload_output() can create the flow */
538 + cb->offload = 1;
539 +#ifdef CONFIG_BRIDGE_VLAN_FILTERING
540 + cb->input_vlan_present = key.vlan_present != 0;
541 + cb->input_vlan_tag = key.vlan_tag;
542 +#endif
543 + cb->input_ifindex = p->dev->ifindex;
544 + goto out;
545 + }
546 +
547 + if (flow->fdb_in->dst != p) /* stale flow: source fdb no longer on this port */
548 + goto out;
549 +
550 + dst = flow->fdb_out->dst;
551 + if (!dst) /* destination fdb lost its port */
552 + goto out;
553 +
554 + ret = true;
555 +#ifdef CONFIG_BRIDGE_VLAN_FILTERING
556 + if (!flow->vlan_out_present && key.vlan_present) {
557 + __vlan_hwaccel_clear_tag(skb); /* flow egresses untagged */
558 + } else if (flow->vlan_out_present) {
559 + if (skb_vlan_tag_present(skb) &&
560 + skb->vlan_proto != p->br->vlan_proto) {
561 + /* Protocol-mismatch, empty out vlan_tci for new tag */
562 + skb_push(skb, ETH_HLEN);
563 + skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
564 + skb_vlan_tag_get(skb));
565 + if (unlikely(!skb))
566 + goto out;
567 +
568 + skb_pull(skb, ETH_HLEN);
569 + skb_reset_mac_len(skb);
570 + }
571 +
572 + __vlan_hwaccel_put_tag(skb, p->br->vlan_proto,
573 + flow->vlan_out);
574 + }
575 +#endif
576 +
577 + skb->dev = dst->dev;
578 + skb_push(skb, ETH_HLEN);
579 +
580 + if (skb_warn_if_lro(skb) || !is_skb_forwardable(skb->dev, skb)) {
581 + kfree_skb(skb); /* ret stays true: skb was handled (dropped) here */
582 + goto out;
583 + }
584 +
585 + if (now - flow->used >= HZ) { /* refresh fdb timestamps at most once per second */
586 + flow->used = now;
587 + br_offload_flow_refresh_time(flow);
588 + }
589 +
590 + skb_forward_csum(skb);
591 + dev_queue_xmit(skb); /* transmit directly, bypassing bridge forwarding */
592 +
593 +out:
594 + rcu_read_unlock();
595 + return ret;
596 +}
597 +
598 +static void
599 +br_offload_check_gc(struct net_bridge *br)
600 +{
601 + struct net_bridge_port *p;
602 +
603 + spin_lock_bh(&br->lock);
604 + list_for_each_entry(p, &br->port_list, list)
605 + if (br_offload_need_gc(p))
606 + queue_work(system_long_wq, &p->offload.gc_work);
607 + spin_unlock_bh(&br->lock);
608 +}
609 +
610 +
611 +int br_offload_set_cache_size(struct net_bridge *br, unsigned long val)
612 +{
613 + br->offload_cache_size = val;
614 + br_offload_check_gc(br);
615 +
616 + return 0;
617 +}
618 +
619 +int br_offload_set_cache_reserved(struct net_bridge *br, unsigned long val)
620 +{
621 + br->offload_cache_reserved = val;
622 + br_offload_check_gc(br);
623 +
624 + return 0;
625 +}
626 +
627 +int __init br_offload_init(void)
628 +{
629 + offload_cache = kmem_cache_create("bridge_offload_cache",
630 + sizeof(struct bridge_flow),
631 + 0, SLAB_HWCACHE_ALIGN, NULL);
632 + if (!offload_cache)
633 + return -ENOMEM;
634 +
635 + return 0;
636 +}
637 +
638 +void br_offload_fini(void)
639 +{
640 + kmem_cache_destroy(offload_cache);
641 +}
642 --- a/net/bridge/br_private.h
643 +++ b/net/bridge/br_private.h
644 @@ -207,7 +207,13 @@ struct net_bridge_fdb_entry {
645 unsigned long updated ____cacheline_aligned_in_smp;
646 unsigned long used;
647
648 - struct rcu_head rcu;
649 + union {
650 + struct {
651 + struct hlist_head offload_in;
652 + struct hlist_head offload_out;
653 + };
654 + struct rcu_head rcu;
655 + };
656 };
657
658 #define MDB_PG_FLAGS_PERMANENT BIT(0)
659 @@ -280,6 +286,12 @@ struct net_bridge_mdb_entry {
660 struct rcu_head rcu;
661 };
662
663 +struct net_bridge_port_offload {
664 + struct rhashtable rht;
665 + struct work_struct gc_work;
666 + bool enabled;
667 +};
668 +
669 struct net_bridge_port {
670 struct net_bridge *br;
671 struct net_device *dev;
672 @@ -337,6 +349,7 @@ struct net_bridge_port {
673 u16 backup_redirected_cnt;
674
675 struct bridge_stp_xstats stp_xstats;
676 + struct net_bridge_port_offload offload;
677 };
678
679 #define kobj_to_brport(obj) container_of(obj, struct net_bridge_port, kobj)
680 @@ -475,6 +488,9 @@ struct net_bridge {
681 struct kobject *ifobj;
682 u32 auto_cnt;
683
684 + u32 offload_cache_size;
685 + u32 offload_cache_reserved;
686 +
687 #ifdef CONFIG_NET_SWITCHDEV
688 int offload_fwd_mark;
689 #endif
690 @@ -501,6 +517,10 @@ struct br_input_skb_cb {
691 #ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
692 u8 br_netfilter_broute:1;
693 #endif
694 + u8 offload:1;
695 + u8 input_vlan_present:1;
696 + u16 input_vlan_tag;
697 + int input_ifindex;
698
699 #ifdef CONFIG_NET_SWITCHDEV
700 int offload_fwd_mark;
701 --- /dev/null
702 +++ b/net/bridge/br_private_offload.h
703 @@ -0,0 +1,21 @@
704 +#ifndef __BR_OFFLOAD_H
705 +#define __BR_OFFLOAD_H
706 +
707 +bool br_offload_input(struct net_bridge_port *p, struct sk_buff *skb);
708 +void br_offload_output(struct sk_buff *skb);
709 +void br_offload_port_state(struct net_bridge_port *p);
710 +void br_offload_fdb_update(const struct net_bridge_fdb_entry *fdb);
711 +int br_offload_init(void);
712 +void br_offload_fini(void);
713 +int br_offload_set_cache_size(struct net_bridge *br, unsigned long val);
714 +int br_offload_set_cache_reserved(struct net_bridge *br, unsigned long val);
715 +
716 +static inline void br_offload_skb_disable(struct sk_buff *skb)
717 +{
718 + struct br_input_skb_cb *cb = (struct br_input_skb_cb *)skb->cb;
719 +
720 + if (cb->offload)
721 + cb->offload = 0;
722 +}
723 +
724 +#endif
725 --- a/net/bridge/br_stp.c
726 +++ b/net/bridge/br_stp.c
727 @@ -12,6 +12,7 @@
728
729 #include "br_private.h"
730 #include "br_private_stp.h"
731 +#include "br_private_offload.h"
732
733 /* since time values in bpdu are in jiffies and then scaled (1/256)
734 * before sending, make sure that is at least one STP tick.
735 @@ -52,6 +53,8 @@ void br_set_state(struct net_bridge_port
736 (unsigned int) p->port_no, p->dev->name,
737 br_port_state_names[p->state]);
738
739 + br_offload_port_state(p);
740 +
741 if (p->br->stp_enabled == BR_KERNEL_STP) {
742 switch (p->state) {
743 case BR_STATE_BLOCKING:
744 --- a/net/bridge/br_sysfs_br.c
745 +++ b/net/bridge/br_sysfs_br.c
746 @@ -18,6 +18,7 @@
747 #include <linux/sched/signal.h>
748
749 #include "br_private.h"
750 +#include "br_private_offload.h"
751
752 #define to_bridge(cd) ((struct net_bridge *)netdev_priv(to_net_dev(cd)))
753
754 @@ -842,6 +843,38 @@ static ssize_t vlan_stats_per_port_store
755 static DEVICE_ATTR_RW(vlan_stats_per_port);
756 #endif
757
758 +static ssize_t offload_cache_size_show(struct device *d,
759 + struct device_attribute *attr,
760 + char *buf)
761 +{
762 + struct net_bridge *br = to_bridge(d);
763 + return sprintf(buf, "%u\n", br->offload_cache_size);
764 +}
765 +
766 +static ssize_t offload_cache_size_store(struct device *d,
767 + struct device_attribute *attr,
768 + const char *buf, size_t len)
769 +{
770 + return store_bridge_parm(d, buf, len, br_offload_set_cache_size);
771 +}
772 +static DEVICE_ATTR_RW(offload_cache_size);
773 +
774 +static ssize_t offload_cache_reserved_show(struct device *d,
775 + struct device_attribute *attr,
776 + char *buf)
777 +{
778 + struct net_bridge *br = to_bridge(d);
779 + return sprintf(buf, "%u\n", br->offload_cache_reserved);
780 +}
781 +
782 +static ssize_t offload_cache_reserved_store(struct device *d,
783 + struct device_attribute *attr,
784 + const char *buf, size_t len)
785 +{
786 + return store_bridge_parm(d, buf, len, br_offload_set_cache_reserved);
787 +}
788 +static DEVICE_ATTR_RW(offload_cache_reserved);
789 +
790 static struct attribute *bridge_attrs[] = {
791 &dev_attr_forward_delay.attr,
792 &dev_attr_hello_time.attr,
793 @@ -896,6 +929,8 @@ static struct attribute *bridge_attrs[]
794 &dev_attr_vlan_stats_enabled.attr,
795 &dev_attr_vlan_stats_per_port.attr,
796 #endif
797 + &dev_attr_offload_cache_size.attr,
798 + &dev_attr_offload_cache_reserved.attr,
799 NULL
800 };
801
802 --- a/net/bridge/br_sysfs_if.c
803 +++ b/net/bridge/br_sysfs_if.c
804 @@ -234,6 +234,7 @@ BRPORT_ATTR_FLAG(broadcast_flood, BR_BCA
805 BRPORT_ATTR_FLAG(neigh_suppress, BR_NEIGH_SUPPRESS);
806 BRPORT_ATTR_FLAG(isolated, BR_ISOLATED);
807 BRPORT_ATTR_FLAG(bpdu_filter, BR_BPDU_FILTER);
808 +BRPORT_ATTR_FLAG(offload, BR_OFFLOAD);
809
810 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
811 static ssize_t show_multicast_router(struct net_bridge_port *p, char *buf)
812 @@ -288,6 +289,7 @@ static const struct brport_attribute *br
813 &brport_attr_isolated,
814 &brport_attr_bpdu_filter,
815 &brport_attr_backup_port,
816 + &brport_attr_offload,
817 NULL
818 };
819
820 --- a/net/bridge/br_vlan_tunnel.c
821 +++ b/net/bridge/br_vlan_tunnel.c
822 @@ -15,6 +15,7 @@
823
824 #include "br_private.h"
825 #include "br_private_tunnel.h"
826 +#include "br_private_offload.h"
827
828 static inline int br_vlan_tunid_cmp(struct rhashtable_compare_arg *arg,
829 const void *ptr)
830 @@ -180,6 +181,7 @@ int br_handle_ingress_vlan_tunnel(struct
831 skb_dst_drop(skb);
832
833 __vlan_hwaccel_put_tag(skb, p->br->vlan_proto, vlan->vid);
834 + br_offload_skb_disable(skb);
835
836 return 0;
837 }
838 @@ -203,6 +205,7 @@ int br_handle_egress_vlan_tunnel(struct
839 if (err)
840 return err;
841
842 + br_offload_skb_disable(skb);
843 tunnel_dst = rcu_dereference(vlan->tinfo.tunnel_dst);
844 if (tunnel_dst && dst_hold_safe(&tunnel_dst->dst))
845 skb_dst_set(skb, &tunnel_dst->dst);