From: Felix Fietkau <nbd@nbd.name>
Date: Tue, 20 Feb 2018 15:56:02 +0100
Subject: [PATCH] netfilter: add xt_OFFLOAD target

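The target selects individual connections for flow offloading from the
ruleset: on the first packet that hits a FLOWOFFLOAD rule, an entry is
added to a software flow table and ingress hooks are registered on the
input and output devices, so further packets of that flow bypass the
regular netfilter chains. With the XT_FLOWOFFLOAD_HW flag set, the flow
is also handed to the hardware offload path via nf_flow_offload_hw_add()
where the driver supports it.

Illustrative usage (assumes the companion libxt_FLOWOFFLOAD iptables
extension, whose --hw option sets XT_FLOWOFFLOAD_HW):

  # offload established forwarded connections (software fast path)
  iptables -I FORWARD -m conntrack --ctstate ESTABLISHED -j FLOWOFFLOAD
  # additionally request hardware offload
  iptables -I FORWARD -m conntrack --ctstate ESTABLISHED -j FLOWOFFLOAD --hw
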
Signed-off-by: Felix Fietkau <nbd@nbd.name>
---
 create mode 100644 net/netfilter/xt_FLOWOFFLOAD.c

--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -56,8 +56,6 @@ config NF_TABLES_ARP
 	help
 	  This option enables the ARP support for nf_tables.
 
-endif # NF_TABLES
-
 config NF_FLOW_TABLE_IPV4
 	tristate "Netfilter flow table IPv4 module"
 	depends on NF_FLOW_TABLE
@@ -66,6 +64,8 @@ config NF_FLOW_TABLE_IPV4
 
 	  To compile it as a module, choose M here.
 
+endif # NF_TABLES
+
 config NF_DUP_IPV4
 	tristate "Netfilter IPv4 packet duplication to alternate destination"
 	depends on !NF_CONNTRACK || NF_CONNTRACK
--- a/net/ipv6/netfilter/Kconfig
+++ b/net/ipv6/netfilter/Kconfig
@@ -45,7 +45,6 @@ config NFT_FIB_IPV6
 	  multicast or blackhole.
 
 endif # NF_TABLES_IPV6
-endif # NF_TABLES
 
 config NF_FLOW_TABLE_IPV6
 	tristate "Netfilter flow table IPv6 module"
@@ -55,6 +54,8 @@ config NF_FLOW_TABLE_IPV6
 
 	  To compile it as a module, choose M here.
 
+endif # NF_TABLES
+
 config NF_DUP_IPV6
 	tristate "Netfilter IPv6 packet duplication to alternate destination"
 	depends on !NF_CONNTRACK || NF_CONNTRACK
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -702,8 +702,6 @@ config NFT_FIB_NETDEV
 
 endif # NF_TABLES_NETDEV
 
-endif # NF_TABLES
-
 config NF_FLOW_TABLE_INET
 	tristate "Netfilter flow table mixed IPv4/IPv6 module"
 	depends on NF_FLOW_TABLE
@@ -712,11 +710,12 @@ config NF_FLOW_TABLE_INET
 
 	  To compile it as a module, choose M here.
 
+endif # NF_TABLES
+
 config NF_FLOW_TABLE
 	tristate "Netfilter flow table module"
 	depends on NETFILTER_INGRESS
 	depends on NF_CONNTRACK
-	depends on NF_TABLES
 	help
 	  This option adds the flow table core infrastructure.
 
@@ -1005,6 +1004,15 @@ config NETFILTER_XT_TARGET_NOTRACK
 	depends on NETFILTER_ADVANCED
 	select NETFILTER_XT_TARGET_CT
 
+config NETFILTER_XT_TARGET_FLOWOFFLOAD
+	tristate '"FLOWOFFLOAD" target support'
+	depends on NF_FLOW_TABLE
+	depends on NETFILTER_INGRESS
+	help
+	  This option adds a `FLOWOFFLOAD' target, which uses the nf_flow_offload
+	  module to speed up processing of packets by bypassing the usual
+	  netfilter chains.
+
 config NETFILTER_XT_TARGET_RATEEST
 	tristate '"RATEEST" target support'
 	depends on NETFILTER_ADVANCED
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -144,6 +144,7 @@ obj-$(CONFIG_NETFILTER_XT_TARGET_CLASSIF
 obj-$(CONFIG_NETFILTER_XT_TARGET_CONNSECMARK) += xt_CONNSECMARK.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_CT) += xt_CT.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_DSCP) += xt_DSCP.o
+obj-$(CONFIG_NETFILTER_XT_TARGET_FLOWOFFLOAD) += xt_FLOWOFFLOAD.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_HL) += xt_HL.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_HMARK) += xt_HMARK.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_LED) += xt_LED.o
--- /dev/null
+++ b/net/netfilter/xt_FLOWOFFLOAD.c
@@ -0,0 +1,427 @@
+/*
+ * Copyright (C) 2018 Felix Fietkau <nbd@nbd.name>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/xt_FLOWOFFLOAD.h>
+#include <net/ip.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_extend.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <net/netfilter/nf_flow_table.h>
+
+static struct nf_flowtable nf_flowtable;
+static HLIST_HEAD(hooks);
+static DEFINE_SPINLOCK(hooks_lock);
+static struct delayed_work hook_work;
+
+struct xt_flowoffload_hook {
+	struct hlist_node list;
+	struct nf_hook_ops ops;
+	struct net *net;
+	bool registered;
+	bool used;
+};
+
+static unsigned int
+xt_flowoffload_net_hook(void *priv, struct sk_buff *skb,
+			const struct nf_hook_state *state)
+{
+	switch (skb->protocol) {
+	case htons(ETH_P_IP):
+		return nf_flow_offload_ip_hook(priv, skb, state);
+	case htons(ETH_P_IPV6):
+		return nf_flow_offload_ipv6_hook(priv, skb, state);
+	}
+
+	return NF_ACCEPT;
+}
+
+int nf_flow_table_iterate(struct nf_flowtable *flow_table,
+			  void (*iter)(struct flow_offload *flow, void *data),
+			  void *data);
+
+static int
+xt_flowoffload_create_hook(struct net_device *dev)
+{
+	struct xt_flowoffload_hook *hook;
+	struct nf_hook_ops *ops;
+
+	hook = kzalloc(sizeof(*hook), GFP_ATOMIC);
+	if (!hook)
+		return -ENOMEM;
+
+	ops = &hook->ops;
+	ops->pf = NFPROTO_NETDEV;
+	ops->hooknum = NF_NETDEV_INGRESS;
+	ops->priority = 10;
+	ops->priv = &nf_flowtable;
+	ops->hook = xt_flowoffload_net_hook;
+	ops->dev = dev;
+
+	hlist_add_head(&hook->list, &hooks);
+	mod_delayed_work(system_power_efficient_wq, &hook_work, 0);
+
+	return 0;
+}
+
+static struct xt_flowoffload_hook *
+flow_offload_lookup_hook(struct net_device *dev)
+{
+	struct xt_flowoffload_hook *hook;
+
+	hlist_for_each_entry(hook, &hooks, list) {
+		if (hook->ops.dev == dev)
+			return hook;
+	}
+
+	return NULL;
+}
+
+static void
+xt_flowoffload_check_device(struct net_device *dev)
+{
+	struct xt_flowoffload_hook *hook;
+
+	spin_lock_bh(&hooks_lock);
+	hook = flow_offload_lookup_hook(dev);
+	if (hook)
+		hook->used = true;
+	else
+		xt_flowoffload_create_hook(dev);
+	spin_unlock_bh(&hooks_lock);
+}
+
+static void
+xt_flowoffload_register_hooks(void)
+{
+	struct xt_flowoffload_hook *hook;
+
+restart:
+	hlist_for_each_entry(hook, &hooks, list) {
+		if (hook->registered)
+			continue;
+
+		hook->registered = true;
+		hook->net = dev_net(hook->ops.dev);
+		spin_unlock_bh(&hooks_lock);
+		nf_register_net_hook(hook->net, &hook->ops);
+		spin_lock_bh(&hooks_lock);
+		goto restart;
+	}
+
+}
+
+static void
+xt_flowoffload_cleanup_hooks(void)
+{
+	struct xt_flowoffload_hook *hook;
+
+restart:
+	hlist_for_each_entry(hook, &hooks, list) {
+		if (hook->used || !hook->registered)
+			continue;
+
+		hlist_del(&hook->list);
+		spin_unlock_bh(&hooks_lock);
+		nf_unregister_net_hook(hook->net, &hook->ops);
+		kfree(hook);
+		spin_lock_bh(&hooks_lock);
+		goto restart;
+	}
+
+}
+
+static void
+xt_flowoffload_check_hook(struct flow_offload *flow, void *data)
+{
+	struct flow_offload_tuple *tuple = &flow->tuplehash[0].tuple;
+	struct xt_flowoffload_hook *hook;
+	bool *found = data;
+	struct rtable *rt = (struct rtable *)tuple->dst_cache;
+
+	spin_lock_bh(&hooks_lock);
+	hlist_for_each_entry(hook, &hooks, list) {
+		if (hook->ops.dev->ifindex != tuple->iifidx &&
+		    hook->ops.dev->ifindex != rt->dst.dev->ifindex)
+			continue;
+
+		hook->used = true;
+		*found = true;
+	}
+	spin_unlock_bh(&hooks_lock);
+}
+
+static void
+xt_flowoffload_hook_work(struct work_struct *work)
+{
+	struct xt_flowoffload_hook *hook;
+	bool found = false;
+	int err;
+
+	spin_lock_bh(&hooks_lock);
+	xt_flowoffload_register_hooks();
+	hlist_for_each_entry(hook, &hooks, list)
+		hook->used = false;
+	spin_unlock_bh(&hooks_lock);
+
+	err = nf_flow_table_iterate(&nf_flowtable, xt_flowoffload_check_hook,
+				    &found);
+	if (err && err != -EAGAIN)
+		goto out;
+
+	spin_lock_bh(&hooks_lock);
+	xt_flowoffload_cleanup_hooks();
+	spin_unlock_bh(&hooks_lock);
+
+out:
+	if (found)
+		queue_delayed_work(system_power_efficient_wq, &hook_work, HZ);
+}
+
+static bool
+xt_flowoffload_skip(struct sk_buff *skb, int family)
+{
+	if (skb_sec_path(skb))
+		return true;
+
+	if (family == NFPROTO_IPV4) {
+		const struct ip_options *opt = &(IPCB(skb)->opt);
+
+		if (unlikely(opt->optlen))
+			return true;
+	}
+
+	return false;
+}
+
+static struct dst_entry *
+xt_flowoffload_dst(const struct nf_conn *ct, enum ip_conntrack_dir dir,
+		   const struct xt_action_param *par, int ifindex)
+{
+	struct dst_entry *dst = NULL;
+	struct flowi fl;
+
+	memset(&fl, 0, sizeof(fl));
+	switch (xt_family(par)) {
+	case NFPROTO_IPV4:
+		fl.u.ip4.daddr = ct->tuplehash[dir].tuple.src.u3.ip;
+		fl.u.ip4.flowi4_oif = ifindex;
+		break;
+	case NFPROTO_IPV6:
+		fl.u.ip6.saddr = ct->tuplehash[dir].tuple.dst.u3.in6;
+		fl.u.ip6.daddr = ct->tuplehash[dir].tuple.src.u3.in6;
+		fl.u.ip6.flowi6_oif = ifindex;
+		break;
+	}
+
+	nf_route(xt_net(par), &dst, &fl, false, xt_family(par));
+
+	return dst;
+}
+
+static int
+xt_flowoffload_route(struct sk_buff *skb, const struct nf_conn *ct,
+		     const struct xt_action_param *par,
+		     struct nf_flow_route *route, enum ip_conntrack_dir dir)
+{
+	struct dst_entry *this_dst, *other_dst;
+
+	this_dst = xt_flowoffload_dst(ct, !dir, par, xt_out(par)->ifindex);
+	other_dst = xt_flowoffload_dst(ct, dir, par, xt_in(par)->ifindex);
+
+	route->tuple[dir].dst = this_dst;
+	route->tuple[!dir].dst = other_dst;
+
+	if (!this_dst || !other_dst)
+		return -ENOENT;
+
+	if (dst_xfrm(this_dst) || dst_xfrm(other_dst))
+		return -EINVAL;
+
+	return 0;
+}
+
+static unsigned int
+flowoffload_tg(struct sk_buff *skb, const struct xt_action_param *par)
+{
+	const struct xt_flowoffload_target_info *info = par->targinfo;
+	struct tcphdr _tcph, *tcph = NULL;
+	enum ip_conntrack_info ctinfo;
+	enum ip_conntrack_dir dir;
+	struct nf_flow_route route;
+	struct flow_offload *flow = NULL;
+	struct nf_conn *ct;
+	struct net *net;
+
+	if (xt_flowoffload_skip(skb, xt_family(par)))
+		return XT_CONTINUE;
+
+	ct = nf_ct_get(skb, &ctinfo);
+	if (ct == NULL)
+		return XT_CONTINUE;
+
+	switch (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum) {
+	case IPPROTO_TCP:
+		if (ct->proto.tcp.state != TCP_CONNTRACK_ESTABLISHED)
+			return XT_CONTINUE;
+
+		tcph = skb_header_pointer(skb, par->thoff,
+					  sizeof(_tcph), &_tcph);
+		if (unlikely(!tcph || tcph->fin || tcph->rst))
+			return XT_CONTINUE;
+		break;
+	case IPPROTO_UDP:
+		break;
+	default:
+		return XT_CONTINUE;
+	}
+
+	if (nf_ct_ext_exist(ct, NF_CT_EXT_HELPER) ||
+	    ct->status & IPS_SEQ_ADJUST)
+		return XT_CONTINUE;
+
+	if (!nf_ct_is_confirmed(ct))
+		return XT_CONTINUE;
+
+	if (!xt_in(par) || !xt_out(par))
+		return XT_CONTINUE;
+
+	if (test_and_set_bit(IPS_OFFLOAD_BIT, &ct->status))
+		return XT_CONTINUE;
+
+	dir = CTINFO2DIR(ctinfo);
+
+	if (xt_flowoffload_route(skb, ct, par, &route, dir) == 0)
+		flow = flow_offload_alloc(ct, &route);
+
+	dst_release(route.tuple[dir].dst);
+	dst_release(route.tuple[!dir].dst);
+
+	if (!flow)
+		goto err_flow_route;
+
+	if (tcph) {
+		ct->proto.tcp.seen[0].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
+		ct->proto.tcp.seen[1].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
+	}
+
+	if (flow_offload_add(&nf_flowtable, flow) < 0)
+		goto err_flow_add;
+
+	xt_flowoffload_check_device(xt_in(par));
+	xt_flowoffload_check_device(xt_out(par));
+
+	net = read_pnet(&nf_flowtable.ft_net);
+	if (!net)
+		write_pnet(&nf_flowtable.ft_net, xt_net(par));
+
+	if (info->flags & XT_FLOWOFFLOAD_HW)
+		nf_flow_offload_hw_add(xt_net(par), flow, ct);
+
+	return XT_CONTINUE;
+
+err_flow_add:
+	flow_offload_free(flow);
+err_flow_route:
+	clear_bit(IPS_OFFLOAD_BIT, &ct->status);
+	return XT_CONTINUE;
+}
+
+
+static int flowoffload_chk(const struct xt_tgchk_param *par)
+{
+	struct xt_flowoffload_target_info *info = par->targinfo;
+
+	if (info->flags & ~XT_FLOWOFFLOAD_MASK)
+		return -EINVAL;
+
+	return 0;
+}
+
+static struct xt_target offload_tg_reg __read_mostly = {
+	.family		= NFPROTO_UNSPEC,
+	.name		= "FLOWOFFLOAD",
+	.revision	= 0,
+	.targetsize	= sizeof(struct xt_flowoffload_target_info),
+	.usersize	= sizeof(struct xt_flowoffload_target_info),
+	.checkentry	= flowoffload_chk,
+	.target		= flowoffload_tg,
+	.me		= THIS_MODULE,
+};
+
+static int xt_flowoffload_table_init(struct nf_flowtable *table)
+{
+	table->flags = NF_FLOWTABLE_F_HW;
+	nf_flow_table_init(table);
+	return 0;
+}
+
+static void xt_flowoffload_table_cleanup(struct nf_flowtable *table)
+{
+	nf_flow_table_free(table);
+}
+
+static int flow_offload_netdev_event(struct notifier_block *this,
+				     unsigned long event, void *ptr)
+{
+	struct xt_flowoffload_hook *hook = NULL;
+	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+
+	if (event != NETDEV_UNREGISTER)
+		return NOTIFY_DONE;
+
+	spin_lock_bh(&hooks_lock);
+	hook = flow_offload_lookup_hook(dev);
+	if (hook) {
+		hlist_del(&hook->list);
+	}
+	spin_unlock_bh(&hooks_lock);
+	if (hook) {
+		nf_unregister_net_hook(hook->net, &hook->ops);
+		kfree(hook);
+	}
+
+	nf_flow_table_cleanup(dev);
+
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block flow_offload_netdev_notifier = {
+	.notifier_call = flow_offload_netdev_event,
+};
+
+static int __init xt_flowoffload_tg_init(void)
+{
+	int ret;
+
+	register_netdevice_notifier(&flow_offload_netdev_notifier);
+
+	INIT_DELAYED_WORK(&hook_work, xt_flowoffload_hook_work);
+
+	ret = xt_flowoffload_table_init(&nf_flowtable);
+	if (ret)
+		return ret;
+
+	ret = xt_register_target(&offload_tg_reg);
+	if (ret)
+		xt_flowoffload_table_cleanup(&nf_flowtable);
+
+	return ret;
+}
+
+static void __exit xt_flowoffload_tg_exit(void)
+{
+	xt_unregister_target(&offload_tg_reg);
+	xt_flowoffload_table_cleanup(&nf_flowtable);
+	unregister_netdevice_notifier(&flow_offload_netdev_notifier);
+}
+
+MODULE_LICENSE("GPL");
+module_init(xt_flowoffload_tg_init);
+module_exit(xt_flowoffload_tg_exit);
--- a/net/netfilter/nf_flow_table_core.c
+++ b/net/netfilter/nf_flow_table_core.c
@@ -7,7 +7,6 @@
 #include <linux/netdevice.h>
 #include <net/ip.h>
 #include <net/ip6_route.h>
-#include <net/netfilter/nf_tables.h>
 #include <net/netfilter/nf_flow_table.h>
 #include <net/netfilter/nf_conntrack.h>
 #include <net/netfilter/nf_conntrack_core.h>
@@ -338,8 +337,7 @@ flow_offload_lookup(struct nf_flowtable
 }
 EXPORT_SYMBOL_GPL(flow_offload_lookup);
 
-static int
-nf_flow_table_iterate(struct nf_flowtable *flow_table,
+int nf_flow_table_iterate(struct nf_flowtable *flow_table,
 		      void (*iter)(struct flow_offload *flow, void *data),
 		      void *data)
 {
@@ -372,6 +370,7 @@ nf_flow_table_iterate(struct nf_flowtabl
 
 	return err;
 }
+EXPORT_SYMBOL_GPL(nf_flow_table_iterate);
 
 static void nf_flow_offload_gc_step(struct flow_offload *flow, void *data)
 {
--- /dev/null
+++ b/include/uapi/linux/netfilter/xt_FLOWOFFLOAD.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _XT_FLOWOFFLOAD_H
+#define _XT_FLOWOFFLOAD_H
+
+#include <linux/types.h>
+
+enum {
+	XT_FLOWOFFLOAD_HW	= 1 << 0,
+
+	XT_FLOWOFFLOAD_MASK	= XT_FLOWOFFLOAD_HW
+};
+
+struct xt_flowoffload_target_info {
+	__u32 flags;
+};
+
+#endif /* _XT_FLOWOFFLOAD_H */
--- a/include/net/netfilter/nf_flow_table.h
+++ b/include/net/netfilter/nf_flow_table.h
@@ -130,6 +130,10 @@ static inline void flow_offload_dead(str
 	flow->flags |= FLOW_OFFLOAD_DYING;
 }
 
+int nf_flow_table_iterate(struct nf_flowtable *flow_table,
+			  void (*iter)(struct flow_offload *flow, void *data),
+			  void *data);
+
 int nf_flow_snat_port(const struct flow_offload *flow,
 		      struct sk_buff *skb, unsigned int thoff,
 		      u8 protocol, enum flow_offload_tuple_dir dir);