openwrt/openwrt.git: target/linux/generic/hack-4.14/650-netfilter-add-xt_OFFLOAD-target.patch
From: Felix Fietkau <nbd@nbd.name>
Date: Tue, 20 Feb 2018 15:56:02 +0100
Subject: [PATCH] netfilter: add xt_OFFLOAD target

Signed-off-by: Felix Fietkau <nbd@nbd.name>
---
 create mode 100644 net/netfilter/xt_FLOWOFFLOAD.c

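As context for the patch body below: the target only offloads established TCP and UDP conntrack entries into the netfilter flow table fast path, and it always returns XT_CONTINUE, so it never terminates rule traversal. From userspace it is typically attached to a forwarding rule; a sketch of such rules follows (the --hw option for requesting hardware offload comes from the separate iptables userspace extension, and its name is an assumption here, not something defined by this patch):

iptables -t filter -I FORWARD -m conntrack --ctstate RELATED,ESTABLISHED -j FLOWOFFLOAD
iptables -t filter -I FORWARD -m conntrack --ctstate RELATED,ESTABLISHED -j FLOWOFFLOAD --hw
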
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -76,8 +76,6 @@ config NF_TABLES_ARP
 	help
 	  This option enables the ARP support for nf_tables.
 
-endif # NF_TABLES
-
 config NF_FLOW_TABLE_IPV4
 	tristate "Netfilter flow table IPv4 module"
 	depends on NF_FLOW_TABLE
@@ -86,6 +84,8 @@ config NF_FLOW_TABLE_IPV4
 
 	  To compile it as a module, choose M here.
 
+endif # NF_TABLES
+
 config NF_DUP_IPV4
 	tristate "Netfilter IPv4 packet duplication to alternate destination"
 	depends on !NF_CONNTRACK || NF_CONNTRACK
--- a/net/ipv6/netfilter/Kconfig
+++ b/net/ipv6/netfilter/Kconfig
@@ -97,7 +97,6 @@ config NFT_FIB_IPV6
 	  multicast or blackhole.
 
 endif # NF_TABLES_IPV6
-endif # NF_TABLES
 
 config NF_FLOW_TABLE_IPV6
 	tristate "Netfilter flow table IPv6 module"
@@ -107,6 +106,8 @@ config NF_FLOW_TABLE_IPV6
 
 	  To compile it as a module, choose M here.
 
+endif # NF_TABLES
+
 config NF_DUP_IPV6
 	tristate "Netfilter IPv6 packet duplication to alternate destination"
 	depends on !NF_CONNTRACK || NF_CONNTRACK
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -671,8 +671,6 @@ config NFT_FIB_NETDEV
 
 endif # NF_TABLES_NETDEV
 
-endif # NF_TABLES
-
 config NF_FLOW_TABLE_INET
 	tristate "Netfilter flow table mixed IPv4/IPv6 module"
 	depends on NF_FLOW_TABLE
@@ -681,11 +679,12 @@ config NF_FLOW_TABLE_INET
 
 	  To compile it as a module, choose M here.
 
+endif # NF_TABLES
+
 config NF_FLOW_TABLE
 	tristate "Netfilter flow table module"
 	depends on NETFILTER_INGRESS
 	depends on NF_CONNTRACK
-	depends on NF_TABLES
 	help
 	  This option adds the flow table core infrastructure.
 
@@ -974,6 +973,15 @@ config NETFILTER_XT_TARGET_NOTRACK
 	depends on NETFILTER_ADVANCED
 	select NETFILTER_XT_TARGET_CT
 
+config NETFILTER_XT_TARGET_FLOWOFFLOAD
+	tristate '"FLOWOFFLOAD" target support'
+	depends on NF_FLOW_TABLE
+	depends on NETFILTER_INGRESS
+	help
+	  This option adds a `FLOWOFFLOAD' target, which uses the nf_flow_offload
+	  module to speed up processing of packets by bypassing the usual
+	  netfilter chains
+
 config NETFILTER_XT_TARGET_RATEEST
 	tristate '"RATEEST" target support'
 	depends on NETFILTER_ADVANCED
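Together with the dependencies shown above, enabling the new target needs at least the following symbols in the kernel configuration (an illustrative fragment; =m versus =y is a build-time choice):

CONFIG_NETFILTER_INGRESS=y
CONFIG_NF_CONNTRACK=m
CONFIG_NF_FLOW_TABLE=m
CONFIG_NETFILTER_XT_TARGET_FLOWOFFLOAD=m
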
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -134,6 +134,7 @@ obj-$(CONFIG_NETFILTER_XT_TARGET_CLASSIF
 obj-$(CONFIG_NETFILTER_XT_TARGET_CONNSECMARK) += xt_CONNSECMARK.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_CT) += xt_CT.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_DSCP) += xt_DSCP.o
+obj-$(CONFIG_NETFILTER_XT_TARGET_FLOWOFFLOAD) += xt_FLOWOFFLOAD.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_HL) += xt_HL.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_HMARK) += xt_HMARK.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_LED) += xt_LED.o
--- /dev/null
+++ b/net/netfilter/xt_FLOWOFFLOAD.c
@@ -0,0 +1,403 @@
+/*
+ * Copyright (C) 2018 Felix Fietkau <nbd@nbd.name>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/xt_FLOWOFFLOAD.h>
+#include <net/ip.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_flow_table.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+
+static struct nf_flowtable nf_flowtable;
+static HLIST_HEAD(hooks);
+static DEFINE_SPINLOCK(hooks_lock);
+static struct delayed_work hook_work;
+
+struct xt_flowoffload_hook {
+	struct hlist_node list;
+	struct nf_hook_ops ops;
+	struct net *net;
+	bool registered;
+	bool used;
+};
+
+static unsigned int
+xt_flowoffload_net_hook(void *priv, struct sk_buff *skb,
+			const struct nf_hook_state *state)
+{
+	switch (skb->protocol) {
+	case htons(ETH_P_IP):
+		return nf_flow_offload_ip_hook(priv, skb, state);
+	case htons(ETH_P_IPV6):
+		return nf_flow_offload_ipv6_hook(priv, skb, state);
+	}
+
+	return NF_ACCEPT;
+}
+
+static int
+xt_flowoffload_create_hook(struct net_device *dev)
+{
+	struct xt_flowoffload_hook *hook;
+	struct nf_hook_ops *ops;
+
+	hook = kzalloc(sizeof(*hook), GFP_ATOMIC);
+	if (!hook)
+		return -ENOMEM;
+
+	ops = &hook->ops;
+	ops->pf = NFPROTO_NETDEV;
+	ops->hooknum = NF_NETDEV_INGRESS;
+	ops->priority = 10;
+	ops->priv = &nf_flowtable;
+	ops->hook = xt_flowoffload_net_hook;
+	ops->dev = dev;
+
+	hlist_add_head(&hook->list, &hooks);
+	mod_delayed_work(system_power_efficient_wq, &hook_work, 0);
+
+	return 0;
+}
+
+static struct xt_flowoffload_hook *
+flow_offload_lookup_hook(struct net_device *dev)
+{
+	struct xt_flowoffload_hook *hook;
+
+	hlist_for_each_entry(hook, &hooks, list) {
+		if (hook->ops.dev == dev)
+			return hook;
+	}
+
+	return NULL;
+}
+
+static void
+xt_flowoffload_check_device(struct net_device *dev)
+{
+	struct xt_flowoffload_hook *hook;
+
+	spin_lock_bh(&hooks_lock);
+	hook = flow_offload_lookup_hook(dev);
+	if (hook)
+		hook->used = true;
+	else
+		xt_flowoffload_create_hook(dev);
+	spin_unlock_bh(&hooks_lock);
+}
+
+static void
+xt_flowoffload_register_hooks(void)
+{
+	struct xt_flowoffload_hook *hook;
+
+restart:
+	hlist_for_each_entry(hook, &hooks, list) {
+		if (hook->registered)
+			continue;
+
+		hook->registered = true;
+		hook->net = dev_net(hook->ops.dev);
+		spin_unlock_bh(&hooks_lock);
+		nf_register_net_hook(hook->net, &hook->ops);
+		spin_lock_bh(&hooks_lock);
+		goto restart;
+	}
+
+}
+
+static void
+xt_flowoffload_cleanup_hooks(void)
+{
+	struct xt_flowoffload_hook *hook;
+
+restart:
+	hlist_for_each_entry(hook, &hooks, list) {
+		if (hook->used || !hook->registered)
+			continue;
+
+		hlist_del(&hook->list);
+		spin_unlock_bh(&hooks_lock);
+		nf_unregister_net_hook(hook->net, &hook->ops);
+		kfree(hook);
+		spin_lock_bh(&hooks_lock);
+		goto restart;
+	}
+
+}
+
+static void
+xt_flowoffload_check_hook(struct flow_offload *flow, void *data)
+{
+	struct flow_offload_tuple *tuple = &flow->tuplehash[0].tuple;
+	struct xt_flowoffload_hook *hook;
+	bool *found = data;
+
+	spin_lock_bh(&hooks_lock);
+	hlist_for_each_entry(hook, &hooks, list) {
+		if (hook->ops.dev->ifindex != tuple->iifidx &&
+		    hook->ops.dev->ifindex != tuple->oifidx)
+			continue;
+
+		hook->used = true;
+		*found = true;
+	}
+	spin_unlock_bh(&hooks_lock);
+}
+
+static void
+xt_flowoffload_hook_work(struct work_struct *work)
+{
+	struct xt_flowoffload_hook *hook;
+	bool found = false;
+	int err;
+
+	spin_lock_bh(&hooks_lock);
+	xt_flowoffload_register_hooks();
+	hlist_for_each_entry(hook, &hooks, list)
+		hook->used = false;
+	spin_unlock_bh(&hooks_lock);
+
+	err = nf_flow_table_iterate(&nf_flowtable, xt_flowoffload_check_hook,
+				    &found);
+	if (err && err != -EAGAIN)
+		goto out;
+
+	spin_lock_bh(&hooks_lock);
+	xt_flowoffload_cleanup_hooks();
+	spin_unlock_bh(&hooks_lock);
+
+out:
+	if (found)
+		queue_delayed_work(system_power_efficient_wq, &hook_work, HZ);
+}
+
+static bool
+xt_flowoffload_skip(struct sk_buff *skb)
+{
+	struct ip_options *opt = &(IPCB(skb)->opt);
+
+	if (unlikely(opt->optlen))
+		return true;
+	if (skb_sec_path(skb))
+		return true;
+
+	return false;
+}
+
+static struct dst_entry *
+xt_flowoffload_dst(const struct nf_conn *ct, enum ip_conntrack_dir dir,
+		   const struct xt_action_param *par)
+{
+	struct dst_entry *dst = NULL;
+	struct flowi fl;
+
+	memset(&fl, 0, sizeof(fl));
+	switch (xt_family(par)) {
+	case NFPROTO_IPV4:
+		fl.u.ip4.daddr = ct->tuplehash[dir].tuple.src.u3.ip;
+		break;
+	case NFPROTO_IPV6:
+		fl.u.ip6.saddr = ct->tuplehash[dir].tuple.dst.u3.in6;
+		fl.u.ip6.daddr = ct->tuplehash[dir].tuple.src.u3.in6;
+		break;
+	}
+
+	nf_route(xt_net(par), &dst, &fl, false, xt_family(par));
+
+	return dst;
+}
+
+static int
+xt_flowoffload_route(struct sk_buff *skb, const struct nf_conn *ct,
+		     const struct xt_action_param *par,
+		     struct nf_flow_route *route, enum ip_conntrack_dir dir)
+{
+	struct dst_entry *this_dst, *other_dst;
+
+	this_dst = xt_flowoffload_dst(ct, dir, par);
+	other_dst = xt_flowoffload_dst(ct, !dir, par);
+	if (!this_dst || !other_dst)
+		return -ENOENT;
+
+	if (dst_xfrm(this_dst) || dst_xfrm(other_dst))
+		return -EINVAL;
+
+	route->tuple[dir].dst = this_dst;
+	route->tuple[dir].ifindex = xt_in(par)->ifindex;
+	route->tuple[!dir].dst = other_dst;
+	route->tuple[!dir].ifindex = xt_out(par)->ifindex;
+
+	return 0;
+}
+
+static unsigned int
+flowoffload_tg(struct sk_buff *skb, const struct xt_action_param *par)
+{
+	const struct xt_flowoffload_target_info *info = par->targinfo;
+	enum ip_conntrack_info ctinfo;
+	enum ip_conntrack_dir dir;
+	struct nf_flow_route route;
+	struct flow_offload *flow;
+	struct nf_conn *ct;
+	const struct nf_conn_help *help;
+
+	if (xt_flowoffload_skip(skb))
+		return XT_CONTINUE;
+
+	ct = nf_ct_get(skb, &ctinfo);
+	if (ct == NULL)
+		return XT_CONTINUE;
+
+	switch (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum) {
+	case IPPROTO_TCP:
+		if (ct->proto.tcp.state != TCP_CONNTRACK_ESTABLISHED)
+			return XT_CONTINUE;
+		break;
+	case IPPROTO_UDP:
+		break;
+	default:
+		return XT_CONTINUE;
+	}
+
+	help = nfct_help(ct);
+	if (help)
+		return XT_CONTINUE;
+
+	if (ctinfo == IP_CT_NEW ||
+	    ctinfo == IP_CT_RELATED)
+		return XT_CONTINUE;
+
+	if (!xt_in(par) || !xt_out(par))
+		return XT_CONTINUE;
+
+	if (test_and_set_bit(IPS_OFFLOAD_BIT, &ct->status))
+		return XT_CONTINUE;
+
+	dir = CTINFO2DIR(ctinfo);
+
+	if (xt_flowoffload_route(skb, ct, par, &route, dir) < 0)
+		goto err_flow_route;
+
+	flow = flow_offload_alloc(ct, &route);
+	if (!flow)
+		goto err_flow_alloc;
+
+	if (flow_offload_add(&nf_flowtable, flow) < 0)
+		goto err_flow_add;
+
+	xt_flowoffload_check_device(xt_in(par));
+	xt_flowoffload_check_device(xt_out(par));
+
+	if (info->flags & XT_FLOWOFFLOAD_HW)
+		nf_flow_offload_hw_add(xt_net(par), flow, ct);
+
+	return XT_CONTINUE;
+
+err_flow_add:
+	flow_offload_free(flow);
+err_flow_alloc:
+	dst_release(route.tuple[!dir].dst);
+err_flow_route:
+	clear_bit(IPS_OFFLOAD_BIT, &ct->status);
+	return XT_CONTINUE;
+}
+
+
+static int flowoffload_chk(const struct xt_tgchk_param *par)
+{
+	struct xt_flowoffload_target_info *info = par->targinfo;
+
+	if (info->flags & ~XT_FLOWOFFLOAD_MASK)
+		return -EINVAL;
+
+	return 0;
+}
+
+static struct xt_target offload_tg_reg __read_mostly = {
+	.family		= NFPROTO_UNSPEC,
+	.name		= "FLOWOFFLOAD",
+	.revision	= 0,
+	.targetsize	= sizeof(struct xt_flowoffload_target_info),
+	.usersize	= sizeof(struct xt_flowoffload_target_info),
+	.checkentry	= flowoffload_chk,
+	.target		= flowoffload_tg,
+	.me		= THIS_MODULE,
+};
+
+static int xt_flowoffload_table_init(struct nf_flowtable *table)
+{
+	table->flags = NF_FLOWTABLE_F_HW;
+	nf_flow_table_init(table);
+	return 0;
+}
+
+static void xt_flowoffload_table_cleanup(struct nf_flowtable *table)
+{
+	nf_flow_table_free(table);
+}
+
+static int flow_offload_netdev_event(struct notifier_block *this,
+				     unsigned long event, void *ptr)
+{
+	struct xt_flowoffload_hook *hook = NULL;
+	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+
+	if (event != NETDEV_UNREGISTER)
+		return NOTIFY_DONE;
+
+	spin_lock_bh(&hooks_lock);
+	hook = flow_offload_lookup_hook(dev);
+	if (hook) {
+		hlist_del(&hook->list);
+	}
+	spin_unlock_bh(&hooks_lock);
+	if (hook) {
+		nf_unregister_net_hook(hook->net, &hook->ops);
+		kfree(hook);
+	}
+
+	nf_flow_table_cleanup(dev_net(dev), dev);
+
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block flow_offload_netdev_notifier = {
+	.notifier_call = flow_offload_netdev_event,
+};
+
+static int __init xt_flowoffload_tg_init(void)
+{
+	int ret;
+
+	register_netdevice_notifier(&flow_offload_netdev_notifier);
+
+	INIT_DELAYED_WORK(&hook_work, xt_flowoffload_hook_work);
+
+	ret = xt_flowoffload_table_init(&nf_flowtable);
+	if (ret)
+		return ret;
+
+	ret = xt_register_target(&offload_tg_reg);
+	if (ret)
+		xt_flowoffload_table_cleanup(&nf_flowtable);
+
+	return ret;
+}
+
+static void __exit xt_flowoffload_tg_exit(void)
+{
+	xt_unregister_target(&offload_tg_reg);
+	xt_flowoffload_table_cleanup(&nf_flowtable);
+	unregister_netdevice_notifier(&flow_offload_netdev_notifier);
+}
+
+MODULE_LICENSE("GPL");
+module_init(xt_flowoffload_tg_init);
+module_exit(xt_flowoffload_tg_exit);
--- a/net/netfilter/nf_flow_table_core.c
+++ b/net/netfilter/nf_flow_table_core.c
@@ -6,7 +6,6 @@
 #include <linux/netdevice.h>
 #include <net/ip.h>
 #include <net/ip6_route.h>
-#include <net/netfilter/nf_tables.h>
 #include <net/netfilter/nf_flow_table.h>
 #include <net/netfilter/nf_conntrack.h>
 #include <net/netfilter/nf_conntrack_core.h>
--- /dev/null
+++ b/include/uapi/linux/netfilter/xt_FLOWOFFLOAD.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _XT_FLOWOFFLOAD_H
+#define _XT_FLOWOFFLOAD_H
+
+#include <linux/types.h>
+
+enum {
+	XT_FLOWOFFLOAD_HW	= 1 << 0,
+
+	XT_FLOWOFFLOAD_MASK	= XT_FLOWOFFLOAD_HW
+};
+
+struct xt_flowoffload_target_info {
+	__u32 flags;
+};
+
+#endif /* _XT_FLOWOFFLOAD_H */
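
The UAPI header above only defines the target data shared with userspace; iptables additionally needs a matching userspace extension to parse the rule options and fill in struct xt_flowoffload_target_info (OpenWrt carries such an extension in its iptables package, separate from this kernel patch). The following is a rough sketch of the shape that extension takes, written against the generic xtables option API; it is illustrative only, and the "--hw" option name is an assumption rather than something defined here:

/* Sketch of a userspace iptables extension for the FLOWOFFLOAD target.
 * Not part of this kernel patch; builds against the xtables development
 * headers. The "--hw" option name is assumed for illustration.
 */
#include <stdio.h>
#include <xtables.h>
#include <linux/netfilter/xt_FLOWOFFLOAD.h>

enum {
	O_HW = 0,
};

static void flowoffload_help(void)
{
	printf("FLOWOFFLOAD target options:\n"
	       "  --hw    request hardware offload for matched flows\n");
}

static const struct xt_option_entry flowoffload_opts[] = {
	{.name = "hw", .id = O_HW, .type = XTTYPE_NONE},
	XTOPT_TABLEEND,
};

static void flowoffload_parse(struct xt_option_call *cb)
{
	struct xt_flowoffload_target_info *info = cb->data;

	xtables_option_parse(cb);
	switch (cb->entry->id) {
	case O_HW:
		/* maps directly onto the flag checked by flowoffload_tg() */
		info->flags |= XT_FLOWOFFLOAD_HW;
		break;
	}
}

static void flowoffload_print(const void *ip,
			      const struct xt_entry_target *target, int numeric)
{
	const struct xt_flowoffload_target_info *info =
		(const void *)target->data;

	printf(" FLOWOFFLOAD");
	if (info->flags & XT_FLOWOFFLOAD_HW)
		printf(" hw");
}

static struct xtables_target flowoffload_tg_reg = {
	.family		= NFPROTO_UNSPEC,
	.name		= "FLOWOFFLOAD",
	.version	= XTABLES_VERSION,
	.size		= XT_ALIGN(sizeof(struct xt_flowoffload_target_info)),
	.userspacesize	= XT_ALIGN(sizeof(struct xt_flowoffload_target_info)),
	.help		= flowoffload_help,
	.print		= flowoffload_print,
	.x6_parse	= flowoffload_parse,
	.x6_options	= flowoffload_opts,
};

void _init(void)
{
	xtables_register_target(&flowoffload_tg_reg);
}

The key point of the sketch is that the userspace side does nothing beyond setting XT_FLOWOFFLOAD_HW in the flags word; all offload decisions are made by the kernel target added in this patch.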