netfilter: add a xt_FLOWOFFLOAD target for NAT/routing offload support
[openwrt/staging/chunkeey.git] target/linux/generic/hack-4.14/650-netfilter-add-xt_OFFLOAD-target.patch
From: Felix Fietkau <nbd@nbd.name>
Date: Tue, 20 Feb 2018 15:56:02 +0100
Subject: [PATCH] netfilter: add xt_OFFLOAD target

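This target lets firewall rules hand established TCP and UDP connections
over to the netfilter flow table infrastructure. When a packet hits the
FLOWOFFLOAD target, the module allocates a flow table entry for its
conntrack entry and registers ingress hooks on the input and output
devices, so that further packets of the flow bypass the regular
netfilter chains. A delayed work item keeps the set of ingress hooks in
sync with the devices still used by offloaded flows.

As an illustrative usage sketch (not part of this patch; the exact
table, chain and matches depend on the firewall configuration), a rule
of roughly this form enables offloading for forwarded established
traffic:

  iptables -A FORWARD -m conntrack --ctstate ESTABLISHED -j FLOWOFFLOAD
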
Signed-off-by: Felix Fietkau <nbd@nbd.name>
---
 create mode 100644 net/netfilter/xt_FLOWOFFLOAD.c

--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -75,8 +75,6 @@ config NF_TABLES_ARP
 	help
 	  This option enables the ARP support for nf_tables.

-endif # NF_TABLES
-
 config NF_FLOW_TABLE_IPV4
 	tristate "Netfilter flow table IPv4 module"
 	depends on NF_FLOW_TABLE
@@ -85,6 +83,8 @@ config NF_FLOW_TABLE_IPV4

 	  To compile it as a module, choose M here.

+endif # NF_TABLES
+
 config NF_DUP_IPV4
 	tristate "Netfilter IPv4 packet duplication to alternate destination"
 	depends on !NF_CONNTRACK || NF_CONNTRACK
--- a/net/ipv6/netfilter/Kconfig
+++ b/net/ipv6/netfilter/Kconfig
@@ -69,7 +69,6 @@ config NFT_FIB_IPV6
 	  multicast or blackhole.

 endif # NF_TABLES_IPV6
-endif # NF_TABLES

 config NF_FLOW_TABLE_IPV6
 	tristate "Netfilter flow table IPv6 module"
@@ -79,6 +78,8 @@ config NF_FLOW_TABLE_IPV6

 	  To compile it as a module, choose M here.

+endif # NF_TABLES
+
 config NF_DUP_IPV6
 	tristate "Netfilter IPv6 packet duplication to alternate destination"
 	depends on !NF_CONNTRACK || NF_CONNTRACK
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -665,8 +665,6 @@ config NFT_FIB_NETDEV

 endif # NF_TABLES_NETDEV

-endif # NF_TABLES
-
 config NF_FLOW_TABLE_INET
 	tristate "Netfilter flow table mixed IPv4/IPv6 module"
 	depends on NF_FLOW_TABLE
@@ -675,11 +673,12 @@ config NF_FLOW_TABLE_INET

 	  To compile it as a module, choose M here.

+endif # NF_TABLES
+
 config NF_FLOW_TABLE
 	tristate "Netfilter flow table module"
 	depends on NETFILTER_INGRESS
 	depends on NF_CONNTRACK
-	depends on NF_TABLES
 	help
 	  This option adds the flow table core infrastructure.

@@ -968,6 +967,15 @@ config NETFILTER_XT_TARGET_NOTRACK
 	depends on NETFILTER_ADVANCED
 	select NETFILTER_XT_TARGET_CT

+config NETFILTER_XT_TARGET_FLOWOFFLOAD
+	tristate '"FLOWOFFLOAD" target support'
+	depends on NF_FLOW_TABLE
+	depends on NETFILTER_INGRESS
+	help
+	  This option adds a `FLOWOFFLOAD' target, which uses the nf_flow_offload
+	  infrastructure to speed up processing of packets by bypassing the usual
+	  netfilter chains.
+
 config NETFILTER_XT_TARGET_RATEEST
 	tristate '"RATEEST" target support'
 	depends on NETFILTER_ADVANCED
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -134,6 +134,7 @@ obj-$(CONFIG_NETFILTER_XT_TARGET_CLASSIF
 obj-$(CONFIG_NETFILTER_XT_TARGET_CONNSECMARK) += xt_CONNSECMARK.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_CT) += xt_CT.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_DSCP) += xt_DSCP.o
+obj-$(CONFIG_NETFILTER_XT_TARGET_FLOWOFFLOAD) += xt_FLOWOFFLOAD.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_HL) += xt_HL.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_HMARK) += xt_HMARK.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_LED) += xt_LED.o
--- /dev/null
+++ b/net/netfilter/xt_FLOWOFFLOAD.c
@@ -0,0 +1,335 @@
+/*
+ * Copyright (C) 2018 Felix Fietkau <nbd@nbd.name>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/netfilter.h>
+#include <net/ip.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_flow_table.h>
+
+static struct nf_flowtable nf_flowtable;
+static HLIST_HEAD(hooks);
+static DEFINE_SPINLOCK(hooks_lock);
+static struct delayed_work hook_work;
+
+struct xt_flowoffload_hook {
+	struct hlist_node list;
+	struct nf_hook_ops ops;
+	bool registered;
+	bool used;
+};
+
+static unsigned int
+xt_flowoffload_net_hook(void *priv, struct sk_buff *skb,
+			const struct nf_hook_state *state)
+{
+	switch (skb->protocol) {
+	case htons(ETH_P_IP):
+		return nf_flow_offload_ip_hook(priv, skb, state);
+	case htons(ETH_P_IPV6):
+		return nf_flow_offload_ipv6_hook(priv, skb, state);
+	}
+
+	return NF_ACCEPT;
+}
+
+static int
+xt_flowoffload_create_hook(struct net_device *dev)
+{
+	struct xt_flowoffload_hook *hook;
+	struct nf_hook_ops *ops;
+
+	hook = kzalloc(sizeof(*hook), GFP_ATOMIC);
+	if (!hook)
+		return -ENOMEM;
+
+	ops = &hook->ops;
+	ops->pf = NFPROTO_NETDEV;
+	ops->hooknum = NF_NETDEV_INGRESS;
+	ops->priority = 10;
+	ops->priv = &nf_flowtable;
+	ops->hook = xt_flowoffload_net_hook;
+	ops->dev = dev;
+
+	hlist_add_head(&hook->list, &hooks);
+	mod_delayed_work(system_power_efficient_wq, &hook_work, 0);
+
+	return 0;
+}
+
+static struct xt_flowoffload_hook *
+flow_offload_lookup_hook(struct net_device *dev)
+{
+	struct xt_flowoffload_hook *hook;
+
+	hlist_for_each_entry(hook, &hooks, list) {
+		if (hook->ops.dev == dev)
+			return hook;
+	}
+
+	return NULL;
+}
+
+static void
+xt_flowoffload_check_device(struct net_device *dev)
+{
+	struct xt_flowoffload_hook *hook;
+
+	spin_lock_bh(&hooks_lock);
+	hook = flow_offload_lookup_hook(dev);
+	if (hook)
+		hook->used = true;
+	else
+		xt_flowoffload_create_hook(dev);
+	spin_unlock_bh(&hooks_lock);
+}
+
+static void
+xt_flowoffload_register_hooks(void)
+{
+	struct xt_flowoffload_hook *hook;
+
+restart:
+	hlist_for_each_entry(hook, &hooks, list) {
+		if (hook->registered)
+			continue;
+
+		hook->registered = true;
+		spin_unlock_bh(&hooks_lock);
+		nf_register_net_hook(dev_net(hook->ops.dev), &hook->ops);
+		spin_lock_bh(&hooks_lock);
+		goto restart;
+	}
+
+}
+
+static void
+xt_flowoffload_cleanup_hooks(void)
+{
+	struct xt_flowoffload_hook *hook;
+
+restart:
+	hlist_for_each_entry(hook, &hooks, list) {
+		if (hook->used)
+			continue;
+
+		hlist_del(&hook->list);
+		spin_unlock_bh(&hooks_lock);
+		nf_unregister_net_hook(dev_net(hook->ops.dev), &hook->ops);
+		kfree(hook);
+		spin_lock_bh(&hooks_lock);
+		goto restart;
+	}
+
+}
+
+static void
+xt_flowoffload_check_hook(struct flow_offload *flow, void *data)
+{
+	struct flow_offload_tuple *tuple = &flow->tuplehash[0].tuple;
+	struct xt_flowoffload_hook *hook;
+	bool *found = data;
+
+	spin_lock_bh(&hooks_lock);
+	hlist_for_each_entry(hook, &hooks, list) {
+		if (hook->ops.dev->ifindex != tuple->iifidx &&
+		    hook->ops.dev->ifindex != tuple->oifidx)
+			continue;
+
+		hook->used = true;
+		*found = true;
+	}
+	spin_unlock_bh(&hooks_lock);
+}
+
+static void
+xt_flowoffload_hook_work(struct work_struct *work)
+{
+	struct xt_flowoffload_hook *hook;
+	bool found = false;
+	int err;
+
+	spin_lock_bh(&hooks_lock);
+	xt_flowoffload_register_hooks();
+	hlist_for_each_entry(hook, &hooks, list)
+		hook->used = false;
+	spin_unlock_bh(&hooks_lock);
+
+	err = nf_flow_table_iterate(&nf_flowtable, xt_flowoffload_check_hook,
+				    &found);
+	if (err && err != -EAGAIN)
+		goto out;
+
+	spin_lock_bh(&hooks_lock);
+	xt_flowoffload_cleanup_hooks();
+	spin_unlock_bh(&hooks_lock);
+
+out:
+	if (found)
+		queue_delayed_work(system_power_efficient_wq, &hook_work, HZ);
+}
+
+static bool
+xt_flowoffload_skip(struct sk_buff *skb)
+{
+	struct ip_options *opt = &(IPCB(skb)->opt);
+
+	if (unlikely(opt->optlen))
+		return true;
+	if (skb_sec_path(skb))
+		return true;
+
+	return false;
+}
+
+static int
+xt_flowoffload_route(struct sk_buff *skb, const struct nf_conn *ct,
+		     const struct xt_action_param *par,
+		     struct nf_flow_route *route, enum ip_conntrack_dir dir)
+{
+	struct dst_entry *this_dst = skb_dst(skb);
+	struct dst_entry *other_dst = NULL;
+	struct flowi fl;
+
+	memset(&fl, 0, sizeof(fl));
+	switch (xt_family(par)) {
+	case NFPROTO_IPV4:
+		fl.u.ip4.daddr = ct->tuplehash[!dir].tuple.dst.u3.ip;
+		break;
+	case NFPROTO_IPV6:
+		fl.u.ip6.daddr = ct->tuplehash[!dir].tuple.dst.u3.in6;
+		break;
+	}
+
+	nf_route(xt_net(par), &other_dst, &fl, false, xt_family(par));
+	if (!other_dst)
+		return -ENOENT;
+
+	route->tuple[dir].dst = this_dst;
+	route->tuple[dir].ifindex = xt_in(par)->ifindex;
+	route->tuple[!dir].dst = other_dst;
+	route->tuple[!dir].ifindex = xt_out(par)->ifindex;
+
+	return 0;
+}
+
+static unsigned int
+flowoffload_tg(struct sk_buff *skb, const struct xt_action_param *par)
+{
+	enum ip_conntrack_info ctinfo;
+	enum ip_conntrack_dir dir;
+	struct nf_flow_route route;
+	struct flow_offload *flow;
+	struct nf_conn *ct;
+
+	if (xt_flowoffload_skip(skb))
+		return XT_CONTINUE;
+
+	ct = nf_ct_get(skb, &ctinfo);
+	if (ct == NULL)
+		return XT_CONTINUE;
+
+	switch (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum) {
+	case IPPROTO_TCP:
+	case IPPROTO_UDP:
+		break;
+	default:
+		return XT_CONTINUE;
+	}
+
+	if (test_bit(IPS_HELPER_BIT, &ct->status))
+		return XT_CONTINUE;
+
+	if (ctinfo == IP_CT_NEW ||
+	    ctinfo == IP_CT_RELATED)
+		return XT_CONTINUE;
+
+	if (!xt_in(par) || !xt_out(par))
+		return XT_CONTINUE;
+
+	if (test_and_set_bit(IPS_OFFLOAD_BIT, &ct->status))
+		return XT_CONTINUE;
+
+	dir = CTINFO2DIR(ctinfo);
+
+	if (xt_flowoffload_route(skb, ct, par, &route, dir) < 0)
+		goto err_flow_route;
+
+	flow = flow_offload_alloc(ct, &route);
+	if (!flow)
+		goto err_flow_alloc;
+
+	if (flow_offload_add(&nf_flowtable, flow) < 0)
+		goto err_flow_add;
+
+	xt_flowoffload_check_device(xt_in(par));
+	xt_flowoffload_check_device(xt_out(par));
+
+	return XT_CONTINUE;
+
+err_flow_add:
+	flow_offload_free(flow);
+err_flow_alloc:
+	dst_release(route.tuple[!dir].dst);
+err_flow_route:
+	clear_bit(IPS_OFFLOAD_BIT, &ct->status);
+	return XT_CONTINUE;
+}
+
+
+static int flowoffload_chk(const struct xt_tgchk_param *par)
+{
+	return 0;
+}
+
+static struct xt_target offload_tg_reg __read_mostly = {
+	.family		= NFPROTO_UNSPEC,
+	.name		= "FLOWOFFLOAD",
+	.revision	= 0,
+	.checkentry	= flowoffload_chk,
+	.target		= flowoffload_tg,
+	.me		= THIS_MODULE,
+};
+
+static int xt_flowoffload_table_init(struct nf_flowtable *table)
+{
+	nf_flow_table_init(table);
+	return 0;
+}
+
+static void xt_flowoffload_table_cleanup(struct nf_flowtable *table)
+{
+	nf_flow_table_free(table);
+}
+
+static int __init xt_flowoffload_tg_init(void)
+{
+	int ret;
+
+	INIT_DELAYED_WORK(&hook_work, xt_flowoffload_hook_work);
+
+	ret = xt_flowoffload_table_init(&nf_flowtable);
+	if (ret)
+		return ret;
+
+	ret = xt_register_target(&offload_tg_reg);
+	if (ret)
+		xt_flowoffload_table_cleanup(&nf_flowtable);
+
+	return ret;
+}
+
+static void __exit xt_flowoffload_tg_exit(void)
+{
+	xt_unregister_target(&offload_tg_reg);
+	xt_flowoffload_table_cleanup(&nf_flowtable);
+}
+
+MODULE_LICENSE("GPL");
+module_init(xt_flowoffload_tg_init);
+module_exit(xt_flowoffload_tg_exit);
--- a/net/netfilter/nf_flow_table_core.c
+++ b/net/netfilter/nf_flow_table_core.c
@@ -6,7 +6,6 @@
 #include <linux/netdevice.h>
 #include <net/ip.h>
 #include <net/ip6_route.h>
-#include <net/netfilter/nf_tables.h>
 #include <net/netfilter/nf_flow_table.h>
 #include <net/netfilter/nf_conntrack.h>
 #include <net/netfilter/nf_conntrack_core.h>