kernel: bump 5.10 to 5.10.42
[openwrt/staging/chunkeey.git] target/linux/generic/backport-5.10/610-v5.13-17-netfilter-flowtable-add-xmit-path-types.patch
From: Pablo Neira Ayuso <pablo@netfilter.org>
Date: Wed, 24 Mar 2021 02:30:38 +0100
Subject: [PATCH] netfilter: flowtable: add xmit path types

Add the xmit_type field that defines the two supported xmit paths in the
flowtable data plane, which are the neighbour and the xfrm xmit paths.
This patch prepares for new flowtable xmit path types to come.

Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
---

--- a/include/net/netfilter/nf_flow_table.h
+++ b/include/net/netfilter/nf_flow_table.h
@@ -89,6 +89,11 @@ enum flow_offload_tuple_dir {
 };
 #define FLOW_OFFLOAD_DIR_MAX	IP_CT_DIR_MAX
 
+enum flow_offload_xmit_type {
+	FLOW_OFFLOAD_XMIT_NEIGH		= 0,
+	FLOW_OFFLOAD_XMIT_XFRM,
+};
+
 struct flow_offload_tuple {
 	union {
 		struct in_addr		src_v4;
@@ -111,7 +116,8 @@ struct flow_offload_tuple {
 	/* All members above are keys for lookups, see flow_offload_hash(). */
 	struct { }			__hash;
 
-	u8				dir;
+	u8				dir:6,
+					xmit_type:2;
 
 	u16				mtu;
 
@@ -157,7 +163,8 @@ static inline __s32 nf_flow_timeout_delt
 
 struct nf_flow_route {
 	struct {
-		struct dst_entry	*dst;
+		struct dst_entry		*dst;
+		enum flow_offload_xmit_type	xmit_type;
 	} tuple[FLOW_OFFLOAD_DIR_MAX];
 };
 
--- a/net/netfilter/nf_flow_table_core.c
+++ b/net/netfilter/nf_flow_table_core.c
@@ -95,6 +95,7 @@ static int flow_offload_fill_route(struc
 	}
 
 	flow_tuple->iifidx = other_dst->dev->ifindex;
+	flow_tuple->xmit_type = route->tuple[dir].xmit_type;
 	flow_tuple->dst_cache = dst;
 
 	return 0;
--- a/net/netfilter/nf_flow_table_ip.c
+++ b/net/netfilter/nf_flow_table_ip.c
@@ -235,8 +235,6 @@ nf_flow_offload_ip_hook(void *priv, stru
 
 	dir = tuplehash->tuple.dir;
 	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
-	rt = (struct rtable *)flow->tuplehash[dir].tuple.dst_cache;
-	outdev = rt->dst.dev;
 
 	if (unlikely(nf_flow_exceeds_mtu(skb, flow->tuplehash[dir].tuple.mtu)))
 		return NF_ACCEPT;
@@ -265,13 +263,16 @@ nf_flow_offload_ip_hook(void *priv, stru
 	if (flow_table->flags & NF_FLOWTABLE_COUNTER)
 		nf_ct_acct_update(flow->ct, tuplehash->tuple.dir, skb->len);
 
-	if (unlikely(dst_xfrm(&rt->dst))) {
+	rt = (struct rtable *)tuplehash->tuple.dst_cache;
+
+	if (unlikely(tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM)) {
 		memset(skb->cb, 0, sizeof(struct inet_skb_parm));
 		IPCB(skb)->iif = skb->dev->ifindex;
 		IPCB(skb)->flags = IPSKB_FORWARDED;
 		return nf_flow_xmit_xfrm(skb, state, &rt->dst);
 	}
 
+	outdev = rt->dst.dev;
 	skb->dev = outdev;
 	nexthop = rt_nexthop(rt, flow->tuplehash[!dir].tuple.src_v4.s_addr);
 	skb_dst_set_noref(skb, &rt->dst);
@@ -456,8 +457,6 @@ nf_flow_offload_ipv6_hook(void *priv, st
 
 	dir = tuplehash->tuple.dir;
 	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
-	rt = (struct rt6_info *)flow->tuplehash[dir].tuple.dst_cache;
-	outdev = rt->dst.dev;
 
 	if (unlikely(nf_flow_exceeds_mtu(skb, flow->tuplehash[dir].tuple.mtu)))
 		return NF_ACCEPT;
@@ -485,13 +484,16 @@ nf_flow_offload_ipv6_hook(void *priv, st
 	if (flow_table->flags & NF_FLOWTABLE_COUNTER)
 		nf_ct_acct_update(flow->ct, tuplehash->tuple.dir, skb->len);
 
-	if (unlikely(dst_xfrm(&rt->dst))) {
+	rt = (struct rt6_info *)tuplehash->tuple.dst_cache;
+
+	if (unlikely(tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM)) {
 		memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
 		IP6CB(skb)->iif = skb->dev->ifindex;
 		IP6CB(skb)->flags = IP6SKB_FORWARDED;
 		return nf_flow_xmit_xfrm(skb, state, &rt->dst);
 	}
 
+	outdev = rt->dst.dev;
 	skb->dev = outdev;
 	nexthop = rt6_nexthop(rt, &flow->tuplehash[!dir].tuple.src_v6);
 	skb_dst_set_noref(skb, &rt->dst);
--- a/net/netfilter/nft_flow_offload.c
+++ b/net/netfilter/nft_flow_offload.c
@@ -19,6 +19,22 @@ struct nft_flow_offload {
 	struct nft_flowtable	*flowtable;
 };
 
+static enum flow_offload_xmit_type nft_xmit_type(struct dst_entry *dst)
+{
+	if (dst_xfrm(dst))
+		return FLOW_OFFLOAD_XMIT_XFRM;
+
+	return FLOW_OFFLOAD_XMIT_NEIGH;
+}
+
+static void nft_default_forward_path(struct nf_flow_route *route,
+				     struct dst_entry *dst_cache,
+				     enum ip_conntrack_dir dir)
+{
+	route->tuple[dir].dst		= dst_cache;
+	route->tuple[dir].xmit_type	= nft_xmit_type(dst_cache);
+}
+
 static int nft_flow_route(const struct nft_pktinfo *pkt,
 			  const struct nf_conn *ct,
 			  struct nf_flow_route *route,
@@ -44,8 +60,8 @@ static int nft_flow_route(const struct n
 	if (!other_dst)
 		return -ENOENT;
 
-	route->tuple[dir].dst = this_dst;
-	route->tuple[!dir].dst = other_dst;
+	nft_default_forward_path(route, this_dst, dir);
+	nft_default_forward_path(route, other_dst, !dir);
 
 	return 0;
 }
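
The hunks above move the xfrm decision out of the per-packet path: nft_flow_route() now classifies each direction's route once, at offload time, and the IPv4/IPv6 hooks only compare the cached xmit_type instead of calling dst_xfrm() for every packet. The standalone C sketch below is a minimal illustration of that control flow under simplified assumptions; it is not kernel code. struct dst_entry, struct flow_tuple, has_xfrm, xmit_type_of() and xmit_path() are hypothetical stand-ins for this sketch, with has_xfrm playing the role of dst_xfrm(dst) returning non-NULL.

#include <stdbool.h>
#include <stdio.h>

/* Values as introduced by the patch in nf_flow_table.h. */
enum flow_offload_xmit_type {
	FLOW_OFFLOAD_XMIT_NEIGH	= 0,
	FLOW_OFFLOAD_XMIT_XFRM,
};

/* Hypothetical stand-in for the kernel's struct dst_entry. */
struct dst_entry {
	bool has_xfrm;			/* stands in for dst_xfrm(dst) != NULL */
};

/* Hypothetical stand-in for the tail of struct flow_offload_tuple. */
struct flow_tuple {
	unsigned char dir:6,		/* direction, as before the patch */
		      xmit_type:2;	/* new 2-bit xmit path type */
};

/* Offload time: classify the route once (mirrors nft_xmit_type()). */
static enum flow_offload_xmit_type xmit_type_of(const struct dst_entry *dst)
{
	return dst->has_xfrm ? FLOW_OFFLOAD_XMIT_XFRM : FLOW_OFFLOAD_XMIT_NEIGH;
}

/* Packet time: branch on the cached type, no dst inspection needed. */
static const char *xmit_path(const struct flow_tuple *tuple)
{
	return tuple->xmit_type == FLOW_OFFLOAD_XMIT_XFRM ?
	       "xfrm xmit path" : "neighbour xmit path";
}

int main(void)
{
	struct dst_entry plain = { .has_xfrm = false };
	struct dst_entry ipsec = { .has_xfrm = true };
	struct flow_tuple fwd = { .dir = 0, .xmit_type = xmit_type_of(&plain) };
	struct flow_tuple sec = { .dir = 1, .xmit_type = xmit_type_of(&ipsec) };

	printf("plain flow -> %s\n", xmit_path(&fwd));	/* neighbour xmit path */
	printf("ipsec flow -> %s\n", xmit_path(&sec));	/* xfrm xmit path */
	return 0;
}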