kernel: add linux 5.10 support
[openwrt/staging/rmilecki.git] target/linux/generic/pending-5.10/640-11-netfilter-flowtable-add-offload-support-for-xmit-pat.patch
From: Pablo Neira Ayuso <pablo@netfilter.org>
Date: Mon, 7 Dec 2020 20:31:44 +0100
Subject: [PATCH] netfilter: flowtable: add offload support for xmit path
 types

When the flow tuple xmit_type is set to FLOW_OFFLOAD_XMIT_DIRECT, the
dst_cache pointer is not valid, and the h_source/h_dest/ifidx out fields
need to be used.

This patch also adds the FLOW_ACTION_VLAN_PUSH action to pass the VLAN
tag to the driver.
---

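Illustration only (not applied by this patch): a condensed sketch of how the
rule builder is expected to pick the Ethernet source address for each xmit
path type. The xmit_type, out.h_source, out.ifidx and dst_cache fields follow
the flow_offload_tuple layout introduced earlier in this series; the helper
name example_eth_src_addr is made up, and the caller is assumed to dev_put()
any device returned via *held_dev.

static const unsigned char *
example_eth_src_addr(struct net *net,
		     const struct flow_offload_tuple *this_tuple,
		     const struct flow_offload_tuple *other_tuple,
		     struct net_device **held_dev)
{
	switch (this_tuple->xmit_type) {
	case FLOW_OFFLOAD_XMIT_DIRECT:
		/* dst_cache is not valid; use the cached out.* fields */
		return this_tuple->out.h_source;
	case FLOW_OFFLOAD_XMIT_NEIGH:
		/* neigh path: resolve the device from the reverse tuple */
		*held_dev = dev_get_by_index(net, other_tuple->iifidx);
		return *held_dev ? (*held_dev)->dev_addr : NULL;
	default:
		return NULL;
	}
}
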
--- a/net/netfilter/nf_flow_table_offload.c
+++ b/net/netfilter/nf_flow_table_offload.c
@@ -175,28 +175,45 @@ static int flow_offload_eth_src(struct n
enum flow_offload_tuple_dir dir,
struct nf_flow_rule *flow_rule)
{
- const struct flow_offload_tuple *tuple = &flow->tuplehash[!dir].tuple;
struct flow_action_entry *entry0 = flow_action_entry_next(flow_rule);
struct flow_action_entry *entry1 = flow_action_entry_next(flow_rule);
- struct net_device *dev;
+ const struct flow_offload_tuple *other_tuple, *this_tuple;
+ struct net_device *dev = NULL;
+ const unsigned char *addr;
u32 mask, val;
u16 val16;

- dev = dev_get_by_index(net, tuple->iifidx);
- if (!dev)
- return -ENOENT;
+ this_tuple = &flow->tuplehash[dir].tuple;
+
+ switch (this_tuple->xmit_type) {
+ case FLOW_OFFLOAD_XMIT_DIRECT:
+ addr = this_tuple->out.h_source;
+ break;
+ case FLOW_OFFLOAD_XMIT_NEIGH:
+ other_tuple = &flow->tuplehash[!dir].tuple;
+ dev = dev_get_by_index(net, other_tuple->iifidx);
+ if (!dev)
+ return -ENOENT;
+
+ addr = dev->dev_addr;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }

mask = ~0xffff0000;
- memcpy(&val16, dev->dev_addr, 2);
+ memcpy(&val16, addr, 2);
val = val16 << 16;
flow_offload_mangle(entry0, FLOW_ACT_MANGLE_HDR_TYPE_ETH, 4,
&val, &mask);

mask = ~0xffffffff;
- memcpy(&val, dev->dev_addr + 2, 4);
+ memcpy(&val, addr + 2, 4);
flow_offload_mangle(entry1, FLOW_ACT_MANGLE_HDR_TYPE_ETH, 8,
&val, &mask);
- dev_put(dev);
+
+ if (dev)
+ dev_put(dev);

return 0;
}
@@ -208,27 +225,40 @@ static int flow_offload_eth_dst(struct n
{
struct flow_action_entry *entry0 = flow_action_entry_next(flow_rule);
struct flow_action_entry *entry1 = flow_action_entry_next(flow_rule);
- const void *daddr = &flow->tuplehash[!dir].tuple.src_v4;
+ const struct flow_offload_tuple *other_tuple, *this_tuple;
const struct dst_entry *dst_cache;
unsigned char ha[ETH_ALEN];
struct neighbour *n;
+ const void *daddr;
u32 mask, val;
u8 nud_state;
u16 val16;

- dst_cache = flow->tuplehash[dir].tuple.dst_cache;
- n = dst_neigh_lookup(dst_cache, daddr);
- if (!n)
- return -ENOENT;
+ this_tuple = &flow->tuplehash[dir].tuple;

- read_lock_bh(&n->lock);
- nud_state = n->nud_state;
- ether_addr_copy(ha, n->ha);
- read_unlock_bh(&n->lock);
+ switch (this_tuple->xmit_type) {
+ case FLOW_OFFLOAD_XMIT_DIRECT:
+ ether_addr_copy(ha, this_tuple->out.h_dest);
+ break;
+ case FLOW_OFFLOAD_XMIT_NEIGH:
+ other_tuple = &flow->tuplehash[!dir].tuple;
+ daddr = &other_tuple->src_v4;
+ dst_cache = this_tuple->dst_cache;
+ n = dst_neigh_lookup(dst_cache, daddr);
+ if (!n)
+ return -ENOENT;

- if (!(nud_state & NUD_VALID)) {
+ read_lock_bh(&n->lock);
+ nud_state = n->nud_state;
+ ether_addr_copy(ha, n->ha);
+ read_unlock_bh(&n->lock);
neigh_release(n);
- return -ENOENT;
+
+ if (!(nud_state & NUD_VALID))
+ return -ENOENT;
+ break;
+ default:
+ return -EOPNOTSUPP;
}

mask = ~0xffffffff;
@@ -241,7 +271,6 @@ static int flow_offload_eth_dst(struct n
val = val16;
flow_offload_mangle(entry1, FLOW_ACT_MANGLE_HDR_TYPE_ETH, 4,
&val, &mask);
- neigh_release(n);

return 0;
}
@@ -463,27 +492,52 @@ static void flow_offload_ipv4_checksum(s
}
}

-static void flow_offload_redirect(const struct flow_offload *flow,
+static void flow_offload_redirect(struct net *net,
+ const struct flow_offload *flow,
enum flow_offload_tuple_dir dir,
struct nf_flow_rule *flow_rule)
{
- struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
- struct rtable *rt;
+ const struct flow_offload_tuple *this_tuple, *other_tuple;
+ struct flow_action_entry *entry;
+ struct net_device *dev;
+ int ifindex;

- rt = (struct rtable *)flow->tuplehash[dir].tuple.dst_cache;
+ this_tuple = &flow->tuplehash[dir].tuple;
+ switch (this_tuple->xmit_type) {
+ case FLOW_OFFLOAD_XMIT_DIRECT:
+ this_tuple = &flow->tuplehash[dir].tuple;
+ ifindex = this_tuple->out.ifidx;
+ break;
+ case FLOW_OFFLOAD_XMIT_NEIGH:
+ other_tuple = &flow->tuplehash[!dir].tuple;
+ ifindex = other_tuple->iifidx;
+ break;
+ default:
+ return;
+ }
+
+ dev = dev_get_by_index(net, ifindex);
+ if (!dev)
+ return;
+
+ entry = flow_action_entry_next(flow_rule);
entry->id = FLOW_ACTION_REDIRECT;
- entry->dev = rt->dst.dev;
- dev_hold(rt->dst.dev);
+ entry->dev = dev;
}

static void flow_offload_encap_tunnel(const struct flow_offload *flow,
enum flow_offload_tuple_dir dir,
struct nf_flow_rule *flow_rule)
{
+ const struct flow_offload_tuple *this_tuple;
struct flow_action_entry *entry;
struct dst_entry *dst;

- dst = flow->tuplehash[dir].tuple.dst_cache;
+ this_tuple = &flow->tuplehash[dir].tuple;
+ if (this_tuple->xmit_type == FLOW_OFFLOAD_XMIT_DIRECT)
+ return;
+
+ dst = this_tuple->dst_cache;
if (dst && dst->lwtstate) {
struct ip_tunnel_info *tun_info;

@@ -500,10 +554,15 @@ static void flow_offload_decap_tunnel(co
enum flow_offload_tuple_dir dir,
struct nf_flow_rule *flow_rule)
{
+ const struct flow_offload_tuple *other_tuple;
struct flow_action_entry *entry;
struct dst_entry *dst;

- dst = flow->tuplehash[!dir].tuple.dst_cache;
+ other_tuple = &flow->tuplehash[!dir].tuple;
+ if (other_tuple->xmit_type == FLOW_OFFLOAD_XMIT_DIRECT)
+ return;
+
+ dst = other_tuple->dst_cache;
if (dst && dst->lwtstate) {
struct ip_tunnel_info *tun_info;

@@ -515,10 +574,14 @@ static void flow_offload_decap_tunnel(co
}
}

-int nf_flow_rule_route_ipv4(struct net *net, const struct flow_offload *flow,
- enum flow_offload_tuple_dir dir,
- struct nf_flow_rule *flow_rule)
+static int
+nf_flow_rule_route_common(struct net *net, const struct flow_offload *flow,
+ enum flow_offload_tuple_dir dir,
+ struct nf_flow_rule *flow_rule)
{
+ const struct flow_offload_tuple *other_tuple;
+ int i;
+
flow_offload_decap_tunnel(flow, dir, flow_rule);
flow_offload_encap_tunnel(flow, dir, flow_rule);

@@ -526,6 +589,26 @@ int nf_flow_rule_route_ipv4(struct net *
flow_offload_eth_dst(net, flow, dir, flow_rule) < 0)
return -1;

+ other_tuple = &flow->tuplehash[!dir].tuple;
+
+ for (i = 0; i < other_tuple->in_vlan_num; i++) {
+ struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
+
+ entry->id = FLOW_ACTION_VLAN_PUSH;
+ entry->vlan.vid = other_tuple->in_vlan[i].id;
+ entry->vlan.proto = other_tuple->in_vlan[i].proto;
+ }
+
+ return 0;
+}
+
+int nf_flow_rule_route_ipv4(struct net *net, const struct flow_offload *flow,
+ enum flow_offload_tuple_dir dir,
+ struct nf_flow_rule *flow_rule)
+{
+ if (nf_flow_rule_route_common(net, flow, dir, flow_rule) < 0)
+ return -1;
+
if (test_bit(NF_FLOW_SNAT, &flow->flags)) {
flow_offload_ipv4_snat(net, flow, dir, flow_rule);
flow_offload_port_snat(net, flow, dir, flow_rule);
@@ -538,7 +621,7 @@ int nf_flow_rule_route_ipv4(struct net *
test_bit(NF_FLOW_DNAT, &flow->flags))
flow_offload_ipv4_checksum(net, flow, flow_rule);

- flow_offload_redirect(flow, dir, flow_rule);
+ flow_offload_redirect(net, flow, dir, flow_rule);

return 0;
}
@@ -548,11 +631,7 @@ int nf_flow_rule_route_ipv6(struct net *
enum flow_offload_tuple_dir dir,
struct nf_flow_rule *flow_rule)
{
- flow_offload_decap_tunnel(flow, dir, flow_rule);
- flow_offload_encap_tunnel(flow, dir, flow_rule);
-
- if (flow_offload_eth_src(net, flow, dir, flow_rule) < 0 ||
- flow_offload_eth_dst(net, flow, dir, flow_rule) < 0)
+ if (nf_flow_rule_route_common(net, flow, dir, flow_rule) < 0)
return -1;

if (test_bit(NF_FLOW_SNAT, &flow->flags)) {
@@ -564,7 +643,7 @@ int nf_flow_rule_route_ipv6(struct net *
flow_offload_port_dnat(net, flow, dir, flow_rule);
}

- flow_offload_redirect(flow, dir, flow_rule);
+ flow_offload_redirect(net, flow, dir, flow_rule);

return 0;
}
@@ -578,10 +657,10 @@ nf_flow_offload_rule_alloc(struct net *n
enum flow_offload_tuple_dir dir)
{
const struct nf_flowtable *flowtable = offload->flowtable;
+ const struct flow_offload_tuple *tuple, *other_tuple;
const struct flow_offload *flow = offload->flow;
- const struct flow_offload_tuple *tuple;
+ struct dst_entry *other_dst = NULL;
struct nf_flow_rule *flow_rule;
- struct dst_entry *other_dst;
int err = -ENOMEM;

flow_rule = kzalloc(sizeof(*flow_rule), GFP_KERNEL);
@@ -597,7 +676,10 @@ nf_flow_offload_rule_alloc(struct net *n
flow_rule->rule->match.key = &flow_rule->match.key;

tuple = &flow->tuplehash[dir].tuple;
- other_dst = flow->tuplehash[!dir].tuple.dst_cache;
+ other_tuple = &flow->tuplehash[!dir].tuple;
+ if (other_tuple->xmit_type == FLOW_OFFLOAD_XMIT_NEIGH)
+ other_dst = other_tuple->dst_cache;
+
err = nf_flow_rule_match(&flow_rule->match, tuple, other_dst);
if (err < 0)
goto err_flow_match;
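
Illustration only (not part of this patch): a minimal sketch of how a driver
callback walking the generated flow_action list might consume the
FLOW_ACTION_VLAN_PUSH and FLOW_ACTION_REDIRECT entries described above. The
my_ctx / my_parse_actions names are made up; only the <net/flow_offload.h>
helpers and struct fields are real.

#include <net/flow_offload.h>

struct my_ctx {
	struct net_device *out_dev;	/* egress device from FLOW_ACTION_REDIRECT */
	__be16 vlan_proto;		/* protocol of the pushed VLAN tag */
	u16 vlan_id;			/* VLAN ID of the pushed tag */
};

static int my_parse_actions(const struct flow_action *actions,
			    struct my_ctx *ctx)
{
	const struct flow_action_entry *act;
	int i;

	flow_action_for_each(i, act, actions) {
		switch (act->id) {
		case FLOW_ACTION_VLAN_PUSH:
			/* VLAN tag passed down by this patch */
			ctx->vlan_id = act->vlan.vid;
			ctx->vlan_proto = act->vlan.proto;
			break;
		case FLOW_ACTION_REDIRECT:
			/* device resolved from out.ifidx or the reverse iifidx */
			ctx->out_dev = act->dev;
			break;
		case FLOW_ACTION_MANGLE:
			/* rewritten MAC/IP/port fields, handling is driver specific */
			break;
		default:
			return -EOPNOTSUPP;
		}
	}
	return 0;
}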