kernel: bump 5.10 to 5.10.42
[openwrt/staging/wigyori.git] target/linux/generic/backport-5.10/610-v5.13-04-netfilter-flowtable-consolidate-skb_try_make_writabl.patch
From: Pablo Neira Ayuso <pablo@netfilter.org>
Date: Tue, 23 Mar 2021 00:56:22 +0100
Subject: [PATCH] netfilter: flowtable: consolidate
 skb_try_make_writable() call

Fetch the layer 4 header size to be mangled by NAT when building the
tuple, then use it to make the network and the transport headers
writable. After this update, the NAT routines now assume that the
skbuff area is writable. Do the pointer refetch only after the single
skb_try_make_writable() call.

Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
---

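A small standalone C model may help picture the new flow of control (plain
userspace code, not the kernel implementation; pkt_buf, l4_hdrsize_for and
make_headers_writable are made-up names used only for illustration): the
tuple-building step records the transport header size, a single length check
then covers both the network and the transport headers, and header pointers
are fetched only after that check, matching the assumption that the NAT
helpers operate on an already writable area.

/*
 * Simplified model of the consolidated writability check. In the real
 * patch the equivalents are nf_flow_tuple_ip()/nf_flow_tuple_ipv6()
 * (which now export the L4 header size) and the single
 * skb_try_make_writable() call in the hook functions.
 */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

#define PROTO_TCP 6
#define PROTO_UDP 17

/* hypothetical packet view: linear buffer plus its writable length */
struct pkt_buf {
	uint8_t *data;
	size_t   writable_len;
};

/* step 1: record the transport header size while building the tuple */
static int l4_hdrsize_for(uint8_t proto, uint32_t *hdrsize)
{
	switch (proto) {
	case PROTO_TCP:
		*hdrsize = 20;	/* sizeof(struct tcphdr) */
		return 0;
	case PROTO_UDP:
		*hdrsize = 8;	/* sizeof(struct udphdr) */
		return 0;
	default:
		return -1;	/* protocol not handled by the flowtable */
	}
}

/* step 2: one length check covers network plus transport header */
static int make_headers_writable(const struct pkt_buf *pkt,
				 uint32_t thoff, uint32_t hdrsize)
{
	return (size_t)thoff + hdrsize <= pkt->writable_len ? 0 : -1;
}

int main(void)
{
	uint8_t payload[64] = { 0 };
	struct pkt_buf pkt = { .data = payload, .writable_len = sizeof(payload) };
	uint32_t thoff = 20;	/* IPv4 header without options */
	uint32_t hdrsize;

	if (l4_hdrsize_for(PROTO_TCP, &hdrsize) < 0 ||
	    make_headers_writable(&pkt, thoff, hdrsize) < 0)
		return 1;

	/*
	 * step 3: fetch header pointers only after the single check; the
	 * NAT-style mangling below this point may assume writability.
	 */
	uint8_t *iph  = pkt.data;
	uint8_t *tcph = pkt.data + thoff;
	printf("headers writable: ip@%p l4@%p\n", (void *)iph, (void *)tcph);
	return 0;
}
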
--- a/net/netfilter/nf_flow_table_core.c
+++ b/net/netfilter/nf_flow_table_core.c
@@ -394,9 +394,6 @@ static int nf_flow_nat_port_tcp(struct s
 {
 	struct tcphdr *tcph;
 
-	if (skb_try_make_writable(skb, thoff + sizeof(*tcph)))
-		return -1;
-
 	tcph = (void *)(skb_network_header(skb) + thoff);
 	inet_proto_csum_replace2(&tcph->check, skb, port, new_port, false);
 
@@ -408,9 +405,6 @@ static int nf_flow_nat_port_udp(struct s
 {
 	struct udphdr *udph;
 
-	if (skb_try_make_writable(skb, thoff + sizeof(*udph)))
-		return -1;
-
 	udph = (void *)(skb_network_header(skb) + thoff);
 	if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
 		inet_proto_csum_replace2(&udph->check, skb, port,
@@ -446,9 +440,6 @@ int nf_flow_snat_port(const struct flow_
 	struct flow_ports *hdr;
 	__be16 port, new_port;
 
-	if (skb_try_make_writable(skb, thoff + sizeof(*hdr)))
-		return -1;
-
 	hdr = (void *)(skb_network_header(skb) + thoff);
 
 	switch (dir) {
@@ -477,9 +468,6 @@ int nf_flow_dnat_port(const struct flow_
 	struct flow_ports *hdr;
 	__be16 port, new_port;
 
-	if (skb_try_make_writable(skb, thoff + sizeof(*hdr)))
-		return -1;
-
 	hdr = (void *)(skb_network_header(skb) + thoff);
 
 	switch (dir) {
--- a/net/netfilter/nf_flow_table_ip.c
+++ b/net/netfilter/nf_flow_table_ip.c
@@ -39,9 +39,6 @@ static int nf_flow_nat_ip_tcp(struct sk_
 {
 	struct tcphdr *tcph;
 
-	if (skb_try_make_writable(skb, thoff + sizeof(*tcph)))
-		return -1;
-
 	tcph = (void *)(skb_network_header(skb) + thoff);
 	inet_proto_csum_replace4(&tcph->check, skb, addr, new_addr, true);
 
@@ -53,9 +50,6 @@ static int nf_flow_nat_ip_udp(struct sk_
 {
 	struct udphdr *udph;
 
-	if (skb_try_make_writable(skb, thoff + sizeof(*udph)))
-		return -1;
-
 	udph = (void *)(skb_network_header(skb) + thoff);
 	if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
 		inet_proto_csum_replace4(&udph->check, skb, addr,
@@ -136,19 +130,17 @@ static int nf_flow_dnat_ip(const struct
 }
 
 static int nf_flow_nat_ip(const struct flow_offload *flow, struct sk_buff *skb,
-			  unsigned int thoff, enum flow_offload_tuple_dir dir)
+			  unsigned int thoff, enum flow_offload_tuple_dir dir,
+			  struct iphdr *iph)
 {
-	struct iphdr *iph = ip_hdr(skb);
-
 	if (test_bit(NF_FLOW_SNAT, &flow->flags) &&
 	    (nf_flow_snat_port(flow, skb, thoff, iph->protocol, dir) < 0 ||
-	     nf_flow_snat_ip(flow, skb, ip_hdr(skb), thoff, dir) < 0))
+	     nf_flow_snat_ip(flow, skb, iph, thoff, dir) < 0))
 		return -1;
 
-	iph = ip_hdr(skb);
 	if (test_bit(NF_FLOW_DNAT, &flow->flags) &&
 	    (nf_flow_dnat_port(flow, skb, thoff, iph->protocol, dir) < 0 ||
-	     nf_flow_dnat_ip(flow, skb, ip_hdr(skb), thoff, dir) < 0))
+	     nf_flow_dnat_ip(flow, skb, iph, thoff, dir) < 0))
 		return -1;
 
 	return 0;
@@ -160,10 +152,10 @@ static bool ip_has_options(unsigned int
 }
 
 static int nf_flow_tuple_ip(struct sk_buff *skb, const struct net_device *dev,
-			    struct flow_offload_tuple *tuple)
+			    struct flow_offload_tuple *tuple, u32 *hdrsize)
 {
-	unsigned int thoff, hdrsize;
 	struct flow_ports *ports;
+	unsigned int thoff;
 	struct iphdr *iph;
 
 	if (!pskb_may_pull(skb, sizeof(*iph)))
@@ -178,10 +170,10 @@ static int nf_flow_tuple_ip(struct sk_bu
 
 	switch (iph->protocol) {
 	case IPPROTO_TCP:
-		hdrsize = sizeof(struct tcphdr);
+		*hdrsize = sizeof(struct tcphdr);
 		break;
 	case IPPROTO_UDP:
-		hdrsize = sizeof(struct udphdr);
+		*hdrsize = sizeof(struct udphdr);
 		break;
 	default:
 		return -1;
@@ -191,7 +183,7 @@ static int nf_flow_tuple_ip(struct sk_bu
 		return -1;
 
 	thoff = iph->ihl * 4;
-	if (!pskb_may_pull(skb, thoff + hdrsize))
+	if (!pskb_may_pull(skb, thoff + *hdrsize))
 		return -1;
 
 	iph = ip_hdr(skb);
@@ -252,11 +244,12 @@ nf_flow_offload_ip_hook(void *priv, stru
 	unsigned int thoff;
 	struct iphdr *iph;
 	__be32 nexthop;
+	u32 hdrsize;
 
 	if (skb->protocol != htons(ETH_P_IP))
 		return NF_ACCEPT;
 
-	if (nf_flow_tuple_ip(skb, state->in, &tuple) < 0)
+	if (nf_flow_tuple_ip(skb, state->in, &tuple, &hdrsize) < 0)
 		return NF_ACCEPT;
 
 	tuplehash = flow_offload_lookup(flow_table, &tuple);
@@ -271,11 +264,13 @@ nf_flow_offload_ip_hook(void *priv, stru
 	if (unlikely(nf_flow_exceeds_mtu(skb, flow->tuplehash[dir].tuple.mtu)))
 		return NF_ACCEPT;
 
-	if (skb_try_make_writable(skb, sizeof(*iph)))
+	iph = ip_hdr(skb);
+	thoff = iph->ihl * 4;
+	if (skb_try_make_writable(skb, thoff + hdrsize))
 		return NF_DROP;
 
-	thoff = ip_hdr(skb)->ihl * 4;
-	if (nf_flow_state_check(flow, ip_hdr(skb)->protocol, skb, thoff))
+	iph = ip_hdr(skb);
+	if (nf_flow_state_check(flow, iph->protocol, skb, thoff))
 		return NF_ACCEPT;
 
 	flow_offload_refresh(flow_table, flow);
@@ -285,10 +280,9 @@ nf_flow_offload_ip_hook(void *priv, stru
 		return NF_ACCEPT;
 	}
 
-	if (nf_flow_nat_ip(flow, skb, thoff, dir) < 0)
+	if (nf_flow_nat_ip(flow, skb, thoff, dir, iph) < 0)
 		return NF_DROP;
 
-	iph = ip_hdr(skb);
 	ip_decrease_ttl(iph);
 	skb->tstamp = 0;
 
@@ -317,9 +311,6 @@ static int nf_flow_nat_ipv6_tcp(struct s
 {
 	struct tcphdr *tcph;
 
-	if (skb_try_make_writable(skb, thoff + sizeof(*tcph)))
-		return -1;
-
 	tcph = (void *)(skb_network_header(skb) + thoff);
 	inet_proto_csum_replace16(&tcph->check, skb, addr->s6_addr32,
 				  new_addr->s6_addr32, true);
@@ -333,9 +324,6 @@ static int nf_flow_nat_ipv6_udp(struct s
 {
 	struct udphdr *udph;
 
-	if (skb_try_make_writable(skb, thoff + sizeof(*udph)))
-		return -1;
-
 	udph = (void *)(skb_network_header(skb) + thoff);
 	if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
 		inet_proto_csum_replace16(&udph->check, skb, addr->s6_addr32,
@@ -417,31 +405,30 @@ static int nf_flow_dnat_ipv6(const struc
 
 static int nf_flow_nat_ipv6(const struct flow_offload *flow,
 			    struct sk_buff *skb,
-			    enum flow_offload_tuple_dir dir)
+			    enum flow_offload_tuple_dir dir,
+			    struct ipv6hdr *ip6h)
 {
-	struct ipv6hdr *ip6h = ipv6_hdr(skb);
 	unsigned int thoff = sizeof(*ip6h);
 
 	if (test_bit(NF_FLOW_SNAT, &flow->flags) &&
 	    (nf_flow_snat_port(flow, skb, thoff, ip6h->nexthdr, dir) < 0 ||
-	     nf_flow_snat_ipv6(flow, skb, ipv6_hdr(skb), thoff, dir) < 0))
+	     nf_flow_snat_ipv6(flow, skb, ip6h, thoff, dir) < 0))
 		return -1;
 
-	ip6h = ipv6_hdr(skb);
 	if (test_bit(NF_FLOW_DNAT, &flow->flags) &&
 	    (nf_flow_dnat_port(flow, skb, thoff, ip6h->nexthdr, dir) < 0 ||
-	     nf_flow_dnat_ipv6(flow, skb, ipv6_hdr(skb), thoff, dir) < 0))
+	     nf_flow_dnat_ipv6(flow, skb, ip6h, thoff, dir) < 0))
 		return -1;
 
 	return 0;
 }
 
 static int nf_flow_tuple_ipv6(struct sk_buff *skb, const struct net_device *dev,
-			      struct flow_offload_tuple *tuple)
+			      struct flow_offload_tuple *tuple, u32 *hdrsize)
 {
-	unsigned int thoff, hdrsize;
 	struct flow_ports *ports;
 	struct ipv6hdr *ip6h;
+	unsigned int thoff;
 
 	if (!pskb_may_pull(skb, sizeof(*ip6h)))
 		return -1;
@@ -450,10 +437,10 @@ static int nf_flow_tuple_ipv6(struct sk_
 
 	switch (ip6h->nexthdr) {
 	case IPPROTO_TCP:
-		hdrsize = sizeof(struct tcphdr);
+		*hdrsize = sizeof(struct tcphdr);
 		break;
 	case IPPROTO_UDP:
-		hdrsize = sizeof(struct udphdr);
+		*hdrsize = sizeof(struct udphdr);
 		break;
 	default:
 		return -1;
@@ -463,7 +450,7 @@ static int nf_flow_tuple_ipv6(struct sk_
 		return -1;
 
 	thoff = sizeof(*ip6h);
-	if (!pskb_may_pull(skb, thoff + hdrsize))
+	if (!pskb_may_pull(skb, thoff + *hdrsize))
 		return -1;
 
 	ip6h = ipv6_hdr(skb);
@@ -493,11 +480,12 @@ nf_flow_offload_ipv6_hook(void *priv, st
 	struct net_device *outdev;
 	struct ipv6hdr *ip6h;
 	struct rt6_info *rt;
+	u32 hdrsize;
 
 	if (skb->protocol != htons(ETH_P_IPV6))
 		return NF_ACCEPT;
 
-	if (nf_flow_tuple_ipv6(skb, state->in, &tuple) < 0)
+	if (nf_flow_tuple_ipv6(skb, state->in, &tuple, &hdrsize) < 0)
 		return NF_ACCEPT;
 
 	tuplehash = flow_offload_lookup(flow_table, &tuple);
@@ -523,13 +511,13 @@ nf_flow_offload_ipv6_hook(void *priv, st
 		return NF_ACCEPT;
 	}
 
-	if (skb_try_make_writable(skb, sizeof(*ip6h)))
+	if (skb_try_make_writable(skb, sizeof(*ip6h) + hdrsize))
 		return NF_DROP;
 
-	if (nf_flow_nat_ipv6(flow, skb, dir) < 0)
+	ip6h = ipv6_hdr(skb);
+	if (nf_flow_nat_ipv6(flow, skb, dir, ip6h) < 0)
 		return NF_DROP;
 
-	ip6h = ipv6_hdr(skb);
 	ip6h->hop_limit--;
 	skb->tstamp = 0;
 