From: Pablo Neira Ayuso <pablo@netfilter.org>
Date: Sun, 28 Mar 2021 23:08:55 +0200
Subject: [PATCH] netfilter: flowtable: dst_check() from garbage collector path

Move dst_check() to the garbage collector path. Stale routes trigger the
flow entry teardown state which makes affected flows go back to the
classic forwarding path to re-evaluate flow offloading.

IPv6 requires the dst cookie to work, store it in the flow_tuple,
otherwise dst_check() always fails.

Fixes: e5075c0badaa ("netfilter: flowtable: call dst_check() to fall back to classic forwarding")
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
---

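Backport editor's note, not part of the upstream commit: the sketch below
only illustrates the dst_check() contract that the new garbage collector
check relies on; the helper flow_dst_is_valid() is hypothetical and exists
purely for illustration. dst_check() returns NULL once a cached route has
become obsolete. IPv4's ipv4_dst_check() ignores the cookie argument, while
IPv6's ip6_dst_check() validates it against the FIB serial number that
rt6_get_cookie() captures, which is why the commit message notes that
dst_check() always fails for IPv6 unless the cookie is stored.

	#include <net/dst.h>
	#include <net/netfilter/nf_flow_table.h>

	/* Hypothetical helper, for illustration only: the per-tuple
	 * validation that nf_flow_offload_gc_step() now performs with
	 * the cookie recorded when the flow entry was created.
	 */
	static bool flow_dst_is_valid(struct flow_offload_tuple *tuple)
	{
		/* dst_check() calls dst->ops->check(dst, cookie) on an
		 * obsolete dst and returns NULL when the cached route
		 * must no longer be used, e.g. after a routing change.
		 */
		return dst_check(tuple->dst_cache, tuple->dst_cookie) != NULL;
	}

A tuple that fails this validation marks the flow for teardown
(NF_FLOW_TEARDOWN), sending its packets back to the classic forwarding
path until offloading is re-evaluated.
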
--- a/include/net/netfilter/nf_flow_table.h
+++ b/include/net/netfilter/nf_flow_table.h
@@ -129,7 +129,10 @@ struct flow_offload_tuple {
 					in_vlan_ingress:2;
 	u16				mtu;
 	union {
-		struct dst_entry	*dst_cache;
+		struct {
+			struct dst_entry *dst_cache;
+			u32		dst_cookie;
+		};
 		struct {
 			u32		ifidx;
 			u32		hw_ifidx;
--- a/net/netfilter/nf_flow_table_core.c
+++ b/net/netfilter/nf_flow_table_core.c
@@ -74,6 +74,18 @@ err_ct_refcnt:
 }
 EXPORT_SYMBOL_GPL(flow_offload_alloc);
 
+static u32 flow_offload_dst_cookie(struct flow_offload_tuple *flow_tuple)
+{
+	const struct rt6_info *rt;
+
+	if (flow_tuple->l3proto == NFPROTO_IPV6) {
+		rt = (const struct rt6_info *)flow_tuple->dst_cache;
+		return rt6_get_cookie(rt);
+	}
+
+	return 0;
+}
+
 static int flow_offload_fill_route(struct flow_offload *flow,
 				   const struct nf_flow_route *route,
 				   enum flow_offload_tuple_dir dir)
@@ -116,6 +128,7 @@ static int flow_offload_fill_route(struc
 			return -1;
 
 		flow_tuple->dst_cache = dst;
+		flow_tuple->dst_cookie = flow_offload_dst_cookie(flow_tuple);
 		break;
 	}
 	flow_tuple->xmit_type = route->tuple[dir].xmit_type;
@@ -389,11 +402,33 @@ nf_flow_table_iterate(struct nf_flowtabl
 	return err;
 }
 
+static bool flow_offload_stale_dst(struct flow_offload_tuple *tuple)
+{
+	struct dst_entry *dst;
+
+	if (tuple->xmit_type == FLOW_OFFLOAD_XMIT_NEIGH ||
+	    tuple->xmit_type == FLOW_OFFLOAD_XMIT_XFRM) {
+		dst = tuple->dst_cache;
+		if (!dst_check(dst, tuple->dst_cookie))
+			return true;
+	}
+
+	return false;
+}
+
+static bool nf_flow_has_stale_dst(struct flow_offload *flow)
+{
+	return flow_offload_stale_dst(&flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple) ||
+	       flow_offload_stale_dst(&flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple);
+}
+
 static void nf_flow_offload_gc_step(struct flow_offload *flow, void *data)
 {
 	struct nf_flowtable *flow_table = data;
 
-	if (nf_flow_has_expired(flow) || nf_ct_is_dying(flow->ct))
+	if (nf_flow_has_expired(flow) ||
+	    nf_ct_is_dying(flow->ct) ||
+	    nf_flow_has_stale_dst(flow))
 		set_bit(NF_FLOW_TEARDOWN, &flow->flags);
 
 	if (test_bit(NF_FLOW_TEARDOWN, &flow->flags)) {
--- a/net/netfilter/nf_flow_table_ip.c
+++ b/net/netfilter/nf_flow_table_ip.c
@@ -364,15 +364,6 @@ nf_flow_offload_ip_hook(void *priv, stru
 	if (nf_flow_state_check(flow, iph->protocol, skb, thoff))
 		return NF_ACCEPT;
 
-	if (tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_NEIGH ||
-	    tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM) {
-		rt = (struct rtable *)tuplehash->tuple.dst_cache;
-		if (!dst_check(&rt->dst, 0)) {
-			flow_offload_teardown(flow);
-			return NF_ACCEPT;
-		}
-	}
-
 	if (skb_try_make_writable(skb, thoff + hdrsize))
 		return NF_DROP;
 
@@ -391,6 +382,7 @@ nf_flow_offload_ip_hook(void *priv, stru
 	nf_ct_acct_update(flow->ct, tuplehash->tuple.dir, skb->len);
 
 	if (unlikely(tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM)) {
+		rt = (struct rtable *)tuplehash->tuple.dst_cache;
 		memset(skb->cb, 0, sizeof(struct inet_skb_parm));
 		IPCB(skb)->iif = skb->dev->ifindex;
 		IPCB(skb)->flags = IPSKB_FORWARDED;
@@ -399,6 +391,7 @@ nf_flow_offload_ip_hook(void *priv, stru
 
 	switch (tuplehash->tuple.xmit_type) {
 	case FLOW_OFFLOAD_XMIT_NEIGH:
+		rt = (struct rtable *)tuplehash->tuple.dst_cache;
 		outdev = rt->dst.dev;
 		skb->dev = outdev;
 		nexthop = rt_nexthop(rt, flow->tuplehash[!dir].tuple.src_v4.s_addr);
@@ -607,15 +600,6 @@ nf_flow_offload_ipv6_hook(void *priv, st
 	if (nf_flow_state_check(flow, ip6h->nexthdr, skb, thoff))
 		return NF_ACCEPT;
 
-	if (tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_NEIGH ||
-	    tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM) {
-		rt = (struct rt6_info *)tuplehash->tuple.dst_cache;
-		if (!dst_check(&rt->dst, 0)) {
-			flow_offload_teardown(flow);
-			return NF_ACCEPT;
-		}
-	}
-
 	if (skb_try_make_writable(skb, thoff + hdrsize))
 		return NF_DROP;
 
@@ -633,6 +617,7 @@ nf_flow_offload_ipv6_hook(void *priv, st
 	nf_ct_acct_update(flow->ct, tuplehash->tuple.dir, skb->len);
 
 	if (unlikely(tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM)) {
+		rt = (struct rt6_info *)tuplehash->tuple.dst_cache;
 		memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
 		IP6CB(skb)->iif = skb->dev->ifindex;
 		IP6CB(skb)->flags = IP6SKB_FORWARDED;
@@ -641,6 +626,7 @@ nf_flow_offload_ipv6_hook(void *priv, st
 
 	switch (tuplehash->tuple.xmit_type) {
 	case FLOW_OFFLOAD_XMIT_NEIGH:
+		rt = (struct rt6_info *)tuplehash->tuple.dst_cache;
 		outdev = rt->dst.dev;
 		skb->dev = outdev;
 		nexthop = rt6_nexthop(rt, &flow->tuplehash[!dir].tuple.src_v6);