1 From: Felix Fietkau <nbd@nbd.name>
2 Date: Thu, 15 Mar 2018 20:46:31 +0100
3 Subject: [PATCH] netfilter: nf_flow_table: support hw offload through
4 virtual interfaces
5
6 There are hardware offload devices that can offload traffic carried over
7 VLAN and PPPoE devices. It is also useful to be able to offload packets
8 routed through bridge interfaces.
9 Add support for finding the path to the offload device through these
10 virtual interfaces, while collecting useful parameters for the offload
11 device, like VLAN ID/protocol, PPPoE session and Ethernet MAC address.
12
13 Signed-off-by: Felix Fietkau <nbd@nbd.name>
14 ---
15
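As a sketch of how the new ndo_flow_offload_check hook is meant to be used (illustrative only, not part of the diff below): a VLAN device could resolve itself to its lower device and record the tag roughly as follows. is_vlan_dev(), vlan_dev_real_dev(), vlan_dev_vlan_id() and vlan_dev_vlan_proto() are existing kernel helpers; everything else, including the byte-order handling of vlan_proto and the chaining into the lower device's own hook, is an assumption made for illustration.

#include <linux/if_vlan.h>
#include <linux/netdevice.h>
#include <net/netfilter/nf_flow_table.h>

/* Hypothetical hook for a VLAN net_device: record the tag, then hand the
 * path down to the real device so it can refine it further.
 */
static int vlan_dev_flow_offload_check(struct flow_offload_hw_path *path)
{
	struct net_device *dev = path->dev;

	if (!is_vlan_dev(dev))
		return -EINVAL;

	/* flow_offload_hw_path describes at most one VLAN tag */
	if (path->flags & FLOW_OFFLOAD_PATH_VLAN)
		return -EOPNOTSUPP;

	path->flags |= FLOW_OFFLOAD_PATH_VLAN;
	/* vlan_proto is a plain u16 in this patch; host byte order is assumed */
	path->vlan_proto = ntohs(vlan_dev_vlan_proto(dev));
	path->vlan_id = vlan_dev_vlan_id(dev);
	path->dev = vlan_dev_real_dev(dev);

	if (path->dev->netdev_ops->ndo_flow_offload_check)
		return path->dev->netdev_ops->ndo_flow_offload_check(path);

	return 0;
}

In this patch, flow_offload_check_path() fills in the Ethernet addresses first and then invokes the hook on the input device, so a hook like the one above mainly has to rewrite path->dev and add its own flags.
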
16 --- a/include/linux/netdevice.h
17 +++ b/include/linux/netdevice.h
18 @@ -827,6 +827,7 @@ struct xfrmdev_ops {
19 #endif
20
21 struct flow_offload;
22 +struct flow_offload_hw_path;
23
24 enum flow_offload_type {
25 FLOW_OFFLOAD_ADD = 0,
26 @@ -1064,8 +1065,15 @@ enum flow_offload_type {
27 * int (*ndo_bridge_dellink)(struct net_device *dev, struct nlmsghdr *nlh,
28 * u16 flags);
29 *
30 + * int (*ndo_flow_offload_check)(struct flow_offload_hw_path *path);
31 + * For virtual devices like bridges, vlan, and pppoe, fill in the
32 + * underlying network device that can be used for offloading connections.
33 + * Return an error if offloading is not supported.
34 + *
35 * int (*ndo_flow_offload)(enum flow_offload_type type,
36 - * struct flow_offload *flow);
37 + * struct flow_offload *flow,
38 + * struct flow_offload_hw_path *src,
39 + * struct flow_offload_hw_path *dest);
40 * Adds/deletes flow entry to/from net device flowtable.
41 *
42 * int (*ndo_change_carrier)(struct net_device *dev, bool new_carrier);
43 @@ -1292,8 +1300,11 @@ struct net_device_ops {
44 int (*ndo_bridge_dellink)(struct net_device *dev,
45 struct nlmsghdr *nlh,
46 u16 flags);
47 + int (*ndo_flow_offload_check)(struct flow_offload_hw_path *path);
48 int (*ndo_flow_offload)(enum flow_offload_type type,
49 - struct flow_offload *flow);
50 + struct flow_offload *flow,
51 + struct flow_offload_hw_path *src,
52 + struct flow_offload_hw_path *dest);
53 int (*ndo_change_carrier)(struct net_device *dev,
54 bool new_carrier);
55 int (*ndo_get_phys_port_id)(struct net_device *dev,
56 --- a/include/net/netfilter/nf_flow_table.h
57 +++ b/include/net/netfilter/nf_flow_table.h
58 @@ -86,6 +86,21 @@ struct flow_offload {
59 };
60 };
61
62 +#define FLOW_OFFLOAD_PATH_ETHERNET BIT(0)
63 +#define FLOW_OFFLOAD_PATH_VLAN BIT(1)
64 +#define FLOW_OFFLOAD_PATH_PPPOE BIT(2)
65 +
66 +struct flow_offload_hw_path {
67 + struct net_device *dev;
68 + u32 flags;
69 +
70 + u8 eth_src[ETH_ALEN];
71 + u8 eth_dest[ETH_ALEN];
72 + u16 vlan_proto;
73 + u16 vlan_id;
74 + u16 pppoe_sid;
75 +};
76 +
77 #define NF_FLOW_TIMEOUT (30 * HZ)
78
79 struct nf_flow_route {
80 --- a/net/netfilter/nf_flow_table_hw.c
81 +++ b/net/netfilter/nf_flow_table_hw.c
82 @@ -19,48 +19,75 @@ struct flow_offload_hw {
83 enum flow_offload_type type;
84 struct flow_offload *flow;
85 struct nf_conn *ct;
86 - possible_net_t flow_hw_net;
87 +
88 + struct flow_offload_hw_path src;
89 + struct flow_offload_hw_path dest;
90 };
91
92 -static int do_flow_offload_hw(struct net *net, struct flow_offload *flow,
93 - int type)
94 +static void flow_offload_check_ethernet(struct flow_offload_tuple *tuple,
95 + struct flow_offload_hw_path *path)
96 {
97 - struct net_device *indev;
98 - int ret, ifindex;
99 + struct net_device *dev = path->dev;
100 + struct neighbour *n;
101
102 - ifindex = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.iifidx;
103 - indev = dev_get_by_index(net, ifindex);
104 - if (WARN_ON(!indev))
105 - return 0;
106 -
107 - mutex_lock(&nf_flow_offload_hw_mutex);
108 - ret = indev->netdev_ops->ndo_flow_offload(type, flow);
109 - mutex_unlock(&nf_flow_offload_hw_mutex);
110 + if (dev->type != ARPHRD_ETHER)
111 + return;
112
113 - dev_put(indev);
114 + memcpy(path->eth_src, path->dev->dev_addr, ETH_ALEN);
115 + n = dst_neigh_lookup(tuple->dst_cache, &tuple->src_v4);
116 + if (!n)
117 + return;
118
119 - return ret;
120 + memcpy(path->eth_dest, n->ha, ETH_ALEN);
121 + path->flags |= FLOW_OFFLOAD_PATH_ETHERNET;
122 + neigh_release(n);
123 }
124
125 -static void flow_offload_hw_work_add(struct flow_offload_hw *offload)
126 +static int flow_offload_check_path(struct net *net,
127 + struct flow_offload_tuple *tuple,
128 + struct flow_offload_hw_path *path)
129 {
130 - struct net *net;
131 - int ret;
132 + struct net_device *dev;
133
134 - if (nf_ct_is_dying(offload->ct))
135 - return;
136 + dev = dev_get_by_index_rcu(net, tuple->iifidx);
137 + if (!dev)
138 + return -ENOENT;
139 +
140 + path->dev = dev;
141 + flow_offload_check_ethernet(tuple, path);
142
143 - net = read_pnet(&offload->flow_hw_net);
144 - ret = do_flow_offload_hw(net, offload->flow, FLOW_OFFLOAD_ADD);
145 - if (ret >= 0)
146 - offload->flow->flags |= FLOW_OFFLOAD_HW;
147 + if (dev->netdev_ops->ndo_flow_offload_check)
148 + return dev->netdev_ops->ndo_flow_offload_check(path);
149 +
150 + return 0;
151 }
152
153 -static void flow_offload_hw_work_del(struct flow_offload_hw *offload)
154 +static int do_flow_offload_hw(struct flow_offload_hw *offload)
155 {
156 - struct net *net = read_pnet(&offload->flow_hw_net);
157 + struct net_device *src_dev = offload->src.dev;
158 + struct net_device *dest_dev = offload->dest.dev;
159 + int ret;
160 +
161 + ret = src_dev->netdev_ops->ndo_flow_offload(offload->type,
162 + offload->flow,
163 + &offload->src,
164 + &offload->dest);
165 +
166 + /* restore devices in case the driver mangled them */
167 + offload->src.dev = src_dev;
168 + offload->dest.dev = dest_dev;
169 +
170 + return ret;
171 +}
172
173 - do_flow_offload_hw(net, offload->flow, FLOW_OFFLOAD_DEL);
174 +static void flow_offload_hw_free(struct flow_offload_hw *offload)
175 +{
176 + dev_put(offload->src.dev);
177 + dev_put(offload->dest.dev);
178 + if (offload->ct)
179 + nf_conntrack_put(&offload->ct->ct_general);
180 + list_del(&offload->list);
181 + kfree(offload);
182 }
183
184 static void flow_offload_hw_work(struct work_struct *work)
185 @@ -73,18 +100,22 @@ static void flow_offload_hw_work(struct
186 spin_unlock_bh(&flow_offload_hw_pending_list_lock);
187
188 list_for_each_entry_safe(offload, next, &hw_offload_pending, list) {
189 + mutex_lock(&nf_flow_offload_hw_mutex);
190 switch (offload->type) {
191 case FLOW_OFFLOAD_ADD:
192 - flow_offload_hw_work_add(offload);
193 + if (nf_ct_is_dying(offload->ct))
194 + break;
195 +
196 + if (do_flow_offload_hw(offload) >= 0)
197 + offload->flow->flags |= FLOW_OFFLOAD_HW;
198 break;
199 case FLOW_OFFLOAD_DEL:
200 - flow_offload_hw_work_del(offload);
201 + do_flow_offload_hw(offload);
202 break;
203 }
204 - if (offload->ct)
205 - nf_conntrack_put(&offload->ct->ct_general);
206 - list_del(&offload->list);
207 - kfree(offload);
208 + mutex_unlock(&nf_flow_offload_hw_mutex);
209 +
210 + flow_offload_hw_free(offload);
211 }
212 }
213
214 @@ -97,20 +128,55 @@ static void flow_offload_queue_work(stru
215 schedule_work(&nf_flow_offload_hw_work);
216 }
217
218 +static struct flow_offload_hw *
219 +flow_offload_hw_prepare(struct net *net, struct flow_offload *flow)
220 +{
221 + struct flow_offload_hw_path src = {};
222 + struct flow_offload_hw_path dest = {};
223 + struct flow_offload_tuple *tuple;
224 + struct flow_offload_hw *offload = NULL;
225 +
226 + rcu_read_lock_bh();
227 +
228 + tuple = &flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple;
229 + if (flow_offload_check_path(net, tuple, &src))
230 + goto out;
231 +
232 + tuple = &flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple;
233 + if (flow_offload_check_path(net, tuple, &dest))
234 + goto out;
235 +
236 + if (!src.dev->netdev_ops->ndo_flow_offload)
237 + goto out;
238 +
239 + offload = kzalloc(sizeof(struct flow_offload_hw), GFP_ATOMIC);
240 + if (!offload)
241 + goto out;
242 +
243 + dev_hold(src.dev);
244 + dev_hold(dest.dev);
245 + offload->src = src;
246 + offload->dest = dest;
247 + offload->flow = flow;
248 +
249 +out:
250 + rcu_read_unlock_bh();
251 +
252 + return offload;
253 +}
254 +
255 static void flow_offload_hw_add(struct net *net, struct flow_offload *flow,
256 struct nf_conn *ct)
257 {
258 struct flow_offload_hw *offload;
259
260 - offload = kmalloc(sizeof(struct flow_offload_hw), GFP_ATOMIC);
261 + offload = flow_offload_hw_prepare(net, flow);
262 if (!offload)
263 return;
264
265 nf_conntrack_get(&ct->ct_general);
266 offload->type = FLOW_OFFLOAD_ADD;
267 offload->ct = ct;
268 - offload->flow = flow;
269 - write_pnet(&offload->flow_hw_net, net);
270
271 flow_offload_queue_work(offload);
272 }
273 @@ -119,14 +185,11 @@ static void flow_offload_hw_del(struct n
274 {
275 struct flow_offload_hw *offload;
276
277 - offload = kmalloc(sizeof(struct flow_offload_hw), GFP_ATOMIC);
278 + offload = flow_offload_hw_prepare(net, flow);
279 if (!offload)
280 return;
281
282 offload->type = FLOW_OFFLOAD_DEL;
283 - offload->ct = NULL;
284 - offload->flow = flow;
285 - write_pnet(&offload->flow_hw_net, net);
286
287 flow_offload_queue_work(offload);
288 }
289 @@ -153,12 +216,8 @@ static void __exit nf_flow_table_hw_modu
290 nf_flow_table_hw_unregister(&flow_offload_hw);
291 cancel_work_sync(&nf_flow_offload_hw_work);
292
293 - list_for_each_entry_safe(offload, next, &hw_offload_pending, list) {
294 - if (offload->ct)
295 - nf_conntrack_put(&offload->ct->ct_general);
296 - list_del(&offload->list);
297 - kfree(offload);
298 - }
299 + list_for_each_entry_safe(offload, next, &hw_offload_pending, list)
300 + flow_offload_hw_free(offload);
301 }
302
303 module_init(nf_flow_table_hw_module_init);
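
To show how the two path descriptors are meant to be consumed (again purely illustrative, not part of this patch): a hardware driver's ndo_flow_offload could translate them into whatever rule format its flow table uses. struct foo_hw_flow_rule and foo_hw_flow_add()/foo_hw_flow_del() are hypothetical stand-ins for a device-specific programming interface; only the flow_offload_hw_path fields and the hook signature come from this patch.

struct foo_hw_flow_rule {		/* hypothetical hardware rule layout */
	u8 eth_src[ETH_ALEN];
	u8 eth_dest[ETH_ALEN];
	u16 vlan_proto;
	u16 vlan_id;
	u16 pppoe_sid;
	bool has_vlan;
	bool has_pppoe;
};

static int foo_ndo_flow_offload(enum flow_offload_type type,
				struct flow_offload *flow,
				struct flow_offload_hw_path *src,
				struct flow_offload_hw_path *dest)
{
	struct foo_hw_flow_rule rule = {};

	/* src describes the ingress side; a complete driver would program
	 * both directions, this sketch only looks at the egress path.
	 */
	if (!(dest->flags & FLOW_OFFLOAD_PATH_ETHERNET))
		return -EOPNOTSUPP;

	ether_addr_copy(rule.eth_src, dest->eth_src);
	ether_addr_copy(rule.eth_dest, dest->eth_dest);

	if (dest->flags & FLOW_OFFLOAD_PATH_VLAN) {
		rule.has_vlan = true;
		rule.vlan_proto = dest->vlan_proto;
		rule.vlan_id = dest->vlan_id;
	}

	if (dest->flags & FLOW_OFFLOAD_PATH_PPPOE) {
		rule.has_pppoe = true;
		rule.pppoe_sid = dest->pppoe_sid;
	}

	switch (type) {
	case FLOW_OFFLOAD_ADD:
		return foo_hw_flow_add(netdev_priv(dest->dev), flow, &rule);	/* hypothetical */
	case FLOW_OFFLOAD_DEL:
		return foo_hw_flow_del(netdev_priv(dest->dev), flow);		/* hypothetical */
	}

	return -EINVAL;
}

Note that do_flow_offload_hw() above invokes the hook through the source device while passing both paths, so a single driver sees the ingress and egress side of each offloaded flow.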