batman-adv: Drop compat-hack for netif_trans_update
batman-adv/src/compat-hacks.h (feed/routing.git)
/* Please avoid adding hacks here - instead add them to mac80211/backports.git */

#undef CONFIG_MODULE_STRIPPED

#include <linux/version.h>	/* LINUX_VERSION_CODE */
#include <linux/types.h>

#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0)

#define dev_get_iflink(_net_dev) ((_net_dev)->iflink)

#endif /* < KERNEL_VERSION(4, 1, 0) */

#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)

#include <linux/netdevice.h>

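/* Note (added comment): before 4.5 netdev_master_upper_dev_link() only takes
 * (dev, upper_dev), and before 4.15 it lacks the extack argument.  The
 * BUILD_BUG_ON()s below make sure callers never pass anything the older API
 * would silently drop.
 */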
#define netdev_master_upper_dev_link(dev, upper_dev, upper_priv, upper_info, extack) ({\
	BUILD_BUG_ON(upper_priv != NULL); \
	BUILD_BUG_ON(upper_info != NULL); \
	BUILD_BUG_ON(extack != NULL); \
	netdev_master_upper_dev_link(dev, upper_dev); \
})

#elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0)

#include <linux/netdevice.h>

#define netdev_master_upper_dev_link(dev, upper_dev, upper_priv, upper_info, extack) ({\
	BUILD_BUG_ON(extack != NULL); \
	netdev_master_upper_dev_link(dev, upper_dev, upper_priv, upper_info); \
})

#endif /* < KERNEL_VERSION(4, 5, 0) */


#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0)

/* wild hack for batadv_getlink_net only */
#define get_link_net get_xstats_size || 1 ? fallback_net : (struct net*)netdev->rtnl_link_ops->get_xstats_size
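/* Note (added comment): the "|| 1" forces the conditional expression to
 * always evaluate to fallback_net; the otherwise unused get_xstats_size
 * member only keeps the "->get_link_net" access compiling on kernels that
 * predate that callback.
 */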

#endif /* < KERNEL_VERSION(4, 0, 0) */


#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 2, 0)

struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
				     unsigned int transport_len,
				     __sum16 (*skb_chkf)(struct sk_buff *skb));

int ip_mc_check_igmp(struct sk_buff *skb, struct sk_buff **skb_trimmed);

int ipv6_mc_check_mld(struct sk_buff *skb, struct sk_buff **skb_trimmed);

#endif /* < KERNEL_VERSION(4, 2, 0) */

#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0)

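/* Note (added comment): IFF_NO_QUEUE does not exist before 4.3.  Expanding
 * it to 0 turns "dev->priv_flags |= IFF_NO_QUEUE" at the call site into a
 * no-op and appends "dev->tx_queue_len = 0", the older way of requesting a
 * queueless device.  This relies on the caller using a variable named "dev".
 */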
#define IFF_NO_QUEUE 0; dev->tx_queue_len = 0

static inline bool hlist_fake(struct hlist_node *h)
{
	return h->pprev == &h->next;
}

#endif /* < KERNEL_VERSION(4, 3, 0) */

#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)

#include <linux/ethtool.h>

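/* Note (added comment): kernels before 4.6 have no struct
 * ethtool_link_ksettings.  Provide a minimal replacement with just the
 * fields used here and fill it via the legacy __ethtool_get_settings() /
 * struct ethtool_cmd API.
 */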
#define ethtool_link_ksettings batadv_ethtool_link_ksettings

struct batadv_ethtool_link_ksettings {
	struct {
		__u32	speed;
		__u8	duplex;
		__u8	autoneg;
	} base;
};

#define __ethtool_get_link_ksettings(__dev, __link_settings) \
	batadv_ethtool_get_link_ksettings(__dev, __link_settings)

static inline int
batadv_ethtool_get_link_ksettings(struct net_device *dev,
				  struct ethtool_link_ksettings *link_ksettings)
{
	struct ethtool_cmd cmd;
	int ret;

	memset(&cmd, 0, sizeof(cmd));
	ret = __ethtool_get_settings(dev, &cmd);

	if (ret != 0)
		return ret;

	link_ksettings->base.duplex = cmd.duplex;
	link_ksettings->base.speed = ethtool_cmd_speed(&cmd);
	link_ksettings->base.autoneg = cmd.autoneg;

	return 0;
}

#endif /* < KERNEL_VERSION(4, 6, 0) */


#include_next <linux/netlink.h>

#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 7, 0)

#include_next <net/netlink.h>

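/* Note (added comment): kernels before 4.7 lack nla_put_u64_64bit() and the
 * helpers needed to pad 64-bit netlink attributes to an 8-byte boundary.
 * Reimplement them locally so 64-bit attributes stay aligned on
 * architectures without efficient unaligned access.
 */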
static inline bool batadv_nla_need_padding_for_64bit(struct sk_buff *skb);

static inline int batadv_nla_align_64bit(struct sk_buff *skb, int padattr)
{
	if (batadv_nla_need_padding_for_64bit(skb) &&
	    !nla_reserve(skb, padattr, 0))
		return -EMSGSIZE;

	return 0;
}

static inline struct nlattr *batadv__nla_reserve_64bit(struct sk_buff *skb,
						       int attrtype,
						       int attrlen, int padattr)
{
	if (batadv_nla_need_padding_for_64bit(skb))
		batadv_nla_align_64bit(skb, padattr);

	return __nla_reserve(skb, attrtype, attrlen);
}

static inline void batadv__nla_put_64bit(struct sk_buff *skb, int attrtype,
					 int attrlen, const void *data,
					 int padattr)
{
	struct nlattr *nla;

	nla = batadv__nla_reserve_64bit(skb, attrtype, attrlen, padattr);
	memcpy(nla_data(nla), data, attrlen);
}

static inline bool batadv_nla_need_padding_for_64bit(struct sk_buff *skb)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	/* The nlattr header is 4 bytes in size, that's why we test
	 * if the skb->data _is_ aligned. A NOP attribute, plus
	 * nlattr header for next attribute, will make nla_data()
	 * 8-byte aligned.
	 */
	if (IS_ALIGNED((unsigned long)skb_tail_pointer(skb), 8))
		return true;
#endif
	return false;
}

static inline int batadv_nla_total_size_64bit(int payload)
{
	return NLA_ALIGN(nla_attr_size(payload))
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
		+ NLA_ALIGN(nla_attr_size(0))
#endif
		;
}

static inline int batadv_nla_put_64bit(struct sk_buff *skb, int attrtype,
				       int attrlen, const void *data,
				       int padattr)
{
	size_t len;

	if (batadv_nla_need_padding_for_64bit(skb))
		len = batadv_nla_total_size_64bit(attrlen);
	else
		len = nla_total_size(attrlen);
	if (unlikely(skb_tailroom(skb) < len))
		return -EMSGSIZE;

	batadv__nla_put_64bit(skb, attrtype, attrlen, data, padattr);
	return 0;
}

#ifdef nla_put_u64_64bit
#undef nla_put_u64_64bit
#endif

#define nla_put_u64_64bit(_skb, _attrtype, _value, _padattr) \
	batadv_nla_put_u64_64bit(_skb, _attrtype, _value, _padattr)
static inline int batadv_nla_put_u64_64bit(struct sk_buff *skb, int attrtype,
					   u64 value, int padattr)
{
	return batadv_nla_put_64bit(skb, attrtype, sizeof(u64), &value,
				    padattr);
}

#endif /* < KERNEL_VERSION(4, 7, 0) */


#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)

#include_next <linux/cache.h>

/* hack for netlink.c which marked the family ops as ro */
#ifdef __ro_after_init
#undef __ro_after_init
#endif
#define __ro_after_init

#endif /* < KERNEL_VERSION(4, 10, 0) */

#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 9)

#include <linux/netdevice.h>

/* work around missing attribute needs_free_netdev and priv_destructor in
 * net_device
 */
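/* Note (added comment): the wrapped ether_setup() below relies on a nested
 * function (a GCC extension) to define batadv_softif_free2(), an old-style
 * destructor that also frees the net_device, plus two dummy variables
 * t1/t2.  The macros that follow redirect assignments to the missing
 * needs_free_netdev/priv_destructor members to the old "destructor" member,
 * letting the dummies swallow the original right-hand sides.
 */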
#define ether_setup(dev) \
	void batadv_softif_free2(struct net_device *dev) \
	{ \
		batadv_softif_free(dev); \
		free_netdev(dev); \
	} \
	void (*t1)(struct net_device *dev) __attribute__((unused)); \
	bool t2 __attribute__((unused)); \
	ether_setup(dev)
#define needs_free_netdev destructor = batadv_softif_free2; t2
#define priv_destructor destructor = batadv_softif_free2; t1

#endif /* < KERNEL_VERSION(4, 11, 9) */

#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0)

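/* Note (added comment): skb_put() only returns void * (and skb_put_zero() /
 * skb_put_data() only exist) since 4.13; provide equivalent wrappers for
 * older kernels.
 */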
static inline void *batadv_skb_put(struct sk_buff *skb, unsigned int len)
{
	return (void *)skb_put(skb, len);
}
#ifdef skb_put
#undef skb_put
#endif

#define skb_put batadv_skb_put

static inline void *batadv_skb_put_zero(struct sk_buff *skb, unsigned int len)
{
	void *tmp = skb_put(skb, len);

	memset(tmp, 0, len);

	return tmp;
}
#ifdef skb_put_zero
#undef skb_put_zero
#endif

#define skb_put_zero batadv_skb_put_zero

static inline void *batadv_skb_put_data(struct sk_buff *skb, const void *data,
					unsigned int len)
{
	void *tmp = skb_put(skb, len);

	memcpy(tmp, data, len);

	return tmp;
}
#ifdef skb_put_data
#undef skb_put_data
#endif

#define skb_put_data batadv_skb_put_data

#endif /* < KERNEL_VERSION(4, 13, 0) */

#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0)

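/* Note (added comment): the ndo_add_slave() callback has no extack argument
 * on these older kernels, so drop the third argument from
 * batadv_softif_slave_add() again.
 */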
#define batadv_softif_slave_add(__dev, __slave_dev, __extack) \
	batadv_softif_slave_add(__dev, __slave_dev)

#endif /* < KERNEL_VERSION(4, 15, 0) */

#ifndef from_timer

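/* Note (added comment): kernels without from_timer() still use the old timer
 * API whose callback receives an "unsigned long" cookie.  Map timer_setup()
 * onto __setup_timer() with the timer itself as that cookie, so from_timer()
 * can recover the containing structure via container_of().
 */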
#define TIMER_DATA_TYPE unsigned long
#define TIMER_FUNC_TYPE void (*)(TIMER_DATA_TYPE)

static inline void timer_setup(struct timer_list *timer,
			       void (*callback)(struct timer_list *),
			       unsigned int flags)
{
	__setup_timer(timer, (TIMER_FUNC_TYPE)callback,
		      (TIMER_DATA_TYPE)timer, flags);
}

#define from_timer(var, callback_timer, timer_fieldname) \
	container_of(callback_timer, typeof(*var), timer_fieldname)

#endif /* !from_timer */

#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)


#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 18, 0)

#include <net/cfg80211.h>

/* cfg80211 fix: https://patchwork.kernel.org/patch/10449857/ */
static inline int batadv_cfg80211_get_station(struct net_device *dev,
					      const u8 *mac_addr,
					      struct station_info *sinfo)
{
	memset(sinfo, 0, sizeof(*sinfo));
	return cfg80211_get_station(dev, mac_addr, sinfo);
}

#define cfg80211_get_station(dev, mac_addr, sinfo) \
	batadv_cfg80211_get_station(dev, mac_addr, sinfo)

#endif /* < KERNEL_VERSION(4, 18, 0) */


#ifdef __CHECK_POLL
typedef unsigned __bitwise __poll_t;
#else
typedef unsigned __poll_t;
#endif

#endif /* < KERNEL_VERSION(4, 16, 0) */

#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 0, 0)

static inline int batadv_access_ok(int type, const void __user *p,
				   unsigned long size)
{
	return access_ok(type, p, size);
}

#ifdef access_ok
#undef access_ok
#endif

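/* Note (added comment): since 5.0 access_ok() takes just (addr, size); on
 * older kernels it still wants a VERIFY_READ/VERIFY_WRITE type.  The
 * argument-counting macro below accepts both call forms and forwards them
 * to the kernel's three-argument access_ok() via batadv_access_ok().
 */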
#define access_ok_get(_1, _2, _3, access_ok_name, ...) access_ok_name
#define access_ok(...) \
	access_ok_get(__VA_ARGS__, access_ok3, access_ok2)(__VA_ARGS__)

#define access_ok2(addr, size) batadv_access_ok(VERIFY_WRITE, (addr), (size))
#define access_ok3(type, addr, size) batadv_access_ok((type), (addr), (size))

#endif /* < KERNEL_VERSION(5, 0, 0) */

/* <DECLARE_EWMA> */

#include <linux/version.h>
#include_next <linux/average.h>

#include <linux/bug.h>

#ifdef DECLARE_EWMA
#undef DECLARE_EWMA
#endif /* DECLARE_EWMA */

/*
 * Exponentially weighted moving average (EWMA)
 *
 * This implements a fixed-precision EWMA algorithm, with both the
 * precision and fall-off coefficient determined at compile-time
 * and built into the generated helper functions.
 *
 * The first argument to the macro is the name that will be used
 * for the struct and helper functions.
 *
 * The second argument, the precision, expresses how many bits are
 * used for the fractional part of the fixed-precision values.
 *
 * The third argument, the weight reciprocal, determines how the
 * new values will be weighed vs. the old state: new values get
 * weight 1/weight_rcp and old values 1 - 1/weight_rcp. Note that
 * this parameter must be a power of two for efficiency.
 */

#define DECLARE_EWMA(name, _precision, _weight_rcp) \
	struct ewma_##name { \
		unsigned long internal; \
	}; \
	static inline void ewma_##name##_init(struct ewma_##name *e) \
	{ \
		BUILD_BUG_ON(!__builtin_constant_p(_precision)); \
		BUILD_BUG_ON(!__builtin_constant_p(_weight_rcp)); \
		/* \
		 * Even if you want to feed it just 0/1 you should have \
		 * some bits for the non-fractional part... \
		 */ \
		BUILD_BUG_ON((_precision) > 30); \
		BUILD_BUG_ON_NOT_POWER_OF_2(_weight_rcp); \
		e->internal = 0; \
	} \
	static inline unsigned long \
	ewma_##name##_read(struct ewma_##name *e) \
	{ \
		BUILD_BUG_ON(!__builtin_constant_p(_precision)); \
		BUILD_BUG_ON(!__builtin_constant_p(_weight_rcp)); \
		BUILD_BUG_ON((_precision) > 30); \
		BUILD_BUG_ON_NOT_POWER_OF_2(_weight_rcp); \
		return e->internal >> (_precision); \
	} \
	static inline void ewma_##name##_add(struct ewma_##name *e, \
					     unsigned long val) \
	{ \
		unsigned long internal = READ_ONCE(e->internal); \
		unsigned long weight_rcp = ilog2(_weight_rcp); \
		unsigned long precision = _precision; \
		\
		BUILD_BUG_ON(!__builtin_constant_p(_precision)); \
		BUILD_BUG_ON(!__builtin_constant_p(_weight_rcp)); \
		BUILD_BUG_ON((_precision) > 30); \
		BUILD_BUG_ON_NOT_POWER_OF_2(_weight_rcp); \
		\
		WRITE_ONCE(e->internal, internal ? \
			   (((internal << weight_rcp) - internal) + \
			    (val << precision)) >> weight_rcp : \
			   (val << precision)); \
	}
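
/* Illustrative usage (added comment, not part of this header); the name and
 * parameters below are only examples, assuming 10 fractional bits and a
 * weight reciprocal of 8:
 *
 *	DECLARE_EWMA(throughput, 10, 8)
 *
 *	struct ewma_throughput ewma;
 *	unsigned long avg;
 *
 *	ewma_throughput_init(&ewma);
 *	ewma_throughput_add(&ewma, 1000);
 *	ewma_throughput_add(&ewma, 2000);
 *	avg = ewma_throughput_read(&ewma);
 */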

/* </DECLARE_EWMA> */