/* Please avoid adding hacks here - instead add them to mac80211/backports.git */
/* NOTE(review): the build system apparently pre-defines
 * CONFIG_MODULE_STRIPPED; it is dropped here so it cannot affect the
 * compat code below - confirm the rationale against the build scripts.
 */
#undef CONFIG_MODULE_STRIPPED

#include <linux/version.h>	/* LINUX_VERSION_CODE */
#include <linux/types.h>
8 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0)
10 #define dev_get_iflink(_net_dev) ((_net_dev)->iflink)
12 #endif /* < KERNEL_VERSION(4, 1, 0) */
14 #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)
16 #include <linux/netdevice.h>
18 #define netdev_master_upper_dev_link(dev, upper_dev, upper_priv, upper_info) ({\
19 BUILD_BUG_ON(upper_priv != NULL); \
20 BUILD_BUG_ON(upper_info != NULL); \
21 netdev_set_master(dev, upper_dev); \
24 #elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)
26 #include <linux/netdevice.h>
28 #define netdev_master_upper_dev_link(dev, upper_dev, upper_priv, upper_info) ({\
29 BUILD_BUG_ON(upper_priv != NULL); \
30 BUILD_BUG_ON(upper_info != NULL); \
31 netdev_master_upper_dev_link(dev, upper_dev); \
34 #endif /* < KERNEL_VERSION(4, 5, 0) */
37 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0)
39 /* wild hack for batadv_getlink_net only */
40 #define get_link_net get_xstats_size || 1 ? fallback_net : (struct net*)netdev->rtnl_link_ops->get_xstats_size
42 #endif /* < KERNEL_VERSION(4, 0, 0) */
45 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 2, 0)
47 struct sk_buff
*skb_checksum_trimmed(struct sk_buff
*skb
,
48 unsigned int transport_len
,
49 __sum16(*skb_chkf
)(struct sk_buff
*skb
));
51 int ip_mc_check_igmp(struct sk_buff
*skb
, struct sk_buff
**skb_trimmed
);
53 int ipv6_mc_check_mld(struct sk_buff
*skb
, struct sk_buff
**skb_trimmed
);
55 #endif /* < KERNEL_VERSION(4, 2, 0) */
57 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0)
59 #define IFF_NO_QUEUE 0; dev->tx_queue_len = 0
61 static inline bool hlist_fake(struct hlist_node
*h
)
63 return h
->pprev
== &h
->next
;
66 #endif /* < KERNEL_VERSION(4, 3, 0) */
68 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
70 #include <linux/ethtool.h>
72 #define ethtool_link_ksettings batadv_ethtool_link_ksettings
74 struct batadv_ethtool_link_ksettings
{
81 #define __ethtool_get_link_ksettings(__dev, __link_settings) \
82 batadv_ethtool_get_link_ksettings(__dev, __link_settings)
85 batadv_ethtool_get_link_ksettings(struct net_device
*dev
,
86 struct ethtool_link_ksettings
*link_ksettings
)
88 struct ethtool_cmd cmd
;
91 memset(&cmd
, 0, sizeof(cmd
));
92 ret
= __ethtool_get_settings(dev
, &cmd
);
97 link_ksettings
->base
.duplex
= cmd
.duplex
;
98 link_ksettings
->base
.speed
= ethtool_cmd_speed(&cmd
);
103 #endif /* < KERNEL_VERSION(4, 6, 0) */
105 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 7, 0)
107 #define netif_trans_update batadv_netif_trans_update
108 static inline void batadv_netif_trans_update(struct net_device
*dev
)
110 dev
->trans_start
= jiffies
;
113 #endif /* < KERNEL_VERSION(4, 7, 0) */
116 #include_next <linux/netlink.h>
118 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 7, 0)
120 #include_next <net/netlink.h>
122 static inline bool batadv_nla_need_padding_for_64bit(struct sk_buff
*skb
);
124 static inline int batadv_nla_align_64bit(struct sk_buff
*skb
, int padattr
)
126 if (batadv_nla_need_padding_for_64bit(skb
) &&
127 !nla_reserve(skb
, padattr
, 0))
/* __nla_reserve() with 64-bit alignment: insert @padattr padding first
 * when the skb tail is not suitably aligned, then reserve the attribute.
 */
static inline struct nlattr *batadv__nla_reserve_64bit(struct sk_buff *skb,
						       int attrtype,
						       int attrlen, int padattr)
{
	if (batadv_nla_need_padding_for_64bit(skb))
		batadv_nla_align_64bit(skb, padattr);

	return __nla_reserve(skb, attrtype, attrlen);
}
/* Unchecked 64-bit-aligned attribute put: reserve the (padded) attribute
 * and copy @attrlen bytes of @data into it.  Like __nla_put(), the caller
 * must have ensured sufficient tailroom beforehand.
 */
static inline void batadv__nla_put_64bit(struct sk_buff *skb, int attrtype,
					 int attrlen, const void *data,
					 int padattr)
{
	struct nlattr *nla;

	nla = batadv__nla_reserve_64bit(skb, attrtype, attrlen, padattr);
	memcpy(nla_data(nla), data, attrlen);
}
153 static inline bool batadv_nla_need_padding_for_64bit(struct sk_buff
*skb
)
155 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
156 /* The nlattr header is 4 bytes in size, that's why we test
157 * if the skb->data _is_ aligned. A NOP attribute, plus
158 * nlattr header for next attribute, will make nla_data()
161 if (IS_ALIGNED((unsigned long)skb_tail_pointer(skb
), 8))
/* Total netlink message size of a 64-bit attribute with @payload bytes,
 * including the NOP padding attribute on strict-alignment architectures.
 */
static inline int batadv_nla_total_size_64bit(int payload)
{
	return NLA_ALIGN(nla_attr_size(payload))
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
		+ NLA_ALIGN(nla_attr_size(0))
#endif
		;
}
176 static inline int batadv_nla_put_64bit(struct sk_buff
*skb
, int attrtype
,
177 int attrlen
, const void *data
,
182 if (batadv_nla_need_padding_for_64bit(skb
))
183 len
= batadv_nla_total_size_64bit(attrlen
);
185 len
= nla_total_size(attrlen
);
186 if (unlikely(skb_tailroom(skb
) < len
))
189 batadv__nla_put_64bit(skb
, attrtype
, attrlen
, data
, padattr
);
193 #define nla_put_u64_64bit(_skb, _attrtype, _value, _padattr) \
194 batadv_nla_put_u64_64bit(_skb, _attrtype, _value, _padattr)
195 static inline int batadv_nla_put_u64_64bit(struct sk_buff
*skb
, int attrtype
,
196 u64 value
, int padattr
)
198 return batadv_nla_put_64bit(skb
, attrtype
, sizeof(u64
), &value
,
202 #endif /* < KERNEL_VERSION(4, 7, 0) */
205 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
207 #include_next <linux/cache.h>
209 /* hack for netlink.c which marked the family ops as ro */
210 #ifdef __ro_after_init
211 #undef __ro_after_init
213 #define __ro_after_init
215 #endif /* < KERNEL_VERSION(4, 10, 0) */
219 #include <linux/version.h>
220 #include_next <linux/average.h>
222 #include <linux/bug.h>
226 #endif /* DECLARE_EWMA */
229 * Exponentially weighted moving average (EWMA)
231 * This implements a fixed-precision EWMA algorithm, with both the
232 * precision and fall-off coefficient determined at compile-time
233 * and built into the generated helper funtions.
235 * The first argument to the macro is the name that will be used
236 * for the struct and helper functions.
238 * The second argument, the precision, expresses how many bits are
239 * used for the fractional part of the fixed-precision values.
241 * The third argument, the weight reciprocal, determines how the
242 * new values will be weighed vs. the old state, new values will
243 * get weight 1/weight_rcp and old values 1-1/weight_rcp. Note
244 * that this parameter must be a power of two for efficiency.
247 #define DECLARE_EWMA(name, _precision, _weight_rcp) \
248 struct ewma_##name { \
249 unsigned long internal; \
251 static inline void ewma_##name##_init(struct ewma_##name *e) \
253 BUILD_BUG_ON(!__builtin_constant_p(_precision)); \
254 BUILD_BUG_ON(!__builtin_constant_p(_weight_rcp)); \
256 * Even if you want to feed it just 0/1 you should have \
257 * some bits for the non-fractional part... \
259 BUILD_BUG_ON((_precision) > 30); \
260 BUILD_BUG_ON_NOT_POWER_OF_2(_weight_rcp); \
263 static inline unsigned long \
264 ewma_##name##_read(struct ewma_##name *e) \
266 BUILD_BUG_ON(!__builtin_constant_p(_precision)); \
267 BUILD_BUG_ON(!__builtin_constant_p(_weight_rcp)); \
268 BUILD_BUG_ON((_precision) > 30); \
269 BUILD_BUG_ON_NOT_POWER_OF_2(_weight_rcp); \
270 return e->internal >> (_precision); \
272 static inline void ewma_##name##_add(struct ewma_##name *e, \
275 unsigned long internal = ACCESS_ONCE(e->internal); \
276 unsigned long weight_rcp = ilog2(_weight_rcp); \
277 unsigned long precision = _precision; \
279 BUILD_BUG_ON(!__builtin_constant_p(_precision)); \
280 BUILD_BUG_ON(!__builtin_constant_p(_weight_rcp)); \
281 BUILD_BUG_ON((_precision) > 30); \
282 BUILD_BUG_ON_NOT_POWER_OF_2(_weight_rcp); \
284 ACCESS_ONCE(e->internal) = internal ? \
285 (((internal << weight_rcp) - internal) + \
286 (val << precision)) >> weight_rcp : \
287 (val << precision); \
290 /* </DECLARE_EWMA> */