backport-include: backport skb_get_hash_perturb
[openwrt/staging/blogic.git] backport/backport-include/linux/skbuff.h
#ifndef __BACKPORT_SKBUFF_H
#define __BACKPORT_SKBUFF_H
#include_next <linux/skbuff.h>
#include <linux/version.h>
#include <generated/utsrelease.h>

#if LINUX_VERSION_IS_LESS(3,4,0) && \
    (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,4)) && \
    !(defined(CONFIG_SUSE_KERNEL) && LINUX_VERSION_IS_GEQ(3,0,0))
#define skb_add_rx_frag(skb, i, page, off, size, truesize) \
	skb_add_rx_frag(skb, i, page, off, size)
#endif

#if LINUX_VERSION_IS_LESS(3,3,0)
#define __pskb_copy LINUX_BACKPORT(__pskb_copy)
extern struct sk_buff *__pskb_copy(struct sk_buff *skb,
				   int headroom, gfp_t gfp_mask);

#define skb_complete_wifi_ack LINUX_BACKPORT(skb_complete_wifi_ack)
static inline void skb_complete_wifi_ack(struct sk_buff *skb, bool acked)
{
	WARN_ON(1);
}

/* define to 0 so checks for it are always false */
#define SKBTX_WIFI_STATUS 0
#elif LINUX_VERSION_IS_LESS(3,18,0)
#define skb_complete_wifi_ack LINUX_BACKPORT(skb_complete_wifi_ack)
void skb_complete_wifi_ack(struct sk_buff *skb, bool acked);
#endif

#if LINUX_VERSION_IS_LESS(3,2,0)
#include <linux/dma-mapping.h>

/* mask skb_frag_page as RHEL6 backports this */
#define skb_frag_page LINUX_BACKPORT(skb_frag_page)
static inline struct page *skb_frag_page(const skb_frag_t *frag)
{
	return frag->page;
}

#define skb_frag_size LINUX_BACKPORT(skb_frag_size)
static inline unsigned int skb_frag_size(const skb_frag_t *frag)
{
	return frag->size;
}

/* mask skb_frag_dma_map as RHEL6 backports this */
#define skb_frag_dma_map LINUX_BACKPORT(skb_frag_dma_map)
static inline dma_addr_t skb_frag_dma_map(struct device *dev,
					  const skb_frag_t *frag,
					  size_t offset, size_t size,
					  enum dma_data_direction dir)
{
	return dma_map_page(dev, skb_frag_page(frag),
			    frag->page_offset + offset, size, dir);
}
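
/*
 * Example (hypothetical driver sketch, not part of the backport):
 * mapping a paged fragment for DMA before handing it to hardware;
 * pdev is a hypothetical struct pci_dev pointer.
 *
 *	skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
 *	dma_addr_t dma = skb_frag_dma_map(&pdev->dev, frag, 0,
 *					  skb_frag_size(frag), DMA_TO_DEVICE);
 *
 *	if (dma_mapping_error(&pdev->dev, dma))
 *		return -ENOMEM;
 */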
#endif

#if LINUX_VERSION_IS_LESS(3,1,0)
/* mask __netdev_alloc_skb_ip_align as RHEL6 backports this */
#define __netdev_alloc_skb_ip_align(a,b,c) compat__netdev_alloc_skb_ip_align(a,b,c)
static inline struct sk_buff *__netdev_alloc_skb_ip_align(struct net_device *dev,
							   unsigned int length, gfp_t gfp)
{
	struct sk_buff *skb = __netdev_alloc_skb(dev, length + NET_IP_ALIGN, gfp);

	if (NET_IP_ALIGN && skb)
		skb_reserve(skb, NET_IP_ALIGN);
	return skb;
}
#endif

#ifndef skb_walk_frags
#define skb_walk_frags(skb, iter) \
	for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)
#endif
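
/*
 * Example (hypothetical sketch, not part of the backport): walking the
 * frag_list chain of an skb to total up the fragments' lengths.
 *
 *	struct sk_buff *iter;
 *	unsigned int frag_len = 0;
 *
 *	skb_walk_frags(skb, iter)
 *		frag_len += iter->len;
 */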

#if LINUX_VERSION_IS_LESS(3,2,0)
#define skb_frag_size_sub LINUX_BACKPORT(skb_frag_size_sub)
static inline void skb_frag_size_sub(skb_frag_t *frag, int delta)
{
	frag->size -= delta;
}

/**
 * skb_frag_address - gets the address of the data contained in a paged fragment
 * @frag: the paged fragment buffer
 *
 * Returns the address of the data within @frag. The page must already
 * be mapped.
 */
#define skb_frag_address LINUX_BACKPORT(skb_frag_address)
static inline void *skb_frag_address(const skb_frag_t *frag)
{
	return page_address(skb_frag_page(frag)) + frag->page_offset;
}
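
/*
 * Example (hypothetical sketch, not part of the backport): reading the
 * first byte of an already-mapped paged fragment.
 *
 *	skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
 *	u8 first = *(u8 *)skb_frag_address(frag);
 */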
#endif /* LINUX_VERSION_IS_LESS(3,2,0) */

#if LINUX_VERSION_IS_LESS(3,9,0)
#ifndef NETDEV_FRAG_PAGE_MAX_ORDER
#define NETDEV_FRAG_PAGE_MAX_ORDER get_order(32768)
#endif
#ifndef NETDEV_FRAG_PAGE_MAX_SIZE
#define NETDEV_FRAG_PAGE_MAX_SIZE (PAGE_SIZE << NETDEV_FRAG_PAGE_MAX_ORDER)
#endif
#endif /* LINUX_VERSION_IS_LESS(3,9,0) */

#if LINUX_VERSION_IS_LESS(3,9,0)
#define skb_unclone LINUX_BACKPORT(skb_unclone)
static inline int skb_unclone(struct sk_buff *skb, gfp_t pri)
{
	might_sleep_if(pri & __GFP_WAIT);
	if (skb_cloned(skb))
		return pskb_expand_head(skb, 0, 0, pri);
	return 0;
}
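
/*
 * Example (hypothetical sketch, not part of the backport): taking a
 * private copy of a possibly shared skb before rewriting its headers;
 * new_dest is a hypothetical u8[ETH_ALEN] array.
 *
 *	if (skb_unclone(skb, GFP_ATOMIC))
 *		return -ENOMEM;
 *	memcpy(eth_hdr(skb)->h_dest, new_dest, ETH_ALEN);
 */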
#endif

#if LINUX_VERSION_IS_LESS(3,2,0)

#define skb_frag_address_safe LINUX_BACKPORT(skb_frag_address_safe)
/**
 * skb_frag_address_safe - gets the address of the data contained in a paged fragment
 * @frag: the paged fragment buffer
 *
 * Returns the address of the data within @frag. Checks that the page
 * is mapped and returns %NULL otherwise.
 */
static inline void *skb_frag_address_safe(const skb_frag_t *frag)
{
	void *ptr = page_address(skb_frag_page(frag));
	if (unlikely(!ptr))
		return NULL;

	return ptr + frag->page_offset;
}
#endif /* LINUX_VERSION_IS_LESS(3,2,0) */

#if LINUX_VERSION_IS_LESS(3,14,0) && \
    RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0) && \
    !(LINUX_VERSION_CODE == KERNEL_VERSION(3,13,11) && UTS_UBUNTU_RELEASE_ABI > 30)
/*
 * Packet hash types specify the type of hash in skb_set_hash.
 *
 * Hash types refer to the protocol layer addresses which are used to
 * construct a packet's hash. The hashes are used to differentiate or identify
 * flows of the protocol layer for the hash type. Hash types are either
 * layer-2 (L2), layer-3 (L3), or layer-4 (L4).
 *
 * Properties of hashes:
 *
 * 1) Two packets in different flows have different hash values
 * 2) Two packets in the same flow should have the same hash value
 *
 * A hash at a higher layer is considered to be more specific. A driver should
 * set the most specific hash possible.
 *
 * A driver cannot indicate a more specific hash than the layer at which a hash
 * was computed. For instance an L3 hash cannot be set as an L4 hash.
 *
 * A driver may indicate a hash level which is less specific than the
 * actual layer the hash was computed on. For instance, a hash computed
 * at L4 may be considered an L3 hash. This should only be done if the
 * driver can't unambiguously determine that the HW computed the hash at
 * the higher layer. Note that the "should" in the second property above
 * permits this.
 */
enum pkt_hash_types {
	PKT_HASH_TYPE_NONE,	/* Undefined type */
	PKT_HASH_TYPE_L2,	/* Input: src_MAC, dest_MAC */
	PKT_HASH_TYPE_L3,	/* Input: src_IP, dst_IP */
	PKT_HASH_TYPE_L4,	/* Input: src_IP, dst_IP, src_port, dst_port */
};

static inline void
skb_set_hash(struct sk_buff *skb, __u32 hash, enum pkt_hash_types type)
{
#if LINUX_VERSION_IS_GEQ(3,2,0) /* 4031ae6edb */
	skb->l4_rxhash = (type == PKT_HASH_TYPE_L4);
#endif
#if LINUX_VERSION_IS_GEQ(3,4,0) /* bdeab99191 */
	skb->rxhash = hash;
#endif
}
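
/*
 * Example (hypothetical driver sketch, not part of the backport): a
 * driver reporting a hardware RSS hash computed over the 4-tuple, the
 * most specific (L4) level it can guarantee; rx_desc->rss_hash is a
 * hypothetical descriptor field.
 *
 *	skb_set_hash(skb, le32_to_cpu(rx_desc->rss_hash), PKT_HASH_TYPE_L4);
 */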
#endif /* LINUX_VERSION_IS_LESS(3,14,0) */

#if LINUX_VERSION_IS_LESS(3,16,0)
#define __pskb_copy_fclone LINUX_BACKPORT(__pskb_copy_fclone)
static inline struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb,
						 int headroom, gfp_t gfp_mask,
						 bool fclone)
{
	return __pskb_copy(skb, headroom, gfp_mask);
}
#endif

#if LINUX_VERSION_IS_LESS(3,18,0)
#define skb_clone_sk LINUX_BACKPORT(skb_clone_sk)
struct sk_buff *skb_clone_sk(struct sk_buff *skb);
#endif

#if LINUX_VERSION_IS_LESS(3,19,0)
/**
 * __dev_alloc_pages - allocate page for network Rx
 * @gfp_mask: allocation priority. Set __GFP_NOMEMALLOC if not for network Rx
 * @order: size of the allocation
 *
 * Allocate a new page.
 *
 * %NULL is returned if there is no free memory.
 */
#define __dev_alloc_pages LINUX_BACKPORT(__dev_alloc_pages)
static inline struct page *__dev_alloc_pages(gfp_t gfp_mask,
					     unsigned int order)
{
	/* This piece of code contains several assumptions.
	 * 1. This is for device Rx, therefore a cold page is preferred.
	 * 2. The expectation is the user wants a compound page.
	 * 3. If requesting an order 0 page it will not be compound
	 *    due to the check to see if order has a value in prep_new_page
	 * 4. __GFP_MEMALLOC is ignored if __GFP_NOMEMALLOC is set due to
	 *    code in gfp_to_alloc_flags that should be enforcing this.
	 */
	gfp_mask |= __GFP_COLD | __GFP_COMP;
#if LINUX_VERSION_IS_GEQ(3,6,0)
	gfp_mask |= __GFP_MEMALLOC;
#endif

	return alloc_pages_node(NUMA_NO_NODE, gfp_mask, order);
}

#define dev_alloc_pages LINUX_BACKPORT(dev_alloc_pages)
static inline struct page *dev_alloc_pages(unsigned int order)
{
	return __dev_alloc_pages(GFP_ATOMIC, order);
}

/**
 * __dev_alloc_page - allocate a page for network Rx
 * @gfp_mask: allocation priority. Set __GFP_NOMEMALLOC if not for network Rx
 *
 * Allocate a new page.
 *
 * %NULL is returned if there is no free memory.
 */
#define __dev_alloc_page LINUX_BACKPORT(__dev_alloc_page)
static inline struct page *__dev_alloc_page(gfp_t gfp_mask)
{
	return __dev_alloc_pages(gfp_mask, 0);
}

#define dev_alloc_page LINUX_BACKPORT(dev_alloc_page)
static inline struct page *dev_alloc_page(void)
{
	return __dev_alloc_page(GFP_ATOMIC);
}
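
/*
 * Example (hypothetical driver sketch, not part of the backport):
 * attaching a freshly allocated Rx page to an skb; frag_len is a
 * hypothetical length.
 *
 *	struct page *page = dev_alloc_page();
 *
 *	if (!page)
 *		return -ENOMEM;
 *	skb_add_rx_frag(skb, 0, page, 0, frag_len, PAGE_SIZE);
 */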
#endif /* LINUX_VERSION_IS_LESS(3,19,0) */

#if LINUX_VERSION_IS_LESS(3,19,0)
#define skb_copy_datagram_msg LINUX_BACKPORT(skb_copy_datagram_msg)
static inline int skb_copy_datagram_msg(const struct sk_buff *from, int offset,
					struct msghdr *msg, int size)
{
	return skb_copy_datagram_iovec(from, offset, msg->msg_iov, size);
}

#define memcpy_from_msg LINUX_BACKPORT(memcpy_from_msg)
static inline int memcpy_from_msg(void *data, struct msghdr *msg, int len)
{
	return memcpy_fromiovec(data, msg->msg_iov, len);
}

/**
 * skb_put_padto - increase size and pad an skbuff up to a minimal size
 * @skb: buffer to pad
 * @len: minimal length
 *
 * Pads up a buffer to ensure the trailing bytes exist and are
 * blanked. If the buffer already contains sufficient data it
 * is untouched. Otherwise it is extended. Returns zero on
 * success. The skb is freed on error.
 */
#define skb_put_padto LINUX_BACKPORT(skb_put_padto)
static inline int skb_put_padto(struct sk_buff *skb, unsigned int len)
{
	unsigned int size = skb->len;

	if (unlikely(size < len)) {
		len -= size;
		if (skb_pad(skb, len))
			return -ENOMEM;
		__skb_put(skb, len);
	}
	return 0;
}
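
/*
 * Example (hypothetical sketch, not part of the backport): padding a
 * short Ethernet frame to the 60-byte minimum before transmission.
 * Since skb_put_padto() frees the skb on failure, the caller must not
 * free it again.
 *
 *	if (skb_put_padto(skb, ETH_ZLEN))
 *		return NETDEV_TX_OK;
 */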

#define skb_ensure_writable LINUX_BACKPORT(skb_ensure_writable)
int skb_ensure_writable(struct sk_buff *skb, int write_len);

#endif /* LINUX_VERSION_IS_LESS(3,19,0) */

#if LINUX_VERSION_IS_LESS(4,2,0)
static inline void skb_free_frag(void *data)
{
	put_page(virt_to_head_page(data));
}

#if LINUX_VERSION_IS_LESS(3,3,0)

static inline u32 skb_get_hash_perturb(struct sk_buff *skb, u32 key)
{
	return 0;
}

#else
#include <net/flow_keys.h>
#include <linux/jhash.h>

static inline u32 skb_get_hash_perturb(struct sk_buff *skb, u32 key)
{
	struct flow_keys keys;

	skb_flow_dissect(skb, &keys);
	return jhash_3words((__force u32)keys.dst,
			    (__force u32)keys.src ^ keys.ip_proto,
			    (__force u32)keys.ports, key);
}
#endif /* LINUX_VERSION_IS_LESS(3,3,0) */
#endif /* LINUX_VERSION_IS_LESS(4,2,0) */
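
/*
 * Example (hypothetical sketch, not part of the backport): using the
 * perturbed flow hash to pick a bucket, in the style of fq_codel;
 * q->perturbation and q->flows_cnt are hypothetical fields.
 *
 *	u32 idx = skb_get_hash_perturb(skb, q->perturbation) % q->flows_cnt;
 */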

#if LINUX_VERSION_IS_LESS(4,13,0)
static inline void *backport_skb_put(struct sk_buff *skb, unsigned int len)
{
	return skb_put(skb, len);
}
#define skb_put LINUX_BACKPORT(skb_put)

static inline void *backport_skb_push(struct sk_buff *skb, unsigned int len)
{
	return skb_push(skb, len);
}
#define skb_push LINUX_BACKPORT(skb_push)

static inline void *backport___skb_push(struct sk_buff *skb, unsigned int len)
{
	return __skb_push(skb, len);
}
#define __skb_push LINUX_BACKPORT(__skb_push)

static inline void *skb_put_zero(struct sk_buff *skb, unsigned int len)
{
	void *tmp = skb_put(skb, len);

	memset(tmp, 0, len);

	return tmp;
}

static inline void *skb_put_data(struct sk_buff *skb, const void *data,
				 unsigned int len)
{
	void *tmp = skb_put(skb, len);

	memcpy(tmp, data, len);

	return tmp;
}

static inline void skb_put_u8(struct sk_buff *skb, u8 val)
{
	*(u8 *)skb_put(skb, 1) = val;
}
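
/*
 * Example (hypothetical sketch, not part of the backport): building a
 * frame with the void *-returning helpers; hdr, payload and payload_len
 * are hypothetical.
 *
 *	skb_put_data(skb, &hdr, sizeof(hdr));
 *	skb_put_data(skb, payload, payload_len);
 *	skb_put_u8(skb, 0);
 */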
#endif

#endif /* __BACKPORT_SKBUFF_H */