kernel: bump 4.9 to 4.9.63
openwrt/staging/wigyori.git: target/linux/layerscape/patches-4.9/202-core-linux-support-layerscape.patch
From c37953457a7ebeb0d97ae8574b3d41274fcd9119 Mon Sep 17 00:00:00 2001
From: Yangbo Lu <yangbo.lu@nxp.com>
Date: Wed, 1 Nov 2017 16:22:33 +0800
Subject: [PATCH] core-linux: support layerscape

This is an integrated patch for layerscape core-linux support.

Signed-off-by: Madalin Bucur <madalin.bucur@freescale.com>
Signed-off-by: Zhao Qiang <qiang.zhao@nxp.com>
Signed-off-by: Camelia Groza <camelia.groza@nxp.com>
Signed-off-by: Madalin Bucur <madalin.bucur@nxp.com>
Signed-off-by: Zhang Ying-22455 <ying.zhang22455@nxp.com>
Signed-off-by: Ramneek Mehresh <ramneek.mehresh@freescale.com>
Signed-off-by: Jarod Wilson <jarod@redhat.com>
Signed-off-by: Nikhil Badola <nikhil.badola@freescale.com>
Signed-off-by: stephen hemminger <stephen@networkplumber.org>
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
---
 drivers/base/devres.c           | 66 ++++++++++++++++++++++++++++
 drivers/base/soc.c              | 66 ++++++++++++++++++++++++++++
 include/linux/device.h          | 19 ++++++++
 include/linux/fsl/svr.h         | 97 +++++++++++++++++++++++++++++++++++++++++
 include/linux/fsl_devices.h     |  3 ++
 include/linux/netdev_features.h |  2 +
 include/linux/netdevice.h       |  4 ++
 include/linux/skbuff.h          |  2 +
 include/linux/sys_soc.h         |  3 ++
 include/uapi/linux/if_ether.h   |  1 +
 net/core/dev.c                  | 13 +++++-
 net/core/skbuff.c               | 29 +++++++++++-
 net/sched/sch_generic.c         |  7 +++
 13 files changed, 309 insertions(+), 3 deletions(-)
 create mode 100644 include/linux/fsl/svr.h
--- a/drivers/base/devres.c
+++ b/drivers/base/devres.c
@@ -10,6 +10,7 @@
 #include <linux/device.h>
 #include <linux/module.h>
 #include <linux/slab.h>
+#include <linux/percpu.h>
 
 #include "base.h"
 
@@ -985,3 +986,68 @@ void devm_free_pages(struct device *dev,
			       &devres));
 }
 EXPORT_SYMBOL_GPL(devm_free_pages);
+
+static void devm_percpu_release(struct device *dev, void *pdata)
+{
+	void __percpu *p;
+
+	p = *(void __percpu **)pdata;
+	free_percpu(p);
+}
+
+static int devm_percpu_match(struct device *dev, void *data, void *p)
+{
+	struct devres *devr = container_of(data, struct devres, data);
+
+	return *(void **)devr->data == p;
+}
+
+/**
+ * __devm_alloc_percpu - Resource-managed alloc_percpu
+ * @dev: Device to allocate per-cpu memory for
+ * @size: Size of per-cpu memory to allocate
+ * @align: Alignment of per-cpu memory to allocate
+ *
+ * Managed alloc_percpu. Per-cpu memory allocated with this function is
+ * automatically freed on driver detach.
+ *
+ * RETURNS:
+ * Pointer to allocated memory on success, NULL on failure.
+ */
+void __percpu *__devm_alloc_percpu(struct device *dev, size_t size,
+		size_t align)
+{
+	void *p;
+	void __percpu *pcpu;
+
+	pcpu = __alloc_percpu(size, align);
+	if (!pcpu)
+		return NULL;
+
+	p = devres_alloc(devm_percpu_release, sizeof(void *), GFP_KERNEL);
+	if (!p) {
+		free_percpu(pcpu);
+		return NULL;
+	}
+
+	*(void __percpu **)p = pcpu;
+
+	devres_add(dev, p);
+
+	return pcpu;
+}
+EXPORT_SYMBOL_GPL(__devm_alloc_percpu);
+
+/**
+ * devm_free_percpu - Resource-managed free_percpu
+ * @dev: Device this memory belongs to
+ * @pdata: Per-cpu memory to free
+ *
+ * Free memory allocated with devm_alloc_percpu().
+ */
+void devm_free_percpu(struct device *dev, void __percpu *pdata)
+{
+	WARN_ON(devres_destroy(dev, devm_percpu_release, devm_percpu_match,
+			       (void *)pdata));
+}
+EXPORT_SYMBOL_GPL(devm_free_percpu);
--- a/drivers/base/soc.c
+++ b/drivers/base/soc.c
@@ -13,6 +13,7 @@
 #include <linux/spinlock.h>
 #include <linux/sys_soc.h>
 #include <linux/err.h>
+#include <linux/glob.h>
 
 static DEFINE_IDA(soc_ida);
 
@@ -159,3 +160,68 @@ static int __init soc_bus_register(void)
	return bus_register(&soc_bus_type);
 }
 core_initcall(soc_bus_register);
+
+static int soc_device_match_one(struct device *dev, void *arg)
+{
+	struct soc_device *soc_dev = container_of(dev, struct soc_device, dev);
+	const struct soc_device_attribute *match = arg;
+
+	if (match->machine &&
+	    !glob_match(match->machine, soc_dev->attr->machine))
+		return 0;
+
+	if (match->family &&
+	    !glob_match(match->family, soc_dev->attr->family))
+		return 0;
+
+	if (match->revision &&
+	    !glob_match(match->revision, soc_dev->attr->revision))
+		return 0;
+
+	if (match->soc_id &&
+	    !glob_match(match->soc_id, soc_dev->attr->soc_id))
+		return 0;
+
+	return 1;
+}
+
+/*
+ * soc_device_match - identify the SoC in the machine
+ * @matches: zero-terminated array of possible matches
+ *
+ * returns the first matching entry of the argument array, or NULL
+ * if none of them match.
+ *
+ * This function is meant as a helper in place of of_match_node()
+ * in cases where either no device tree is available or the information
+ * in a device node is insufficient to identify a particular variant
+ * by its compatible strings or other properties. For new devices,
+ * the DT binding should always provide unique compatible strings
+ * that allow the use of of_match_node() instead.
+ *
+ * The calling function can use the .data entry of the
+ * soc_device_attribute to pass a structure or function pointer for
+ * each entry.
+ */
+const struct soc_device_attribute *soc_device_match(
+	const struct soc_device_attribute *matches)
+{
+	int ret = 0;
+
+	if (!matches)
+		return NULL;
+
+	while (!ret) {
+		if (!(matches->machine || matches->family ||
+		      matches->revision || matches->soc_id))
+			break;
+		ret = bus_for_each_dev(&soc_bus_type, NULL, (void *)matches,
+				       soc_device_match_one);
+		if (!ret)
+			matches++;
+		else
+			return matches;
+	}
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(soc_device_match);
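For illustration only (not part of the patch): a minimal sketch of how a driver could consume soc_device_match() together with the new .data field; the attribute strings and the quirk flag below are made up.

#include <linux/sys_soc.h>

static const struct soc_device_attribute foo_soc_quirks[] = {
	/* glob patterns are allowed in every field */
	{ .family = "QorIQ LS1021A", .revision = "1.0", .data = (void *)1 },
	{ .soc_id = "svr:0x87000010*" },
	{ /* sentinel */ }
};

static bool foo_needs_quirk(void)
{
	const struct soc_device_attribute *match;

	/* Returns the first entry matching the registered soc_device,
	 * or NULL if nothing matches.
	 */
	match = soc_device_match(foo_soc_quirks);
	return match && match->data;
}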
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -688,6 +688,25 @@ void __iomem *devm_ioremap_resource(stru
 int devm_add_action(struct device *dev, void (*action)(void *), void *data);
 void devm_remove_action(struct device *dev, void (*action)(void *), void *data);
 
+/**
+ * devm_alloc_percpu - Resource-managed alloc_percpu
+ * @dev: Device to allocate per-cpu memory for
+ * @type: Type to allocate per-cpu memory for
+ *
+ * Managed alloc_percpu. Per-cpu memory allocated with this function is
+ * automatically freed on driver detach.
+ *
+ * RETURNS:
+ * Pointer to allocated memory on success, NULL on failure.
+ */
+#define devm_alloc_percpu(dev, type) \
+	((typeof(type) __percpu *)__devm_alloc_percpu((dev), sizeof(type), \
+						      __alignof__(type)))
+
+void __percpu *__devm_alloc_percpu(struct device *dev, size_t size,
+				   size_t align);
+void devm_free_percpu(struct device *dev, void __percpu *pdata);
+
 static inline int devm_add_action_or_reset(struct device *dev,
					    void (*action)(void *), void *data)
 {
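For illustration only (not part of the patch): a minimal sketch of the managed per-CPU allocator added above; struct foo_stats, struct foo_priv and foo_probe() are hypothetical names.

#include <linux/types.h>
#include <linux/device.h>
#include <linux/platform_device.h>

struct foo_stats {
	u64	packets;
	u64	bytes;
};

struct foo_priv {
	struct foo_stats __percpu *stats;
};

static int foo_probe(struct platform_device *pdev)
{
	struct foo_priv *priv;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* Sized and aligned for struct foo_stats; released automatically
	 * on driver detach, so no explicit free_percpu() is needed.
	 */
	priv->stats = devm_alloc_percpu(&pdev->dev, struct foo_stats);
	if (!priv->stats)
		return -ENOMEM;

	platform_set_drvdata(pdev, priv);
	return 0;
}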
--- /dev/null
+++ b/include/linux/fsl/svr.h
@@ -0,0 +1,97 @@
+/*
+ * MPC85xx cpu type detection
+ *
+ * Copyright 2011-2012 Freescale Semiconductor, Inc.
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef FSL_SVR_H
+#define FSL_SVR_H
+
+#define SVR_REV(svr)	((svr) & 0xFF)		/* SOC design revision */
+#define SVR_MAJ(svr)	(((svr) >> 4) & 0xF)	/* Major revision field */
+#define SVR_MIN(svr)	(((svr) >> 0) & 0xF)	/* Minor revision field */
+
+/* Some parts define SVR[0:23] as the SOC version */
+#define SVR_SOC_VER(svr) (((svr) >> 8) & 0xFFF7FF) /* SOC Version fields */
+
+#define SVR_8533	0x803400
+#define SVR_8535	0x803701
+#define SVR_8536	0x803700
+#define SVR_8540	0x803000
+#define SVR_8541	0x807200
+#define SVR_8543	0x803200
+#define SVR_8544	0x803401
+#define SVR_8545	0x803102
+#define SVR_8547	0x803101
+#define SVR_8548	0x803100
+#define SVR_8555	0x807100
+#define SVR_8560	0x807000
+#define SVR_8567	0x807501
+#define SVR_8568	0x807500
+#define SVR_8569	0x808000
+#define SVR_8572	0x80E000
+#define SVR_P1010	0x80F100
+#define SVR_P1011	0x80E500
+#define SVR_P1012	0x80E501
+#define SVR_P1013	0x80E700
+#define SVR_P1014	0x80F101
+#define SVR_P1017	0x80F700
+#define SVR_P1020	0x80E400
+#define SVR_P1021	0x80E401
+#define SVR_P1022	0x80E600
+#define SVR_P1023	0x80F600
+#define SVR_P1024	0x80E402
+#define SVR_P1025	0x80E403
+#define SVR_P2010	0x80E300
+#define SVR_P2020	0x80E200
+#define SVR_P2040	0x821000
+#define SVR_P2041	0x821001
+#define SVR_P3041	0x821103
+#define SVR_P4040	0x820100
+#define SVR_P4080	0x820000
+#define SVR_P5010	0x822100
+#define SVR_P5020	0x822000
+#define SVR_P5021	0x820500
+#define SVR_P5040	0x820400
+#define SVR_T4240	0x824000
+#define SVR_T4120	0x824001
+#define SVR_T4160	0x824100
+#define SVR_T4080	0x824102
+#define SVR_C291	0x850000
+#define SVR_C292	0x850020
+#define SVR_C293	0x850030
+#define SVR_B4860	0x868000
+#define SVR_G4860	0x868001
+#define SVR_G4060	0x868003
+#define SVR_B4440	0x868100
+#define SVR_G4440	0x868101
+#define SVR_B4420	0x868102
+#define SVR_B4220	0x868103
+#define SVR_T1040	0x852000
+#define SVR_T1041	0x852001
+#define SVR_T1042	0x852002
+#define SVR_T1020	0x852100
+#define SVR_T1021	0x852101
+#define SVR_T1022	0x852102
+#define SVR_T1023	0x854100
+#define SVR_T1024	0x854000
+#define SVR_T2080	0x853000
+#define SVR_T2081	0x853100
+
+#define SVR_8610	0x80A000
+#define SVR_8641	0x809000
+#define SVR_8641D	0x809001
+
+#define SVR_9130	0x860001
+#define SVR_9131	0x860000
+#define SVR_9132	0x861000
+#define SVR_9232	0x861400
+
+#define SVR_Unknown	0xFFFFFF
+
+#endif
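For illustration only (not part of the patch): how the macros above decompose a raw SVR register value; the sample value and foo_print_soc() are hypothetical, and the comment mapping it to a T1040 rev 1.1 is only indicative.

#include <linux/types.h>
#include <linux/printk.h>
#include <linux/fsl/svr.h>

static void foo_print_soc(u32 svr)
{
	/* e.g. svr = 0x85200011 -> soc 0x852000 (SVR_T1040), rev 1.1 */
	pr_info("soc 0x%06x rev %d.%d\n",
		SVR_SOC_VER(svr), SVR_MAJ(svr), SVR_MIN(svr));
}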
--- a/include/linux/fsl_devices.h
+++ b/include/linux/fsl_devices.h
@@ -99,7 +99,10 @@ struct fsl_usb2_platform_data {
	unsigned	suspended:1;
	unsigned	already_suspended:1;
	unsigned	has_fsl_erratum_a007792:1;
+	unsigned	has_fsl_erratum_14:1;
	unsigned	has_fsl_erratum_a005275:1;
+	unsigned	has_fsl_erratum_a006918:1;
+	unsigned	has_fsl_erratum_a005697:1;
	unsigned	check_phy_clk_valid:1;
 
	/* register save area for suspend/resume */
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -74,6 +74,7 @@ enum {
	NETIF_F_BUSY_POLL_BIT,		/* Busy poll */
 
	NETIF_F_HW_TC_BIT,		/* Offload TC infrastructure */
+	NETIF_F_HW_ACCEL_MQ_BIT,	/* Hardware-accelerated multiqueue */
 
	/*
	 * Add your fresh new feature above and remember to update
@@ -136,6 +137,7 @@ enum {
 #define NETIF_F_HW_L2FW_DOFFLOAD	__NETIF_F(HW_L2FW_DOFFLOAD)
 #define NETIF_F_BUSY_POLL	__NETIF_F(BUSY_POLL)
 #define NETIF_F_HW_TC		__NETIF_F(HW_TC)
+#define NETIF_F_HW_ACCEL_MQ	__NETIF_F(HW_ACCEL_MQ)
 
 #define for_each_netdev_feature(mask_addr, bit)	\
	for_each_set_bit(bit, (unsigned long *)mask_addr, NETDEV_FEATURE_COUNT)
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1509,6 +1509,8 @@ enum netdev_priv_flags {
 *	@if_port:	Selectable AUI, TP, ...
 *	@dma:		DMA channel
 *	@mtu:		Interface MTU value
+ *	@min_mtu:	Interface Minimum MTU value
+ *	@max_mtu:	Interface Maximum MTU value
 *	@type:		Interface hardware type
 *	@hard_header_len: Maximum hardware header length.
 *	@min_header_len: Minimum hardware header length
@@ -1735,6 +1737,8 @@ struct net_device {
	unsigned char		dma;
 
	unsigned int		mtu;
+	unsigned int		min_mtu;
+	unsigned int		max_mtu;
	unsigned short		type;
	unsigned short		hard_header_len;
	unsigned short		min_header_len;
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -903,6 +903,7 @@ void kfree_skb(struct sk_buff *skb);
 void kfree_skb_list(struct sk_buff *segs);
 void skb_tx_error(struct sk_buff *skb);
 void consume_skb(struct sk_buff *skb);
+void skb_recycle(struct sk_buff *skb);
 void __kfree_skb(struct sk_buff *skb);
 extern struct kmem_cache *skbuff_head_cache;
 
@@ -3057,6 +3058,7 @@ static inline void skb_free_datagram_loc
 }
 int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags);
 int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
+void copy_skb_header(struct sk_buff *new, const struct sk_buff *old);
 int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len);
 __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to,
			       int len, __wsum csum);
--- a/include/linux/sys_soc.h
+++ b/include/linux/sys_soc.h
@@ -13,6 +13,7 @@ struct soc_device_attribute {
	const char *family;
	const char *revision;
	const char *soc_id;
+	const void *data;
 };
 
 /**
@@ -34,4 +35,6 @@ void soc_device_unregister(struct soc_de
 */
 struct device *soc_device_to_device(struct soc_device *soc);
 
+const struct soc_device_attribute *soc_device_match(
+	const struct soc_device_attribute *matches);
 #endif /* __SOC_BUS_H */
--- a/include/uapi/linux/if_ether.h
+++ b/include/uapi/linux/if_ether.h
@@ -35,6 +35,7 @@
 #define ETH_DATA_LEN	1500	/* Max. octets in payload */
 #define ETH_FRAME_LEN	1514	/* Max. octets in frame sans FCS */
 #define ETH_FCS_LEN	4	/* Octets in the FCS */
+#define ETH_MIN_MTU	68	/* Min IPv4 MTU per RFC791 */
 
 /*
  *	These are the defined Ethernet Protocol ID's.
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -6603,9 +6603,18 @@ int dev_set_mtu(struct net_device *dev,
	if (new_mtu == dev->mtu)
		return 0;
 
-	/* MTU must be positive. */
-	if (new_mtu < 0)
+	/* MTU must be positive, and in range */
+	if (new_mtu < 0 || new_mtu < dev->min_mtu) {
+		net_err_ratelimited("%s: Invalid MTU %d requested, hw min %d\n",
+				    dev->name, new_mtu, dev->min_mtu);
		return -EINVAL;
+	}
+
+	if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) {
+		net_err_ratelimited("%s: Invalid MTU %d requested, hw max %d\n",
+				    dev->name, new_mtu, dev->max_mtu);
+		return -EINVAL;
+	}
 
	if (!netif_device_present(dev))
		return -ENODEV;
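For illustration only (not part of the patch): with the range check in dev_set_mtu() above, a driver only has to publish its limits once at setup time instead of validating in ndo_change_mtu(); foo_setup() and the 9600-byte jumbo limit are hypothetical.

#include <linux/netdevice.h>
#include <linux/if_ether.h>

static void foo_setup(struct net_device *dev)
{
	dev->mtu     = ETH_DATA_LEN;
	dev->min_mtu = ETH_MIN_MTU;	/* 68, added in if_ether.h above */
	dev->max_mtu = 9600;		/* largest frame the MAC can pass */
}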
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -842,6 +842,32 @@ void napi_consume_skb(struct sk_buff *sk
 }
 EXPORT_SYMBOL(napi_consume_skb);
 
+/**
+ * skb_recycle - clean up an skb for reuse
+ * @skb: buffer
+ *
+ * Recycles the skb to be reused as a receive buffer. This
+ * function does any necessary reference count dropping, and
+ * cleans up the skbuff as if it just came from __alloc_skb().
+ */
+void skb_recycle(struct sk_buff *skb)
+{
+	struct skb_shared_info *shinfo;
+	u8 head_frag = skb->head_frag;
+
+	skb_release_head_state(skb);
+
+	shinfo = skb_shinfo(skb);
+	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
+	atomic_set(&shinfo->dataref, 1);
+
+	memset(skb, 0, offsetof(struct sk_buff, tail));
+	skb->data = skb->head + NET_SKB_PAD;
+	skb->head_frag = head_frag;
+	skb_reset_tail_pointer(skb);
+}
+EXPORT_SYMBOL(skb_recycle);
+
 /* Make sure a field is enclosed inside headers_start/headers_end section */
 #define CHECK_SKB_FIELD(field) \
	BUILD_BUG_ON(offsetof(struct sk_buff, field) < \
@@ -1073,7 +1099,7 @@ static void skb_headers_offset_update(st
	skb->inner_mac_header += off;
 }
 
-static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
+void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
 {
	__copy_skb_header(new, old);
 
@@ -1081,6 +1107,7 @@ static void copy_skb_bu
	skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
	skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
 }
+EXPORT_SYMBOL(copy_skb_header);
 
 static inline int skb_alloc_rx_flag(const struct sk_buff *skb)
 {
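For illustration only (not part of the patch): skb_recycle() only resets the skb, so a driver is expected to check ownership and buffer size itself before reusing a transmitted buffer on its receive ring; struct foo_priv, rx_buf_size and foo_rx_refill() are hypothetical.

#include <linux/skbuff.h>
#include <linux/netdevice.h>

struct foo_priv {
	unsigned int rx_buf_size;
};

static void foo_rx_refill(struct foo_priv *priv, struct sk_buff *skb);

static void foo_tx_complete(struct foo_priv *priv, struct sk_buff *skb)
{
	/* Reuse only buffers we own exclusively and that are large enough
	 * to hold a full receive frame; otherwise free them as usual.
	 */
	if (!skb_cloned(skb) && !skb_shared(skb) &&
	    skb_end_offset(skb) >= priv->rx_buf_size) {
		skb_recycle(skb);
		foo_rx_refill(priv, skb);
	} else {
		dev_kfree_skb_any(skb);
	}
}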
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -309,6 +309,13 @@ static void dev_watchdog(unsigned long a
					txq->trans_timeout++;
					break;
				}
+
+				/* Devices with HW_ACCEL_MQ have multiple txqs
+				 * but update only the first one's transmission
+				 * timestamp, so avoid checking the rest.
+				 */
+				if (dev->features & NETIF_F_HW_ACCEL_MQ)
+					break;
			}
 
			if (some_queue_timedout) {
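For illustration only (not part of the patch): a driver whose hardware refreshes only queue 0's trans_start could advertise the new feature bit so dev_watchdog() above stops after the first queue; foo_netdev_init() is hypothetical.

#include <linux/netdevice.h>

static void foo_netdev_init(struct net_device *dev)
{
	/* Only txq 0's transmission timestamp is kept up to date by this
	 * hardware, so let the watchdog skip the remaining queues.
	 */
	dev->features    |= NETIF_F_HW_ACCEL_MQ;
	dev->hw_features |= NETIF_F_HW_ACCEL_MQ;
}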