From f339945a8e81fff22df95284e142b79c37fd2333 Mon Sep 17 00:00:00 2001
From: Yangbo Lu <yangbo.lu@nxp.com>
Date: Thu, 5 Jul 2018 16:07:09 +0800
Subject: [PATCH 02/32] core-linux: support layerscape

This is an integrated patch for layerscape core-linux support.

Signed-off-by: Madalin Bucur <madalin.bucur@freescale.com>
Signed-off-by: Zhao Qiang <qiang.zhao@nxp.com>
Signed-off-by: Camelia Groza <camelia.groza@nxp.com>
Signed-off-by: Madalin Bucur <madalin.bucur@nxp.com>
Signed-off-by: Zhang Ying-22455 <ying.zhang22455@nxp.com>
Signed-off-by: Ramneek Mehresh <ramneek.mehresh@freescale.com>
Signed-off-by: Jarod Wilson <jarod@redhat.com>
Signed-off-by: Nikhil Badola <nikhil.badola@freescale.com>
Signed-off-by: stephen hemminger <stephen@networkplumber.org>
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
---
 drivers/base/devres.c | 66 ++++++
 drivers/base/soc.c | 70 ++++++
 .../net/ethernet/mellanox/mlxsw/spectrum.c | 2 +-
 .../mellanox/mlxsw/spectrum_switchdev.c | 2 +-
 drivers/net/ethernet/rocker/rocker_ofdpa.c | 4 +-
 include/linux/device.h | 19 ++
 include/linux/dma-mapping.h | 5 +
 include/linux/fsl/svr.h | 97 ++++++++
 include/linux/fsl_devices.h | 3 +
 include/linux/irqdesc.h | 4 +
 include/linux/irqdomain.h | 13 +-
 include/linux/netdev_features.h | 2 +
 include/linux/netdevice.h | 10 +-
 include/linux/skbuff.h | 2 +
 include/linux/sys_soc.h | 3 +
 include/net/switchdev.h | 8 +-
 include/uapi/linux/if_ether.h | 1 +
 kernel/irq/Kconfig | 11 +
 kernel/irq/Makefile | 1 +
 kernel/irq/debugfs.c | 215 ++++++++++++++++++
 kernel/irq/internals.h | 22 ++
 kernel/irq/irqdesc.c | 1 +
 kernel/irq/irqdomain.c | 171 ++++++++++----
 kernel/irq/manage.c | 1 +
 kernel/irq/msi.c | 2 +-
 net/bridge/br.c | 4 +-
 net/bridge/br_fdb.c | 2 +
 net/bridge/br_private.h | 7 +
 net/bridge/br_switchdev.c | 33 +++
 net/core/dev.c | 30 ++-
 net/core/net-sysfs.c | 20 +-
 net/core/rtnetlink.c | 4 +-
 net/core/skbuff.c | 29 ++-
 net/sched/sch_generic.c | 7 +
 34 files changed, 809 insertions(+), 62 deletions(-)
 create mode 100644 include/linux/fsl/svr.h
 create mode 100644 kernel/irq/debugfs.c

--- a/drivers/base/devres.c
+++ b/drivers/base/devres.c
@@ -10,6 +10,7 @@
 #include <linux/device.h>
 #include <linux/module.h>
 #include <linux/slab.h>
+#include <linux/percpu.h>
 
 #include "base.h"
 
@@ -985,3 +986,68 @@ void devm_free_pages(struct device *dev,
                                &devres));
 }
 EXPORT_SYMBOL_GPL(devm_free_pages);
+
+static void devm_percpu_release(struct device *dev, void *pdata)
+{
+        void __percpu *p;
+
+        p = *(void __percpu **)pdata;
+        free_percpu(p);
+}
+
+static int devm_percpu_match(struct device *dev, void *data, void *p)
+{
+        struct devres *devr = container_of(data, struct devres, data);
+
+        return *(void **)devr->data == p;
+}
+
+/**
+ * __devm_alloc_percpu - Resource-managed alloc_percpu
+ * @dev: Device to allocate per-cpu memory for
+ * @size: Size of per-cpu memory to allocate
+ * @align: Alignment of per-cpu memory to allocate
+ *
+ * Managed alloc_percpu. Per-cpu memory allocated with this function is
+ * automatically freed on driver detach.
+ *
+ * RETURNS:
+ * Pointer to allocated memory on success, NULL on failure.
+ */
+void __percpu *__devm_alloc_percpu(struct device *dev, size_t size,
+                                   size_t align)
+{
+        void *p;
+        void __percpu *pcpu;
+
+        pcpu = __alloc_percpu(size, align);
+        if (!pcpu)
+                return NULL;
+
+        p = devres_alloc(devm_percpu_release, sizeof(void *), GFP_KERNEL);
+        if (!p) {
+                free_percpu(pcpu);
+                return NULL;
+        }
+
+        *(void __percpu **)p = pcpu;
+
+        devres_add(dev, p);
+
+        return pcpu;
+}
+EXPORT_SYMBOL_GPL(__devm_alloc_percpu);
+
+/**
+ * devm_free_percpu - Resource-managed free_percpu
+ * @dev: Device this memory belongs to
+ * @pdata: Per-cpu memory to free
+ *
+ * Free memory allocated with devm_alloc_percpu().
+ */
+void devm_free_percpu(struct device *dev, void __percpu *pdata)
+{
+        WARN_ON(devres_destroy(dev, devm_percpu_release, devm_percpu_match,
+                               (void *)pdata));
+}
+EXPORT_SYMBOL_GPL(devm_free_percpu);
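
For reference, a consumer of the managed allocator added above would look roughly like the sketch below; it is illustrative only and not part of the patch (struct my_stats and my_probe() are hypothetical names). An explicit early devm_free_percpu(&pdev->dev, stats) is also allowed but usually unnecessary.

#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/percpu.h>

struct my_stats {
        u64 rx_packets;
        u64 rx_bytes;
};

static int my_probe(struct platform_device *pdev)
{
        struct my_stats __percpu *stats;

        /* sized and aligned for the type; freed automatically on detach */
        stats = devm_alloc_percpu(&pdev->dev, struct my_stats);
        if (!stats)
                return -ENOMEM;

        platform_set_drvdata(pdev, stats);
        return 0;
}
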
--- a/drivers/base/soc.c
+++ b/drivers/base/soc.c
@@ -13,6 +13,7 @@
 #include <linux/spinlock.h>
 #include <linux/sys_soc.h>
 #include <linux/err.h>
+#include <linux/glob.h>
 
 static DEFINE_IDA(soc_ida);
 
@@ -159,3 +160,72 @@ static int __init soc_bus_register(void)
         return bus_register(&soc_bus_type);
 }
 core_initcall(soc_bus_register);
+
+static int soc_device_match_one(struct device *dev, void *arg)
+{
+        struct soc_device *soc_dev = container_of(dev, struct soc_device, dev);
+        const struct soc_device_attribute *match = arg;
+
+        if (match->machine &&
+            (!soc_dev->attr->machine ||
+             !glob_match(match->machine, soc_dev->attr->machine)))
+                return 0;
+
+        if (match->family &&
+            (!soc_dev->attr->family ||
+             !glob_match(match->family, soc_dev->attr->family)))
+                return 0;
+
+        if (match->revision &&
+            (!soc_dev->attr->revision ||
+             !glob_match(match->revision, soc_dev->attr->revision)))
+                return 0;
+
+        if (match->soc_id &&
+            (!soc_dev->attr->soc_id ||
+             !glob_match(match->soc_id, soc_dev->attr->soc_id)))
+                return 0;
+
+        return 1;
+}
+
+/*
+ * soc_device_match - identify the SoC in the machine
+ * @matches: zero-terminated array of possible matches
+ *
+ * returns the first matching entry of the argument array, or NULL
+ * if none of them match.
+ *
+ * This function is meant as a helper in place of of_match_node()
+ * in cases where either no device tree is available or the information
+ * in a device node is insufficient to identify a particular variant
+ * by its compatible strings or other properties. For new devices,
+ * the DT binding should always provide unique compatible strings
+ * that allow the use of of_match_node() instead.
+ *
+ * The calling function can use the .data entry of the
+ * soc_device_attribute to pass a structure or function pointer for
+ * each entry.
+ */
+const struct soc_device_attribute *soc_device_match(
+        const struct soc_device_attribute *matches)
+{
+        int ret = 0;
+
+        if (!matches)
+                return NULL;
+
+        while (!ret) {
+                if (!(matches->machine || matches->family ||
+                      matches->revision || matches->soc_id))
+                        break;
+                ret = bus_for_each_dev(&soc_bus_type, NULL, (void *)matches,
+                                       soc_device_match_one);
+                if (!ret)
+                        matches++;
+                else
+                        return matches;
+        }
+        return NULL;
+}
+EXPORT_SYMBOL_GPL(soc_device_match);
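
A caller hands soc_device_match() a sentinel-terminated table; each populated string is matched with glob_match() (hence the new <linux/glob.h> include above), and the first matching entry is returned so its .data can carry per-SoC quirks. A minimal sketch, with made-up identifier strings:

static const struct soc_device_attribute my_quirk_table[] = {
        { .family = "QorIQ LS1043A", .revision = "1.0" }, /* hypothetical */
        { .family = "QorIQ LS2080A" },                    /* any revision */
        { /* sentinel */ }
};

static bool my_soc_needs_quirk(void)
{
        return soc_device_match(my_quirk_table) != NULL;
}
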
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -859,7 +859,7 @@ mlxsw_sp_port_get_sw_stats64(const struc
         return 0;
 }
 
-static bool mlxsw_sp_port_has_offload_stats(int attr_id)
+static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id)
 {
         switch (attr_id) {
         case IFLA_OFFLOAD_XSTATS_CPU_HIT:
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -1405,7 +1405,7 @@ static void mlxsw_sp_fdb_call_notifiers(
         if (learning_sync) {
                 info.addr = mac;
                 info.vid = vid;
-                notifier_type = adding ? SWITCHDEV_FDB_ADD : SWITCHDEV_FDB_DEL;
+                notifier_type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE : SWITCHDEV_FDB_DEL_TO_BRIDGE;
                 call_switchdev_notifiers(notifier_type, dev, &info.info);
         }
 }
--- a/drivers/net/ethernet/rocker/rocker_ofdpa.c
+++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c
@@ -1939,10 +1939,10 @@ static void ofdpa_port_fdb_learn_work(st
 
         rtnl_lock();
         if (learned && removing)
-                call_switchdev_notifiers(SWITCHDEV_FDB_DEL,
+                call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE,
                                          lw->ofdpa_port->dev, &info.info);
         else if (learned && !removing)
-                call_switchdev_notifiers(SWITCHDEV_FDB_ADD,
+                call_switchdev_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE,
                                          lw->ofdpa_port->dev, &info.info);
         rtnl_unlock();
 
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -688,6 +688,25 @@ void __iomem *devm_ioremap_resource(stru
 int devm_add_action(struct device *dev, void (*action)(void *), void *data);
 void devm_remove_action(struct device *dev, void (*action)(void *), void *data);
 
+/**
+ * devm_alloc_percpu - Resource-managed alloc_percpu
+ * @dev: Device to allocate per-cpu memory for
+ * @type: Type to allocate per-cpu memory for
+ *
+ * Managed alloc_percpu. Per-cpu memory allocated with this function is
+ * automatically freed on driver detach.
+ *
+ * RETURNS:
+ * Pointer to allocated memory on success, NULL on failure.
+ */
+#define devm_alloc_percpu(dev, type) \
+        ((typeof(type) __percpu *)__devm_alloc_percpu((dev), sizeof(type), \
+                                                      __alignof__(type)))
+
+void __percpu *__devm_alloc_percpu(struct device *dev, size_t size,
+                                   size_t align);
+void devm_free_percpu(struct device *dev, void __percpu *pdata);
+
 static inline int devm_add_action_or_reset(struct device *dev,
                                            void (*action)(void *), void *data)
 {
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -164,6 +164,11 @@ int dma_mmap_from_coherent(struct device
 
 #ifdef CONFIG_HAS_DMA
 #include <asm/dma-mapping.h>
+static inline void set_dma_ops(struct device *dev,
+                               struct dma_map_ops *dma_ops)
+{
+        dev->archdata.dma_ops = dma_ops;
+}
 #else
 /*
  * Define the dma api to allow compilation but not linking of
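
The new set_dma_ops() helper gives bus and platform code a generic way to attach DMA operations to a device instead of poking dev->archdata directly. A hypothetical sketch (my_bus_dma_ops is assumed to be defined elsewhere):

static struct dma_map_ops my_bus_dma_ops; /* hypothetical, populated elsewhere */

static void my_bus_setup_device(struct device *dev)
{
        set_dma_ops(dev, &my_bus_dma_ops);
}
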
--- /dev/null
+++ b/include/linux/fsl/svr.h
@@ -0,0 +1,97 @@
+/*
+ * MPC85xx cpu type detection
+ *
+ * Copyright 2011-2012 Freescale Semiconductor, Inc.
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef FSL_SVR_H
+#define FSL_SVR_H
+
+#define SVR_REV(svr)    ((svr) & 0xFF)          /* SOC design revision */
+#define SVR_MAJ(svr)    (((svr) >> 4) & 0xF)    /* Major revision field */
+#define SVR_MIN(svr)    (((svr) >> 0) & 0xF)    /* Minor revision field */
+
+/* Some parts define SVR[0:23] as the SOC version */
+#define SVR_SOC_VER(svr) (((svr) >> 8) & 0xFFF7FF) /* SOC Version fields */
+
+#define SVR_8533        0x803400
+#define SVR_8535        0x803701
+#define SVR_8536        0x803700
+#define SVR_8540        0x803000
+#define SVR_8541        0x807200
+#define SVR_8543        0x803200
+#define SVR_8544        0x803401
+#define SVR_8545        0x803102
+#define SVR_8547        0x803101
+#define SVR_8548        0x803100
+#define SVR_8555        0x807100
+#define SVR_8560        0x807000
+#define SVR_8567        0x807501
+#define SVR_8568        0x807500
+#define SVR_8569        0x808000
+#define SVR_8572        0x80E000
+#define SVR_P1010       0x80F100
+#define SVR_P1011       0x80E500
+#define SVR_P1012       0x80E501
+#define SVR_P1013       0x80E700
+#define SVR_P1014       0x80F101
+#define SVR_P1017       0x80F700
+#define SVR_P1020       0x80E400
+#define SVR_P1021       0x80E401
+#define SVR_P1022       0x80E600
+#define SVR_P1023       0x80F600
+#define SVR_P1024       0x80E402
+#define SVR_P1025       0x80E403
+#define SVR_P2010       0x80E300
+#define SVR_P2020       0x80E200
+#define SVR_P2040       0x821000
+#define SVR_P2041       0x821001
+#define SVR_P3041       0x821103
+#define SVR_P4040       0x820100
+#define SVR_P4080       0x820000
+#define SVR_P5010       0x822100
+#define SVR_P5020       0x822000
+#define SVR_P5021       0x820500
+#define SVR_P5040       0x820400
+#define SVR_T4240       0x824000
+#define SVR_T4120       0x824001
+#define SVR_T4160       0x824100
+#define SVR_T4080       0x824102
+#define SVR_C291        0x850000
+#define SVR_C292        0x850020
+#define SVR_C293        0x850030
+#define SVR_B4860       0x868000
+#define SVR_G4860       0x868001
+#define SVR_G4060       0x868003
+#define SVR_B4440       0x868100
+#define SVR_G4440       0x868101
+#define SVR_B4420       0x868102
+#define SVR_B4220       0x868103
+#define SVR_T1040       0x852000
+#define SVR_T1041       0x852001
+#define SVR_T1042       0x852002
+#define SVR_T1020       0x852100
+#define SVR_T1021       0x852101
+#define SVR_T1022       0x852102
+#define SVR_T1023       0x854100
+#define SVR_T1024       0x854000
+#define SVR_T2080       0x853000
+#define SVR_T2081       0x853100
+
+#define SVR_8610        0x80A000
+#define SVR_8641        0x809000
+#define SVR_8641D       0x809001
+
+#define SVR_9130        0x860001
+#define SVR_9131        0x860000
+#define SVR_9132        0x861000
+#define SVR_9232        0x861400
+
+#define SVR_Unknown     0xFFFFFF
+
+#endif
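
To illustrate the field layout with a hypothetical SVR value of 0x85300010: bits [0:23] select the SoC and the low byte is the design revision, so the macros decode it as follows.

#include <linux/fsl/svr.h>

static void my_print_soc(u32 svr) /* e.g. svr == 0x85300010 */
{
        /* SVR_SOC_VER(svr) == (svr >> 8) & 0xFFF7FF == 0x853000 */
        if (SVR_SOC_VER(svr) == SVR_T2080)
                pr_info("T2080 rev %d.%d\n", /* prints "rev 1.0" here */
                        SVR_MAJ(svr), SVR_MIN(svr));
}
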
--- a/include/linux/fsl_devices.h
+++ b/include/linux/fsl_devices.h
@@ -99,7 +99,10 @@ struct fsl_usb2_platform_data {
         unsigned suspended:1;
         unsigned already_suspended:1;
         unsigned has_fsl_erratum_a007792:1;
+        unsigned has_fsl_erratum_14:1;
         unsigned has_fsl_erratum_a005275:1;
+        unsigned has_fsl_erratum_a006918:1;
+        unsigned has_fsl_erratum_a005697:1;
         unsigned check_phy_clk_valid:1;
 
         /* register save area for suspend/resume */
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -46,6 +46,7 @@ struct pt_regs;
  * @rcu: rcu head for delayed free
  * @kobj: kobject used to represent this struct in sysfs
  * @dir: /proc/irq/ procfs entry
+ * @debugfs_file: dentry for the debugfs file
  * @name: flow handler name for /proc/interrupts output
  */
 struct irq_desc {
@@ -88,6 +89,9 @@ struct irq_desc {
 #ifdef CONFIG_PROC_FS
         struct proc_dir_entry *dir;
 #endif
+#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
+        struct dentry *debugfs_file;
+#endif
 #ifdef CONFIG_SPARSE_IRQ
         struct rcu_head rcu;
         struct kobject kobj;
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -138,6 +138,7 @@ struct irq_domain_chip_generic;
  *      setting up one or more generic chips for interrupt controllers
  *      drivers using the generic chip library which uses this pointer.
  * @parent: Pointer to parent irq_domain to support hierarchy irq_domains
+ * @debugfs_file: dentry for the domain debugfs file
  *
  * Revmap data, used internally by irq_domain
  * @revmap_direct_max_irq: The largest hwirq that can be set for controllers that
@@ -160,6 +161,9 @@ struct irq_domain {
 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
         struct irq_domain *parent;
 #endif
+#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
+        struct dentry *debugfs_file;
+#endif
 
         /* reverse map data. The linear map gets appended to the irq_domain */
         irq_hw_number_t hwirq_max;
@@ -174,8 +178,8 @@ enum {
         /* Irq domain is hierarchical */
         IRQ_DOMAIN_FLAG_HIERARCHY = (1 << 0),
 
-        /* Core calls alloc/free recursive through the domain hierarchy. */
-        IRQ_DOMAIN_FLAG_AUTO_RECURSIVE = (1 << 1),
+        /* Irq domain name was allocated in __irq_domain_add() */
+        IRQ_DOMAIN_NAME_ALLOCATED = (1 << 6),
 
         /* Irq domain is an IPI domain with virq per cpu */
         IRQ_DOMAIN_FLAG_IPI_PER_CPU = (1 << 2),
@@ -231,6 +235,9 @@ static inline bool is_fwnode_irqchip(str
         return fwnode && fwnode->type == FWNODE_IRQCHIP;
 }
 
+extern void irq_domain_update_bus_token(struct irq_domain *domain,
+                                        enum irq_domain_bus_token bus_token);
+
 static inline
 struct irq_domain *irq_find_matching_fwnode(struct fwnode_handle *fwnode,
                                             enum irq_domain_bus_token bus_token)
@@ -403,7 +410,7 @@ static inline int irq_domain_alloc_irqs(
                                          NULL);
 }
 
-extern int irq_domain_alloc_irqs_recursive(struct irq_domain *domain,
+extern int irq_domain_alloc_irqs_hierarchy(struct irq_domain *domain,
                                            unsigned int irq_base,
                                            unsigned int nr_irqs, void *arg);
 extern int irq_domain_set_hwirq_and_chip(struct irq_domain *domain,
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -74,6 +74,7 @@ enum {
         NETIF_F_BUSY_POLL_BIT,          /* Busy poll */
 
         NETIF_F_HW_TC_BIT,              /* Offload TC infrastructure */
+        NETIF_F_HW_ACCEL_MQ_BIT,        /* Hardware-accelerated multiqueue */
 
         /*
          * Add your fresh new feature above and remember to update
@@ -136,6 +137,7 @@ enum {
 #define NETIF_F_HW_L2FW_DOFFLOAD __NETIF_F(HW_L2FW_DOFFLOAD)
 #define NETIF_F_BUSY_POLL __NETIF_F(BUSY_POLL)
 #define NETIF_F_HW_TC __NETIF_F(HW_TC)
+#define NETIF_F_HW_ACCEL_MQ __NETIF_F(HW_ACCEL_MQ)
 
 #define for_each_netdev_feature(mask_addr, bit) \
         for_each_set_bit(bit, (unsigned long *)mask_addr, NETDEV_FEATURE_COUNT)
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -930,7 +930,7 @@ struct netdev_xdp {
  *      3. Update dev->stats asynchronously and atomically, and define
  *         neither operation.
  *
- * bool (*ndo_has_offload_stats)(int attr_id)
+ * bool (*ndo_has_offload_stats)(const struct net_device *dev, int attr_id)
  *      Return true if this device supports offload stats of this attr_id.
  *
  * int (*ndo_get_offload_stats)(int attr_id, const struct net_device *dev,
@@ -1167,7 +1167,7 @@ struct net_device_ops {
 
         struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
                                                      struct rtnl_link_stats64 *storage);
-        bool (*ndo_has_offload_stats)(int attr_id);
+        bool (*ndo_has_offload_stats)(const struct net_device *dev, int attr_id);
         int (*ndo_get_offload_stats)(int attr_id,
                                      const struct net_device *dev,
                                      void *attr_data);
@@ -1509,6 +1509,8 @@ enum netdev_priv_flags {
  * @if_port: Selectable AUI, TP, ...
  * @dma: DMA channel
  * @mtu: Interface MTU value
+ * @min_mtu: Interface Minimum MTU value
+ * @max_mtu: Interface Maximum MTU value
  * @type: Interface hardware type
  * @hard_header_len: Maximum hardware header length.
  * @min_header_len: Minimum hardware header length
@@ -1735,6 +1737,8 @@ struct net_device {
         unsigned char dma;
 
         unsigned int mtu;
+        unsigned int min_mtu;
+        unsigned int max_mtu;
         unsigned short type;
         unsigned short hard_header_len;
         unsigned short min_header_len;
@@ -1938,6 +1942,8 @@ int netdev_set_prio_tc_map(struct net_de
         return 0;
 }
 
+int netdev_txq_to_tc(struct net_device *dev, unsigned int txq);
+
 static inline
 void netdev_reset_tc(struct net_device *dev)
 {
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -908,6 +908,7 @@ void kfree_skb(struct sk_buff *skb);
 void kfree_skb_list(struct sk_buff *segs);
 void skb_tx_error(struct sk_buff *skb);
 void consume_skb(struct sk_buff *skb);
+void skb_recycle(struct sk_buff *skb);
 void __kfree_skb(struct sk_buff *skb);
 extern struct kmem_cache *skbuff_head_cache;
 
@@ -3081,6 +3082,7 @@ static inline void skb_free_datagram_loc
 }
 int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags);
 int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
+void copy_skb_header(struct sk_buff *new, const struct sk_buff *old);
 int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len);
 __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to,
                               int len, __wsum csum);
--- a/include/linux/sys_soc.h
+++ b/include/linux/sys_soc.h
@@ -13,6 +13,7 @@ struct soc_device_attribute {
         const char *family;
         const char *revision;
         const char *soc_id;
+        const void *data;
 };
 
 /**
@@ -34,4 +35,6 @@ void soc_device_unregister(struct soc_de
  */
 struct device *soc_device_to_device(struct soc_device *soc);
 
+const struct soc_device_attribute *soc_device_match(
+        const struct soc_device_attribute *matches);
 #endif /* __SOC_BUS_H */
--- a/include/net/switchdev.h
+++ b/include/net/switchdev.h
@@ -46,6 +46,7 @@ enum switchdev_attr_id {
         SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
         SWITCHDEV_ATTR_ID_PORT_STP_STATE,
         SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS,
+        SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS_SUPPORT,
         SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME,
         SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
 };
@@ -60,6 +61,7 @@ struct switchdev_attr {
                 struct netdev_phys_item_id ppid;        /* PORT_PARENT_ID */
                 u8 stp_state;                           /* PORT_STP_STATE */
                 unsigned long brport_flags;             /* PORT_BRIDGE_FLAGS */
+                unsigned long brport_flags_support;     /* PORT_BRIDGE_FLAGS_SUPPORT */
                 clock_t ageing_time;                    /* BRIDGE_AGEING_TIME */
                 bool vlan_filtering;                    /* BRIDGE_VLAN_FILTERING */
         } u;
@@ -149,8 +151,10 @@ struct switchdev_ops {
 };
 
 enum switchdev_notifier_type {
-        SWITCHDEV_FDB_ADD = 1,
-        SWITCHDEV_FDB_DEL,
+        SWITCHDEV_FDB_ADD_TO_BRIDGE = 1,
+        SWITCHDEV_FDB_DEL_TO_BRIDGE,
+        SWITCHDEV_FDB_ADD_TO_DEVICE,
+        SWITCHDEV_FDB_DEL_TO_DEVICE,
 };
 
 struct switchdev_notifier_info {
--- a/include/uapi/linux/if_ether.h
+++ b/include/uapi/linux/if_ether.h
@@ -36,6 +36,7 @@
 #define ETH_DATA_LEN 1500 /* Max. octets in payload */
 #define ETH_FRAME_LEN 1514 /* Max. octets in frame sans FCS */
 #define ETH_FCS_LEN 4 /* Octets in the FCS */
+#define ETH_MIN_MTU 68 /* Min IPv4 MTU per RFC791 */
 
 /*
  * These are the defined Ethernet Protocol ID's.
--- a/kernel/irq/Kconfig
+++ b/kernel/irq/Kconfig
@@ -108,4 +108,15 @@ config SPARSE_IRQ
 
           If you don't know what to do here, say N.
 
+config GENERIC_IRQ_DEBUGFS
+        bool "Expose irq internals in debugfs"
+        depends on DEBUG_FS
+        default n
+        ---help---
+
+          Exposes internal state information through debugfs. Mostly for
+          developers and debugging of hard to diagnose interrupt problems.
+
+          If you don't know what to do here, say N.
+
 endmenu
--- a/kernel/irq/Makefile
+++ b/kernel/irq/Makefile
@@ -10,3 +10,4 @@ obj-$(CONFIG_PM_SLEEP) += pm.o
 obj-$(CONFIG_GENERIC_MSI_IRQ) += msi.o
 obj-$(CONFIG_GENERIC_IRQ_IPI) += ipi.o
 obj-$(CONFIG_SMP) += affinity.o
+obj-$(CONFIG_GENERIC_IRQ_DEBUGFS) += debugfs.o
--- /dev/null
+++ b/kernel/irq/debugfs.c
@@ -0,0 +1,215 @@
+/*
+ * Copyright 2017 Thomas Gleixner <tglx@linutronix.de>
+ *
+ * This file is licensed under the GPL V2.
+ */
+#include <linux/debugfs.h>
+#include <linux/irqdomain.h>
+#include <linux/irq.h>
+
+#include "internals.h"
+
+static struct dentry *irq_dir;
+
+struct irq_bit_descr {
+        unsigned int mask;
+        char *name;
+};
+#define BIT_MASK_DESCR(m) { .mask = m, .name = #m }
+
+static void irq_debug_show_bits(struct seq_file *m, int ind, unsigned int state,
+                                const struct irq_bit_descr *sd, int size)
+{
+        int i;
+
+        for (i = 0; i < size; i++, sd++) {
+                if (state & sd->mask)
+                        seq_printf(m, "%*s%s\n", ind + 12, "", sd->name);
+        }
+}
+
+#ifdef CONFIG_SMP
+static void irq_debug_show_masks(struct seq_file *m, struct irq_desc *desc)
+{
+        struct irq_data *data = irq_desc_get_irq_data(desc);
+        struct cpumask *msk;
+
+        msk = irq_data_get_affinity_mask(data);
+        seq_printf(m, "affinity: %*pbl\n", cpumask_pr_args(msk));
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+        msk = desc->pending_mask;
+        seq_printf(m, "pending: %*pbl\n", cpumask_pr_args(msk));
+#endif
+}
+#else
+static void irq_debug_show_masks(struct seq_file *m, struct irq_desc *desc) { }
+#endif
+
+static const struct irq_bit_descr irqchip_flags[] = {
+        BIT_MASK_DESCR(IRQCHIP_SET_TYPE_MASKED),
+        BIT_MASK_DESCR(IRQCHIP_EOI_IF_HANDLED),
+        BIT_MASK_DESCR(IRQCHIP_MASK_ON_SUSPEND),
+        BIT_MASK_DESCR(IRQCHIP_ONOFFLINE_ENABLED),
+        BIT_MASK_DESCR(IRQCHIP_SKIP_SET_WAKE),
+        BIT_MASK_DESCR(IRQCHIP_ONESHOT_SAFE),
+        BIT_MASK_DESCR(IRQCHIP_EOI_THREADED),
+};
+
+static void
+irq_debug_show_chip(struct seq_file *m, struct irq_data *data, int ind)
+{
+        struct irq_chip *chip = data->chip;
+
+        if (!chip) {
+                seq_printf(m, "chip: None\n");
+                return;
+        }
+        seq_printf(m, "%*schip: %s\n", ind, "", chip->name);
+        seq_printf(m, "%*sflags: 0x%lx\n", ind + 1, "", chip->flags);
+        irq_debug_show_bits(m, ind, chip->flags, irqchip_flags,
+                            ARRAY_SIZE(irqchip_flags));
+}
+
+static void
+irq_debug_show_data(struct seq_file *m, struct irq_data *data, int ind)
+{
+        seq_printf(m, "%*sdomain: %s\n", ind, "",
+                   data->domain ? data->domain->name : "");
+        seq_printf(m, "%*shwirq: 0x%lx\n", ind + 1, "", data->hwirq);
+        irq_debug_show_chip(m, data, ind + 1);
+#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
+        if (!data->parent_data)
+                return;
+        seq_printf(m, "%*sparent:\n", ind + 1, "");
+        irq_debug_show_data(m, data->parent_data, ind + 4);
+#endif
+}
+
+static const struct irq_bit_descr irqdata_states[] = {
+        BIT_MASK_DESCR(IRQ_TYPE_EDGE_RISING),
+        BIT_MASK_DESCR(IRQ_TYPE_EDGE_FALLING),
+        BIT_MASK_DESCR(IRQ_TYPE_LEVEL_HIGH),
+        BIT_MASK_DESCR(IRQ_TYPE_LEVEL_LOW),
+        BIT_MASK_DESCR(IRQD_LEVEL),
+
+        BIT_MASK_DESCR(IRQD_ACTIVATED),
+        BIT_MASK_DESCR(IRQD_IRQ_STARTED),
+        BIT_MASK_DESCR(IRQD_IRQ_DISABLED),
+        BIT_MASK_DESCR(IRQD_IRQ_MASKED),
+        BIT_MASK_DESCR(IRQD_IRQ_INPROGRESS),
+
+        BIT_MASK_DESCR(IRQD_PER_CPU),
+        BIT_MASK_DESCR(IRQD_NO_BALANCING),
+
+        BIT_MASK_DESCR(IRQD_MOVE_PCNTXT),
+        BIT_MASK_DESCR(IRQD_AFFINITY_SET),
+        BIT_MASK_DESCR(IRQD_SETAFFINITY_PENDING),
+        BIT_MASK_DESCR(IRQD_AFFINITY_MANAGED),
+        BIT_MASK_DESCR(IRQD_MANAGED_SHUTDOWN),
+
+        BIT_MASK_DESCR(IRQD_FORWARDED_TO_VCPU),
+
+        BIT_MASK_DESCR(IRQD_WAKEUP_STATE),
+        BIT_MASK_DESCR(IRQD_WAKEUP_ARMED),
+};
+
+static const struct irq_bit_descr irqdesc_states[] = {
+        BIT_MASK_DESCR(_IRQ_NOPROBE),
+        BIT_MASK_DESCR(_IRQ_NOREQUEST),
+        BIT_MASK_DESCR(_IRQ_NOTHREAD),
+        BIT_MASK_DESCR(_IRQ_NOAUTOEN),
+        BIT_MASK_DESCR(_IRQ_NESTED_THREAD),
+        BIT_MASK_DESCR(_IRQ_PER_CPU_DEVID),
+        BIT_MASK_DESCR(_IRQ_IS_POLLED),
+        BIT_MASK_DESCR(_IRQ_DISABLE_UNLAZY),
+};
+
+static const struct irq_bit_descr irqdesc_istates[] = {
+        BIT_MASK_DESCR(IRQS_AUTODETECT),
+        BIT_MASK_DESCR(IRQS_SPURIOUS_DISABLED),
+        BIT_MASK_DESCR(IRQS_POLL_INPROGRESS),
+        BIT_MASK_DESCR(IRQS_ONESHOT),
+        BIT_MASK_DESCR(IRQS_REPLAY),
+        BIT_MASK_DESCR(IRQS_WAITING),
+        BIT_MASK_DESCR(IRQS_PENDING),
+        BIT_MASK_DESCR(IRQS_SUSPENDED),
+};
+
+
+static int irq_debug_show(struct seq_file *m, void *p)
+{
+        struct irq_desc *desc = m->private;
+        struct irq_data *data;
+
+        raw_spin_lock_irq(&desc->lock);
+        data = irq_desc_get_irq_data(desc);
+        seq_printf(m, "handler: %pf\n", desc->handle_irq);
+        seq_printf(m, "status: 0x%08x\n", desc->status_use_accessors);
+        irq_debug_show_bits(m, 0, desc->status_use_accessors, irqdesc_states,
+                            ARRAY_SIZE(irqdesc_states));
+        seq_printf(m, "istate: 0x%08x\n", desc->istate);
+        irq_debug_show_bits(m, 0, desc->istate, irqdesc_istates,
+                            ARRAY_SIZE(irqdesc_istates));
+        seq_printf(m, "ddepth: %u\n", desc->depth);
+        seq_printf(m, "wdepth: %u\n", desc->wake_depth);
+        seq_printf(m, "dstate: 0x%08x\n", irqd_get(data));
+        irq_debug_show_bits(m, 0, irqd_get(data), irqdata_states,
+                            ARRAY_SIZE(irqdata_states));
+        seq_printf(m, "node: %d\n", irq_data_get_node(data));
+        irq_debug_show_masks(m, desc);
+        irq_debug_show_data(m, data, 0);
+        raw_spin_unlock_irq(&desc->lock);
+        return 0;
+}
+
+static int irq_debug_open(struct inode *inode, struct file *file)
+{
+        return single_open(file, irq_debug_show, inode->i_private);
+}
+
+static const struct file_operations dfs_irq_ops = {
+        .open = irq_debug_open,
+        .read = seq_read,
+        .llseek = seq_lseek,
+        .release = single_release,
+};
+
+void irq_add_debugfs_entry(unsigned int irq, struct irq_desc *desc)
+{
+        char name[10];
+
+        if (!irq_dir || !desc || desc->debugfs_file)
+                return;
+
+        sprintf(name, "%d", irq);
+        desc->debugfs_file = debugfs_create_file(name, 0444, irq_dir, desc,
+                                                 &dfs_irq_ops);
+}
+
+void irq_remove_debugfs_entry(struct irq_desc *desc)
+{
+        if (desc->debugfs_file)
+                debugfs_remove(desc->debugfs_file);
+}
+
+static int __init irq_debugfs_init(void)
+{
+        struct dentry *root_dir;
+        int irq;
+
+        root_dir = debugfs_create_dir("irq", NULL);
+        if (!root_dir)
+                return -ENOMEM;
+
+        irq_domain_debugfs_init(root_dir);
+
+        irq_dir = debugfs_create_dir("irqs", root_dir);
+
+        irq_lock_sparse();
+        for_each_active_irq(irq)
+                irq_add_debugfs_entry(irq, irq_to_desc(irq));
+        irq_unlock_sparse();
+
+        return 0;
+}
+__initcall(irq_debugfs_init);
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -169,6 +169,11 @@ irq_put_desc_unlock(struct irq_desc *des
 
 #define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors)
 
+static inline unsigned int irqd_get(struct irq_data *d)
+{
+        return __irqd_to_state(d);
+}
+
 /*
  * Manipulation functions for irq_data.state
  */
@@ -226,3 +231,20 @@ irq_pm_install_action(struct irq_desc *d
 static inline void
 irq_pm_remove_action(struct irq_desc *desc, struct irqaction *action) { }
 #endif
+
+#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
+void irq_add_debugfs_entry(unsigned int irq, struct irq_desc *desc);
+void irq_remove_debugfs_entry(struct irq_desc *desc);
+# ifdef CONFIG_IRQ_DOMAIN
+void irq_domain_debugfs_init(struct dentry *root);
+# else
+static inline void irq_domain_debugfs_init(struct dentry *root) { }
+# endif
+#else /* CONFIG_GENERIC_IRQ_DEBUGFS */
+static inline void irq_add_debugfs_entry(unsigned int irq, struct irq_desc *d)
+{
+}
+static inline void irq_remove_debugfs_entry(struct irq_desc *d)
+{
+}
+#endif /* CONFIG_GENERIC_IRQ_DEBUGFS */
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -394,6 +394,7 @@ static void free_desc(unsigned int irq)
 {
         struct irq_desc *desc = irq_to_desc(irq);
 
+        irq_remove_debugfs_entry(desc);
         unregister_irq_proc(irq, desc);
 
         /*
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -31,6 +31,14 @@ struct irqchip_fwid {
         void *data;
 };
 
+#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
+static void debugfs_add_domain_dir(struct irq_domain *d);
+static void debugfs_remove_domain_dir(struct irq_domain *d);
+#else
+static inline void debugfs_add_domain_dir(struct irq_domain *d) { }
+static inline void debugfs_remove_domain_dir(struct irq_domain *d) { }
+#endif
+
 /**
  * irq_domain_alloc_fwnode - Allocate a fwnode_handle suitable for
  * identifying an irq domain
@@ -117,6 +125,7 @@ struct irq_domain *__irq_domain_add(stru
         irq_domain_check_hierarchy(domain);
 
         mutex_lock(&irq_domain_mutex);
+        debugfs_add_domain_dir(domain);
         list_add(&domain->link, &irq_domain_list);
         mutex_unlock(&irq_domain_mutex);
 
@@ -136,6 +145,7 @@ EXPORT_SYMBOL_GPL(__irq_domain_add);
 void irq_domain_remove(struct irq_domain *domain)
 {
         mutex_lock(&irq_domain_mutex);
+        debugfs_remove_domain_dir(domain);
 
         WARN_ON(!radix_tree_empty(&domain->revmap_tree));
 
@@ -156,6 +166,37 @@ void irq_domain_remove(struct irq_domain
 }
 EXPORT_SYMBOL_GPL(irq_domain_remove);
 
+void irq_domain_update_bus_token(struct irq_domain *domain,
+                                 enum irq_domain_bus_token bus_token)
+{
+        char *name;
+
+        if (domain->bus_token == bus_token)
+                return;
+
+        mutex_lock(&irq_domain_mutex);
+
+        domain->bus_token = bus_token;
+
+        name = kasprintf(GFP_KERNEL, "%s-%d", domain->name, bus_token);
+        if (!name) {
+                mutex_unlock(&irq_domain_mutex);
+                return;
+        }
+
+        debugfs_remove_domain_dir(domain);
+
+        if (domain->flags & IRQ_DOMAIN_NAME_ALLOCATED)
+                kfree(domain->name);
+        else
+                domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED;
+
+        domain->name = name;
+        debugfs_add_domain_dir(domain);
+
+        mutex_unlock(&irq_domain_mutex);
+}
+
 /**
  * irq_domain_add_simple() - Register an irq_domain and optionally map a range of irqs
  * @of_node: pointer to interrupt controller's device tree node.
@@ -1164,43 +1205,18 @@ void irq_domain_free_irqs_top(struct irq
         irq_domain_free_irqs_common(domain, virq, nr_irqs);
 }
 
-static bool irq_domain_is_auto_recursive(struct irq_domain *domain)
-{
-        return domain->flags & IRQ_DOMAIN_FLAG_AUTO_RECURSIVE;
-}
-
-static void irq_domain_free_irqs_recursive(struct irq_domain *domain,
+static void irq_domain_free_irqs_hierarchy(struct irq_domain *domain,
                                            unsigned int irq_base,
                                            unsigned int nr_irqs)
 {
         domain->ops->free(domain, irq_base, nr_irqs);
-        if (irq_domain_is_auto_recursive(domain)) {
-                BUG_ON(!domain->parent);
-                irq_domain_free_irqs_recursive(domain->parent, irq_base,
-                                               nr_irqs);
-        }
 }
 
-int irq_domain_alloc_irqs_recursive(struct irq_domain *domain,
+int irq_domain_alloc_irqs_hierarchy(struct irq_domain *domain,
                                     unsigned int irq_base,
                                     unsigned int nr_irqs, void *arg)
 {
-        int ret = 0;
-        struct irq_domain *parent = domain->parent;
-        bool recursive = irq_domain_is_auto_recursive(domain);
-
-        BUG_ON(recursive && !parent);
-        if (recursive)
-                ret = irq_domain_alloc_irqs_recursive(parent, irq_base,
-                                                      nr_irqs, arg);
-        if (ret < 0)
-                return ret;
-
-        ret = domain->ops->alloc(domain, irq_base, nr_irqs, arg);
-        if (ret < 0 && recursive)
-                irq_domain_free_irqs_recursive(parent, irq_base, nr_irqs);
-
-        return ret;
+        return domain->ops->alloc(domain, irq_base, nr_irqs, arg);
 }
 
 /**
@@ -1261,7 +1277,7 @@ int __irq_domain_alloc_irqs(struct irq_d
         }
 
         mutex_lock(&irq_domain_mutex);
-        ret = irq_domain_alloc_irqs_recursive(domain, virq, nr_irqs, arg);
+        ret = irq_domain_alloc_irqs_hierarchy(domain, virq, nr_irqs, arg);
         if (ret < 0) {
                 mutex_unlock(&irq_domain_mutex);
                 goto out_free_irq_data;
@@ -1296,7 +1312,7 @@ void irq_domain_free_irqs(unsigned int v
         mutex_lock(&irq_domain_mutex);
         for (i = 0; i < nr_irqs; i++)
                 irq_domain_remove_irq(virq + i);
-        irq_domain_free_irqs_recursive(data->domain, virq, nr_irqs);
+        irq_domain_free_irqs_hierarchy(data->domain, virq, nr_irqs);
         mutex_unlock(&irq_domain_mutex);
 
         irq_domain_free_irq_data(virq, nr_irqs);
@@ -1316,15 +1332,11 @@ int irq_domain_alloc_irqs_parent(struct
                             unsigned int irq_base, unsigned int nr_irqs,
                             void *arg)
 {
-        /* irq_domain_alloc_irqs_recursive() has called parent's alloc() */
-        if (irq_domain_is_auto_recursive(domain))
-                return 0;
+        if (!domain->parent)
+                return -ENOSYS;
 
-        domain = domain->parent;
-        if (domain)
-                return irq_domain_alloc_irqs_recursive(domain, irq_base,
-                                                       nr_irqs, arg);
-        return -ENOSYS;
+        return irq_domain_alloc_irqs_hierarchy(domain->parent, irq_base,
+                                               nr_irqs, arg);
 }
 EXPORT_SYMBOL_GPL(irq_domain_alloc_irqs_parent);
 
@@ -1339,10 +1351,10 @@ EXPORT_SYMBOL_GPL(irq_domain_alloc_irqs_
 void irq_domain_free_irqs_parent(struct irq_domain *domain,
                                  unsigned int irq_base, unsigned int nr_irqs)
 {
-        /* irq_domain_free_irqs_recursive() will call parent's free */
-        if (!irq_domain_is_auto_recursive(domain) && domain->parent)
-                irq_domain_free_irqs_recursive(domain->parent, irq_base,
-                                               nr_irqs);
+        if (!domain->parent)
+                return;
+
+        irq_domain_free_irqs_hierarchy(domain->parent, irq_base, nr_irqs);
 }
 EXPORT_SYMBOL_GPL(irq_domain_free_irqs_parent);
 
@@ -1448,3 +1460,78 @@ static void irq_domain_check_hierarchy(s
 {
 }
 #endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */
+
+#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
+static struct dentry *domain_dir;
+
+static void
+irq_domain_debug_show_one(struct seq_file *m, struct irq_domain *d, int ind)
+{
+        seq_printf(m, "%*sname: %s\n", ind, "", d->name);
+        seq_printf(m, "%*ssize: %u\n", ind + 1, "",
+                   d->revmap_size + d->revmap_direct_max_irq);
+        seq_printf(m, "%*smapped: %u\n", ind + 1, "", d->mapcount);
+        seq_printf(m, "%*sflags: 0x%08x\n", ind + 1, "", d->flags);
+#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
+        if (!d->parent)
+                return;
+        seq_printf(m, "%*sparent: %s\n", ind + 1, "", d->parent->name);
+        irq_domain_debug_show_one(m, d->parent, ind + 4);
+#endif
+}
+
+static int irq_domain_debug_show(struct seq_file *m, void *p)
+{
+        struct irq_domain *d = m->private;
+
+        /* Default domain? Might be NULL */
+        if (!d) {
+                if (!irq_default_domain)
+                        return 0;
+                d = irq_default_domain;
+        }
+        irq_domain_debug_show_one(m, d, 0);
+        return 0;
+}
+
+static int irq_domain_debug_open(struct inode *inode, struct file *file)
+{
+        return single_open(file, irq_domain_debug_show, inode->i_private);
+}
+
+static const struct file_operations dfs_domain_ops = {
+        .open = irq_domain_debug_open,
+        .read = seq_read,
+        .llseek = seq_lseek,
+        .release = single_release,
+};
+
+static void debugfs_add_domain_dir(struct irq_domain *d)
+{
+        if (!d->name || !domain_dir || d->debugfs_file)
+                return;
+        d->debugfs_file = debugfs_create_file(d->name, 0444, domain_dir, d,
+                                              &dfs_domain_ops);
+}
+
+static void debugfs_remove_domain_dir(struct irq_domain *d)
+{
+        if (d->debugfs_file)
+                debugfs_remove(d->debugfs_file);
+}
+
+void __init irq_domain_debugfs_init(struct dentry *root)
+{
+        struct irq_domain *d;
+
+        domain_dir = debugfs_create_dir("domains", root);
+        if (!domain_dir)
+                return;
+
+        debugfs_create_file("default", 0444, domain_dir, NULL, &dfs_domain_ops);
+        mutex_lock(&irq_domain_mutex);
+        list_for_each_entry(d, &irq_domain_list, link)
+                debugfs_add_domain_dir(d);
+        mutex_unlock(&irq_domain_mutex);
+}
+#endif
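
The newly exported irq_domain_update_bus_token() lets an irqchip driver re-token a domain after creation; as the code above shows, the domain is renamed to "<name>-<token>" and its debugfs entry is re-created under the new name. A hedged sketch (my_msi_domain_ops is a hypothetical ops table):

static struct irq_domain *my_create_msi_domain(struct irq_domain *parent,
                                               struct fwnode_handle *fwnode)
{
        struct irq_domain *d;

        d = irq_domain_create_hierarchy(parent, 0, 0, fwnode,
                                        &my_msi_domain_ops, NULL);
        if (d)
                /* keeps the debugfs name distinct from the parent's */
                irq_domain_update_bus_token(d, DOMAIN_BUS_PLATFORM_MSI);
        return d;
}
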
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -1391,6 +1391,7 @@ __setup_irq(unsigned int irq, struct irq
                 wake_up_process(new->secondary->thread);
 
         register_irq_proc(irq, desc);
+        irq_add_debugfs_entry(irq, desc);
         new->dir = NULL;
         register_handler_proc(irq, new);
         free_cpumask_var(mask);
--- a/kernel/irq/msi.c
+++ b/kernel/irq/msi.c
@@ -310,7 +310,7 @@ int msi_domain_populate_irqs(struct irq_
 
                 ops->set_desc(arg, desc);
                 /* Assumes the domain mutex is held! */
-                ret = irq_domain_alloc_irqs_recursive(domain, virq, 1, arg);
+                ret = irq_domain_alloc_irqs_hierarchy(domain, virq, 1, arg);
                 if (ret)
                         break;
 
--- a/net/bridge/br.c
+++ b/net/bridge/br.c
@@ -138,14 +138,14 @@ static int br_switchdev_event(struct not
         br = p->br;
 
         switch (event) {
-        case SWITCHDEV_FDB_ADD:
+        case SWITCHDEV_FDB_ADD_TO_BRIDGE:
                 fdb_info = ptr;
                 err = br_fdb_external_learn_add(br, p, fdb_info->addr,
                                                 fdb_info->vid);
                 if (err)
                         err = notifier_from_errno(err);
                 break;
-        case SWITCHDEV_FDB_DEL:
+        case SWITCHDEV_FDB_DEL_TO_BRIDGE:
                 fdb_info = ptr;
                 err = br_fdb_external_learn_del(br, p, fdb_info->addr,
                                                 fdb_info->vid);
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -688,6 +688,8 @@ static void fdb_notify(struct net_bridge
         struct sk_buff *skb;
         int err = -ENOBUFS;
 
+        br_switchdev_fdb_notify(fdb, type);
+
         skb = nlmsg_new(fdb_nlmsg_size(), GFP_ATOMIC);
         if (skb == NULL)
                 goto errout;
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -1060,6 +1060,8 @@ void nbp_switchdev_frame_mark(const stru
                               struct sk_buff *skb);
 bool nbp_switchdev_allowed_egress(const struct net_bridge_port *p,
                                   const struct sk_buff *skb);
+void br_switchdev_fdb_notify(const struct net_bridge_fdb_entry *fdb,
+                             int type);
 #else
 static inline int nbp_switchdev_mark_set(struct net_bridge_port *p)
 {
@@ -1076,6 +1078,11 @@ static inline bool nbp_switchdev_allowed
 {
         return true;
 }
+
+static inline void
+br_switchdev_fdb_notify(const struct net_bridge_fdb_entry *fdb, int type)
+{
+}
 #endif /* CONFIG_NET_SWITCHDEV */
 
 #endif
--- a/net/bridge/br_switchdev.c
+++ b/net/bridge/br_switchdev.c
@@ -55,3 +55,36 @@ bool nbp_switchdev_allowed_egress(const
         return !skb->offload_fwd_mark ||
                BR_INPUT_SKB_CB(skb)->offload_fwd_mark != p->offload_fwd_mark;
 }
+
+static void
+br_switchdev_fdb_call_notifiers(bool adding, const unsigned char *mac,
+                                u16 vid, struct net_device *dev)
+{
+        struct switchdev_notifier_fdb_info info;
+        unsigned long notifier_type;
+
+        info.addr = mac;
+        info.vid = vid;
+        notifier_type = adding ? SWITCHDEV_FDB_ADD_TO_DEVICE : SWITCHDEV_FDB_DEL_TO_DEVICE;
+        call_switchdev_notifiers(notifier_type, dev, &info.info);
+}
+
+void
+br_switchdev_fdb_notify(const struct net_bridge_fdb_entry *fdb, int type)
+{
+        if (!fdb->added_by_user)
+                return;
+
+        switch (type) {
+        case RTM_DELNEIGH:
+                br_switchdev_fdb_call_notifiers(false, fdb->addr.addr,
+                                                fdb->vlan_id,
+                                                fdb->dst->dev);
+                break;
+        case RTM_NEWNEIGH:
+                br_switchdev_fdb_call_notifiers(true, fdb->addr.addr,
+                                                fdb->vlan_id,
+                                                fdb->dst->dev);
+                break;
+        }
+}
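
With the notifier split into *_TO_BRIDGE events (driver to bridge, as in the mlxsw and rocker hunks above) and *_TO_DEVICE events (bridge to driver, emitted by br_switchdev_fdb_notify() above), a switchdev driver can now be told about user-added FDB entries. An illustrative handler, with hypothetical my_* names:

static int my_switchdev_event(struct notifier_block *nb,
                              unsigned long event, void *ptr)
{
        struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
        struct switchdev_notifier_fdb_info *fdb_info = ptr;

        switch (event) {
        case SWITCHDEV_FDB_ADD_TO_DEVICE:
                /* program fdb_info->addr / fdb_info->vid into hardware */
                netdev_dbg(dev, "fdb add %pM vid %u\n",
                           fdb_info->addr, fdb_info->vid);
                break;
        case SWITCHDEV_FDB_DEL_TO_DEVICE:
                /* remove the entry from hardware */
                break;
        }
        return NOTIFY_DONE;
}

static struct notifier_block my_switchdev_nb = {
        .notifier_call = my_switchdev_event,
}; /* paired with register_switchdev_notifier(&my_switchdev_nb) at init */
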
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1990,6 +1990,23 @@ static void netif_setup_tc(struct net_de
         }
 }
 
+int netdev_txq_to_tc(struct net_device *dev, unsigned int txq)
+{
+        if (dev->num_tc) {
+                struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
+                int i;
+
+                for (i = 0; i < TC_MAX_QUEUE; i++, tc++) {
+                        if ((txq - tc->offset) < tc->count)
+                                return i;
+                }
+
+                return -1;
+        }
+
+        return 0;
+}
+
 #ifdef CONFIG_XPS
 static DEFINE_MUTEX(xps_map_mutex);
 #define xmap_dereference(P) \
@@ -6656,9 +6673,18 @@ int dev_set_mtu(struct net_device *dev,
         if (new_mtu == dev->mtu)
                 return 0;
 
-        /* MTU must be positive. */
-        if (new_mtu < 0)
+        /* MTU must be positive, and in range */
+        if (new_mtu < 0 || new_mtu < dev->min_mtu) {
+                net_err_ratelimited("%s: Invalid MTU %d requested, hw min %d\n",
+                                    dev->name, new_mtu, dev->min_mtu);
                 return -EINVAL;
+        }
+
+        if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) {
+                net_err_ratelimited("%s: Invalid MTU %d requested, hw max %d\n",
+                                    dev->name, new_mtu, dev->max_mtu);
+                return -EINVAL;
+        }
 
         if (!netif_device_present(dev))
                 return -ENODEV;
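
dev_set_mtu() now rejects values outside [min_mtu, max_mtu] instead of only negative ones, so a driver states its range once at setup time rather than checking in ndo_change_mtu(). A hypothetical fragment (the jumbo limit is made up):

static void my_netdev_setup(struct net_device *dev)
{
        ether_setup(dev);
        dev->min_mtu = ETH_MIN_MTU; /* 68, the new if_ether.h constant */
        dev->max_mtu = 9600;        /* hypothetical hardware jumbo limit */
}

Since min_mtu and max_mtu default to 0, drivers that set neither keep the old behaviour.
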
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -1021,7 +1021,6 @@ static ssize_t show_trans_timeout(struct
         return sprintf(buf, "%lu", trans_timeout);
 }
 
-#ifdef CONFIG_XPS
 static unsigned int get_netdev_queue_index(struct netdev_queue *queue)
 {
         struct net_device *dev = queue->dev;
@@ -1033,6 +1032,21 @@ static unsigned int get_netdev_queue_ind
         return i;
 }
 
+static ssize_t show_traffic_class(struct netdev_queue *queue,
+                                  struct netdev_queue_attribute *attribute,
+                                  char *buf)
+{
+        struct net_device *dev = queue->dev;
+        int index = get_netdev_queue_index(queue);
+        int tc = netdev_txq_to_tc(dev, index);
+
+        if (tc < 0)
+                return -EINVAL;
+
+        return sprintf(buf, "%u\n", tc);
+}
+
+#ifdef CONFIG_XPS
 static ssize_t show_tx_maxrate(struct netdev_queue *queue,
                                struct netdev_queue_attribute *attribute,
                                char *buf)
@@ -1075,6 +1089,9 @@ static struct netdev_queue_attribute que
 static struct netdev_queue_attribute queue_trans_timeout =
         __ATTR(tx_timeout, S_IRUGO, show_trans_timeout, NULL);
 
+static struct netdev_queue_attribute queue_traffic_class =
+        __ATTR(traffic_class, S_IRUGO, show_traffic_class, NULL);
+
 #ifdef CONFIG_BQL
 /*
  * Byte queue limits sysfs structures and functions.
@@ -1260,6 +1277,7 @@ static struct netdev_queue_attribute xps
 
 static struct attribute *netdev_queue_default_attrs[] = {
         &queue_trans_timeout.attr,
+        &queue_traffic_class.attr,
 #ifdef CONFIG_XPS
         &xps_cpus_attribute.attr,
         &queue_tx_maxrate.attr,
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -3706,7 +3706,7 @@ static int rtnl_get_offload_stats(struct
                 if (!size)
                         continue;
 
-                if (!dev->netdev_ops->ndo_has_offload_stats(attr_id))
+                if (!dev->netdev_ops->ndo_has_offload_stats(dev, attr_id))
                         continue;
 
                 attr = nla_reserve_64bit(skb, attr_id, size,
@@ -3747,7 +3747,7 @@ static int rtnl_get_offload_stats_size(c
 
         for (attr_id = IFLA_OFFLOAD_XSTATS_FIRST;
              attr_id <= IFLA_OFFLOAD_XSTATS_MAX; attr_id++) {
-                if (!dev->netdev_ops->ndo_has_offload_stats(attr_id))
+                if (!dev->netdev_ops->ndo_has_offload_stats(dev, attr_id))
                         continue;
                 size = rtnl_get_offload_stats_attr_size(attr_id);
                 nla_size += nla_total_size_64bit(size);
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -842,6 +842,32 @@ void napi_consume_skb(struct sk_buff *sk
 }
 EXPORT_SYMBOL(napi_consume_skb);
 
+/**
+ * skb_recycle - clean up an skb for reuse
+ * @skb: buffer
+ *
+ * Recycles the skb to be reused as a receive buffer. This
+ * function does any necessary reference count dropping, and
+ * cleans up the skbuff as if it just came from __alloc_skb().
+ */
+void skb_recycle(struct sk_buff *skb)
+{
+        struct skb_shared_info *shinfo;
+        u8 head_frag = skb->head_frag;
+
+        skb_release_head_state(skb);
+
+        shinfo = skb_shinfo(skb);
+        memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
+        atomic_set(&shinfo->dataref, 1);
+
+        memset(skb, 0, offsetof(struct sk_buff, tail));
+        skb->data = skb->head + NET_SKB_PAD;
+        skb->head_frag = head_frag;
+        skb_reset_tail_pointer(skb);
+}
+EXPORT_SYMBOL(skb_recycle);
+
 /* Make sure a field is enclosed inside headers_start/headers_end section */
 #define CHECK_SKB_FIELD(field) \
         BUILD_BUG_ON(offsetof(struct sk_buff, field) < \
@@ -1075,7 +1101,7 @@ static void skb_headers_offset_update(st
         skb->inner_mac_header += off;
 }
 
-static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
+void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
 {
         __copy_skb_header(new, old);
 
@@ -1083,6 +1109,7 @@ static void copy_skb_header(struct sk_bu
         skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
         skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
 }
+EXPORT_SYMBOL(copy_skb_header);
 
 static inline int skb_alloc_rx_flag(const struct sk_buff *skb)
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -309,6 +309,13 @@ static void dev_watchdog(unsigned long a
                                         txq->trans_timeout++;
                                         break;
                                 }
+
+                                /* Devices with HW_ACCEL_MQ have multiple txqs
+                                 * but update only the first one's transmission
+                                 * timestamp, so avoid checking the rest.
+                                 */
+                                if (dev->features & NETIF_F_HW_ACCEL_MQ)
+                                        break;
                         }
 
                         if (some_queue_timedout) {
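
The break pairs with the NETIF_F_HW_ACCEL_MQ bit added in netdev_features.h: a driver whose hardware schedules the queues itself, and which therefore refreshes only txq 0's trans_start, advertises the feature so the watchdog skips the remaining queues. A hypothetical fragment:

static void my_accel_mq_init(struct net_device *dev)
{
        /* only txq 0's trans_start is refreshed by this hardware, so
         * keep dev_watchdog() from inspecting the other tx queues
         */
        dev->features |= NETIF_F_HW_ACCEL_MQ;
}
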