1 From afa3ab54c03d5126b14651f367b38165fab5b3cc Mon Sep 17 00:00:00 2001
2 From: Birger Koblitz <git@birger-koblitz.de>
3 Date: Tue, 18 Jan 2022 17:18:43 +0100
4 Subject: [PATCH] realtek: Backport bridge configuration for DSA
6 Adds the DSA API for bridge configuration (flooding, L2 learning,
7 and aging) offload as found in Linux 5.12 so that we can implement
10 Submitted-by: Sebastian Gottschall <s.gottschall@dd-wrt.com>
11 Submitted-by: Birger Koblitz <git@birger-koblitz.de>
13 drivers/net/bonding/bond_main.c | 2 ++
14 include/net/dsa.h | 79 ++++++++++++++++-
15 net/dsa/dsa2.c | 88 +++++++++++++++++++
16 net/dsa/dsa_priv.h | 74 ++++++++++++++
17 net/dsa/port.c | 92 ++++++++++++++++++++
18 net/dsa/slave.c | 88 ++++++++++++++++---
19 net/dsa/switch.c | 49 ++++++++++
20 net/dsa/tag_dsa.c | 13 +++++-
21 8 files changed, 460 insertions(+), 25 deletions(-)
23 --- a/drivers/net/bonding/bond_main.c
24 +++ b/drivers/net/bonding/bond_main.c
25 @@ -2045,6 +2045,8 @@ int bond_enslave(struct net_device *bond
29 + bond_lower_state_changed(new_slave);
31 res = bond_sysfs_slave_add(new_slave);
33 slave_dbg(bond_dev, slave_dev, "Error %d calling bond_sysfs_slave_add\n", res);
34 --- a/include/net/dsa.h
35 +++ b/include/net/dsa.h
36 @@ -149,8 +149,41 @@ struct dsa_switch_tree {
38 /* List of DSA links composing the routing table */
39 struct list_head rtable;
41 + /* Maps offloaded LAG netdevs to a zero-based linear ID for
42 + * drivers that need it.
44 + struct net_device **lags;
45 + unsigned int lags_len;
48 +#define dsa_lags_foreach_id(_id, _dst) \
49 + for ((_id) = 0; (_id) < (_dst)->lags_len; (_id)++) \
50 + if ((_dst)->lags[(_id)])
52 +#define dsa_lag_foreach_port(_dp, _dst, _lag) \
53 + list_for_each_entry((_dp), &(_dst)->ports, list) \
54 + if ((_dp)->lag_dev == (_lag))
56 +static inline struct net_device *dsa_lag_dev(struct dsa_switch_tree *dst,
59 + return dst->lags[id];
62 +static inline int dsa_lag_id(struct dsa_switch_tree *dst,
63 + struct net_device *lag)
67 + dsa_lags_foreach_id(id, dst) {
68 + if (dsa_lag_dev(dst, id) == lag)
75 /* TC matchall action types */
76 enum dsa_port_mall_action_type {
78 @@ -220,6 +253,8 @@ struct dsa_port {
79 bool devlink_port_setup;
81 struct phylink_config pl_config;
82 + struct net_device *lag_dev;
83 + bool lag_tx_enabled;
85 struct list_head list;
87 @@ -340,6 +375,14 @@ struct dsa_switch {
89 bool mtu_enforcement_ingress;
91 + /* Drivers that benefit from having an ID associated with each
92 + * offloaded LAG should set this to the maximum number of
93 + * supported IDs. DSA will then maintain a mapping of _at
94 + * least_ these many IDs, accessible to drivers via
97 + unsigned int num_lag_ids;
102 @@ -432,6 +475,18 @@ static inline bool dsa_port_is_vlan_filt
103 return dp->vlan_filtering;
107 +struct net_device *dsa_port_to_bridge_port(const struct dsa_port *dp)
109 + if (!dp->bridge_dev)
113 + return dp->lag_dev;
118 typedef int dsa_fdb_dump_cb_t(const unsigned char *addr, u16 vid,
119 bool is_static, void *data);
120 struct dsa_switch_ops {
121 @@ -629,6 +684,13 @@ struct dsa_switch_ops {
122 void (*crosschip_bridge_leave)(struct dsa_switch *ds, int tree_index,
123 int sw_index, int port,
124 struct net_device *br);
125 + int (*crosschip_lag_change)(struct dsa_switch *ds, int sw_index,
127 + int (*crosschip_lag_join)(struct dsa_switch *ds, int sw_index,
128 + int port, struct net_device *lag,
129 + struct netdev_lag_upper_info *info);
130 + int (*crosschip_lag_leave)(struct dsa_switch *ds, int sw_index,
131 + int port, struct net_device *lag);
135 @@ -660,6 +722,16 @@ struct dsa_switch_ops {
136 int (*port_change_mtu)(struct dsa_switch *ds, int port,
138 int (*port_max_mtu)(struct dsa_switch *ds, int port);
143 + int (*port_lag_change)(struct dsa_switch *ds, int port);
144 + int (*port_lag_join)(struct dsa_switch *ds, int port,
145 + struct net_device *lag,
146 + struct netdev_lag_upper_info *info);
147 + int (*port_lag_leave)(struct dsa_switch *ds, int port,
148 + struct net_device *lag);
151 #define DSA_DEVLINK_PARAM_DRIVER(_id, _name, _type, _cmodes) \
154 @@ -220,11 +220,21 @@ static int dsa_switch_rcv(struct sk_buff
158 - p = netdev_priv(skb->dev);
159 skb_push(skb, ETH_HLEN);
160 skb->pkt_type = PACKET_HOST;
161 skb->protocol = eth_type_trans(skb, skb->dev);
163 + if (unlikely(!dsa_slave_dev_check(skb->dev))) {
164 + /* Packet is to be injected directly on an upper
165 + * device, e.g. a team/bond, so skip all DSA-port
166 + * specific actions.
172 + p = netdev_priv(skb->dev);
174 if (unlikely(cpu_dp->ds->untag_bridge_pvid)) {
175 nskb = dsa_untag_bridge_pvid(skb);
180 static DEFINE_MUTEX(dsa2_mutex);
181 LIST_HEAD(dsa_tree_list);
184 + * dsa_lag_map() - Map LAG netdev to a linear LAG ID
185 + * @dst: Tree in which to record the mapping.
186 + * @lag: Netdev that is to be mapped to an ID.
188 + * dsa_lag_id/dsa_lag_dev can then be used to translate between the
189 + * two spaces. The size of the mapping space is determined by the
190 + * driver by setting ds->num_lag_ids. It is perfectly legal to leave
191 + * it unset if it is not needed, in which case these functions become
194 +void dsa_lag_map(struct dsa_switch_tree *dst, struct net_device *lag)
198 + if (dsa_lag_id(dst, lag) >= 0)
199 + /* Already mapped */
202 + for (id = 0; id < dst->lags_len; id++) {
203 + if (!dsa_lag_dev(dst, id)) {
204 + dst->lags[id] = lag;
209 + /* No IDs left, which is OK. Some drivers do not need it. The
210 + * ones that do, e.g. mv88e6xxx, will discover that dsa_lag_id
211 + * returns an error for this device when joining the LAG. The
212 + * driver can then return -EOPNOTSUPP back to DSA, which will
213 + * fall back to a software LAG.
218 + * dsa_lag_unmap() - Remove a LAG ID mapping
219 + * @dst: Tree in which the mapping is recorded.
220 + * @lag: Netdev that was mapped.
222 + * As there may be multiple users of the mapping, it is only removed
223 + * if there are no other references to it.
225 +void dsa_lag_unmap(struct dsa_switch_tree *dst, struct net_device *lag)
227 + struct dsa_port *dp;
230 + dsa_lag_foreach_port(dp, dst, lag)
231 + /* There are remaining users of this mapping */
234 + dsa_lags_foreach_id(id, dst) {
235 + if (dsa_lag_dev(dst, id) == lag) {
236 + dst->lags[id] = NULL;
242 struct dsa_switch *dsa_switch_find(int tree_index, int sw_index)
244 struct dsa_switch_tree *dst;
245 @@ -597,6 +656,32 @@ static void dsa_tree_teardown_master(str
246 dsa_master_teardown(dp->master);
249 +static int dsa_tree_setup_lags(struct dsa_switch_tree *dst)
251 + unsigned int len = 0;
252 + struct dsa_port *dp;
254 + list_for_each_entry(dp, &dst->ports, list) {
255 + if (dp->ds->num_lag_ids > len)
256 + len = dp->ds->num_lag_ids;
262 + dst->lags = kcalloc(len, sizeof(*dst->lags), GFP_KERNEL);
266 + dst->lags_len = len;
270 +static void dsa_tree_teardown_lags(struct dsa_switch_tree *dst)
275 static int dsa_tree_setup(struct dsa_switch_tree *dst)
278 @@ -624,12 +709,18 @@ static int dsa_tree_setup(struct dsa_swi
280 goto teardown_switches;
282 + err = dsa_tree_setup_lags(dst);
284 + goto teardown_master;
288 pr_info("DSA: tree %d setup\n", dst->index);
293 + dsa_tree_teardown_master(dst);
295 dsa_tree_teardown_switches(dst);
296 teardown_default_cpu:
297 @@ -645,6 +736,8 @@ static void dsa_tree_teardown(struct dsa
301 + dsa_tree_teardown_lags(dst);
303 dsa_tree_teardown_master(dst);
305 dsa_tree_teardown_switches(dst);
306 --- a/net/dsa/dsa_priv.h
307 +++ b/net/dsa/dsa_priv.h
308 @@ -20,6 +20,9 @@ enum {
309 DSA_NOTIFIER_BRIDGE_LEAVE,
310 DSA_NOTIFIER_FDB_ADD,
311 DSA_NOTIFIER_FDB_DEL,
312 + DSA_NOTIFIER_LAG_CHANGE,
313 + DSA_NOTIFIER_LAG_JOIN,
314 + DSA_NOTIFIER_LAG_LEAVE,
315 DSA_NOTIFIER_MDB_ADD,
316 DSA_NOTIFIER_MDB_DEL,
317 DSA_NOTIFIER_VLAN_ADD,
318 @@ -57,6 +60,15 @@ struct dsa_notifier_mdb_info {
322 +/* DSA_NOTIFIER_LAG_* */
323 +struct dsa_notifier_lag_info {
324 + struct net_device *lag;
328 + struct netdev_lag_upper_info *info;
331 /* DSA_NOTIFIER_VLAN_* */
332 struct dsa_notifier_vlan_info {
333 const struct switchdev_obj_port_vlan *vlan;
334 @@ -149,6 +161,11 @@ void dsa_port_disable_rt(struct dsa_port
335 void dsa_port_disable(struct dsa_port *dp);
336 int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br);
337 void dsa_port_bridge_leave(struct dsa_port *dp, struct net_device *br);
338 +int dsa_port_lag_change(struct dsa_port *dp,
339 + struct netdev_lag_lower_state_info *linfo);
340 +int dsa_port_lag_join(struct dsa_port *dp, struct net_device *lag_dev,
341 + struct netdev_lag_upper_info *uinfo);
342 +void dsa_port_lag_leave(struct dsa_port *dp, struct net_device *lag_dev);
343 int dsa_port_vlan_filtering(struct dsa_port *dp, bool vlan_filtering,
344 struct switchdev_trans *trans);
345 bool dsa_port_skip_vlan_configuration(struct dsa_port *dp);
346 @@ -181,6 +198,71 @@ int dsa_port_link_register_of(struct dsa
347 void dsa_port_link_unregister_of(struct dsa_port *dp);
348 extern const struct phylink_mac_ops dsa_port_phylink_mac_ops;
350 +static inline bool dsa_port_offloads_netdev(struct dsa_port *dp,
351 + struct net_device *dev)
353 + /* Switchdev offloading can be configured on: */
355 + if (dev == dp->slave)
356 + /* DSA ports directly connected to a bridge, and event
357 + * was emitted for the ports themselves.
361 + if (dp->bridge_dev == dev)
362 + /* DSA ports connected to a bridge, and event was emitted
367 + if (dp->lag_dev == dev)
368 + /* DSA ports connected to a bridge via a LAG */
374 +static inline bool dsa_port_offloads_bridge_port(struct dsa_port *dp,
375 + struct net_device *dev)
377 + return dsa_port_to_bridge_port(dp) == dev;
380 +static inline bool dsa_port_offloads_bridge(struct dsa_port *dp,
381 + struct net_device *bridge_dev)
383 + /* DSA ports connected to a bridge, and event was emitted
386 + return dp->bridge_dev == bridge_dev;
389 +/* Returns true if any port of this tree offloads the given net_device */
390 +static inline bool dsa_tree_offloads_bridge_port(struct dsa_switch_tree *dst,
391 + struct net_device *dev)
393 + struct dsa_port *dp;
395 + list_for_each_entry(dp, &dst->ports, list)
396 + if (dsa_port_offloads_bridge_port(dp, dev))
402 +/* Returns true if any port of this tree offloads the given net_device */
403 +static inline bool dsa_tree_offloads_netdev(struct dsa_switch_tree *dst,
404 + struct net_device *dev)
406 + struct dsa_port *dp;
408 + list_for_each_entry(dp, &dst->ports, list)
409 + if (dsa_port_offloads_netdev(dp, dev))
416 extern const struct dsa_device_ops notag_netdev_ops;
417 void dsa_slave_mii_bus_init(struct dsa_switch *ds);
418 @@ -285,6 +367,9 @@ int dsa_switch_register_notifier(struct
419 void dsa_switch_unregister_notifier(struct dsa_switch *ds);
422 +void dsa_lag_map(struct dsa_switch_tree *dst, struct net_device *lag);
423 +void dsa_lag_unmap(struct dsa_switch_tree *dst, struct net_device *lag);
425 extern struct list_head dsa_tree_list;
430 @@ -193,6 +193,99 @@ void dsa_port_bridge_leave(struct dsa_po
431 dsa_port_set_state_now(dp, BR_STATE_FORWARDING);
434 +int dsa_port_lag_change(struct dsa_port *dp,
435 + struct netdev_lag_lower_state_info *linfo)
437 + struct dsa_notifier_lag_info info = {
438 + .sw_index = dp->ds->index,
446 + /* On statically configured aggregates (e.g. loadbalance
447 + * without LACP) ports will always be tx_enabled, even if the
448 + * link is down. Thus we require both link_up and tx_enabled
449 + * in order to include it in the tx set.
451 + tx_enabled = linfo->link_up && linfo->tx_enabled;
453 + if (tx_enabled == dp->lag_tx_enabled)
456 + dp->lag_tx_enabled = tx_enabled;
458 + return dsa_port_notify(dp, DSA_NOTIFIER_LAG_CHANGE, &info);
461 +int dsa_port_lag_join(struct dsa_port *dp, struct net_device *lag,
462 + struct netdev_lag_upper_info *uinfo)
464 + struct dsa_notifier_lag_info info = {
465 + .sw_index = dp->ds->index,
470 + struct net_device *bridge_dev;
473 + dsa_lag_map(dp->ds->dst, lag);
476 + err = dsa_port_notify(dp, DSA_NOTIFIER_LAG_JOIN, &info);
480 + bridge_dev = netdev_master_upper_dev_get(lag);
481 + if (!bridge_dev || !netif_is_bridge_master(bridge_dev))
484 + err = dsa_port_bridge_join(dp, bridge_dev);
486 + goto err_bridge_join;
491 + dsa_port_notify(dp, DSA_NOTIFIER_LAG_LEAVE, &info);
493 + dp->lag_dev = NULL;
494 + dsa_lag_unmap(dp->ds->dst, lag);
498 +void dsa_port_lag_leave(struct dsa_port *dp, struct net_device *lag)
500 + struct dsa_notifier_lag_info info = {
501 + .sw_index = dp->ds->index,
510 + /* Port might have been part of a LAG that in turn was
511 + * attached to a bridge.
513 + if (dp->bridge_dev)
514 + dsa_port_bridge_leave(dp, dp->bridge_dev);
516 + dp->lag_tx_enabled = false;
517 + dp->lag_dev = NULL;
519 + err = dsa_port_notify(dp, DSA_NOTIFIER_LAG_LEAVE, &info);
521 + pr_err("DSA: failed to notify DSA_NOTIFIER_LAG_LEAVE: %d\n",
524 + dsa_lag_unmap(dp->ds->dst, lag);
527 /* Must be called under rcu_read_lock() */
528 static bool dsa_port_can_apply_vlan_filtering(struct dsa_port *dp,
530 --- a/net/dsa/slave.c
531 +++ b/net/dsa/slave.c
532 @@ -337,9 +337,6 @@ static int dsa_slave_vlan_add(struct net
533 struct switchdev_obj_port_vlan vlan;
536 - if (obj->orig_dev != dev)
537 - return -EOPNOTSUPP;
539 if (dsa_port_skip_vlan_configuration(dp))
542 @@ -394,11 +391,13 @@ static int dsa_slave_port_obj_add(struct
545 case SWITCHDEV_OBJ_ID_PORT_MDB:
546 - if (obj->orig_dev != dev)
547 + if (!dsa_port_offloads_bridge_port(dp, obj->orig_dev))
549 err = dsa_port_mdb_add(dp, SWITCHDEV_OBJ_PORT_MDB(obj), trans);
551 case SWITCHDEV_OBJ_ID_HOST_MDB:
552 + if (!dsa_port_offloads_bridge(dp, obj->orig_dev))
553 + return -EOPNOTSUPP;
554 /* DSA can directly translate this to a normal MDB add,
555 * but on the CPU port.
557 @@ -406,6 +405,9 @@ static int dsa_slave_port_obj_add(struct
560 case SWITCHDEV_OBJ_ID_PORT_VLAN:
561 + if (!dsa_port_offloads_bridge_port(dp, obj->orig_dev))
562 + return -EOPNOTSUPP;
564 err = dsa_slave_vlan_add(dev, obj, trans);
567 @@ -424,9 +426,6 @@ static int dsa_slave_vlan_del(struct net
568 struct switchdev_obj_port_vlan *vlan;
571 - if (obj->orig_dev != dev)
572 - return -EOPNOTSUPP;
574 if (dsa_port_skip_vlan_configuration(dp))
577 @@ -453,17 +452,22 @@ static int dsa_slave_port_obj_del(struct
580 case SWITCHDEV_OBJ_ID_PORT_MDB:
581 - if (obj->orig_dev != dev)
582 + if (!dsa_port_offloads_bridge_port(dp, obj->orig_dev))
584 err = dsa_port_mdb_del(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
586 case SWITCHDEV_OBJ_ID_HOST_MDB:
587 + if (!dsa_port_offloads_bridge(dp, obj->orig_dev))
588 + return -EOPNOTSUPP;
589 /* DSA can directly translate this to a normal MDB add,
590 * but on the CPU port.
592 err = dsa_port_mdb_del(dp->cpu_dp, SWITCHDEV_OBJ_PORT_MDB(obj));
594 case SWITCHDEV_OBJ_ID_PORT_VLAN:
595 + if (!dsa_port_offloads_bridge_port(dp, obj->orig_dev))
596 + return -EOPNOTSUPP;
598 err = dsa_slave_vlan_del(dev, obj);
601 @@ -1993,6 +1997,46 @@ static int dsa_slave_changeupper(struct
602 dsa_port_bridge_leave(dp, info->upper_dev);
605 + } else if (netif_is_lag_master(info->upper_dev)) {
606 + if (info->linking) {
607 + err = dsa_port_lag_join(dp, info->upper_dev,
609 + if (err == -EOPNOTSUPP) {
610 + NL_SET_ERR_MSG_MOD(info->info.extack,
611 + "Offloading not supported");
614 + err = notifier_from_errno(err);
616 + dsa_port_lag_leave(dp, info->upper_dev);
625 +dsa_slave_lag_changeupper(struct net_device *dev,
626 + struct netdev_notifier_changeupper_info *info)
628 + struct net_device *lower;
629 + struct list_head *iter;
630 + int err = NOTIFY_DONE;
631 + struct dsa_port *dp;
633 + netdev_for_each_lower_dev(dev, lower, iter) {
634 + if (!dsa_slave_dev_check(lower))
637 + dp = dsa_slave_to_port(lower);
642 + err = dsa_slave_changeupper(lower, info);
643 + if (notifier_to_errno(err))
648 @@ -2078,10 +2122,26 @@ static int dsa_slave_netdevice_event(str
651 case NETDEV_CHANGEUPPER:
652 + if (dsa_slave_dev_check(dev))
653 + return dsa_slave_changeupper(dev, ptr);
655 + if (netif_is_lag_master(dev))
656 + return dsa_slave_lag_changeupper(dev, ptr);
659 + case NETDEV_CHANGELOWERSTATE: {
660 + struct netdev_notifier_changelowerstate_info *info = ptr;
661 + struct dsa_port *dp;
664 if (!dsa_slave_dev_check(dev))
665 - return NOTIFY_DONE;
668 - return dsa_slave_changeupper(dev, ptr);
669 + dp = dsa_slave_to_port(dev);
671 + err = dsa_port_lag_change(dp, info->lower_state_info);
672 + return notifier_from_errno(err);
677 @@ -2229,6 +2289,15 @@ static int dsa_slave_switchdev_event(str
678 if (!fdb_info->added_by_user &&
679 !dp->ds->assisted_learning_on_cpu_port)
682 + /* When the bridge learns an address on an offloaded
683 + * LAG we don't want to send traffic to the CPU, the
684 + * other ports bridged with the LAG should be able to
685 + * autonomously forward towards it.
687 + if (dsa_tree_offloads_netdev(dp->ds->dst, dev))
688 + return NOTIFY_DONE;
692 if (!dp->ds->ops->port_fdb_add || !dp->ds->ops->port_fdb_del)
693 --- a/net/dsa/switch.c
694 +++ b/net/dsa/switch.c
695 @@ -193,6 +193,47 @@ static int dsa_switch_fdb_del(struct dsa
696 return ds->ops->port_fdb_del(ds, port, info->addr, info->vid);
699 +static int dsa_switch_lag_change(struct dsa_switch *ds,
700 + struct dsa_notifier_lag_info *info)
702 + if (ds->index == info->sw_index && ds->ops->port_lag_change)
703 + return ds->ops->port_lag_change(ds, info->port);
705 + if (ds->index != info->sw_index && ds->ops->crosschip_lag_change)
706 + return ds->ops->crosschip_lag_change(ds, info->sw_index,
712 +static int dsa_switch_lag_join(struct dsa_switch *ds,
713 + struct dsa_notifier_lag_info *info)
715 + if (ds->index == info->sw_index && ds->ops->port_lag_join)
716 + return ds->ops->port_lag_join(ds, info->port, info->lag,
719 + if (ds->index != info->sw_index && ds->ops->crosschip_lag_join)
720 + return ds->ops->crosschip_lag_join(ds, info->sw_index,
721 + info->port, info->lag,
724 + return -EOPNOTSUPP;
727 +static int dsa_switch_lag_leave(struct dsa_switch *ds,
728 + struct dsa_notifier_lag_info *info)
730 + if (ds->index == info->sw_index && ds->ops->port_lag_leave)
731 + return ds->ops->port_lag_leave(ds, info->port, info->lag);
733 + if (ds->index != info->sw_index && ds->ops->crosschip_lag_leave)
734 + return ds->ops->crosschip_lag_leave(ds, info->sw_index,
735 + info->port, info->lag);
737 + return -EOPNOTSUPP;
740 static bool dsa_switch_mdb_match(struct dsa_switch *ds, int port,
741 struct dsa_notifier_mdb_info *info)
743 @@ -340,6 +381,15 @@ static int dsa_switch_event(struct notif
744 case DSA_NOTIFIER_FDB_DEL:
745 err = dsa_switch_fdb_del(ds, info);
747 + case DSA_NOTIFIER_LAG_CHANGE:
748 + err = dsa_switch_lag_change(ds, info);
750 + case DSA_NOTIFIER_LAG_JOIN:
751 + err = dsa_switch_lag_join(ds, info);
753 + case DSA_NOTIFIER_LAG_LEAVE:
754 + err = dsa_switch_lag_leave(ds, info);
756 case DSA_NOTIFIER_MDB_ADD:
757 err = dsa_switch_mdb_add(ds, info);
759 --- a/net/dsa/tag_dsa.c
760 +++ b/net/dsa/tag_dsa.c
761 @@ -82,7 +82,19 @@ static struct sk_buff *dsa_rcv(struct sk
762 source_device = dsa_header[0] & 0x1f;
763 source_port = (dsa_header[1] >> 3) & 0x1f;
765 - skb->dev = dsa_master_find_slave(dev, source_device, source_port);
767 + struct dsa_port *cpu_dp = dev->dsa_ptr;
769 + /* The exact source port is not available in the tag,
770 + * so we inject the frame directly on the upper
773 + skb->dev = dsa_lag_dev(cpu_dp->dst, source_port);
775 + skb->dev = dsa_master_find_slave(dev, source_device,