1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (C) 2009 Felix Fietkau <nbd@nbd.name>
4 * Copyright (C) 2011-2012 Gabor Juhos <juhosg@openwrt.org>
5 * Copyright (c) 2015, The Linux Foundation. All rights reserved.
6 * Copyright (c) 2016 John Crispin <john@phrozen.org>
9 #include <linux/module.h>
10 #include <linux/phy.h>
11 #include <linux/netdevice.h>
13 #include <linux/of_net.h>
14 #include <linux/of_platform.h>
15 #include <linux/if_bridge.h>
16 #include <linux/mdio.h>
17 #include <linux/etherdevice.h>
18 #include <linux/clk.h>
19 #include <linux/reset.h>
20 #include <linux/mdio.h>
21 #include <linux/mfd/syscon.h>
22 #include <linux/of_mdio.h>
23 #include <linux/workqueue.h>
27 #define MIB_DESC(_s, _o, _n) \
34 static const struct qca8k_mib_desc ar8327_mib
[] = {
35 MIB_DESC(1, 0x00, "RxBroad"),
36 MIB_DESC(1, 0x04, "RxPause"),
37 MIB_DESC(1, 0x08, "RxMulti"),
38 MIB_DESC(1, 0x0c, "RxFcsErr"),
39 MIB_DESC(1, 0x10, "RxAlignErr"),
40 MIB_DESC(1, 0x14, "RxRunt"),
41 MIB_DESC(1, 0x18, "RxFragment"),
42 MIB_DESC(1, 0x1c, "Rx64Byte"),
43 MIB_DESC(1, 0x20, "Rx128Byte"),
44 MIB_DESC(1, 0x24, "Rx256Byte"),
45 MIB_DESC(1, 0x28, "Rx512Byte"),
46 MIB_DESC(1, 0x2c, "Rx1024Byte"),
47 MIB_DESC(1, 0x30, "Rx1518Byte"),
48 MIB_DESC(1, 0x34, "RxMaxByte"),
49 MIB_DESC(1, 0x38, "RxTooLong"),
50 MIB_DESC(2, 0x3c, "RxGoodByte"),
51 MIB_DESC(2, 0x44, "RxBadByte"),
52 MIB_DESC(1, 0x4c, "RxOverFlow"),
53 MIB_DESC(1, 0x50, "Filtered"),
54 MIB_DESC(1, 0x54, "TxBroad"),
55 MIB_DESC(1, 0x58, "TxPause"),
56 MIB_DESC(1, 0x5c, "TxMulti"),
57 MIB_DESC(1, 0x60, "TxUnderRun"),
58 MIB_DESC(1, 0x64, "Tx64Byte"),
59 MIB_DESC(1, 0x68, "Tx128Byte"),
60 MIB_DESC(1, 0x6c, "Tx256Byte"),
61 MIB_DESC(1, 0x70, "Tx512Byte"),
62 MIB_DESC(1, 0x74, "Tx1024Byte"),
63 MIB_DESC(1, 0x78, "Tx1518Byte"),
64 MIB_DESC(1, 0x7c, "TxMaxByte"),
65 MIB_DESC(1, 0x80, "TxOverSize"),
66 MIB_DESC(2, 0x84, "TxByte"),
67 MIB_DESC(1, 0x8c, "TxCollision"),
68 MIB_DESC(1, 0x90, "TxAbortCol"),
69 MIB_DESC(1, 0x94, "TxMultiCol"),
70 MIB_DESC(1, 0x98, "TxSingleCol"),
71 MIB_DESC(1, 0x9c, "TxExcDefer"),
72 MIB_DESC(1, 0xa0, "TxDefer"),
73 MIB_DESC(1, 0xa4, "TxLateCol"),
78 qca8k_read(struct qca8k_priv
*priv
, u32 reg
)
82 regmap_read(priv
->base
, reg
, &val
);
87 qca8k_write(struct qca8k_priv
*priv
, u32 reg
, u32 val
)
89 regmap_write(priv
->base
, reg
, val
);
93 qca8k_rmw(struct qca8k_priv
*priv
, u32 reg
, u32 mask
, u32 val
)
97 ret
= qca8k_read(priv
, reg
);
100 qca8k_write(priv
, reg
, ret
);
106 qca8k_reg_set(struct qca8k_priv
*priv
, u32 reg
, u32 val
)
108 qca8k_rmw(priv
, reg
, 0, val
);
112 qca8k_reg_clear(struct qca8k_priv
*priv
, u32 reg
, u32 val
)
114 qca8k_rmw(priv
, reg
, val
, 0);
118 qca8k_regmap_read(void *ctx
, uint32_t reg
, uint32_t *val
)
120 struct qca8k_priv
*priv
= (struct qca8k_priv
*)ctx
;
122 *val
= qca8k_read(priv
, reg
);
128 qca8k_regmap_write(void *ctx
, uint32_t reg
, uint32_t val
)
130 struct qca8k_priv
*priv
= (struct qca8k_priv
*)ctx
;
132 qca8k_write(priv
, reg
, val
);
137 static const struct regmap_range qca8k_readable_ranges
[] = {
138 regmap_reg_range(0x0000, 0x00e4), /* Global control */
139 regmap_reg_range(0x0100, 0x0168), /* EEE control */
140 regmap_reg_range(0x0200, 0x0270), /* Parser control */
141 regmap_reg_range(0x0400, 0x0454), /* ACL */
142 regmap_reg_range(0x0600, 0x0718), /* Lookup */
143 regmap_reg_range(0x0800, 0x0b70), /* QM */
144 regmap_reg_range(0x0c00, 0x0c80), /* PKT */
145 regmap_reg_range(0x0e00, 0x0e98), /* L3 */
146 regmap_reg_range(0x1000, 0x10ac), /* MIB - Port0 */
147 regmap_reg_range(0x1100, 0x11ac), /* MIB - Port1 */
148 regmap_reg_range(0x1200, 0x12ac), /* MIB - Port2 */
149 regmap_reg_range(0x1300, 0x13ac), /* MIB - Port3 */
150 regmap_reg_range(0x1400, 0x14ac), /* MIB - Port4 */
151 regmap_reg_range(0x1500, 0x15ac), /* MIB - Port5 */
152 regmap_reg_range(0x1600, 0x16ac), /* MIB - Port6 */
156 static const struct regmap_access_table qca8k_readable_table
= {
157 .yes_ranges
= qca8k_readable_ranges
,
158 .n_yes_ranges
= ARRAY_SIZE(qca8k_readable_ranges
),
161 static struct regmap_config qca8k_regmap_config
= {
165 .max_register
= 0x16ac, /* end MIB - Port6 range */
166 .reg_read
= qca8k_regmap_read
,
167 .reg_write
= qca8k_regmap_write
,
168 .rd_table
= &qca8k_readable_table
,
172 qca8k_busy_wait(struct qca8k_priv
*priv
, u32 reg
, u32 mask
)
174 unsigned long timeout
;
176 timeout
= jiffies
+ msecs_to_jiffies(20);
178 /* loop until the busy flag has cleared */
180 u32 val
= qca8k_read(priv
, reg
);
181 int busy
= val
& mask
;
186 } while (!time_after_eq(jiffies
, timeout
));
188 return time_after_eq(jiffies
, timeout
);
192 qca8k_fdb_read(struct qca8k_priv
*priv
, struct qca8k_fdb
*fdb
)
197 /* load the ARL table into an array */
198 for (i
= 0; i
< 4; i
++)
199 reg
[i
] = qca8k_read(priv
, QCA8K_REG_ATU_DATA0
+ (i
* 4));
202 fdb
->vid
= (reg
[2] >> QCA8K_ATU_VID_S
) & QCA8K_ATU_VID_M
;
204 fdb
->aging
= reg
[2] & QCA8K_ATU_STATUS_M
;
205 /* portmask - 54:48 */
206 fdb
->port_mask
= (reg
[1] >> QCA8K_ATU_PORT_S
) & QCA8K_ATU_PORT_M
;
208 fdb
->mac
[0] = (reg
[1] >> QCA8K_ATU_ADDR0_S
) & 0xff;
209 fdb
->mac
[1] = reg
[1] & 0xff;
210 fdb
->mac
[2] = (reg
[0] >> QCA8K_ATU_ADDR2_S
) & 0xff;
211 fdb
->mac
[3] = (reg
[0] >> QCA8K_ATU_ADDR3_S
) & 0xff;
212 fdb
->mac
[4] = (reg
[0] >> QCA8K_ATU_ADDR4_S
) & 0xff;
213 fdb
->mac
[5] = reg
[0] & 0xff;
217 qca8k_fdb_write(struct qca8k_priv
*priv
, u16 vid
, u8 port_mask
, const u8
*mac
,
224 reg
[2] = (vid
& QCA8K_ATU_VID_M
) << QCA8K_ATU_VID_S
;
226 reg
[2] |= aging
& QCA8K_ATU_STATUS_M
;
227 /* portmask - 54:48 */
228 reg
[1] = (port_mask
& QCA8K_ATU_PORT_M
) << QCA8K_ATU_PORT_S
;
230 reg
[1] |= mac
[0] << QCA8K_ATU_ADDR0_S
;
232 reg
[0] |= mac
[2] << QCA8K_ATU_ADDR2_S
;
233 reg
[0] |= mac
[3] << QCA8K_ATU_ADDR3_S
;
234 reg
[0] |= mac
[4] << QCA8K_ATU_ADDR4_S
;
237 /* load the array into the ARL table */
238 for (i
= 0; i
< 3; i
++)
239 qca8k_write(priv
, QCA8K_REG_ATU_DATA0
+ (i
* 4), reg
[i
]);
243 qca8k_fdb_access(struct qca8k_priv
*priv
, enum qca8k_fdb_cmd cmd
, int port
)
247 /* Set the command and FDB index */
248 reg
= QCA8K_ATU_FUNC_BUSY
;
251 reg
|= QCA8K_ATU_FUNC_PORT_EN
;
252 reg
|= (port
& QCA8K_ATU_FUNC_PORT_M
) << QCA8K_ATU_FUNC_PORT_S
;
255 /* Write the function register triggering the table access */
256 qca8k_write(priv
, QCA8K_REG_ATU_FUNC
, reg
);
258 /* wait for completion */
259 if (qca8k_busy_wait(priv
, QCA8K_REG_ATU_FUNC
, QCA8K_ATU_FUNC_BUSY
))
262 /* Check for table full violation when adding an entry */
263 if (cmd
== QCA8K_FDB_LOAD
) {
264 reg
= qca8k_read(priv
, QCA8K_REG_ATU_FUNC
);
265 if (reg
& QCA8K_ATU_FUNC_FULL
)
273 qca8k_fdb_next(struct qca8k_priv
*priv
, struct qca8k_fdb
*fdb
, int port
)
277 qca8k_fdb_write(priv
, fdb
->vid
, fdb
->port_mask
, fdb
->mac
, fdb
->aging
);
278 ret
= qca8k_fdb_access(priv
, QCA8K_FDB_NEXT
, port
);
280 qca8k_fdb_read(priv
, fdb
);
286 qca8k_fdb_add(struct qca8k_priv
*priv
, const u8
*mac
, u16 port_mask
,
291 mutex_lock(&priv
->reg_mutex
);
292 qca8k_fdb_write(priv
, vid
, port_mask
, mac
, aging
);
293 ret
= qca8k_fdb_access(priv
, QCA8K_FDB_LOAD
, -1);
294 mutex_unlock(&priv
->reg_mutex
);
300 qca8k_fdb_del(struct qca8k_priv
*priv
, const u8
*mac
, u16 port_mask
, u16 vid
)
304 mutex_lock(&priv
->reg_mutex
);
305 qca8k_fdb_write(priv
, vid
, port_mask
, mac
, 0);
306 ret
= qca8k_fdb_access(priv
, QCA8K_FDB_PURGE
, -1);
307 mutex_unlock(&priv
->reg_mutex
);
313 qca8k_fdb_flush(struct qca8k_priv
*priv
)
315 mutex_lock(&priv
->reg_mutex
);
316 qca8k_fdb_access(priv
, QCA8K_FDB_FLUSH
, -1);
317 mutex_unlock(&priv
->reg_mutex
);
321 qca8k_mib_init(struct qca8k_priv
*priv
)
323 mutex_lock(&priv
->reg_mutex
);
324 qca8k_reg_set(priv
, QCA8K_REG_MIB
, QCA8K_MIB_FLUSH
| QCA8K_MIB_BUSY
);
325 qca8k_busy_wait(priv
, QCA8K_REG_MIB
, QCA8K_MIB_BUSY
);
326 qca8k_reg_set(priv
, QCA8K_REG_MIB
, QCA8K_MIB_CPU_KEEP
);
327 qca8k_write(priv
, QCA8K_REG_MODULE_EN
, QCA8K_MODULE_EN_MIB
);
328 mutex_unlock(&priv
->reg_mutex
);
332 qca8k_set_pad_ctrl(struct qca8k_priv
*priv
, int port
, int mode
)
338 reg
= QCA8K_REG_PORT0_PAD_CTRL
;
341 reg
= QCA8K_REG_PORT6_PAD_CTRL
;
344 pr_err("Can't set PAD_CTRL on port %d\n", port
);
348 /* Configure a port to be directly connected to an external
352 case PHY_INTERFACE_MODE_RGMII
:
353 qca8k_write(priv
, reg
,
354 QCA8K_PORT_PAD_RGMII_EN
|
355 QCA8K_PORT_PAD_RGMII_TX_DELAY(3) |
356 QCA8K_PORT_PAD_RGMII_RX_DELAY(3));
358 /* According to the datasheet, RGMII delay is enabled through
359 * PORT5_PAD_CTRL for all ports, rather than individual port
362 qca8k_write(priv
, QCA8K_REG_PORT5_PAD_CTRL
,
363 QCA8K_PORT_PAD_RGMII_RX_DELAY_EN
);
365 case PHY_INTERFACE_MODE_SGMII
:
366 qca8k_write(priv
, reg
, QCA8K_PORT_PAD_SGMII_EN
);
368 case PHY_INTERFACE_MODE_INTERNAL
:
371 pr_err("xMII mode %d not supported\n", mode
);
379 qca8k_port_set_status(struct qca8k_priv
*priv
, int port
, int enable
)
381 u32 mask
= QCA8K_PORT_STATUS_TXMAC
| QCA8K_PORT_STATUS_RXMAC
;
383 /* Port 0 and 6 have no internal PHY */
384 if (port
> 0 && port
< 6 && priv
->mac_mode
!= 3)
385 mask
|= QCA8K_PORT_STATUS_LINK_AUTO
;
388 qca8k_reg_set(priv
, QCA8K_REG_PORT_STATUS(port
), mask
);
390 qca8k_reg_clear(priv
, QCA8K_REG_PORT_STATUS(port
), mask
);
394 qca8k_setup(struct dsa_switch
*ds
)
396 struct qca8k_priv
*priv
= (struct qca8k_priv
*)ds
->priv
;
397 int ret
, i
, phy_mode
= -1;
400 /* Make sure that port 0 is the cpu port */
401 if (!dsa_is_cpu_port(ds
, 0)) {
402 pr_err("port 0 is not the CPU port\n");
406 mutex_init(&priv
->reg_mutex
);
408 /* Start by setting up the register mapping */
409 priv
->regmap
= devm_regmap_init(ds
->dev
, NULL
, priv
,
410 &qca8k_regmap_config
);
411 if (IS_ERR(priv
->regmap
))
412 pr_warn("regmap initialization failed");
414 /* Initialize CPU port pad mode (xMII type, delays...) */
415 phy_mode
= of_get_phy_mode(ds
->ports
[QCA8K_CPU_PORT
].dn
);
417 pr_err("Can't find phy-mode for master device\n");
420 ret
= qca8k_set_pad_ctrl(priv
, QCA8K_CPU_PORT
, phy_mode
);
424 /* Enable CPU Port, force it to maximum bandwidth and full-duplex */
425 mask
= QCA8K_PORT_STATUS_SPEED_1000
| QCA8K_PORT_STATUS_TXFLOW
| QCA8K_PORT_TXHALF_FLOW
|
426 QCA8K_PORT_STATUS_RXFLOW
| QCA8K_PORT_STATUS_DUPLEX
;
427 qca8k_write(priv
, QCA8K_REG_PORT_STATUS(QCA8K_CPU_PORT
), mask
);
428 qca8k_reg_set(priv
, QCA8K_REG_GLOBAL_FW_CTRL0
,
429 QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN
);
430 qca8k_port_set_status(priv
, QCA8K_CPU_PORT
, 1);
431 priv
->port_sts
[QCA8K_CPU_PORT
].enabled
= 1;
433 /* Enable MIB counters */
434 qca8k_mib_init(priv
);
436 /* Disable buggy AZ */
437 qca8k_write(priv
, QCA8K_REG_EEE_CTRL
, 0);
439 /* enable jumbo frames */
440 qca8k_rmw(priv
, QCA8K_REG_MAX_FRAME_SIZE
,
441 QCA8K_MAX_FRAME_SIZE_MTU
, 9018 + 8 + 2);
443 qca8k_write(priv
, QCA8K_REG_PORT_FLOWCTRL_THRESH(0),
444 (QCA8K_PORT0_FC_THRESH_ON_DFLT
<< 16) |
445 QCA8K_PORT0_FC_THRESH_OFF_DFLT
);
447 /* Enable QCA header mode on the cpu port */
448 qca8k_write(priv
, QCA8K_REG_PORT_HDR_CTRL(QCA8K_CPU_PORT
), 0);
450 QCA8K_PORT_HDR_CTRL_ALL << QCA8K_PORT_HDR_CTRL_TX_S |
451 QCA8K_PORT_HDR_CTRL_ALL << QCA8K_PORT_HDR_CTRL_RX_S);*/
453 /* Disable forwarding by default on all ports */
454 for (i
= 0; i
< QCA8K_NUM_PORTS
; i
++)
455 qca8k_rmw(priv
, QCA8K_PORT_LOOKUP_CTRL(i
),
456 QCA8K_PORT_LOOKUP_MEMBER
, 0);
458 /* Disable MAC by default on all user ports */
459 for (i
= 1; i
< QCA8K_NUM_PORTS
; i
++)
460 if (dsa_is_user_port(ds
, i
))
461 qca8k_port_set_status(priv
, i
, 0);
463 /* Forward all unknown frames to CPU port for Linux processing */
464 qca8k_write(priv
, QCA8K_REG_GLOBAL_FW_CTRL1
,
465 BIT(0) << QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_S
|
466 GENMASK(5, 0) << QCA8K_GLOBAL_FW_CTRL1_BC_DP_S
|
467 GENMASK(5, 0) << QCA8K_GLOBAL_FW_CTRL1_MC_DP_S
|
468 GENMASK(5, 0) << QCA8K_GLOBAL_FW_CTRL1_UC_DP_S
);
470 /* Setup connection between CPU port & user ports */
471 for (i
= 0; i
< DSA_MAX_PORTS
; i
++) {
472 /* CPU port gets connected to all user ports of the switch */
473 if (dsa_is_cpu_port(ds
, i
)) {
474 qca8k_rmw(priv
, QCA8K_PORT_LOOKUP_CTRL(QCA8K_CPU_PORT
),
475 QCA8K_PORT_LOOKUP_MEMBER
, dsa_user_ports(ds
));
478 /* Invividual user ports get connected to CPU port only */
479 if (dsa_is_user_port(ds
, i
)) {
480 int shift
= 16 * (i
% 2);
482 qca8k_rmw(priv
, QCA8K_PORT_LOOKUP_CTRL(i
),
483 QCA8K_PORT_LOOKUP_MEMBER
,
484 BIT(QCA8K_CPU_PORT
));
486 /* Enable ARP Auto-learning by default */
487 qca8k_reg_set(priv
, QCA8K_PORT_LOOKUP_CTRL(i
),
488 QCA8K_PORT_LOOKUP_LEARN
);
490 /* For port based vlans to work we need to set the
493 qca8k_rmw(priv
, QCA8K_EGRESS_VLAN(i
),
494 0xffff << shift
, 1 << shift
);
495 qca8k_write(priv
, QCA8K_REG_PORT_VLAN_CTRL0(i
),
496 QCA8K_PORT_VLAN_CVID(1) |
497 QCA8K_PORT_VLAN_SVID(1));
501 /* Flush the FDB table */
502 qca8k_fdb_flush(priv
);
508 qca8k_adjust_link(struct dsa_switch
*ds
, int port
, struct phy_device
*phy
)
510 struct qca8k_priv
*priv
= ds
->priv
;
513 /* Force fixed-link setting for CPU port, skip others. */
514 if (!phy_is_pseudo_fixed_link(phy
) && priv
->mac_mode
!= 3)
518 switch (phy
->speed
) {
520 reg
= QCA8K_PORT_STATUS_SPEED_10
;
523 reg
= QCA8K_PORT_STATUS_SPEED_100
;
526 reg
= QCA8K_PORT_STATUS_SPEED_1000
;
529 dev_dbg(priv
->dev
, "port%d link speed %dMbps not supported.\n",
534 /* Set duplex mode */
535 if (phy
->duplex
== DUPLEX_FULL
)
536 reg
|= QCA8K_PORT_STATUS_DUPLEX
;
538 /* Force flow control */
539 if (dsa_is_cpu_port(ds
, port
) || priv
->mac_mode
== 3)
540 reg
|= QCA8K_PORT_STATUS_RXFLOW
| QCA8K_PORT_STATUS_TXFLOW
|
541 QCA8K_PORT_TXHALF_FLOW
;
543 /* Force link down before changing MAC options */
544 qca8k_port_set_status(priv
, port
, 0);
545 qca8k_write(priv
, QCA8K_REG_PORT_STATUS(port
), reg
);
546 qca8k_port_set_status(priv
, port
, 1);
550 qca8k_get_strings(struct dsa_switch
*ds
, int port
, u32 stringset
, uint8_t *data
)
554 if (stringset
!= ETH_SS_STATS
)
557 for (i
= 0; i
< ARRAY_SIZE(ar8327_mib
); i
++)
558 strncpy(data
+ i
* ETH_GSTRING_LEN
, ar8327_mib
[i
].name
,
563 qca8k_get_ethtool_stats(struct dsa_switch
*ds
, int port
,
566 struct qca8k_priv
*priv
= (struct qca8k_priv
*)ds
->priv
;
567 const struct qca8k_mib_desc
*mib
;
571 for (i
= 0; i
< ARRAY_SIZE(ar8327_mib
); i
++) {
572 mib
= &ar8327_mib
[i
];
573 reg
= QCA8K_PORT_MIB_COUNTER(port
) + mib
->offset
;
575 data
[i
] = qca8k_read(priv
, reg
);
576 if (mib
->size
== 2) {
577 hi
= qca8k_read(priv
, reg
+ 4);
584 qca8k_get_sset_count(struct dsa_switch
*ds
, int port
, int sset
)
586 if (sset
!= ETH_SS_STATS
)
589 return ARRAY_SIZE(ar8327_mib
);
593 qca8k_set_mac_eee(struct dsa_switch
*ds
, int port
, struct ethtool_eee
*eee
)
595 struct qca8k_priv
*priv
= (struct qca8k_priv
*)ds
->priv
;
596 u32 lpi_en
= QCA8K_REG_EEE_CTRL_LPI_EN(port
);
599 mutex_lock(&priv
->reg_mutex
);
600 reg
= qca8k_read(priv
, QCA8K_REG_EEE_CTRL
);
601 if (eee
->eee_enabled
)
605 qca8k_write(priv
, QCA8K_REG_EEE_CTRL
, reg
);
606 mutex_unlock(&priv
->reg_mutex
);
/* EEE state is handled entirely by the PHY on this hardware. */
static int
qca8k_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
{
	/* Nothing to do on the port's MAC */
	return 0;
}
619 qca8k_port_stp_state_set(struct dsa_switch
*ds
, int port
, u8 state
)
621 struct qca8k_priv
*priv
= (struct qca8k_priv
*)ds
->priv
;
625 case BR_STATE_DISABLED
:
626 stp_state
= QCA8K_PORT_LOOKUP_STATE_DISABLED
;
628 case BR_STATE_BLOCKING
:
629 stp_state
= QCA8K_PORT_LOOKUP_STATE_BLOCKING
;
631 case BR_STATE_LISTENING
:
632 stp_state
= QCA8K_PORT_LOOKUP_STATE_LISTENING
;
634 case BR_STATE_LEARNING
:
635 stp_state
= QCA8K_PORT_LOOKUP_STATE_LEARNING
;
637 case BR_STATE_FORWARDING
:
639 stp_state
= QCA8K_PORT_LOOKUP_STATE_FORWARD
;
643 qca8k_rmw(priv
, QCA8K_PORT_LOOKUP_CTRL(port
),
644 QCA8K_PORT_LOOKUP_STATE_MASK
, stp_state
);
648 qca8k_port_bridge_join(struct dsa_switch
*ds
, int port
, struct net_device
*br
)
650 struct qca8k_priv
*priv
= (struct qca8k_priv
*)ds
->priv
;
651 int port_mask
= BIT(QCA8K_CPU_PORT
);
654 for (i
= 1; i
< QCA8K_NUM_PORTS
; i
++) {
655 if (dsa_to_port(ds
, i
)->bridge_dev
!= br
)
657 /* Add this port to the portvlan mask of the other ports
661 QCA8K_PORT_LOOKUP_CTRL(i
),
666 /* Add all other ports to this ports portvlan mask */
667 qca8k_rmw(priv
, QCA8K_PORT_LOOKUP_CTRL(port
),
668 QCA8K_PORT_LOOKUP_MEMBER
, port_mask
);
674 qca8k_port_bridge_leave(struct dsa_switch
*ds
, int port
, struct net_device
*br
)
676 struct qca8k_priv
*priv
= (struct qca8k_priv
*)ds
->priv
;
679 for (i
= 1; i
< QCA8K_NUM_PORTS
; i
++) {
680 if (dsa_to_port(ds
, i
)->bridge_dev
!= br
)
682 /* Remove this port to the portvlan mask of the other ports
685 qca8k_reg_clear(priv
,
686 QCA8K_PORT_LOOKUP_CTRL(i
),
690 /* Set the cpu port to be the only one in the portvlan mask of
693 qca8k_rmw(priv
, QCA8K_PORT_LOOKUP_CTRL(port
),
694 QCA8K_PORT_LOOKUP_MEMBER
, BIT(QCA8K_CPU_PORT
));
698 qca8k_port_enable(struct dsa_switch
*ds
, int port
,
699 struct phy_device
*phy
)
701 struct qca8k_priv
*priv
= (struct qca8k_priv
*)ds
->priv
;
703 qca8k_port_set_status(priv
, port
, 1);
704 priv
->port_sts
[port
].enabled
= 1;
710 qca8k_port_disable(struct dsa_switch
*ds
, int port
,
711 struct phy_device
*phy
)
713 struct qca8k_priv
*priv
= (struct qca8k_priv
*)ds
->priv
;
715 qca8k_port_set_status(priv
, port
, 0);
716 priv
->port_sts
[port
].enabled
= 0;
720 qca8k_port_fdb_insert(struct qca8k_priv
*priv
, const u8
*addr
,
721 u16 port_mask
, u16 vid
)
723 /* Set the vid to the port vlan id if no vid is set */
727 return qca8k_fdb_add(priv
, addr
, port_mask
, vid
,
728 QCA8K_ATU_STATUS_STATIC
);
732 qca8k_port_fdb_add(struct dsa_switch
*ds
, int port
,
733 const unsigned char *addr
, u16 vid
)
735 struct qca8k_priv
*priv
= (struct qca8k_priv
*)ds
->priv
;
736 u16 port_mask
= BIT(port
);
738 return qca8k_port_fdb_insert(priv
, addr
, port_mask
, vid
);
742 qca8k_port_fdb_del(struct dsa_switch
*ds
, int port
,
743 const unsigned char *addr
, u16 vid
)
745 struct qca8k_priv
*priv
= (struct qca8k_priv
*)ds
->priv
;
746 u16 port_mask
= BIT(port
);
751 return qca8k_fdb_del(priv
, addr
, port_mask
, vid
);
755 qca8k_port_fdb_dump(struct dsa_switch
*ds
, int port
,
756 dsa_fdb_dump_cb_t
*cb
, void *data
)
758 struct qca8k_priv
*priv
= (struct qca8k_priv
*)ds
->priv
;
759 struct qca8k_fdb _fdb
= { 0 };
760 int cnt
= QCA8K_NUM_FDB_RECORDS
;
764 mutex_lock(&priv
->reg_mutex
);
765 while (cnt
-- && !qca8k_fdb_next(priv
, &_fdb
, port
)) {
768 is_static
= (_fdb
.aging
== QCA8K_ATU_STATUS_STATIC
);
769 ret
= cb(_fdb
.mac
, _fdb
.vid
, is_static
, data
);
773 mutex_unlock(&priv
->reg_mutex
);
778 static enum dsa_tag_protocol
779 qca8k_get_tag_protocol(struct dsa_switch
*ds
, int port
)
781 return DSA_TAG_PROTO_QCA
;
784 static const struct dsa_switch_ops qca8k_switch_ops
= {
785 .get_tag_protocol
= qca8k_get_tag_protocol
,
786 .setup
= qca8k_setup
,
787 .adjust_link
= qca8k_adjust_link
,
788 .port_enable
= qca8k_port_enable
,
789 .port_disable
= qca8k_port_disable
,
790 .get_strings
= qca8k_get_strings
,
791 .get_ethtool_stats
= qca8k_get_ethtool_stats
,
792 .get_sset_count
= qca8k_get_sset_count
,
793 .get_mac_eee
= qca8k_get_mac_eee
,
794 .set_mac_eee
= qca8k_set_mac_eee
,
795 .port_stp_state_set
= qca8k_port_stp_state_set
,
796 .port_bridge_join
= qca8k_port_bridge_join
,
797 .port_bridge_leave
= qca8k_port_bridge_leave
,
798 .port_fdb_add
= qca8k_port_fdb_add
,
799 .port_fdb_del
= qca8k_port_fdb_del
,
800 .port_fdb_dump
= qca8k_port_fdb_dump
,
803 #define AR40XX_NUM_PORTS 6
805 enum ar40xx_port_wrapper_cfg
{
806 PORT_WRAPPER_PSGMII
= 0,
807 PORT_WRAPPER_RGMII
= 3,
810 #define AR40XX_PSGMII_MODE_CONTROL 0x1b4
811 #define AR40XX_PSGMII_ATHR_CSCO_MODE_25M BIT(0)
813 #define AR40XX_PSGMIIPHY_TX_CONTROL 0x288
815 #define AR40XX_REG_RGMII_CTRL 0x0004
816 #define AR40XX_REG_PORT_LOOKUP(_i) (0x660 + (_i) * 0xc)
817 #define AR40XX_PORT_LOOKUP_LOOPBACK BIT(21)
819 #define AR40XX_PHY_SPEC_STATUS 0x11
820 #define AR40XX_PHY_SPEC_STATUS_LINK BIT(10)
821 #define AR40XX_PHY_SPEC_STATUS_DUPLEX BIT(13)
822 #define AR40XX_PHY_SPEC_STATUS_SPEED GENMASK(16, 14)
824 #define AR40XX_PSGMII_ID 5
825 #define AR40XX_PSGMII_CALB_NUM 100
826 #define AR40XX_MALIBU_PSGMII_MODE_CTRL 0x6d
827 #define AR40XX_MALIBU_PHY_PSGMII_MODE_CTRL_ADJUST_VAL 0x220c
828 #define AR40XX_MALIBU_PHY_MMD7_DAC_CTRL 0x801a
829 #define AR40XX_MALIBU_DAC_CTRL_MASK 0x380
830 #define AR40XX_MALIBU_DAC_CTRL_VALUE 0x280
831 #define AR40XX_MALIBU_PHY_RLP_CTRL 0x805a
832 #define AR40XX_PSGMII_TX_DRIVER_1_CTRL 0xb
833 #define AR40XX_MALIBU_PHY_PSGMII_REDUCE_SERDES_TX_AMP 0x8a
834 #define AR40XX_MALIBU_PHY_LAST_ADDR 4
837 psgmii_read(struct qca8k_priv
*priv
, int reg
)
841 regmap_read(priv
->psgmii
, reg
, &val
);
846 psgmii_write(struct qca8k_priv
*priv
, int reg
, u32 val
)
848 regmap_write(priv
->psgmii
, reg
, val
);
852 qca8k_phy_mmd_write(struct qca8k_priv
*priv
, u32 phy_id
,
853 u16 mmd_num
, u16 reg_id
, u16 reg_val
)
855 struct mii_bus
*bus
= priv
->bus
;
857 mutex_lock(&bus
->mdio_lock
);
858 __mdiobus_write(bus
, phy_id
, MII_MMD_CTRL
, mmd_num
);
859 __mdiobus_write(bus
, phy_id
, MII_MMD_DATA
, reg_id
);
860 __mdiobus_write(bus
, phy_id
, MII_MMD_CTRL
, MII_MMD_CTRL_NOINCR
| mmd_num
);
861 __mdiobus_write(bus
, phy_id
, MII_MMD_DATA
, reg_val
);
862 mutex_unlock(&bus
->mdio_lock
);
866 qca8k_phy_mmd_read(struct qca8k_priv
*priv
, u32 phy_id
,
867 u16 mmd_num
, u16 reg_id
)
869 struct mii_bus
*bus
= priv
->bus
;
872 mutex_lock(&bus
->mdio_lock
);
873 __mdiobus_write(bus
, phy_id
, MII_MMD_CTRL
, mmd_num
);
874 __mdiobus_write(bus
, phy_id
, MII_MMD_DATA
, reg_id
);
875 __mdiobus_write(bus
, phy_id
, MII_MMD_CTRL
, MII_MMD_CTRL_NOINCR
| mmd_num
);
876 value
= __mdiobus_read(bus
, phy_id
, MII_MMD_DATA
);
877 mutex_unlock(&bus
->mdio_lock
);
883 ess_reset(struct qca8k_priv
*priv
)
885 reset_control_assert(priv
->ess_rst
);
889 reset_control_deassert(priv
->ess_rst
);
891 /* Waiting for all inner tables to be flushed and reinitialized.
892 * This takes between 5 and 10ms.
898 ar40xx_malibu_psgmii_ess_reset(struct qca8k_priv
*priv
)
900 struct mii_bus
*bus
= priv
->bus
;
903 /* Reset phy psgmii */
904 /* fix phy psgmii RX 20bit */
905 mdiobus_write(bus
, AR40XX_PSGMII_ID
, 0x0, 0x005b);
906 /* reset phy psgmii */
907 mdiobus_write(bus
, AR40XX_PSGMII_ID
, 0x0, 0x001b);
908 /* release reset phy psgmii */
909 mdiobus_write(bus
, AR40XX_PSGMII_ID
, 0x0, 0x005b);
911 for (n
= 0; n
< AR40XX_PSGMII_CALB_NUM
; n
++) {
914 status
= qca8k_phy_mmd_read(priv
, AR40XX_PSGMII_ID
,
915 MDIO_MMD_PMAPMD
, 0x28);
919 /* Polling interval to check PSGMII PLL in malibu is ready
920 * the worst time is 8.67ms
921 * for 25MHz reference clock
922 * [512+(128+2048)*49]*80ns+100us
927 /* check malibu psgmii calibration done end... */
929 /* freeze phy psgmii RX CDR */
930 mdiobus_write(bus
, AR40XX_PSGMII_ID
, 0x1a, 0x2230);
934 /* wait for the psgmii calibration to complete */
935 for (n
= 0; n
< AR40XX_PSGMII_CALB_NUM
; n
++) {
938 status
= psgmii_read(priv
, 0xa0);
942 /* Polling interval to check PSGMII PLL in ESS is ready */
946 /* release phy psgmii RX CDR */
947 mdiobus_write(bus
, AR40XX_PSGMII_ID
, 0x1a, 0x3230);
948 /* release phy psgmii RX 20bit */
949 mdiobus_write(bus
, AR40XX_PSGMII_ID
, 0x0, 0x005f);
953 ar40xx_psgmii_single_phy_testing(struct qca8k_priv
*priv
, int phy
)
955 struct mii_bus
*bus
= priv
->bus
;
960 u32 tx_all_ok
, rx_all_ok
;
963 mdiobus_write(bus
, phy
, MII_BMCR
, BMCR_RESET
| BMCR_ANENABLE
);
964 mdiobus_write(bus
, phy
, MII_BMCR
, BMCR_LOOPBACK
| BMCR_FULLDPLX
|
967 for (j
= 0; j
< AR40XX_PSGMII_CALB_NUM
; j
++) {
970 status
= mdiobus_read(bus
, phy
, AR40XX_PHY_SPEC_STATUS
);
971 if (status
& AR40XX_PHY_SPEC_STATUS_LINK
)
974 /* the polling interval to check if the PHY link up or not
975 * maxwait_timer: 750 ms +/-10 ms
976 * minwait_timer : 1 us +/- 0.1us
977 * time resides in minwait_timer ~ maxwait_timer
978 * see IEEE 802.3 section 40.4.5.2
984 qca8k_phy_mmd_write(priv
, phy
, 7, 0x8029, 0x0000);
985 qca8k_phy_mmd_write(priv
, phy
, 7, 0x8029, 0x0003);
988 qca8k_phy_mmd_write(priv
, phy
, 7, 0x8020, 0xa000);
990 /* wait precisely for all traffic end
991 * 4096(pkt num) * 1524(size) * 8ns (125MHz) = 49.9ms
996 tx_ok
= qca8k_phy_mmd_read(priv
, phy
, 7, 0x802e);
997 tx_ok_high16
= qca8k_phy_mmd_read(priv
, phy
, 7, 0x802d);
998 tx_error
= qca8k_phy_mmd_read(priv
, phy
, 7, 0x802f);
999 rx_ok
= qca8k_phy_mmd_read(priv
, phy
, 7, 0x802b);
1000 rx_ok_high16
= qca8k_phy_mmd_read(priv
, phy
, 7, 0x802a);
1001 rx_error
= qca8k_phy_mmd_read(priv
, phy
, 7, 0x802c);
1002 tx_all_ok
= tx_ok
+ (tx_ok_high16
<< 16);
1003 rx_all_ok
= rx_ok
+ (rx_ok_high16
<< 16);
1005 if (tx_all_ok
== 0x1000 && tx_error
== 0) {
1007 priv
->phy_t_status
&= (~BIT(phy
));
1009 pr_info("PHY %d single test PSGMII issue happen!\n", phy
);
1010 priv
->phy_t_status
|= BIT(phy
);
1013 mdiobus_write(bus
, phy
, MII_BMCR
, BMCR_ANENABLE
| BMCR_PDOWN
|
1018 ar40xx_psgmii_all_phy_testing(struct qca8k_priv
*priv
)
1020 struct mii_bus
*bus
= priv
->bus
;
1023 mdiobus_write(bus
, 0x1f, MII_BMCR
, BMCR_RESET
| BMCR_ANENABLE
);
1024 mdiobus_write(bus
, 0x1f, MII_BMCR
, BMCR_LOOPBACK
| BMCR_FULLDPLX
|
1027 for (j
= 0; j
< AR40XX_PSGMII_CALB_NUM
; j
++) {
1028 for (phy
= 0; phy
< AR40XX_NUM_PORTS
- 1; phy
++) {
1031 status
= mdiobus_read(bus
, phy
, AR40XX_PHY_SPEC_STATUS
);
1032 if (!(status
& AR40XX_PHY_SPEC_STATUS_LINK
))
1036 if (phy
>= (AR40XX_NUM_PORTS
- 1))
1038 /* The polling interva to check if the PHY link up or not */
1041 /* enable package accounting */
1042 qca8k_phy_mmd_write(priv
, 0x1f, 7, 0x8029, 0x0000);
1043 qca8k_phy_mmd_write(priv
, 0x1f, 7, 0x8029, 0x0003);
1045 /* start traffic generator */
1046 qca8k_phy_mmd_write(priv
, 0x1f, 7, 0x8020, 0xa000);
1048 /* wait for the traffic to die down.
1049 * 4096 Packets * 1524 Bytes/Packet * 8 ns/Byte (125MHz) = 49.9ms
1053 for (phy
= 0; phy
< AR40XX_NUM_PORTS
- 1; phy
++) {
1054 u32 tx_ok
, tx_error
;
1055 u32 rx_ok
, rx_error
;
1058 u32 tx_all_ok
, rx_all_ok
;
1061 tx_ok
= qca8k_phy_mmd_read(priv
, phy
, 7, 0x802e);
1062 tx_ok_high16
= qca8k_phy_mmd_read(priv
, phy
, 7, 0x802d);
1063 tx_error
= qca8k_phy_mmd_read(priv
, phy
, 7, 0x802f);
1064 rx_ok
= qca8k_phy_mmd_read(priv
, phy
, 7, 0x802b);
1065 rx_ok_high16
= qca8k_phy_mmd_read(priv
, phy
, 7, 0x802a);
1066 rx_error
= qca8k_phy_mmd_read(priv
, phy
, 7, 0x802c);
1067 tx_all_ok
= tx_ok
+ (tx_ok_high16
<< 16);
1068 rx_all_ok
= rx_ok
+ (rx_ok_high16
<< 16);
1070 if (tx_all_ok
== 4096 && tx_error
== 0) {
1072 priv
->phy_t_status
&= ~BIT(phy
+ 8);
1074 pr_info("PHY%d test see issue!\n", phy
);
1075 priv
->phy_t_status
|= BIT(phy
+ 8);
1079 pr_debug("PHY all test 0x%x \r\n", priv
->phy_t_status
);
1083 ar40xx_psgmii_self_test(struct qca8k_priv
*priv
)
1085 struct mii_bus
*bus
= priv
->bus
;
1088 ar40xx_malibu_psgmii_ess_reset(priv
);
1090 /* switch to access MII reg for copper */
1091 mdiobus_write(bus
, 4, 0x1f, 0x8500);
1093 for (phy
= 0; phy
< AR40XX_NUM_PORTS
- 1; phy
++) {
1094 /*enable phy mdio broadcast write*/
1095 qca8k_phy_mmd_write(priv
, phy
, 7, 0x8028, 0x801f);
1098 /* force no link by power down */
1099 mdiobus_write(bus
, 0x1f, MII_BMCR
, BMCR_ANENABLE
| BMCR_PDOWN
|
1102 /* Setup packet generator for loopback calibration */
1103 qca8k_phy_mmd_write(priv
, 0x1f, 7, 0x8021, 0x1000); /* 4096 Packets */
1104 qca8k_phy_mmd_write(priv
, 0x1f, 7, 0x8062, 0x05e0); /* 1524 Bytes */
1106 /* fix mdi status */
1107 mdiobus_write(bus
, 0x1f, 0x10, 0x6800);
1108 for (i
= 0; i
< AR40XX_PSGMII_CALB_NUM
; i
++) {
1109 priv
->phy_t_status
= 0;
1111 for (phy
= 0; phy
< AR40XX_NUM_PORTS
- 1; phy
++) {
1112 qca8k_rmw(priv
, AR40XX_REG_PORT_LOOKUP(phy
+ 1),
1113 AR40XX_PORT_LOOKUP_LOOPBACK
,
1114 AR40XX_PORT_LOOKUP_LOOPBACK
);
1117 for (phy
= 0; phy
< AR40XX_NUM_PORTS
- 1; phy
++)
1118 ar40xx_psgmii_single_phy_testing(priv
, phy
);
1120 ar40xx_psgmii_all_phy_testing(priv
);
1122 if (priv
->phy_t_status
)
1123 ar40xx_malibu_psgmii_ess_reset(priv
);
1128 if (i
>= AR40XX_PSGMII_CALB_NUM
)
1129 pr_info("PSGMII cannot recover\n");
1131 pr_debug("PSGMII recovered after %d times reset\n", i
);
1133 /* configuration recover */
1135 qca8k_phy_mmd_write(priv
, 0x1f, 7, 0x8021, 0x0);
1137 qca8k_phy_mmd_write(priv
, 0x1f, 7, 0x8029, 0x0);
1138 /* disable traffic */
1139 qca8k_phy_mmd_write(priv
, 0x1f, 7, 0x8020, 0x0);
1143 ar40xx_psgmii_self_test_clean(struct qca8k_priv
*priv
)
1145 struct mii_bus
*bus
= priv
->bus
;
1148 /* disable phy internal loopback */
1149 mdiobus_write(bus
, 0x1f, 0x10, 0x6860);
1150 mdiobus_write(bus
, 0x1f, MII_BMCR
, BMCR_ANENABLE
| BMCR_RESET
|
1153 for (phy
= 0; phy
< AR40XX_NUM_PORTS
- 1; phy
++) {
1154 /* disable mac loop back */
1155 qca8k_rmw(priv
, AR40XX_REG_PORT_LOOKUP(phy
+ 1),
1156 AR40XX_PORT_LOOKUP_LOOPBACK
, 0);
1158 /* disable phy mdio broadcast write */
1159 qca8k_phy_mmd_write(priv
, phy
, 7, 0x8028, 0x001f);
1164 ar40xx_malibu_init(struct qca8k_priv
*priv
)
1169 /* war to enable AZ transmitting ability */
1170 qca8k_phy_mmd_write(priv
, AR40XX_PSGMII_ID
, 1,
1171 AR40XX_MALIBU_PSGMII_MODE_CTRL
,
1172 AR40XX_MALIBU_PHY_PSGMII_MODE_CTRL_ADJUST_VAL
);
1174 for (i
= 0; i
< AR40XX_NUM_PORTS
- 1; i
++) {
1176 /* change malibu control_dac */
1177 val
= qca8k_phy_mmd_read(priv
, i
, 7, AR40XX_MALIBU_PHY_MMD7_DAC_CTRL
);
1178 val
&= ~AR40XX_MALIBU_DAC_CTRL_MASK
;
1179 val
|= AR40XX_MALIBU_DAC_CTRL_VALUE
;
1180 qca8k_phy_mmd_write(priv
, i
, 7, AR40XX_MALIBU_PHY_MMD7_DAC_CTRL
, val
);
1182 if (i
== AR40XX_MALIBU_PHY_LAST_ADDR
) {
1183 /* avoid PHY to get into hibernation */
1184 val
= qca8k_phy_mmd_read(priv
, i
, 3,
1185 AR40XX_MALIBU_PHY_RLP_CTRL
);
1187 qca8k_phy_mmd_write(priv
, i
, 3,
1188 AR40XX_MALIBU_PHY_RLP_CTRL
, val
);
1192 /* adjust psgmii serdes tx amp */
1193 mdiobus_write(priv
->bus
, AR40XX_PSGMII_ID
,
1194 AR40XX_PSGMII_TX_DRIVER_1_CTRL
,
1195 AR40XX_MALIBU_PHY_PSGMII_REDUCE_SERDES_TX_AMP
);
1199 ar40xx_mac_mode_init(struct qca8k_priv
*priv
)
1201 switch (priv
->mac_mode
) {
1202 case PORT_WRAPPER_PSGMII
:
1203 ar40xx_malibu_init(priv
);
1204 ar40xx_psgmii_self_test(priv
);
1205 ar40xx_psgmii_self_test_clean(priv
);
1207 psgmii_write(priv
, AR40XX_PSGMII_MODE_CONTROL
, 0x2200);
1208 psgmii_write(priv
, AR40XX_PSGMIIPHY_TX_CONTROL
, 0x8380);
1210 case PORT_WRAPPER_RGMII
:
1211 qca8k_write(priv
, AR40XX_REG_RGMII_CTRL
, BIT(10));
1217 /* Start of qm error WAR */
1219 #define AR40XX_PORT_LINK_UP 1
1220 #define AR40XX_PORT_LINK_DOWN 0
1221 #define AR40XX_QM_NOT_EMPTY 1
1222 #define AR40XX_QM_EMPTY 0
1225 int ar40xx_force_1g_full(struct qca8k_priv
*priv
, u32 port_id
)
1229 if (port_id
< 0 || port_id
> 6)
1232 reg
= QCA8K_REG_PORT_STATUS(port_id
);
1233 return qca8k_rmw(priv
, reg
, QCA8K_PORT_STATUS_SPEED
,
1234 (QCA8K_PORT_STATUS_SPEED_1000
| QCA8K_PORT_STATUS_DUPLEX
));
1238 int ar40xx_get_qm_status(struct qca8k_priv
*priv
,
1239 u32 port_id
, u32
*qm_buffer_err
)
1244 if (port_id
< 1 || port_id
> 5) {
1250 reg
= AR40XX_REG_QM_PORT0_3_QNUM
;
1251 qca8k_write(priv
, AR40XX_REG_QM_DEBUG_ADDR
, reg
);
1252 qm_val
= qca8k_read(priv
, AR40XX_REG_QM_DEBUG_VALUE
);
1253 /* every 8 bits for each port */
1254 *qm_buffer_err
= (qm_val
>> (port_id
* 8)) & 0xFF;
1256 reg
= AR40XX_REG_QM_PORT4_6_QNUM
;
1257 qca8k_write(priv
, AR40XX_REG_QM_DEBUG_ADDR
, reg
);
1258 qm_val
= qca8k_read(priv
, AR40XX_REG_QM_DEBUG_VALUE
);
1259 /* every 8 bits for each port */
1260 *qm_buffer_err
= (qm_val
>> ((port_id
-4) * 8)) & 0xFF;
1267 ar40xx_sw_mac_polling_task(struct qca8k_priv
*priv
)
1269 static int task_count
;
1272 u32 link
, speed
, duplex
;
1274 u16 port_phy_status
[AR40XX_NUM_PORTS
];
1275 static u32 qm_err_cnt
[AR40XX_NUM_PORTS
] = {0, 0, 0, 0, 0, 0};
1276 static u32 link_cnt
[AR40XX_NUM_PORTS
] = {0, 0, 0, 0, 0, 0};
1277 struct mii_bus
*bus
= NULL
;
1279 if (!priv
|| !priv
->bus
)
1286 for (i
= 1; i
< AR40XX_NUM_PORTS
; ++i
) {
1287 port_phy_status
[i
] =
1288 mdiobus_read(bus
, i
-1, AR40XX_PHY_SPEC_STATUS
);
1289 speed
= link
= duplex
= port_phy_status
[i
];
1290 speed
&= AR40XX_PHY_SPEC_STATUS_SPEED
;
1292 link
&= AR40XX_PHY_SPEC_STATUS_LINK
;
1294 duplex
&= AR40XX_PHY_SPEC_STATUS_DUPLEX
;
1297 if (link
!= priv
->ar40xx_port_old_link
[i
]) {
1300 if ((priv
->ar40xx_port_old_link
[i
] ==
1301 AR40XX_PORT_LINK_UP
) &&
1302 (link
== AR40XX_PORT_LINK_DOWN
)) {
1303 /* LINK_EN disable(MAC force mode)*/
1304 reg
= QCA8K_REG_PORT_STATUS(i
);
1305 qca8k_rmw(priv
, reg
,
1306 QCA8K_PORT_STATUS_LINK_AUTO
, 0);
1308 /* Check queue buffer */
1310 ar40xx_get_qm_status(priv
, i
, &qm_buffer_err
);
1311 if (qm_buffer_err
) {
1312 priv
->ar40xx_port_qm_buf
[i
] =
1313 AR40XX_QM_NOT_EMPTY
;
1317 priv
->ar40xx_port_qm_buf
[i
] =
1319 ar40xx_force_1g_full(priv
, i
);
1320 /* Ref:QCA8337 Datasheet,Clearing
1321 * MENU_CTRL_EN prevents phy to
1322 * stuck in 100BT mode when
1323 * bringing up the link
1325 ar40xx_phy_dbg_read(priv
, i
-1,
1328 phy_val
&= (~AR40XX_PHY_MANU_CTRL_EN
);
1329 ar40xx_phy_dbg_write(priv
, i
-1,
1333 priv
->ar40xx_port_old_link
[i
] = link
;
1334 } else if ((priv
->ar40xx_port_old_link
[i
] ==
1335 AR40XX_PORT_LINK_DOWN
) &&
1336 (link
== AR40XX_PORT_LINK_UP
)) {
1338 if (priv
->port_link_up
[i
] < 1) {
1339 ++priv
->port_link_up
[i
];
1341 /* Change port status */
1342 reg
= QCA8K_REG_PORT_STATUS(i
);
1343 value
= qca8k_read(priv
, reg
);
1344 priv
->port_link_up
[i
] = 0;
1346 value
&= ~(QCA8K_PORT_STATUS_DUPLEX
|
1347 QCA8K_PORT_STATUS_SPEED
);
1348 value
|= speed
| (duplex
? BIT(6) : 0);
1349 /**/qca8k_write(priv
, reg
, value
);
1350 /* clock switch need such time
1353 usleep_range(100, 200);
1355 value
|= QCA8K_PORT_STATUS_LINK_AUTO
;
1356 qca8k_write(priv
, reg
, value
);
1357 /* HW need such time to make sure link
1358 * stable before enable MAC
1360 usleep_range(100, 200);
1362 if (speed
== QCA8K_PORT_STATUS_SPEED_100
) {
1364 /* Enable @100M, if down to 10M
1365 * clock will change smoothly
1367 ar40xx_phy_dbg_read(priv
, i
-1,
1371 AR40XX_PHY_MANU_CTRL_EN
;
1372 ar40xx_phy_dbg_write(priv
, i
-1,
1376 priv
->ar40xx_port_old_link
[i
] = link
;
1381 if (priv
->ar40xx_port_qm_buf
[i
] == AR40XX_QM_NOT_EMPTY
) {
1383 ar40xx_get_qm_status(priv
, i
, &qm_buffer_err
);
1384 if (qm_buffer_err
) {
1387 priv
->ar40xx_port_qm_buf
[i
] =
1390 ar40xx_force_1g_full(priv
, i
);
1396 #define AR40XX_QM_WORK_DELAY 100
1399 ar40xx_qm_err_check_work_task(struct work_struct
*work
)
1401 struct qca8k_priv
*priv
= container_of(work
, struct qca8k_priv
,
1404 mutex_lock(&priv
->qm_lock
);
1406 ar40xx_sw_mac_polling_task(priv
);
1408 mutex_unlock(&priv
->qm_lock
);
1410 schedule_delayed_work(&priv
->qm_dwork
,
1411 msecs_to_jiffies(AR40XX_QM_WORK_DELAY
));
1415 ar40xx_qm_err_check_work_start(struct qca8k_priv
*priv
)
1417 mutex_init(&priv
->qm_lock
);
1419 INIT_DELAYED_WORK(&priv
->qm_dwork
, ar40xx_qm_err_check_work_task
);
1421 schedule_delayed_work(&priv
->qm_dwork
,
1422 msecs_to_jiffies(AR40XX_QM_WORK_DELAY
));
/*
 * NOTE(review): only this signature line survives in the extraction.
 * Presumably this is the no-op stub half of an #if/#else pair with the
 * real ar40xx_qm_err_check_work_start() above — confirm against the
 * full file before editing.
 */
ar40xx_qm_err_check_work_start(struct qca8k_priv *priv)
1436 qca8k_dsa_init_work(struct work_struct
*work
)
1438 struct qca8k_priv
*priv
= container_of(work
, struct qca8k_priv
, dsa_init
.work
);
1439 struct device
*parent
= priv
->pdev
->dev
.parent
;
1442 ret
= dsa_register_switch(priv
->ds
);
1449 dev_dbg(&priv
->pdev
->dev
, "dsa_register_switch defered.\n");
1450 schedule_delayed_work(&priv
->dsa_init
, msecs_to_jiffies(200));
1454 dev_err(&priv
->pdev
->dev
, "dsa_register_switch failed with (%d).\n", ret
);
1455 /* unbind anything failed */
1457 device_lock(parent
);
1459 device_release_driver(&priv
->pdev
->dev
);
1461 device_unlock(parent
);
1467 qca8k_mmio_probe(struct platform_device
*pdev
)
1469 struct qca8k_priv
*priv
;
1470 struct device_node
*np
= pdev
->dev
.of_node
, *mii_np
;
1473 priv
= devm_kzalloc(&pdev
->dev
, sizeof(*priv
), GFP_KERNEL
);
1476 mutex_init(&priv
->reg_mutex
);
1478 priv
->ess_clk
= of_clk_get_by_name(np
, "ess_clk");
1479 if (IS_ERR(priv
->ess_clk
)) {
1480 dev_err(&pdev
->dev
, "Failed to get ess_clk\n");
1481 return PTR_ERR(priv
->ess_clk
);
1484 priv
->ess_rst
= devm_reset_control_get(&pdev
->dev
, "ess_rst");
1485 if (IS_ERR(priv
->ess_rst
)) {
1486 dev_err(&pdev
->dev
, "Failed to get ess_rst control!\n");
1487 return PTR_ERR(priv
->ess_rst
);
1490 ret
= of_property_read_u32(np
, "mac-mode", &priv
->mac_mode
);
1494 priv
->base
= syscon_node_to_regmap(np
);
1495 if (IS_ERR_OR_NULL(priv
->base
))
1498 priv
->psgmii
= syscon_regmap_lookup_by_phandle(np
, "psgmii-phy");
1499 if (IS_ERR_OR_NULL(priv
->psgmii
))
1502 mii_np
= of_parse_phandle(np
, "mii", 0);
1506 priv
->bus
= of_mdio_find_bus(mii_np
);
1507 of_node_put(mii_np
);
1509 return -EPROBE_DEFER
;
1511 priv
->ds
= dsa_switch_alloc(&pdev
->dev
, DSA_MAX_PORTS
);
1515 priv
->ds
->priv
= priv
;
1516 priv
->ds
->ops
= &qca8k_switch_ops
;
1518 clk_prepare_enable(priv
->ess_clk
);
1520 platform_set_drvdata(pdev
, priv
);
1522 ar40xx_qm_err_check_work_start(priv
);
1526 ar40xx_mac_mode_init(priv
);
1528 reset_control_put(priv
->ess_rst
);
1530 /* Ok. What's going on with the delayed dsa_switch_register?!
1532 * On Bootup, this switch driver loads before the ethernet
1533 * driver. This causes a problem in dsa_register_switch when
1534 * it parses the tree and encounters the not-yet-ready
1535 * "ethernet = <&gmac>;" property.
1537 * Which will err with -EPROBE_DEFER. Normally this should be
1538 * OK and the driver will just get loaded at a later time.
1539 * However, the EthernetSubSystem (ESS for short) really doesn't
1540 * like being resetted more than once in this fashion and will
1541 * "lock it up for good"... like "real good".
1543 * So far, only a reboot can "unwedge" it, which is not what
1546 * So this workaround (running dsa_register_switch in a
1547 * workqueue task) is employed to fix this unknown issue within
1551 INIT_DELAYED_WORK(&priv
->dsa_init
, qca8k_dsa_init_work
);
1552 schedule_delayed_work(&priv
->dsa_init
, msecs_to_jiffies(1000));
1557 static const struct of_device_id qca8k_of_match
[] = {
1558 { .compatible
= "qca,qca8337-mmio" },
1562 static struct platform_driver qca8kmmio_driver
= {
1565 .of_match_table
= qca8k_of_match
,
1569 module_platform_driver_probe(qca8kmmio_driver
, qca8k_mmio_probe
);
1571 MODULE_AUTHOR("Mathieu Olivari, John Crispin <john@phrozen.org>");
1572 MODULE_DESCRIPTION("Driver for QCA8K ethernet switch family");
1573 MODULE_LICENSE("GPL v2");
1574 MODULE_ALIAS("platform:qca8k");