1 From 4bbaf764e1e1786eb937fdb62172f656f512e116 Mon Sep 17 00:00:00 2001
2 From: Christian Marangi <ansuelsmth@gmail.com>
3 Date: Wed, 13 Jul 2022 22:53:50 +0200
4 Subject: [PATCH 1/1] net: dsa: qca8k: move driver to qca dir
6 Move qca8k driver to qca dir in preparation for code split and
7 introduction of ipq4019 switch based on qca8k.
9 Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
10 Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
11 Signed-off-by: David S. Miller <davem@davemloft.net>
13 drivers/net/dsa/Kconfig | 8 --------
14 drivers/net/dsa/Makefile | 1 -
15 drivers/net/dsa/qca/Kconfig | 8 ++++++++
16 drivers/net/dsa/qca/Makefile | 1 +
17 drivers/net/dsa/{ => qca}/qca8k.c | 0
18 drivers/net/dsa/{ => qca}/qca8k.h | 0
19 6 files changed, 9 insertions(+), 9 deletions(-)
20 rename drivers/net/dsa/{ => qca}/qca8k.c (100%)
21 rename drivers/net/dsa/{ => qca}/qca8k.h (100%)
23 --- a/drivers/net/dsa/Kconfig
24 +++ b/drivers/net/dsa/Kconfig
25 @@ -60,14 +60,6 @@ source "drivers/net/dsa/sja1105/Kconfig"
27 source "drivers/net/dsa/xrs700x/Kconfig"
30 - tristate "Qualcomm Atheros QCA8K Ethernet switch family support"
31 - select NET_DSA_TAG_QCA
34 - This enables support for the Qualcomm Atheros QCA8K Ethernet
37 config NET_DSA_REALTEK_SMI
38 tristate "Realtek SMI Ethernet switch family support"
39 select NET_DSA_TAG_RTL4_A
40 --- a/drivers/net/dsa/Makefile
41 +++ b/drivers/net/dsa/Makefile
43 obj-$(CONFIG_NET_DSA_LANTIQ_GSWIP) += lantiq_gswip.o
44 obj-$(CONFIG_NET_DSA_MT7530) += mt7530.o
45 obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o
46 -obj-$(CONFIG_NET_DSA_QCA8K) += qca8k.o
47 obj-$(CONFIG_NET_DSA_REALTEK_SMI) += realtek-smi.o
48 realtek-smi-objs := realtek-smi-core.o rtl8366.o rtl8366rb.o
49 obj-$(CONFIG_NET_DSA_SMSC_LAN9303) += lan9303-core.o
50 --- a/drivers/net/dsa/qca/Kconfig
51 +++ b/drivers/net/dsa/qca/Kconfig
52 @@ -7,3 +7,11 @@ config NET_DSA_AR9331
54 This enables support for the Qualcomm Atheros AR9331 built-in Ethernet
58 + tristate "Qualcomm Atheros QCA8K Ethernet switch family support"
59 + select NET_DSA_TAG_QCA
62 + This enables support for the Qualcomm Atheros QCA8K Ethernet
64 --- a/drivers/net/dsa/qca/Makefile
65 +++ b/drivers/net/dsa/qca/Makefile
67 # SPDX-License-Identifier: GPL-2.0-only
68 obj-$(CONFIG_NET_DSA_AR9331) += ar9331.o
69 +obj-$(CONFIG_NET_DSA_QCA8K) += qca8k.o
71 +++ b/drivers/net/dsa/qca/qca8k.c
73 +// SPDX-License-Identifier: GPL-2.0
75 + * Copyright (C) 2009 Felix Fietkau <nbd@nbd.name>
76 + * Copyright (C) 2011-2012 Gabor Juhos <juhosg@openwrt.org>
77 + * Copyright (c) 2015, 2019, The Linux Foundation. All rights reserved.
78 + * Copyright (c) 2016 John Crispin <john@phrozen.org>
81 +#include <linux/module.h>
82 +#include <linux/phy.h>
83 +#include <linux/netdevice.h>
84 +#include <linux/bitfield.h>
85 +#include <linux/regmap.h>
87 +#include <linux/of_net.h>
88 +#include <linux/of_mdio.h>
89 +#include <linux/of_platform.h>
90 +#include <linux/if_bridge.h>
91 +#include <linux/mdio.h>
92 +#include <linux/phylink.h>
93 +#include <linux/gpio/consumer.h>
94 +#include <linux/etherdevice.h>
95 +#include <linux/dsa/tag_qca.h>
99 +#define MIB_DESC(_s, _o, _n) \
106 +static const struct qca8k_mib_desc ar8327_mib[] = {
107 + MIB_DESC(1, 0x00, "RxBroad"),
108 + MIB_DESC(1, 0x04, "RxPause"),
109 + MIB_DESC(1, 0x08, "RxMulti"),
110 + MIB_DESC(1, 0x0c, "RxFcsErr"),
111 + MIB_DESC(1, 0x10, "RxAlignErr"),
112 + MIB_DESC(1, 0x14, "RxRunt"),
113 + MIB_DESC(1, 0x18, "RxFragment"),
114 + MIB_DESC(1, 0x1c, "Rx64Byte"),
115 + MIB_DESC(1, 0x20, "Rx128Byte"),
116 + MIB_DESC(1, 0x24, "Rx256Byte"),
117 + MIB_DESC(1, 0x28, "Rx512Byte"),
118 + MIB_DESC(1, 0x2c, "Rx1024Byte"),
119 + MIB_DESC(1, 0x30, "Rx1518Byte"),
120 + MIB_DESC(1, 0x34, "RxMaxByte"),
121 + MIB_DESC(1, 0x38, "RxTooLong"),
122 + MIB_DESC(2, 0x3c, "RxGoodByte"),
123 + MIB_DESC(2, 0x44, "RxBadByte"),
124 + MIB_DESC(1, 0x4c, "RxOverFlow"),
125 + MIB_DESC(1, 0x50, "Filtered"),
126 + MIB_DESC(1, 0x54, "TxBroad"),
127 + MIB_DESC(1, 0x58, "TxPause"),
128 + MIB_DESC(1, 0x5c, "TxMulti"),
129 + MIB_DESC(1, 0x60, "TxUnderRun"),
130 + MIB_DESC(1, 0x64, "Tx64Byte"),
131 + MIB_DESC(1, 0x68, "Tx128Byte"),
132 + MIB_DESC(1, 0x6c, "Tx256Byte"),
133 + MIB_DESC(1, 0x70, "Tx512Byte"),
134 + MIB_DESC(1, 0x74, "Tx1024Byte"),
135 + MIB_DESC(1, 0x78, "Tx1518Byte"),
136 + MIB_DESC(1, 0x7c, "TxMaxByte"),
137 + MIB_DESC(1, 0x80, "TxOverSize"),
138 + MIB_DESC(2, 0x84, "TxByte"),
139 + MIB_DESC(1, 0x8c, "TxCollision"),
140 + MIB_DESC(1, 0x90, "TxAbortCol"),
141 + MIB_DESC(1, 0x94, "TxMultiCol"),
142 + MIB_DESC(1, 0x98, "TxSingleCol"),
143 + MIB_DESC(1, 0x9c, "TxExcDefer"),
144 + MIB_DESC(1, 0xa0, "TxDefer"),
145 + MIB_DESC(1, 0xa4, "TxLateCol"),
146 + MIB_DESC(1, 0xa8, "RXUnicast"),
147 + MIB_DESC(1, 0xac, "TXUnicast"),
151 +qca8k_split_addr(u32 regaddr, u16 *r1, u16 *r2, u16 *page)
154 + *r1 = regaddr & 0x1e;
157 + *r2 = regaddr & 0x7;
160 + *page = regaddr & 0x3ff;
164 +qca8k_set_lo(struct qca8k_priv *priv, int phy_id, u32 regnum, u16 lo)
166 + u16 *cached_lo = &priv->mdio_cache.lo;
167 + struct mii_bus *bus = priv->bus;
170 + if (lo == *cached_lo)
173 + ret = bus->write(bus, phy_id, regnum, lo);
175 + dev_err_ratelimited(&bus->dev,
176 + "failed to write qca8k 32bit lo register\n");
183 +qca8k_set_hi(struct qca8k_priv *priv, int phy_id, u32 regnum, u16 hi)
185 + u16 *cached_hi = &priv->mdio_cache.hi;
186 + struct mii_bus *bus = priv->bus;
189 + if (hi == *cached_hi)
192 + ret = bus->write(bus, phy_id, regnum, hi);
194 + dev_err_ratelimited(&bus->dev,
195 + "failed to write qca8k 32bit hi register\n");
202 +qca8k_mii_read32(struct mii_bus *bus, int phy_id, u32 regnum, u32 *val)
206 + ret = bus->read(bus, phy_id, regnum);
209 + ret = bus->read(bus, phy_id, regnum + 1);
214 + dev_err_ratelimited(&bus->dev,
215 + "failed to read qca8k 32bit register\n");
224 +qca8k_mii_write32(struct qca8k_priv *priv, int phy_id, u32 regnum, u32 val)
230 + hi = (u16)(val >> 16);
232 + ret = qca8k_set_lo(priv, phy_id, regnum, lo);
234 + ret = qca8k_set_hi(priv, phy_id, regnum + 1, hi);
238 +qca8k_set_page(struct qca8k_priv *priv, u16 page)
240 + u16 *cached_page = &priv->mdio_cache.page;
241 + struct mii_bus *bus = priv->bus;
244 + if (page == *cached_page)
247 + ret = bus->write(bus, 0x18, 0, page);
249 + dev_err_ratelimited(&bus->dev,
250 + "failed to set qca8k page\n");
254 + *cached_page = page;
255 + usleep_range(1000, 2000);
260 +qca8k_read(struct qca8k_priv *priv, u32 reg, u32 *val)
262 + return regmap_read(priv->regmap, reg, val);
266 +qca8k_write(struct qca8k_priv *priv, u32 reg, u32 val)
268 + return regmap_write(priv->regmap, reg, val);
272 +qca8k_rmw(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val)
274 + return regmap_update_bits(priv->regmap, reg, mask, write_val);
277 +static void qca8k_rw_reg_ack_handler(struct dsa_switch *ds, struct sk_buff *skb)
279 + struct qca8k_mgmt_eth_data *mgmt_eth_data;
280 + struct qca8k_priv *priv = ds->priv;
281 + struct qca_mgmt_ethhdr *mgmt_ethhdr;
284 + mgmt_ethhdr = (struct qca_mgmt_ethhdr *)skb_mac_header(skb);
285 + mgmt_eth_data = &priv->mgmt_eth_data;
287 + cmd = FIELD_GET(QCA_HDR_MGMT_CMD, mgmt_ethhdr->command);
288 + len = FIELD_GET(QCA_HDR_MGMT_LENGTH, mgmt_ethhdr->command);
290 + /* Make sure the seq match the requested packet */
291 + if (mgmt_ethhdr->seq == mgmt_eth_data->seq)
292 + mgmt_eth_data->ack = true;
294 + if (cmd == MDIO_READ) {
295 + mgmt_eth_data->data[0] = mgmt_ethhdr->mdio_data;
297 + /* Get the rest of the 12 byte of data.
298 + * The read/write function will extract the requested data.
300 + if (len > QCA_HDR_MGMT_DATA1_LEN)
301 + memcpy(mgmt_eth_data->data + 1, skb->data,
302 + QCA_HDR_MGMT_DATA2_LEN);
305 + complete(&mgmt_eth_data->rw_done);
308 +static struct sk_buff *qca8k_alloc_mdio_header(enum mdio_cmd cmd, u32 reg, u32 *val,
309 + int priority, unsigned int len)
311 + struct qca_mgmt_ethhdr *mgmt_ethhdr;
312 + unsigned int real_len;
313 + struct sk_buff *skb;
317 + skb = dev_alloc_skb(QCA_HDR_MGMT_PKT_LEN);
321 + /* Max value for len reg is 15 (0xf) but the switch actually return 16 byte
322 + * Actually for some reason the steps are:
324 + * 1-4: first 4 byte
325 + * 5-6: first 12 byte
326 + * 7-15: all 16 byte
333 + skb_reset_mac_header(skb);
334 + skb_set_network_header(skb, skb->len);
336 + mgmt_ethhdr = skb_push(skb, QCA_HDR_MGMT_HEADER_LEN + QCA_HDR_LEN);
338 + hdr = FIELD_PREP(QCA_HDR_XMIT_VERSION, QCA_HDR_VERSION);
339 + hdr |= FIELD_PREP(QCA_HDR_XMIT_PRIORITY, priority);
340 + hdr |= QCA_HDR_XMIT_FROM_CPU;
341 + hdr |= FIELD_PREP(QCA_HDR_XMIT_DP_BIT, BIT(0));
342 + hdr |= FIELD_PREP(QCA_HDR_XMIT_CONTROL, QCA_HDR_XMIT_TYPE_RW_REG);
344 + mgmt_ethhdr->command = FIELD_PREP(QCA_HDR_MGMT_ADDR, reg);
345 + mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_LENGTH, real_len);
346 + mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_CMD, cmd);
347 + mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_CHECK_CODE,
348 + QCA_HDR_MGMT_CHECK_CODE_VAL);
350 + if (cmd == MDIO_WRITE)
351 + mgmt_ethhdr->mdio_data = *val;
353 + mgmt_ethhdr->hdr = htons(hdr);
355 + data2 = skb_put_zero(skb, QCA_HDR_MGMT_DATA2_LEN + QCA_HDR_MGMT_PADDING_LEN);
356 + if (cmd == MDIO_WRITE && len > QCA_HDR_MGMT_DATA1_LEN)
357 + memcpy(data2, val + 1, len - QCA_HDR_MGMT_DATA1_LEN);
362 +static void qca8k_mdio_header_fill_seq_num(struct sk_buff *skb, u32 seq_num)
364 + struct qca_mgmt_ethhdr *mgmt_ethhdr;
366 + mgmt_ethhdr = (struct qca_mgmt_ethhdr *)skb->data;
367 + mgmt_ethhdr->seq = FIELD_PREP(QCA_HDR_MGMT_SEQ_NUM, seq_num);
370 +static int qca8k_read_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
372 + struct qca8k_mgmt_eth_data *mgmt_eth_data = &priv->mgmt_eth_data;
373 + struct sk_buff *skb;
377 + skb = qca8k_alloc_mdio_header(MDIO_READ, reg, NULL,
378 + QCA8K_ETHERNET_MDIO_PRIORITY, len);
382 + mutex_lock(&mgmt_eth_data->mutex);
384 + /* Check mgmt_master if is operational */
385 + if (!priv->mgmt_master) {
387 + mutex_unlock(&mgmt_eth_data->mutex);
391 + skb->dev = priv->mgmt_master;
393 + reinit_completion(&mgmt_eth_data->rw_done);
395 + /* Increment seq_num and set it in the mdio pkt */
396 + mgmt_eth_data->seq++;
397 + qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
398 + mgmt_eth_data->ack = false;
400 + dev_queue_xmit(skb);
402 + ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
403 + msecs_to_jiffies(QCA8K_ETHERNET_TIMEOUT));
405 + *val = mgmt_eth_data->data[0];
406 + if (len > QCA_HDR_MGMT_DATA1_LEN)
407 + memcpy(val + 1, mgmt_eth_data->data + 1, len - QCA_HDR_MGMT_DATA1_LEN);
409 + ack = mgmt_eth_data->ack;
411 + mutex_unlock(&mgmt_eth_data->mutex);
422 +static int qca8k_write_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
424 + struct qca8k_mgmt_eth_data *mgmt_eth_data = &priv->mgmt_eth_data;
425 + struct sk_buff *skb;
429 + skb = qca8k_alloc_mdio_header(MDIO_WRITE, reg, val,
430 + QCA8K_ETHERNET_MDIO_PRIORITY, len);
434 + mutex_lock(&mgmt_eth_data->mutex);
436 + /* Check mgmt_master if is operational */
437 + if (!priv->mgmt_master) {
439 + mutex_unlock(&mgmt_eth_data->mutex);
443 + skb->dev = priv->mgmt_master;
445 + reinit_completion(&mgmt_eth_data->rw_done);
447 + /* Increment seq_num and set it in the mdio pkt */
448 + mgmt_eth_data->seq++;
449 + qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
450 + mgmt_eth_data->ack = false;
452 + dev_queue_xmit(skb);
454 + ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
455 + msecs_to_jiffies(QCA8K_ETHERNET_TIMEOUT));
457 + ack = mgmt_eth_data->ack;
459 + mutex_unlock(&mgmt_eth_data->mutex);
471 +qca8k_regmap_update_bits_eth(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val)
476 + ret = qca8k_read_eth(priv, reg, &val, sizeof(val));
483 + return qca8k_write_eth(priv, reg, &val, sizeof(val));
487 +qca8k_bulk_read(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
489 + int i, count = len / sizeof(u32), ret;
491 + if (priv->mgmt_master && !qca8k_read_eth(priv, reg, val, len))
494 + for (i = 0; i < count; i++) {
495 + ret = regmap_read(priv->regmap, reg + (i * 4), val + i);
504 +qca8k_bulk_write(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
506 + int i, count = len / sizeof(u32), ret;
509 + if (priv->mgmt_master && !qca8k_write_eth(priv, reg, val, len))
512 + for (i = 0; i < count; i++) {
515 + ret = regmap_write(priv->regmap, reg + (i * 4), tmp);
524 +qca8k_regmap_read(void *ctx, uint32_t reg, uint32_t *val)
526 + struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
527 + struct mii_bus *bus = priv->bus;
531 + if (!qca8k_read_eth(priv, reg, val, sizeof(*val)))
534 + qca8k_split_addr(reg, &r1, &r2, &page);
536 + mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
538 + ret = qca8k_set_page(priv, page);
542 + ret = qca8k_mii_read32(bus, 0x10 | r2, r1, val);
545 + mutex_unlock(&bus->mdio_lock);
550 +qca8k_regmap_write(void *ctx, uint32_t reg, uint32_t val)
552 + struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
553 + struct mii_bus *bus = priv->bus;
557 + if (!qca8k_write_eth(priv, reg, &val, sizeof(val)))
560 + qca8k_split_addr(reg, &r1, &r2, &page);
562 + mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
564 + ret = qca8k_set_page(priv, page);
568 + qca8k_mii_write32(priv, 0x10 | r2, r1, val);
571 + mutex_unlock(&bus->mdio_lock);
576 +qca8k_regmap_update_bits(void *ctx, uint32_t reg, uint32_t mask, uint32_t write_val)
578 + struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
579 + struct mii_bus *bus = priv->bus;
584 + if (!qca8k_regmap_update_bits_eth(priv, reg, mask, write_val))
587 + qca8k_split_addr(reg, &r1, &r2, &page);
589 + mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
591 + ret = qca8k_set_page(priv, page);
595 + ret = qca8k_mii_read32(bus, 0x10 | r2, r1, &val);
601 + qca8k_mii_write32(priv, 0x10 | r2, r1, val);
604 + mutex_unlock(&bus->mdio_lock);
609 +static const struct regmap_range qca8k_readable_ranges[] = {
610 + regmap_reg_range(0x0000, 0x00e4), /* Global control */
611 + regmap_reg_range(0x0100, 0x0168), /* EEE control */
612 + regmap_reg_range(0x0200, 0x0270), /* Parser control */
613 + regmap_reg_range(0x0400, 0x0454), /* ACL */
614 + regmap_reg_range(0x0600, 0x0718), /* Lookup */
615 + regmap_reg_range(0x0800, 0x0b70), /* QM */
616 + regmap_reg_range(0x0c00, 0x0c80), /* PKT */
617 + regmap_reg_range(0x0e00, 0x0e98), /* L3 */
618 + regmap_reg_range(0x1000, 0x10ac), /* MIB - Port0 */
619 + regmap_reg_range(0x1100, 0x11ac), /* MIB - Port1 */
620 + regmap_reg_range(0x1200, 0x12ac), /* MIB - Port2 */
621 + regmap_reg_range(0x1300, 0x13ac), /* MIB - Port3 */
622 + regmap_reg_range(0x1400, 0x14ac), /* MIB - Port4 */
623 + regmap_reg_range(0x1500, 0x15ac), /* MIB - Port5 */
624 + regmap_reg_range(0x1600, 0x16ac), /* MIB - Port6 */
628 +static const struct regmap_access_table qca8k_readable_table = {
629 + .yes_ranges = qca8k_readable_ranges,
630 + .n_yes_ranges = ARRAY_SIZE(qca8k_readable_ranges),
633 +static struct regmap_config qca8k_regmap_config = {
637 + .max_register = 0x16ac, /* end MIB - Port6 range */
638 + .reg_read = qca8k_regmap_read,
639 + .reg_write = qca8k_regmap_write,
640 + .reg_update_bits = qca8k_regmap_update_bits,
641 + .rd_table = &qca8k_readable_table,
642 + .disable_locking = true, /* Locking is handled by qca8k read/write */
643 + .cache_type = REGCACHE_NONE, /* Explicitly disable CACHE */
647 +qca8k_busy_wait(struct qca8k_priv *priv, u32 reg, u32 mask)
651 + return regmap_read_poll_timeout(priv->regmap, reg, val, !(val & mask), 0,
652 + QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC);
656 +qca8k_fdb_read(struct qca8k_priv *priv, struct qca8k_fdb *fdb)
661 + /* load the ARL table into an array */
662 + ret = qca8k_bulk_read(priv, QCA8K_REG_ATU_DATA0, reg, sizeof(reg));
667 + fdb->vid = FIELD_GET(QCA8K_ATU_VID_MASK, reg[2]);
668 + /* aging - 67:64 */
669 + fdb->aging = FIELD_GET(QCA8K_ATU_STATUS_MASK, reg[2]);
670 + /* portmask - 54:48 */
671 + fdb->port_mask = FIELD_GET(QCA8K_ATU_PORT_MASK, reg[1]);
673 + fdb->mac[0] = FIELD_GET(QCA8K_ATU_ADDR0_MASK, reg[1]);
674 + fdb->mac[1] = FIELD_GET(QCA8K_ATU_ADDR1_MASK, reg[1]);
675 + fdb->mac[2] = FIELD_GET(QCA8K_ATU_ADDR2_MASK, reg[0]);
676 + fdb->mac[3] = FIELD_GET(QCA8K_ATU_ADDR3_MASK, reg[0]);
677 + fdb->mac[4] = FIELD_GET(QCA8K_ATU_ADDR4_MASK, reg[0]);
678 + fdb->mac[5] = FIELD_GET(QCA8K_ATU_ADDR5_MASK, reg[0]);
684 +qca8k_fdb_write(struct qca8k_priv *priv, u16 vid, u8 port_mask, const u8 *mac,
687 + u32 reg[3] = { 0 };
690 + reg[2] = FIELD_PREP(QCA8K_ATU_VID_MASK, vid);
691 + /* aging - 67:64 */
692 + reg[2] |= FIELD_PREP(QCA8K_ATU_STATUS_MASK, aging);
693 + /* portmask - 54:48 */
694 + reg[1] = FIELD_PREP(QCA8K_ATU_PORT_MASK, port_mask);
696 + reg[1] |= FIELD_PREP(QCA8K_ATU_ADDR0_MASK, mac[0]);
697 + reg[1] |= FIELD_PREP(QCA8K_ATU_ADDR1_MASK, mac[1]);
698 + reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR2_MASK, mac[2]);
699 + reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR3_MASK, mac[3]);
700 + reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR4_MASK, mac[4]);
701 + reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR5_MASK, mac[5]);
703 + /* load the array into the ARL table */
704 + qca8k_bulk_write(priv, QCA8K_REG_ATU_DATA0, reg, sizeof(reg));
708 +qca8k_fdb_access(struct qca8k_priv *priv, enum qca8k_fdb_cmd cmd, int port)
713 + /* Set the command and FDB index */
714 + reg = QCA8K_ATU_FUNC_BUSY;
717 + reg |= QCA8K_ATU_FUNC_PORT_EN;
718 + reg |= FIELD_PREP(QCA8K_ATU_FUNC_PORT_MASK, port);
721 + /* Write the function register triggering the table access */
722 + ret = qca8k_write(priv, QCA8K_REG_ATU_FUNC, reg);
726 + /* wait for completion */
727 + ret = qca8k_busy_wait(priv, QCA8K_REG_ATU_FUNC, QCA8K_ATU_FUNC_BUSY);
731 + /* Check for table full violation when adding an entry */
732 + if (cmd == QCA8K_FDB_LOAD) {
733 + ret = qca8k_read(priv, QCA8K_REG_ATU_FUNC, &reg);
736 + if (reg & QCA8K_ATU_FUNC_FULL)
744 +qca8k_fdb_next(struct qca8k_priv *priv, struct qca8k_fdb *fdb, int port)
748 + qca8k_fdb_write(priv, fdb->vid, fdb->port_mask, fdb->mac, fdb->aging);
749 + ret = qca8k_fdb_access(priv, QCA8K_FDB_NEXT, port);
753 + return qca8k_fdb_read(priv, fdb);
757 +qca8k_fdb_add(struct qca8k_priv *priv, const u8 *mac, u16 port_mask,
762 + mutex_lock(&priv->reg_mutex);
763 + qca8k_fdb_write(priv, vid, port_mask, mac, aging);
764 + ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
765 + mutex_unlock(&priv->reg_mutex);
771 +qca8k_fdb_del(struct qca8k_priv *priv, const u8 *mac, u16 port_mask, u16 vid)
775 + mutex_lock(&priv->reg_mutex);
776 + qca8k_fdb_write(priv, vid, port_mask, mac, 0);
777 + ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
778 + mutex_unlock(&priv->reg_mutex);
784 +qca8k_fdb_flush(struct qca8k_priv *priv)
786 + mutex_lock(&priv->reg_mutex);
787 + qca8k_fdb_access(priv, QCA8K_FDB_FLUSH, -1);
788 + mutex_unlock(&priv->reg_mutex);
792 +qca8k_fdb_search_and_insert(struct qca8k_priv *priv, u8 port_mask,
793 + const u8 *mac, u16 vid)
795 + struct qca8k_fdb fdb = { 0 };
798 + mutex_lock(&priv->reg_mutex);
800 + qca8k_fdb_write(priv, vid, 0, mac, 0);
801 + ret = qca8k_fdb_access(priv, QCA8K_FDB_SEARCH, -1);
805 + ret = qca8k_fdb_read(priv, &fdb);
809 + /* Rule exist. Delete first */
811 + ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
816 + /* Add port to fdb portmask */
817 + fdb.port_mask |= port_mask;
819 + qca8k_fdb_write(priv, vid, fdb.port_mask, mac, fdb.aging);
820 + ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
823 + mutex_unlock(&priv->reg_mutex);
828 +qca8k_fdb_search_and_del(struct qca8k_priv *priv, u8 port_mask,
829 + const u8 *mac, u16 vid)
831 + struct qca8k_fdb fdb = { 0 };
834 + mutex_lock(&priv->reg_mutex);
836 + qca8k_fdb_write(priv, vid, 0, mac, 0);
837 + ret = qca8k_fdb_access(priv, QCA8K_FDB_SEARCH, -1);
841 + /* Rule doesn't exist. Why delete? */
847 + ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
851 + /* Only port in the rule is this port. Don't re insert */
852 + if (fdb.port_mask == port_mask)
855 + /* Remove port from port mask */
856 + fdb.port_mask &= ~port_mask;
858 + qca8k_fdb_write(priv, vid, fdb.port_mask, mac, fdb.aging);
859 + ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
862 + mutex_unlock(&priv->reg_mutex);
867 +qca8k_vlan_access(struct qca8k_priv *priv, enum qca8k_vlan_cmd cmd, u16 vid)
872 + /* Set the command and VLAN index */
873 + reg = QCA8K_VTU_FUNC1_BUSY;
875 + reg |= FIELD_PREP(QCA8K_VTU_FUNC1_VID_MASK, vid);
877 + /* Write the function register triggering the table access */
878 + ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC1, reg);
882 + /* wait for completion */
883 + ret = qca8k_busy_wait(priv, QCA8K_REG_VTU_FUNC1, QCA8K_VTU_FUNC1_BUSY);
887 + /* Check for table full violation when adding an entry */
888 + if (cmd == QCA8K_VLAN_LOAD) {
889 + ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC1, &reg);
892 + if (reg & QCA8K_VTU_FUNC1_FULL)
900 +qca8k_vlan_add(struct qca8k_priv *priv, u8 port, u16 vid, bool untagged)
906 + We do the right thing with VLAN 0 and treat it as untagged while
907 + preserving the tag on egress.
912 + mutex_lock(&priv->reg_mutex);
913 + ret = qca8k_vlan_access(priv, QCA8K_VLAN_READ, vid);
917 + ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC0, &reg);
920 + reg |= QCA8K_VTU_FUNC0_VALID | QCA8K_VTU_FUNC0_IVL_EN;
921 + reg &= ~QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(port);
923 + reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_UNTAG(port);
925 + reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_TAG(port);
927 + ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg);
930 + ret = qca8k_vlan_access(priv, QCA8K_VLAN_LOAD, vid);
933 + mutex_unlock(&priv->reg_mutex);
939 +qca8k_vlan_del(struct qca8k_priv *priv, u8 port, u16 vid)
945 + mutex_lock(&priv->reg_mutex);
946 + ret = qca8k_vlan_access(priv, QCA8K_VLAN_READ, vid);
950 + ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC0, &reg);
953 + reg &= ~QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(port);
954 + reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(port);
956 + /* Check if we're the last member to be removed */
958 + for (i = 0; i < QCA8K_NUM_PORTS; i++) {
959 + mask = QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(i);
961 + if ((reg & mask) != mask) {
968 + ret = qca8k_vlan_access(priv, QCA8K_VLAN_PURGE, vid);
970 + ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg);
973 + ret = qca8k_vlan_access(priv, QCA8K_VLAN_LOAD, vid);
977 + mutex_unlock(&priv->reg_mutex);
983 +qca8k_mib_init(struct qca8k_priv *priv)
987 + mutex_lock(&priv->reg_mutex);
988 + ret = regmap_update_bits(priv->regmap, QCA8K_REG_MIB,
989 + QCA8K_MIB_FUNC | QCA8K_MIB_BUSY,
990 + FIELD_PREP(QCA8K_MIB_FUNC, QCA8K_MIB_FLUSH) |
995 + ret = qca8k_busy_wait(priv, QCA8K_REG_MIB, QCA8K_MIB_BUSY);
999 + ret = regmap_set_bits(priv->regmap, QCA8K_REG_MIB, QCA8K_MIB_CPU_KEEP);
1003 + ret = qca8k_write(priv, QCA8K_REG_MODULE_EN, QCA8K_MODULE_EN_MIB);
1006 + mutex_unlock(&priv->reg_mutex);
1011 +qca8k_port_set_status(struct qca8k_priv *priv, int port, int enable)
1013 + u32 mask = QCA8K_PORT_STATUS_TXMAC | QCA8K_PORT_STATUS_RXMAC;
1015 + /* Port 0 and 6 have no internal PHY */
1016 + if (port > 0 && port < 6)
1017 + mask |= QCA8K_PORT_STATUS_LINK_AUTO;
1020 + regmap_set_bits(priv->regmap, QCA8K_REG_PORT_STATUS(port), mask);
1022 + regmap_clear_bits(priv->regmap, QCA8K_REG_PORT_STATUS(port), mask);
1026 +qca8k_phy_eth_busy_wait(struct qca8k_mgmt_eth_data *mgmt_eth_data,
1027 + struct sk_buff *read_skb, u32 *val)
1029 + struct sk_buff *skb = skb_copy(read_skb, GFP_KERNEL);
1033 + reinit_completion(&mgmt_eth_data->rw_done);
1035 + /* Increment seq_num and set it in the copy pkt */
1036 + mgmt_eth_data->seq++;
1037 + qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
1038 + mgmt_eth_data->ack = false;
1040 + dev_queue_xmit(skb);
1042 + ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
1043 + QCA8K_ETHERNET_TIMEOUT);
1045 + ack = mgmt_eth_data->ack;
1048 + return -ETIMEDOUT;
1053 + *val = mgmt_eth_data->data[0];
1059 +qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy,
1060 + int regnum, u16 data)
1062 + struct sk_buff *write_skb, *clear_skb, *read_skb;
1063 + struct qca8k_mgmt_eth_data *mgmt_eth_data;
1064 + u32 write_val, clear_val = 0, val;
1065 + struct net_device *mgmt_master;
1069 + if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
1072 + mgmt_eth_data = &priv->mgmt_eth_data;
1074 + write_val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
1075 + QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
1076 + QCA8K_MDIO_MASTER_REG_ADDR(regnum);
1079 + write_val |= QCA8K_MDIO_MASTER_READ;
1081 + write_val |= QCA8K_MDIO_MASTER_WRITE;
1082 + write_val |= QCA8K_MDIO_MASTER_DATA(data);
1085 + /* Prealloc all the needed skb before the lock */
1086 + write_skb = qca8k_alloc_mdio_header(MDIO_WRITE, QCA8K_MDIO_MASTER_CTRL, &write_val,
1087 + QCA8K_ETHERNET_PHY_PRIORITY, sizeof(write_val));
1091 + clear_skb = qca8k_alloc_mdio_header(MDIO_WRITE, QCA8K_MDIO_MASTER_CTRL, &clear_val,
1092 + QCA8K_ETHERNET_PHY_PRIORITY, sizeof(clear_val));
1095 + goto err_clear_skb;
1098 + read_skb = qca8k_alloc_mdio_header(MDIO_READ, QCA8K_MDIO_MASTER_CTRL, &clear_val,
1099 + QCA8K_ETHERNET_PHY_PRIORITY, sizeof(clear_val));
1102 + goto err_read_skb;
1105 + /* Actually start the request:
1106 + * 1. Send mdio master packet
1107 + * 2. Busy Wait for mdio master command
1108 + * 3. Get the data if we are reading
1109 + * 4. Reset the mdio master (even with error)
1111 + mutex_lock(&mgmt_eth_data->mutex);
1113 + /* Check if mgmt_master is operational */
1114 + mgmt_master = priv->mgmt_master;
1115 + if (!mgmt_master) {
1116 + mutex_unlock(&mgmt_eth_data->mutex);
1118 + goto err_mgmt_master;
1121 + read_skb->dev = mgmt_master;
1122 + clear_skb->dev = mgmt_master;
1123 + write_skb->dev = mgmt_master;
1125 + reinit_completion(&mgmt_eth_data->rw_done);
1127 + /* Increment seq_num and set it in the write pkt */
1128 + mgmt_eth_data->seq++;
1129 + qca8k_mdio_header_fill_seq_num(write_skb, mgmt_eth_data->seq);
1130 + mgmt_eth_data->ack = false;
1132 + dev_queue_xmit(write_skb);
1134 + ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
1135 + QCA8K_ETHERNET_TIMEOUT);
1137 + ack = mgmt_eth_data->ack;
1141 + kfree_skb(read_skb);
1147 + kfree_skb(read_skb);
1151 + ret = read_poll_timeout(qca8k_phy_eth_busy_wait, ret1,
1152 + !(val & QCA8K_MDIO_MASTER_BUSY), 0,
1153 + QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false,
1154 + mgmt_eth_data, read_skb, &val);
1156 + if (ret < 0 && ret1 < 0) {
1162 + reinit_completion(&mgmt_eth_data->rw_done);
1164 + /* Increment seq_num and set it in the read pkt */
1165 + mgmt_eth_data->seq++;
1166 + qca8k_mdio_header_fill_seq_num(read_skb, mgmt_eth_data->seq);
1167 + mgmt_eth_data->ack = false;
1169 + dev_queue_xmit(read_skb);
1171 + ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
1172 + QCA8K_ETHERNET_TIMEOUT);
1174 + ack = mgmt_eth_data->ack;
1186 + ret = mgmt_eth_data->data[0] & QCA8K_MDIO_MASTER_DATA_MASK;
1188 + kfree_skb(read_skb);
1191 + reinit_completion(&mgmt_eth_data->rw_done);
1193 + /* Increment seq_num and set it in the clear pkt */
1194 + mgmt_eth_data->seq++;
1195 + qca8k_mdio_header_fill_seq_num(clear_skb, mgmt_eth_data->seq);
1196 + mgmt_eth_data->ack = false;
1198 + dev_queue_xmit(clear_skb);
1200 + wait_for_completion_timeout(&mgmt_eth_data->rw_done,
1201 + QCA8K_ETHERNET_TIMEOUT);
1203 + mutex_unlock(&mgmt_eth_data->mutex);
1207 + /* Error handling before lock */
1209 + kfree_skb(read_skb);
1211 + kfree_skb(clear_skb);
1213 + kfree_skb(write_skb);
1219 +qca8k_port_to_phy(int port)
1221 + /* From Andrew Lunn:
1222 + * Port 0 has no internal phy.
1223 + * Port 1 has an internal PHY at MDIO address 0.
1224 + * Port 2 has an internal PHY at MDIO address 1.
1226 + * Port 5 has an internal PHY at MDIO address 4.
1227 + * Port 6 has no internal PHY.
1234 +qca8k_mdio_busy_wait(struct mii_bus *bus, u32 reg, u32 mask)
1240 + qca8k_split_addr(reg, &r1, &r2, &page);
1242 + ret = read_poll_timeout(qca8k_mii_read32, ret1, !(val & mask), 0,
1243 + QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false,
1244 + bus, 0x10 | r2, r1, &val);
1246 + /* Check if qca8k_read has failed for a different reason
1247 + * before returnting -ETIMEDOUT
1249 + if (ret < 0 && ret1 < 0)
1256 +qca8k_mdio_write(struct qca8k_priv *priv, int phy, int regnum, u16 data)
1258 + struct mii_bus *bus = priv->bus;
1263 + if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
1266 + val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
1267 + QCA8K_MDIO_MASTER_WRITE | QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
1268 + QCA8K_MDIO_MASTER_REG_ADDR(regnum) |
1269 + QCA8K_MDIO_MASTER_DATA(data);
1271 + qca8k_split_addr(QCA8K_MDIO_MASTER_CTRL, &r1, &r2, &page);
1273 + mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
1275 + ret = qca8k_set_page(priv, page);
1279 + qca8k_mii_write32(priv, 0x10 | r2, r1, val);
1281 + ret = qca8k_mdio_busy_wait(bus, QCA8K_MDIO_MASTER_CTRL,
1282 + QCA8K_MDIO_MASTER_BUSY);
1285 + /* even if the busy_wait timeouts try to clear the MASTER_EN */
1286 + qca8k_mii_write32(priv, 0x10 | r2, r1, 0);
1288 + mutex_unlock(&bus->mdio_lock);
1294 +qca8k_mdio_read(struct qca8k_priv *priv, int phy, int regnum)
1296 + struct mii_bus *bus = priv->bus;
1301 + if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
1304 + val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
1305 + QCA8K_MDIO_MASTER_READ | QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
1306 + QCA8K_MDIO_MASTER_REG_ADDR(regnum);
1308 + qca8k_split_addr(QCA8K_MDIO_MASTER_CTRL, &r1, &r2, &page);
1310 + mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
1312 + ret = qca8k_set_page(priv, page);
1316 + qca8k_mii_write32(priv, 0x10 | r2, r1, val);
1318 + ret = qca8k_mdio_busy_wait(bus, QCA8K_MDIO_MASTER_CTRL,
1319 + QCA8K_MDIO_MASTER_BUSY);
1323 + ret = qca8k_mii_read32(bus, 0x10 | r2, r1, &val);
1326 + /* even if the busy_wait timeouts try to clear the MASTER_EN */
1327 + qca8k_mii_write32(priv, 0x10 | r2, r1, 0);
1329 + mutex_unlock(&bus->mdio_lock);
1332 + ret = val & QCA8K_MDIO_MASTER_DATA_MASK;
1338 +qca8k_internal_mdio_write(struct mii_bus *slave_bus, int phy, int regnum, u16 data)
1340 + struct qca8k_priv *priv = slave_bus->priv;
1343 + /* Use mdio Ethernet when available, fallback to legacy one on error */
1344 + ret = qca8k_phy_eth_command(priv, false, phy, regnum, data);
1348 + return qca8k_mdio_write(priv, phy, regnum, data);
1352 +qca8k_internal_mdio_read(struct mii_bus *slave_bus, int phy, int regnum)
1354 + struct qca8k_priv *priv = slave_bus->priv;
1357 + /* Use mdio Ethernet when available, fallback to legacy one on error */
1358 + ret = qca8k_phy_eth_command(priv, true, phy, regnum, 0);
1362 + ret = qca8k_mdio_read(priv, phy, regnum);
1371 +qca8k_legacy_mdio_write(struct mii_bus *slave_bus, int port, int regnum, u16 data)
1373 + port = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
1375 + return qca8k_internal_mdio_write(slave_bus, port, regnum, data);
1379 +qca8k_legacy_mdio_read(struct mii_bus *slave_bus, int port, int regnum)
1381 + port = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
1383 + return qca8k_internal_mdio_read(slave_bus, port, regnum);
1387 +qca8k_mdio_register(struct qca8k_priv *priv)
1389 + struct dsa_switch *ds = priv->ds;
1390 + struct device_node *mdio;
1391 + struct mii_bus *bus;
1393 + bus = devm_mdiobus_alloc(ds->dev);
1397 + bus->priv = (void *)priv;
1398 + snprintf(bus->id, MII_BUS_ID_SIZE, "qca8k-%d.%d",
1399 + ds->dst->index, ds->index);
1400 + bus->parent = ds->dev;
1401 + bus->phy_mask = ~ds->phys_mii_mask;
1402 + ds->slave_mii_bus = bus;
1404 + /* Check if the devicetree declare the port:phy mapping */
1405 + mdio = of_get_child_by_name(priv->dev->of_node, "mdio");
1406 + if (of_device_is_available(mdio)) {
1407 + bus->name = "qca8k slave mii";
1408 + bus->read = qca8k_internal_mdio_read;
1409 + bus->write = qca8k_internal_mdio_write;
1410 + return devm_of_mdiobus_register(priv->dev, bus, mdio);
1413 + /* If a mapping can't be found the legacy mapping is used,
1414 + * using the qca8k_port_to_phy function
1416 + bus->name = "qca8k-legacy slave mii";
1417 + bus->read = qca8k_legacy_mdio_read;
1418 + bus->write = qca8k_legacy_mdio_write;
1419 + return devm_mdiobus_register(priv->dev, bus);
1423 +qca8k_setup_mdio_bus(struct qca8k_priv *priv)
1425 + u32 internal_mdio_mask = 0, external_mdio_mask = 0, reg;
1426 + struct device_node *ports, *port;
1427 + phy_interface_t mode;
1430 + ports = of_get_child_by_name(priv->dev->of_node, "ports");
1432 + ports = of_get_child_by_name(priv->dev->of_node, "ethernet-ports");
1437 + for_each_available_child_of_node(ports, port) {
1438 + err = of_property_read_u32(port, "reg", &reg);
1440 + of_node_put(port);
1441 + of_node_put(ports);
1445 + if (!dsa_is_user_port(priv->ds, reg))
1448 + of_get_phy_mode(port, &mode);
1450 + if (of_property_read_bool(port, "phy-handle") &&
1451 + mode != PHY_INTERFACE_MODE_INTERNAL)
1452 + external_mdio_mask |= BIT(reg);
1454 + internal_mdio_mask |= BIT(reg);
1457 + of_node_put(ports);
1458 + if (!external_mdio_mask && !internal_mdio_mask) {
1459 + dev_err(priv->dev, "no PHYs are defined.\n");
1463 + /* The QCA8K_MDIO_MASTER_EN Bit, which grants access to PHYs through
1464 + * the MDIO_MASTER register also _disconnects_ the external MDC
1465 + * passthrough to the internal PHYs. It's not possible to use both
1466 + * configurations at the same time!
1468 + * Because this came up during the review process:
1469 + * If the external mdio-bus driver is capable of magically disabling
1470 + * the QCA8K_MDIO_MASTER_EN and mutex/spin-locking out the qca8k's
1471 + * accessors for the time being, it would be possible to pull this
1474 + if (!!external_mdio_mask && !!internal_mdio_mask) {
1475 + dev_err(priv->dev, "either internal or external mdio bus configuration is supported.\n");
1479 + if (external_mdio_mask) {
1480 + /* Make sure to disable the internal mdio bus in cases
1481 + * a dt-overlay and driver reload changed the configuration
1484 + return regmap_clear_bits(priv->regmap, QCA8K_MDIO_MASTER_CTRL,
1485 + QCA8K_MDIO_MASTER_EN);
1488 + return qca8k_mdio_register(priv);
1492 +qca8k_setup_mac_pwr_sel(struct qca8k_priv *priv)
1497 + /* SoC specific settings for ipq8064.
1498 + * If more devices require this, consider adding
1499 + * a dedicated binding.
1501 + if (of_machine_is_compatible("qcom,ipq8064"))
1502 + mask |= QCA8K_MAC_PWR_RGMII0_1_8V;
1504 + /* SoC specific settings for ipq8065 */
1505 + if (of_machine_is_compatible("qcom,ipq8065"))
1506 + mask |= QCA8K_MAC_PWR_RGMII1_1_8V;
1509 + ret = qca8k_rmw(priv, QCA8K_REG_MAC_PWR_SEL,
1510 + QCA8K_MAC_PWR_RGMII0_1_8V |
1511 + QCA8K_MAC_PWR_RGMII1_1_8V,
1518 +static int qca8k_find_cpu_port(struct dsa_switch *ds)
1520 + struct qca8k_priv *priv = ds->priv;
1522 + /* Find the connected cpu port. Valid port are 0 or 6 */
1523 + if (dsa_is_cpu_port(ds, 0))
1526 + dev_dbg(priv->dev, "port 0 is not the CPU port. Checking port 6");
1528 + if (dsa_is_cpu_port(ds, 6))
1535 +qca8k_setup_of_pws_reg(struct qca8k_priv *priv)
1537 + struct device_node *node = priv->dev->of_node;
1538 + const struct qca8k_match_data *data;
1542 + /* The QCA8327 requires the correct mode to be set.
1543 + * Its bigger brother, the QCA8328, has the 172 pin layout.
1544 + * This should be applied by default but we set it just to make sure.
1546 + if (priv->switch_id == QCA8K_ID_QCA8327) {
1547 + data = of_device_get_match_data(priv->dev);
1549 + /* Set the correct package of 148 pin for QCA8327 */
1550 + if (data->reduced_package)
1551 + val |= QCA8327_PWS_PACKAGE148_EN;
1553 + ret = qca8k_rmw(priv, QCA8K_REG_PWS, QCA8327_PWS_PACKAGE148_EN,
1559 + if (of_property_read_bool(node, "qca,ignore-power-on-sel"))
1560 + val |= QCA8K_PWS_POWER_ON_SEL;
1562 + if (of_property_read_bool(node, "qca,led-open-drain")) {
1563 + if (!(val & QCA8K_PWS_POWER_ON_SEL)) {
1564 + dev_err(priv->dev, "qca,led-open-drain require qca,ignore-power-on-sel to be set.");
1568 + val |= QCA8K_PWS_LED_OPEN_EN_CSR;
1571 + return qca8k_rmw(priv, QCA8K_REG_PWS,
1572 + QCA8K_PWS_LED_OPEN_EN_CSR | QCA8K_PWS_POWER_ON_SEL,
1577 +qca8k_parse_port_config(struct qca8k_priv *priv)
1579 + int port, cpu_port_index = -1, ret;
1580 + struct device_node *port_dn;
1581 + phy_interface_t mode;
1582 + struct dsa_port *dp;
1585 + /* We have 2 CPU ports. Check them */
1586 + for (port = 0; port < QCA8K_NUM_PORTS; port++) {
1587 + /* Skip every other port */
1588 + if (port != 0 && port != 6)
1591 + dp = dsa_to_port(priv->ds, port);
1595 + if (!of_device_is_available(port_dn))
1598 + ret = of_get_phy_mode(port_dn, &mode);
1603 + case PHY_INTERFACE_MODE_RGMII:
1604 + case PHY_INTERFACE_MODE_RGMII_ID:
1605 + case PHY_INTERFACE_MODE_RGMII_TXID:
1606 + case PHY_INTERFACE_MODE_RGMII_RXID:
1607 + case PHY_INTERFACE_MODE_SGMII:
1610 + if (!of_property_read_u32(port_dn, "tx-internal-delay-ps", &delay))
1611 + /* Switch regs accept value in ns, convert ps to ns */
1612 + delay = delay / 1000;
1613 + else if (mode == PHY_INTERFACE_MODE_RGMII_ID ||
1614 + mode == PHY_INTERFACE_MODE_RGMII_TXID)
1617 + if (!FIELD_FIT(QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK, delay)) {
1618 + dev_err(priv->dev, "rgmii tx delay is limited to a max value of 3ns, setting to the max value");
1622 + priv->ports_config.rgmii_tx_delay[cpu_port_index] = delay;
1626 + if (!of_property_read_u32(port_dn, "rx-internal-delay-ps", &delay))
1627 + /* Switch regs accept value in ns, convert ps to ns */
1628 + delay = delay / 1000;
1629 + else if (mode == PHY_INTERFACE_MODE_RGMII_ID ||
1630 + mode == PHY_INTERFACE_MODE_RGMII_RXID)
1633 + if (!FIELD_FIT(QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK, delay)) {
1634 + dev_err(priv->dev, "rgmii rx delay is limited to a max value of 3ns, setting to the max value");
1638 + priv->ports_config.rgmii_rx_delay[cpu_port_index] = delay;
1640 + /* Skip sgmii parsing for rgmii* mode */
1641 + if (mode == PHY_INTERFACE_MODE_RGMII ||
1642 + mode == PHY_INTERFACE_MODE_RGMII_ID ||
1643 + mode == PHY_INTERFACE_MODE_RGMII_TXID ||
1644 + mode == PHY_INTERFACE_MODE_RGMII_RXID)
1647 + if (of_property_read_bool(port_dn, "qca,sgmii-txclk-falling-edge"))
1648 + priv->ports_config.sgmii_tx_clk_falling_edge = true;
1650 + if (of_property_read_bool(port_dn, "qca,sgmii-rxclk-falling-edge"))
1651 + priv->ports_config.sgmii_rx_clk_falling_edge = true;
1653 + if (of_property_read_bool(port_dn, "qca,sgmii-enable-pll")) {
1654 + priv->ports_config.sgmii_enable_pll = true;
1656 + if (priv->switch_id == QCA8K_ID_QCA8327) {
1657 + dev_err(priv->dev, "SGMII PLL should NOT be enabled for qca8327. Aborting enabling");
1658 + priv->ports_config.sgmii_enable_pll = false;
1661 + if (priv->switch_revision < 2)
1662 + dev_warn(priv->dev, "SGMII PLL should NOT be enabled for qca8337 with revision 2 or more.");
1675 +qca8k_setup(struct dsa_switch *ds)
1677 + struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
1678 + int cpu_port, ret, i;
1681 + cpu_port = qca8k_find_cpu_port(ds);
1682 + if (cpu_port < 0) {
1683 + dev_err(priv->dev, "No cpu port configured in both cpu port0 and port6");
1687 + /* Parse CPU port config to be later used in phy_link mac_config */
1688 + ret = qca8k_parse_port_config(priv);
1692 + ret = qca8k_setup_mdio_bus(priv);
1696 + ret = qca8k_setup_of_pws_reg(priv);
1700 + ret = qca8k_setup_mac_pwr_sel(priv);
1704 + /* Make sure MAC06 is disabled */
1705 + ret = regmap_clear_bits(priv->regmap, QCA8K_REG_PORT0_PAD_CTRL,
1706 + QCA8K_PORT0_PAD_MAC06_EXCHANGE_EN);
1708 + dev_err(priv->dev, "failed disabling MAC06 exchange");
1712 + /* Enable CPU Port */
1713 + ret = regmap_set_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
1714 + QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN);
1716 + dev_err(priv->dev, "failed enabling CPU port");
1720 + /* Enable MIB counters */
1721 + ret = qca8k_mib_init(priv);
1723 + dev_warn(priv->dev, "mib init failed");
1725 + /* Initial setup of all ports */
1726 + for (i = 0; i < QCA8K_NUM_PORTS; i++) {
1727 + /* Disable forwarding by default on all ports */
1728 + ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
1729 + QCA8K_PORT_LOOKUP_MEMBER, 0);
1733 + /* Enable QCA header mode on all cpu ports */
1734 + if (dsa_is_cpu_port(ds, i)) {
1735 + ret = qca8k_write(priv, QCA8K_REG_PORT_HDR_CTRL(i),
1736 + FIELD_PREP(QCA8K_PORT_HDR_CTRL_TX_MASK, QCA8K_PORT_HDR_CTRL_ALL) |
1737 + FIELD_PREP(QCA8K_PORT_HDR_CTRL_RX_MASK, QCA8K_PORT_HDR_CTRL_ALL));
1739 + dev_err(priv->dev, "failed enabling QCA header mode");
1744 + /* Disable MAC by default on all user ports */
1745 + if (dsa_is_user_port(ds, i))
1746 + qca8k_port_set_status(priv, i, 0);
1749 + /* Forward all unknown frames to CPU port for Linux processing
1750 + * Notice that in multi-cpu config only one port should be set
1751 + * for igmp, unknown, multicast and broadcast packet
1753 + ret = qca8k_write(priv, QCA8K_REG_GLOBAL_FW_CTRL1,
1754 + FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_MASK, BIT(cpu_port)) |
1755 + FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_BC_DP_MASK, BIT(cpu_port)) |
1756 + FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_MC_DP_MASK, BIT(cpu_port)) |
1757 + FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_UC_DP_MASK, BIT(cpu_port)));
1761 + /* Setup connection between CPU port & user ports
1762 + * Configure specific switch configuration for ports
1764 + for (i = 0; i < QCA8K_NUM_PORTS; i++) {
1765 + /* CPU port gets connected to all user ports of the switch */
1766 + if (dsa_is_cpu_port(ds, i)) {
1767 + ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
1768 + QCA8K_PORT_LOOKUP_MEMBER, dsa_user_ports(ds));
1773 + /* Individual user ports get connected to CPU port only */
1774 + if (dsa_is_user_port(ds, i)) {
1775 + ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
1776 + QCA8K_PORT_LOOKUP_MEMBER,
1781 + /* Enable ARP Auto-learning by default */
1782 + ret = regmap_set_bits(priv->regmap, QCA8K_PORT_LOOKUP_CTRL(i),
1783 + QCA8K_PORT_LOOKUP_LEARN);
1787 + /* For port based vlans to work we need to set the
1788 + * default egress vid
1790 + ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(i),
1791 + QCA8K_EGREES_VLAN_PORT_MASK(i),
1792 + QCA8K_EGREES_VLAN_PORT(i, QCA8K_PORT_VID_DEF));
1796 + ret = qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(i),
1797 + QCA8K_PORT_VLAN_CVID(QCA8K_PORT_VID_DEF) |
1798 + QCA8K_PORT_VLAN_SVID(QCA8K_PORT_VID_DEF));
1803 + /* Port 5 of the qca8337 has some problems in flood conditions. The
1804 + * original legacy driver had some specific buffer and priority settings
1805 + * for the different ports suggested by the QCA switch team. Add these
1806 + * missing settings to improve switch stability under load conditions.
1807 + * This problem is limited to the qca8337; other qca8k switches are not affected.
1809 + if (priv->switch_id == QCA8K_ID_QCA8337) {
1811 + /* The 2 CPU ports and port 5 require different
1812 + * priorities than the other ports.
1817 + mask = QCA8K_PORT_HOL_CTRL0_EG_PRI0(0x3) |
1818 + QCA8K_PORT_HOL_CTRL0_EG_PRI1(0x4) |
1819 + QCA8K_PORT_HOL_CTRL0_EG_PRI2(0x4) |
1820 + QCA8K_PORT_HOL_CTRL0_EG_PRI3(0x4) |
1821 + QCA8K_PORT_HOL_CTRL0_EG_PRI4(0x6) |
1822 + QCA8K_PORT_HOL_CTRL0_EG_PRI5(0x8) |
1823 + QCA8K_PORT_HOL_CTRL0_EG_PORT(0x1e);
1826 + mask = QCA8K_PORT_HOL_CTRL0_EG_PRI0(0x3) |
1827 + QCA8K_PORT_HOL_CTRL0_EG_PRI1(0x4) |
1828 + QCA8K_PORT_HOL_CTRL0_EG_PRI2(0x6) |
1829 + QCA8K_PORT_HOL_CTRL0_EG_PRI3(0x8) |
1830 + QCA8K_PORT_HOL_CTRL0_EG_PORT(0x19);
1832 + qca8k_write(priv, QCA8K_REG_PORT_HOL_CTRL0(i), mask);
1834 + mask = QCA8K_PORT_HOL_CTRL1_ING(0x6) |
1835 + QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN |
1836 + QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN |
1837 + QCA8K_PORT_HOL_CTRL1_WRED_EN;
1838 + qca8k_rmw(priv, QCA8K_REG_PORT_HOL_CTRL1(i),
1839 + QCA8K_PORT_HOL_CTRL1_ING_BUF_MASK |
1840 + QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN |
1841 + QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN |
1842 + QCA8K_PORT_HOL_CTRL1_WRED_EN,
1847 + /* Special GLOBAL_FC_THRESH values are needed for the ar8327 switch */
1848 + if (priv->switch_id == QCA8K_ID_QCA8327) {
1849 + mask = QCA8K_GLOBAL_FC_GOL_XON_THRES(288) |
1850 + QCA8K_GLOBAL_FC_GOL_XOFF_THRES(496);
1851 + qca8k_rmw(priv, QCA8K_REG_GLOBAL_FC_THRESH,
1852 + QCA8K_GLOBAL_FC_GOL_XON_THRES_MASK |
1853 + QCA8K_GLOBAL_FC_GOL_XOFF_THRES_MASK,
1857 + /* Setup our port MTUs to match power on defaults */
1858 + ret = qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, ETH_FRAME_LEN + ETH_FCS_LEN);
1860 + dev_warn(priv->dev, "failed setting MTU settings");
1862 + /* Flush the FDB table */
1863 + qca8k_fdb_flush(priv);
1865 + /* We don't have interrupts for link changes, so we need to poll */
1866 + ds->pcs_poll = true;
1868 + /* Set min and max ageing value supported */
1869 + ds->ageing_time_min = 7000;
1870 + ds->ageing_time_max = 458745000;
1872 + /* Set max number of LAGs supported */
1873 + ds->num_lag_ids = QCA8K_NUM_LAGS;
1879 +qca8k_mac_config_setup_internal_delay(struct qca8k_priv *priv, int cpu_port_index,
1882 + u32 delay, val = 0;
1885 + /* Delay can be declared in 3 different ways.
1886 + * Mode to rgmii and internal-delay standard binding defined
1887 + * rgmii-id or rgmii-tx/rx phy mode set.
1888 + * The parse logic set a delay different than 0 only when one
1889 + * of the 3 different way is used. In all other case delay is
1890 + * not enabled. With ID or TX/RXID delay is enabled and set
1891 + * to the default and recommended value.
1893 + if (priv->ports_config.rgmii_tx_delay[cpu_port_index]) {
1894 + delay = priv->ports_config.rgmii_tx_delay[cpu_port_index];
1896 + val |= QCA8K_PORT_PAD_RGMII_TX_DELAY(delay) |
1897 + QCA8K_PORT_PAD_RGMII_TX_DELAY_EN;
1900 + if (priv->ports_config.rgmii_rx_delay[cpu_port_index]) {
1901 + delay = priv->ports_config.rgmii_rx_delay[cpu_port_index];
1903 + val |= QCA8K_PORT_PAD_RGMII_RX_DELAY(delay) |
1904 + QCA8K_PORT_PAD_RGMII_RX_DELAY_EN;
1907 + /* Set RGMII delay based on the selected values */
1908 + ret = qca8k_rmw(priv, reg,
1909 + QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK |
1910 + QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK |
1911 + QCA8K_PORT_PAD_RGMII_TX_DELAY_EN |
1912 + QCA8K_PORT_PAD_RGMII_RX_DELAY_EN,
1915 + dev_err(priv->dev, "Failed to set internal delay for CPU port%d",
1916 + cpu_port_index == QCA8K_CPU_PORT0 ? 0 : 6);
1920 +qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
1921 + const struct phylink_link_state *state)
1923 + struct qca8k_priv *priv = ds->priv;
1924 + int cpu_port_index, ret;
1928 + case 0: /* 1st CPU port */
1929 + if (state->interface != PHY_INTERFACE_MODE_RGMII &&
1930 + state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
1931 + state->interface != PHY_INTERFACE_MODE_RGMII_TXID &&
1932 + state->interface != PHY_INTERFACE_MODE_RGMII_RXID &&
1933 + state->interface != PHY_INTERFACE_MODE_SGMII)
1936 + reg = QCA8K_REG_PORT0_PAD_CTRL;
1937 + cpu_port_index = QCA8K_CPU_PORT0;
1944 + /* Internal PHY, nothing to do */
1946 + case 6: /* 2nd CPU port / external PHY */
1947 + if (state->interface != PHY_INTERFACE_MODE_RGMII &&
1948 + state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
1949 + state->interface != PHY_INTERFACE_MODE_RGMII_TXID &&
1950 + state->interface != PHY_INTERFACE_MODE_RGMII_RXID &&
1951 + state->interface != PHY_INTERFACE_MODE_SGMII &&
1952 + state->interface != PHY_INTERFACE_MODE_1000BASEX)
1955 + reg = QCA8K_REG_PORT6_PAD_CTRL;
1956 + cpu_port_index = QCA8K_CPU_PORT6;
1959 + dev_err(ds->dev, "%s: unsupported port: %i\n", __func__, port);
1963 + if (port != 6 && phylink_autoneg_inband(mode)) {
1964 + dev_err(ds->dev, "%s: in-band negotiation unsupported\n",
1969 + switch (state->interface) {
1970 + case PHY_INTERFACE_MODE_RGMII:
1971 + case PHY_INTERFACE_MODE_RGMII_ID:
1972 + case PHY_INTERFACE_MODE_RGMII_TXID:
1973 + case PHY_INTERFACE_MODE_RGMII_RXID:
1974 + qca8k_write(priv, reg, QCA8K_PORT_PAD_RGMII_EN);
1976 + /* Configure rgmii delay */
1977 + qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg);
1979 + /* QCA8337 requires to set rgmii rx delay for all ports.
1980 + * This is enabled through PORT5_PAD_CTRL for all ports,
1981 + * rather than individual port registers.
1983 + if (priv->switch_id == QCA8K_ID_QCA8337)
1984 + qca8k_write(priv, QCA8K_REG_PORT5_PAD_CTRL,
1985 + QCA8K_PORT_PAD_RGMII_RX_DELAY_EN);
1987 + case PHY_INTERFACE_MODE_SGMII:
1988 + case PHY_INTERFACE_MODE_1000BASEX:
1989 + /* Enable SGMII on the port */
1990 + qca8k_write(priv, reg, QCA8K_PORT_PAD_SGMII_EN);
1992 + /* Enable/disable SerDes auto-negotiation as necessary */
1993 + ret = qca8k_read(priv, QCA8K_REG_PWS, &val);
1996 + if (phylink_autoneg_inband(mode))
1997 + val &= ~QCA8K_PWS_SERDES_AEN_DIS;
1999 + val |= QCA8K_PWS_SERDES_AEN_DIS;
2000 + qca8k_write(priv, QCA8K_REG_PWS, val);
2002 + /* Configure the SGMII parameters */
2003 + ret = qca8k_read(priv, QCA8K_REG_SGMII_CTRL, &val);
2007 + val |= QCA8K_SGMII_EN_SD;
2009 + if (priv->ports_config.sgmii_enable_pll)
2010 + val |= QCA8K_SGMII_EN_PLL | QCA8K_SGMII_EN_RX |
2011 + QCA8K_SGMII_EN_TX;
2013 + if (dsa_is_cpu_port(ds, port)) {
2014 + /* CPU port, we're talking to the CPU MAC, be a PHY */
2015 + val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
2016 + val |= QCA8K_SGMII_MODE_CTRL_PHY;
2017 + } else if (state->interface == PHY_INTERFACE_MODE_SGMII) {
2018 + val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
2019 + val |= QCA8K_SGMII_MODE_CTRL_MAC;
2020 + } else if (state->interface == PHY_INTERFACE_MODE_1000BASEX) {
2021 + val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
2022 + val |= QCA8K_SGMII_MODE_CTRL_BASEX;
2025 + qca8k_write(priv, QCA8K_REG_SGMII_CTRL, val);
2027 + /* From original code is reported port instability as SGMII also
2028 + * require delay set. Apply advised values here or take them from DT.
2030 + if (state->interface == PHY_INTERFACE_MODE_SGMII)
2031 + qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg);
2033 + /* For qca8327/qca8328/qca8334/qca8338 sgmii is unique and
2034 + * falling edge is set writing in the PORT0 PAD reg
2036 + if (priv->switch_id == QCA8K_ID_QCA8327 ||
2037 + priv->switch_id == QCA8K_ID_QCA8337)
2038 + reg = QCA8K_REG_PORT0_PAD_CTRL;
2042 + /* SGMII Clock phase configuration */
2043 + if (priv->ports_config.sgmii_rx_clk_falling_edge)
2044 + val |= QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE;
2046 + if (priv->ports_config.sgmii_tx_clk_falling_edge)
2047 + val |= QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE;
2050 + ret = qca8k_rmw(priv, reg,
2051 + QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE |
2052 + QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE,
2057 + dev_err(ds->dev, "xMII mode %s not supported for port %d\n",
2058 + phy_modes(state->interface), port);
2064 +qca8k_phylink_validate(struct dsa_switch *ds, int port,
2065 + unsigned long *supported,
2066 + struct phylink_link_state *state)
2068 + __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
2071 + case 0: /* 1st CPU port */
2072 + if (state->interface != PHY_INTERFACE_MODE_NA &&
2073 + state->interface != PHY_INTERFACE_MODE_RGMII &&
2074 + state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
2075 + state->interface != PHY_INTERFACE_MODE_RGMII_TXID &&
2076 + state->interface != PHY_INTERFACE_MODE_RGMII_RXID &&
2077 + state->interface != PHY_INTERFACE_MODE_SGMII)
2085 + /* Internal PHY */
2086 + if (state->interface != PHY_INTERFACE_MODE_NA &&
2087 + state->interface != PHY_INTERFACE_MODE_GMII &&
2088 + state->interface != PHY_INTERFACE_MODE_INTERNAL)
2091 + case 6: /* 2nd CPU port / external PHY */
2092 + if (state->interface != PHY_INTERFACE_MODE_NA &&
2093 + state->interface != PHY_INTERFACE_MODE_RGMII &&
2094 + state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
2095 + state->interface != PHY_INTERFACE_MODE_RGMII_TXID &&
2096 + state->interface != PHY_INTERFACE_MODE_RGMII_RXID &&
2097 + state->interface != PHY_INTERFACE_MODE_SGMII &&
2098 + state->interface != PHY_INTERFACE_MODE_1000BASEX)
2103 + linkmode_zero(supported);
2107 + phylink_set_port_modes(mask);
2108 + phylink_set(mask, Autoneg);
2110 + phylink_set(mask, 1000baseT_Full);
2111 + phylink_set(mask, 10baseT_Half);
2112 + phylink_set(mask, 10baseT_Full);
2113 + phylink_set(mask, 100baseT_Half);
2114 + phylink_set(mask, 100baseT_Full);
2116 + if (state->interface == PHY_INTERFACE_MODE_1000BASEX)
2117 + phylink_set(mask, 1000baseX_Full);
2119 + phylink_set(mask, Pause);
2120 + phylink_set(mask, Asym_Pause);
2122 + linkmode_and(supported, supported, mask);
2123 + linkmode_and(state->advertising, state->advertising, mask);
2127 +qca8k_phylink_mac_link_state(struct dsa_switch *ds, int port,
2128 + struct phylink_link_state *state)
2130 + struct qca8k_priv *priv = ds->priv;
2134 + ret = qca8k_read(priv, QCA8K_REG_PORT_STATUS(port), &reg);
2138 + state->link = !!(reg & QCA8K_PORT_STATUS_LINK_UP);
2139 + state->an_complete = state->link;
2140 + state->an_enabled = !!(reg & QCA8K_PORT_STATUS_LINK_AUTO);
2141 + state->duplex = (reg & QCA8K_PORT_STATUS_DUPLEX) ? DUPLEX_FULL :
2144 + switch (reg & QCA8K_PORT_STATUS_SPEED) {
2145 + case QCA8K_PORT_STATUS_SPEED_10:
2146 + state->speed = SPEED_10;
2148 + case QCA8K_PORT_STATUS_SPEED_100:
2149 + state->speed = SPEED_100;
2151 + case QCA8K_PORT_STATUS_SPEED_1000:
2152 + state->speed = SPEED_1000;
2155 + state->speed = SPEED_UNKNOWN;
2159 + state->pause = MLO_PAUSE_NONE;
2160 + if (reg & QCA8K_PORT_STATUS_RXFLOW)
2161 + state->pause |= MLO_PAUSE_RX;
2162 + if (reg & QCA8K_PORT_STATUS_TXFLOW)
2163 + state->pause |= MLO_PAUSE_TX;
2169 +qca8k_phylink_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode,
2170 + phy_interface_t interface)
2172 + struct qca8k_priv *priv = ds->priv;
2174 + qca8k_port_set_status(priv, port, 0);
2178 +qca8k_phylink_mac_link_up(struct dsa_switch *ds, int port, unsigned int mode,
2179 + phy_interface_t interface, struct phy_device *phydev,
2180 + int speed, int duplex, bool tx_pause, bool rx_pause)
2182 + struct qca8k_priv *priv = ds->priv;
2185 + if (phylink_autoneg_inband(mode)) {
2186 + reg = QCA8K_PORT_STATUS_LINK_AUTO;
2190 + reg = QCA8K_PORT_STATUS_SPEED_10;
2193 + reg = QCA8K_PORT_STATUS_SPEED_100;
2196 + reg = QCA8K_PORT_STATUS_SPEED_1000;
2199 + reg = QCA8K_PORT_STATUS_LINK_AUTO;
2203 + if (duplex == DUPLEX_FULL)
2204 + reg |= QCA8K_PORT_STATUS_DUPLEX;
2206 + if (rx_pause || dsa_is_cpu_port(ds, port))
2207 + reg |= QCA8K_PORT_STATUS_RXFLOW;
2209 + if (tx_pause || dsa_is_cpu_port(ds, port))
2210 + reg |= QCA8K_PORT_STATUS_TXFLOW;
2213 + reg |= QCA8K_PORT_STATUS_TXMAC | QCA8K_PORT_STATUS_RXMAC;
2215 + qca8k_write(priv, QCA8K_REG_PORT_STATUS(port), reg);
2219 +qca8k_get_strings(struct dsa_switch *ds, int port, u32 stringset, uint8_t *data)
2221 + const struct qca8k_match_data *match_data;
2222 + struct qca8k_priv *priv = ds->priv;
2225 + if (stringset != ETH_SS_STATS)
2228 + match_data = of_device_get_match_data(priv->dev);
2230 + for (i = 0; i < match_data->mib_count; i++)
2231 + strncpy(data + i * ETH_GSTRING_LEN, ar8327_mib[i].name,
2235 +static void qca8k_mib_autocast_handler(struct dsa_switch *ds, struct sk_buff *skb)
2237 + const struct qca8k_match_data *match_data;
2238 + struct qca8k_mib_eth_data *mib_eth_data;
2239 + struct qca8k_priv *priv = ds->priv;
2240 + const struct qca8k_mib_desc *mib;
2241 + struct mib_ethhdr *mib_ethhdr;
2242 + int i, mib_len, offset = 0;
2246 + mib_ethhdr = (struct mib_ethhdr *)skb_mac_header(skb);
2247 + mib_eth_data = &priv->mib_eth_data;
2249 + /* The switch autocasts every port. Ignore other packets and
2250 + * parse only the requested one.
2252 + port = FIELD_GET(QCA_HDR_RECV_SOURCE_PORT, ntohs(mib_ethhdr->hdr));
2253 + if (port != mib_eth_data->req_port)
2256 + match_data = device_get_match_data(priv->dev);
2257 + data = mib_eth_data->data;
2259 + for (i = 0; i < match_data->mib_count; i++) {
2260 + mib = &ar8327_mib[i];
2262 + /* First 3 mib are present in the skb head */
2264 + data[i] = mib_ethhdr->data[i];
2268 + mib_len = sizeof(uint32_t);
2270 + /* Some mib are 64 bit wide */
2271 + if (mib->size == 2)
2272 + mib_len = sizeof(uint64_t);
2274 + /* Copy the mib value from packet to the */
2275 + memcpy(data + i, skb->data + offset, mib_len);
2277 + /* Set the offset for the next mib */
2278 + offset += mib_len;
2282 + /* Complete on receiving all the mib packet */
2283 + if (refcount_dec_and_test(&mib_eth_data->port_parsed))
2284 + complete(&mib_eth_data->rw_done);
2288 +qca8k_get_ethtool_stats_eth(struct dsa_switch *ds, int port, u64 *data)
2290 + struct dsa_port *dp = dsa_to_port(ds, port);
2291 + struct qca8k_mib_eth_data *mib_eth_data;
2292 + struct qca8k_priv *priv = ds->priv;
2295 + mib_eth_data = &priv->mib_eth_data;
2297 + mutex_lock(&mib_eth_data->mutex);
2299 + reinit_completion(&mib_eth_data->rw_done);
2301 + mib_eth_data->req_port = dp->index;
2302 + mib_eth_data->data = data;
2303 + refcount_set(&mib_eth_data->port_parsed, QCA8K_NUM_PORTS);
2305 + mutex_lock(&priv->reg_mutex);
2307 + /* Send mib autocast request */
2308 + ret = regmap_update_bits(priv->regmap, QCA8K_REG_MIB,
2309 + QCA8K_MIB_FUNC | QCA8K_MIB_BUSY,
2310 + FIELD_PREP(QCA8K_MIB_FUNC, QCA8K_MIB_CAST) |
2313 + mutex_unlock(&priv->reg_mutex);
2318 + ret = wait_for_completion_timeout(&mib_eth_data->rw_done, QCA8K_ETHERNET_TIMEOUT);
2321 + mutex_unlock(&mib_eth_data->mutex);
2327 +qca8k_get_ethtool_stats(struct dsa_switch *ds, int port,
2330 + struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
2331 + const struct qca8k_match_data *match_data;
2332 + const struct qca8k_mib_desc *mib;
2337 + if (priv->mgmt_master &&
2338 + qca8k_get_ethtool_stats_eth(ds, port, data) > 0)
2341 + match_data = of_device_get_match_data(priv->dev);
2343 + for (i = 0; i < match_data->mib_count; i++) {
2344 + mib = &ar8327_mib[i];
2345 + reg = QCA8K_PORT_MIB_COUNTER(port) + mib->offset;
2347 + ret = qca8k_read(priv, reg, &val);
2351 + if (mib->size == 2) {
2352 + ret = qca8k_read(priv, reg + 4, &hi);
2358 + if (mib->size == 2)
2359 + data[i] |= (u64)hi << 32;
2364 +qca8k_get_sset_count(struct dsa_switch *ds, int port, int sset)
2366 + const struct qca8k_match_data *match_data;
2367 + struct qca8k_priv *priv = ds->priv;
2369 + if (sset != ETH_SS_STATS)
2372 + match_data = of_device_get_match_data(priv->dev);
2374 + return match_data->mib_count;
2378 +qca8k_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *eee)
2380 + struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
2381 + u32 lpi_en = QCA8K_REG_EEE_CTRL_LPI_EN(port);
2385 + mutex_lock(&priv->reg_mutex);
2386 + ret = qca8k_read(priv, QCA8K_REG_EEE_CTRL, &reg);
2390 + if (eee->eee_enabled)
2394 + ret = qca8k_write(priv, QCA8K_REG_EEE_CTRL, reg);
2397 + mutex_unlock(&priv->reg_mutex);
2402 +qca8k_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
2404 + /* Nothing to do on the port's MAC */
2409 +qca8k_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
2411 + struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
2415 + case BR_STATE_DISABLED:
2416 + stp_state = QCA8K_PORT_LOOKUP_STATE_DISABLED;
2418 + case BR_STATE_BLOCKING:
2419 + stp_state = QCA8K_PORT_LOOKUP_STATE_BLOCKING;
2421 + case BR_STATE_LISTENING:
2422 + stp_state = QCA8K_PORT_LOOKUP_STATE_LISTENING;
2424 + case BR_STATE_LEARNING:
2425 + stp_state = QCA8K_PORT_LOOKUP_STATE_LEARNING;
2427 + case BR_STATE_FORWARDING:
2429 + stp_state = QCA8K_PORT_LOOKUP_STATE_FORWARD;
2433 + qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
2434 + QCA8K_PORT_LOOKUP_STATE_MASK, stp_state);
2438 +qca8k_port_bridge_join(struct dsa_switch *ds, int port, struct net_device *br)
2440 + struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
2441 + int port_mask, cpu_port;
2444 + cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
2445 + port_mask = BIT(cpu_port);
2447 + for (i = 0; i < QCA8K_NUM_PORTS; i++) {
2448 + if (dsa_is_cpu_port(ds, i))
2450 + if (dsa_to_port(ds, i)->bridge_dev != br)
2452 + /* Add this port to the portvlan mask of the other ports
2455 + ret = regmap_set_bits(priv->regmap,
2456 + QCA8K_PORT_LOOKUP_CTRL(i),
2461 + port_mask |= BIT(i);
2464 + /* Add all other ports to this ports portvlan mask */
2465 + ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
2466 + QCA8K_PORT_LOOKUP_MEMBER, port_mask);
2472 +qca8k_port_bridge_leave(struct dsa_switch *ds, int port, struct net_device *br)
2474 + struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
2477 + cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
2479 + for (i = 0; i < QCA8K_NUM_PORTS; i++) {
2480 + if (dsa_is_cpu_port(ds, i))
2482 + if (dsa_to_port(ds, i)->bridge_dev != br)
2484 + /* Remove this port to the portvlan mask of the other ports
2487 + regmap_clear_bits(priv->regmap,
2488 + QCA8K_PORT_LOOKUP_CTRL(i),
2492 + /* Set the cpu port to be the only one in the portvlan mask of
2495 + qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
2496 + QCA8K_PORT_LOOKUP_MEMBER, BIT(cpu_port));
2500 +qca8k_port_fast_age(struct dsa_switch *ds, int port)
2502 + struct qca8k_priv *priv = ds->priv;
2504 + mutex_lock(&priv->reg_mutex);
2505 + qca8k_fdb_access(priv, QCA8K_FDB_FLUSH_PORT, port);
2506 + mutex_unlock(&priv->reg_mutex);
2510 +qca8k_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
2512 + struct qca8k_priv *priv = ds->priv;
2513 + unsigned int secs = msecs / 1000;
2516 + /* AGE_TIME reg is set in 7s step */
2519 + /* Handle case with 0 as val to NOT disable
2525 + return regmap_update_bits(priv->regmap, QCA8K_REG_ATU_CTRL, QCA8K_ATU_AGE_TIME_MASK,
2526 + QCA8K_ATU_AGE_TIME(val));
2530 +qca8k_port_enable(struct dsa_switch *ds, int port,
2531 + struct phy_device *phy)
2533 + struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
2535 + qca8k_port_set_status(priv, port, 1);
2536 + priv->port_enabled_map |= BIT(port);
2538 + if (dsa_is_user_port(ds, port))
2539 + phy_support_asym_pause(phy);
2545 +qca8k_port_disable(struct dsa_switch *ds, int port)
2547 + struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
2549 + qca8k_port_set_status(priv, port, 0);
2550 + priv->port_enabled_map &= ~BIT(port);
2554 +qca8k_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
2556 + struct qca8k_priv *priv = ds->priv;
2559 + /* We only have a general MTU setting.
2560 + * DSA always set the CPU port's MTU to the largest MTU of the slave
2562 + * Setting MTU just for the CPU port is sufficient to correctly set a
2563 + * value for every port.
2565 + if (!dsa_is_cpu_port(ds, port))
2568 + /* To change the MAX_FRAME_SIZE the cpu ports must be off or
2569 + * the switch panics.
2570 + * Turn off both cpu ports before applying the new value to prevent
2573 + if (priv->port_enabled_map & BIT(0))
2574 + qca8k_port_set_status(priv, 0, 0);
2576 + if (priv->port_enabled_map & BIT(6))
2577 + qca8k_port_set_status(priv, 6, 0);
2579 + /* Include L2 header / FCS length */
2580 + ret = qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, new_mtu + ETH_HLEN + ETH_FCS_LEN);
2582 + if (priv->port_enabled_map & BIT(0))
2583 + qca8k_port_set_status(priv, 0, 1);
2585 + if (priv->port_enabled_map & BIT(6))
2586 + qca8k_port_set_status(priv, 6, 1);
2592 +qca8k_port_max_mtu(struct dsa_switch *ds, int port)
2594 + return QCA8K_MAX_MTU;
2598 +qca8k_port_fdb_insert(struct qca8k_priv *priv, const u8 *addr,
2599 + u16 port_mask, u16 vid)
2601 + /* Set the vid to the port vlan id if no vid is set */
2603 + vid = QCA8K_PORT_VID_DEF;
2605 + return qca8k_fdb_add(priv, addr, port_mask, vid,
2606 + QCA8K_ATU_STATUS_STATIC);
2610 +qca8k_port_fdb_add(struct dsa_switch *ds, int port,
2611 + const unsigned char *addr, u16 vid)
2613 + struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
2614 + u16 port_mask = BIT(port);
2616 + return qca8k_port_fdb_insert(priv, addr, port_mask, vid);
2620 +qca8k_port_fdb_del(struct dsa_switch *ds, int port,
2621 + const unsigned char *addr, u16 vid)
2623 + struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
2624 + u16 port_mask = BIT(port);
2627 + vid = QCA8K_PORT_VID_DEF;
2629 + return qca8k_fdb_del(priv, addr, port_mask, vid);
2633 +qca8k_port_fdb_dump(struct dsa_switch *ds, int port,
2634 + dsa_fdb_dump_cb_t *cb, void *data)
2636 + struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
2637 + struct qca8k_fdb _fdb = { 0 };
2638 + int cnt = QCA8K_NUM_FDB_RECORDS;
2642 + mutex_lock(&priv->reg_mutex);
2643 + while (cnt-- && !qca8k_fdb_next(priv, &_fdb, port)) {
2646 + is_static = (_fdb.aging == QCA8K_ATU_STATUS_STATIC);
2647 + ret = cb(_fdb.mac, _fdb.vid, is_static, data);
2651 + mutex_unlock(&priv->reg_mutex);
2657 +qca8k_port_mdb_add(struct dsa_switch *ds, int port,
2658 + const struct switchdev_obj_port_mdb *mdb)
2660 + struct qca8k_priv *priv = ds->priv;
2661 + const u8 *addr = mdb->addr;
2662 + u16 vid = mdb->vid;
2664 + return qca8k_fdb_search_and_insert(priv, BIT(port), addr, vid);
2668 +qca8k_port_mdb_del(struct dsa_switch *ds, int port,
2669 + const struct switchdev_obj_port_mdb *mdb)
2671 + struct qca8k_priv *priv = ds->priv;
2672 + const u8 *addr = mdb->addr;
2673 + u16 vid = mdb->vid;
2675 + return qca8k_fdb_search_and_del(priv, BIT(port), addr, vid);
2679 +qca8k_port_mirror_add(struct dsa_switch *ds, int port,
2680 + struct dsa_mall_mirror_tc_entry *mirror,
2683 + struct qca8k_priv *priv = ds->priv;
2684 + int monitor_port, ret;
2687 + /* Check for existent entry */
2688 + if ((ingress ? priv->mirror_rx : priv->mirror_tx) & BIT(port))
2691 + ret = regmap_read(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0, &val);
2695 + /* QCA83xx can have only one port set to mirror mode.
2696 + * Check that the correct port is requested and return error otherwise.
2697 + * When no mirror port is set, the value is set to 0xF
2699 + monitor_port = FIELD_GET(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
2700 + if (monitor_port != 0xF && monitor_port != mirror->to_local_port)
2703 + /* Set the monitor port */
2704 + val = FIELD_PREP(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM,
2705 + mirror->to_local_port);
2706 + ret = regmap_update_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
2707 + QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
2712 + reg = QCA8K_PORT_LOOKUP_CTRL(port);
2713 + val = QCA8K_PORT_LOOKUP_ING_MIRROR_EN;
2715 + reg = QCA8K_REG_PORT_HOL_CTRL1(port);
2716 + val = QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN;
2719 + ret = regmap_update_bits(priv->regmap, reg, val, val);
2723 + /* Track mirror port for tx and rx to decide when the
2724 + * mirror port has to be disabled.
2727 + priv->mirror_rx |= BIT(port);
2729 + priv->mirror_tx |= BIT(port);
2735 +qca8k_port_mirror_del(struct dsa_switch *ds, int port,
2736 + struct dsa_mall_mirror_tc_entry *mirror)
2738 + struct qca8k_priv *priv = ds->priv;
2742 + if (mirror->ingress) {
2743 + reg = QCA8K_PORT_LOOKUP_CTRL(port);
2744 + val = QCA8K_PORT_LOOKUP_ING_MIRROR_EN;
2746 + reg = QCA8K_REG_PORT_HOL_CTRL1(port);
2747 + val = QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN;
2750 + ret = regmap_clear_bits(priv->regmap, reg, val);
2754 + if (mirror->ingress)
2755 + priv->mirror_rx &= ~BIT(port);
2757 + priv->mirror_tx &= ~BIT(port);
2759 + /* No port set to send packet to mirror port. Disable mirror port */
2760 + if (!priv->mirror_rx && !priv->mirror_tx) {
2761 + val = FIELD_PREP(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, 0xF);
2762 + ret = regmap_update_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
2763 + QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
2768 + dev_err(priv->dev, "Failed to del mirror port from %d", port);
2772 +qca8k_port_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
2773 + struct netlink_ext_ack *extack)
2775 + struct qca8k_priv *priv = ds->priv;
2778 + if (vlan_filtering) {
2779 + ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
2780 + QCA8K_PORT_LOOKUP_VLAN_MODE_MASK,
2781 + QCA8K_PORT_LOOKUP_VLAN_MODE_SECURE);
2783 + ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
2784 + QCA8K_PORT_LOOKUP_VLAN_MODE_MASK,
2785 + QCA8K_PORT_LOOKUP_VLAN_MODE_NONE);
2792 +qca8k_port_vlan_add(struct dsa_switch *ds, int port,
2793 + const struct switchdev_obj_port_vlan *vlan,
2794 + struct netlink_ext_ack *extack)
2796 + bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
2797 + bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
2798 + struct qca8k_priv *priv = ds->priv;
2801 + ret = qca8k_vlan_add(priv, port, vlan->vid, untagged);
2803 + dev_err(priv->dev, "Failed to add VLAN to port %d (%d)", port, ret);
2808 + ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(port),
2809 + QCA8K_EGREES_VLAN_PORT_MASK(port),
2810 + QCA8K_EGREES_VLAN_PORT(port, vlan->vid));
2814 + ret = qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(port),
2815 + QCA8K_PORT_VLAN_CVID(vlan->vid) |
2816 + QCA8K_PORT_VLAN_SVID(vlan->vid));
2823 +qca8k_port_vlan_del(struct dsa_switch *ds, int port,
2824 + const struct switchdev_obj_port_vlan *vlan)
2826 + struct qca8k_priv *priv = ds->priv;
2829 + ret = qca8k_vlan_del(priv, port, vlan->vid);
2831 + dev_err(priv->dev, "Failed to delete VLAN from port %d (%d)", port, ret);
2836 +static u32 qca8k_get_phy_flags(struct dsa_switch *ds, int port)
2838 + struct qca8k_priv *priv = ds->priv;
2840 + /* Communicate to the phy internal driver the switch revision.
2841 + * Based on the switch revision, different values need to be
2842 + * set to the dbg and mmd reg on the phy.
2843 + * The first 2 bit are used to communicate the switch revision
2844 + * to the phy driver.
2846 + if (port > 0 && port < 6)
2847 + return priv->switch_revision;
2852 +static enum dsa_tag_protocol
2853 +qca8k_get_tag_protocol(struct dsa_switch *ds, int port,
2854 + enum dsa_tag_protocol mp)
2856 + return DSA_TAG_PROTO_QCA;
2860 +qca8k_lag_can_offload(struct dsa_switch *ds,
2861 + struct net_device *lag,
2862 + struct netdev_lag_upper_info *info)
2864 + struct dsa_port *dp;
2865 + int id, members = 0;
2867 + id = dsa_lag_id(ds->dst, lag);
2868 + if (id < 0 || id >= ds->num_lag_ids)
2871 + dsa_lag_foreach_port(dp, ds->dst, lag)
2872 + /* Includes the port joining the LAG */
2875 + if (members > QCA8K_NUM_PORTS_FOR_LAG)
2878 + if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
2881 + if (info->hash_type != NETDEV_LAG_HASH_L2 &&
2882 + info->hash_type != NETDEV_LAG_HASH_L23)
2889 +qca8k_lag_setup_hash(struct dsa_switch *ds,
2890 + struct net_device *lag,
2891 + struct netdev_lag_upper_info *info)
2893 + struct qca8k_priv *priv = ds->priv;
2894 + bool unique_lag = true;
2898 + id = dsa_lag_id(ds->dst, lag);
2900 + switch (info->hash_type) {
2901 + case NETDEV_LAG_HASH_L23:
2902 + hash |= QCA8K_TRUNK_HASH_SIP_EN;
2903 + hash |= QCA8K_TRUNK_HASH_DIP_EN;
2905 + case NETDEV_LAG_HASH_L2:
2906 + hash |= QCA8K_TRUNK_HASH_SA_EN;
2907 + hash |= QCA8K_TRUNK_HASH_DA_EN;
2909 + default: /* We should NEVER reach this */
2910 + return -EOPNOTSUPP;
2913 + /* Check if we are the unique configured LAG */
2914 + dsa_lags_foreach_id(i, ds->dst)
2915 + if (i != id && dsa_lag_dev(ds->dst, i)) {
2916 + unique_lag = false;
2920 + /* Hash Mode is global. Make sure the same Hash Mode
2921 + * is set to all the 4 possible lag.
2922 + * If we are the unique LAG we can set whatever hash
2924 + * To change the hash mode, all LAGs must be removed
2925 + * and the mode changed with the last one.
2928 + priv->lag_hash_mode = hash;
2929 + } else if (priv->lag_hash_mode != hash) {
2930 + netdev_err(lag, "Error: Mismateched Hash Mode across different lag is not supported\n");
2931 + return -EOPNOTSUPP;
2934 + return regmap_update_bits(priv->regmap, QCA8K_TRUNK_HASH_EN_CTRL,
2935 + QCA8K_TRUNK_HASH_MASK, hash);
2939 +qca8k_lag_refresh_portmap(struct dsa_switch *ds, int port,
2940 + struct net_device *lag, bool delete)
2942 + struct qca8k_priv *priv = ds->priv;
2946 + id = dsa_lag_id(ds->dst, lag);
2948 + /* Read current port member */
2949 + ret = regmap_read(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL0, &val);
2953 + /* Shift val to the correct trunk */
2954 + val >>= QCA8K_REG_GOL_TRUNK_SHIFT(id);
2955 + val &= QCA8K_REG_GOL_TRUNK_MEMBER_MASK;
2957 + val &= ~BIT(port);
2961 + /* Update port member. With empty portmap disable trunk */
2962 + ret = regmap_update_bits(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL0,
2963 + QCA8K_REG_GOL_TRUNK_MEMBER(id) |
2964 + QCA8K_REG_GOL_TRUNK_EN(id),
2965 + !val << QCA8K_REG_GOL_TRUNK_SHIFT(id) |
2966 + val << QCA8K_REG_GOL_TRUNK_SHIFT(id));
2968 + /* Search empty member if adding or port on deleting */
2969 + for (i = 0; i < QCA8K_NUM_PORTS_FOR_LAG; i++) {
2970 + ret = regmap_read(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL(id), &val);
2974 + val >>= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i);
2975 + val &= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_MASK;
2978 + /* If port flagged to be disabled assume this member is
2981 + if (val != QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK)
2984 + val &= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT_MASK;
2988 + /* If port flagged to be enabled assume this member is
2991 + if (val == QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK)
2995 + /* We have found the member to add/remove */
2999 + /* Set port in the correct port mask or disable port if in delete mode */
3000 + return regmap_update_bits(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL(id),
3001 + QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN(id, i) |
3002 + QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT(id, i),
3003 + !delete << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i) |
3004 + port << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i));
3008 +qca8k_port_lag_join(struct dsa_switch *ds, int port,
3009 + struct net_device *lag,
3010 + struct netdev_lag_upper_info *info)
3014 + if (!qca8k_lag_can_offload(ds, lag, info))
3015 + return -EOPNOTSUPP;
3017 + ret = qca8k_lag_setup_hash(ds, lag, info);
3021 + return qca8k_lag_refresh_portmap(ds, port, lag, false);
3025 +qca8k_port_lag_leave(struct dsa_switch *ds, int port,
3026 + struct net_device *lag)
3028 + return qca8k_lag_refresh_portmap(ds, port, lag, true);
3032 +qca8k_master_change(struct dsa_switch *ds, const struct net_device *master,
3035 + struct dsa_port *dp = master->dsa_ptr;
3036 + struct qca8k_priv *priv = ds->priv;
3038 + /* Ethernet MIB/MDIO is only supported for CPU port 0 */
3039 + if (dp->index != 0)
3042 + mutex_lock(&priv->mgmt_eth_data.mutex);
3043 + mutex_lock(&priv->mib_eth_data.mutex);
3045 + priv->mgmt_master = operational ? (struct net_device *)master : NULL;
3047 + mutex_unlock(&priv->mib_eth_data.mutex);
3048 + mutex_unlock(&priv->mgmt_eth_data.mutex);
3051 +static int qca8k_connect_tag_protocol(struct dsa_switch *ds,
3052 + enum dsa_tag_protocol proto)
3054 + struct qca_tagger_data *tagger_data;
3057 + case DSA_TAG_PROTO_QCA:
3058 + tagger_data = ds->tagger_data;
3060 + tagger_data->rw_reg_ack_handler = qca8k_rw_reg_ack_handler;
3061 + tagger_data->mib_autocast_handler = qca8k_mib_autocast_handler;
3065 + return -EOPNOTSUPP;
3071 +static const struct dsa_switch_ops qca8k_switch_ops = {
3072 + .get_tag_protocol = qca8k_get_tag_protocol,
3073 + .setup = qca8k_setup,
3074 + .get_strings = qca8k_get_strings,
3075 + .get_ethtool_stats = qca8k_get_ethtool_stats,
3076 + .get_sset_count = qca8k_get_sset_count,
3077 + .set_ageing_time = qca8k_set_ageing_time,
3078 + .get_mac_eee = qca8k_get_mac_eee,
3079 + .set_mac_eee = qca8k_set_mac_eee,
3080 + .port_enable = qca8k_port_enable,
3081 + .port_disable = qca8k_port_disable,
3082 + .port_change_mtu = qca8k_port_change_mtu,
3083 + .port_max_mtu = qca8k_port_max_mtu,
3084 + .port_stp_state_set = qca8k_port_stp_state_set,
3085 + .port_bridge_join = qca8k_port_bridge_join,
3086 + .port_bridge_leave = qca8k_port_bridge_leave,
3087 + .port_fast_age = qca8k_port_fast_age,
3088 + .port_fdb_add = qca8k_port_fdb_add,
3089 + .port_fdb_del = qca8k_port_fdb_del,
3090 + .port_fdb_dump = qca8k_port_fdb_dump,
3091 + .port_mdb_add = qca8k_port_mdb_add,
3092 + .port_mdb_del = qca8k_port_mdb_del,
3093 + .port_mirror_add = qca8k_port_mirror_add,
3094 + .port_mirror_del = qca8k_port_mirror_del,
3095 + .port_vlan_filtering = qca8k_port_vlan_filtering,
3096 + .port_vlan_add = qca8k_port_vlan_add,
3097 + .port_vlan_del = qca8k_port_vlan_del,
3098 + .phylink_validate = qca8k_phylink_validate,
3099 + .phylink_mac_link_state = qca8k_phylink_mac_link_state,
3100 + .phylink_mac_config = qca8k_phylink_mac_config,
3101 + .phylink_mac_link_down = qca8k_phylink_mac_link_down,
3102 + .phylink_mac_link_up = qca8k_phylink_mac_link_up,
3103 + .get_phy_flags = qca8k_get_phy_flags,
3104 + .port_lag_join = qca8k_port_lag_join,
3105 + .port_lag_leave = qca8k_port_lag_leave,
3106 + .master_state_change = qca8k_master_change,
3107 + .connect_tag_protocol = qca8k_connect_tag_protocol,
3110 +static int qca8k_read_switch_id(struct qca8k_priv *priv)
3112 + const struct qca8k_match_data *data;
3117 + /* get the switches ID from the compatible */
3118 + data = of_device_get_match_data(priv->dev);
3122 + ret = qca8k_read(priv, QCA8K_REG_MASK_CTRL, &val);
3126 + id = QCA8K_MASK_CTRL_DEVICE_ID(val);
3127 + if (id != data->id) {
3128 + dev_err(priv->dev, "Switch id detected %x but expected %x", id, data->id);
3132 + priv->switch_id = id;
3134 + /* Save revision to communicate to the internal PHY driver */
3135 + priv->switch_revision = QCA8K_MASK_CTRL_REV_ID(val);
3141 +qca8k_sw_probe(struct mdio_device *mdiodev)
3143 + struct qca8k_priv *priv;
3146 + /* allocate the private data struct so that we can probe the switches
3149 + priv = devm_kzalloc(&mdiodev->dev, sizeof(*priv), GFP_KERNEL);
3153 + priv->bus = mdiodev->bus;
3154 + priv->dev = &mdiodev->dev;
3156 + priv->reset_gpio = devm_gpiod_get_optional(priv->dev, "reset",
3158 + if (IS_ERR(priv->reset_gpio))
3159 + return PTR_ERR(priv->reset_gpio);
3161 + if (priv->reset_gpio) {
3162 + gpiod_set_value_cansleep(priv->reset_gpio, 1);
3163 + /* The active low duration must be greater than 10 ms
3164 + * and checkpatch.pl wants 20 ms.
3167 + gpiod_set_value_cansleep(priv->reset_gpio, 0);
3170 + /* Start by setting up the register mapping */
3171 + priv->regmap = devm_regmap_init(&mdiodev->dev, NULL, priv,
3172 + &qca8k_regmap_config);
3173 + if (IS_ERR(priv->regmap)) {
3174 + dev_err(priv->dev, "regmap initialization failed");
3175 + return PTR_ERR(priv->regmap);
3178 + priv->mdio_cache.page = 0xffff;
3179 + priv->mdio_cache.lo = 0xffff;
3180 + priv->mdio_cache.hi = 0xffff;
3182 + /* Check the detected switch id */
3183 + ret = qca8k_read_switch_id(priv);
3187 + priv->ds = devm_kzalloc(&mdiodev->dev, sizeof(*priv->ds), GFP_KERNEL);
3191 + mutex_init(&priv->mgmt_eth_data.mutex);
3192 + init_completion(&priv->mgmt_eth_data.rw_done);
3194 + mutex_init(&priv->mib_eth_data.mutex);
3195 + init_completion(&priv->mib_eth_data.rw_done);
3197 + priv->ds->dev = &mdiodev->dev;
3198 + priv->ds->num_ports = QCA8K_NUM_PORTS;
3199 + priv->ds->priv = priv;
3200 + priv->ds->ops = &qca8k_switch_ops;
3201 + mutex_init(&priv->reg_mutex);
3202 + dev_set_drvdata(&mdiodev->dev, priv);
3204 + return dsa_register_switch(priv->ds);
3208 +qca8k_sw_remove(struct mdio_device *mdiodev)
3210 + struct qca8k_priv *priv = dev_get_drvdata(&mdiodev->dev);
3216 + for (i = 0; i < QCA8K_NUM_PORTS; i++)
3217 + qca8k_port_set_status(priv, i, 0);
3219 + dsa_unregister_switch(priv->ds);
3221 + dev_set_drvdata(&mdiodev->dev, NULL);
3224 +static void qca8k_sw_shutdown(struct mdio_device *mdiodev)
3226 + struct qca8k_priv *priv = dev_get_drvdata(&mdiodev->dev);
3231 + dsa_switch_shutdown(priv->ds);
3233 + dev_set_drvdata(&mdiodev->dev, NULL);
3236 +#ifdef CONFIG_PM_SLEEP
3238 +qca8k_set_pm(struct qca8k_priv *priv, int enable)
3242 + for (port = 0; port < QCA8K_NUM_PORTS; port++) {
3243 + /* Do not enable on resume if the port was
3244 + * disabled before.
3246 + if (!(priv->port_enabled_map & BIT(port)))
3249 + qca8k_port_set_status(priv, port, enable);
3253 +static int qca8k_suspend(struct device *dev)
3255 + struct qca8k_priv *priv = dev_get_drvdata(dev);
3257 + qca8k_set_pm(priv, 0);
3259 + return dsa_switch_suspend(priv->ds);
3262 +static int qca8k_resume(struct device *dev)
3264 + struct qca8k_priv *priv = dev_get_drvdata(dev);
3266 + qca8k_set_pm(priv, 1);
3268 + return dsa_switch_resume(priv->ds);
3270 +#endif /* CONFIG_PM_SLEEP */
3272 +static SIMPLE_DEV_PM_OPS(qca8k_pm_ops,
3273 + qca8k_suspend, qca8k_resume);
3275 +static const struct qca8k_match_data qca8327 = {
3276 + .id = QCA8K_ID_QCA8327,
3277 + .reduced_package = true,
3278 + .mib_count = QCA8K_QCA832X_MIB_COUNT,
3281 +static const struct qca8k_match_data qca8328 = {
3282 + .id = QCA8K_ID_QCA8327,
3283 + .mib_count = QCA8K_QCA832X_MIB_COUNT,
3286 +static const struct qca8k_match_data qca833x = {
3287 + .id = QCA8K_ID_QCA8337,
3288 + .mib_count = QCA8K_QCA833X_MIB_COUNT,
3291 +static const struct of_device_id qca8k_of_match[] = {
3292 + { .compatible = "qca,qca8327", .data = &qca8327 },
3293 + { .compatible = "qca,qca8328", .data = &qca8328 },
3294 + { .compatible = "qca,qca8334", .data = &qca833x },
3295 + { .compatible = "qca,qca8337", .data = &qca833x },
3296 + { /* sentinel */ },
3299 +static struct mdio_driver qca8kmdio_driver = {
3300 + .probe = qca8k_sw_probe,
3301 + .remove = qca8k_sw_remove,
3302 + .shutdown = qca8k_sw_shutdown,
3303 + .mdiodrv.driver = {
3305 + .of_match_table = qca8k_of_match,
3306 + .pm = &qca8k_pm_ops,
3310 +mdio_module_driver(qca8kmdio_driver);
3312 +MODULE_AUTHOR("Mathieu Olivari, John Crispin <john@phrozen.org>");
3313 +MODULE_DESCRIPTION("Driver for QCA8K ethernet switch family");
3314 +MODULE_LICENSE("GPL v2");
3315 +MODULE_ALIAS("platform:qca8k");
3317 +++ b/drivers/net/dsa/qca/qca8k.h
3319 +/* SPDX-License-Identifier: GPL-2.0-only */
3321 + * Copyright (C) 2009 Felix Fietkau <nbd@nbd.name>
3322 + * Copyright (C) 2011-2012 Gabor Juhos <juhosg@openwrt.org>
3323 + * Copyright (c) 2015, The Linux Foundation. All rights reserved.
3329 +#include <linux/delay.h>
3330 +#include <linux/regmap.h>
3331 +#include <linux/gpio.h>
3332 +#include <linux/dsa/tag_qca.h>
3334 +#define QCA8K_ETHERNET_MDIO_PRIORITY 7
3335 +#define QCA8K_ETHERNET_PHY_PRIORITY 6
3336 +#define QCA8K_ETHERNET_TIMEOUT 100
3338 +#define QCA8K_NUM_PORTS 7
3339 +#define QCA8K_NUM_CPU_PORTS 2
3340 +#define QCA8K_MAX_MTU 9000
3341 +#define QCA8K_NUM_LAGS 4
3342 +#define QCA8K_NUM_PORTS_FOR_LAG 4
3344 +#define PHY_ID_QCA8327 0x004dd034
3345 +#define QCA8K_ID_QCA8327 0x12
3346 +#define PHY_ID_QCA8337 0x004dd036
3347 +#define QCA8K_ID_QCA8337 0x13
3349 +#define QCA8K_QCA832X_MIB_COUNT 39
3350 +#define QCA8K_QCA833X_MIB_COUNT 41
3352 +#define QCA8K_BUSY_WAIT_TIMEOUT 2000
3354 +#define QCA8K_NUM_FDB_RECORDS 2048
3356 +#define QCA8K_PORT_VID_DEF 1
3358 +/* Global control registers */
3359 +#define QCA8K_REG_MASK_CTRL 0x000
3360 +#define QCA8K_MASK_CTRL_REV_ID_MASK GENMASK(7, 0)
3361 +#define QCA8K_MASK_CTRL_REV_ID(x) FIELD_GET(QCA8K_MASK_CTRL_REV_ID_MASK, x)
3362 +#define QCA8K_MASK_CTRL_DEVICE_ID_MASK GENMASK(15, 8)
3363 +#define QCA8K_MASK_CTRL_DEVICE_ID(x) FIELD_GET(QCA8K_MASK_CTRL_DEVICE_ID_MASK, x)
3364 +#define QCA8K_REG_PORT0_PAD_CTRL 0x004
3365 +#define QCA8K_PORT0_PAD_MAC06_EXCHANGE_EN BIT(31)
3366 +#define QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE BIT(19)
3367 +#define QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE BIT(18)
3368 +#define QCA8K_REG_PORT5_PAD_CTRL 0x008
3369 +#define QCA8K_REG_PORT6_PAD_CTRL 0x00c
3370 +#define QCA8K_PORT_PAD_RGMII_EN BIT(26)
3371 +#define QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK GENMASK(23, 22)
3372 +#define QCA8K_PORT_PAD_RGMII_TX_DELAY(x) FIELD_PREP(QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK, x)
3373 +#define QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK GENMASK(21, 20)
3374 +#define QCA8K_PORT_PAD_RGMII_RX_DELAY(x) FIELD_PREP(QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK, x)
3375 +#define QCA8K_PORT_PAD_RGMII_TX_DELAY_EN BIT(25)
3376 +#define QCA8K_PORT_PAD_RGMII_RX_DELAY_EN BIT(24)
3377 +#define QCA8K_PORT_PAD_SGMII_EN BIT(7)
3378 +#define QCA8K_REG_PWS 0x010
3379 +#define QCA8K_PWS_POWER_ON_SEL BIT(31)
3380 +/* This reg is only valid for QCA832x and toggles the package
3381 + * type from 176 pin (by default) to 148 pin used on QCA8327
3383 +#define QCA8327_PWS_PACKAGE148_EN BIT(30)
3384 +#define QCA8K_PWS_LED_OPEN_EN_CSR BIT(24)
3385 +#define QCA8K_PWS_SERDES_AEN_DIS BIT(7)
3386 +#define QCA8K_REG_MODULE_EN 0x030
3387 +#define QCA8K_MODULE_EN_MIB BIT(0)
3388 +#define QCA8K_REG_MIB 0x034
3389 +#define QCA8K_MIB_FUNC GENMASK(26, 24)
3390 +#define QCA8K_MIB_CPU_KEEP BIT(20)
3391 +#define QCA8K_MIB_BUSY BIT(17)
3392 +#define QCA8K_MDIO_MASTER_CTRL 0x3c
3393 +#define QCA8K_MDIO_MASTER_BUSY BIT(31)
3394 +#define QCA8K_MDIO_MASTER_EN BIT(30)
3395 +#define QCA8K_MDIO_MASTER_READ BIT(27)
3396 +#define QCA8K_MDIO_MASTER_WRITE 0
3397 +#define QCA8K_MDIO_MASTER_SUP_PRE BIT(26)
3398 +#define QCA8K_MDIO_MASTER_PHY_ADDR_MASK GENMASK(25, 21)
3399 +#define QCA8K_MDIO_MASTER_PHY_ADDR(x) FIELD_PREP(QCA8K_MDIO_MASTER_PHY_ADDR_MASK, x)
3400 +#define QCA8K_MDIO_MASTER_REG_ADDR_MASK GENMASK(20, 16)
3401 +#define QCA8K_MDIO_MASTER_REG_ADDR(x) FIELD_PREP(QCA8K_MDIO_MASTER_REG_ADDR_MASK, x)
3402 +#define QCA8K_MDIO_MASTER_DATA_MASK GENMASK(15, 0)
3403 +#define QCA8K_MDIO_MASTER_DATA(x) FIELD_PREP(QCA8K_MDIO_MASTER_DATA_MASK, x)
3404 +#define QCA8K_MDIO_MASTER_MAX_PORTS 5
3405 +#define QCA8K_MDIO_MASTER_MAX_REG 32
3406 +#define QCA8K_GOL_MAC_ADDR0 0x60
3407 +#define QCA8K_GOL_MAC_ADDR1 0x64
3408 +#define QCA8K_MAX_FRAME_SIZE 0x78
3409 +#define QCA8K_REG_PORT_STATUS(_i) (0x07c + (_i) * 4)
3410 +#define QCA8K_PORT_STATUS_SPEED GENMASK(1, 0)
3411 +#define QCA8K_PORT_STATUS_SPEED_10 0
3412 +#define QCA8K_PORT_STATUS_SPEED_100 0x1
3413 +#define QCA8K_PORT_STATUS_SPEED_1000 0x2
3414 +#define QCA8K_PORT_STATUS_TXMAC BIT(2)
3415 +#define QCA8K_PORT_STATUS_RXMAC BIT(3)
3416 +#define QCA8K_PORT_STATUS_TXFLOW BIT(4)
3417 +#define QCA8K_PORT_STATUS_RXFLOW BIT(5)
3418 +#define QCA8K_PORT_STATUS_DUPLEX BIT(6)
3419 +#define QCA8K_PORT_STATUS_LINK_UP BIT(8)
3420 +#define QCA8K_PORT_STATUS_LINK_AUTO BIT(9)
3421 +#define QCA8K_PORT_STATUS_LINK_PAUSE BIT(10)
3422 +#define QCA8K_PORT_STATUS_FLOW_AUTO BIT(12)
3423 +#define QCA8K_REG_PORT_HDR_CTRL(_i) (0x9c + (_i * 4))
3424 +#define QCA8K_PORT_HDR_CTRL_RX_MASK GENMASK(3, 2)
3425 +#define QCA8K_PORT_HDR_CTRL_TX_MASK GENMASK(1, 0)
3426 +#define QCA8K_PORT_HDR_CTRL_ALL 2
3427 +#define QCA8K_PORT_HDR_CTRL_MGMT 1
3428 +#define QCA8K_PORT_HDR_CTRL_NONE 0
3429 +#define QCA8K_REG_SGMII_CTRL 0x0e0
3430 +#define QCA8K_SGMII_EN_PLL BIT(1)
3431 +#define QCA8K_SGMII_EN_RX BIT(2)
3432 +#define QCA8K_SGMII_EN_TX BIT(3)
3433 +#define QCA8K_SGMII_EN_SD BIT(4)
3434 +#define QCA8K_SGMII_CLK125M_DELAY BIT(7)
3435 +#define QCA8K_SGMII_MODE_CTRL_MASK GENMASK(23, 22)
3436 +#define QCA8K_SGMII_MODE_CTRL(x) FIELD_PREP(QCA8K_SGMII_MODE_CTRL_MASK, x)
3437 +#define QCA8K_SGMII_MODE_CTRL_BASEX QCA8K_SGMII_MODE_CTRL(0x0)
3438 +#define QCA8K_SGMII_MODE_CTRL_PHY QCA8K_SGMII_MODE_CTRL(0x1)
3439 +#define QCA8K_SGMII_MODE_CTRL_MAC QCA8K_SGMII_MODE_CTRL(0x2)
3441 +/* MAC_PWR_SEL registers */
3442 +#define QCA8K_REG_MAC_PWR_SEL 0x0e4
3443 +#define QCA8K_MAC_PWR_RGMII1_1_8V BIT(18)
3444 +#define QCA8K_MAC_PWR_RGMII0_1_8V BIT(19)
3446 +/* EEE control registers */
3447 +#define QCA8K_REG_EEE_CTRL 0x100
3448 +#define QCA8K_REG_EEE_CTRL_LPI_EN(_i) ((_i + 1) * 2)
3450 +/* TRUNK_HASH_EN registers */
3451 +#define QCA8K_TRUNK_HASH_EN_CTRL 0x270
3452 +#define QCA8K_TRUNK_HASH_SIP_EN BIT(3)
3453 +#define QCA8K_TRUNK_HASH_DIP_EN BIT(2)
3454 +#define QCA8K_TRUNK_HASH_SA_EN BIT(1)
3455 +#define QCA8K_TRUNK_HASH_DA_EN BIT(0)
3456 +#define QCA8K_TRUNK_HASH_MASK GENMASK(3, 0)
3458 +/* ACL registers */
3459 +#define QCA8K_REG_PORT_VLAN_CTRL0(_i) (0x420 + (_i * 8))
3460 +#define QCA8K_PORT_VLAN_CVID_MASK GENMASK(27, 16)
3461 +#define QCA8K_PORT_VLAN_CVID(x) FIELD_PREP(QCA8K_PORT_VLAN_CVID_MASK, x)
3462 +#define QCA8K_PORT_VLAN_SVID_MASK GENMASK(11, 0)
3463 +#define QCA8K_PORT_VLAN_SVID(x) FIELD_PREP(QCA8K_PORT_VLAN_SVID_MASK, x)
3464 +#define QCA8K_REG_PORT_VLAN_CTRL1(_i) (0x424 + (_i * 8))
3465 +#define QCA8K_REG_IPV4_PRI_BASE_ADDR 0x470
3466 +#define QCA8K_REG_IPV4_PRI_ADDR_MASK 0x474
3468 +/* Lookup registers */
3469 +#define QCA8K_REG_ATU_DATA0 0x600
3470 +#define QCA8K_ATU_ADDR2_MASK GENMASK(31, 24)
3471 +#define QCA8K_ATU_ADDR3_MASK GENMASK(23, 16)
3472 +#define QCA8K_ATU_ADDR4_MASK GENMASK(15, 8)
3473 +#define QCA8K_ATU_ADDR5_MASK GENMASK(7, 0)
3474 +#define QCA8K_REG_ATU_DATA1 0x604
3475 +#define QCA8K_ATU_PORT_MASK GENMASK(22, 16)
3476 +#define QCA8K_ATU_ADDR0_MASK GENMASK(15, 8)
3477 +#define QCA8K_ATU_ADDR1_MASK GENMASK(7, 0)
3478 +#define QCA8K_REG_ATU_DATA2 0x608
3479 +#define QCA8K_ATU_VID_MASK GENMASK(19, 8)
3480 +#define QCA8K_ATU_STATUS_MASK GENMASK(3, 0)
3481 +#define QCA8K_ATU_STATUS_STATIC 0xf
3482 +#define QCA8K_REG_ATU_FUNC 0x60c
3483 +#define QCA8K_ATU_FUNC_BUSY BIT(31)
3484 +#define QCA8K_ATU_FUNC_PORT_EN BIT(14)
3485 +#define QCA8K_ATU_FUNC_MULTI_EN BIT(13)
3486 +#define QCA8K_ATU_FUNC_FULL BIT(12)
3487 +#define QCA8K_ATU_FUNC_PORT_MASK GENMASK(11, 8)
3488 +#define QCA8K_REG_VTU_FUNC0 0x610
3489 +#define QCA8K_VTU_FUNC0_VALID BIT(20)
3490 +#define QCA8K_VTU_FUNC0_IVL_EN BIT(19)
3491 +/* QCA8K_VTU_FUNC0_EG_MODE_MASK GENMASK(17, 4)
3492 + * It does contain VLAN_MODE for each port [5:4] for port0,
3493 + * [7:6] for port1 ... [17:16] for port6. Use virtual port
3494 + * define to handle this.
3496 +#define QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(_i) (4 + (_i) * 2)
3497 +#define QCA8K_VTU_FUNC0_EG_MODE_MASK GENMASK(1, 0)
3498 +#define QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(_i) (GENMASK(1, 0) << QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(_i))
3499 +#define QCA8K_VTU_FUNC0_EG_MODE_UNMOD FIELD_PREP(QCA8K_VTU_FUNC0_EG_MODE_MASK, 0x0)
3500 +#define QCA8K_VTU_FUNC0_EG_MODE_PORT_UNMOD(_i) (QCA8K_VTU_FUNC0_EG_MODE_UNMOD << QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(_i))
3501 +#define QCA8K_VTU_FUNC0_EG_MODE_UNTAG FIELD_PREP(QCA8K_VTU_FUNC0_EG_MODE_MASK, 0x1)
3502 +#define QCA8K_VTU_FUNC0_EG_MODE_PORT_UNTAG(_i) (QCA8K_VTU_FUNC0_EG_MODE_UNTAG << QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(_i))
3503 +#define QCA8K_VTU_FUNC0_EG_MODE_TAG FIELD_PREP(QCA8K_VTU_FUNC0_EG_MODE_MASK, 0x2)
3504 +#define QCA8K_VTU_FUNC0_EG_MODE_PORT_TAG(_i) (QCA8K_VTU_FUNC0_EG_MODE_TAG << QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(_i))
3505 +#define QCA8K_VTU_FUNC0_EG_MODE_NOT FIELD_PREP(QCA8K_VTU_FUNC0_EG_MODE_MASK, 0x3)
3506 +#define QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(_i) (QCA8K_VTU_FUNC0_EG_MODE_NOT << QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(_i))
3507 +#define QCA8K_REG_VTU_FUNC1 0x614
3508 +#define QCA8K_VTU_FUNC1_BUSY BIT(31)
3509 +#define QCA8K_VTU_FUNC1_VID_MASK GENMASK(27, 16)
3510 +#define QCA8K_VTU_FUNC1_FULL BIT(4)
3511 +#define QCA8K_REG_ATU_CTRL 0x618
3512 +#define QCA8K_ATU_AGE_TIME_MASK GENMASK(15, 0)
3513 +#define QCA8K_ATU_AGE_TIME(x) FIELD_PREP(QCA8K_ATU_AGE_TIME_MASK, (x))
3514 +#define QCA8K_REG_GLOBAL_FW_CTRL0 0x620
3515 +#define QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN BIT(10)
3516 +#define QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM GENMASK(7, 4)
3517 +#define QCA8K_REG_GLOBAL_FW_CTRL1 0x624
3518 +#define QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_MASK GENMASK(30, 24)
3519 +#define QCA8K_GLOBAL_FW_CTRL1_BC_DP_MASK GENMASK(22, 16)
3520 +#define QCA8K_GLOBAL_FW_CTRL1_MC_DP_MASK GENMASK(14, 8)
3521 +#define QCA8K_GLOBAL_FW_CTRL1_UC_DP_MASK GENMASK(6, 0)
3522 +#define QCA8K_PORT_LOOKUP_CTRL(_i) (0x660 + (_i) * 0xc)
3523 +#define QCA8K_PORT_LOOKUP_MEMBER GENMASK(6, 0)
3524 +#define QCA8K_PORT_LOOKUP_VLAN_MODE_MASK GENMASK(9, 8)
3525 +#define QCA8K_PORT_LOOKUP_VLAN_MODE(x) FIELD_PREP(QCA8K_PORT_LOOKUP_VLAN_MODE_MASK, x)
3526 +#define QCA8K_PORT_LOOKUP_VLAN_MODE_NONE QCA8K_PORT_LOOKUP_VLAN_MODE(0x0)
3527 +#define QCA8K_PORT_LOOKUP_VLAN_MODE_FALLBACK QCA8K_PORT_LOOKUP_VLAN_MODE(0x1)
3528 +#define QCA8K_PORT_LOOKUP_VLAN_MODE_CHECK QCA8K_PORT_LOOKUP_VLAN_MODE(0x2)
3529 +#define QCA8K_PORT_LOOKUP_VLAN_MODE_SECURE QCA8K_PORT_LOOKUP_VLAN_MODE(0x3)
3530 +#define QCA8K_PORT_LOOKUP_STATE_MASK GENMASK(18, 16)
3531 +#define QCA8K_PORT_LOOKUP_STATE(x) FIELD_PREP(QCA8K_PORT_LOOKUP_STATE_MASK, x)
3532 +#define QCA8K_PORT_LOOKUP_STATE_DISABLED QCA8K_PORT_LOOKUP_STATE(0x0)
3533 +#define QCA8K_PORT_LOOKUP_STATE_BLOCKING QCA8K_PORT_LOOKUP_STATE(0x1)
3534 +#define QCA8K_PORT_LOOKUP_STATE_LISTENING QCA8K_PORT_LOOKUP_STATE(0x2)
3535 +#define QCA8K_PORT_LOOKUP_STATE_LEARNING QCA8K_PORT_LOOKUP_STATE(0x3)
3536 +#define QCA8K_PORT_LOOKUP_STATE_FORWARD QCA8K_PORT_LOOKUP_STATE(0x4)
3537 +#define QCA8K_PORT_LOOKUP_LEARN BIT(20)
3538 +#define QCA8K_PORT_LOOKUP_ING_MIRROR_EN BIT(25)
3540 +#define QCA8K_REG_GOL_TRUNK_CTRL0 0x700
3541 +/* 4 max trunk first
3542 + * first 6 bit for member bitmap
3543 + * 7th bit is to enable trunk port
3545 +#define QCA8K_REG_GOL_TRUNK_SHIFT(_i) ((_i) * 8)
3546 +#define QCA8K_REG_GOL_TRUNK_EN_MASK BIT(7)
3547 +#define QCA8K_REG_GOL_TRUNK_EN(_i) (QCA8K_REG_GOL_TRUNK_EN_MASK << QCA8K_REG_GOL_TRUNK_SHIFT(_i))
3548 +#define QCA8K_REG_GOL_TRUNK_MEMBER_MASK GENMASK(6, 0)
3549 +#define QCA8K_REG_GOL_TRUNK_MEMBER(_i) (QCA8K_REG_GOL_TRUNK_MEMBER_MASK << QCA8K_REG_GOL_TRUNK_SHIFT(_i))
3550 +/* 0x704 for TRUNK 0-1 --- 0x708 for TRUNK 2-3 */
3551 +#define QCA8K_REG_GOL_TRUNK_CTRL(_i) (0x704 + (((_i) / 2) * 4))
3552 +#define QCA8K_REG_GOL_TRUNK_ID_MEM_ID_MASK GENMASK(3, 0)
3553 +#define QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK BIT(3)
3554 +#define QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT_MASK GENMASK(2, 0)
3555 +#define QCA8K_REG_GOL_TRUNK_ID_SHIFT(_i) (((_i) / 2) * 16)
3556 +#define QCA8K_REG_GOL_MEM_ID_SHIFT(_i) ((_i) * 4)
3557 +/* Complex shift: FIRST shift for port THEN shift for trunk */
3558 +#define QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(_i, _j) (QCA8K_REG_GOL_MEM_ID_SHIFT(_j) + QCA8K_REG_GOL_TRUNK_ID_SHIFT(_i))
3559 +#define QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN(_i, _j) (QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(_i, _j))
3560 +#define QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT(_i, _j) (QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT_MASK << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(_i, _j))
3562 +#define QCA8K_REG_GLOBAL_FC_THRESH 0x800
3563 +#define QCA8K_GLOBAL_FC_GOL_XON_THRES_MASK GENMASK(24, 16)
3564 +#define QCA8K_GLOBAL_FC_GOL_XON_THRES(x) FIELD_PREP(QCA8K_GLOBAL_FC_GOL_XON_THRES_MASK, x)
3565 +#define QCA8K_GLOBAL_FC_GOL_XOFF_THRES_MASK GENMASK(8, 0)
3566 +#define QCA8K_GLOBAL_FC_GOL_XOFF_THRES(x) FIELD_PREP(QCA8K_GLOBAL_FC_GOL_XOFF_THRES_MASK, x)
3568 +#define QCA8K_REG_PORT_HOL_CTRL0(_i) (0x970 + (_i) * 0x8)
3569 +#define QCA8K_PORT_HOL_CTRL0_EG_PRI0_BUF_MASK GENMASK(3, 0)
3570 +#define QCA8K_PORT_HOL_CTRL0_EG_PRI0(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PRI0_BUF_MASK, x)
3571 +#define QCA8K_PORT_HOL_CTRL0_EG_PRI1_BUF_MASK GENMASK(7, 4)
3572 +#define QCA8K_PORT_HOL_CTRL0_EG_PRI1(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PRI1_BUF_MASK, x)
3573 +#define QCA8K_PORT_HOL_CTRL0_EG_PRI2_BUF_MASK GENMASK(11, 8)
3574 +#define QCA8K_PORT_HOL_CTRL0_EG_PRI2(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PRI2_BUF_MASK, x)
3575 +#define QCA8K_PORT_HOL_CTRL0_EG_PRI3_BUF_MASK GENMASK(15, 12)
3576 +#define QCA8K_PORT_HOL_CTRL0_EG_PRI3(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PRI3_BUF_MASK, x)
3577 +#define QCA8K_PORT_HOL_CTRL0_EG_PRI4_BUF_MASK GENMASK(19, 16)
3578 +#define QCA8K_PORT_HOL_CTRL0_EG_PRI4(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PRI4_BUF_MASK, x)
3579 +#define QCA8K_PORT_HOL_CTRL0_EG_PRI5_BUF_MASK GENMASK(23, 20)
3580 +#define QCA8K_PORT_HOL_CTRL0_EG_PRI5(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PRI5_BUF_MASK, x)
3581 +#define QCA8K_PORT_HOL_CTRL0_EG_PORT_BUF_MASK GENMASK(29, 24)
3582 +#define QCA8K_PORT_HOL_CTRL0_EG_PORT(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PORT_BUF_MASK, x)
3584 +#define QCA8K_REG_PORT_HOL_CTRL1(_i) (0x974 + (_i) * 0x8)
3585 +#define QCA8K_PORT_HOL_CTRL1_ING_BUF_MASK GENMASK(3, 0)
3586 +#define QCA8K_PORT_HOL_CTRL1_ING(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL1_ING_BUF_MASK, x)
3587 +#define QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN BIT(6)
3588 +#define QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN BIT(7)
3589 +#define QCA8K_PORT_HOL_CTRL1_WRED_EN BIT(8)
3590 +#define QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN BIT(16)
3592 +/* Pkt edit registers */
3593 +#define QCA8K_EGREES_VLAN_PORT_SHIFT(_i) (16 * ((_i) % 2))
3594 +#define QCA8K_EGREES_VLAN_PORT_MASK(_i) (GENMASK(11, 0) << QCA8K_EGREES_VLAN_PORT_SHIFT(_i))
3595 +#define QCA8K_EGREES_VLAN_PORT(_i, x) ((x) << QCA8K_EGREES_VLAN_PORT_SHIFT(_i))
3596 +#define QCA8K_EGRESS_VLAN(x) (0x0c70 + (4 * (x / 2)))
3599 +#define QCA8K_HROUTER_CONTROL 0xe00
3600 +#define QCA8K_HROUTER_CONTROL_GLB_LOCKTIME_M GENMASK(17, 16)
3601 +#define QCA8K_HROUTER_CONTROL_GLB_LOCKTIME_S 16
3602 +#define QCA8K_HROUTER_CONTROL_ARP_AGE_MODE 1
3603 +#define QCA8K_HROUTER_PBASED_CONTROL1 0xe08
3604 +#define QCA8K_HROUTER_PBASED_CONTROL2 0xe0c
3605 +#define QCA8K_HNAT_CONTROL 0xe38
3607 +/* MIB registers */
3608 +#define QCA8K_PORT_MIB_COUNTER(_i) (0x1000 + (_i) * 0x100)
3610 +/* QCA specific MII registers */
3611 +#define MII_ATH_MMD_ADDR 0x0d
3612 +#define MII_ATH_MMD_DATA 0x0e
3615 + QCA8K_PORT_SPEED_10M = 0,
3616 + QCA8K_PORT_SPEED_100M = 1,
3617 + QCA8K_PORT_SPEED_1000M = 2,
3618 + QCA8K_PORT_SPEED_ERR = 3,
3621 +enum qca8k_fdb_cmd {
3622 + QCA8K_FDB_FLUSH = 1,
3623 + QCA8K_FDB_LOAD = 2,
3624 + QCA8K_FDB_PURGE = 3,
3625 + QCA8K_FDB_FLUSH_PORT = 5,
3626 + QCA8K_FDB_NEXT = 6,
3627 + QCA8K_FDB_SEARCH = 7,
3630 +enum qca8k_vlan_cmd {
3631 + QCA8K_VLAN_FLUSH = 1,
3632 + QCA8K_VLAN_LOAD = 2,
3633 + QCA8K_VLAN_PURGE = 3,
3634 + QCA8K_VLAN_REMOVE_PORT = 4,
3635 + QCA8K_VLAN_NEXT = 5,
3636 + QCA8K_VLAN_READ = 6,
3639 +enum qca8k_mid_cmd {
3640 + QCA8K_MIB_FLUSH = 1,
3641 + QCA8K_MIB_FLUSH_PORT = 2,
3642 + QCA8K_MIB_CAST = 3,
3645 +struct qca8k_match_data {
3647 + bool reduced_package;
3656 +struct qca8k_mgmt_eth_data {
3657 + struct completion rw_done;
3658 + struct mutex mutex; /* Enforce one mdio read/write at time */
3664 +struct qca8k_mib_eth_data {
3665 + struct completion rw_done;
3666 + struct mutex mutex; /* Process one command at time */
3667 + refcount_t port_parsed; /* Counter to track parsed port */
3669 + u64 *data; /* pointer to ethtool data */
3672 +struct qca8k_ports_config {
3673 + bool sgmii_rx_clk_falling_edge;
3674 + bool sgmii_tx_clk_falling_edge;
3675 + bool sgmii_enable_pll;
3676 + u8 rgmii_rx_delay[QCA8K_NUM_CPU_PORTS]; /* 0: CPU port0, 1: CPU port6 */
3677 + u8 rgmii_tx_delay[QCA8K_NUM_CPU_PORTS]; /* 0: CPU port0, 1: CPU port6 */
3680 +struct qca8k_mdio_cache {
3681 +/* The 32bit switch registers are accessed indirectly. To achieve this we need
3682 + * to set the page of the register. Track the last page that was set to reduce
3686 +/* lo and hi can also be cached and from Documentation we can skip one
3687 + * extra mdio write if lo or hi didn't change.
3693 +struct qca8k_priv {
3695 + u8 switch_revision;
3699 +	/* Each bit corresponds to a port. This switch can support a max of 7 ports.
3700 + * Bit 1: port enabled. Bit 0: port disabled.
3702 + u8 port_enabled_map;
3703 + struct qca8k_ports_config ports_config;
3704 + struct regmap *regmap;
3705 + struct mii_bus *bus;
3706 + struct dsa_switch *ds;
3707 + struct mutex reg_mutex;
3708 + struct device *dev;
3709 + struct gpio_desc *reset_gpio;
3710 + struct net_device *mgmt_master; /* Track if mdio/mib Ethernet is available */
3711 + struct qca8k_mgmt_eth_data mgmt_eth_data;
3712 + struct qca8k_mib_eth_data mib_eth_data;
3713 + struct qca8k_mdio_cache mdio_cache;
3716 +struct qca8k_mib_desc {
3717 + unsigned int size;
3718 + unsigned int offset;
3729 +#endif /* __QCA8K_H */
3730 --- a/drivers/net/dsa/qca8k.c
3733 -// SPDX-License-Identifier: GPL-2.0
3735 - * Copyright (C) 2009 Felix Fietkau <nbd@nbd.name>
3736 - * Copyright (C) 2011-2012 Gabor Juhos <juhosg@openwrt.org>
3737 - * Copyright (c) 2015, 2019, The Linux Foundation. All rights reserved.
3738 - * Copyright (c) 2016 John Crispin <john@phrozen.org>
3741 -#include <linux/module.h>
3742 -#include <linux/phy.h>
3743 -#include <linux/netdevice.h>
3744 -#include <linux/bitfield.h>
3745 -#include <linux/regmap.h>
3746 -#include <net/dsa.h>
3747 -#include <linux/of_net.h>
3748 -#include <linux/of_mdio.h>
3749 -#include <linux/of_platform.h>
3750 -#include <linux/if_bridge.h>
3751 -#include <linux/mdio.h>
3752 -#include <linux/phylink.h>
3753 -#include <linux/gpio/consumer.h>
3754 -#include <linux/etherdevice.h>
3755 -#include <linux/dsa/tag_qca.h>
3759 -#define MIB_DESC(_s, _o, _n) \
3766 -static const struct qca8k_mib_desc ar8327_mib[] = {
3767 - MIB_DESC(1, 0x00, "RxBroad"),
3768 - MIB_DESC(1, 0x04, "RxPause"),
3769 - MIB_DESC(1, 0x08, "RxMulti"),
3770 - MIB_DESC(1, 0x0c, "RxFcsErr"),
3771 - MIB_DESC(1, 0x10, "RxAlignErr"),
3772 - MIB_DESC(1, 0x14, "RxRunt"),
3773 - MIB_DESC(1, 0x18, "RxFragment"),
3774 - MIB_DESC(1, 0x1c, "Rx64Byte"),
3775 - MIB_DESC(1, 0x20, "Rx128Byte"),
3776 - MIB_DESC(1, 0x24, "Rx256Byte"),
3777 - MIB_DESC(1, 0x28, "Rx512Byte"),
3778 - MIB_DESC(1, 0x2c, "Rx1024Byte"),
3779 - MIB_DESC(1, 0x30, "Rx1518Byte"),
3780 - MIB_DESC(1, 0x34, "RxMaxByte"),
3781 - MIB_DESC(1, 0x38, "RxTooLong"),
3782 - MIB_DESC(2, 0x3c, "RxGoodByte"),
3783 - MIB_DESC(2, 0x44, "RxBadByte"),
3784 - MIB_DESC(1, 0x4c, "RxOverFlow"),
3785 - MIB_DESC(1, 0x50, "Filtered"),
3786 - MIB_DESC(1, 0x54, "TxBroad"),
3787 - MIB_DESC(1, 0x58, "TxPause"),
3788 - MIB_DESC(1, 0x5c, "TxMulti"),
3789 - MIB_DESC(1, 0x60, "TxUnderRun"),
3790 - MIB_DESC(1, 0x64, "Tx64Byte"),
3791 - MIB_DESC(1, 0x68, "Tx128Byte"),
3792 - MIB_DESC(1, 0x6c, "Tx256Byte"),
3793 - MIB_DESC(1, 0x70, "Tx512Byte"),
3794 - MIB_DESC(1, 0x74, "Tx1024Byte"),
3795 - MIB_DESC(1, 0x78, "Tx1518Byte"),
3796 - MIB_DESC(1, 0x7c, "TxMaxByte"),
3797 - MIB_DESC(1, 0x80, "TxOverSize"),
3798 - MIB_DESC(2, 0x84, "TxByte"),
3799 - MIB_DESC(1, 0x8c, "TxCollision"),
3800 - MIB_DESC(1, 0x90, "TxAbortCol"),
3801 - MIB_DESC(1, 0x94, "TxMultiCol"),
3802 - MIB_DESC(1, 0x98, "TxSingleCol"),
3803 - MIB_DESC(1, 0x9c, "TxExcDefer"),
3804 - MIB_DESC(1, 0xa0, "TxDefer"),
3805 - MIB_DESC(1, 0xa4, "TxLateCol"),
3806 - MIB_DESC(1, 0xa8, "RXUnicast"),
3807 - MIB_DESC(1, 0xac, "TXUnicast"),
3811 -qca8k_split_addr(u32 regaddr, u16 *r1, u16 *r2, u16 *page)
3814 - *r1 = regaddr & 0x1e;
3817 - *r2 = regaddr & 0x7;
3820 - *page = regaddr & 0x3ff;
3824 -qca8k_set_lo(struct qca8k_priv *priv, int phy_id, u32 regnum, u16 lo)
3826 - u16 *cached_lo = &priv->mdio_cache.lo;
3827 - struct mii_bus *bus = priv->bus;
3830 - if (lo == *cached_lo)
3833 - ret = bus->write(bus, phy_id, regnum, lo);
3835 - dev_err_ratelimited(&bus->dev,
3836 - "failed to write qca8k 32bit lo register\n");
3843 -qca8k_set_hi(struct qca8k_priv *priv, int phy_id, u32 regnum, u16 hi)
3845 - u16 *cached_hi = &priv->mdio_cache.hi;
3846 - struct mii_bus *bus = priv->bus;
3849 - if (hi == *cached_hi)
3852 - ret = bus->write(bus, phy_id, regnum, hi);
3854 - dev_err_ratelimited(&bus->dev,
3855 - "failed to write qca8k 32bit hi register\n");
3862 -qca8k_mii_read32(struct mii_bus *bus, int phy_id, u32 regnum, u32 *val)
3866 - ret = bus->read(bus, phy_id, regnum);
3869 - ret = bus->read(bus, phy_id, regnum + 1);
3870 - *val |= ret << 16;
3874 - dev_err_ratelimited(&bus->dev,
3875 - "failed to read qca8k 32bit register\n");
3884 -qca8k_mii_write32(struct qca8k_priv *priv, int phy_id, u32 regnum, u32 val)
3889 - lo = val & 0xffff;
3890 - hi = (u16)(val >> 16);
3892 - ret = qca8k_set_lo(priv, phy_id, regnum, lo);
3894 - ret = qca8k_set_hi(priv, phy_id, regnum + 1, hi);
3898 -qca8k_set_page(struct qca8k_priv *priv, u16 page)
3900 - u16 *cached_page = &priv->mdio_cache.page;
3901 - struct mii_bus *bus = priv->bus;
3904 - if (page == *cached_page)
3907 - ret = bus->write(bus, 0x18, 0, page);
3909 - dev_err_ratelimited(&bus->dev,
3910 - "failed to set qca8k page\n");
3914 - *cached_page = page;
3915 - usleep_range(1000, 2000);
3920 -qca8k_read(struct qca8k_priv *priv, u32 reg, u32 *val)
3922 - return regmap_read(priv->regmap, reg, val);
3926 -qca8k_write(struct qca8k_priv *priv, u32 reg, u32 val)
3928 - return regmap_write(priv->regmap, reg, val);
3932 -qca8k_rmw(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val)
3934 - return regmap_update_bits(priv->regmap, reg, mask, write_val);
3937 -static void qca8k_rw_reg_ack_handler(struct dsa_switch *ds, struct sk_buff *skb)
3939 - struct qca8k_mgmt_eth_data *mgmt_eth_data;
3940 - struct qca8k_priv *priv = ds->priv;
3941 - struct qca_mgmt_ethhdr *mgmt_ethhdr;
3944 - mgmt_ethhdr = (struct qca_mgmt_ethhdr *)skb_mac_header(skb);
3945 - mgmt_eth_data = &priv->mgmt_eth_data;
3947 - cmd = FIELD_GET(QCA_HDR_MGMT_CMD, mgmt_ethhdr->command);
3948 - len = FIELD_GET(QCA_HDR_MGMT_LENGTH, mgmt_ethhdr->command);
3950 - /* Make sure the seq match the requested packet */
3951 - if (mgmt_ethhdr->seq == mgmt_eth_data->seq)
3952 - mgmt_eth_data->ack = true;
3954 - if (cmd == MDIO_READ) {
3955 - mgmt_eth_data->data[0] = mgmt_ethhdr->mdio_data;
3957 - /* Get the rest of the 12 byte of data.
3958 - * The read/write function will extract the requested data.
3960 - if (len > QCA_HDR_MGMT_DATA1_LEN)
3961 - memcpy(mgmt_eth_data->data + 1, skb->data,
3962 - QCA_HDR_MGMT_DATA2_LEN);
3965 - complete(&mgmt_eth_data->rw_done);
3968 -static struct sk_buff *qca8k_alloc_mdio_header(enum mdio_cmd cmd, u32 reg, u32 *val,
3969 - int priority, unsigned int len)
3971 - struct qca_mgmt_ethhdr *mgmt_ethhdr;
3972 - unsigned int real_len;
3973 - struct sk_buff *skb;
3977 - skb = dev_alloc_skb(QCA_HDR_MGMT_PKT_LEN);
3981 - /* Max value for len reg is 15 (0xf) but the switch actually return 16 byte
3982 - * Actually for some reason the steps are:
3984 - * 1-4: first 4 byte
3985 - * 5-6: first 12 byte
3986 - * 7-15: all 16 byte
3993 - skb_reset_mac_header(skb);
3994 - skb_set_network_header(skb, skb->len);
3996 - mgmt_ethhdr = skb_push(skb, QCA_HDR_MGMT_HEADER_LEN + QCA_HDR_LEN);
3998 - hdr = FIELD_PREP(QCA_HDR_XMIT_VERSION, QCA_HDR_VERSION);
3999 - hdr |= FIELD_PREP(QCA_HDR_XMIT_PRIORITY, priority);
4000 - hdr |= QCA_HDR_XMIT_FROM_CPU;
4001 - hdr |= FIELD_PREP(QCA_HDR_XMIT_DP_BIT, BIT(0));
4002 - hdr |= FIELD_PREP(QCA_HDR_XMIT_CONTROL, QCA_HDR_XMIT_TYPE_RW_REG);
4004 - mgmt_ethhdr->command = FIELD_PREP(QCA_HDR_MGMT_ADDR, reg);
4005 - mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_LENGTH, real_len);
4006 - mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_CMD, cmd);
4007 - mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_CHECK_CODE,
4008 - QCA_HDR_MGMT_CHECK_CODE_VAL);
4010 - if (cmd == MDIO_WRITE)
4011 - mgmt_ethhdr->mdio_data = *val;
4013 - mgmt_ethhdr->hdr = htons(hdr);
4015 - data2 = skb_put_zero(skb, QCA_HDR_MGMT_DATA2_LEN + QCA_HDR_MGMT_PADDING_LEN);
4016 - if (cmd == MDIO_WRITE && len > QCA_HDR_MGMT_DATA1_LEN)
4017 - memcpy(data2, val + 1, len - QCA_HDR_MGMT_DATA1_LEN);
4022 -static void qca8k_mdio_header_fill_seq_num(struct sk_buff *skb, u32 seq_num)
4024 - struct qca_mgmt_ethhdr *mgmt_ethhdr;
4026 - mgmt_ethhdr = (struct qca_mgmt_ethhdr *)skb->data;
4027 - mgmt_ethhdr->seq = FIELD_PREP(QCA_HDR_MGMT_SEQ_NUM, seq_num);
4030 -static int qca8k_read_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
4032 - struct qca8k_mgmt_eth_data *mgmt_eth_data = &priv->mgmt_eth_data;
4033 - struct sk_buff *skb;
4037 - skb = qca8k_alloc_mdio_header(MDIO_READ, reg, NULL,
4038 - QCA8K_ETHERNET_MDIO_PRIORITY, len);
4042 - mutex_lock(&mgmt_eth_data->mutex);
4044 - /* Check mgmt_master if is operational */
4045 - if (!priv->mgmt_master) {
4047 - mutex_unlock(&mgmt_eth_data->mutex);
4051 - skb->dev = priv->mgmt_master;
4053 - reinit_completion(&mgmt_eth_data->rw_done);
4055 - /* Increment seq_num and set it in the mdio pkt */
4056 - mgmt_eth_data->seq++;
4057 - qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
4058 - mgmt_eth_data->ack = false;
4060 - dev_queue_xmit(skb);
4062 - ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
4063 - msecs_to_jiffies(QCA8K_ETHERNET_TIMEOUT));
4065 - *val = mgmt_eth_data->data[0];
4066 - if (len > QCA_HDR_MGMT_DATA1_LEN)
4067 - memcpy(val + 1, mgmt_eth_data->data + 1, len - QCA_HDR_MGMT_DATA1_LEN);
4069 - ack = mgmt_eth_data->ack;
4071 - mutex_unlock(&mgmt_eth_data->mutex);
4074 - return -ETIMEDOUT;
4082 -static int qca8k_write_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
4084 - struct qca8k_mgmt_eth_data *mgmt_eth_data = &priv->mgmt_eth_data;
4085 - struct sk_buff *skb;
4089 - skb = qca8k_alloc_mdio_header(MDIO_WRITE, reg, val,
4090 - QCA8K_ETHERNET_MDIO_PRIORITY, len);
4094 - mutex_lock(&mgmt_eth_data->mutex);
4096 - /* Check mgmt_master if is operational */
4097 - if (!priv->mgmt_master) {
4099 - mutex_unlock(&mgmt_eth_data->mutex);
4103 - skb->dev = priv->mgmt_master;
4105 - reinit_completion(&mgmt_eth_data->rw_done);
4107 - /* Increment seq_num and set it in the mdio pkt */
4108 - mgmt_eth_data->seq++;
4109 - qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
4110 - mgmt_eth_data->ack = false;
4112 - dev_queue_xmit(skb);
4114 - ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
4115 - msecs_to_jiffies(QCA8K_ETHERNET_TIMEOUT));
4117 - ack = mgmt_eth_data->ack;
4119 - mutex_unlock(&mgmt_eth_data->mutex);
4122 - return -ETIMEDOUT;
4131 -qca8k_regmap_update_bits_eth(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val)
4136 - ret = qca8k_read_eth(priv, reg, &val, sizeof(val));
4143 - return qca8k_write_eth(priv, reg, &val, sizeof(val));
4147 -qca8k_bulk_read(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
4149 - int i, count = len / sizeof(u32), ret;
4151 - if (priv->mgmt_master && !qca8k_read_eth(priv, reg, val, len))
4154 - for (i = 0; i < count; i++) {
4155 - ret = regmap_read(priv->regmap, reg + (i * 4), val + i);
4164 -qca8k_bulk_write(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
4166 - int i, count = len / sizeof(u32), ret;
4169 - if (priv->mgmt_master && !qca8k_write_eth(priv, reg, val, len))
4172 - for (i = 0; i < count; i++) {
4175 - ret = regmap_write(priv->regmap, reg + (i * 4), tmp);
4184 -qca8k_regmap_read(void *ctx, uint32_t reg, uint32_t *val)
4186 - struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
4187 - struct mii_bus *bus = priv->bus;
4191 - if (!qca8k_read_eth(priv, reg, val, sizeof(*val)))
4194 - qca8k_split_addr(reg, &r1, &r2, &page);
4196 - mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
4198 - ret = qca8k_set_page(priv, page);
4202 - ret = qca8k_mii_read32(bus, 0x10 | r2, r1, val);
4205 - mutex_unlock(&bus->mdio_lock);
4210 -qca8k_regmap_write(void *ctx, uint32_t reg, uint32_t val)
4212 - struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
4213 - struct mii_bus *bus = priv->bus;
4217 - if (!qca8k_write_eth(priv, reg, &val, sizeof(val)))
4220 - qca8k_split_addr(reg, &r1, &r2, &page);
4222 - mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
4224 - ret = qca8k_set_page(priv, page);
4228 - qca8k_mii_write32(priv, 0x10 | r2, r1, val);
4231 - mutex_unlock(&bus->mdio_lock);
4236 -qca8k_regmap_update_bits(void *ctx, uint32_t reg, uint32_t mask, uint32_t write_val)
4238 - struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
4239 - struct mii_bus *bus = priv->bus;
4244 - if (!qca8k_regmap_update_bits_eth(priv, reg, mask, write_val))
4247 - qca8k_split_addr(reg, &r1, &r2, &page);
4249 - mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
4251 - ret = qca8k_set_page(priv, page);
4255 - ret = qca8k_mii_read32(bus, 0x10 | r2, r1, &val);
4261 - qca8k_mii_write32(priv, 0x10 | r2, r1, val);
4264 - mutex_unlock(&bus->mdio_lock);
4269 -static const struct regmap_range qca8k_readable_ranges[] = {
4270 - regmap_reg_range(0x0000, 0x00e4), /* Global control */
4271 - regmap_reg_range(0x0100, 0x0168), /* EEE control */
4272 - regmap_reg_range(0x0200, 0x0270), /* Parser control */
4273 - regmap_reg_range(0x0400, 0x0454), /* ACL */
4274 - regmap_reg_range(0x0600, 0x0718), /* Lookup */
4275 - regmap_reg_range(0x0800, 0x0b70), /* QM */
4276 - regmap_reg_range(0x0c00, 0x0c80), /* PKT */
4277 - regmap_reg_range(0x0e00, 0x0e98), /* L3 */
4278 - regmap_reg_range(0x1000, 0x10ac), /* MIB - Port0 */
4279 - regmap_reg_range(0x1100, 0x11ac), /* MIB - Port1 */
4280 - regmap_reg_range(0x1200, 0x12ac), /* MIB - Port2 */
4281 - regmap_reg_range(0x1300, 0x13ac), /* MIB - Port3 */
4282 - regmap_reg_range(0x1400, 0x14ac), /* MIB - Port4 */
4283 - regmap_reg_range(0x1500, 0x15ac), /* MIB - Port5 */
4284 - regmap_reg_range(0x1600, 0x16ac), /* MIB - Port6 */
4288 -static const struct regmap_access_table qca8k_readable_table = {
4289 - .yes_ranges = qca8k_readable_ranges,
4290 - .n_yes_ranges = ARRAY_SIZE(qca8k_readable_ranges),
4293 -static struct regmap_config qca8k_regmap_config = {
4297 - .max_register = 0x16ac, /* end MIB - Port6 range */
4298 - .reg_read = qca8k_regmap_read,
4299 - .reg_write = qca8k_regmap_write,
4300 - .reg_update_bits = qca8k_regmap_update_bits,
4301 - .rd_table = &qca8k_readable_table,
4302 - .disable_locking = true, /* Locking is handled by qca8k read/write */
4303 - .cache_type = REGCACHE_NONE, /* Explicitly disable CACHE */
4307 -qca8k_busy_wait(struct qca8k_priv *priv, u32 reg, u32 mask)
4311 - return regmap_read_poll_timeout(priv->regmap, reg, val, !(val & mask), 0,
4312 - QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC);
4316 -qca8k_fdb_read(struct qca8k_priv *priv, struct qca8k_fdb *fdb)
4321 - /* load the ARL table into an array */
4322 - ret = qca8k_bulk_read(priv, QCA8K_REG_ATU_DATA0, reg, sizeof(reg));
4327 - fdb->vid = FIELD_GET(QCA8K_ATU_VID_MASK, reg[2]);
4328 - /* aging - 67:64 */
4329 - fdb->aging = FIELD_GET(QCA8K_ATU_STATUS_MASK, reg[2]);
4330 - /* portmask - 54:48 */
4331 - fdb->port_mask = FIELD_GET(QCA8K_ATU_PORT_MASK, reg[1]);
4333 - fdb->mac[0] = FIELD_GET(QCA8K_ATU_ADDR0_MASK, reg[1]);
4334 - fdb->mac[1] = FIELD_GET(QCA8K_ATU_ADDR1_MASK, reg[1]);
4335 - fdb->mac[2] = FIELD_GET(QCA8K_ATU_ADDR2_MASK, reg[0]);
4336 - fdb->mac[3] = FIELD_GET(QCA8K_ATU_ADDR3_MASK, reg[0]);
4337 - fdb->mac[4] = FIELD_GET(QCA8K_ATU_ADDR4_MASK, reg[0]);
4338 - fdb->mac[5] = FIELD_GET(QCA8K_ATU_ADDR5_MASK, reg[0]);
4344 -qca8k_fdb_write(struct qca8k_priv *priv, u16 vid, u8 port_mask, const u8 *mac,
4347 - u32 reg[3] = { 0 };
4350 - reg[2] = FIELD_PREP(QCA8K_ATU_VID_MASK, vid);
4351 - /* aging - 67:64 */
4352 - reg[2] |= FIELD_PREP(QCA8K_ATU_STATUS_MASK, aging);
4353 - /* portmask - 54:48 */
4354 - reg[1] = FIELD_PREP(QCA8K_ATU_PORT_MASK, port_mask);
4356 - reg[1] |= FIELD_PREP(QCA8K_ATU_ADDR0_MASK, mac[0]);
4357 - reg[1] |= FIELD_PREP(QCA8K_ATU_ADDR1_MASK, mac[1]);
4358 - reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR2_MASK, mac[2]);
4359 - reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR3_MASK, mac[3]);
4360 - reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR4_MASK, mac[4]);
4361 - reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR5_MASK, mac[5]);
4363 - /* load the array into the ARL table */
4364 - qca8k_bulk_write(priv, QCA8K_REG_ATU_DATA0, reg, sizeof(reg));
4368 -qca8k_fdb_access(struct qca8k_priv *priv, enum qca8k_fdb_cmd cmd, int port)
4373 - /* Set the command and FDB index */
4374 - reg = QCA8K_ATU_FUNC_BUSY;
4377 - reg |= QCA8K_ATU_FUNC_PORT_EN;
4378 - reg |= FIELD_PREP(QCA8K_ATU_FUNC_PORT_MASK, port);
4381 - /* Write the function register triggering the table access */
4382 - ret = qca8k_write(priv, QCA8K_REG_ATU_FUNC, reg);
4386 - /* wait for completion */
4387 - ret = qca8k_busy_wait(priv, QCA8K_REG_ATU_FUNC, QCA8K_ATU_FUNC_BUSY);
4391 - /* Check for table full violation when adding an entry */
4392 - if (cmd == QCA8K_FDB_LOAD) {
4393 - ret = qca8k_read(priv, QCA8K_REG_ATU_FUNC, ®);
4396 - if (reg & QCA8K_ATU_FUNC_FULL)
4404 -qca8k_fdb_next(struct qca8k_priv *priv, struct qca8k_fdb *fdb, int port)
4408 - qca8k_fdb_write(priv, fdb->vid, fdb->port_mask, fdb->mac, fdb->aging);
4409 - ret = qca8k_fdb_access(priv, QCA8K_FDB_NEXT, port);
4413 - return qca8k_fdb_read(priv, fdb);
4417 -qca8k_fdb_add(struct qca8k_priv *priv, const u8 *mac, u16 port_mask,
4418 - u16 vid, u8 aging)
4422 - mutex_lock(&priv->reg_mutex);
4423 - qca8k_fdb_write(priv, vid, port_mask, mac, aging);
4424 - ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
4425 - mutex_unlock(&priv->reg_mutex);
4431 -qca8k_fdb_del(struct qca8k_priv *priv, const u8 *mac, u16 port_mask, u16 vid)
4435 - mutex_lock(&priv->reg_mutex);
4436 - qca8k_fdb_write(priv, vid, port_mask, mac, 0);
4437 - ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
4438 - mutex_unlock(&priv->reg_mutex);
4444 -qca8k_fdb_flush(struct qca8k_priv *priv)
4446 - mutex_lock(&priv->reg_mutex);
4447 - qca8k_fdb_access(priv, QCA8K_FDB_FLUSH, -1);
4448 - mutex_unlock(&priv->reg_mutex);
4452 -qca8k_fdb_search_and_insert(struct qca8k_priv *priv, u8 port_mask,
4453 - const u8 *mac, u16 vid)
4455 - struct qca8k_fdb fdb = { 0 };
4458 - mutex_lock(&priv->reg_mutex);
4460 - qca8k_fdb_write(priv, vid, 0, mac, 0);
4461 - ret = qca8k_fdb_access(priv, QCA8K_FDB_SEARCH, -1);
4465 - ret = qca8k_fdb_read(priv, &fdb);
4469 - /* Rule exist. Delete first */
4471 - ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
4476 - /* Add port to fdb portmask */
4477 - fdb.port_mask |= port_mask;
4479 - qca8k_fdb_write(priv, vid, fdb.port_mask, mac, fdb.aging);
4480 - ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
4483 - mutex_unlock(&priv->reg_mutex);
4488 -qca8k_fdb_search_and_del(struct qca8k_priv *priv, u8 port_mask,
4489 - const u8 *mac, u16 vid)
4491 - struct qca8k_fdb fdb = { 0 };
4494 - mutex_lock(&priv->reg_mutex);
4496 - qca8k_fdb_write(priv, vid, 0, mac, 0);
4497 - ret = qca8k_fdb_access(priv, QCA8K_FDB_SEARCH, -1);
4501 - /* Rule doesn't exist. Why delete? */
4507 - ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
4511 - /* Only port in the rule is this port. Don't re insert */
4512 - if (fdb.port_mask == port_mask)
4515 - /* Remove port from port mask */
4516 - fdb.port_mask &= ~port_mask;
4518 - qca8k_fdb_write(priv, vid, fdb.port_mask, mac, fdb.aging);
4519 - ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
4522 - mutex_unlock(&priv->reg_mutex);
4527 -qca8k_vlan_access(struct qca8k_priv *priv, enum qca8k_vlan_cmd cmd, u16 vid)
4532 - /* Set the command and VLAN index */
4533 - reg = QCA8K_VTU_FUNC1_BUSY;
4535 - reg |= FIELD_PREP(QCA8K_VTU_FUNC1_VID_MASK, vid);
4537 - /* Write the function register triggering the table access */
4538 - ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC1, reg);
4542 - /* wait for completion */
4543 - ret = qca8k_busy_wait(priv, QCA8K_REG_VTU_FUNC1, QCA8K_VTU_FUNC1_BUSY);
4547 - /* Check for table full violation when adding an entry */
4548 - if (cmd == QCA8K_VLAN_LOAD) {
4549 - ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC1, ®);
4552 - if (reg & QCA8K_VTU_FUNC1_FULL)
4560 -qca8k_vlan_add(struct qca8k_priv *priv, u8 port, u16 vid, bool untagged)
4566 - We do the right thing with VLAN 0 and treat it as untagged while
4567 - preserving the tag on egress.
4572 - mutex_lock(&priv->reg_mutex);
4573 - ret = qca8k_vlan_access(priv, QCA8K_VLAN_READ, vid);
4577 - ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC0, ®);
4580 - reg |= QCA8K_VTU_FUNC0_VALID | QCA8K_VTU_FUNC0_IVL_EN;
4581 - reg &= ~QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(port);
4583 - reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_UNTAG(port);
4585 - reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_TAG(port);
4587 - ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg);
4590 - ret = qca8k_vlan_access(priv, QCA8K_VLAN_LOAD, vid);
4593 - mutex_unlock(&priv->reg_mutex);
4599 -qca8k_vlan_del(struct qca8k_priv *priv, u8 port, u16 vid)
4605 - mutex_lock(&priv->reg_mutex);
4606 - ret = qca8k_vlan_access(priv, QCA8K_VLAN_READ, vid);
4610 - ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC0, ®);
4613 - reg &= ~QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(port);
4614 - reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(port);
4616 - /* Check if we're the last member to be removed */
4618 - for (i = 0; i < QCA8K_NUM_PORTS; i++) {
4619 - mask = QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(i);
4621 - if ((reg & mask) != mask) {
4628 - ret = qca8k_vlan_access(priv, QCA8K_VLAN_PURGE, vid);
4630 - ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg);
4633 - ret = qca8k_vlan_access(priv, QCA8K_VLAN_LOAD, vid);
4637 - mutex_unlock(&priv->reg_mutex);
4643 -qca8k_mib_init(struct qca8k_priv *priv)
4647 - mutex_lock(&priv->reg_mutex);
4648 - ret = regmap_update_bits(priv->regmap, QCA8K_REG_MIB,
4649 - QCA8K_MIB_FUNC | QCA8K_MIB_BUSY,
4650 - FIELD_PREP(QCA8K_MIB_FUNC, QCA8K_MIB_FLUSH) |
4655 - ret = qca8k_busy_wait(priv, QCA8K_REG_MIB, QCA8K_MIB_BUSY);
4659 - ret = regmap_set_bits(priv->regmap, QCA8K_REG_MIB, QCA8K_MIB_CPU_KEEP);
4663 - ret = qca8k_write(priv, QCA8K_REG_MODULE_EN, QCA8K_MODULE_EN_MIB);
4666 - mutex_unlock(&priv->reg_mutex);
4671 -qca8k_port_set_status(struct qca8k_priv *priv, int port, int enable)
4673 - u32 mask = QCA8K_PORT_STATUS_TXMAC | QCA8K_PORT_STATUS_RXMAC;
4675 - /* Port 0 and 6 have no internal PHY */
4676 - if (port > 0 && port < 6)
4677 - mask |= QCA8K_PORT_STATUS_LINK_AUTO;
4680 - regmap_set_bits(priv->regmap, QCA8K_REG_PORT_STATUS(port), mask);
4682 - regmap_clear_bits(priv->regmap, QCA8K_REG_PORT_STATUS(port), mask);
4686 -qca8k_phy_eth_busy_wait(struct qca8k_mgmt_eth_data *mgmt_eth_data,
4687 - struct sk_buff *read_skb, u32 *val)
4689 - struct sk_buff *skb = skb_copy(read_skb, GFP_KERNEL);
4693 - reinit_completion(&mgmt_eth_data->rw_done);
4695 - /* Increment seq_num and set it in the copy pkt */
4696 - mgmt_eth_data->seq++;
4697 - qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
4698 - mgmt_eth_data->ack = false;
4700 - dev_queue_xmit(skb);
4702 - ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
4703 - QCA8K_ETHERNET_TIMEOUT);
4705 - ack = mgmt_eth_data->ack;
4708 - return -ETIMEDOUT;
4713 - *val = mgmt_eth_data->data[0];
4719 -qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy,
4720 - int regnum, u16 data)
4722 - struct sk_buff *write_skb, *clear_skb, *read_skb;
4723 - struct qca8k_mgmt_eth_data *mgmt_eth_data;
4724 - u32 write_val, clear_val = 0, val;
4725 - struct net_device *mgmt_master;
4729 - if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
4732 - mgmt_eth_data = &priv->mgmt_eth_data;
4734 - write_val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
4735 - QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
4736 - QCA8K_MDIO_MASTER_REG_ADDR(regnum);
4739 - write_val |= QCA8K_MDIO_MASTER_READ;
4741 - write_val |= QCA8K_MDIO_MASTER_WRITE;
4742 - write_val |= QCA8K_MDIO_MASTER_DATA(data);
4745 - /* Prealloc all the needed skb before the lock */
4746 - write_skb = qca8k_alloc_mdio_header(MDIO_WRITE, QCA8K_MDIO_MASTER_CTRL, &write_val,
4747 - QCA8K_ETHERNET_PHY_PRIORITY, sizeof(write_val));
4751 - clear_skb = qca8k_alloc_mdio_header(MDIO_WRITE, QCA8K_MDIO_MASTER_CTRL, &clear_val,
4752 - QCA8K_ETHERNET_PHY_PRIORITY, sizeof(clear_val));
4755 - goto err_clear_skb;
4758 - read_skb = qca8k_alloc_mdio_header(MDIO_READ, QCA8K_MDIO_MASTER_CTRL, &clear_val,
4759 - QCA8K_ETHERNET_PHY_PRIORITY, sizeof(clear_val));
4762 - goto err_read_skb;
4765 - /* Actually start the request:
4766 - * 1. Send mdio master packet
4767 - * 2. Busy Wait for mdio master command
4768 - * 3. Get the data if we are reading
4769 - * 4. Reset the mdio master (even with error)
4771 - mutex_lock(&mgmt_eth_data->mutex);
4773 - /* Check if mgmt_master is operational */
4774 - mgmt_master = priv->mgmt_master;
4775 - if (!mgmt_master) {
4776 - mutex_unlock(&mgmt_eth_data->mutex);
4778 - goto err_mgmt_master;
4781 - read_skb->dev = mgmt_master;
4782 - clear_skb->dev = mgmt_master;
4783 - write_skb->dev = mgmt_master;
4785 - reinit_completion(&mgmt_eth_data->rw_done);
4787 - /* Increment seq_num and set it in the write pkt */
4788 - mgmt_eth_data->seq++;
4789 - qca8k_mdio_header_fill_seq_num(write_skb, mgmt_eth_data->seq);
4790 - mgmt_eth_data->ack = false;
4792 - dev_queue_xmit(write_skb);
4794 - ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
4795 - QCA8K_ETHERNET_TIMEOUT);
4797 - ack = mgmt_eth_data->ack;
4801 - kfree_skb(read_skb);
4807 - kfree_skb(read_skb);
4811 - ret = read_poll_timeout(qca8k_phy_eth_busy_wait, ret1,
4812 - !(val & QCA8K_MDIO_MASTER_BUSY), 0,
4813 - QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false,
4814 - mgmt_eth_data, read_skb, &val);
4816 - if (ret < 0 && ret1 < 0) {
4822 - reinit_completion(&mgmt_eth_data->rw_done);
4824 - /* Increment seq_num and set it in the read pkt */
4825 - mgmt_eth_data->seq++;
4826 - qca8k_mdio_header_fill_seq_num(read_skb, mgmt_eth_data->seq);
4827 - mgmt_eth_data->ack = false;
4829 - dev_queue_xmit(read_skb);
4831 - ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
4832 - QCA8K_ETHERNET_TIMEOUT);
4834 - ack = mgmt_eth_data->ack;
4846 - ret = mgmt_eth_data->data[0] & QCA8K_MDIO_MASTER_DATA_MASK;
4848 - kfree_skb(read_skb);
4851 - reinit_completion(&mgmt_eth_data->rw_done);
4853 - /* Increment seq_num and set it in the clear pkt */
4854 - mgmt_eth_data->seq++;
4855 - qca8k_mdio_header_fill_seq_num(clear_skb, mgmt_eth_data->seq);
4856 - mgmt_eth_data->ack = false;
4858 - dev_queue_xmit(clear_skb);
4860 - wait_for_completion_timeout(&mgmt_eth_data->rw_done,
4861 - QCA8K_ETHERNET_TIMEOUT);
4863 - mutex_unlock(&mgmt_eth_data->mutex);
4867 - /* Error handling before lock */
4869 - kfree_skb(read_skb);
4871 - kfree_skb(clear_skb);
4873 - kfree_skb(write_skb);
4879 -qca8k_port_to_phy(int port)
4881 - /* From Andrew Lunn:
4882 - * Port 0 has no internal phy.
4883 - * Port 1 has an internal PHY at MDIO address 0.
4884 - * Port 2 has an internal PHY at MDIO address 1.
4886 - * Port 5 has an internal PHY at MDIO address 4.
4887 - * Port 6 has no internal PHY.
4894 -qca8k_mdio_busy_wait(struct mii_bus *bus, u32 reg, u32 mask)
4900 - qca8k_split_addr(reg, &r1, &r2, &page);
4902 - ret = read_poll_timeout(qca8k_mii_read32, ret1, !(val & mask), 0,
4903 - QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false,
4904 - bus, 0x10 | r2, r1, &val);
4906 - /* Check if qca8k_read has failed for a different reason
4907 - * before returnting -ETIMEDOUT
4909 - if (ret < 0 && ret1 < 0)
4916 -qca8k_mdio_write(struct qca8k_priv *priv, int phy, int regnum, u16 data)
4918 - struct mii_bus *bus = priv->bus;
4923 - if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
4926 - val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
4927 - QCA8K_MDIO_MASTER_WRITE | QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
4928 - QCA8K_MDIO_MASTER_REG_ADDR(regnum) |
4929 - QCA8K_MDIO_MASTER_DATA(data);
4931 - qca8k_split_addr(QCA8K_MDIO_MASTER_CTRL, &r1, &r2, &page);
4933 - mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
4935 - ret = qca8k_set_page(priv, page);
4939 - qca8k_mii_write32(priv, 0x10 | r2, r1, val);
4941 - ret = qca8k_mdio_busy_wait(bus, QCA8K_MDIO_MASTER_CTRL,
4942 - QCA8K_MDIO_MASTER_BUSY);
4945 - /* even if the busy_wait timeouts try to clear the MASTER_EN */
4946 - qca8k_mii_write32(priv, 0x10 | r2, r1, 0);
4948 - mutex_unlock(&bus->mdio_lock);
4954 -qca8k_mdio_read(struct qca8k_priv *priv, int phy, int regnum)
4956 - struct mii_bus *bus = priv->bus;
4961 - if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
4964 - val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
4965 - QCA8K_MDIO_MASTER_READ | QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
4966 - QCA8K_MDIO_MASTER_REG_ADDR(regnum);
4968 - qca8k_split_addr(QCA8K_MDIO_MASTER_CTRL, &r1, &r2, &page);
4970 - mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
4972 - ret = qca8k_set_page(priv, page);
4976 - qca8k_mii_write32(priv, 0x10 | r2, r1, val);
4978 - ret = qca8k_mdio_busy_wait(bus, QCA8K_MDIO_MASTER_CTRL,
4979 - QCA8K_MDIO_MASTER_BUSY);
4983 - ret = qca8k_mii_read32(bus, 0x10 | r2, r1, &val);
4986 - /* even if the busy_wait timeouts try to clear the MASTER_EN */
4987 - qca8k_mii_write32(priv, 0x10 | r2, r1, 0);
4989 - mutex_unlock(&bus->mdio_lock);
4992 - ret = val & QCA8K_MDIO_MASTER_DATA_MASK;
4998 -qca8k_internal_mdio_write(struct mii_bus *slave_bus, int phy, int regnum, u16 data)
5000 - struct qca8k_priv *priv = slave_bus->priv;
5003 - /* Use mdio Ethernet when available, fallback to legacy one on error */
5004 - ret = qca8k_phy_eth_command(priv, false, phy, regnum, data);
5008 - return qca8k_mdio_write(priv, phy, regnum, data);
5012 -qca8k_internal_mdio_read(struct mii_bus *slave_bus, int phy, int regnum)
5014 - struct qca8k_priv *priv = slave_bus->priv;
5017 - /* Use mdio Ethernet when available, fallback to legacy one on error */
5018 - ret = qca8k_phy_eth_command(priv, true, phy, regnum, 0);
5022 - ret = qca8k_mdio_read(priv, phy, regnum);
5031 -qca8k_legacy_mdio_write(struct mii_bus *slave_bus, int port, int regnum, u16 data)
5033 - port = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
5035 - return qca8k_internal_mdio_write(slave_bus, port, regnum, data);
5039 -qca8k_legacy_mdio_read(struct mii_bus *slave_bus, int port, int regnum)
5041 - port = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
5043 - return qca8k_internal_mdio_read(slave_bus, port, regnum);
5047 -qca8k_mdio_register(struct qca8k_priv *priv)
5049 - struct dsa_switch *ds = priv->ds;
5050 - struct device_node *mdio;
5051 - struct mii_bus *bus;
5053 - bus = devm_mdiobus_alloc(ds->dev);
5057 - bus->priv = (void *)priv;
5058 - snprintf(bus->id, MII_BUS_ID_SIZE, "qca8k-%d.%d",
5059 - ds->dst->index, ds->index);
5060 - bus->parent = ds->dev;
5061 - bus->phy_mask = ~ds->phys_mii_mask;
5062 - ds->slave_mii_bus = bus;
5064 - /* Check if the devicetree declare the port:phy mapping */
5065 - mdio = of_get_child_by_name(priv->dev->of_node, "mdio");
5066 - if (of_device_is_available(mdio)) {
5067 - bus->name = "qca8k slave mii";
5068 - bus->read = qca8k_internal_mdio_read;
5069 - bus->write = qca8k_internal_mdio_write;
5070 - return devm_of_mdiobus_register(priv->dev, bus, mdio);
5073 - /* If a mapping can't be found the legacy mapping is used,
5074 - * using the qca8k_port_to_phy function
5076 - bus->name = "qca8k-legacy slave mii";
5077 - bus->read = qca8k_legacy_mdio_read;
5078 - bus->write = qca8k_legacy_mdio_write;
5079 - return devm_mdiobus_register(priv->dev, bus);
5083 -qca8k_setup_mdio_bus(struct qca8k_priv *priv)
5085 - u32 internal_mdio_mask = 0, external_mdio_mask = 0, reg;
5086 - struct device_node *ports, *port;
5087 - phy_interface_t mode;
5090 - ports = of_get_child_by_name(priv->dev->of_node, "ports");
5092 - ports = of_get_child_by_name(priv->dev->of_node, "ethernet-ports");
5097 - for_each_available_child_of_node(ports, port) {
5098 - err = of_property_read_u32(port, "reg", ®);
5100 - of_node_put(port);
5101 - of_node_put(ports);
5105 - if (!dsa_is_user_port(priv->ds, reg))
5108 - of_get_phy_mode(port, &mode);
5110 - if (of_property_read_bool(port, "phy-handle") &&
5111 - mode != PHY_INTERFACE_MODE_INTERNAL)
5112 - external_mdio_mask |= BIT(reg);
5114 - internal_mdio_mask |= BIT(reg);
5117 - of_node_put(ports);
5118 - if (!external_mdio_mask && !internal_mdio_mask) {
5119 - dev_err(priv->dev, "no PHYs are defined.\n");
5123 - /* The QCA8K_MDIO_MASTER_EN Bit, which grants access to PHYs through
5124 - * the MDIO_MASTER register also _disconnects_ the external MDC
5125 - * passthrough to the internal PHYs. It's not possible to use both
5126 - * configurations at the same time!
5128 - * Because this came up during the review process:
5129 - * If the external mdio-bus driver is capable magically disabling
5130 - * the QCA8K_MDIO_MASTER_EN and mutex/spin-locking out the qca8k's
5131 - * accessors for the time being, it would be possible to pull this
5134 - if (!!external_mdio_mask && !!internal_mdio_mask) {
5135 - dev_err(priv->dev, "either internal or external mdio bus configuration is supported.\n");
5139 - if (external_mdio_mask) {
5140 - /* Make sure to disable the internal mdio bus in cases
5141 - * a dt-overlay and driver reload changed the configuration
5144 - return regmap_clear_bits(priv->regmap, QCA8K_MDIO_MASTER_CTRL,
5145 - QCA8K_MDIO_MASTER_EN);
5148 - return qca8k_mdio_register(priv);
5152 -qca8k_setup_mac_pwr_sel(struct qca8k_priv *priv)
5157 - /* SoC specific settings for ipq8064.
5158 - * If more device require this consider adding
5159 - * a dedicated binding.
5161 - if (of_machine_is_compatible("qcom,ipq8064"))
5162 - mask |= QCA8K_MAC_PWR_RGMII0_1_8V;
5164 - /* SoC specific settings for ipq8065 */
5165 - if (of_machine_is_compatible("qcom,ipq8065"))
5166 - mask |= QCA8K_MAC_PWR_RGMII1_1_8V;
5169 - ret = qca8k_rmw(priv, QCA8K_REG_MAC_PWR_SEL,
5170 - QCA8K_MAC_PWR_RGMII0_1_8V |
5171 - QCA8K_MAC_PWR_RGMII1_1_8V,
5178 -static int qca8k_find_cpu_port(struct dsa_switch *ds)
5180 - struct qca8k_priv *priv = ds->priv;
5182 - /* Find the connected cpu port. Valid port are 0 or 6 */
5183 - if (dsa_is_cpu_port(ds, 0))
5186 - dev_dbg(priv->dev, "port 0 is not the CPU port. Checking port 6");
5188 - if (dsa_is_cpu_port(ds, 6))
5195 -qca8k_setup_of_pws_reg(struct qca8k_priv *priv)
5197 - struct device_node *node = priv->dev->of_node;
5198 - const struct qca8k_match_data *data;
5202 - /* QCA8327 require to set to the correct mode.
5203 - * His bigger brother QCA8328 have the 172 pin layout.
5204 - * Should be applied by default but we set this just to make sure.
5206 - if (priv->switch_id == QCA8K_ID_QCA8327) {
5207 - data = of_device_get_match_data(priv->dev);
5209 - /* Set the correct package of 148 pin for QCA8327 */
5210 - if (data->reduced_package)
5211 - val |= QCA8327_PWS_PACKAGE148_EN;
5213 - ret = qca8k_rmw(priv, QCA8K_REG_PWS, QCA8327_PWS_PACKAGE148_EN,
5219 - if (of_property_read_bool(node, "qca,ignore-power-on-sel"))
5220 - val |= QCA8K_PWS_POWER_ON_SEL;
5222 - if (of_property_read_bool(node, "qca,led-open-drain")) {
5223 - if (!(val & QCA8K_PWS_POWER_ON_SEL)) {
5224 - dev_err(priv->dev, "qca,led-open-drain require qca,ignore-power-on-sel to be set.");
5228 - val |= QCA8K_PWS_LED_OPEN_EN_CSR;
5231 - return qca8k_rmw(priv, QCA8K_REG_PWS,
5232 - QCA8K_PWS_LED_OPEN_EN_CSR | QCA8K_PWS_POWER_ON_SEL,
5237 -qca8k_parse_port_config(struct qca8k_priv *priv)
5239 - int port, cpu_port_index = -1, ret;
5240 - struct device_node *port_dn;
5241 - phy_interface_t mode;
5242 - struct dsa_port *dp;
5245 - /* We have 2 CPU port. Check them */
5246 - for (port = 0; port < QCA8K_NUM_PORTS; port++) {
5247 - /* Skip every other port */
5248 - if (port != 0 && port != 6)
5251 - dp = dsa_to_port(priv->ds, port);
5255 - if (!of_device_is_available(port_dn))
5258 - ret = of_get_phy_mode(port_dn, &mode);
5263 - case PHY_INTERFACE_MODE_RGMII:
5264 - case PHY_INTERFACE_MODE_RGMII_ID:
5265 - case PHY_INTERFACE_MODE_RGMII_TXID:
5266 - case PHY_INTERFACE_MODE_RGMII_RXID:
5267 - case PHY_INTERFACE_MODE_SGMII:
5270 - if (!of_property_read_u32(port_dn, "tx-internal-delay-ps", &delay))
5271 - /* Switch regs accept value in ns, convert ps to ns */
5272 - delay = delay / 1000;
5273 - else if (mode == PHY_INTERFACE_MODE_RGMII_ID ||
5274 - mode == PHY_INTERFACE_MODE_RGMII_TXID)
5277 - if (!FIELD_FIT(QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK, delay)) {
5278 - dev_err(priv->dev, "rgmii tx delay is limited to a max value of 3ns, setting to the max value");
5282 - priv->ports_config.rgmii_tx_delay[cpu_port_index] = delay;
5286 - if (!of_property_read_u32(port_dn, "rx-internal-delay-ps", &delay))
5287 - /* Switch regs accept value in ns, convert ps to ns */
5288 - delay = delay / 1000;
5289 - else if (mode == PHY_INTERFACE_MODE_RGMII_ID ||
5290 - mode == PHY_INTERFACE_MODE_RGMII_RXID)
5293 - if (!FIELD_FIT(QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK, delay)) {
5294 - dev_err(priv->dev, "rgmii rx delay is limited to a max value of 3ns, setting to the max value");
5298 - priv->ports_config.rgmii_rx_delay[cpu_port_index] = delay;
5300 - /* Skip sgmii parsing for rgmii* mode */
5301 - if (mode == PHY_INTERFACE_MODE_RGMII ||
5302 - mode == PHY_INTERFACE_MODE_RGMII_ID ||
5303 - mode == PHY_INTERFACE_MODE_RGMII_TXID ||
5304 - mode == PHY_INTERFACE_MODE_RGMII_RXID)
5307 - if (of_property_read_bool(port_dn, "qca,sgmii-txclk-falling-edge"))
5308 - priv->ports_config.sgmii_tx_clk_falling_edge = true;
5310 - if (of_property_read_bool(port_dn, "qca,sgmii-rxclk-falling-edge"))
5311 - priv->ports_config.sgmii_rx_clk_falling_edge = true;
5313 - if (of_property_read_bool(port_dn, "qca,sgmii-enable-pll")) {
5314 - priv->ports_config.sgmii_enable_pll = true;
5316 - if (priv->switch_id == QCA8K_ID_QCA8327) {
5317 - dev_err(priv->dev, "SGMII PLL should NOT be enabled for qca8327. Aborting enabling");
5318 - priv->ports_config.sgmii_enable_pll = false;
5321 - if (priv->switch_revision < 2)
5322 - dev_warn(priv->dev, "SGMII PLL should NOT be enabled for qca8337 with revision 2 or more.");
5335 -qca8k_setup(struct dsa_switch *ds)
5337 - struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
5338 - int cpu_port, ret, i;
5341 - cpu_port = qca8k_find_cpu_port(ds);
5342 - if (cpu_port < 0) {
5343 - dev_err(priv->dev, "No cpu port configured in both cpu port0 and port6");
5347 - /* Parse CPU port config to be later used in phy_link mac_config */
5348 - ret = qca8k_parse_port_config(priv);
5352 - ret = qca8k_setup_mdio_bus(priv);
5356 - ret = qca8k_setup_of_pws_reg(priv);
5360 - ret = qca8k_setup_mac_pwr_sel(priv);
5364 - /* Make sure MAC06 is disabled */
5365 - ret = regmap_clear_bits(priv->regmap, QCA8K_REG_PORT0_PAD_CTRL,
5366 - QCA8K_PORT0_PAD_MAC06_EXCHANGE_EN);
5368 - dev_err(priv->dev, "failed disabling MAC06 exchange");
5372 - /* Enable CPU Port */
5373 - ret = regmap_set_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
5374 - QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN);
5376 - dev_err(priv->dev, "failed enabling CPU port");
5380 - /* Enable MIB counters */
5381 - ret = qca8k_mib_init(priv);
5383 - dev_warn(priv->dev, "mib init failed");
5385 - /* Initial setup of all ports */
5386 - for (i = 0; i < QCA8K_NUM_PORTS; i++) {
5387 - /* Disable forwarding by default on all ports */
5388 - ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
5389 - QCA8K_PORT_LOOKUP_MEMBER, 0);
5393 - /* Enable QCA header mode on all cpu ports */
5394 - if (dsa_is_cpu_port(ds, i)) {
5395 - ret = qca8k_write(priv, QCA8K_REG_PORT_HDR_CTRL(i),
5396 - FIELD_PREP(QCA8K_PORT_HDR_CTRL_TX_MASK, QCA8K_PORT_HDR_CTRL_ALL) |
5397 - FIELD_PREP(QCA8K_PORT_HDR_CTRL_RX_MASK, QCA8K_PORT_HDR_CTRL_ALL));
5399 - dev_err(priv->dev, "failed enabling QCA header mode");
5404 - /* Disable MAC by default on all user ports */
5405 - if (dsa_is_user_port(ds, i))
5406 - qca8k_port_set_status(priv, i, 0);
5409 - /* Forward all unknown frames to CPU port for Linux processing
5410 - * Notice that in multi-cpu config only one port should be set
5411 - * for igmp, unknown, multicast and broadcast packet
5413 - ret = qca8k_write(priv, QCA8K_REG_GLOBAL_FW_CTRL1,
5414 - FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_MASK, BIT(cpu_port)) |
5415 - FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_BC_DP_MASK, BIT(cpu_port)) |
5416 - FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_MC_DP_MASK, BIT(cpu_port)) |
5417 - FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_UC_DP_MASK, BIT(cpu_port)));
5421 - /* Setup connection between CPU port & user ports
5422 - * Configure specific switch configuration for ports
5424 - for (i = 0; i < QCA8K_NUM_PORTS; i++) {
5425 - /* CPU port gets connected to all user ports of the switch */
5426 - if (dsa_is_cpu_port(ds, i)) {
5427 - ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
5428 - QCA8K_PORT_LOOKUP_MEMBER, dsa_user_ports(ds));
5433 - /* Individual user ports get connected to CPU port only */
5434 - if (dsa_is_user_port(ds, i)) {
5435 - ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
5436 - QCA8K_PORT_LOOKUP_MEMBER,
5441 - /* Enable ARP Auto-learning by default */
5442 - ret = regmap_set_bits(priv->regmap, QCA8K_PORT_LOOKUP_CTRL(i),
5443 - QCA8K_PORT_LOOKUP_LEARN);
5447 - /* For port based vlans to work we need to set the
5448 - * default egress vid
5450 - ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(i),
5451 - QCA8K_EGREES_VLAN_PORT_MASK(i),
5452 - QCA8K_EGREES_VLAN_PORT(i, QCA8K_PORT_VID_DEF));
5456 - ret = qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(i),
5457 - QCA8K_PORT_VLAN_CVID(QCA8K_PORT_VID_DEF) |
5458 - QCA8K_PORT_VLAN_SVID(QCA8K_PORT_VID_DEF));
5463 - /* The port 5 of the qca8337 have some problem in flood condition. The
5464 - * original legacy driver had some specific buffer and priority settings
5465 - * for the different port suggested by the QCA switch team. Add this
5466 - * missing settings to improve switch stability under load condition.
5467 - * This problem is limited to qca8337 and other qca8k switch are not affected.
5469 - if (priv->switch_id == QCA8K_ID_QCA8337) {
5471 - /* The 2 CPU port and port 5 requires some different
5472 - * priority than any other ports.
5477 - mask = QCA8K_PORT_HOL_CTRL0_EG_PRI0(0x3) |
5478 - QCA8K_PORT_HOL_CTRL0_EG_PRI1(0x4) |
5479 - QCA8K_PORT_HOL_CTRL0_EG_PRI2(0x4) |
5480 - QCA8K_PORT_HOL_CTRL0_EG_PRI3(0x4) |
5481 - QCA8K_PORT_HOL_CTRL0_EG_PRI4(0x6) |
5482 - QCA8K_PORT_HOL_CTRL0_EG_PRI5(0x8) |
5483 - QCA8K_PORT_HOL_CTRL0_EG_PORT(0x1e);
5486 - mask = QCA8K_PORT_HOL_CTRL0_EG_PRI0(0x3) |
5487 - QCA8K_PORT_HOL_CTRL0_EG_PRI1(0x4) |
5488 - QCA8K_PORT_HOL_CTRL0_EG_PRI2(0x6) |
5489 - QCA8K_PORT_HOL_CTRL0_EG_PRI3(0x8) |
5490 - QCA8K_PORT_HOL_CTRL0_EG_PORT(0x19);
5492 - qca8k_write(priv, QCA8K_REG_PORT_HOL_CTRL0(i), mask);
5494 - mask = QCA8K_PORT_HOL_CTRL1_ING(0x6) |
5495 - QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN |
5496 - QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN |
5497 - QCA8K_PORT_HOL_CTRL1_WRED_EN;
5498 - qca8k_rmw(priv, QCA8K_REG_PORT_HOL_CTRL1(i),
5499 - QCA8K_PORT_HOL_CTRL1_ING_BUF_MASK |
5500 - QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN |
5501 - QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN |
5502 - QCA8K_PORT_HOL_CTRL1_WRED_EN,
5507 - /* Special GLOBAL_FC_THRESH value are needed for ar8327 switch */
5508 - if (priv->switch_id == QCA8K_ID_QCA8327) {
5509 - mask = QCA8K_GLOBAL_FC_GOL_XON_THRES(288) |
5510 - QCA8K_GLOBAL_FC_GOL_XOFF_THRES(496);
5511 - qca8k_rmw(priv, QCA8K_REG_GLOBAL_FC_THRESH,
5512 - QCA8K_GLOBAL_FC_GOL_XON_THRES_MASK |
5513 - QCA8K_GLOBAL_FC_GOL_XOFF_THRES_MASK,
5517 - /* Setup our port MTUs to match power on defaults */
5518 - ret = qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, ETH_FRAME_LEN + ETH_FCS_LEN);
5520 - dev_warn(priv->dev, "failed setting MTU settings");
5522 - /* Flush the FDB table */
5523 - qca8k_fdb_flush(priv);
5525 - /* We don't have interrupts for link changes, so we need to poll */
5526 - ds->pcs_poll = true;
5528 - /* Set min a max ageing value supported */
5529 - ds->ageing_time_min = 7000;
5530 - ds->ageing_time_max = 458745000;
5532 - /* Set max number of LAGs supported */
5533 - ds->num_lag_ids = QCA8K_NUM_LAGS;
5539 -qca8k_mac_config_setup_internal_delay(struct qca8k_priv *priv, int cpu_port_index,
5542 - u32 delay, val = 0;
5545 - /* Delay can be declared in 3 different way.
5546 - * Mode to rgmii and internal-delay standard binding defined
5547 - * rgmii-id or rgmii-tx/rx phy mode set.
5548 - * The parse logic set a delay different than 0 only when one
5549 - * of the 3 different way is used. In all other case delay is
5550 - * not enabled. With ID or TX/RXID delay is enabled and set
5551 - * to the default and recommended value.
5553 - if (priv->ports_config.rgmii_tx_delay[cpu_port_index]) {
5554 - delay = priv->ports_config.rgmii_tx_delay[cpu_port_index];
5556 - val |= QCA8K_PORT_PAD_RGMII_TX_DELAY(delay) |
5557 - QCA8K_PORT_PAD_RGMII_TX_DELAY_EN;
5560 - if (priv->ports_config.rgmii_rx_delay[cpu_port_index]) {
5561 - delay = priv->ports_config.rgmii_rx_delay[cpu_port_index];
5563 - val |= QCA8K_PORT_PAD_RGMII_RX_DELAY(delay) |
5564 - QCA8K_PORT_PAD_RGMII_RX_DELAY_EN;
5567 - /* Set RGMII delay based on the selected values */
5568 - ret = qca8k_rmw(priv, reg,
5569 - QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK |
5570 - QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK |
5571 - QCA8K_PORT_PAD_RGMII_TX_DELAY_EN |
5572 - QCA8K_PORT_PAD_RGMII_RX_DELAY_EN,
5575 - dev_err(priv->dev, "Failed to set internal delay for CPU port%d",
5576 - cpu_port_index == QCA8K_CPU_PORT0 ? 0 : 6);
5580 -qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
5581 - const struct phylink_link_state *state)
5583 - struct qca8k_priv *priv = ds->priv;
5584 - int cpu_port_index, ret;
5588 - case 0: /* 1st CPU port */
5589 - if (state->interface != PHY_INTERFACE_MODE_RGMII &&
5590 - state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
5591 - state->interface != PHY_INTERFACE_MODE_RGMII_TXID &&
5592 - state->interface != PHY_INTERFACE_MODE_RGMII_RXID &&
5593 - state->interface != PHY_INTERFACE_MODE_SGMII)
5596 - reg = QCA8K_REG_PORT0_PAD_CTRL;
5597 - cpu_port_index = QCA8K_CPU_PORT0;
5604 - /* Internal PHY, nothing to do */
5606 - case 6: /* 2nd CPU port / external PHY */
5607 - if (state->interface != PHY_INTERFACE_MODE_RGMII &&
5608 - state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
5609 - state->interface != PHY_INTERFACE_MODE_RGMII_TXID &&
5610 - state->interface != PHY_INTERFACE_MODE_RGMII_RXID &&
5611 - state->interface != PHY_INTERFACE_MODE_SGMII &&
5612 - state->interface != PHY_INTERFACE_MODE_1000BASEX)
5615 - reg = QCA8K_REG_PORT6_PAD_CTRL;
5616 - cpu_port_index = QCA8K_CPU_PORT6;
5619 - dev_err(ds->dev, "%s: unsupported port: %i\n", __func__, port);
5623 - if (port != 6 && phylink_autoneg_inband(mode)) {
5624 - dev_err(ds->dev, "%s: in-band negotiation unsupported\n",
5629 - switch (state->interface) {
5630 - case PHY_INTERFACE_MODE_RGMII:
5631 - case PHY_INTERFACE_MODE_RGMII_ID:
5632 - case PHY_INTERFACE_MODE_RGMII_TXID:
5633 - case PHY_INTERFACE_MODE_RGMII_RXID:
5634 - qca8k_write(priv, reg, QCA8K_PORT_PAD_RGMII_EN);
5636 - /* Configure rgmii delay */
5637 - qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg);
5639 - /* QCA8337 requires to set rgmii rx delay for all ports.
5640 - * This is enabled through PORT5_PAD_CTRL for all ports,
5641 - * rather than individual port registers.
5643 - if (priv->switch_id == QCA8K_ID_QCA8337)
5644 - qca8k_write(priv, QCA8K_REG_PORT5_PAD_CTRL,
5645 - QCA8K_PORT_PAD_RGMII_RX_DELAY_EN);
5647 - case PHY_INTERFACE_MODE_SGMII:
5648 - case PHY_INTERFACE_MODE_1000BASEX:
5649 - /* Enable SGMII on the port */
5650 - qca8k_write(priv, reg, QCA8K_PORT_PAD_SGMII_EN);
5652 - /* Enable/disable SerDes auto-negotiation as necessary */
5653 - ret = qca8k_read(priv, QCA8K_REG_PWS, &val);
5656 - if (phylink_autoneg_inband(mode))
5657 - val &= ~QCA8K_PWS_SERDES_AEN_DIS;
5659 - val |= QCA8K_PWS_SERDES_AEN_DIS;
5660 - qca8k_write(priv, QCA8K_REG_PWS, val);
5662 - /* Configure the SGMII parameters */
5663 - ret = qca8k_read(priv, QCA8K_REG_SGMII_CTRL, &val);
5667 - val |= QCA8K_SGMII_EN_SD;
5669 - if (priv->ports_config.sgmii_enable_pll)
5670 - val |= QCA8K_SGMII_EN_PLL | QCA8K_SGMII_EN_RX |
5671 - QCA8K_SGMII_EN_TX;
5673 - if (dsa_is_cpu_port(ds, port)) {
5674 - /* CPU port, we're talking to the CPU MAC, be a PHY */
5675 - val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
5676 - val |= QCA8K_SGMII_MODE_CTRL_PHY;
5677 - } else if (state->interface == PHY_INTERFACE_MODE_SGMII) {
5678 - val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
5679 - val |= QCA8K_SGMII_MODE_CTRL_MAC;
5680 - } else if (state->interface == PHY_INTERFACE_MODE_1000BASEX) {
5681 - val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
5682 - val |= QCA8K_SGMII_MODE_CTRL_BASEX;
5685 - qca8k_write(priv, QCA8K_REG_SGMII_CTRL, val);
5687 - /* From original code is reported port instability as SGMII also
5688 - * require delay set. Apply advised values here or take them from DT.
5690 - if (state->interface == PHY_INTERFACE_MODE_SGMII)
5691 - qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg);
5693 - /* For qca8327/qca8328/qca8334/qca8338 sgmii is unique and
5694 - * falling edge is set writing in the PORT0 PAD reg
5696 - if (priv->switch_id == QCA8K_ID_QCA8327 ||
5697 - priv->switch_id == QCA8K_ID_QCA8337)
5698 - reg = QCA8K_REG_PORT0_PAD_CTRL;
5702 - /* SGMII Clock phase configuration */
5703 - if (priv->ports_config.sgmii_rx_clk_falling_edge)
5704 - val |= QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE;
5706 - if (priv->ports_config.sgmii_tx_clk_falling_edge)
5707 - val |= QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE;
5710 - ret = qca8k_rmw(priv, reg,
5711 - QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE |
5712 - QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE,
5717 - dev_err(ds->dev, "xMII mode %s not supported for port %d\n",
5718 - phy_modes(state->interface), port);
5724 -qca8k_phylink_validate(struct dsa_switch *ds, int port,
5725 - unsigned long *supported,
5726 - struct phylink_link_state *state)
5728 - __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
5731 - case 0: /* 1st CPU port */
5732 - if (state->interface != PHY_INTERFACE_MODE_NA &&
5733 - state->interface != PHY_INTERFACE_MODE_RGMII &&
5734 - state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
5735 - state->interface != PHY_INTERFACE_MODE_RGMII_TXID &&
5736 - state->interface != PHY_INTERFACE_MODE_RGMII_RXID &&
5737 - state->interface != PHY_INTERFACE_MODE_SGMII)
5745 - /* Internal PHY */
5746 - if (state->interface != PHY_INTERFACE_MODE_NA &&
5747 - state->interface != PHY_INTERFACE_MODE_GMII &&
5748 - state->interface != PHY_INTERFACE_MODE_INTERNAL)
5751 - case 6: /* 2nd CPU port / external PHY */
5752 - if (state->interface != PHY_INTERFACE_MODE_NA &&
5753 - state->interface != PHY_INTERFACE_MODE_RGMII &&
5754 - state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
5755 - state->interface != PHY_INTERFACE_MODE_RGMII_TXID &&
5756 - state->interface != PHY_INTERFACE_MODE_RGMII_RXID &&
5757 - state->interface != PHY_INTERFACE_MODE_SGMII &&
5758 - state->interface != PHY_INTERFACE_MODE_1000BASEX)
5763 - linkmode_zero(supported);
5767 - phylink_set_port_modes(mask);
5768 - phylink_set(mask, Autoneg);
5770 - phylink_set(mask, 1000baseT_Full);
5771 - phylink_set(mask, 10baseT_Half);
5772 - phylink_set(mask, 10baseT_Full);
5773 - phylink_set(mask, 100baseT_Half);
5774 - phylink_set(mask, 100baseT_Full);
5776 - if (state->interface == PHY_INTERFACE_MODE_1000BASEX)
5777 - phylink_set(mask, 1000baseX_Full);
5779 - phylink_set(mask, Pause);
5780 - phylink_set(mask, Asym_Pause);
5782 - linkmode_and(supported, supported, mask);
5783 - linkmode_and(state->advertising, state->advertising, mask);
5787 -qca8k_phylink_mac_link_state(struct dsa_switch *ds, int port,
5788 - struct phylink_link_state *state)
5790 - struct qca8k_priv *priv = ds->priv;
5794 - ret = qca8k_read(priv, QCA8K_REG_PORT_STATUS(port), ®);
5798 - state->link = !!(reg & QCA8K_PORT_STATUS_LINK_UP);
5799 - state->an_complete = state->link;
5800 - state->an_enabled = !!(reg & QCA8K_PORT_STATUS_LINK_AUTO);
5801 - state->duplex = (reg & QCA8K_PORT_STATUS_DUPLEX) ? DUPLEX_FULL :
5804 - switch (reg & QCA8K_PORT_STATUS_SPEED) {
5805 - case QCA8K_PORT_STATUS_SPEED_10:
5806 - state->speed = SPEED_10;
5808 - case QCA8K_PORT_STATUS_SPEED_100:
5809 - state->speed = SPEED_100;
5811 - case QCA8K_PORT_STATUS_SPEED_1000:
5812 - state->speed = SPEED_1000;
5815 - state->speed = SPEED_UNKNOWN;
5819 - state->pause = MLO_PAUSE_NONE;
5820 - if (reg & QCA8K_PORT_STATUS_RXFLOW)
5821 - state->pause |= MLO_PAUSE_RX;
5822 - if (reg & QCA8K_PORT_STATUS_TXFLOW)
5823 - state->pause |= MLO_PAUSE_TX;
5829 -qca8k_phylink_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode,
5830 - phy_interface_t interface)
5832 - struct qca8k_priv *priv = ds->priv;
5834 - qca8k_port_set_status(priv, port, 0);
5838 -qca8k_phylink_mac_link_up(struct dsa_switch *ds, int port, unsigned int mode,
5839 - phy_interface_t interface, struct phy_device *phydev,
5840 - int speed, int duplex, bool tx_pause, bool rx_pause)
5842 - struct qca8k_priv *priv = ds->priv;
5845 - if (phylink_autoneg_inband(mode)) {
5846 - reg = QCA8K_PORT_STATUS_LINK_AUTO;
5850 - reg = QCA8K_PORT_STATUS_SPEED_10;
5853 - reg = QCA8K_PORT_STATUS_SPEED_100;
5856 - reg = QCA8K_PORT_STATUS_SPEED_1000;
5859 - reg = QCA8K_PORT_STATUS_LINK_AUTO;
5863 - if (duplex == DUPLEX_FULL)
5864 - reg |= QCA8K_PORT_STATUS_DUPLEX;
5866 - if (rx_pause || dsa_is_cpu_port(ds, port))
5867 - reg |= QCA8K_PORT_STATUS_RXFLOW;
5869 - if (tx_pause || dsa_is_cpu_port(ds, port))
5870 - reg |= QCA8K_PORT_STATUS_TXFLOW;
5873 - reg |= QCA8K_PORT_STATUS_TXMAC | QCA8K_PORT_STATUS_RXMAC;
5875 - qca8k_write(priv, QCA8K_REG_PORT_STATUS(port), reg);
5879 -qca8k_get_strings(struct dsa_switch *ds, int port, u32 stringset, uint8_t *data)
5881 - const struct qca8k_match_data *match_data;
5882 - struct qca8k_priv *priv = ds->priv;
5885 - if (stringset != ETH_SS_STATS)
5888 - match_data = of_device_get_match_data(priv->dev);
5890 - for (i = 0; i < match_data->mib_count; i++)
5891 - strncpy(data + i * ETH_GSTRING_LEN, ar8327_mib[i].name,
5895 -static void qca8k_mib_autocast_handler(struct dsa_switch *ds, struct sk_buff *skb)
5897 - const struct qca8k_match_data *match_data;
5898 - struct qca8k_mib_eth_data *mib_eth_data;
5899 - struct qca8k_priv *priv = ds->priv;
5900 - const struct qca8k_mib_desc *mib;
5901 - struct mib_ethhdr *mib_ethhdr;
5902 - int i, mib_len, offset = 0;
5906 - mib_ethhdr = (struct mib_ethhdr *)skb_mac_header(skb);
5907 - mib_eth_data = &priv->mib_eth_data;
5909 - /* The switch autocast every port. Ignore other packet and
5910 - * parse only the requested one.
5912 - port = FIELD_GET(QCA_HDR_RECV_SOURCE_PORT, ntohs(mib_ethhdr->hdr));
5913 - if (port != mib_eth_data->req_port)
5916 - match_data = device_get_match_data(priv->dev);
5917 - data = mib_eth_data->data;
5919 - for (i = 0; i < match_data->mib_count; i++) {
5920 - mib = &ar8327_mib[i];
5922 - /* First 3 mib are present in the skb head */
5924 - data[i] = mib_ethhdr->data[i];
5928 - mib_len = sizeof(uint32_t);
5930 - /* Some mib are 64 bit wide */
5931 - if (mib->size == 2)
5932 - mib_len = sizeof(uint64_t);
5934 - /* Copy the mib value from packet to the */
5935 - memcpy(data + i, skb->data + offset, mib_len);
5937 - /* Set the offset for the next mib */
5938 - offset += mib_len;
5942 - /* Complete on receiving all the mib packet */
5943 - if (refcount_dec_and_test(&mib_eth_data->port_parsed))
5944 - complete(&mib_eth_data->rw_done);
5948 -qca8k_get_ethtool_stats_eth(struct dsa_switch *ds, int port, u64 *data)
5950 - struct dsa_port *dp = dsa_to_port(ds, port);
5951 - struct qca8k_mib_eth_data *mib_eth_data;
5952 - struct qca8k_priv *priv = ds->priv;
5955 - mib_eth_data = &priv->mib_eth_data;
5957 - mutex_lock(&mib_eth_data->mutex);
5959 - reinit_completion(&mib_eth_data->rw_done);
5961 - mib_eth_data->req_port = dp->index;
5962 - mib_eth_data->data = data;
5963 - refcount_set(&mib_eth_data->port_parsed, QCA8K_NUM_PORTS);
5965 - mutex_lock(&priv->reg_mutex);
5967 - /* Send mib autocast request */
5968 - ret = regmap_update_bits(priv->regmap, QCA8K_REG_MIB,
5969 - QCA8K_MIB_FUNC | QCA8K_MIB_BUSY,
5970 - FIELD_PREP(QCA8K_MIB_FUNC, QCA8K_MIB_CAST) |
5973 - mutex_unlock(&priv->reg_mutex);
5978 - ret = wait_for_completion_timeout(&mib_eth_data->rw_done, QCA8K_ETHERNET_TIMEOUT);
5981 - mutex_unlock(&mib_eth_data->mutex);
5987 -qca8k_get_ethtool_stats(struct dsa_switch *ds, int port,
5990 - struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
5991 - const struct qca8k_match_data *match_data;
5992 - const struct qca8k_mib_desc *mib;
5997 - if (priv->mgmt_master &&
5998 - qca8k_get_ethtool_stats_eth(ds, port, data) > 0)
6001 - match_data = of_device_get_match_data(priv->dev);
6003 - for (i = 0; i < match_data->mib_count; i++) {
6004 - mib = &ar8327_mib[i];
6005 - reg = QCA8K_PORT_MIB_COUNTER(port) + mib->offset;
6007 - ret = qca8k_read(priv, reg, &val);
6011 - if (mib->size == 2) {
6012 - ret = qca8k_read(priv, reg + 4, &hi);
6018 - if (mib->size == 2)
6019 - data[i] |= (u64)hi << 32;
6024 -qca8k_get_sset_count(struct dsa_switch *ds, int port, int sset)
6026 - const struct qca8k_match_data *match_data;
6027 - struct qca8k_priv *priv = ds->priv;
6029 - if (sset != ETH_SS_STATS)
6032 - match_data = of_device_get_match_data(priv->dev);
6034 - return match_data->mib_count;
6038 -qca8k_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *eee)
6040 - struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
6041 - u32 lpi_en = QCA8K_REG_EEE_CTRL_LPI_EN(port);
6045 - mutex_lock(&priv->reg_mutex);
6046 - ret = qca8k_read(priv, QCA8K_REG_EEE_CTRL, ®);
6050 - if (eee->eee_enabled)
6054 - ret = qca8k_write(priv, QCA8K_REG_EEE_CTRL, reg);
6057 - mutex_unlock(&priv->reg_mutex);
6062 -qca8k_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
6064 - /* Nothing to do on the port's MAC */
6069 -qca8k_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
6071 - struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
6075 - case BR_STATE_DISABLED:
6076 - stp_state = QCA8K_PORT_LOOKUP_STATE_DISABLED;
6078 - case BR_STATE_BLOCKING:
6079 - stp_state = QCA8K_PORT_LOOKUP_STATE_BLOCKING;
6081 - case BR_STATE_LISTENING:
6082 - stp_state = QCA8K_PORT_LOOKUP_STATE_LISTENING;
6084 - case BR_STATE_LEARNING:
6085 - stp_state = QCA8K_PORT_LOOKUP_STATE_LEARNING;
6087 - case BR_STATE_FORWARDING:
6089 - stp_state = QCA8K_PORT_LOOKUP_STATE_FORWARD;
6093 - qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
6094 - QCA8K_PORT_LOOKUP_STATE_MASK, stp_state);
6098 -qca8k_port_bridge_join(struct dsa_switch *ds, int port, struct net_device *br)
6100 - struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
6101 - int port_mask, cpu_port;
6104 - cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
6105 - port_mask = BIT(cpu_port);
6107 - for (i = 0; i < QCA8K_NUM_PORTS; i++) {
6108 - if (dsa_is_cpu_port(ds, i))
6110 - if (dsa_to_port(ds, i)->bridge_dev != br)
6112 - /* Add this port to the portvlan mask of the other ports
6115 - ret = regmap_set_bits(priv->regmap,
6116 - QCA8K_PORT_LOOKUP_CTRL(i),
6121 - port_mask |= BIT(i);
6124 - /* Add all other ports to this ports portvlan mask */
6125 - ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
6126 - QCA8K_PORT_LOOKUP_MEMBER, port_mask);
6132 -qca8k_port_bridge_leave(struct dsa_switch *ds, int port, struct net_device *br)
6134 - struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
6137 - cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
6139 - for (i = 0; i < QCA8K_NUM_PORTS; i++) {
6140 - if (dsa_is_cpu_port(ds, i))
6142 - if (dsa_to_port(ds, i)->bridge_dev != br)
6144 - /* Remove this port to the portvlan mask of the other ports
6147 - regmap_clear_bits(priv->regmap,
6148 - QCA8K_PORT_LOOKUP_CTRL(i),
6152 - /* Set the cpu port to be the only one in the portvlan mask of
6155 - qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
6156 - QCA8K_PORT_LOOKUP_MEMBER, BIT(cpu_port));
6160 -qca8k_port_fast_age(struct dsa_switch *ds, int port)
6162 - struct qca8k_priv *priv = ds->priv;
6164 - mutex_lock(&priv->reg_mutex);
6165 - qca8k_fdb_access(priv, QCA8K_FDB_FLUSH_PORT, port);
6166 - mutex_unlock(&priv->reg_mutex);
6170 -qca8k_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
6172 - struct qca8k_priv *priv = ds->priv;
6173 - unsigned int secs = msecs / 1000;
6176 - /* AGE_TIME reg is set in 7s step */
6179 - /* Handle case with 0 as val to NOT disable
6185 - return regmap_update_bits(priv->regmap, QCA8K_REG_ATU_CTRL, QCA8K_ATU_AGE_TIME_MASK,
6186 - QCA8K_ATU_AGE_TIME(val));
6190 -qca8k_port_enable(struct dsa_switch *ds, int port,
6191 - struct phy_device *phy)
6193 - struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
6195 - qca8k_port_set_status(priv, port, 1);
6196 - priv->port_enabled_map |= BIT(port);
6198 - if (dsa_is_user_port(ds, port))
6199 - phy_support_asym_pause(phy);
6205 -qca8k_port_disable(struct dsa_switch *ds, int port)
6207 - struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
6209 - qca8k_port_set_status(priv, port, 0);
6210 - priv->port_enabled_map &= ~BIT(port);
6214 -qca8k_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
6216 - struct qca8k_priv *priv = ds->priv;
6219 - /* We have only have a general MTU setting.
6220 - * DSA always set the CPU port's MTU to the largest MTU of the slave
6222 - * Setting MTU just for the CPU port is sufficient to correctly set a
6223 - * value for every port.
6225 - if (!dsa_is_cpu_port(ds, port))
6228 - /* To change the MAX_FRAME_SIZE the cpu ports must be off or
6229 - * the switch panics.
6230 - * Turn off both cpu ports before applying the new value to prevent
6233 - if (priv->port_enabled_map & BIT(0))
6234 - qca8k_port_set_status(priv, 0, 0);
6236 - if (priv->port_enabled_map & BIT(6))
6237 - qca8k_port_set_status(priv, 6, 0);
6239 - /* Include L2 header / FCS length */
6240 - ret = qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, new_mtu + ETH_HLEN + ETH_FCS_LEN);
6242 - if (priv->port_enabled_map & BIT(0))
6243 - qca8k_port_set_status(priv, 0, 1);
6245 - if (priv->port_enabled_map & BIT(6))
6246 - qca8k_port_set_status(priv, 6, 1);
6252 -qca8k_port_max_mtu(struct dsa_switch *ds, int port)
6254 - return QCA8K_MAX_MTU;
6258 -qca8k_port_fdb_insert(struct qca8k_priv *priv, const u8 *addr,
6259 - u16 port_mask, u16 vid)
6261 - /* Set the vid to the port vlan id if no vid is set */
6263 - vid = QCA8K_PORT_VID_DEF;
6265 - return qca8k_fdb_add(priv, addr, port_mask, vid,
6266 - QCA8K_ATU_STATUS_STATIC);
6270 -qca8k_port_fdb_add(struct dsa_switch *ds, int port,
6271 - const unsigned char *addr, u16 vid)
6273 - struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
6274 - u16 port_mask = BIT(port);
6276 - return qca8k_port_fdb_insert(priv, addr, port_mask, vid);
6280 -qca8k_port_fdb_del(struct dsa_switch *ds, int port,
6281 - const unsigned char *addr, u16 vid)
6283 - struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
6284 - u16 port_mask = BIT(port);
6287 - vid = QCA8K_PORT_VID_DEF;
6289 - return qca8k_fdb_del(priv, addr, port_mask, vid);
6293 -qca8k_port_fdb_dump(struct dsa_switch *ds, int port,
6294 - dsa_fdb_dump_cb_t *cb, void *data)
6296 - struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
6297 - struct qca8k_fdb _fdb = { 0 };
6298 - int cnt = QCA8K_NUM_FDB_RECORDS;
6302 - mutex_lock(&priv->reg_mutex);
6303 - while (cnt-- && !qca8k_fdb_next(priv, &_fdb, port)) {
6306 - is_static = (_fdb.aging == QCA8K_ATU_STATUS_STATIC);
6307 - ret = cb(_fdb.mac, _fdb.vid, is_static, data);
6311 - mutex_unlock(&priv->reg_mutex);
6317 -qca8k_port_mdb_add(struct dsa_switch *ds, int port,
6318 - const struct switchdev_obj_port_mdb *mdb)
6320 - struct qca8k_priv *priv = ds->priv;
6321 - const u8 *addr = mdb->addr;
6322 - u16 vid = mdb->vid;
6324 - return qca8k_fdb_search_and_insert(priv, BIT(port), addr, vid);
6328 -qca8k_port_mdb_del(struct dsa_switch *ds, int port,
6329 - const struct switchdev_obj_port_mdb *mdb)
6331 - struct qca8k_priv *priv = ds->priv;
6332 - const u8 *addr = mdb->addr;
6333 - u16 vid = mdb->vid;
6335 - return qca8k_fdb_search_and_del(priv, BIT(port), addr, vid);
6339 -qca8k_port_mirror_add(struct dsa_switch *ds, int port,
6340 - struct dsa_mall_mirror_tc_entry *mirror,
6343 - struct qca8k_priv *priv = ds->priv;
6344 - int monitor_port, ret;
6347 - /* Check for existent entry */
6348 - if ((ingress ? priv->mirror_rx : priv->mirror_tx) & BIT(port))
6351 - ret = regmap_read(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0, &val);
6355 - /* QCA83xx can have only one port set to mirror mode.
6356 - * Check that the correct port is requested and return error otherwise.
6357 - * When no mirror port is set, the values is set to 0xF
6359 - monitor_port = FIELD_GET(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
6360 - if (monitor_port != 0xF && monitor_port != mirror->to_local_port)
6363 - /* Set the monitor port */
6364 - val = FIELD_PREP(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM,
6365 - mirror->to_local_port);
6366 - ret = regmap_update_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
6367 - QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
6372 - reg = QCA8K_PORT_LOOKUP_CTRL(port);
6373 - val = QCA8K_PORT_LOOKUP_ING_MIRROR_EN;
6375 - reg = QCA8K_REG_PORT_HOL_CTRL1(port);
6376 - val = QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN;
6379 - ret = regmap_update_bits(priv->regmap, reg, val, val);
6383 - /* Track mirror port for tx and rx to decide when the
6384 - * mirror port has to be disabled.
6387 - priv->mirror_rx |= BIT(port);
6389 - priv->mirror_tx |= BIT(port);
6395 -qca8k_port_mirror_del(struct dsa_switch *ds, int port,
6396 - struct dsa_mall_mirror_tc_entry *mirror)
6398 - struct qca8k_priv *priv = ds->priv;
6402 - if (mirror->ingress) {
6403 - reg = QCA8K_PORT_LOOKUP_CTRL(port);
6404 - val = QCA8K_PORT_LOOKUP_ING_MIRROR_EN;
6406 - reg = QCA8K_REG_PORT_HOL_CTRL1(port);
6407 - val = QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN;
6410 - ret = regmap_clear_bits(priv->regmap, reg, val);
6414 - if (mirror->ingress)
6415 - priv->mirror_rx &= ~BIT(port);
6417 - priv->mirror_tx &= ~BIT(port);
6419 - /* No port set to send packet to mirror port. Disable mirror port */
6420 - if (!priv->mirror_rx && !priv->mirror_tx) {
6421 - val = FIELD_PREP(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, 0xF);
6422 - ret = regmap_update_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
6423 - QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
6428 - dev_err(priv->dev, "Failed to del mirror port from %d", port);
6432 -qca8k_port_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
6433 - struct netlink_ext_ack *extack)
6435 - struct qca8k_priv *priv = ds->priv;
6438 - if (vlan_filtering) {
6439 - ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
6440 - QCA8K_PORT_LOOKUP_VLAN_MODE_MASK,
6441 - QCA8K_PORT_LOOKUP_VLAN_MODE_SECURE);
6443 - ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
6444 - QCA8K_PORT_LOOKUP_VLAN_MODE_MASK,
6445 - QCA8K_PORT_LOOKUP_VLAN_MODE_NONE);
6452 -qca8k_port_vlan_add(struct dsa_switch *ds, int port,
6453 - const struct switchdev_obj_port_vlan *vlan,
6454 - struct netlink_ext_ack *extack)
6456 - bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
6457 - bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
6458 - struct qca8k_priv *priv = ds->priv;
6461 - ret = qca8k_vlan_add(priv, port, vlan->vid, untagged);
6463 - dev_err(priv->dev, "Failed to add VLAN to port %d (%d)", port, ret);
6468 - ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(port),
6469 - QCA8K_EGREES_VLAN_PORT_MASK(port),
6470 - QCA8K_EGREES_VLAN_PORT(port, vlan->vid));
6474 - ret = qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(port),
6475 - QCA8K_PORT_VLAN_CVID(vlan->vid) |
6476 - QCA8K_PORT_VLAN_SVID(vlan->vid));
6483 -qca8k_port_vlan_del(struct dsa_switch *ds, int port,
6484 - const struct switchdev_obj_port_vlan *vlan)
6486 - struct qca8k_priv *priv = ds->priv;
6489 - ret = qca8k_vlan_del(priv, port, vlan->vid);
6491 - dev_err(priv->dev, "Failed to delete VLAN from port %d (%d)", port, ret);
6496 -static u32 qca8k_get_phy_flags(struct dsa_switch *ds, int port)
6498 - struct qca8k_priv *priv = ds->priv;
6500 - /* Communicate to the phy internal driver the switch revision.
6501 - * Based on the switch revision different values needs to be
6502 - * set to the dbg and mmd reg on the phy.
6503 - * The first 2 bit are used to communicate the switch revision
6504 - * to the phy driver.
6506 - if (port > 0 && port < 6)
6507 - return priv->switch_revision;
6512 -static enum dsa_tag_protocol
6513 -qca8k_get_tag_protocol(struct dsa_switch *ds, int port,
6514 - enum dsa_tag_protocol mp)
6516 - return DSA_TAG_PROTO_QCA;
6520 -qca8k_lag_can_offload(struct dsa_switch *ds,
6521 - struct net_device *lag,
6522 - struct netdev_lag_upper_info *info)
6524 - struct dsa_port *dp;
6525 - int id, members = 0;
6527 - id = dsa_lag_id(ds->dst, lag);
6528 - if (id < 0 || id >= ds->num_lag_ids)
6531 - dsa_lag_foreach_port(dp, ds->dst, lag)
6532 - /* Includes the port joining the LAG */
6535 - if (members > QCA8K_NUM_PORTS_FOR_LAG)
6538 - if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
6541 - if (info->hash_type != NETDEV_LAG_HASH_L2 &&
6542 - info->hash_type != NETDEV_LAG_HASH_L23)
6549 -qca8k_lag_setup_hash(struct dsa_switch *ds,
6550 - struct net_device *lag,
6551 - struct netdev_lag_upper_info *info)
6553 - struct qca8k_priv *priv = ds->priv;
6554 - bool unique_lag = true;
6558 - id = dsa_lag_id(ds->dst, lag);
6560 - switch (info->hash_type) {
6561 - case NETDEV_LAG_HASH_L23:
6562 - hash |= QCA8K_TRUNK_HASH_SIP_EN;
6563 - hash |= QCA8K_TRUNK_HASH_DIP_EN;
6565 - case NETDEV_LAG_HASH_L2:
6566 - hash |= QCA8K_TRUNK_HASH_SA_EN;
6567 - hash |= QCA8K_TRUNK_HASH_DA_EN;
6569 - default: /* We should NEVER reach this */
6570 - return -EOPNOTSUPP;
6573 - /* Check if we are the unique configured LAG */
6574 - dsa_lags_foreach_id(i, ds->dst)
6575 - if (i != id && dsa_lag_dev(ds->dst, i)) {
6576 - unique_lag = false;
6580 - /* Hash Mode is global. Make sure the same Hash Mode
6581 - * is set to all the 4 possible lag.
6582 - * If we are the unique LAG we can set whatever hash
6584 - * To change hash mode it's needed to remove all LAG
6585 - * and change the mode with the latest.
6588 - priv->lag_hash_mode = hash;
6589 - } else if (priv->lag_hash_mode != hash) {
6590 - netdev_err(lag, "Error: Mismateched Hash Mode across different lag is not supported\n");
6591 - return -EOPNOTSUPP;
6594 - return regmap_update_bits(priv->regmap, QCA8K_TRUNK_HASH_EN_CTRL,
6595 - QCA8K_TRUNK_HASH_MASK, hash);
6599 -qca8k_lag_refresh_portmap(struct dsa_switch *ds, int port,
6600 - struct net_device *lag, bool delete)
6602 - struct qca8k_priv *priv = ds->priv;
6606 - id = dsa_lag_id(ds->dst, lag);
6608 - /* Read current port member */
6609 - ret = regmap_read(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL0, &val);
6613 - /* Shift val to the correct trunk */
6614 - val >>= QCA8K_REG_GOL_TRUNK_SHIFT(id);
6615 - val &= QCA8K_REG_GOL_TRUNK_MEMBER_MASK;
6617 - val &= ~BIT(port);
6621 - /* Update port member. With empty portmap disable trunk */
6622 - ret = regmap_update_bits(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL0,
6623 - QCA8K_REG_GOL_TRUNK_MEMBER(id) |
6624 - QCA8K_REG_GOL_TRUNK_EN(id),
6625 - !val << QCA8K_REG_GOL_TRUNK_SHIFT(id) |
6626 - val << QCA8K_REG_GOL_TRUNK_SHIFT(id));
6628 - /* Search empty member if adding or port on deleting */
6629 - for (i = 0; i < QCA8K_NUM_PORTS_FOR_LAG; i++) {
6630 - ret = regmap_read(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL(id), &val);
6634 - val >>= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i);
6635 - val &= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_MASK;
6638 - /* If port flagged to be disabled assume this member is
6641 - if (val != QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK)
6644 - val &= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT_MASK;
6648 - /* If port flagged to be enabled assume this member is
6651 - if (val == QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK)
6655 - /* We have found the member to add/remove */
6659 - /* Set port in the correct port mask or disable port if in delete mode */
6660 - return regmap_update_bits(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL(id),
6661 - QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN(id, i) |
6662 - QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT(id, i),
6663 - !delete << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i) |
6664 - port << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i));
6668 -qca8k_port_lag_join(struct dsa_switch *ds, int port,
6669 - struct net_device *lag,
6670 - struct netdev_lag_upper_info *info)
6674 - if (!qca8k_lag_can_offload(ds, lag, info))
6675 - return -EOPNOTSUPP;
6677 - ret = qca8k_lag_setup_hash(ds, lag, info);
6681 - return qca8k_lag_refresh_portmap(ds, port, lag, false);
6685 -qca8k_port_lag_leave(struct dsa_switch *ds, int port,
6686 - struct net_device *lag)
6688 - return qca8k_lag_refresh_portmap(ds, port, lag, true);
6692 -qca8k_master_change(struct dsa_switch *ds, const struct net_device *master,
6695 - struct dsa_port *dp = master->dsa_ptr;
6696 - struct qca8k_priv *priv = ds->priv;
6698 - /* Ethernet MIB/MDIO is only supported for CPU port 0 */
6699 - if (dp->index != 0)
6702 - mutex_lock(&priv->mgmt_eth_data.mutex);
6703 - mutex_lock(&priv->mib_eth_data.mutex);
6705 - priv->mgmt_master = operational ? (struct net_device *)master : NULL;
6707 - mutex_unlock(&priv->mib_eth_data.mutex);
6708 - mutex_unlock(&priv->mgmt_eth_data.mutex);
6711 -static int qca8k_connect_tag_protocol(struct dsa_switch *ds,
6712 - enum dsa_tag_protocol proto)
6714 - struct qca_tagger_data *tagger_data;
6717 - case DSA_TAG_PROTO_QCA:
6718 - tagger_data = ds->tagger_data;
6720 - tagger_data->rw_reg_ack_handler = qca8k_rw_reg_ack_handler;
6721 - tagger_data->mib_autocast_handler = qca8k_mib_autocast_handler;
6725 - return -EOPNOTSUPP;
6731 -static const struct dsa_switch_ops qca8k_switch_ops = {
6732 - .get_tag_protocol = qca8k_get_tag_protocol,
6733 - .setup = qca8k_setup,
6734 - .get_strings = qca8k_get_strings,
6735 - .get_ethtool_stats = qca8k_get_ethtool_stats,
6736 - .get_sset_count = qca8k_get_sset_count,
6737 - .set_ageing_time = qca8k_set_ageing_time,
6738 - .get_mac_eee = qca8k_get_mac_eee,
6739 - .set_mac_eee = qca8k_set_mac_eee,
6740 - .port_enable = qca8k_port_enable,
6741 - .port_disable = qca8k_port_disable,
6742 - .port_change_mtu = qca8k_port_change_mtu,
6743 - .port_max_mtu = qca8k_port_max_mtu,
6744 - .port_stp_state_set = qca8k_port_stp_state_set,
6745 - .port_bridge_join = qca8k_port_bridge_join,
6746 - .port_bridge_leave = qca8k_port_bridge_leave,
6747 - .port_fast_age = qca8k_port_fast_age,
6748 - .port_fdb_add = qca8k_port_fdb_add,
6749 - .port_fdb_del = qca8k_port_fdb_del,
6750 - .port_fdb_dump = qca8k_port_fdb_dump,
6751 - .port_mdb_add = qca8k_port_mdb_add,
6752 - .port_mdb_del = qca8k_port_mdb_del,
6753 - .port_mirror_add = qca8k_port_mirror_add,
6754 - .port_mirror_del = qca8k_port_mirror_del,
6755 - .port_vlan_filtering = qca8k_port_vlan_filtering,
6756 - .port_vlan_add = qca8k_port_vlan_add,
6757 - .port_vlan_del = qca8k_port_vlan_del,
6758 - .phylink_validate = qca8k_phylink_validate,
6759 - .phylink_mac_link_state = qca8k_phylink_mac_link_state,
6760 - .phylink_mac_config = qca8k_phylink_mac_config,
6761 - .phylink_mac_link_down = qca8k_phylink_mac_link_down,
6762 - .phylink_mac_link_up = qca8k_phylink_mac_link_up,
6763 - .get_phy_flags = qca8k_get_phy_flags,
6764 - .port_lag_join = qca8k_port_lag_join,
6765 - .port_lag_leave = qca8k_port_lag_leave,
6766 - .master_state_change = qca8k_master_change,
6767 - .connect_tag_protocol = qca8k_connect_tag_protocol,
6770 -static int qca8k_read_switch_id(struct qca8k_priv *priv)
6772 - const struct qca8k_match_data *data;
6777 - /* get the switches ID from the compatible */
6778 - data = of_device_get_match_data(priv->dev);
6782 - ret = qca8k_read(priv, QCA8K_REG_MASK_CTRL, &val);
6786 - id = QCA8K_MASK_CTRL_DEVICE_ID(val);
6787 - if (id != data->id) {
6788 - dev_err(priv->dev, "Switch id detected %x but expected %x", id, data->id);
6792 - priv->switch_id = id;
6794 - /* Save revision to communicate to the internal PHY driver */
6795 - priv->switch_revision = QCA8K_MASK_CTRL_REV_ID(val);
6801 -qca8k_sw_probe(struct mdio_device *mdiodev)
6803 - struct qca8k_priv *priv;
6806 - /* allocate the private data struct so that we can probe the switches
6809 - priv = devm_kzalloc(&mdiodev->dev, sizeof(*priv), GFP_KERNEL);
6813 - priv->bus = mdiodev->bus;
6814 - priv->dev = &mdiodev->dev;
6816 - priv->reset_gpio = devm_gpiod_get_optional(priv->dev, "reset",
6818 - if (IS_ERR(priv->reset_gpio))
6819 - return PTR_ERR(priv->reset_gpio);
6821 - if (priv->reset_gpio) {
6822 - gpiod_set_value_cansleep(priv->reset_gpio, 1);
6823 - /* The active low duration must be greater than 10 ms
6824 - * and checkpatch.pl wants 20 ms.
6827 - gpiod_set_value_cansleep(priv->reset_gpio, 0);
6830 - /* Start by setting up the register mapping */
6831 - priv->regmap = devm_regmap_init(&mdiodev->dev, NULL, priv,
6832 - &qca8k_regmap_config);
6833 - if (IS_ERR(priv->regmap)) {
6834 - dev_err(priv->dev, "regmap initialization failed");
6835 - return PTR_ERR(priv->regmap);
6838 - priv->mdio_cache.page = 0xffff;
6839 - priv->mdio_cache.lo = 0xffff;
6840 - priv->mdio_cache.hi = 0xffff;
6842 - /* Check the detected switch id */
6843 - ret = qca8k_read_switch_id(priv);
6847 - priv->ds = devm_kzalloc(&mdiodev->dev, sizeof(*priv->ds), GFP_KERNEL);
6851 - mutex_init(&priv->mgmt_eth_data.mutex);
6852 - init_completion(&priv->mgmt_eth_data.rw_done);
6854 - mutex_init(&priv->mib_eth_data.mutex);
6855 - init_completion(&priv->mib_eth_data.rw_done);
6857 - priv->ds->dev = &mdiodev->dev;
6858 - priv->ds->num_ports = QCA8K_NUM_PORTS;
6859 - priv->ds->priv = priv;
6860 - priv->ds->ops = &qca8k_switch_ops;
6861 - mutex_init(&priv->reg_mutex);
6862 - dev_set_drvdata(&mdiodev->dev, priv);
6864 - return dsa_register_switch(priv->ds);
6868 -qca8k_sw_remove(struct mdio_device *mdiodev)
6870 - struct qca8k_priv *priv = dev_get_drvdata(&mdiodev->dev);
6876 - for (i = 0; i < QCA8K_NUM_PORTS; i++)
6877 - qca8k_port_set_status(priv, i, 0);
6879 - dsa_unregister_switch(priv->ds);
6881 - dev_set_drvdata(&mdiodev->dev, NULL);
6884 -static void qca8k_sw_shutdown(struct mdio_device *mdiodev)
6886 - struct qca8k_priv *priv = dev_get_drvdata(&mdiodev->dev);
6891 - dsa_switch_shutdown(priv->ds);
6893 - dev_set_drvdata(&mdiodev->dev, NULL);
6896 -#ifdef CONFIG_PM_SLEEP
6898 -qca8k_set_pm(struct qca8k_priv *priv, int enable)
6902 - for (port = 0; port < QCA8K_NUM_PORTS; port++) {
6903 - /* Do not enable on resume if the port was
6904 - * disabled before.
6906 - if (!(priv->port_enabled_map & BIT(port)))
6909 - qca8k_port_set_status(priv, port, enable);
6913 -static int qca8k_suspend(struct device *dev)
6915 - struct qca8k_priv *priv = dev_get_drvdata(dev);
6917 - qca8k_set_pm(priv, 0);
6919 - return dsa_switch_suspend(priv->ds);
6922 -static int qca8k_resume(struct device *dev)
6924 - struct qca8k_priv *priv = dev_get_drvdata(dev);
6926 - qca8k_set_pm(priv, 1);
6928 - return dsa_switch_resume(priv->ds);
6930 -#endif /* CONFIG_PM_SLEEP */
6932 -static SIMPLE_DEV_PM_OPS(qca8k_pm_ops,
6933 - qca8k_suspend, qca8k_resume);
6935 -static const struct qca8k_match_data qca8327 = {
6936 - .id = QCA8K_ID_QCA8327,
6937 - .reduced_package = true,
6938 - .mib_count = QCA8K_QCA832X_MIB_COUNT,
6941 -static const struct qca8k_match_data qca8328 = {
6942 - .id = QCA8K_ID_QCA8327,
6943 - .mib_count = QCA8K_QCA832X_MIB_COUNT,
6946 -static const struct qca8k_match_data qca833x = {
6947 - .id = QCA8K_ID_QCA8337,
6948 - .mib_count = QCA8K_QCA833X_MIB_COUNT,
6951 -static const struct of_device_id qca8k_of_match[] = {
6952 - { .compatible = "qca,qca8327", .data = &qca8327 },
6953 - { .compatible = "qca,qca8328", .data = &qca8328 },
6954 - { .compatible = "qca,qca8334", .data = &qca833x },
6955 - { .compatible = "qca,qca8337", .data = &qca833x },
6956 - { /* sentinel */ },
6959 -static struct mdio_driver qca8kmdio_driver = {
6960 - .probe = qca8k_sw_probe,
6961 - .remove = qca8k_sw_remove,
6962 - .shutdown = qca8k_sw_shutdown,
6963 - .mdiodrv.driver = {
6965 - .of_match_table = qca8k_of_match,
6966 - .pm = &qca8k_pm_ops,
6970 -mdio_module_driver(qca8kmdio_driver);
6972 -MODULE_AUTHOR("Mathieu Olivari, John Crispin <john@phrozen.org>");
6973 -MODULE_DESCRIPTION("Driver for QCA8K ethernet switch family");
6974 -MODULE_LICENSE("GPL v2");
6975 -MODULE_ALIAS("platform:qca8k");
6976 --- a/drivers/net/dsa/qca8k.h
6979 -/* SPDX-License-Identifier: GPL-2.0-only */
6981 - * Copyright (C) 2009 Felix Fietkau <nbd@nbd.name>
6982 - * Copyright (C) 2011-2012 Gabor Juhos <juhosg@openwrt.org>
6983 - * Copyright (c) 2015, The Linux Foundation. All rights reserved.
6989 -#include <linux/delay.h>
6990 -#include <linux/regmap.h>
6991 -#include <linux/gpio.h>
6992 -#include <linux/dsa/tag_qca.h>
6994 -#define QCA8K_ETHERNET_MDIO_PRIORITY 7
6995 -#define QCA8K_ETHERNET_PHY_PRIORITY 6
6996 -#define QCA8K_ETHERNET_TIMEOUT 100
6998 -#define QCA8K_NUM_PORTS 7
6999 -#define QCA8K_NUM_CPU_PORTS 2
7000 -#define QCA8K_MAX_MTU 9000
7001 -#define QCA8K_NUM_LAGS 4
7002 -#define QCA8K_NUM_PORTS_FOR_LAG 4
7004 -#define PHY_ID_QCA8327 0x004dd034
7005 -#define QCA8K_ID_QCA8327 0x12
7006 -#define PHY_ID_QCA8337 0x004dd036
7007 -#define QCA8K_ID_QCA8337 0x13
7009 -#define QCA8K_QCA832X_MIB_COUNT 39
7010 -#define QCA8K_QCA833X_MIB_COUNT 41
7012 -#define QCA8K_BUSY_WAIT_TIMEOUT 2000
7014 -#define QCA8K_NUM_FDB_RECORDS 2048
7016 -#define QCA8K_PORT_VID_DEF 1
7018 -/* Global control registers */
7019 -#define QCA8K_REG_MASK_CTRL 0x000
7020 -#define QCA8K_MASK_CTRL_REV_ID_MASK GENMASK(7, 0)
7021 -#define QCA8K_MASK_CTRL_REV_ID(x) FIELD_GET(QCA8K_MASK_CTRL_REV_ID_MASK, x)
7022 -#define QCA8K_MASK_CTRL_DEVICE_ID_MASK GENMASK(15, 8)
7023 -#define QCA8K_MASK_CTRL_DEVICE_ID(x) FIELD_GET(QCA8K_MASK_CTRL_DEVICE_ID_MASK, x)
7024 -#define QCA8K_REG_PORT0_PAD_CTRL 0x004
7025 -#define QCA8K_PORT0_PAD_MAC06_EXCHANGE_EN BIT(31)
7026 -#define QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE BIT(19)
7027 -#define QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE BIT(18)
7028 -#define QCA8K_REG_PORT5_PAD_CTRL 0x008
7029 -#define QCA8K_REG_PORT6_PAD_CTRL 0x00c
7030 -#define QCA8K_PORT_PAD_RGMII_EN BIT(26)
7031 -#define QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK GENMASK(23, 22)
7032 -#define QCA8K_PORT_PAD_RGMII_TX_DELAY(x) FIELD_PREP(QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK, x)
7033 -#define QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK GENMASK(21, 20)
7034 -#define QCA8K_PORT_PAD_RGMII_RX_DELAY(x) FIELD_PREP(QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK, x)
7035 -#define QCA8K_PORT_PAD_RGMII_TX_DELAY_EN BIT(25)
7036 -#define QCA8K_PORT_PAD_RGMII_RX_DELAY_EN BIT(24)
7037 -#define QCA8K_PORT_PAD_SGMII_EN BIT(7)
7038 -#define QCA8K_REG_PWS 0x010
7039 -#define QCA8K_PWS_POWER_ON_SEL BIT(31)
7040 -/* This reg is only valid for QCA832x and toggle the package
7041 - * type from 176 pin (by default) to 148 pin used on QCA8327
7043 -#define QCA8327_PWS_PACKAGE148_EN BIT(30)
7044 -#define QCA8K_PWS_LED_OPEN_EN_CSR BIT(24)
7045 -#define QCA8K_PWS_SERDES_AEN_DIS BIT(7)
7046 -#define QCA8K_REG_MODULE_EN 0x030
7047 -#define QCA8K_MODULE_EN_MIB BIT(0)
7048 -#define QCA8K_REG_MIB 0x034
7049 -#define QCA8K_MIB_FUNC GENMASK(26, 24)
7050 -#define QCA8K_MIB_CPU_KEEP BIT(20)
7051 -#define QCA8K_MIB_BUSY BIT(17)
7052 -#define QCA8K_MDIO_MASTER_CTRL 0x3c
7053 -#define QCA8K_MDIO_MASTER_BUSY BIT(31)
7054 -#define QCA8K_MDIO_MASTER_EN BIT(30)
7055 -#define QCA8K_MDIO_MASTER_READ BIT(27)
7056 -#define QCA8K_MDIO_MASTER_WRITE 0
7057 -#define QCA8K_MDIO_MASTER_SUP_PRE BIT(26)
7058 -#define QCA8K_MDIO_MASTER_PHY_ADDR_MASK GENMASK(25, 21)
7059 -#define QCA8K_MDIO_MASTER_PHY_ADDR(x) FIELD_PREP(QCA8K_MDIO_MASTER_PHY_ADDR_MASK, x)
7060 -#define QCA8K_MDIO_MASTER_REG_ADDR_MASK GENMASK(20, 16)
7061 -#define QCA8K_MDIO_MASTER_REG_ADDR(x) FIELD_PREP(QCA8K_MDIO_MASTER_REG_ADDR_MASK, x)
7062 -#define QCA8K_MDIO_MASTER_DATA_MASK GENMASK(15, 0)
7063 -#define QCA8K_MDIO_MASTER_DATA(x) FIELD_PREP(QCA8K_MDIO_MASTER_DATA_MASK, x)
7064 -#define QCA8K_MDIO_MASTER_MAX_PORTS 5
7065 -#define QCA8K_MDIO_MASTER_MAX_REG 32
7066 -#define QCA8K_GOL_MAC_ADDR0 0x60
7067 -#define QCA8K_GOL_MAC_ADDR1 0x64
7068 -#define QCA8K_MAX_FRAME_SIZE 0x78
7069 -#define QCA8K_REG_PORT_STATUS(_i) (0x07c + (_i) * 4)
7070 -#define QCA8K_PORT_STATUS_SPEED GENMASK(1, 0)
7071 -#define QCA8K_PORT_STATUS_SPEED_10 0
7072 -#define QCA8K_PORT_STATUS_SPEED_100 0x1
7073 -#define QCA8K_PORT_STATUS_SPEED_1000 0x2
7074 -#define QCA8K_PORT_STATUS_TXMAC BIT(2)
7075 -#define QCA8K_PORT_STATUS_RXMAC BIT(3)
7076 -#define QCA8K_PORT_STATUS_TXFLOW BIT(4)
7077 -#define QCA8K_PORT_STATUS_RXFLOW BIT(5)
7078 -#define QCA8K_PORT_STATUS_DUPLEX BIT(6)
7079 -#define QCA8K_PORT_STATUS_LINK_UP BIT(8)
7080 -#define QCA8K_PORT_STATUS_LINK_AUTO BIT(9)
7081 -#define QCA8K_PORT_STATUS_LINK_PAUSE BIT(10)
7082 -#define QCA8K_PORT_STATUS_FLOW_AUTO BIT(12)
7083 -#define QCA8K_REG_PORT_HDR_CTRL(_i) (0x9c + (_i * 4))
7084 -#define QCA8K_PORT_HDR_CTRL_RX_MASK GENMASK(3, 2)
7085 -#define QCA8K_PORT_HDR_CTRL_TX_MASK GENMASK(1, 0)
7086 -#define QCA8K_PORT_HDR_CTRL_ALL 2
7087 -#define QCA8K_PORT_HDR_CTRL_MGMT 1
7088 -#define QCA8K_PORT_HDR_CTRL_NONE 0
7089 -#define QCA8K_REG_SGMII_CTRL 0x0e0
7090 -#define QCA8K_SGMII_EN_PLL BIT(1)
7091 -#define QCA8K_SGMII_EN_RX BIT(2)
7092 -#define QCA8K_SGMII_EN_TX BIT(3)
7093 -#define QCA8K_SGMII_EN_SD BIT(4)
7094 -#define QCA8K_SGMII_CLK125M_DELAY BIT(7)
7095 -#define QCA8K_SGMII_MODE_CTRL_MASK GENMASK(23, 22)
7096 -#define QCA8K_SGMII_MODE_CTRL(x) FIELD_PREP(QCA8K_SGMII_MODE_CTRL_MASK, x)
7097 -#define QCA8K_SGMII_MODE_CTRL_BASEX QCA8K_SGMII_MODE_CTRL(0x0)
7098 -#define QCA8K_SGMII_MODE_CTRL_PHY QCA8K_SGMII_MODE_CTRL(0x1)
7099 -#define QCA8K_SGMII_MODE_CTRL_MAC QCA8K_SGMII_MODE_CTRL(0x2)
7101 -/* MAC_PWR_SEL registers */
7102 -#define QCA8K_REG_MAC_PWR_SEL 0x0e4
7103 -#define QCA8K_MAC_PWR_RGMII1_1_8V BIT(18)
7104 -#define QCA8K_MAC_PWR_RGMII0_1_8V BIT(19)
7106 -/* EEE control registers */
7107 -#define QCA8K_REG_EEE_CTRL 0x100
7108 -#define QCA8K_REG_EEE_CTRL_LPI_EN(_i) ((_i + 1) * 2)
7110 -/* TRUNK_HASH_EN registers */
7111 -#define QCA8K_TRUNK_HASH_EN_CTRL 0x270
7112 -#define QCA8K_TRUNK_HASH_SIP_EN BIT(3)
7113 -#define QCA8K_TRUNK_HASH_DIP_EN BIT(2)
7114 -#define QCA8K_TRUNK_HASH_SA_EN BIT(1)
7115 -#define QCA8K_TRUNK_HASH_DA_EN BIT(0)
7116 -#define QCA8K_TRUNK_HASH_MASK GENMASK(3, 0)
7118 -/* ACL registers */
7119 -#define QCA8K_REG_PORT_VLAN_CTRL0(_i) (0x420 + (_i * 8))
7120 -#define QCA8K_PORT_VLAN_CVID_MASK GENMASK(27, 16)
7121 -#define QCA8K_PORT_VLAN_CVID(x) FIELD_PREP(QCA8K_PORT_VLAN_CVID_MASK, x)
7122 -#define QCA8K_PORT_VLAN_SVID_MASK GENMASK(11, 0)
7123 -#define QCA8K_PORT_VLAN_SVID(x) FIELD_PREP(QCA8K_PORT_VLAN_SVID_MASK, x)
7124 -#define QCA8K_REG_PORT_VLAN_CTRL1(_i) (0x424 + (_i * 8))
7125 -#define QCA8K_REG_IPV4_PRI_BASE_ADDR 0x470
7126 -#define QCA8K_REG_IPV4_PRI_ADDR_MASK 0x474
7128 -/* Lookup registers */
7129 -#define QCA8K_REG_ATU_DATA0 0x600
7130 -#define QCA8K_ATU_ADDR2_MASK GENMASK(31, 24)
7131 -#define QCA8K_ATU_ADDR3_MASK GENMASK(23, 16)
7132 -#define QCA8K_ATU_ADDR4_MASK GENMASK(15, 8)
7133 -#define QCA8K_ATU_ADDR5_MASK GENMASK(7, 0)
7134 -#define QCA8K_REG_ATU_DATA1 0x604
7135 -#define QCA8K_ATU_PORT_MASK GENMASK(22, 16)
7136 -#define QCA8K_ATU_ADDR0_MASK GENMASK(15, 8)
7137 -#define QCA8K_ATU_ADDR1_MASK GENMASK(7, 0)
7138 -#define QCA8K_REG_ATU_DATA2 0x608
7139 -#define QCA8K_ATU_VID_MASK GENMASK(19, 8)
7140 -#define QCA8K_ATU_STATUS_MASK GENMASK(3, 0)
7141 -#define QCA8K_ATU_STATUS_STATIC 0xf
7142 -#define QCA8K_REG_ATU_FUNC 0x60c
7143 -#define QCA8K_ATU_FUNC_BUSY BIT(31)
7144 -#define QCA8K_ATU_FUNC_PORT_EN BIT(14)
7145 -#define QCA8K_ATU_FUNC_MULTI_EN BIT(13)
7146 -#define QCA8K_ATU_FUNC_FULL BIT(12)
7147 -#define QCA8K_ATU_FUNC_PORT_MASK GENMASK(11, 8)
7148 -#define QCA8K_REG_VTU_FUNC0 0x610
7149 -#define QCA8K_VTU_FUNC0_VALID BIT(20)
7150 -#define QCA8K_VTU_FUNC0_IVL_EN BIT(19)
7151 -/* QCA8K_VTU_FUNC0_EG_MODE_MASK GENMASK(17, 4)
7152 - * It does contain VLAN_MODE for each port [5:4] for port0,
7153 - * [7:6] for port1 ... [17:16] for port6. Use virtual port
7154 - * define to handle this.
7156 -#define QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(_i) (4 + (_i) * 2)
7157 -#define QCA8K_VTU_FUNC0_EG_MODE_MASK GENMASK(1, 0)
7158 -#define QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(_i) (GENMASK(1, 0) << QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(_i))
7159 -#define QCA8K_VTU_FUNC0_EG_MODE_UNMOD FIELD_PREP(QCA8K_VTU_FUNC0_EG_MODE_MASK, 0x0)
7160 -#define QCA8K_VTU_FUNC0_EG_MODE_PORT_UNMOD(_i) (QCA8K_VTU_FUNC0_EG_MODE_UNMOD << QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(_i))
7161 -#define QCA8K_VTU_FUNC0_EG_MODE_UNTAG FIELD_PREP(QCA8K_VTU_FUNC0_EG_MODE_MASK, 0x1)
7162 -#define QCA8K_VTU_FUNC0_EG_MODE_PORT_UNTAG(_i) (QCA8K_VTU_FUNC0_EG_MODE_UNTAG << QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(_i))
7163 -#define QCA8K_VTU_FUNC0_EG_MODE_TAG FIELD_PREP(QCA8K_VTU_FUNC0_EG_MODE_MASK, 0x2)
7164 -#define QCA8K_VTU_FUNC0_EG_MODE_PORT_TAG(_i) (QCA8K_VTU_FUNC0_EG_MODE_TAG << QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(_i))
7165 -#define QCA8K_VTU_FUNC0_EG_MODE_NOT FIELD_PREP(QCA8K_VTU_FUNC0_EG_MODE_MASK, 0x3)
7166 -#define QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(_i) (QCA8K_VTU_FUNC0_EG_MODE_NOT << QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(_i))
7167 -#define QCA8K_REG_VTU_FUNC1 0x614
7168 -#define QCA8K_VTU_FUNC1_BUSY BIT(31)
7169 -#define QCA8K_VTU_FUNC1_VID_MASK GENMASK(27, 16)
7170 -#define QCA8K_VTU_FUNC1_FULL BIT(4)
7171 -#define QCA8K_REG_ATU_CTRL 0x618
7172 -#define QCA8K_ATU_AGE_TIME_MASK GENMASK(15, 0)
7173 -#define QCA8K_ATU_AGE_TIME(x) FIELD_PREP(QCA8K_ATU_AGE_TIME_MASK, (x))
7174 -#define QCA8K_REG_GLOBAL_FW_CTRL0 0x620
7175 -#define QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN BIT(10)
7176 -#define QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM GENMASK(7, 4)
7177 -#define QCA8K_REG_GLOBAL_FW_CTRL1 0x624
7178 -#define QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_MASK GENMASK(30, 24)
7179 -#define QCA8K_GLOBAL_FW_CTRL1_BC_DP_MASK GENMASK(22, 16)
7180 -#define QCA8K_GLOBAL_FW_CTRL1_MC_DP_MASK GENMASK(14, 8)
7181 -#define QCA8K_GLOBAL_FW_CTRL1_UC_DP_MASK GENMASK(6, 0)
7182 -#define QCA8K_PORT_LOOKUP_CTRL(_i) (0x660 + (_i) * 0xc)
7183 -#define QCA8K_PORT_LOOKUP_MEMBER GENMASK(6, 0)
7184 -#define QCA8K_PORT_LOOKUP_VLAN_MODE_MASK GENMASK(9, 8)
7185 -#define QCA8K_PORT_LOOKUP_VLAN_MODE(x) FIELD_PREP(QCA8K_PORT_LOOKUP_VLAN_MODE_MASK, x)
7186 -#define QCA8K_PORT_LOOKUP_VLAN_MODE_NONE QCA8K_PORT_LOOKUP_VLAN_MODE(0x0)
7187 -#define QCA8K_PORT_LOOKUP_VLAN_MODE_FALLBACK QCA8K_PORT_LOOKUP_VLAN_MODE(0x1)
7188 -#define QCA8K_PORT_LOOKUP_VLAN_MODE_CHECK QCA8K_PORT_LOOKUP_VLAN_MODE(0x2)
7189 -#define QCA8K_PORT_LOOKUP_VLAN_MODE_SECURE QCA8K_PORT_LOOKUP_VLAN_MODE(0x3)
7190 -#define QCA8K_PORT_LOOKUP_STATE_MASK GENMASK(18, 16)
7191 -#define QCA8K_PORT_LOOKUP_STATE(x) FIELD_PREP(QCA8K_PORT_LOOKUP_STATE_MASK, x)
7192 -#define QCA8K_PORT_LOOKUP_STATE_DISABLED QCA8K_PORT_LOOKUP_STATE(0x0)
7193 -#define QCA8K_PORT_LOOKUP_STATE_BLOCKING QCA8K_PORT_LOOKUP_STATE(0x1)
7194 -#define QCA8K_PORT_LOOKUP_STATE_LISTENING QCA8K_PORT_LOOKUP_STATE(0x2)
7195 -#define QCA8K_PORT_LOOKUP_STATE_LEARNING QCA8K_PORT_LOOKUP_STATE(0x3)
7196 -#define QCA8K_PORT_LOOKUP_STATE_FORWARD QCA8K_PORT_LOOKUP_STATE(0x4)
7197 -#define QCA8K_PORT_LOOKUP_LEARN BIT(20)
7198 -#define QCA8K_PORT_LOOKUP_ING_MIRROR_EN BIT(25)
7200 -#define QCA8K_REG_GOL_TRUNK_CTRL0 0x700
7201 -/* 4 max trunk first
7202 - * first 6 bit for member bitmap
7203 - * 7th bit is to enable trunk port
7205 -#define QCA8K_REG_GOL_TRUNK_SHIFT(_i) ((_i) * 8)
7206 -#define QCA8K_REG_GOL_TRUNK_EN_MASK BIT(7)
7207 -#define QCA8K_REG_GOL_TRUNK_EN(_i) (QCA8K_REG_GOL_TRUNK_EN_MASK << QCA8K_REG_GOL_TRUNK_SHIFT(_i))
7208 -#define QCA8K_REG_GOL_TRUNK_MEMBER_MASK GENMASK(6, 0)
7209 -#define QCA8K_REG_GOL_TRUNK_MEMBER(_i) (QCA8K_REG_GOL_TRUNK_MEMBER_MASK << QCA8K_REG_GOL_TRUNK_SHIFT(_i))
7210 -/* 0x704 for TRUNK 0-1 --- 0x708 for TRUNK 2-3 */
7211 -#define QCA8K_REG_GOL_TRUNK_CTRL(_i) (0x704 + (((_i) / 2) * 4))
7212 -#define QCA8K_REG_GOL_TRUNK_ID_MEM_ID_MASK GENMASK(3, 0)
7213 -#define QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK BIT(3)
7214 -#define QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT_MASK GENMASK(2, 0)
7215 -#define QCA8K_REG_GOL_TRUNK_ID_SHIFT(_i) (((_i) / 2) * 16)
7216 -#define QCA8K_REG_GOL_MEM_ID_SHIFT(_i) ((_i) * 4)
7217 -/* Complex shift: FIRST shift for port THEN shift for trunk */
7218 -#define QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(_i, _j) (QCA8K_REG_GOL_MEM_ID_SHIFT(_j) + QCA8K_REG_GOL_TRUNK_ID_SHIFT(_i))
7219 -#define QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN(_i, _j) (QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(_i, _j))
7220 -#define QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT(_i, _j) (QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT_MASK << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(_i, _j))
7222 -#define QCA8K_REG_GLOBAL_FC_THRESH 0x800
7223 -#define QCA8K_GLOBAL_FC_GOL_XON_THRES_MASK GENMASK(24, 16)
7224 -#define QCA8K_GLOBAL_FC_GOL_XON_THRES(x) FIELD_PREP(QCA8K_GLOBAL_FC_GOL_XON_THRES_MASK, x)
7225 -#define QCA8K_GLOBAL_FC_GOL_XOFF_THRES_MASK GENMASK(8, 0)
7226 -#define QCA8K_GLOBAL_FC_GOL_XOFF_THRES(x) FIELD_PREP(QCA8K_GLOBAL_FC_GOL_XOFF_THRES_MASK, x)
7228 -#define QCA8K_REG_PORT_HOL_CTRL0(_i) (0x970 + (_i) * 0x8)
7229 -#define QCA8K_PORT_HOL_CTRL0_EG_PRI0_BUF_MASK GENMASK(3, 0)
7230 -#define QCA8K_PORT_HOL_CTRL0_EG_PRI0(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PRI0_BUF_MASK, x)
7231 -#define QCA8K_PORT_HOL_CTRL0_EG_PRI1_BUF_MASK GENMASK(7, 4)
7232 -#define QCA8K_PORT_HOL_CTRL0_EG_PRI1(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PRI1_BUF_MASK, x)
7233 -#define QCA8K_PORT_HOL_CTRL0_EG_PRI2_BUF_MASK GENMASK(11, 8)
7234 -#define QCA8K_PORT_HOL_CTRL0_EG_PRI2(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PRI2_BUF_MASK, x)
7235 -#define QCA8K_PORT_HOL_CTRL0_EG_PRI3_BUF_MASK GENMASK(15, 12)
7236 -#define QCA8K_PORT_HOL_CTRL0_EG_PRI3(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PRI3_BUF_MASK, x)
7237 -#define QCA8K_PORT_HOL_CTRL0_EG_PRI4_BUF_MASK GENMASK(19, 16)
7238 -#define QCA8K_PORT_HOL_CTRL0_EG_PRI4(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PRI4_BUF_MASK, x)
7239 -#define QCA8K_PORT_HOL_CTRL0_EG_PRI5_BUF_MASK GENMASK(23, 20)
7240 -#define QCA8K_PORT_HOL_CTRL0_EG_PRI5(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PRI5_BUF_MASK, x)
7241 -#define QCA8K_PORT_HOL_CTRL0_EG_PORT_BUF_MASK GENMASK(29, 24)
7242 -#define QCA8K_PORT_HOL_CTRL0_EG_PORT(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PORT_BUF_MASK, x)
7244 -#define QCA8K_REG_PORT_HOL_CTRL1(_i) (0x974 + (_i) * 0x8)
7245 -#define QCA8K_PORT_HOL_CTRL1_ING_BUF_MASK GENMASK(3, 0)
7246 -#define QCA8K_PORT_HOL_CTRL1_ING(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL1_ING_BUF_MASK, x)
7247 -#define QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN BIT(6)
7248 -#define QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN BIT(7)
7249 -#define QCA8K_PORT_HOL_CTRL1_WRED_EN BIT(8)
7250 -#define QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN BIT(16)
7252 -/* Pkt edit registers */
7253 -#define QCA8K_EGREES_VLAN_PORT_SHIFT(_i) (16 * ((_i) % 2))
7254 -#define QCA8K_EGREES_VLAN_PORT_MASK(_i) (GENMASK(11, 0) << QCA8K_EGREES_VLAN_PORT_SHIFT(_i))
7255 -#define QCA8K_EGREES_VLAN_PORT(_i, x) ((x) << QCA8K_EGREES_VLAN_PORT_SHIFT(_i))
7256 -#define QCA8K_EGRESS_VLAN(x) (0x0c70 + (4 * (x / 2)))
7259 -#define QCA8K_HROUTER_CONTROL 0xe00
7260 -#define QCA8K_HROUTER_CONTROL_GLB_LOCKTIME_M GENMASK(17, 16)
7261 -#define QCA8K_HROUTER_CONTROL_GLB_LOCKTIME_S 16
7262 -#define QCA8K_HROUTER_CONTROL_ARP_AGE_MODE 1
7263 -#define QCA8K_HROUTER_PBASED_CONTROL1 0xe08
7264 -#define QCA8K_HROUTER_PBASED_CONTROL2 0xe0c
7265 -#define QCA8K_HNAT_CONTROL 0xe38
7267 -/* MIB registers */
7268 -#define QCA8K_PORT_MIB_COUNTER(_i) (0x1000 + (_i) * 0x100)
7270 -/* QCA specific MII registers */
7271 -#define MII_ATH_MMD_ADDR 0x0d
7272 -#define MII_ATH_MMD_DATA 0x0e
7275 - QCA8K_PORT_SPEED_10M = 0,
7276 - QCA8K_PORT_SPEED_100M = 1,
7277 - QCA8K_PORT_SPEED_1000M = 2,
7278 - QCA8K_PORT_SPEED_ERR = 3,
7281 -enum qca8k_fdb_cmd {
7282 - QCA8K_FDB_FLUSH = 1,
7283 - QCA8K_FDB_LOAD = 2,
7284 - QCA8K_FDB_PURGE = 3,
7285 - QCA8K_FDB_FLUSH_PORT = 5,
7286 - QCA8K_FDB_NEXT = 6,
7287 - QCA8K_FDB_SEARCH = 7,
7290 -enum qca8k_vlan_cmd {
7291 - QCA8K_VLAN_FLUSH = 1,
7292 - QCA8K_VLAN_LOAD = 2,
7293 - QCA8K_VLAN_PURGE = 3,
7294 - QCA8K_VLAN_REMOVE_PORT = 4,
7295 - QCA8K_VLAN_NEXT = 5,
7296 - QCA8K_VLAN_READ = 6,
7299 -enum qca8k_mid_cmd {
7300 - QCA8K_MIB_FLUSH = 1,
7301 - QCA8K_MIB_FLUSH_PORT = 2,
7302 - QCA8K_MIB_CAST = 3,
7305 -struct qca8k_match_data {
7307 - bool reduced_package;
7316 -struct qca8k_mgmt_eth_data {
7317 - struct completion rw_done;
7318 - struct mutex mutex; /* Enforce one mdio read/write at time */
7324 -struct qca8k_mib_eth_data {
7325 - struct completion rw_done;
7326 - struct mutex mutex; /* Process one command at time */
7327 - refcount_t port_parsed; /* Counter to track parsed port */
7329 - u64 *data; /* pointer to ethtool data */
7332 -struct qca8k_ports_config {
7333 - bool sgmii_rx_clk_falling_edge;
7334 - bool sgmii_tx_clk_falling_edge;
7335 - bool sgmii_enable_pll;
7336 - u8 rgmii_rx_delay[QCA8K_NUM_CPU_PORTS]; /* 0: CPU port0, 1: CPU port6 */
7337 - u8 rgmii_tx_delay[QCA8K_NUM_CPU_PORTS]; /* 0: CPU port0, 1: CPU port6 */
7340 -struct qca8k_mdio_cache {
7341 -/* The 32bit switch registers are accessed indirectly. To achieve this we need
7342 - * to set the page of the register. Track the last page that was set to reduce
7346 -/* lo and hi can also be cached and from Documentation we can skip one
7347 - * extra mdio write if lo or hi is didn't change.
7353 -struct qca8k_priv {
7355 - u8 switch_revision;
7359 - /* Each bit correspond to a port. This switch can support a max of 7 port.
7360 - * Bit 1: port enabled. Bit 0: port disabled.
7362 - u8 port_enabled_map;
7363 - struct qca8k_ports_config ports_config;
7364 - struct regmap *regmap;
7365 - struct mii_bus *bus;
7366 - struct dsa_switch *ds;
7367 - struct mutex reg_mutex;
7368 - struct device *dev;
7369 - struct gpio_desc *reset_gpio;
7370 - struct net_device *mgmt_master; /* Track if mdio/mib Ethernet is available */
7371 - struct qca8k_mgmt_eth_data mgmt_eth_data;
7372 - struct qca8k_mib_eth_data mib_eth_data;
7373 - struct qca8k_mdio_cache mdio_cache;
7376 -struct qca8k_mib_desc {
7377 - unsigned int size;
7378 - unsigned int offset;
7389 -#endif /* __QCA8K_H */