1 From 027152b830434e3632ad5dd678cc5d4740358dbb Mon Sep 17 00:00:00 2001
2 From: Christian Marangi <ansuelsmth@gmail.com>
3 Date: Wed, 27 Jul 2022 13:35:12 +0200
4 Subject: [PATCH 03/14] net: dsa: qca8k: move mib struct to common code
6 The same MIB struct is used by drivers based on qca8k family switch. Move
7 it to common code to make it accessible also by other drivers.
9 Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
10 Reviewed-by: Vladimir Oltean <olteanv@gmail.com>
11 Signed-off-by: Jakub Kicinski <kuba@kernel.org>
13 drivers/net/dsa/qca/Makefile | 1 +
14 drivers/net/dsa/qca/{qca8k.c => qca8k-8xxx.c} | 51 ---------------
15 drivers/net/dsa/qca/qca8k-common.c | 63 +++++++++++++++++++
16 drivers/net/dsa/qca/qca8k.h | 3 +
17 4 files changed, 67 insertions(+), 51 deletions(-)
18 rename drivers/net/dsa/qca/{qca8k.c => qca8k-8xxx.c} (98%)
19 create mode 100644 drivers/net/dsa/qca/qca8k-common.c
21 --- a/drivers/net/dsa/qca/Makefile
22 +++ b/drivers/net/dsa/qca/Makefile
24 # SPDX-License-Identifier: GPL-2.0-only
25 obj-$(CONFIG_NET_DSA_AR9331) += ar9331.o
26 obj-$(CONFIG_NET_DSA_QCA8K) += qca8k.o
27 +qca8k-y += qca8k-common.o qca8k-8xxx.o
28 --- a/drivers/net/dsa/qca/qca8k.c
31 -// SPDX-License-Identifier: GPL-2.0
33 - * Copyright (C) 2009 Felix Fietkau <nbd@nbd.name>
34 - * Copyright (C) 2011-2012 Gabor Juhos <juhosg@openwrt.org>
35 - * Copyright (c) 2015, 2019, The Linux Foundation. All rights reserved.
36 - * Copyright (c) 2016 John Crispin <john@phrozen.org>
39 -#include <linux/module.h>
40 -#include <linux/phy.h>
41 -#include <linux/netdevice.h>
42 -#include <linux/bitfield.h>
43 -#include <linux/regmap.h>
45 -#include <linux/of_net.h>
46 -#include <linux/of_mdio.h>
47 -#include <linux/of_platform.h>
48 -#include <linux/if_bridge.h>
49 -#include <linux/mdio.h>
50 -#include <linux/phylink.h>
51 -#include <linux/gpio/consumer.h>
52 -#include <linux/etherdevice.h>
53 -#include <linux/dsa/tag_qca.h>
57 -#define MIB_DESC(_s, _o, _n) \
64 -static const struct qca8k_mib_desc ar8327_mib[] = {
65 - MIB_DESC(1, 0x00, "RxBroad"),
66 - MIB_DESC(1, 0x04, "RxPause"),
67 - MIB_DESC(1, 0x08, "RxMulti"),
68 - MIB_DESC(1, 0x0c, "RxFcsErr"),
69 - MIB_DESC(1, 0x10, "RxAlignErr"),
70 - MIB_DESC(1, 0x14, "RxRunt"),
71 - MIB_DESC(1, 0x18, "RxFragment"),
72 - MIB_DESC(1, 0x1c, "Rx64Byte"),
73 - MIB_DESC(1, 0x20, "Rx128Byte"),
74 - MIB_DESC(1, 0x24, "Rx256Byte"),
75 - MIB_DESC(1, 0x28, "Rx512Byte"),
76 - MIB_DESC(1, 0x2c, "Rx1024Byte"),
77 - MIB_DESC(1, 0x30, "Rx1518Byte"),
78 - MIB_DESC(1, 0x34, "RxMaxByte"),
79 - MIB_DESC(1, 0x38, "RxTooLong"),
80 - MIB_DESC(2, 0x3c, "RxGoodByte"),
81 - MIB_DESC(2, 0x44, "RxBadByte"),
82 - MIB_DESC(1, 0x4c, "RxOverFlow"),
83 - MIB_DESC(1, 0x50, "Filtered"),
84 - MIB_DESC(1, 0x54, "TxBroad"),
85 - MIB_DESC(1, 0x58, "TxPause"),
86 - MIB_DESC(1, 0x5c, "TxMulti"),
87 - MIB_DESC(1, 0x60, "TxUnderRun"),
88 - MIB_DESC(1, 0x64, "Tx64Byte"),
89 - MIB_DESC(1, 0x68, "Tx128Byte"),
90 - MIB_DESC(1, 0x6c, "Tx256Byte"),
91 - MIB_DESC(1, 0x70, "Tx512Byte"),
92 - MIB_DESC(1, 0x74, "Tx1024Byte"),
93 - MIB_DESC(1, 0x78, "Tx1518Byte"),
94 - MIB_DESC(1, 0x7c, "TxMaxByte"),
95 - MIB_DESC(1, 0x80, "TxOverSize"),
96 - MIB_DESC(2, 0x84, "TxByte"),
97 - MIB_DESC(1, 0x8c, "TxCollision"),
98 - MIB_DESC(1, 0x90, "TxAbortCol"),
99 - MIB_DESC(1, 0x94, "TxMultiCol"),
100 - MIB_DESC(1, 0x98, "TxSingleCol"),
101 - MIB_DESC(1, 0x9c, "TxExcDefer"),
102 - MIB_DESC(1, 0xa0, "TxDefer"),
103 - MIB_DESC(1, 0xa4, "TxLateCol"),
104 - MIB_DESC(1, 0xa8, "RXUnicast"),
105 - MIB_DESC(1, 0xac, "TXUnicast"),
109 -qca8k_split_addr(u32 regaddr, u16 *r1, u16 *r2, u16 *page)
112 - *r1 = regaddr & 0x1e;
115 - *r2 = regaddr & 0x7;
118 - *page = regaddr & 0x3ff;
122 -qca8k_set_lo(struct qca8k_priv *priv, int phy_id, u32 regnum, u16 lo)
124 - u16 *cached_lo = &priv->mdio_cache.lo;
125 - struct mii_bus *bus = priv->bus;
128 - if (lo == *cached_lo)
131 - ret = bus->write(bus, phy_id, regnum, lo);
133 - dev_err_ratelimited(&bus->dev,
134 - "failed to write qca8k 32bit lo register\n");
141 -qca8k_set_hi(struct qca8k_priv *priv, int phy_id, u32 regnum, u16 hi)
143 - u16 *cached_hi = &priv->mdio_cache.hi;
144 - struct mii_bus *bus = priv->bus;
147 - if (hi == *cached_hi)
150 - ret = bus->write(bus, phy_id, regnum, hi);
152 - dev_err_ratelimited(&bus->dev,
153 - "failed to write qca8k 32bit hi register\n");
160 -qca8k_mii_read32(struct mii_bus *bus, int phy_id, u32 regnum, u32 *val)
164 - ret = bus->read(bus, phy_id, regnum);
167 - ret = bus->read(bus, phy_id, regnum + 1);
172 - dev_err_ratelimited(&bus->dev,
173 - "failed to read qca8k 32bit register\n");
182 -qca8k_mii_write32(struct qca8k_priv *priv, int phy_id, u32 regnum, u32 val)
188 - hi = (u16)(val >> 16);
190 - ret = qca8k_set_lo(priv, phy_id, regnum, lo);
192 - ret = qca8k_set_hi(priv, phy_id, regnum + 1, hi);
196 -qca8k_set_page(struct qca8k_priv *priv, u16 page)
198 - u16 *cached_page = &priv->mdio_cache.page;
199 - struct mii_bus *bus = priv->bus;
202 - if (page == *cached_page)
205 - ret = bus->write(bus, 0x18, 0, page);
207 - dev_err_ratelimited(&bus->dev,
208 - "failed to set qca8k page\n");
212 - *cached_page = page;
213 - usleep_range(1000, 2000);
218 -qca8k_read(struct qca8k_priv *priv, u32 reg, u32 *val)
220 - return regmap_read(priv->regmap, reg, val);
224 -qca8k_write(struct qca8k_priv *priv, u32 reg, u32 val)
226 - return regmap_write(priv->regmap, reg, val);
230 -qca8k_rmw(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val)
232 - return regmap_update_bits(priv->regmap, reg, mask, write_val);
235 -static void qca8k_rw_reg_ack_handler(struct dsa_switch *ds, struct sk_buff *skb)
237 - struct qca8k_mgmt_eth_data *mgmt_eth_data;
238 - struct qca8k_priv *priv = ds->priv;
239 - struct qca_mgmt_ethhdr *mgmt_ethhdr;
242 - mgmt_ethhdr = (struct qca_mgmt_ethhdr *)skb_mac_header(skb);
243 - mgmt_eth_data = &priv->mgmt_eth_data;
245 - cmd = FIELD_GET(QCA_HDR_MGMT_CMD, mgmt_ethhdr->command);
246 - len = FIELD_GET(QCA_HDR_MGMT_LENGTH, mgmt_ethhdr->command);
248 - /* Make sure the seq match the requested packet */
249 - if (mgmt_ethhdr->seq == mgmt_eth_data->seq)
250 - mgmt_eth_data->ack = true;
252 - if (cmd == MDIO_READ) {
253 - mgmt_eth_data->data[0] = mgmt_ethhdr->mdio_data;
255 - /* Get the rest of the 12 byte of data.
256 - * The read/write function will extract the requested data.
258 - if (len > QCA_HDR_MGMT_DATA1_LEN)
259 - memcpy(mgmt_eth_data->data + 1, skb->data,
260 - QCA_HDR_MGMT_DATA2_LEN);
263 - complete(&mgmt_eth_data->rw_done);
266 -static struct sk_buff *qca8k_alloc_mdio_header(enum mdio_cmd cmd, u32 reg, u32 *val,
267 - int priority, unsigned int len)
269 - struct qca_mgmt_ethhdr *mgmt_ethhdr;
270 - unsigned int real_len;
271 - struct sk_buff *skb;
275 - skb = dev_alloc_skb(QCA_HDR_MGMT_PKT_LEN);
279 - /* Max value for len reg is 15 (0xf) but the switch actually return 16 byte
280 - * Actually for some reason the steps are:
282 - * 1-4: first 4 byte
283 - * 5-6: first 12 byte
284 - * 7-15: all 16 byte
291 - skb_reset_mac_header(skb);
292 - skb_set_network_header(skb, skb->len);
294 - mgmt_ethhdr = skb_push(skb, QCA_HDR_MGMT_HEADER_LEN + QCA_HDR_LEN);
296 - hdr = FIELD_PREP(QCA_HDR_XMIT_VERSION, QCA_HDR_VERSION);
297 - hdr |= FIELD_PREP(QCA_HDR_XMIT_PRIORITY, priority);
298 - hdr |= QCA_HDR_XMIT_FROM_CPU;
299 - hdr |= FIELD_PREP(QCA_HDR_XMIT_DP_BIT, BIT(0));
300 - hdr |= FIELD_PREP(QCA_HDR_XMIT_CONTROL, QCA_HDR_XMIT_TYPE_RW_REG);
302 - mgmt_ethhdr->command = FIELD_PREP(QCA_HDR_MGMT_ADDR, reg);
303 - mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_LENGTH, real_len);
304 - mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_CMD, cmd);
305 - mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_CHECK_CODE,
306 - QCA_HDR_MGMT_CHECK_CODE_VAL);
308 - if (cmd == MDIO_WRITE)
309 - mgmt_ethhdr->mdio_data = *val;
311 - mgmt_ethhdr->hdr = htons(hdr);
313 - data2 = skb_put_zero(skb, QCA_HDR_MGMT_DATA2_LEN + QCA_HDR_MGMT_PADDING_LEN);
314 - if (cmd == MDIO_WRITE && len > QCA_HDR_MGMT_DATA1_LEN)
315 - memcpy(data2, val + 1, len - QCA_HDR_MGMT_DATA1_LEN);
320 -static void qca8k_mdio_header_fill_seq_num(struct sk_buff *skb, u32 seq_num)
322 - struct qca_mgmt_ethhdr *mgmt_ethhdr;
324 - mgmt_ethhdr = (struct qca_mgmt_ethhdr *)skb->data;
325 - mgmt_ethhdr->seq = FIELD_PREP(QCA_HDR_MGMT_SEQ_NUM, seq_num);
328 -static int qca8k_read_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
330 - struct qca8k_mgmt_eth_data *mgmt_eth_data = &priv->mgmt_eth_data;
331 - struct sk_buff *skb;
335 - skb = qca8k_alloc_mdio_header(MDIO_READ, reg, NULL,
336 - QCA8K_ETHERNET_MDIO_PRIORITY, len);
340 - mutex_lock(&mgmt_eth_data->mutex);
342 - /* Check mgmt_master if is operational */
343 - if (!priv->mgmt_master) {
345 - mutex_unlock(&mgmt_eth_data->mutex);
349 - skb->dev = priv->mgmt_master;
351 - reinit_completion(&mgmt_eth_data->rw_done);
353 - /* Increment seq_num and set it in the mdio pkt */
354 - mgmt_eth_data->seq++;
355 - qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
356 - mgmt_eth_data->ack = false;
358 - dev_queue_xmit(skb);
360 - ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
361 - msecs_to_jiffies(QCA8K_ETHERNET_TIMEOUT));
363 - *val = mgmt_eth_data->data[0];
364 - if (len > QCA_HDR_MGMT_DATA1_LEN)
365 - memcpy(val + 1, mgmt_eth_data->data + 1, len - QCA_HDR_MGMT_DATA1_LEN);
367 - ack = mgmt_eth_data->ack;
369 - mutex_unlock(&mgmt_eth_data->mutex);
380 -static int qca8k_write_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
382 - struct qca8k_mgmt_eth_data *mgmt_eth_data = &priv->mgmt_eth_data;
383 - struct sk_buff *skb;
387 - skb = qca8k_alloc_mdio_header(MDIO_WRITE, reg, val,
388 - QCA8K_ETHERNET_MDIO_PRIORITY, len);
392 - mutex_lock(&mgmt_eth_data->mutex);
394 - /* Check mgmt_master if is operational */
395 - if (!priv->mgmt_master) {
397 - mutex_unlock(&mgmt_eth_data->mutex);
401 - skb->dev = priv->mgmt_master;
403 - reinit_completion(&mgmt_eth_data->rw_done);
405 - /* Increment seq_num and set it in the mdio pkt */
406 - mgmt_eth_data->seq++;
407 - qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
408 - mgmt_eth_data->ack = false;
410 - dev_queue_xmit(skb);
412 - ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
413 - msecs_to_jiffies(QCA8K_ETHERNET_TIMEOUT));
415 - ack = mgmt_eth_data->ack;
417 - mutex_unlock(&mgmt_eth_data->mutex);
429 -qca8k_regmap_update_bits_eth(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val)
434 - ret = qca8k_read_eth(priv, reg, &val, sizeof(val));
441 - return qca8k_write_eth(priv, reg, &val, sizeof(val));
445 -qca8k_bulk_read(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
447 - int i, count = len / sizeof(u32), ret;
449 - if (priv->mgmt_master && !qca8k_read_eth(priv, reg, val, len))
452 - for (i = 0; i < count; i++) {
453 - ret = regmap_read(priv->regmap, reg + (i * 4), val + i);
462 -qca8k_bulk_write(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
464 - int i, count = len / sizeof(u32), ret;
467 - if (priv->mgmt_master && !qca8k_write_eth(priv, reg, val, len))
470 - for (i = 0; i < count; i++) {
473 - ret = regmap_write(priv->regmap, reg + (i * 4), tmp);
482 -qca8k_regmap_read(void *ctx, uint32_t reg, uint32_t *val)
484 - struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
485 - struct mii_bus *bus = priv->bus;
489 - if (!qca8k_read_eth(priv, reg, val, sizeof(*val)))
492 - qca8k_split_addr(reg, &r1, &r2, &page);
494 - mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
496 - ret = qca8k_set_page(priv, page);
500 - ret = qca8k_mii_read32(bus, 0x10 | r2, r1, val);
503 - mutex_unlock(&bus->mdio_lock);
508 -qca8k_regmap_write(void *ctx, uint32_t reg, uint32_t val)
510 - struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
511 - struct mii_bus *bus = priv->bus;
515 - if (!qca8k_write_eth(priv, reg, &val, sizeof(val)))
518 - qca8k_split_addr(reg, &r1, &r2, &page);
520 - mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
522 - ret = qca8k_set_page(priv, page);
526 - qca8k_mii_write32(priv, 0x10 | r2, r1, val);
529 - mutex_unlock(&bus->mdio_lock);
534 -qca8k_regmap_update_bits(void *ctx, uint32_t reg, uint32_t mask, uint32_t write_val)
536 - struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
537 - struct mii_bus *bus = priv->bus;
542 - if (!qca8k_regmap_update_bits_eth(priv, reg, mask, write_val))
545 - qca8k_split_addr(reg, &r1, &r2, &page);
547 - mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
549 - ret = qca8k_set_page(priv, page);
553 - ret = qca8k_mii_read32(bus, 0x10 | r2, r1, &val);
559 - qca8k_mii_write32(priv, 0x10 | r2, r1, val);
562 - mutex_unlock(&bus->mdio_lock);
567 -static const struct regmap_range qca8k_readable_ranges[] = {
568 - regmap_reg_range(0x0000, 0x00e4), /* Global control */
569 - regmap_reg_range(0x0100, 0x0168), /* EEE control */
570 - regmap_reg_range(0x0200, 0x0270), /* Parser control */
571 - regmap_reg_range(0x0400, 0x0454), /* ACL */
572 - regmap_reg_range(0x0600, 0x0718), /* Lookup */
573 - regmap_reg_range(0x0800, 0x0b70), /* QM */
574 - regmap_reg_range(0x0c00, 0x0c80), /* PKT */
575 - regmap_reg_range(0x0e00, 0x0e98), /* L3 */
576 - regmap_reg_range(0x1000, 0x10ac), /* MIB - Port0 */
577 - regmap_reg_range(0x1100, 0x11ac), /* MIB - Port1 */
578 - regmap_reg_range(0x1200, 0x12ac), /* MIB - Port2 */
579 - regmap_reg_range(0x1300, 0x13ac), /* MIB - Port3 */
580 - regmap_reg_range(0x1400, 0x14ac), /* MIB - Port4 */
581 - regmap_reg_range(0x1500, 0x15ac), /* MIB - Port5 */
582 - regmap_reg_range(0x1600, 0x16ac), /* MIB - Port6 */
586 -static const struct regmap_access_table qca8k_readable_table = {
587 - .yes_ranges = qca8k_readable_ranges,
588 - .n_yes_ranges = ARRAY_SIZE(qca8k_readable_ranges),
591 -static struct regmap_config qca8k_regmap_config = {
595 - .max_register = 0x16ac, /* end MIB - Port6 range */
596 - .reg_read = qca8k_regmap_read,
597 - .reg_write = qca8k_regmap_write,
598 - .reg_update_bits = qca8k_regmap_update_bits,
599 - .rd_table = &qca8k_readable_table,
600 - .disable_locking = true, /* Locking is handled by qca8k read/write */
601 - .cache_type = REGCACHE_NONE, /* Explicitly disable CACHE */
605 -qca8k_busy_wait(struct qca8k_priv *priv, u32 reg, u32 mask)
609 - return regmap_read_poll_timeout(priv->regmap, reg, val, !(val & mask), 0,
610 - QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC);
614 -qca8k_fdb_read(struct qca8k_priv *priv, struct qca8k_fdb *fdb)
619 - /* load the ARL table into an array */
620 - ret = qca8k_bulk_read(priv, QCA8K_REG_ATU_DATA0, reg, sizeof(reg));
625 - fdb->vid = FIELD_GET(QCA8K_ATU_VID_MASK, reg[2]);
626 - /* aging - 67:64 */
627 - fdb->aging = FIELD_GET(QCA8K_ATU_STATUS_MASK, reg[2]);
628 - /* portmask - 54:48 */
629 - fdb->port_mask = FIELD_GET(QCA8K_ATU_PORT_MASK, reg[1]);
631 - fdb->mac[0] = FIELD_GET(QCA8K_ATU_ADDR0_MASK, reg[1]);
632 - fdb->mac[1] = FIELD_GET(QCA8K_ATU_ADDR1_MASK, reg[1]);
633 - fdb->mac[2] = FIELD_GET(QCA8K_ATU_ADDR2_MASK, reg[0]);
634 - fdb->mac[3] = FIELD_GET(QCA8K_ATU_ADDR3_MASK, reg[0]);
635 - fdb->mac[4] = FIELD_GET(QCA8K_ATU_ADDR4_MASK, reg[0]);
636 - fdb->mac[5] = FIELD_GET(QCA8K_ATU_ADDR5_MASK, reg[0]);
642 -qca8k_fdb_write(struct qca8k_priv *priv, u16 vid, u8 port_mask, const u8 *mac,
645 - u32 reg[3] = { 0 };
648 - reg[2] = FIELD_PREP(QCA8K_ATU_VID_MASK, vid);
649 - /* aging - 67:64 */
650 - reg[2] |= FIELD_PREP(QCA8K_ATU_STATUS_MASK, aging);
651 - /* portmask - 54:48 */
652 - reg[1] = FIELD_PREP(QCA8K_ATU_PORT_MASK, port_mask);
654 - reg[1] |= FIELD_PREP(QCA8K_ATU_ADDR0_MASK, mac[0]);
655 - reg[1] |= FIELD_PREP(QCA8K_ATU_ADDR1_MASK, mac[1]);
656 - reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR2_MASK, mac[2]);
657 - reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR3_MASK, mac[3]);
658 - reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR4_MASK, mac[4]);
659 - reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR5_MASK, mac[5]);
661 - /* load the array into the ARL table */
662 - qca8k_bulk_write(priv, QCA8K_REG_ATU_DATA0, reg, sizeof(reg));
666 -qca8k_fdb_access(struct qca8k_priv *priv, enum qca8k_fdb_cmd cmd, int port)
671 - /* Set the command and FDB index */
672 - reg = QCA8K_ATU_FUNC_BUSY;
675 - reg |= QCA8K_ATU_FUNC_PORT_EN;
676 - reg |= FIELD_PREP(QCA8K_ATU_FUNC_PORT_MASK, port);
679 - /* Write the function register triggering the table access */
680 - ret = qca8k_write(priv, QCA8K_REG_ATU_FUNC, reg);
684 - /* wait for completion */
685 - ret = qca8k_busy_wait(priv, QCA8K_REG_ATU_FUNC, QCA8K_ATU_FUNC_BUSY);
689 - /* Check for table full violation when adding an entry */
690 - if (cmd == QCA8K_FDB_LOAD) {
691 - ret = qca8k_read(priv, QCA8K_REG_ATU_FUNC, &reg);
694 - if (reg & QCA8K_ATU_FUNC_FULL)
702 -qca8k_fdb_next(struct qca8k_priv *priv, struct qca8k_fdb *fdb, int port)
706 - qca8k_fdb_write(priv, fdb->vid, fdb->port_mask, fdb->mac, fdb->aging);
707 - ret = qca8k_fdb_access(priv, QCA8K_FDB_NEXT, port);
711 - return qca8k_fdb_read(priv, fdb);
715 -qca8k_fdb_add(struct qca8k_priv *priv, const u8 *mac, u16 port_mask,
720 - mutex_lock(&priv->reg_mutex);
721 - qca8k_fdb_write(priv, vid, port_mask, mac, aging);
722 - ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
723 - mutex_unlock(&priv->reg_mutex);
729 -qca8k_fdb_del(struct qca8k_priv *priv, const u8 *mac, u16 port_mask, u16 vid)
733 - mutex_lock(&priv->reg_mutex);
734 - qca8k_fdb_write(priv, vid, port_mask, mac, 0);
735 - ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
736 - mutex_unlock(&priv->reg_mutex);
742 -qca8k_fdb_flush(struct qca8k_priv *priv)
744 - mutex_lock(&priv->reg_mutex);
745 - qca8k_fdb_access(priv, QCA8K_FDB_FLUSH, -1);
746 - mutex_unlock(&priv->reg_mutex);
750 -qca8k_fdb_search_and_insert(struct qca8k_priv *priv, u8 port_mask,
751 - const u8 *mac, u16 vid)
753 - struct qca8k_fdb fdb = { 0 };
756 - mutex_lock(&priv->reg_mutex);
758 - qca8k_fdb_write(priv, vid, 0, mac, 0);
759 - ret = qca8k_fdb_access(priv, QCA8K_FDB_SEARCH, -1);
763 - ret = qca8k_fdb_read(priv, &fdb);
767 - /* Rule exist. Delete first */
769 - ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
774 - /* Add port to fdb portmask */
775 - fdb.port_mask |= port_mask;
777 - qca8k_fdb_write(priv, vid, fdb.port_mask, mac, fdb.aging);
778 - ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
781 - mutex_unlock(&priv->reg_mutex);
786 -qca8k_fdb_search_and_del(struct qca8k_priv *priv, u8 port_mask,
787 - const u8 *mac, u16 vid)
789 - struct qca8k_fdb fdb = { 0 };
792 - mutex_lock(&priv->reg_mutex);
794 - qca8k_fdb_write(priv, vid, 0, mac, 0);
795 - ret = qca8k_fdb_access(priv, QCA8K_FDB_SEARCH, -1);
799 - /* Rule doesn't exist. Why delete? */
805 - ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
809 - /* Only port in the rule is this port. Don't re insert */
810 - if (fdb.port_mask == port_mask)
813 - /* Remove port from port mask */
814 - fdb.port_mask &= ~port_mask;
816 - qca8k_fdb_write(priv, vid, fdb.port_mask, mac, fdb.aging);
817 - ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
820 - mutex_unlock(&priv->reg_mutex);
825 -qca8k_vlan_access(struct qca8k_priv *priv, enum qca8k_vlan_cmd cmd, u16 vid)
830 - /* Set the command and VLAN index */
831 - reg = QCA8K_VTU_FUNC1_BUSY;
833 - reg |= FIELD_PREP(QCA8K_VTU_FUNC1_VID_MASK, vid);
835 - /* Write the function register triggering the table access */
836 - ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC1, reg);
840 - /* wait for completion */
841 - ret = qca8k_busy_wait(priv, QCA8K_REG_VTU_FUNC1, QCA8K_VTU_FUNC1_BUSY);
845 - /* Check for table full violation when adding an entry */
846 - if (cmd == QCA8K_VLAN_LOAD) {
847 - ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC1, &reg);
850 - if (reg & QCA8K_VTU_FUNC1_FULL)
858 -qca8k_vlan_add(struct qca8k_priv *priv, u8 port, u16 vid, bool untagged)
864 - We do the right thing with VLAN 0 and treat it as untagged while
865 - preserving the tag on egress.
870 - mutex_lock(&priv->reg_mutex);
871 - ret = qca8k_vlan_access(priv, QCA8K_VLAN_READ, vid);
875 - ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC0, &reg);
878 - reg |= QCA8K_VTU_FUNC0_VALID | QCA8K_VTU_FUNC0_IVL_EN;
879 - reg &= ~QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(port);
881 - reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_UNTAG(port);
883 - reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_TAG(port);
885 - ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg);
888 - ret = qca8k_vlan_access(priv, QCA8K_VLAN_LOAD, vid);
891 - mutex_unlock(&priv->reg_mutex);
897 -qca8k_vlan_del(struct qca8k_priv *priv, u8 port, u16 vid)
903 - mutex_lock(&priv->reg_mutex);
904 - ret = qca8k_vlan_access(priv, QCA8K_VLAN_READ, vid);
908 - ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC0, &reg);
911 - reg &= ~QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(port);
912 - reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(port);
914 - /* Check if we're the last member to be removed */
916 - for (i = 0; i < QCA8K_NUM_PORTS; i++) {
917 - mask = QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(i);
919 - if ((reg & mask) != mask) {
926 - ret = qca8k_vlan_access(priv, QCA8K_VLAN_PURGE, vid);
928 - ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg);
931 - ret = qca8k_vlan_access(priv, QCA8K_VLAN_LOAD, vid);
935 - mutex_unlock(&priv->reg_mutex);
941 -qca8k_mib_init(struct qca8k_priv *priv)
945 - mutex_lock(&priv->reg_mutex);
946 - ret = regmap_update_bits(priv->regmap, QCA8K_REG_MIB,
947 - QCA8K_MIB_FUNC | QCA8K_MIB_BUSY,
948 - FIELD_PREP(QCA8K_MIB_FUNC, QCA8K_MIB_FLUSH) |
953 - ret = qca8k_busy_wait(priv, QCA8K_REG_MIB, QCA8K_MIB_BUSY);
957 - ret = regmap_set_bits(priv->regmap, QCA8K_REG_MIB, QCA8K_MIB_CPU_KEEP);
961 - ret = qca8k_write(priv, QCA8K_REG_MODULE_EN, QCA8K_MODULE_EN_MIB);
964 - mutex_unlock(&priv->reg_mutex);
969 -qca8k_port_set_status(struct qca8k_priv *priv, int port, int enable)
971 - u32 mask = QCA8K_PORT_STATUS_TXMAC | QCA8K_PORT_STATUS_RXMAC;
973 - /* Port 0 and 6 have no internal PHY */
974 - if (port > 0 && port < 6)
975 - mask |= QCA8K_PORT_STATUS_LINK_AUTO;
978 - regmap_set_bits(priv->regmap, QCA8K_REG_PORT_STATUS(port), mask);
980 - regmap_clear_bits(priv->regmap, QCA8K_REG_PORT_STATUS(port), mask);
984 -qca8k_phy_eth_busy_wait(struct qca8k_mgmt_eth_data *mgmt_eth_data,
985 - struct sk_buff *read_skb, u32 *val)
987 - struct sk_buff *skb = skb_copy(read_skb, GFP_KERNEL);
991 - reinit_completion(&mgmt_eth_data->rw_done);
993 - /* Increment seq_num and set it in the copy pkt */
994 - mgmt_eth_data->seq++;
995 - qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
996 - mgmt_eth_data->ack = false;
998 - dev_queue_xmit(skb);
1000 - ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
1001 - QCA8K_ETHERNET_TIMEOUT);
1003 - ack = mgmt_eth_data->ack;
1006 - return -ETIMEDOUT;
1011 - *val = mgmt_eth_data->data[0];
1017 -qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy,
1018 - int regnum, u16 data)
1020 - struct sk_buff *write_skb, *clear_skb, *read_skb;
1021 - struct qca8k_mgmt_eth_data *mgmt_eth_data;
1022 - u32 write_val, clear_val = 0, val;
1023 - struct net_device *mgmt_master;
1027 - if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
1030 - mgmt_eth_data = &priv->mgmt_eth_data;
1032 - write_val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
1033 - QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
1034 - QCA8K_MDIO_MASTER_REG_ADDR(regnum);
1037 - write_val |= QCA8K_MDIO_MASTER_READ;
1039 - write_val |= QCA8K_MDIO_MASTER_WRITE;
1040 - write_val |= QCA8K_MDIO_MASTER_DATA(data);
1043 - /* Prealloc all the needed skb before the lock */
1044 - write_skb = qca8k_alloc_mdio_header(MDIO_WRITE, QCA8K_MDIO_MASTER_CTRL, &write_val,
1045 - QCA8K_ETHERNET_PHY_PRIORITY, sizeof(write_val));
1049 - clear_skb = qca8k_alloc_mdio_header(MDIO_WRITE, QCA8K_MDIO_MASTER_CTRL, &clear_val,
1050 - QCA8K_ETHERNET_PHY_PRIORITY, sizeof(clear_val));
1053 - goto err_clear_skb;
1056 - read_skb = qca8k_alloc_mdio_header(MDIO_READ, QCA8K_MDIO_MASTER_CTRL, &clear_val,
1057 - QCA8K_ETHERNET_PHY_PRIORITY, sizeof(clear_val));
1060 - goto err_read_skb;
1063 - /* Actually start the request:
1064 - * 1. Send mdio master packet
1065 - * 2. Busy Wait for mdio master command
1066 - * 3. Get the data if we are reading
1067 - * 4. Reset the mdio master (even with error)
1069 - mutex_lock(&mgmt_eth_data->mutex);
1071 - /* Check if mgmt_master is operational */
1072 - mgmt_master = priv->mgmt_master;
1073 - if (!mgmt_master) {
1074 - mutex_unlock(&mgmt_eth_data->mutex);
1076 - goto err_mgmt_master;
1079 - read_skb->dev = mgmt_master;
1080 - clear_skb->dev = mgmt_master;
1081 - write_skb->dev = mgmt_master;
1083 - reinit_completion(&mgmt_eth_data->rw_done);
1085 - /* Increment seq_num and set it in the write pkt */
1086 - mgmt_eth_data->seq++;
1087 - qca8k_mdio_header_fill_seq_num(write_skb, mgmt_eth_data->seq);
1088 - mgmt_eth_data->ack = false;
1090 - dev_queue_xmit(write_skb);
1092 - ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
1093 - QCA8K_ETHERNET_TIMEOUT);
1095 - ack = mgmt_eth_data->ack;
1099 - kfree_skb(read_skb);
1105 - kfree_skb(read_skb);
1109 - ret = read_poll_timeout(qca8k_phy_eth_busy_wait, ret1,
1110 - !(val & QCA8K_MDIO_MASTER_BUSY), 0,
1111 - QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false,
1112 - mgmt_eth_data, read_skb, &val);
1114 - if (ret < 0 && ret1 < 0) {
1120 - reinit_completion(&mgmt_eth_data->rw_done);
1122 - /* Increment seq_num and set it in the read pkt */
1123 - mgmt_eth_data->seq++;
1124 - qca8k_mdio_header_fill_seq_num(read_skb, mgmt_eth_data->seq);
1125 - mgmt_eth_data->ack = false;
1127 - dev_queue_xmit(read_skb);
1129 - ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
1130 - QCA8K_ETHERNET_TIMEOUT);
1132 - ack = mgmt_eth_data->ack;
1144 - ret = mgmt_eth_data->data[0] & QCA8K_MDIO_MASTER_DATA_MASK;
1146 - kfree_skb(read_skb);
1149 - reinit_completion(&mgmt_eth_data->rw_done);
1151 - /* Increment seq_num and set it in the clear pkt */
1152 - mgmt_eth_data->seq++;
1153 - qca8k_mdio_header_fill_seq_num(clear_skb, mgmt_eth_data->seq);
1154 - mgmt_eth_data->ack = false;
1156 - dev_queue_xmit(clear_skb);
1158 - wait_for_completion_timeout(&mgmt_eth_data->rw_done,
1159 - QCA8K_ETHERNET_TIMEOUT);
1161 - mutex_unlock(&mgmt_eth_data->mutex);
1165 - /* Error handling before lock */
1167 - kfree_skb(read_skb);
1169 - kfree_skb(clear_skb);
1171 - kfree_skb(write_skb);
1177 -qca8k_port_to_phy(int port)
1179 - /* From Andrew Lunn:
1180 - * Port 0 has no internal phy.
1181 - * Port 1 has an internal PHY at MDIO address 0.
1182 - * Port 2 has an internal PHY at MDIO address 1.
1184 - * Port 5 has an internal PHY at MDIO address 4.
1185 - * Port 6 has no internal PHY.
1192 -qca8k_mdio_busy_wait(struct mii_bus *bus, u32 reg, u32 mask)
1198 - qca8k_split_addr(reg, &r1, &r2, &page);
1200 - ret = read_poll_timeout(qca8k_mii_read32, ret1, !(val & mask), 0,
1201 - QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false,
1202 - bus, 0x10 | r2, r1, &val);
1204 - /* Check if qca8k_read has failed for a different reason
1205 - * before returnting -ETIMEDOUT
1207 - if (ret < 0 && ret1 < 0)
1214 -qca8k_mdio_write(struct qca8k_priv *priv, int phy, int regnum, u16 data)
1216 - struct mii_bus *bus = priv->bus;
1221 - if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
1224 - val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
1225 - QCA8K_MDIO_MASTER_WRITE | QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
1226 - QCA8K_MDIO_MASTER_REG_ADDR(regnum) |
1227 - QCA8K_MDIO_MASTER_DATA(data);
1229 - qca8k_split_addr(QCA8K_MDIO_MASTER_CTRL, &r1, &r2, &page);
1231 - mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
1233 - ret = qca8k_set_page(priv, page);
1237 - qca8k_mii_write32(priv, 0x10 | r2, r1, val);
1239 - ret = qca8k_mdio_busy_wait(bus, QCA8K_MDIO_MASTER_CTRL,
1240 - QCA8K_MDIO_MASTER_BUSY);
1243 - /* even if the busy_wait timeouts try to clear the MASTER_EN */
1244 - qca8k_mii_write32(priv, 0x10 | r2, r1, 0);
1246 - mutex_unlock(&bus->mdio_lock);
1252 -qca8k_mdio_read(struct qca8k_priv *priv, int phy, int regnum)
1254 - struct mii_bus *bus = priv->bus;
1259 - if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
1262 - val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
1263 - QCA8K_MDIO_MASTER_READ | QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
1264 - QCA8K_MDIO_MASTER_REG_ADDR(regnum);
1266 - qca8k_split_addr(QCA8K_MDIO_MASTER_CTRL, &r1, &r2, &page);
1268 - mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
1270 - ret = qca8k_set_page(priv, page);
1274 - qca8k_mii_write32(priv, 0x10 | r2, r1, val);
1276 - ret = qca8k_mdio_busy_wait(bus, QCA8K_MDIO_MASTER_CTRL,
1277 - QCA8K_MDIO_MASTER_BUSY);
1281 - ret = qca8k_mii_read32(bus, 0x10 | r2, r1, &val);
1284 - /* even if the busy_wait timeouts try to clear the MASTER_EN */
1285 - qca8k_mii_write32(priv, 0x10 | r2, r1, 0);
1287 - mutex_unlock(&bus->mdio_lock);
1290 - ret = val & QCA8K_MDIO_MASTER_DATA_MASK;
1296 -qca8k_internal_mdio_write(struct mii_bus *slave_bus, int phy, int regnum, u16 data)
1298 - struct qca8k_priv *priv = slave_bus->priv;
1301 - /* Use mdio Ethernet when available, fallback to legacy one on error */
1302 - ret = qca8k_phy_eth_command(priv, false, phy, regnum, data);
1306 - return qca8k_mdio_write(priv, phy, regnum, data);
1310 -qca8k_internal_mdio_read(struct mii_bus *slave_bus, int phy, int regnum)
1312 - struct qca8k_priv *priv = slave_bus->priv;
1315 - /* Use mdio Ethernet when available, fallback to legacy one on error */
1316 - ret = qca8k_phy_eth_command(priv, true, phy, regnum, 0);
1320 - ret = qca8k_mdio_read(priv, phy, regnum);
1329 -qca8k_legacy_mdio_write(struct mii_bus *slave_bus, int port, int regnum, u16 data)
1331 - port = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
1333 - return qca8k_internal_mdio_write(slave_bus, port, regnum, data);
1337 -qca8k_legacy_mdio_read(struct mii_bus *slave_bus, int port, int regnum)
1339 - port = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
1341 - return qca8k_internal_mdio_read(slave_bus, port, regnum);
1345 -qca8k_mdio_register(struct qca8k_priv *priv)
1347 - struct dsa_switch *ds = priv->ds;
1348 - struct device_node *mdio;
1349 - struct mii_bus *bus;
1351 - bus = devm_mdiobus_alloc(ds->dev);
1355 - bus->priv = (void *)priv;
1356 - snprintf(bus->id, MII_BUS_ID_SIZE, "qca8k-%d.%d",
1357 - ds->dst->index, ds->index);
1358 - bus->parent = ds->dev;
1359 - bus->phy_mask = ~ds->phys_mii_mask;
1360 - ds->slave_mii_bus = bus;
1362 - /* Check if the devicetree declare the port:phy mapping */
1363 - mdio = of_get_child_by_name(priv->dev->of_node, "mdio");
1364 - if (of_device_is_available(mdio)) {
1365 - bus->name = "qca8k slave mii";
1366 - bus->read = qca8k_internal_mdio_read;
1367 - bus->write = qca8k_internal_mdio_write;
1368 - return devm_of_mdiobus_register(priv->dev, bus, mdio);
1371 - /* If a mapping can't be found the legacy mapping is used,
1372 - * using the qca8k_port_to_phy function
1374 - bus->name = "qca8k-legacy slave mii";
1375 - bus->read = qca8k_legacy_mdio_read;
1376 - bus->write = qca8k_legacy_mdio_write;
1377 - return devm_mdiobus_register(priv->dev, bus);
1381 -qca8k_setup_mdio_bus(struct qca8k_priv *priv)
1383 - u32 internal_mdio_mask = 0, external_mdio_mask = 0, reg;
1384 - struct device_node *ports, *port;
1385 - phy_interface_t mode;
1388 - ports = of_get_child_by_name(priv->dev->of_node, "ports");
1390 - ports = of_get_child_by_name(priv->dev->of_node, "ethernet-ports");
1395 - for_each_available_child_of_node(ports, port) {
1396 - err = of_property_read_u32(port, "reg", &reg);
1398 - of_node_put(port);
1399 - of_node_put(ports);
1403 - if (!dsa_is_user_port(priv->ds, reg))
1406 - of_get_phy_mode(port, &mode);
1408 - if (of_property_read_bool(port, "phy-handle") &&
1409 - mode != PHY_INTERFACE_MODE_INTERNAL)
1410 - external_mdio_mask |= BIT(reg);
1412 - internal_mdio_mask |= BIT(reg);
1415 - of_node_put(ports);
1416 - if (!external_mdio_mask && !internal_mdio_mask) {
1417 - dev_err(priv->dev, "no PHYs are defined.\n");
1421 - /* The QCA8K_MDIO_MASTER_EN Bit, which grants access to PHYs through
1422 - * the MDIO_MASTER register also _disconnects_ the external MDC
1423 - * passthrough to the internal PHYs. It's not possible to use both
1424 - * configurations at the same time!
1426 - * Because this came up during the review process:
1427 - * If the external mdio-bus driver is capable magically disabling
1428 - * the QCA8K_MDIO_MASTER_EN and mutex/spin-locking out the qca8k's
1429 - * accessors for the time being, it would be possible to pull this
1432 - if (!!external_mdio_mask && !!internal_mdio_mask) {
1433 - dev_err(priv->dev, "either internal or external mdio bus configuration is supported.\n");
1437 - if (external_mdio_mask) {
1438 - /* Make sure to disable the internal mdio bus in cases
1439 - * a dt-overlay and driver reload changed the configuration
1442 - return regmap_clear_bits(priv->regmap, QCA8K_MDIO_MASTER_CTRL,
1443 - QCA8K_MDIO_MASTER_EN);
1446 - return qca8k_mdio_register(priv);
1450 -qca8k_setup_mac_pwr_sel(struct qca8k_priv *priv)
1455 - /* SoC specific settings for ipq8064.
1456 - * If more device require this consider adding
1457 - * a dedicated binding.
1459 - if (of_machine_is_compatible("qcom,ipq8064"))
1460 - mask |= QCA8K_MAC_PWR_RGMII0_1_8V;
1462 - /* SoC specific settings for ipq8065 */
1463 - if (of_machine_is_compatible("qcom,ipq8065"))
1464 - mask |= QCA8K_MAC_PWR_RGMII1_1_8V;
1467 - ret = qca8k_rmw(priv, QCA8K_REG_MAC_PWR_SEL,
1468 - QCA8K_MAC_PWR_RGMII0_1_8V |
1469 - QCA8K_MAC_PWR_RGMII1_1_8V,
1476 -static int qca8k_find_cpu_port(struct dsa_switch *ds)
1478 - struct qca8k_priv *priv = ds->priv;
1480 - /* Find the connected cpu port. Valid port are 0 or 6 */
1481 - if (dsa_is_cpu_port(ds, 0))
1484 - dev_dbg(priv->dev, "port 0 is not the CPU port. Checking port 6");
1486 - if (dsa_is_cpu_port(ds, 6))
1493 -qca8k_setup_of_pws_reg(struct qca8k_priv *priv)
1495 - const struct qca8k_match_data *data = priv->info;
1496 - struct device_node *node = priv->dev->of_node;
1500 - /* QCA8327 require to set to the correct mode.
1501 - * His bigger brother QCA8328 have the 172 pin layout.
1502 - * Should be applied by default but we set this just to make sure.
1504 - if (priv->switch_id == QCA8K_ID_QCA8327) {
1505 - /* Set the correct package of 148 pin for QCA8327 */
1506 - if (data->reduced_package)
1507 - val |= QCA8327_PWS_PACKAGE148_EN;
1509 - ret = qca8k_rmw(priv, QCA8K_REG_PWS, QCA8327_PWS_PACKAGE148_EN,
1515 - if (of_property_read_bool(node, "qca,ignore-power-on-sel"))
1516 - val |= QCA8K_PWS_POWER_ON_SEL;
1518 - if (of_property_read_bool(node, "qca,led-open-drain")) {
1519 - if (!(val & QCA8K_PWS_POWER_ON_SEL)) {
1520 - dev_err(priv->dev, "qca,led-open-drain require qca,ignore-power-on-sel to be set.");
1524 - val |= QCA8K_PWS_LED_OPEN_EN_CSR;
1527 - return qca8k_rmw(priv, QCA8K_REG_PWS,
1528 - QCA8K_PWS_LED_OPEN_EN_CSR | QCA8K_PWS_POWER_ON_SEL,
1533 -qca8k_parse_port_config(struct qca8k_priv *priv)
1535 - int port, cpu_port_index = -1, ret;
1536 - struct device_node *port_dn;
1537 - phy_interface_t mode;
1538 - struct dsa_port *dp;
1541 - /* We have 2 CPU port. Check them */
1542 - for (port = 0; port < QCA8K_NUM_PORTS; port++) {
1543 - /* Skip every other port */
1544 - if (port != 0 && port != 6)
1547 - dp = dsa_to_port(priv->ds, port);
1551 - if (!of_device_is_available(port_dn))
1554 - ret = of_get_phy_mode(port_dn, &mode);
1559 - case PHY_INTERFACE_MODE_RGMII:
1560 - case PHY_INTERFACE_MODE_RGMII_ID:
1561 - case PHY_INTERFACE_MODE_RGMII_TXID:
1562 - case PHY_INTERFACE_MODE_RGMII_RXID:
1563 - case PHY_INTERFACE_MODE_SGMII:
1566 - if (!of_property_read_u32(port_dn, "tx-internal-delay-ps", &delay))
1567 - /* Switch regs accept value in ns, convert ps to ns */
1568 - delay = delay / 1000;
1569 - else if (mode == PHY_INTERFACE_MODE_RGMII_ID ||
1570 - mode == PHY_INTERFACE_MODE_RGMII_TXID)
1573 - if (!FIELD_FIT(QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK, delay)) {
1574 - dev_err(priv->dev, "rgmii tx delay is limited to a max value of 3ns, setting to the max value");
1578 - priv->ports_config.rgmii_tx_delay[cpu_port_index] = delay;
1582 - if (!of_property_read_u32(port_dn, "rx-internal-delay-ps", &delay))
1583 - /* Switch regs accept value in ns, convert ps to ns */
1584 - delay = delay / 1000;
1585 - else if (mode == PHY_INTERFACE_MODE_RGMII_ID ||
1586 - mode == PHY_INTERFACE_MODE_RGMII_RXID)
1589 - if (!FIELD_FIT(QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK, delay)) {
1590 - dev_err(priv->dev, "rgmii rx delay is limited to a max value of 3ns, setting to the max value");
1594 - priv->ports_config.rgmii_rx_delay[cpu_port_index] = delay;
1596 - /* Skip sgmii parsing for rgmii* mode */
1597 - if (mode == PHY_INTERFACE_MODE_RGMII ||
1598 - mode == PHY_INTERFACE_MODE_RGMII_ID ||
1599 - mode == PHY_INTERFACE_MODE_RGMII_TXID ||
1600 - mode == PHY_INTERFACE_MODE_RGMII_RXID)
1603 - if (of_property_read_bool(port_dn, "qca,sgmii-txclk-falling-edge"))
1604 - priv->ports_config.sgmii_tx_clk_falling_edge = true;
1606 - if (of_property_read_bool(port_dn, "qca,sgmii-rxclk-falling-edge"))
1607 - priv->ports_config.sgmii_rx_clk_falling_edge = true;
1609 - if (of_property_read_bool(port_dn, "qca,sgmii-enable-pll")) {
1610 - priv->ports_config.sgmii_enable_pll = true;
1612 - if (priv->switch_id == QCA8K_ID_QCA8327) {
1613 - dev_err(priv->dev, "SGMII PLL should NOT be enabled for qca8327. Aborting enabling");
1614 - priv->ports_config.sgmii_enable_pll = false;
1617 - if (priv->switch_revision < 2)
1618 - dev_warn(priv->dev, "SGMII PLL should NOT be enabled for qca8337 with revision 2 or more.");
1631 -qca8k_setup(struct dsa_switch *ds)
1633 - struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
1634 - int cpu_port, ret, i;
1637 - cpu_port = qca8k_find_cpu_port(ds);
1638 - if (cpu_port < 0) {
1639 - dev_err(priv->dev, "No cpu port configured in both cpu port0 and port6");
1643 - /* Parse CPU port config to be later used in phy_link mac_config */
1644 - ret = qca8k_parse_port_config(priv);
1648 - ret = qca8k_setup_mdio_bus(priv);
1652 - ret = qca8k_setup_of_pws_reg(priv);
1656 - ret = qca8k_setup_mac_pwr_sel(priv);
1660 - /* Make sure MAC06 is disabled */
1661 - ret = regmap_clear_bits(priv->regmap, QCA8K_REG_PORT0_PAD_CTRL,
1662 - QCA8K_PORT0_PAD_MAC06_EXCHANGE_EN);
1664 - dev_err(priv->dev, "failed disabling MAC06 exchange");
1668 - /* Enable CPU Port */
1669 - ret = regmap_set_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
1670 - QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN);
1672 - dev_err(priv->dev, "failed enabling CPU port");
1676 - /* Enable MIB counters */
1677 - ret = qca8k_mib_init(priv);
1679 - dev_warn(priv->dev, "mib init failed");
1681 - /* Initial setup of all ports */
1682 - for (i = 0; i < QCA8K_NUM_PORTS; i++) {
1683 - /* Disable forwarding by default on all ports */
1684 - ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
1685 - QCA8K_PORT_LOOKUP_MEMBER, 0);
1689 - /* Enable QCA header mode on all cpu ports */
1690 - if (dsa_is_cpu_port(ds, i)) {
1691 - ret = qca8k_write(priv, QCA8K_REG_PORT_HDR_CTRL(i),
1692 - FIELD_PREP(QCA8K_PORT_HDR_CTRL_TX_MASK, QCA8K_PORT_HDR_CTRL_ALL) |
1693 - FIELD_PREP(QCA8K_PORT_HDR_CTRL_RX_MASK, QCA8K_PORT_HDR_CTRL_ALL));
1695 - dev_err(priv->dev, "failed enabling QCA header mode");
1700 - /* Disable MAC by default on all user ports */
1701 - if (dsa_is_user_port(ds, i))
1702 - qca8k_port_set_status(priv, i, 0);
1705 - /* Forward all unknown frames to CPU port for Linux processing
1706 - * Notice that in multi-cpu config only one port should be set
1707 - * for igmp, unknown, multicast and broadcast packet
1709 - ret = qca8k_write(priv, QCA8K_REG_GLOBAL_FW_CTRL1,
1710 - FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_MASK, BIT(cpu_port)) |
1711 - FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_BC_DP_MASK, BIT(cpu_port)) |
1712 - FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_MC_DP_MASK, BIT(cpu_port)) |
1713 - FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_UC_DP_MASK, BIT(cpu_port)));
1717 - /* Setup connection between CPU port & user ports
1718 - * Configure specific switch configuration for ports
1720 - for (i = 0; i < QCA8K_NUM_PORTS; i++) {
1721 - /* CPU port gets connected to all user ports of the switch */
1722 - if (dsa_is_cpu_port(ds, i)) {
1723 - ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
1724 - QCA8K_PORT_LOOKUP_MEMBER, dsa_user_ports(ds));
1729 - /* Individual user ports get connected to CPU port only */
1730 - if (dsa_is_user_port(ds, i)) {
1731 - ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
1732 - QCA8K_PORT_LOOKUP_MEMBER,
1737 - /* Enable ARP Auto-learning by default */
1738 - ret = regmap_set_bits(priv->regmap, QCA8K_PORT_LOOKUP_CTRL(i),
1739 - QCA8K_PORT_LOOKUP_LEARN);
1743 - /* For port based vlans to work we need to set the
1744 - * default egress vid
1746 - ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(i),
1747 - QCA8K_EGREES_VLAN_PORT_MASK(i),
1748 - QCA8K_EGREES_VLAN_PORT(i, QCA8K_PORT_VID_DEF));
1752 - ret = qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(i),
1753 - QCA8K_PORT_VLAN_CVID(QCA8K_PORT_VID_DEF) |
1754 - QCA8K_PORT_VLAN_SVID(QCA8K_PORT_VID_DEF));
1759 - /* The port 5 of the qca8337 have some problem in flood condition. The
1760 - * original legacy driver had some specific buffer and priority settings
1761 - * for the different port suggested by the QCA switch team. Add this
1762 - * missing settings to improve switch stability under load condition.
1763 - * This problem is limited to qca8337 and other qca8k switch are not affected.
1765 - if (priv->switch_id == QCA8K_ID_QCA8337) {
1767 - /* The 2 CPU port and port 5 requires some different
1768 - * priority than any other ports.
1773 - mask = QCA8K_PORT_HOL_CTRL0_EG_PRI0(0x3) |
1774 - QCA8K_PORT_HOL_CTRL0_EG_PRI1(0x4) |
1775 - QCA8K_PORT_HOL_CTRL0_EG_PRI2(0x4) |
1776 - QCA8K_PORT_HOL_CTRL0_EG_PRI3(0x4) |
1777 - QCA8K_PORT_HOL_CTRL0_EG_PRI4(0x6) |
1778 - QCA8K_PORT_HOL_CTRL0_EG_PRI5(0x8) |
1779 - QCA8K_PORT_HOL_CTRL0_EG_PORT(0x1e);
1782 - mask = QCA8K_PORT_HOL_CTRL0_EG_PRI0(0x3) |
1783 - QCA8K_PORT_HOL_CTRL0_EG_PRI1(0x4) |
1784 - QCA8K_PORT_HOL_CTRL0_EG_PRI2(0x6) |
1785 - QCA8K_PORT_HOL_CTRL0_EG_PRI3(0x8) |
1786 - QCA8K_PORT_HOL_CTRL0_EG_PORT(0x19);
1788 - qca8k_write(priv, QCA8K_REG_PORT_HOL_CTRL0(i), mask);
1790 - mask = QCA8K_PORT_HOL_CTRL1_ING(0x6) |
1791 - QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN |
1792 - QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN |
1793 - QCA8K_PORT_HOL_CTRL1_WRED_EN;
1794 - qca8k_rmw(priv, QCA8K_REG_PORT_HOL_CTRL1(i),
1795 - QCA8K_PORT_HOL_CTRL1_ING_BUF_MASK |
1796 - QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN |
1797 - QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN |
1798 - QCA8K_PORT_HOL_CTRL1_WRED_EN,
1803 - /* Special GLOBAL_FC_THRESH value are needed for ar8327 switch */
1804 - if (priv->switch_id == QCA8K_ID_QCA8327) {
1805 - mask = QCA8K_GLOBAL_FC_GOL_XON_THRES(288) |
1806 - QCA8K_GLOBAL_FC_GOL_XOFF_THRES(496);
1807 - qca8k_rmw(priv, QCA8K_REG_GLOBAL_FC_THRESH,
1808 - QCA8K_GLOBAL_FC_GOL_XON_THRES_MASK |
1809 - QCA8K_GLOBAL_FC_GOL_XOFF_THRES_MASK,
1813 - /* Setup our port MTUs to match power on defaults */
1814 - ret = qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, ETH_FRAME_LEN + ETH_FCS_LEN);
1816 - dev_warn(priv->dev, "failed setting MTU settings");
1818 - /* Flush the FDB table */
1819 - qca8k_fdb_flush(priv);
1821 - /* We don't have interrupts for link changes, so we need to poll */
1822 - ds->pcs_poll = true;
1824 - /* Set min a max ageing value supported */
1825 - ds->ageing_time_min = 7000;
1826 - ds->ageing_time_max = 458745000;
1828 - /* Set max number of LAGs supported */
1829 - ds->num_lag_ids = QCA8K_NUM_LAGS;
1835 -qca8k_mac_config_setup_internal_delay(struct qca8k_priv *priv, int cpu_port_index,
1838 - u32 delay, val = 0;
1841 - /* Delay can be declared in 3 different way.
1842 - * Mode to rgmii and internal-delay standard binding defined
1843 - * rgmii-id or rgmii-tx/rx phy mode set.
1844 - * The parse logic set a delay different than 0 only when one
1845 - * of the 3 different way is used. In all other case delay is
1846 - * not enabled. With ID or TX/RXID delay is enabled and set
1847 - * to the default and recommended value.
1849 - if (priv->ports_config.rgmii_tx_delay[cpu_port_index]) {
1850 - delay = priv->ports_config.rgmii_tx_delay[cpu_port_index];
1852 - val |= QCA8K_PORT_PAD_RGMII_TX_DELAY(delay) |
1853 - QCA8K_PORT_PAD_RGMII_TX_DELAY_EN;
1856 - if (priv->ports_config.rgmii_rx_delay[cpu_port_index]) {
1857 - delay = priv->ports_config.rgmii_rx_delay[cpu_port_index];
1859 - val |= QCA8K_PORT_PAD_RGMII_RX_DELAY(delay) |
1860 - QCA8K_PORT_PAD_RGMII_RX_DELAY_EN;
1863 - /* Set RGMII delay based on the selected values */
1864 - ret = qca8k_rmw(priv, reg,
1865 - QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK |
1866 - QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK |
1867 - QCA8K_PORT_PAD_RGMII_TX_DELAY_EN |
1868 - QCA8K_PORT_PAD_RGMII_RX_DELAY_EN,
1871 - dev_err(priv->dev, "Failed to set internal delay for CPU port%d",
1872 - cpu_port_index == QCA8K_CPU_PORT0 ? 0 : 6);
1876 -qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
1877 - const struct phylink_link_state *state)
1879 - struct qca8k_priv *priv = ds->priv;
1880 - int cpu_port_index, ret;
1884 - case 0: /* 1st CPU port */
1885 - if (state->interface != PHY_INTERFACE_MODE_RGMII &&
1886 - state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
1887 - state->interface != PHY_INTERFACE_MODE_RGMII_TXID &&
1888 - state->interface != PHY_INTERFACE_MODE_RGMII_RXID &&
1889 - state->interface != PHY_INTERFACE_MODE_SGMII)
1892 - reg = QCA8K_REG_PORT0_PAD_CTRL;
1893 - cpu_port_index = QCA8K_CPU_PORT0;
1900 - /* Internal PHY, nothing to do */
1902 - case 6: /* 2nd CPU port / external PHY */
1903 - if (state->interface != PHY_INTERFACE_MODE_RGMII &&
1904 - state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
1905 - state->interface != PHY_INTERFACE_MODE_RGMII_TXID &&
1906 - state->interface != PHY_INTERFACE_MODE_RGMII_RXID &&
1907 - state->interface != PHY_INTERFACE_MODE_SGMII &&
1908 - state->interface != PHY_INTERFACE_MODE_1000BASEX)
1911 - reg = QCA8K_REG_PORT6_PAD_CTRL;
1912 - cpu_port_index = QCA8K_CPU_PORT6;
1915 - dev_err(ds->dev, "%s: unsupported port: %i\n", __func__, port);
1919 - if (port != 6 && phylink_autoneg_inband(mode)) {
1920 - dev_err(ds->dev, "%s: in-band negotiation unsupported\n",
1925 - switch (state->interface) {
1926 - case PHY_INTERFACE_MODE_RGMII:
1927 - case PHY_INTERFACE_MODE_RGMII_ID:
1928 - case PHY_INTERFACE_MODE_RGMII_TXID:
1929 - case PHY_INTERFACE_MODE_RGMII_RXID:
1930 - qca8k_write(priv, reg, QCA8K_PORT_PAD_RGMII_EN);
1932 - /* Configure rgmii delay */
1933 - qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg);
1935 - /* QCA8337 requires to set rgmii rx delay for all ports.
1936 - * This is enabled through PORT5_PAD_CTRL for all ports,
1937 - * rather than individual port registers.
1939 - if (priv->switch_id == QCA8K_ID_QCA8337)
1940 - qca8k_write(priv, QCA8K_REG_PORT5_PAD_CTRL,
1941 - QCA8K_PORT_PAD_RGMII_RX_DELAY_EN);
1943 - case PHY_INTERFACE_MODE_SGMII:
1944 - case PHY_INTERFACE_MODE_1000BASEX:
1945 - /* Enable SGMII on the port */
1946 - qca8k_write(priv, reg, QCA8K_PORT_PAD_SGMII_EN);
1948 - /* Enable/disable SerDes auto-negotiation as necessary */
1949 - ret = qca8k_read(priv, QCA8K_REG_PWS, &val);
1952 - if (phylink_autoneg_inband(mode))
1953 - val &= ~QCA8K_PWS_SERDES_AEN_DIS;
1955 - val |= QCA8K_PWS_SERDES_AEN_DIS;
1956 - qca8k_write(priv, QCA8K_REG_PWS, val);
1958 - /* Configure the SGMII parameters */
1959 - ret = qca8k_read(priv, QCA8K_REG_SGMII_CTRL, &val);
1963 - val |= QCA8K_SGMII_EN_SD;
1965 - if (priv->ports_config.sgmii_enable_pll)
1966 - val |= QCA8K_SGMII_EN_PLL | QCA8K_SGMII_EN_RX |
1967 - QCA8K_SGMII_EN_TX;
1969 - if (dsa_is_cpu_port(ds, port)) {
1970 - /* CPU port, we're talking to the CPU MAC, be a PHY */
1971 - val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
1972 - val |= QCA8K_SGMII_MODE_CTRL_PHY;
1973 - } else if (state->interface == PHY_INTERFACE_MODE_SGMII) {
1974 - val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
1975 - val |= QCA8K_SGMII_MODE_CTRL_MAC;
1976 - } else if (state->interface == PHY_INTERFACE_MODE_1000BASEX) {
1977 - val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
1978 - val |= QCA8K_SGMII_MODE_CTRL_BASEX;
1981 - qca8k_write(priv, QCA8K_REG_SGMII_CTRL, val);
1983 - /* From original code is reported port instability as SGMII also
1984 - * require delay set. Apply advised values here or take them from DT.
1986 - if (state->interface == PHY_INTERFACE_MODE_SGMII)
1987 - qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg);
1989 - /* For qca8327/qca8328/qca8334/qca8338 sgmii is unique and
1990 - * falling edge is set writing in the PORT0 PAD reg
1992 - if (priv->switch_id == QCA8K_ID_QCA8327 ||
1993 - priv->switch_id == QCA8K_ID_QCA8337)
1994 - reg = QCA8K_REG_PORT0_PAD_CTRL;
1998 - /* SGMII Clock phase configuration */
1999 - if (priv->ports_config.sgmii_rx_clk_falling_edge)
2000 - val |= QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE;
2002 - if (priv->ports_config.sgmii_tx_clk_falling_edge)
2003 - val |= QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE;
2006 - ret = qca8k_rmw(priv, reg,
2007 - QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE |
2008 - QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE,
2013 - dev_err(ds->dev, "xMII mode %s not supported for port %d\n",
2014 - phy_modes(state->interface), port);
2020 -qca8k_phylink_validate(struct dsa_switch *ds, int port,
2021 - unsigned long *supported,
2022 - struct phylink_link_state *state)
2024 - __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
2027 - case 0: /* 1st CPU port */
2028 - if (state->interface != PHY_INTERFACE_MODE_NA &&
2029 - state->interface != PHY_INTERFACE_MODE_RGMII &&
2030 - state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
2031 - state->interface != PHY_INTERFACE_MODE_RGMII_TXID &&
2032 - state->interface != PHY_INTERFACE_MODE_RGMII_RXID &&
2033 - state->interface != PHY_INTERFACE_MODE_SGMII)
2041 - /* Internal PHY */
2042 - if (state->interface != PHY_INTERFACE_MODE_NA &&
2043 - state->interface != PHY_INTERFACE_MODE_GMII &&
2044 - state->interface != PHY_INTERFACE_MODE_INTERNAL)
2047 - case 6: /* 2nd CPU port / external PHY */
2048 - if (state->interface != PHY_INTERFACE_MODE_NA &&
2049 - state->interface != PHY_INTERFACE_MODE_RGMII &&
2050 - state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
2051 - state->interface != PHY_INTERFACE_MODE_RGMII_TXID &&
2052 - state->interface != PHY_INTERFACE_MODE_RGMII_RXID &&
2053 - state->interface != PHY_INTERFACE_MODE_SGMII &&
2054 - state->interface != PHY_INTERFACE_MODE_1000BASEX)
2059 - linkmode_zero(supported);
2063 - phylink_set_port_modes(mask);
2064 - phylink_set(mask, Autoneg);
2066 - phylink_set(mask, 1000baseT_Full);
2067 - phylink_set(mask, 10baseT_Half);
2068 - phylink_set(mask, 10baseT_Full);
2069 - phylink_set(mask, 100baseT_Half);
2070 - phylink_set(mask, 100baseT_Full);
2072 - if (state->interface == PHY_INTERFACE_MODE_1000BASEX)
2073 - phylink_set(mask, 1000baseX_Full);
2075 - phylink_set(mask, Pause);
2076 - phylink_set(mask, Asym_Pause);
2078 - linkmode_and(supported, supported, mask);
2079 - linkmode_and(state->advertising, state->advertising, mask);
2083 -qca8k_phylink_mac_link_state(struct dsa_switch *ds, int port,
2084 - struct phylink_link_state *state)
2086 - struct qca8k_priv *priv = ds->priv;
2090 - ret = qca8k_read(priv, QCA8K_REG_PORT_STATUS(port), ®);
2094 - state->link = !!(reg & QCA8K_PORT_STATUS_LINK_UP);
2095 - state->an_complete = state->link;
2096 - state->an_enabled = !!(reg & QCA8K_PORT_STATUS_LINK_AUTO);
2097 - state->duplex = (reg & QCA8K_PORT_STATUS_DUPLEX) ? DUPLEX_FULL :
2100 - switch (reg & QCA8K_PORT_STATUS_SPEED) {
2101 - case QCA8K_PORT_STATUS_SPEED_10:
2102 - state->speed = SPEED_10;
2104 - case QCA8K_PORT_STATUS_SPEED_100:
2105 - state->speed = SPEED_100;
2107 - case QCA8K_PORT_STATUS_SPEED_1000:
2108 - state->speed = SPEED_1000;
2111 - state->speed = SPEED_UNKNOWN;
2115 - state->pause = MLO_PAUSE_NONE;
2116 - if (reg & QCA8K_PORT_STATUS_RXFLOW)
2117 - state->pause |= MLO_PAUSE_RX;
2118 - if (reg & QCA8K_PORT_STATUS_TXFLOW)
2119 - state->pause |= MLO_PAUSE_TX;
2125 -qca8k_phylink_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode,
2126 - phy_interface_t interface)
2128 - struct qca8k_priv *priv = ds->priv;
2130 - qca8k_port_set_status(priv, port, 0);
2134 -qca8k_phylink_mac_link_up(struct dsa_switch *ds, int port, unsigned int mode,
2135 - phy_interface_t interface, struct phy_device *phydev,
2136 - int speed, int duplex, bool tx_pause, bool rx_pause)
2138 - struct qca8k_priv *priv = ds->priv;
2141 - if (phylink_autoneg_inband(mode)) {
2142 - reg = QCA8K_PORT_STATUS_LINK_AUTO;
2146 - reg = QCA8K_PORT_STATUS_SPEED_10;
2149 - reg = QCA8K_PORT_STATUS_SPEED_100;
2152 - reg = QCA8K_PORT_STATUS_SPEED_1000;
2155 - reg = QCA8K_PORT_STATUS_LINK_AUTO;
2159 - if (duplex == DUPLEX_FULL)
2160 - reg |= QCA8K_PORT_STATUS_DUPLEX;
2162 - if (rx_pause || dsa_is_cpu_port(ds, port))
2163 - reg |= QCA8K_PORT_STATUS_RXFLOW;
2165 - if (tx_pause || dsa_is_cpu_port(ds, port))
2166 - reg |= QCA8K_PORT_STATUS_TXFLOW;
2169 - reg |= QCA8K_PORT_STATUS_TXMAC | QCA8K_PORT_STATUS_RXMAC;
2171 - qca8k_write(priv, QCA8K_REG_PORT_STATUS(port), reg);
2175 -qca8k_get_strings(struct dsa_switch *ds, int port, u32 stringset, uint8_t *data)
2177 - struct qca8k_priv *priv = ds->priv;
2180 - if (stringset != ETH_SS_STATS)
2183 - for (i = 0; i < priv->info->mib_count; i++)
2184 - strncpy(data + i * ETH_GSTRING_LEN, ar8327_mib[i].name,
2188 -static void qca8k_mib_autocast_handler(struct dsa_switch *ds, struct sk_buff *skb)
2190 - struct qca8k_mib_eth_data *mib_eth_data;
2191 - struct qca8k_priv *priv = ds->priv;
2192 - const struct qca8k_mib_desc *mib;
2193 - struct mib_ethhdr *mib_ethhdr;
2194 - int i, mib_len, offset = 0;
2198 - mib_ethhdr = (struct mib_ethhdr *)skb_mac_header(skb);
2199 - mib_eth_data = &priv->mib_eth_data;
2201 - /* The switch autocast every port. Ignore other packet and
2202 - * parse only the requested one.
2204 - port = FIELD_GET(QCA_HDR_RECV_SOURCE_PORT, ntohs(mib_ethhdr->hdr));
2205 - if (port != mib_eth_data->req_port)
2208 - data = mib_eth_data->data;
2210 - for (i = 0; i < priv->info->mib_count; i++) {
2211 - mib = &ar8327_mib[i];
2213 - /* First 3 mib are present in the skb head */
2215 - data[i] = mib_ethhdr->data[i];
2219 - mib_len = sizeof(uint32_t);
2221 - /* Some mib are 64 bit wide */
2222 - if (mib->size == 2)
2223 - mib_len = sizeof(uint64_t);
2225 - /* Copy the mib value from packet to the */
2226 - memcpy(data + i, skb->data + offset, mib_len);
2228 - /* Set the offset for the next mib */
2229 - offset += mib_len;
2233 - /* Complete on receiving all the mib packet */
2234 - if (refcount_dec_and_test(&mib_eth_data->port_parsed))
2235 - complete(&mib_eth_data->rw_done);
2239 -qca8k_get_ethtool_stats_eth(struct dsa_switch *ds, int port, u64 *data)
2241 - struct dsa_port *dp = dsa_to_port(ds, port);
2242 - struct qca8k_mib_eth_data *mib_eth_data;
2243 - struct qca8k_priv *priv = ds->priv;
2246 - mib_eth_data = &priv->mib_eth_data;
2248 - mutex_lock(&mib_eth_data->mutex);
2250 - reinit_completion(&mib_eth_data->rw_done);
2252 - mib_eth_data->req_port = dp->index;
2253 - mib_eth_data->data = data;
2254 - refcount_set(&mib_eth_data->port_parsed, QCA8K_NUM_PORTS);
2256 - mutex_lock(&priv->reg_mutex);
2258 - /* Send mib autocast request */
2259 - ret = regmap_update_bits(priv->regmap, QCA8K_REG_MIB,
2260 - QCA8K_MIB_FUNC | QCA8K_MIB_BUSY,
2261 - FIELD_PREP(QCA8K_MIB_FUNC, QCA8K_MIB_CAST) |
2264 - mutex_unlock(&priv->reg_mutex);
2269 - ret = wait_for_completion_timeout(&mib_eth_data->rw_done, QCA8K_ETHERNET_TIMEOUT);
2272 - mutex_unlock(&mib_eth_data->mutex);
2278 -qca8k_get_ethtool_stats(struct dsa_switch *ds, int port,
2281 - struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
2282 - const struct qca8k_mib_desc *mib;
2287 - if (priv->mgmt_master && priv->info->ops->autocast_mib &&
2288 - priv->info->ops->autocast_mib(ds, port, data) > 0)
2291 - for (i = 0; i < priv->info->mib_count; i++) {
2292 - mib = &ar8327_mib[i];
2293 - reg = QCA8K_PORT_MIB_COUNTER(port) + mib->offset;
2295 - ret = qca8k_read(priv, reg, &val);
2299 - if (mib->size == 2) {
2300 - ret = qca8k_read(priv, reg + 4, &hi);
2306 - if (mib->size == 2)
2307 - data[i] |= (u64)hi << 32;
2312 -qca8k_get_sset_count(struct dsa_switch *ds, int port, int sset)
2314 - struct qca8k_priv *priv = ds->priv;
2316 - if (sset != ETH_SS_STATS)
2319 - return priv->info->mib_count;
2323 -qca8k_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *eee)
2325 - struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
2326 - u32 lpi_en = QCA8K_REG_EEE_CTRL_LPI_EN(port);
2330 - mutex_lock(&priv->reg_mutex);
2331 - ret = qca8k_read(priv, QCA8K_REG_EEE_CTRL, ®);
2335 - if (eee->eee_enabled)
2339 - ret = qca8k_write(priv, QCA8K_REG_EEE_CTRL, reg);
2342 - mutex_unlock(&priv->reg_mutex);
2347 -qca8k_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
2349 - /* Nothing to do on the port's MAC */
2354 -qca8k_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
2356 - struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
2360 - case BR_STATE_DISABLED:
2361 - stp_state = QCA8K_PORT_LOOKUP_STATE_DISABLED;
2363 - case BR_STATE_BLOCKING:
2364 - stp_state = QCA8K_PORT_LOOKUP_STATE_BLOCKING;
2366 - case BR_STATE_LISTENING:
2367 - stp_state = QCA8K_PORT_LOOKUP_STATE_LISTENING;
2369 - case BR_STATE_LEARNING:
2370 - stp_state = QCA8K_PORT_LOOKUP_STATE_LEARNING;
2372 - case BR_STATE_FORWARDING:
2374 - stp_state = QCA8K_PORT_LOOKUP_STATE_FORWARD;
2378 - qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
2379 - QCA8K_PORT_LOOKUP_STATE_MASK, stp_state);
2383 -qca8k_port_bridge_join(struct dsa_switch *ds, int port, struct net_device *br)
2385 - struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
2386 - int port_mask, cpu_port;
2389 - cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
2390 - port_mask = BIT(cpu_port);
2392 - for (i = 0; i < QCA8K_NUM_PORTS; i++) {
2393 - if (dsa_is_cpu_port(ds, i))
2395 - if (dsa_to_port(ds, i)->bridge_dev != br)
2397 - /* Add this port to the portvlan mask of the other ports
2400 - ret = regmap_set_bits(priv->regmap,
2401 - QCA8K_PORT_LOOKUP_CTRL(i),
2406 - port_mask |= BIT(i);
2409 - /* Add all other ports to this ports portvlan mask */
2410 - ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
2411 - QCA8K_PORT_LOOKUP_MEMBER, port_mask);
2417 -qca8k_port_bridge_leave(struct dsa_switch *ds, int port, struct net_device *br)
2419 - struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
2422 - cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
2424 - for (i = 0; i < QCA8K_NUM_PORTS; i++) {
2425 - if (dsa_is_cpu_port(ds, i))
2427 - if (dsa_to_port(ds, i)->bridge_dev != br)
2429 - /* Remove this port to the portvlan mask of the other ports
2432 - regmap_clear_bits(priv->regmap,
2433 - QCA8K_PORT_LOOKUP_CTRL(i),
2437 - /* Set the cpu port to be the only one in the portvlan mask of
2440 - qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
2441 - QCA8K_PORT_LOOKUP_MEMBER, BIT(cpu_port));
2445 -qca8k_port_fast_age(struct dsa_switch *ds, int port)
2447 - struct qca8k_priv *priv = ds->priv;
2449 - mutex_lock(&priv->reg_mutex);
2450 - qca8k_fdb_access(priv, QCA8K_FDB_FLUSH_PORT, port);
2451 - mutex_unlock(&priv->reg_mutex);
2455 -qca8k_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
2457 - struct qca8k_priv *priv = ds->priv;
2458 - unsigned int secs = msecs / 1000;
2461 - /* AGE_TIME reg is set in 7s step */
2464 - /* Handle case with 0 as val to NOT disable
2470 - return regmap_update_bits(priv->regmap, QCA8K_REG_ATU_CTRL, QCA8K_ATU_AGE_TIME_MASK,
2471 - QCA8K_ATU_AGE_TIME(val));
2475 -qca8k_port_enable(struct dsa_switch *ds, int port,
2476 - struct phy_device *phy)
2478 - struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
2480 - qca8k_port_set_status(priv, port, 1);
2481 - priv->port_enabled_map |= BIT(port);
2483 - if (dsa_is_user_port(ds, port))
2484 - phy_support_asym_pause(phy);
2490 -qca8k_port_disable(struct dsa_switch *ds, int port)
2492 - struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
2494 - qca8k_port_set_status(priv, port, 0);
2495 - priv->port_enabled_map &= ~BIT(port);
2499 -qca8k_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
2501 - struct qca8k_priv *priv = ds->priv;
2504 - /* We have only have a general MTU setting.
2505 - * DSA always set the CPU port's MTU to the largest MTU of the slave
2507 - * Setting MTU just for the CPU port is sufficient to correctly set a
2508 - * value for every port.
2510 - if (!dsa_is_cpu_port(ds, port))
2513 - /* To change the MAX_FRAME_SIZE the cpu ports must be off or
2514 - * the switch panics.
2515 - * Turn off both cpu ports before applying the new value to prevent
2518 - if (priv->port_enabled_map & BIT(0))
2519 - qca8k_port_set_status(priv, 0, 0);
2521 - if (priv->port_enabled_map & BIT(6))
2522 - qca8k_port_set_status(priv, 6, 0);
2524 - /* Include L2 header / FCS length */
2525 - ret = qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, new_mtu + ETH_HLEN + ETH_FCS_LEN);
2527 - if (priv->port_enabled_map & BIT(0))
2528 - qca8k_port_set_status(priv, 0, 1);
2530 - if (priv->port_enabled_map & BIT(6))
2531 - qca8k_port_set_status(priv, 6, 1);
2537 -qca8k_port_max_mtu(struct dsa_switch *ds, int port)
2539 - return QCA8K_MAX_MTU;
2543 -qca8k_port_fdb_insert(struct qca8k_priv *priv, const u8 *addr,
2544 - u16 port_mask, u16 vid)
2546 - /* Set the vid to the port vlan id if no vid is set */
2548 - vid = QCA8K_PORT_VID_DEF;
2550 - return qca8k_fdb_add(priv, addr, port_mask, vid,
2551 - QCA8K_ATU_STATUS_STATIC);
2555 -qca8k_port_fdb_add(struct dsa_switch *ds, int port,
2556 - const unsigned char *addr, u16 vid)
2558 - struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
2559 - u16 port_mask = BIT(port);
2561 - return qca8k_port_fdb_insert(priv, addr, port_mask, vid);
2565 -qca8k_port_fdb_del(struct dsa_switch *ds, int port,
2566 - const unsigned char *addr, u16 vid)
2568 - struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
2569 - u16 port_mask = BIT(port);
2572 - vid = QCA8K_PORT_VID_DEF;
2574 - return qca8k_fdb_del(priv, addr, port_mask, vid);
2578 -qca8k_port_fdb_dump(struct dsa_switch *ds, int port,
2579 - dsa_fdb_dump_cb_t *cb, void *data)
2581 - struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
2582 - struct qca8k_fdb _fdb = { 0 };
2583 - int cnt = QCA8K_NUM_FDB_RECORDS;
2587 - mutex_lock(&priv->reg_mutex);
2588 - while (cnt-- && !qca8k_fdb_next(priv, &_fdb, port)) {
2591 - is_static = (_fdb.aging == QCA8K_ATU_STATUS_STATIC);
2592 - ret = cb(_fdb.mac, _fdb.vid, is_static, data);
2596 - mutex_unlock(&priv->reg_mutex);
2602 -qca8k_port_mdb_add(struct dsa_switch *ds, int port,
2603 - const struct switchdev_obj_port_mdb *mdb)
2605 - struct qca8k_priv *priv = ds->priv;
2606 - const u8 *addr = mdb->addr;
2607 - u16 vid = mdb->vid;
2609 - return qca8k_fdb_search_and_insert(priv, BIT(port), addr, vid);
2613 -qca8k_port_mdb_del(struct dsa_switch *ds, int port,
2614 - const struct switchdev_obj_port_mdb *mdb)
2616 - struct qca8k_priv *priv = ds->priv;
2617 - const u8 *addr = mdb->addr;
2618 - u16 vid = mdb->vid;
2620 - return qca8k_fdb_search_and_del(priv, BIT(port), addr, vid);
2624 -qca8k_port_mirror_add(struct dsa_switch *ds, int port,
2625 - struct dsa_mall_mirror_tc_entry *mirror,
2628 - struct qca8k_priv *priv = ds->priv;
2629 - int monitor_port, ret;
2632 - /* Check for existent entry */
2633 - if ((ingress ? priv->mirror_rx : priv->mirror_tx) & BIT(port))
2636 - ret = regmap_read(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0, &val);
2640 - /* QCA83xx can have only one port set to mirror mode.
2641 - * Check that the correct port is requested and return error otherwise.
2642 - * When no mirror port is set, the values is set to 0xF
2644 - monitor_port = FIELD_GET(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
2645 - if (monitor_port != 0xF && monitor_port != mirror->to_local_port)
2648 - /* Set the monitor port */
2649 - val = FIELD_PREP(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM,
2650 - mirror->to_local_port);
2651 - ret = regmap_update_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
2652 - QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
2657 - reg = QCA8K_PORT_LOOKUP_CTRL(port);
2658 - val = QCA8K_PORT_LOOKUP_ING_MIRROR_EN;
2660 - reg = QCA8K_REG_PORT_HOL_CTRL1(port);
2661 - val = QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN;
2664 - ret = regmap_update_bits(priv->regmap, reg, val, val);
2668 - /* Track mirror port for tx and rx to decide when the
2669 - * mirror port has to be disabled.
2672 - priv->mirror_rx |= BIT(port);
2674 - priv->mirror_tx |= BIT(port);
2680 -qca8k_port_mirror_del(struct dsa_switch *ds, int port,
2681 - struct dsa_mall_mirror_tc_entry *mirror)
2683 - struct qca8k_priv *priv = ds->priv;
2687 - if (mirror->ingress) {
2688 - reg = QCA8K_PORT_LOOKUP_CTRL(port);
2689 - val = QCA8K_PORT_LOOKUP_ING_MIRROR_EN;
2691 - reg = QCA8K_REG_PORT_HOL_CTRL1(port);
2692 - val = QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN;
2695 - ret = regmap_clear_bits(priv->regmap, reg, val);
2699 - if (mirror->ingress)
2700 - priv->mirror_rx &= ~BIT(port);
2702 - priv->mirror_tx &= ~BIT(port);
2704 - /* No port set to send packet to mirror port. Disable mirror port */
2705 - if (!priv->mirror_rx && !priv->mirror_tx) {
2706 - val = FIELD_PREP(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, 0xF);
2707 - ret = regmap_update_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
2708 - QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
2713 - dev_err(priv->dev, "Failed to del mirror port from %d", port);
2717 -qca8k_port_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
2718 - struct netlink_ext_ack *extack)
2720 - struct qca8k_priv *priv = ds->priv;
2723 - if (vlan_filtering) {
2724 - ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
2725 - QCA8K_PORT_LOOKUP_VLAN_MODE_MASK,
2726 - QCA8K_PORT_LOOKUP_VLAN_MODE_SECURE);
2728 - ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
2729 - QCA8K_PORT_LOOKUP_VLAN_MODE_MASK,
2730 - QCA8K_PORT_LOOKUP_VLAN_MODE_NONE);
2737 -qca8k_port_vlan_add(struct dsa_switch *ds, int port,
2738 - const struct switchdev_obj_port_vlan *vlan,
2739 - struct netlink_ext_ack *extack)
2741 - bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
2742 - bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
2743 - struct qca8k_priv *priv = ds->priv;
2746 - ret = qca8k_vlan_add(priv, port, vlan->vid, untagged);
2748 - dev_err(priv->dev, "Failed to add VLAN to port %d (%d)", port, ret);
2753 - ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(port),
2754 - QCA8K_EGREES_VLAN_PORT_MASK(port),
2755 - QCA8K_EGREES_VLAN_PORT(port, vlan->vid));
2759 - ret = qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(port),
2760 - QCA8K_PORT_VLAN_CVID(vlan->vid) |
2761 - QCA8K_PORT_VLAN_SVID(vlan->vid));
2768 -qca8k_port_vlan_del(struct dsa_switch *ds, int port,
2769 - const struct switchdev_obj_port_vlan *vlan)
2771 - struct qca8k_priv *priv = ds->priv;
2774 - ret = qca8k_vlan_del(priv, port, vlan->vid);
2776 - dev_err(priv->dev, "Failed to delete VLAN from port %d (%d)", port, ret);
2781 -static u32 qca8k_get_phy_flags(struct dsa_switch *ds, int port)
2783 - struct qca8k_priv *priv = ds->priv;
2785 - /* Communicate to the phy internal driver the switch revision.
2786 - * Based on the switch revision different values needs to be
2787 - * set to the dbg and mmd reg on the phy.
2788 - * The first 2 bit are used to communicate the switch revision
2789 - * to the phy driver.
2791 - if (port > 0 && port < 6)
2792 - return priv->switch_revision;
2797 -static enum dsa_tag_protocol
2798 -qca8k_get_tag_protocol(struct dsa_switch *ds, int port,
2799 - enum dsa_tag_protocol mp)
2801 - return DSA_TAG_PROTO_QCA;
2805 -qca8k_lag_can_offload(struct dsa_switch *ds,
2806 - struct net_device *lag,
2807 - struct netdev_lag_upper_info *info)
2809 - struct dsa_port *dp;
2810 - int id, members = 0;
2812 - id = dsa_lag_id(ds->dst, lag);
2813 - if (id < 0 || id >= ds->num_lag_ids)
2816 - dsa_lag_foreach_port(dp, ds->dst, lag)
2817 - /* Includes the port joining the LAG */
2820 - if (members > QCA8K_NUM_PORTS_FOR_LAG)
2823 - if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
2826 - if (info->hash_type != NETDEV_LAG_HASH_L2 &&
2827 - info->hash_type != NETDEV_LAG_HASH_L23)
2834 -qca8k_lag_setup_hash(struct dsa_switch *ds,
2835 - struct net_device *lag,
2836 - struct netdev_lag_upper_info *info)
2838 - struct qca8k_priv *priv = ds->priv;
2839 - bool unique_lag = true;
2843 - id = dsa_lag_id(ds->dst, lag);
2845 - switch (info->hash_type) {
2846 - case NETDEV_LAG_HASH_L23:
2847 - hash |= QCA8K_TRUNK_HASH_SIP_EN;
2848 - hash |= QCA8K_TRUNK_HASH_DIP_EN;
2850 - case NETDEV_LAG_HASH_L2:
2851 - hash |= QCA8K_TRUNK_HASH_SA_EN;
2852 - hash |= QCA8K_TRUNK_HASH_DA_EN;
2854 - default: /* We should NEVER reach this */
2855 - return -EOPNOTSUPP;
2858 - /* Check if we are the unique configured LAG */
2859 - dsa_lags_foreach_id(i, ds->dst)
2860 - if (i != id && dsa_lag_dev(ds->dst, i)) {
2861 - unique_lag = false;
2865 - /* Hash Mode is global. Make sure the same Hash Mode
2866 - * is set to all the 4 possible lag.
2867 - * If we are the unique LAG we can set whatever hash
2869 - * To change hash mode it's needed to remove all LAG
2870 - * and change the mode with the latest.
2873 - priv->lag_hash_mode = hash;
2874 - } else if (priv->lag_hash_mode != hash) {
2875 - netdev_err(lag, "Error: Mismateched Hash Mode across different lag is not supported\n");
2876 - return -EOPNOTSUPP;
2879 - return regmap_update_bits(priv->regmap, QCA8K_TRUNK_HASH_EN_CTRL,
2880 - QCA8K_TRUNK_HASH_MASK, hash);
2884 -qca8k_lag_refresh_portmap(struct dsa_switch *ds, int port,
2885 - struct net_device *lag, bool delete)
2887 - struct qca8k_priv *priv = ds->priv;
2891 - id = dsa_lag_id(ds->dst, lag);
2893 - /* Read current port member */
2894 - ret = regmap_read(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL0, &val);
2898 - /* Shift val to the correct trunk */
2899 - val >>= QCA8K_REG_GOL_TRUNK_SHIFT(id);
2900 - val &= QCA8K_REG_GOL_TRUNK_MEMBER_MASK;
2902 - val &= ~BIT(port);
2906 - /* Update port member. With empty portmap disable trunk */
2907 - ret = regmap_update_bits(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL0,
2908 - QCA8K_REG_GOL_TRUNK_MEMBER(id) |
2909 - QCA8K_REG_GOL_TRUNK_EN(id),
2910 - !val << QCA8K_REG_GOL_TRUNK_SHIFT(id) |
2911 - val << QCA8K_REG_GOL_TRUNK_SHIFT(id));
2913 - /* Search empty member if adding or port on deleting */
2914 - for (i = 0; i < QCA8K_NUM_PORTS_FOR_LAG; i++) {
2915 - ret = regmap_read(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL(id), &val);
2919 - val >>= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i);
2920 - val &= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_MASK;
2923 - /* If port flagged to be disabled assume this member is
2926 - if (val != QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK)
2929 - val &= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT_MASK;
2933 - /* If port flagged to be enabled assume this member is
2936 - if (val == QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK)
2940 - /* We have found the member to add/remove */
2944 - /* Set port in the correct port mask or disable port if in delete mode */
2945 - return regmap_update_bits(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL(id),
2946 - QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN(id, i) |
2947 - QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT(id, i),
2948 - !delete << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i) |
2949 - port << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i));
2953 -qca8k_port_lag_join(struct dsa_switch *ds, int port,
2954 - struct net_device *lag,
2955 - struct netdev_lag_upper_info *info)
2959 - if (!qca8k_lag_can_offload(ds, lag, info))
2960 - return -EOPNOTSUPP;
2962 - ret = qca8k_lag_setup_hash(ds, lag, info);
2966 - return qca8k_lag_refresh_portmap(ds, port, lag, false);
2970 -qca8k_port_lag_leave(struct dsa_switch *ds, int port,
2971 - struct net_device *lag)
2973 - return qca8k_lag_refresh_portmap(ds, port, lag, true);
2977 -qca8k_master_change(struct dsa_switch *ds, const struct net_device *master,
2980 - struct dsa_port *dp = master->dsa_ptr;
2981 - struct qca8k_priv *priv = ds->priv;
2983 - /* Ethernet MIB/MDIO is only supported for CPU port 0 */
2984 - if (dp->index != 0)
2987 - mutex_lock(&priv->mgmt_eth_data.mutex);
2988 - mutex_lock(&priv->mib_eth_data.mutex);
2990 - priv->mgmt_master = operational ? (struct net_device *)master : NULL;
2992 - mutex_unlock(&priv->mib_eth_data.mutex);
2993 - mutex_unlock(&priv->mgmt_eth_data.mutex);
2996 -static int qca8k_connect_tag_protocol(struct dsa_switch *ds,
2997 - enum dsa_tag_protocol proto)
2999 - struct qca_tagger_data *tagger_data;
3002 - case DSA_TAG_PROTO_QCA:
3003 - tagger_data = ds->tagger_data;
3005 - tagger_data->rw_reg_ack_handler = qca8k_rw_reg_ack_handler;
3006 - tagger_data->mib_autocast_handler = qca8k_mib_autocast_handler;
3010 - return -EOPNOTSUPP;
3016 -static const struct dsa_switch_ops qca8k_switch_ops = {
3017 - .get_tag_protocol = qca8k_get_tag_protocol,
3018 - .setup = qca8k_setup,
3019 - .get_strings = qca8k_get_strings,
3020 - .get_ethtool_stats = qca8k_get_ethtool_stats,
3021 - .get_sset_count = qca8k_get_sset_count,
3022 - .set_ageing_time = qca8k_set_ageing_time,
3023 - .get_mac_eee = qca8k_get_mac_eee,
3024 - .set_mac_eee = qca8k_set_mac_eee,
3025 - .port_enable = qca8k_port_enable,
3026 - .port_disable = qca8k_port_disable,
3027 - .port_change_mtu = qca8k_port_change_mtu,
3028 - .port_max_mtu = qca8k_port_max_mtu,
3029 - .port_stp_state_set = qca8k_port_stp_state_set,
3030 - .port_bridge_join = qca8k_port_bridge_join,
3031 - .port_bridge_leave = qca8k_port_bridge_leave,
3032 - .port_fast_age = qca8k_port_fast_age,
3033 - .port_fdb_add = qca8k_port_fdb_add,
3034 - .port_fdb_del = qca8k_port_fdb_del,
3035 - .port_fdb_dump = qca8k_port_fdb_dump,
3036 - .port_mdb_add = qca8k_port_mdb_add,
3037 - .port_mdb_del = qca8k_port_mdb_del,
3038 - .port_mirror_add = qca8k_port_mirror_add,
3039 - .port_mirror_del = qca8k_port_mirror_del,
3040 - .port_vlan_filtering = qca8k_port_vlan_filtering,
3041 - .port_vlan_add = qca8k_port_vlan_add,
3042 - .port_vlan_del = qca8k_port_vlan_del,
3043 - .phylink_validate = qca8k_phylink_validate,
3044 - .phylink_mac_link_state = qca8k_phylink_mac_link_state,
3045 - .phylink_mac_config = qca8k_phylink_mac_config,
3046 - .phylink_mac_link_down = qca8k_phylink_mac_link_down,
3047 - .phylink_mac_link_up = qca8k_phylink_mac_link_up,
3048 - .get_phy_flags = qca8k_get_phy_flags,
3049 - .port_lag_join = qca8k_port_lag_join,
3050 - .port_lag_leave = qca8k_port_lag_leave,
3051 - .master_state_change = qca8k_master_change,
3052 - .connect_tag_protocol = qca8k_connect_tag_protocol,
3055 -static int qca8k_read_switch_id(struct qca8k_priv *priv)
3064 - ret = qca8k_read(priv, QCA8K_REG_MASK_CTRL, &val);
3068 - id = QCA8K_MASK_CTRL_DEVICE_ID(val);
3069 - if (id != priv->info->id) {
3070 - dev_err(priv->dev,
3071 - "Switch id detected %x but expected %x",
3072 - id, priv->info->id);
3076 - priv->switch_id = id;
3078 - /* Save revision to communicate to the internal PHY driver */
3079 - priv->switch_revision = QCA8K_MASK_CTRL_REV_ID(val);
3085 -qca8k_sw_probe(struct mdio_device *mdiodev)
3087 - struct qca8k_priv *priv;
3090 - /* allocate the private data struct so that we can probe the switches
3093 - priv = devm_kzalloc(&mdiodev->dev, sizeof(*priv), GFP_KERNEL);
3097 - priv->info = of_device_get_match_data(priv->dev);
3098 - priv->bus = mdiodev->bus;
3099 - priv->dev = &mdiodev->dev;
3101 - priv->reset_gpio = devm_gpiod_get_optional(priv->dev, "reset",
3103 - if (IS_ERR(priv->reset_gpio))
3104 - return PTR_ERR(priv->reset_gpio);
3106 - if (priv->reset_gpio) {
3107 - gpiod_set_value_cansleep(priv->reset_gpio, 1);
3108 - /* The active low duration must be greater than 10 ms
3109 - * and checkpatch.pl wants 20 ms.
3112 - gpiod_set_value_cansleep(priv->reset_gpio, 0);
3115 - /* Start by setting up the register mapping */
3116 - priv->regmap = devm_regmap_init(&mdiodev->dev, NULL, priv,
3117 - &qca8k_regmap_config);
3118 - if (IS_ERR(priv->regmap)) {
3119 - dev_err(priv->dev, "regmap initialization failed");
3120 - return PTR_ERR(priv->regmap);
3123 - priv->mdio_cache.page = 0xffff;
3124 - priv->mdio_cache.lo = 0xffff;
3125 - priv->mdio_cache.hi = 0xffff;
3127 - /* Check the detected switch id */
3128 - ret = qca8k_read_switch_id(priv);
3132 - priv->ds = devm_kzalloc(&mdiodev->dev, sizeof(*priv->ds), GFP_KERNEL);
3136 - mutex_init(&priv->mgmt_eth_data.mutex);
3137 - init_completion(&priv->mgmt_eth_data.rw_done);
3139 - mutex_init(&priv->mib_eth_data.mutex);
3140 - init_completion(&priv->mib_eth_data.rw_done);
3142 - priv->ds->dev = &mdiodev->dev;
3143 - priv->ds->num_ports = QCA8K_NUM_PORTS;
3144 - priv->ds->priv = priv;
3145 - priv->ds->ops = &qca8k_switch_ops;
3146 - mutex_init(&priv->reg_mutex);
3147 - dev_set_drvdata(&mdiodev->dev, priv);
3149 - return dsa_register_switch(priv->ds);
3153 -qca8k_sw_remove(struct mdio_device *mdiodev)
3155 - struct qca8k_priv *priv = dev_get_drvdata(&mdiodev->dev);
3161 - for (i = 0; i < QCA8K_NUM_PORTS; i++)
3162 - qca8k_port_set_status(priv, i, 0);
3164 - dsa_unregister_switch(priv->ds);
3166 - dev_set_drvdata(&mdiodev->dev, NULL);
3169 -static void qca8k_sw_shutdown(struct mdio_device *mdiodev)
3171 - struct qca8k_priv *priv = dev_get_drvdata(&mdiodev->dev);
3176 - dsa_switch_shutdown(priv->ds);
3178 - dev_set_drvdata(&mdiodev->dev, NULL);
3181 -#ifdef CONFIG_PM_SLEEP
3183 -qca8k_set_pm(struct qca8k_priv *priv, int enable)
3187 - for (port = 0; port < QCA8K_NUM_PORTS; port++) {
3188 - /* Do not enable on resume if the port was
3189 - * disabled before.
3191 - if (!(priv->port_enabled_map & BIT(port)))
3194 - qca8k_port_set_status(priv, port, enable);
3198 -static int qca8k_suspend(struct device *dev)
3200 - struct qca8k_priv *priv = dev_get_drvdata(dev);
3202 - qca8k_set_pm(priv, 0);
3204 - return dsa_switch_suspend(priv->ds);
3207 -static int qca8k_resume(struct device *dev)
3209 - struct qca8k_priv *priv = dev_get_drvdata(dev);
3211 - qca8k_set_pm(priv, 1);
3213 - return dsa_switch_resume(priv->ds);
3215 -#endif /* CONFIG_PM_SLEEP */
3217 -static SIMPLE_DEV_PM_OPS(qca8k_pm_ops,
3218 - qca8k_suspend, qca8k_resume);
3220 -static const struct qca8k_info_ops qca8xxx_ops = {
3221 - .autocast_mib = qca8k_get_ethtool_stats_eth,
3224 -static const struct qca8k_match_data qca8327 = {
3225 - .id = QCA8K_ID_QCA8327,
3226 - .reduced_package = true,
3227 - .mib_count = QCA8K_QCA832X_MIB_COUNT,
3228 - .ops = &qca8xxx_ops,
3231 -static const struct qca8k_match_data qca8328 = {
3232 - .id = QCA8K_ID_QCA8327,
3233 - .mib_count = QCA8K_QCA832X_MIB_COUNT,
3234 - .ops = &qca8xxx_ops,
3237 -static const struct qca8k_match_data qca833x = {
3238 - .id = QCA8K_ID_QCA8337,
3239 - .mib_count = QCA8K_QCA833X_MIB_COUNT,
3240 - .ops = &qca8xxx_ops,
3243 -static const struct of_device_id qca8k_of_match[] = {
3244 - { .compatible = "qca,qca8327", .data = &qca8327 },
3245 - { .compatible = "qca,qca8328", .data = &qca8328 },
3246 - { .compatible = "qca,qca8334", .data = &qca833x },
3247 - { .compatible = "qca,qca8337", .data = &qca833x },
3248 - { /* sentinel */ },
3251 -static struct mdio_driver qca8kmdio_driver = {
3252 - .probe = qca8k_sw_probe,
3253 - .remove = qca8k_sw_remove,
3254 - .shutdown = qca8k_sw_shutdown,
3255 - .mdiodrv.driver = {
3257 - .of_match_table = qca8k_of_match,
3258 - .pm = &qca8k_pm_ops,
3262 -mdio_module_driver(qca8kmdio_driver);
3264 -MODULE_AUTHOR("Mathieu Olivari, John Crispin <john@phrozen.org>");
3265 -MODULE_DESCRIPTION("Driver for QCA8K ethernet switch family");
3266 -MODULE_LICENSE("GPL v2");
3267 -MODULE_ALIAS("platform:qca8k");
3269 +++ b/drivers/net/dsa/qca/qca8k-8xxx.c
3271 +// SPDX-License-Identifier: GPL-2.0
3273 + * Copyright (C) 2009 Felix Fietkau <nbd@nbd.name>
3274 + * Copyright (C) 2011-2012 Gabor Juhos <juhosg@openwrt.org>
3275 + * Copyright (c) 2015, 2019, The Linux Foundation. All rights reserved.
3276 + * Copyright (c) 2016 John Crispin <john@phrozen.org>
3279 +#include <linux/module.h>
3280 +#include <linux/phy.h>
3281 +#include <linux/netdevice.h>
3282 +#include <linux/bitfield.h>
3283 +#include <linux/regmap.h>
3284 +#include <net/dsa.h>
3285 +#include <linux/of_net.h>
3286 +#include <linux/of_mdio.h>
3287 +#include <linux/of_platform.h>
3288 +#include <linux/if_bridge.h>
3289 +#include <linux/mdio.h>
3290 +#include <linux/phylink.h>
3291 +#include <linux/gpio/consumer.h>
3292 +#include <linux/etherdevice.h>
3293 +#include <linux/dsa/tag_qca.h>
3298 +qca8k_split_addr(u32 regaddr, u16 *r1, u16 *r2, u16 *page)
3301 + *r1 = regaddr & 0x1e;
3304 + *r2 = regaddr & 0x7;
3307 + *page = regaddr & 0x3ff;
3311 +qca8k_set_lo(struct qca8k_priv *priv, int phy_id, u32 regnum, u16 lo)
3313 + u16 *cached_lo = &priv->mdio_cache.lo;
3314 + struct mii_bus *bus = priv->bus;
3317 + if (lo == *cached_lo)
3320 + ret = bus->write(bus, phy_id, regnum, lo);
3322 + dev_err_ratelimited(&bus->dev,
3323 + "failed to write qca8k 32bit lo register\n");
3330 +qca8k_set_hi(struct qca8k_priv *priv, int phy_id, u32 regnum, u16 hi)
3332 + u16 *cached_hi = &priv->mdio_cache.hi;
3333 + struct mii_bus *bus = priv->bus;
3336 + if (hi == *cached_hi)
3339 + ret = bus->write(bus, phy_id, regnum, hi);
3341 + dev_err_ratelimited(&bus->dev,
3342 + "failed to write qca8k 32bit hi register\n");
3349 +qca8k_mii_read32(struct mii_bus *bus, int phy_id, u32 regnum, u32 *val)
3353 + ret = bus->read(bus, phy_id, regnum);
3356 + ret = bus->read(bus, phy_id, regnum + 1);
3357 + *val |= ret << 16;
3361 + dev_err_ratelimited(&bus->dev,
3362 + "failed to read qca8k 32bit register\n");
3371 +qca8k_mii_write32(struct qca8k_priv *priv, int phy_id, u32 regnum, u32 val)
3376 + lo = val & 0xffff;
3377 + hi = (u16)(val >> 16);
3379 + ret = qca8k_set_lo(priv, phy_id, regnum, lo);
3381 + ret = qca8k_set_hi(priv, phy_id, regnum + 1, hi);
3385 +qca8k_set_page(struct qca8k_priv *priv, u16 page)
3387 + u16 *cached_page = &priv->mdio_cache.page;
3388 + struct mii_bus *bus = priv->bus;
3391 + if (page == *cached_page)
3394 + ret = bus->write(bus, 0x18, 0, page);
3396 + dev_err_ratelimited(&bus->dev,
3397 + "failed to set qca8k page\n");
3401 + *cached_page = page;
3402 + usleep_range(1000, 2000);
3407 +qca8k_read(struct qca8k_priv *priv, u32 reg, u32 *val)
3409 + return regmap_read(priv->regmap, reg, val);
3413 +qca8k_write(struct qca8k_priv *priv, u32 reg, u32 val)
3415 + return regmap_write(priv->regmap, reg, val);
3419 +qca8k_rmw(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val)
3421 + return regmap_update_bits(priv->regmap, reg, mask, write_val);
3424 +static void qca8k_rw_reg_ack_handler(struct dsa_switch *ds, struct sk_buff *skb)
3426 + struct qca8k_mgmt_eth_data *mgmt_eth_data;
3427 + struct qca8k_priv *priv = ds->priv;
3428 + struct qca_mgmt_ethhdr *mgmt_ethhdr;
3431 + mgmt_ethhdr = (struct qca_mgmt_ethhdr *)skb_mac_header(skb);
3432 + mgmt_eth_data = &priv->mgmt_eth_data;
3434 + cmd = FIELD_GET(QCA_HDR_MGMT_CMD, mgmt_ethhdr->command);
3435 + len = FIELD_GET(QCA_HDR_MGMT_LENGTH, mgmt_ethhdr->command);
3437 + /* Make sure the seq match the requested packet */
3438 + if (mgmt_ethhdr->seq == mgmt_eth_data->seq)
3439 + mgmt_eth_data->ack = true;
3441 + if (cmd == MDIO_READ) {
3442 + mgmt_eth_data->data[0] = mgmt_ethhdr->mdio_data;
3444 + /* Get the rest of the 12 byte of data.
3445 + * The read/write function will extract the requested data.
3447 + if (len > QCA_HDR_MGMT_DATA1_LEN)
3448 + memcpy(mgmt_eth_data->data + 1, skb->data,
3449 + QCA_HDR_MGMT_DATA2_LEN);
3452 + complete(&mgmt_eth_data->rw_done);
3455 +static struct sk_buff *qca8k_alloc_mdio_header(enum mdio_cmd cmd, u32 reg, u32 *val,
3456 + int priority, unsigned int len)
3458 + struct qca_mgmt_ethhdr *mgmt_ethhdr;
3459 + unsigned int real_len;
3460 + struct sk_buff *skb;
3464 + skb = dev_alloc_skb(QCA_HDR_MGMT_PKT_LEN);
3468 + /* Max value for len reg is 15 (0xf) but the switch actually return 16 byte
3469 + * Actually for some reason the steps are:
3471 + * 1-4: first 4 byte
3472 + * 5-6: first 12 byte
3473 + * 7-15: all 16 byte
3480 + skb_reset_mac_header(skb);
3481 + skb_set_network_header(skb, skb->len);
3483 + mgmt_ethhdr = skb_push(skb, QCA_HDR_MGMT_HEADER_LEN + QCA_HDR_LEN);
3485 + hdr = FIELD_PREP(QCA_HDR_XMIT_VERSION, QCA_HDR_VERSION);
3486 + hdr |= FIELD_PREP(QCA_HDR_XMIT_PRIORITY, priority);
3487 + hdr |= QCA_HDR_XMIT_FROM_CPU;
3488 + hdr |= FIELD_PREP(QCA_HDR_XMIT_DP_BIT, BIT(0));
3489 + hdr |= FIELD_PREP(QCA_HDR_XMIT_CONTROL, QCA_HDR_XMIT_TYPE_RW_REG);
3491 + mgmt_ethhdr->command = FIELD_PREP(QCA_HDR_MGMT_ADDR, reg);
3492 + mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_LENGTH, real_len);
3493 + mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_CMD, cmd);
3494 + mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_CHECK_CODE,
3495 + QCA_HDR_MGMT_CHECK_CODE_VAL);
3497 + if (cmd == MDIO_WRITE)
3498 + mgmt_ethhdr->mdio_data = *val;
3500 + mgmt_ethhdr->hdr = htons(hdr);
3502 + data2 = skb_put_zero(skb, QCA_HDR_MGMT_DATA2_LEN + QCA_HDR_MGMT_PADDING_LEN);
3503 + if (cmd == MDIO_WRITE && len > QCA_HDR_MGMT_DATA1_LEN)
3504 + memcpy(data2, val + 1, len - QCA_HDR_MGMT_DATA1_LEN);
3509 +static void qca8k_mdio_header_fill_seq_num(struct sk_buff *skb, u32 seq_num)
3511 + struct qca_mgmt_ethhdr *mgmt_ethhdr;
3513 + mgmt_ethhdr = (struct qca_mgmt_ethhdr *)skb->data;
3514 + mgmt_ethhdr->seq = FIELD_PREP(QCA_HDR_MGMT_SEQ_NUM, seq_num);
3517 +static int qca8k_read_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
3519 + struct qca8k_mgmt_eth_data *mgmt_eth_data = &priv->mgmt_eth_data;
3520 + struct sk_buff *skb;
3524 + skb = qca8k_alloc_mdio_header(MDIO_READ, reg, NULL,
3525 + QCA8K_ETHERNET_MDIO_PRIORITY, len);
3529 + mutex_lock(&mgmt_eth_data->mutex);
3531 + /* Check mgmt_master if is operational */
3532 + if (!priv->mgmt_master) {
3534 + mutex_unlock(&mgmt_eth_data->mutex);
3538 + skb->dev = priv->mgmt_master;
3540 + reinit_completion(&mgmt_eth_data->rw_done);
3542 + /* Increment seq_num and set it in the mdio pkt */
3543 + mgmt_eth_data->seq++;
3544 + qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
3545 + mgmt_eth_data->ack = false;
3547 + dev_queue_xmit(skb);
3549 + ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
3550 + msecs_to_jiffies(QCA8K_ETHERNET_TIMEOUT));
3552 + *val = mgmt_eth_data->data[0];
3553 + if (len > QCA_HDR_MGMT_DATA1_LEN)
3554 + memcpy(val + 1, mgmt_eth_data->data + 1, len - QCA_HDR_MGMT_DATA1_LEN);
3556 + ack = mgmt_eth_data->ack;
3558 + mutex_unlock(&mgmt_eth_data->mutex);
3561 + return -ETIMEDOUT;
3569 +static int qca8k_write_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
3571 + struct qca8k_mgmt_eth_data *mgmt_eth_data = &priv->mgmt_eth_data;
3572 + struct sk_buff *skb;
3576 + skb = qca8k_alloc_mdio_header(MDIO_WRITE, reg, val,
3577 + QCA8K_ETHERNET_MDIO_PRIORITY, len);
3581 + mutex_lock(&mgmt_eth_data->mutex);
3583 + /* Check mgmt_master if is operational */
3584 + if (!priv->mgmt_master) {
3586 + mutex_unlock(&mgmt_eth_data->mutex);
3590 + skb->dev = priv->mgmt_master;
3592 + reinit_completion(&mgmt_eth_data->rw_done);
3594 + /* Increment seq_num and set it in the mdio pkt */
3595 + mgmt_eth_data->seq++;
3596 + qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
3597 + mgmt_eth_data->ack = false;
3599 + dev_queue_xmit(skb);
3601 + ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
3602 + msecs_to_jiffies(QCA8K_ETHERNET_TIMEOUT));
3604 + ack = mgmt_eth_data->ack;
3606 + mutex_unlock(&mgmt_eth_data->mutex);
3609 + return -ETIMEDOUT;
3618 +qca8k_regmap_update_bits_eth(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val)
3623 + ret = qca8k_read_eth(priv, reg, &val, sizeof(val));
3630 + return qca8k_write_eth(priv, reg, &val, sizeof(val));
3634 +qca8k_bulk_read(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
3636 + int i, count = len / sizeof(u32), ret;
3638 + if (priv->mgmt_master && !qca8k_read_eth(priv, reg, val, len))
3641 + for (i = 0; i < count; i++) {
3642 + ret = regmap_read(priv->regmap, reg + (i * 4), val + i);
3651 +qca8k_bulk_write(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
3653 + int i, count = len / sizeof(u32), ret;
3656 + if (priv->mgmt_master && !qca8k_write_eth(priv, reg, val, len))
3659 + for (i = 0; i < count; i++) {
3662 + ret = regmap_write(priv->regmap, reg + (i * 4), tmp);
3671 +qca8k_regmap_read(void *ctx, uint32_t reg, uint32_t *val)
3673 + struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
3674 + struct mii_bus *bus = priv->bus;
3678 + if (!qca8k_read_eth(priv, reg, val, sizeof(*val)))
3681 + qca8k_split_addr(reg, &r1, &r2, &page);
3683 + mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
3685 + ret = qca8k_set_page(priv, page);
3689 + ret = qca8k_mii_read32(bus, 0x10 | r2, r1, val);
3692 + mutex_unlock(&bus->mdio_lock);
3697 +qca8k_regmap_write(void *ctx, uint32_t reg, uint32_t val)
3699 + struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
3700 + struct mii_bus *bus = priv->bus;
3704 + if (!qca8k_write_eth(priv, reg, &val, sizeof(val)))
3707 + qca8k_split_addr(reg, &r1, &r2, &page);
3709 + mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
3711 + ret = qca8k_set_page(priv, page);
3715 + qca8k_mii_write32(priv, 0x10 | r2, r1, val);
3718 + mutex_unlock(&bus->mdio_lock);
3723 +qca8k_regmap_update_bits(void *ctx, uint32_t reg, uint32_t mask, uint32_t write_val)
3725 + struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
3726 + struct mii_bus *bus = priv->bus;
3731 + if (!qca8k_regmap_update_bits_eth(priv, reg, mask, write_val))
3734 + qca8k_split_addr(reg, &r1, &r2, &page);
3736 + mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
3738 + ret = qca8k_set_page(priv, page);
3742 + ret = qca8k_mii_read32(bus, 0x10 | r2, r1, &val);
3748 + qca8k_mii_write32(priv, 0x10 | r2, r1, val);
3751 + mutex_unlock(&bus->mdio_lock);
3756 +static const struct regmap_range qca8k_readable_ranges[] = {
3757 + regmap_reg_range(0x0000, 0x00e4), /* Global control */
3758 + regmap_reg_range(0x0100, 0x0168), /* EEE control */
3759 + regmap_reg_range(0x0200, 0x0270), /* Parser control */
3760 + regmap_reg_range(0x0400, 0x0454), /* ACL */
3761 + regmap_reg_range(0x0600, 0x0718), /* Lookup */
3762 + regmap_reg_range(0x0800, 0x0b70), /* QM */
3763 + regmap_reg_range(0x0c00, 0x0c80), /* PKT */
3764 + regmap_reg_range(0x0e00, 0x0e98), /* L3 */
3765 + regmap_reg_range(0x1000, 0x10ac), /* MIB - Port0 */
3766 + regmap_reg_range(0x1100, 0x11ac), /* MIB - Port1 */
3767 + regmap_reg_range(0x1200, 0x12ac), /* MIB - Port2 */
3768 + regmap_reg_range(0x1300, 0x13ac), /* MIB - Port3 */
3769 + regmap_reg_range(0x1400, 0x14ac), /* MIB - Port4 */
3770 + regmap_reg_range(0x1500, 0x15ac), /* MIB - Port5 */
3771 + regmap_reg_range(0x1600, 0x16ac), /* MIB - Port6 */
3775 +static const struct regmap_access_table qca8k_readable_table = {
3776 + .yes_ranges = qca8k_readable_ranges,
3777 + .n_yes_ranges = ARRAY_SIZE(qca8k_readable_ranges),
3780 +static struct regmap_config qca8k_regmap_config = {
3784 + .max_register = 0x16ac, /* end MIB - Port6 range */
3785 + .reg_read = qca8k_regmap_read,
3786 + .reg_write = qca8k_regmap_write,
3787 + .reg_update_bits = qca8k_regmap_update_bits,
3788 + .rd_table = &qca8k_readable_table,
3789 + .disable_locking = true, /* Locking is handled by qca8k read/write */
3790 + .cache_type = REGCACHE_NONE, /* Explicitly disable CACHE */
3794 +qca8k_busy_wait(struct qca8k_priv *priv, u32 reg, u32 mask)
3798 + return regmap_read_poll_timeout(priv->regmap, reg, val, !(val & mask), 0,
3799 + QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC);
3803 +qca8k_fdb_read(struct qca8k_priv *priv, struct qca8k_fdb *fdb)
3808 + /* load the ARL table into an array */
3809 + ret = qca8k_bulk_read(priv, QCA8K_REG_ATU_DATA0, reg, sizeof(reg));
3814 + fdb->vid = FIELD_GET(QCA8K_ATU_VID_MASK, reg[2]);
3815 + /* aging - 67:64 */
3816 + fdb->aging = FIELD_GET(QCA8K_ATU_STATUS_MASK, reg[2]);
3817 + /* portmask - 54:48 */
3818 + fdb->port_mask = FIELD_GET(QCA8K_ATU_PORT_MASK, reg[1]);
3820 + fdb->mac[0] = FIELD_GET(QCA8K_ATU_ADDR0_MASK, reg[1]);
3821 + fdb->mac[1] = FIELD_GET(QCA8K_ATU_ADDR1_MASK, reg[1]);
3822 + fdb->mac[2] = FIELD_GET(QCA8K_ATU_ADDR2_MASK, reg[0]);
3823 + fdb->mac[3] = FIELD_GET(QCA8K_ATU_ADDR3_MASK, reg[0]);
3824 + fdb->mac[4] = FIELD_GET(QCA8K_ATU_ADDR4_MASK, reg[0]);
3825 + fdb->mac[5] = FIELD_GET(QCA8K_ATU_ADDR5_MASK, reg[0]);
3831 +qca8k_fdb_write(struct qca8k_priv *priv, u16 vid, u8 port_mask, const u8 *mac,
3834 + u32 reg[3] = { 0 };
3837 + reg[2] = FIELD_PREP(QCA8K_ATU_VID_MASK, vid);
3838 + /* aging - 67:64 */
3839 + reg[2] |= FIELD_PREP(QCA8K_ATU_STATUS_MASK, aging);
3840 + /* portmask - 54:48 */
3841 + reg[1] = FIELD_PREP(QCA8K_ATU_PORT_MASK, port_mask);
3843 + reg[1] |= FIELD_PREP(QCA8K_ATU_ADDR0_MASK, mac[0]);
3844 + reg[1] |= FIELD_PREP(QCA8K_ATU_ADDR1_MASK, mac[1]);
3845 + reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR2_MASK, mac[2]);
3846 + reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR3_MASK, mac[3]);
3847 + reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR4_MASK, mac[4]);
3848 + reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR5_MASK, mac[5]);
3850 + /* load the array into the ARL table */
3851 + qca8k_bulk_write(priv, QCA8K_REG_ATU_DATA0, reg, sizeof(reg));
3855 +qca8k_fdb_access(struct qca8k_priv *priv, enum qca8k_fdb_cmd cmd, int port)
3860 + /* Set the command and FDB index */
3861 + reg = QCA8K_ATU_FUNC_BUSY;
3864 + reg |= QCA8K_ATU_FUNC_PORT_EN;
3865 + reg |= FIELD_PREP(QCA8K_ATU_FUNC_PORT_MASK, port);
3868 + /* Write the function register triggering the table access */
3869 + ret = qca8k_write(priv, QCA8K_REG_ATU_FUNC, reg);
3873 + /* wait for completion */
3874 + ret = qca8k_busy_wait(priv, QCA8K_REG_ATU_FUNC, QCA8K_ATU_FUNC_BUSY);
3878 + /* Check for table full violation when adding an entry */
3879 + if (cmd == QCA8K_FDB_LOAD) {
3880 - ret = qca8k_read(priv, QCA8K_REG_ATU_FUNC, &reg);
3883 + if (reg & QCA8K_ATU_FUNC_FULL)
3891 +qca8k_fdb_next(struct qca8k_priv *priv, struct qca8k_fdb *fdb, int port)
3895 + qca8k_fdb_write(priv, fdb->vid, fdb->port_mask, fdb->mac, fdb->aging);
3896 + ret = qca8k_fdb_access(priv, QCA8K_FDB_NEXT, port);
3900 + return qca8k_fdb_read(priv, fdb);
3904 +qca8k_fdb_add(struct qca8k_priv *priv, const u8 *mac, u16 port_mask,
3905 + u16 vid, u8 aging)
3909 + mutex_lock(&priv->reg_mutex);
3910 + qca8k_fdb_write(priv, vid, port_mask, mac, aging);
3911 + ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
3912 + mutex_unlock(&priv->reg_mutex);
3918 +qca8k_fdb_del(struct qca8k_priv *priv, const u8 *mac, u16 port_mask, u16 vid)
3922 + mutex_lock(&priv->reg_mutex);
3923 + qca8k_fdb_write(priv, vid, port_mask, mac, 0);
3924 + ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
3925 + mutex_unlock(&priv->reg_mutex);
3931 +qca8k_fdb_flush(struct qca8k_priv *priv)
3933 + mutex_lock(&priv->reg_mutex);
3934 + qca8k_fdb_access(priv, QCA8K_FDB_FLUSH, -1);
3935 + mutex_unlock(&priv->reg_mutex);
3939 +qca8k_fdb_search_and_insert(struct qca8k_priv *priv, u8 port_mask,
3940 + const u8 *mac, u16 vid)
3942 + struct qca8k_fdb fdb = { 0 };
3945 + mutex_lock(&priv->reg_mutex);
3947 + qca8k_fdb_write(priv, vid, 0, mac, 0);
3948 + ret = qca8k_fdb_access(priv, QCA8K_FDB_SEARCH, -1);
3952 + ret = qca8k_fdb_read(priv, &fdb);
3956 + /* Rule exist. Delete first */
3958 + ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
3963 + /* Add port to fdb portmask */
3964 + fdb.port_mask |= port_mask;
3966 + qca8k_fdb_write(priv, vid, fdb.port_mask, mac, fdb.aging);
3967 + ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
3970 + mutex_unlock(&priv->reg_mutex);
3975 +qca8k_fdb_search_and_del(struct qca8k_priv *priv, u8 port_mask,
3976 + const u8 *mac, u16 vid)
3978 + struct qca8k_fdb fdb = { 0 };
3981 + mutex_lock(&priv->reg_mutex);
3983 + qca8k_fdb_write(priv, vid, 0, mac, 0);
3984 + ret = qca8k_fdb_access(priv, QCA8K_FDB_SEARCH, -1);
3988 + /* Rule doesn't exist. Why delete? */
3994 + ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
3998 + /* Only port in the rule is this port. Don't re insert */
3999 + if (fdb.port_mask == port_mask)
4002 + /* Remove port from port mask */
4003 + fdb.port_mask &= ~port_mask;
4005 + qca8k_fdb_write(priv, vid, fdb.port_mask, mac, fdb.aging);
4006 + ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
4009 + mutex_unlock(&priv->reg_mutex);
4014 +qca8k_vlan_access(struct qca8k_priv *priv, enum qca8k_vlan_cmd cmd, u16 vid)
4019 + /* Set the command and VLAN index */
4020 + reg = QCA8K_VTU_FUNC1_BUSY;
4022 + reg |= FIELD_PREP(QCA8K_VTU_FUNC1_VID_MASK, vid);
4024 + /* Write the function register triggering the table access */
4025 + ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC1, reg);
4029 + /* wait for completion */
4030 + ret = qca8k_busy_wait(priv, QCA8K_REG_VTU_FUNC1, QCA8K_VTU_FUNC1_BUSY);
4034 + /* Check for table full violation when adding an entry */
4035 + if (cmd == QCA8K_VLAN_LOAD) {
4036 + ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC1, &reg);
4039 + if (reg & QCA8K_VTU_FUNC1_FULL)
4047 +qca8k_vlan_add(struct qca8k_priv *priv, u8 port, u16 vid, bool untagged)
4053 + We do the right thing with VLAN 0 and treat it as untagged while
4054 + preserving the tag on egress.
4059 + mutex_lock(&priv->reg_mutex);
4060 + ret = qca8k_vlan_access(priv, QCA8K_VLAN_READ, vid);
4064 + ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC0, ®);
4067 + reg |= QCA8K_VTU_FUNC0_VALID | QCA8K_VTU_FUNC0_IVL_EN;
4068 + reg &= ~QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(port);
4070 + reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_UNTAG(port);
4072 + reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_TAG(port);
4074 + ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg);
4077 + ret = qca8k_vlan_access(priv, QCA8K_VLAN_LOAD, vid);
4080 + mutex_unlock(&priv->reg_mutex);
4086 +qca8k_vlan_del(struct qca8k_priv *priv, u8 port, u16 vid)
4092 + mutex_lock(&priv->reg_mutex);
4093 + ret = qca8k_vlan_access(priv, QCA8K_VLAN_READ, vid);
4097 + ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC0, ®);
4100 + reg &= ~QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(port);
4101 + reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(port);
4103 + /* Check if we're the last member to be removed */
4105 + for (i = 0; i < QCA8K_NUM_PORTS; i++) {
4106 + mask = QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(i);
4108 + if ((reg & mask) != mask) {
4115 + ret = qca8k_vlan_access(priv, QCA8K_VLAN_PURGE, vid);
4117 + ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg);
4120 + ret = qca8k_vlan_access(priv, QCA8K_VLAN_LOAD, vid);
4124 + mutex_unlock(&priv->reg_mutex);
4130 +qca8k_mib_init(struct qca8k_priv *priv)
4134 + mutex_lock(&priv->reg_mutex);
4135 + ret = regmap_update_bits(priv->regmap, QCA8K_REG_MIB,
4136 + QCA8K_MIB_FUNC | QCA8K_MIB_BUSY,
4137 + FIELD_PREP(QCA8K_MIB_FUNC, QCA8K_MIB_FLUSH) |
4142 + ret = qca8k_busy_wait(priv, QCA8K_REG_MIB, QCA8K_MIB_BUSY);
4146 + ret = regmap_set_bits(priv->regmap, QCA8K_REG_MIB, QCA8K_MIB_CPU_KEEP);
4150 + ret = qca8k_write(priv, QCA8K_REG_MODULE_EN, QCA8K_MODULE_EN_MIB);
4153 + mutex_unlock(&priv->reg_mutex);
4158 +qca8k_port_set_status(struct qca8k_priv *priv, int port, int enable)
4160 + u32 mask = QCA8K_PORT_STATUS_TXMAC | QCA8K_PORT_STATUS_RXMAC;
4162 + /* Port 0 and 6 have no internal PHY */
4163 + if (port > 0 && port < 6)
4164 + mask |= QCA8K_PORT_STATUS_LINK_AUTO;
4167 + regmap_set_bits(priv->regmap, QCA8K_REG_PORT_STATUS(port), mask);
4169 + regmap_clear_bits(priv->regmap, QCA8K_REG_PORT_STATUS(port), mask);
4173 +qca8k_phy_eth_busy_wait(struct qca8k_mgmt_eth_data *mgmt_eth_data,
4174 + struct sk_buff *read_skb, u32 *val)
4176 + struct sk_buff *skb = skb_copy(read_skb, GFP_KERNEL);
4180 + reinit_completion(&mgmt_eth_data->rw_done);
4182 + /* Increment seq_num and set it in the copy pkt */
4183 + mgmt_eth_data->seq++;
4184 + qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
4185 + mgmt_eth_data->ack = false;
4187 + dev_queue_xmit(skb);
4189 + ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
4190 + QCA8K_ETHERNET_TIMEOUT);
4192 + ack = mgmt_eth_data->ack;
4195 + return -ETIMEDOUT;
4200 + *val = mgmt_eth_data->data[0];
4206 +qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy,
4207 + int regnum, u16 data)
4209 + struct sk_buff *write_skb, *clear_skb, *read_skb;
4210 + struct qca8k_mgmt_eth_data *mgmt_eth_data;
4211 + u32 write_val, clear_val = 0, val;
4212 + struct net_device *mgmt_master;
4216 + if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
4219 + mgmt_eth_data = &priv->mgmt_eth_data;
4221 + write_val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
4222 + QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
4223 + QCA8K_MDIO_MASTER_REG_ADDR(regnum);
4226 + write_val |= QCA8K_MDIO_MASTER_READ;
4228 + write_val |= QCA8K_MDIO_MASTER_WRITE;
4229 + write_val |= QCA8K_MDIO_MASTER_DATA(data);
4232 + /* Prealloc all the needed skb before the lock */
4233 + write_skb = qca8k_alloc_mdio_header(MDIO_WRITE, QCA8K_MDIO_MASTER_CTRL, &write_val,
4234 + QCA8K_ETHERNET_PHY_PRIORITY, sizeof(write_val));
4238 + clear_skb = qca8k_alloc_mdio_header(MDIO_WRITE, QCA8K_MDIO_MASTER_CTRL, &clear_val,
4239 + QCA8K_ETHERNET_PHY_PRIORITY, sizeof(clear_val));
4242 + goto err_clear_skb;
4245 + read_skb = qca8k_alloc_mdio_header(MDIO_READ, QCA8K_MDIO_MASTER_CTRL, &clear_val,
4246 + QCA8K_ETHERNET_PHY_PRIORITY, sizeof(clear_val));
4249 + goto err_read_skb;
4252 + /* Actually start the request:
4253 + * 1. Send mdio master packet
4254 + * 2. Busy Wait for mdio master command
4255 + * 3. Get the data if we are reading
4256 + * 4. Reset the mdio master (even with error)
4258 + mutex_lock(&mgmt_eth_data->mutex);
4260 + /* Check if mgmt_master is operational */
4261 + mgmt_master = priv->mgmt_master;
4262 + if (!mgmt_master) {
4263 + mutex_unlock(&mgmt_eth_data->mutex);
4265 + goto err_mgmt_master;
4268 + read_skb->dev = mgmt_master;
4269 + clear_skb->dev = mgmt_master;
4270 + write_skb->dev = mgmt_master;
4272 + reinit_completion(&mgmt_eth_data->rw_done);
4274 + /* Increment seq_num and set it in the write pkt */
4275 + mgmt_eth_data->seq++;
4276 + qca8k_mdio_header_fill_seq_num(write_skb, mgmt_eth_data->seq);
4277 + mgmt_eth_data->ack = false;
4279 + dev_queue_xmit(write_skb);
4281 + ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
4282 + QCA8K_ETHERNET_TIMEOUT);
4284 + ack = mgmt_eth_data->ack;
4288 + kfree_skb(read_skb);
4294 + kfree_skb(read_skb);
4298 + ret = read_poll_timeout(qca8k_phy_eth_busy_wait, ret1,
4299 + !(val & QCA8K_MDIO_MASTER_BUSY), 0,
4300 + QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false,
4301 + mgmt_eth_data, read_skb, &val);
4303 + if (ret < 0 && ret1 < 0) {
4309 + reinit_completion(&mgmt_eth_data->rw_done);
4311 + /* Increment seq_num and set it in the read pkt */
4312 + mgmt_eth_data->seq++;
4313 + qca8k_mdio_header_fill_seq_num(read_skb, mgmt_eth_data->seq);
4314 + mgmt_eth_data->ack = false;
4316 + dev_queue_xmit(read_skb);
4318 + ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
4319 + QCA8K_ETHERNET_TIMEOUT);
4321 + ack = mgmt_eth_data->ack;
4333 + ret = mgmt_eth_data->data[0] & QCA8K_MDIO_MASTER_DATA_MASK;
4335 + kfree_skb(read_skb);
4338 + reinit_completion(&mgmt_eth_data->rw_done);
4340 + /* Increment seq_num and set it in the clear pkt */
4341 + mgmt_eth_data->seq++;
4342 + qca8k_mdio_header_fill_seq_num(clear_skb, mgmt_eth_data->seq);
4343 + mgmt_eth_data->ack = false;
4345 + dev_queue_xmit(clear_skb);
4347 + wait_for_completion_timeout(&mgmt_eth_data->rw_done,
4348 + QCA8K_ETHERNET_TIMEOUT);
4350 + mutex_unlock(&mgmt_eth_data->mutex);
4354 + /* Error handling before lock */
4356 + kfree_skb(read_skb);
4358 + kfree_skb(clear_skb);
4360 + kfree_skb(write_skb);
4366 +qca8k_port_to_phy(int port)
4368 + /* From Andrew Lunn:
4369 + * Port 0 has no internal phy.
4370 + * Port 1 has an internal PHY at MDIO address 0.
4371 + * Port 2 has an internal PHY at MDIO address 1.
4373 + * Port 5 has an internal PHY at MDIO address 4.
4374 + * Port 6 has no internal PHY.
4381 +qca8k_mdio_busy_wait(struct mii_bus *bus, u32 reg, u32 mask)
4387 + qca8k_split_addr(reg, &r1, &r2, &page);
4389 + ret = read_poll_timeout(qca8k_mii_read32, ret1, !(val & mask), 0,
4390 + QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false,
4391 + bus, 0x10 | r2, r1, &val);
4393 + /* Check if qca8k_read has failed for a different reason
4394 + * before returning -ETIMEDOUT
4396 + if (ret < 0 && ret1 < 0)
4403 +qca8k_mdio_write(struct qca8k_priv *priv, int phy, int regnum, u16 data)
4405 + struct mii_bus *bus = priv->bus;
4410 + if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
4413 + val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
4414 + QCA8K_MDIO_MASTER_WRITE | QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
4415 + QCA8K_MDIO_MASTER_REG_ADDR(regnum) |
4416 + QCA8K_MDIO_MASTER_DATA(data);
4418 + qca8k_split_addr(QCA8K_MDIO_MASTER_CTRL, &r1, &r2, &page);
4420 + mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
4422 + ret = qca8k_set_page(priv, page);
4426 + qca8k_mii_write32(priv, 0x10 | r2, r1, val);
4428 + ret = qca8k_mdio_busy_wait(bus, QCA8K_MDIO_MASTER_CTRL,
4429 + QCA8K_MDIO_MASTER_BUSY);
4432 + /* even if the busy_wait timeouts try to clear the MASTER_EN */
4433 + qca8k_mii_write32(priv, 0x10 | r2, r1, 0);
4435 + mutex_unlock(&bus->mdio_lock);
4441 +qca8k_mdio_read(struct qca8k_priv *priv, int phy, int regnum)
4443 + struct mii_bus *bus = priv->bus;
4448 + if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
4451 + val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
4452 + QCA8K_MDIO_MASTER_READ | QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
4453 + QCA8K_MDIO_MASTER_REG_ADDR(regnum);
4455 + qca8k_split_addr(QCA8K_MDIO_MASTER_CTRL, &r1, &r2, &page);
4457 + mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
4459 + ret = qca8k_set_page(priv, page);
4463 + qca8k_mii_write32(priv, 0x10 | r2, r1, val);
4465 + ret = qca8k_mdio_busy_wait(bus, QCA8K_MDIO_MASTER_CTRL,
4466 + QCA8K_MDIO_MASTER_BUSY);
4470 + ret = qca8k_mii_read32(bus, 0x10 | r2, r1, &val);
4473 + /* even if the busy_wait timeouts try to clear the MASTER_EN */
4474 + qca8k_mii_write32(priv, 0x10 | r2, r1, 0);
4476 + mutex_unlock(&bus->mdio_lock);
4479 + ret = val & QCA8K_MDIO_MASTER_DATA_MASK;
4485 +qca8k_internal_mdio_write(struct mii_bus *slave_bus, int phy, int regnum, u16 data)
4487 + struct qca8k_priv *priv = slave_bus->priv;
4490 + /* Use mdio Ethernet when available, fallback to legacy one on error */
4491 + ret = qca8k_phy_eth_command(priv, false, phy, regnum, data);
4495 + return qca8k_mdio_write(priv, phy, regnum, data);
4499 +qca8k_internal_mdio_read(struct mii_bus *slave_bus, int phy, int regnum)
4501 + struct qca8k_priv *priv = slave_bus->priv;
4504 + /* Use mdio Ethernet when available, fallback to legacy one on error */
4505 + ret = qca8k_phy_eth_command(priv, true, phy, regnum, 0);
4509 + ret = qca8k_mdio_read(priv, phy, regnum);
4518 +qca8k_legacy_mdio_write(struct mii_bus *slave_bus, int port, int regnum, u16 data)
4520 + port = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
4522 + return qca8k_internal_mdio_write(slave_bus, port, regnum, data);
4526 +qca8k_legacy_mdio_read(struct mii_bus *slave_bus, int port, int regnum)
4528 + port = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
4530 + return qca8k_internal_mdio_read(slave_bus, port, regnum);
4534 +qca8k_mdio_register(struct qca8k_priv *priv)
4536 + struct dsa_switch *ds = priv->ds;
4537 + struct device_node *mdio;
4538 + struct mii_bus *bus;
4540 + bus = devm_mdiobus_alloc(ds->dev);
4544 + bus->priv = (void *)priv;
4545 + snprintf(bus->id, MII_BUS_ID_SIZE, "qca8k-%d.%d",
4546 + ds->dst->index, ds->index);
4547 + bus->parent = ds->dev;
4548 + bus->phy_mask = ~ds->phys_mii_mask;
4549 + ds->slave_mii_bus = bus;
4551 + /* Check if the devicetree declare the port:phy mapping */
4552 + mdio = of_get_child_by_name(priv->dev->of_node, "mdio");
4553 + if (of_device_is_available(mdio)) {
4554 + bus->name = "qca8k slave mii";
4555 + bus->read = qca8k_internal_mdio_read;
4556 + bus->write = qca8k_internal_mdio_write;
4557 + return devm_of_mdiobus_register(priv->dev, bus, mdio);
4560 + /* If a mapping can't be found the legacy mapping is used,
4561 + * using the qca8k_port_to_phy function
4563 + bus->name = "qca8k-legacy slave mii";
4564 + bus->read = qca8k_legacy_mdio_read;
4565 + bus->write = qca8k_legacy_mdio_write;
4566 + return devm_mdiobus_register(priv->dev, bus);
4570 +qca8k_setup_mdio_bus(struct qca8k_priv *priv)
4572 + u32 internal_mdio_mask = 0, external_mdio_mask = 0, reg;
4573 + struct device_node *ports, *port;
4574 + phy_interface_t mode;
4577 + ports = of_get_child_by_name(priv->dev->of_node, "ports");
4579 + ports = of_get_child_by_name(priv->dev->of_node, "ethernet-ports");
4584 + for_each_available_child_of_node(ports, port) {
4585 + err = of_property_read_u32(port, "reg", ®);
4587 + of_node_put(port);
4588 + of_node_put(ports);
4592 + if (!dsa_is_user_port(priv->ds, reg))
4595 + of_get_phy_mode(port, &mode);
4597 + if (of_property_read_bool(port, "phy-handle") &&
4598 + mode != PHY_INTERFACE_MODE_INTERNAL)
4599 + external_mdio_mask |= BIT(reg);
4601 + internal_mdio_mask |= BIT(reg);
4604 + of_node_put(ports);
4605 + if (!external_mdio_mask && !internal_mdio_mask) {
4606 + dev_err(priv->dev, "no PHYs are defined.\n");
4610 + /* The QCA8K_MDIO_MASTER_EN Bit, which grants access to PHYs through
4611 + * the MDIO_MASTER register also _disconnects_ the external MDC
4612 + * passthrough to the internal PHYs. It's not possible to use both
4613 + * configurations at the same time!
4615 + * Because this came up during the review process:
4616 + * If the external mdio-bus driver is capable magically disabling
4617 + * the QCA8K_MDIO_MASTER_EN and mutex/spin-locking out the qca8k's
4618 + * accessors for the time being, it would be possible to pull this
4621 + if (!!external_mdio_mask && !!internal_mdio_mask) {
4622 + dev_err(priv->dev, "either internal or external mdio bus configuration is supported.\n");
4626 + if (external_mdio_mask) {
4627 + /* Make sure to disable the internal mdio bus in cases
4628 + * a dt-overlay and driver reload changed the configuration
4631 + return regmap_clear_bits(priv->regmap, QCA8K_MDIO_MASTER_CTRL,
4632 + QCA8K_MDIO_MASTER_EN);
4635 + return qca8k_mdio_register(priv);
4639 +qca8k_setup_mac_pwr_sel(struct qca8k_priv *priv)
4644 + /* SoC specific settings for ipq8064.
4645 + * If more device require this consider adding
4646 + * a dedicated binding.
4648 + if (of_machine_is_compatible("qcom,ipq8064"))
4649 + mask |= QCA8K_MAC_PWR_RGMII0_1_8V;
4651 + /* SoC specific settings for ipq8065 */
4652 + if (of_machine_is_compatible("qcom,ipq8065"))
4653 + mask |= QCA8K_MAC_PWR_RGMII1_1_8V;
4656 + ret = qca8k_rmw(priv, QCA8K_REG_MAC_PWR_SEL,
4657 + QCA8K_MAC_PWR_RGMII0_1_8V |
4658 + QCA8K_MAC_PWR_RGMII1_1_8V,
4665 +static int qca8k_find_cpu_port(struct dsa_switch *ds)
4667 + struct qca8k_priv *priv = ds->priv;
4669 + /* Find the connected cpu port. Valid ports are 0 or 6 */
4670 + if (dsa_is_cpu_port(ds, 0))
4673 + dev_dbg(priv->dev, "port 0 is not the CPU port. Checking port 6");
4675 + if (dsa_is_cpu_port(ds, 6))
4682 +qca8k_setup_of_pws_reg(struct qca8k_priv *priv)
4684 + const struct qca8k_match_data *data = priv->info;
4685 + struct device_node *node = priv->dev->of_node;
4689 + /* QCA8327 require to set to the correct mode.
4690 + * His bigger brother QCA8328 have the 172 pin layout.
4691 + * Should be applied by default but we set this just to make sure.
4693 + if (priv->switch_id == QCA8K_ID_QCA8327) {
4694 + /* Set the correct package of 148 pin for QCA8327 */
4695 + if (data->reduced_package)
4696 + val |= QCA8327_PWS_PACKAGE148_EN;
4698 + ret = qca8k_rmw(priv, QCA8K_REG_PWS, QCA8327_PWS_PACKAGE148_EN,
4704 + if (of_property_read_bool(node, "qca,ignore-power-on-sel"))
4705 + val |= QCA8K_PWS_POWER_ON_SEL;
4707 + if (of_property_read_bool(node, "qca,led-open-drain")) {
4708 + if (!(val & QCA8K_PWS_POWER_ON_SEL)) {
4709 + dev_err(priv->dev, "qca,led-open-drain require qca,ignore-power-on-sel to be set.");
4713 + val |= QCA8K_PWS_LED_OPEN_EN_CSR;
4716 + return qca8k_rmw(priv, QCA8K_REG_PWS,
4717 + QCA8K_PWS_LED_OPEN_EN_CSR | QCA8K_PWS_POWER_ON_SEL,
4722 +qca8k_parse_port_config(struct qca8k_priv *priv)
4724 + int port, cpu_port_index = -1, ret;
4725 + struct device_node *port_dn;
4726 + phy_interface_t mode;
4727 + struct dsa_port *dp;
4730 + /* We have 2 CPU port. Check them */
4731 + for (port = 0; port < QCA8K_NUM_PORTS; port++) {
4732 + /* Skip every other port */
4733 + if (port != 0 && port != 6)
4736 + dp = dsa_to_port(priv->ds, port);
4740 + if (!of_device_is_available(port_dn))
4743 + ret = of_get_phy_mode(port_dn, &mode);
4748 + case PHY_INTERFACE_MODE_RGMII:
4749 + case PHY_INTERFACE_MODE_RGMII_ID:
4750 + case PHY_INTERFACE_MODE_RGMII_TXID:
4751 + case PHY_INTERFACE_MODE_RGMII_RXID:
4752 + case PHY_INTERFACE_MODE_SGMII:
4755 + if (!of_property_read_u32(port_dn, "tx-internal-delay-ps", &delay))
4756 + /* Switch regs accept value in ns, convert ps to ns */
4757 + delay = delay / 1000;
4758 + else if (mode == PHY_INTERFACE_MODE_RGMII_ID ||
4759 + mode == PHY_INTERFACE_MODE_RGMII_TXID)
4762 + if (!FIELD_FIT(QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK, delay)) {
4763 + dev_err(priv->dev, "rgmii tx delay is limited to a max value of 3ns, setting to the max value");
4767 + priv->ports_config.rgmii_tx_delay[cpu_port_index] = delay;
4771 + if (!of_property_read_u32(port_dn, "rx-internal-delay-ps", &delay))
4772 + /* Switch regs accept value in ns, convert ps to ns */
4773 + delay = delay / 1000;
4774 + else if (mode == PHY_INTERFACE_MODE_RGMII_ID ||
4775 + mode == PHY_INTERFACE_MODE_RGMII_RXID)
4778 + if (!FIELD_FIT(QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK, delay)) {
4779 + dev_err(priv->dev, "rgmii rx delay is limited to a max value of 3ns, setting to the max value");
4783 + priv->ports_config.rgmii_rx_delay[cpu_port_index] = delay;
4785 + /* Skip sgmii parsing for rgmii* mode */
4786 + if (mode == PHY_INTERFACE_MODE_RGMII ||
4787 + mode == PHY_INTERFACE_MODE_RGMII_ID ||
4788 + mode == PHY_INTERFACE_MODE_RGMII_TXID ||
4789 + mode == PHY_INTERFACE_MODE_RGMII_RXID)
4792 + if (of_property_read_bool(port_dn, "qca,sgmii-txclk-falling-edge"))
4793 + priv->ports_config.sgmii_tx_clk_falling_edge = true;
4795 + if (of_property_read_bool(port_dn, "qca,sgmii-rxclk-falling-edge"))
4796 + priv->ports_config.sgmii_rx_clk_falling_edge = true;
4798 + if (of_property_read_bool(port_dn, "qca,sgmii-enable-pll")) {
4799 + priv->ports_config.sgmii_enable_pll = true;
4801 + if (priv->switch_id == QCA8K_ID_QCA8327) {
4802 + dev_err(priv->dev, "SGMII PLL should NOT be enabled for qca8327. Aborting enabling");
4803 + priv->ports_config.sgmii_enable_pll = false;
4806 + if (priv->switch_revision < 2)
4807 + dev_warn(priv->dev, "SGMII PLL should NOT be enabled for qca8337 with revision 2 or more.");
4820 +qca8k_setup(struct dsa_switch *ds)
4822 + struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
4823 + int cpu_port, ret, i;
4826 + cpu_port = qca8k_find_cpu_port(ds);
4827 + if (cpu_port < 0) {
4828 + dev_err(priv->dev, "No cpu port configured in both cpu port0 and port6");
4832 + /* Parse CPU port config to be later used in phy_link mac_config */
4833 + ret = qca8k_parse_port_config(priv);
4837 + ret = qca8k_setup_mdio_bus(priv);
4841 + ret = qca8k_setup_of_pws_reg(priv);
4845 + ret = qca8k_setup_mac_pwr_sel(priv);
4849 + /* Make sure MAC06 is disabled */
4850 + ret = regmap_clear_bits(priv->regmap, QCA8K_REG_PORT0_PAD_CTRL,
4851 + QCA8K_PORT0_PAD_MAC06_EXCHANGE_EN);
4853 + dev_err(priv->dev, "failed disabling MAC06 exchange");
4857 + /* Enable CPU Port */
4858 + ret = regmap_set_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
4859 + QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN);
4861 + dev_err(priv->dev, "failed enabling CPU port");
4865 + /* Enable MIB counters */
4866 + ret = qca8k_mib_init(priv);
4868 + dev_warn(priv->dev, "mib init failed");
4870 + /* Initial setup of all ports */
4871 + for (i = 0; i < QCA8K_NUM_PORTS; i++) {
4872 + /* Disable forwarding by default on all ports */
4873 + ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
4874 + QCA8K_PORT_LOOKUP_MEMBER, 0);
4878 + /* Enable QCA header mode on all cpu ports */
4879 + if (dsa_is_cpu_port(ds, i)) {
4880 + ret = qca8k_write(priv, QCA8K_REG_PORT_HDR_CTRL(i),
4881 + FIELD_PREP(QCA8K_PORT_HDR_CTRL_TX_MASK, QCA8K_PORT_HDR_CTRL_ALL) |
4882 + FIELD_PREP(QCA8K_PORT_HDR_CTRL_RX_MASK, QCA8K_PORT_HDR_CTRL_ALL));
4884 + dev_err(priv->dev, "failed enabling QCA header mode");
4889 + /* Disable MAC by default on all user ports */
4890 + if (dsa_is_user_port(ds, i))
4891 + qca8k_port_set_status(priv, i, 0);
4894 + /* Forward all unknown frames to CPU port for Linux processing
4895 + * Notice that in multi-cpu config only one port should be set
4896 + * for igmp, unknown, multicast and broadcast packet
4898 + ret = qca8k_write(priv, QCA8K_REG_GLOBAL_FW_CTRL1,
4899 + FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_MASK, BIT(cpu_port)) |
4900 + FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_BC_DP_MASK, BIT(cpu_port)) |
4901 + FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_MC_DP_MASK, BIT(cpu_port)) |
4902 + FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_UC_DP_MASK, BIT(cpu_port)));
4906 + /* Setup connection between CPU port & user ports
4907 + * Configure specific switch configuration for ports
4909 + for (i = 0; i < QCA8K_NUM_PORTS; i++) {
4910 + /* CPU port gets connected to all user ports of the switch */
4911 + if (dsa_is_cpu_port(ds, i)) {
4912 + ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
4913 + QCA8K_PORT_LOOKUP_MEMBER, dsa_user_ports(ds));
4918 + /* Individual user ports get connected to CPU port only */
4919 + if (dsa_is_user_port(ds, i)) {
4920 + ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
4921 + QCA8K_PORT_LOOKUP_MEMBER,
4926 + /* Enable ARP Auto-learning by default */
4927 + ret = regmap_set_bits(priv->regmap, QCA8K_PORT_LOOKUP_CTRL(i),
4928 + QCA8K_PORT_LOOKUP_LEARN);
4932 + /* For port based vlans to work we need to set the
4933 + * default egress vid
4935 + ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(i),
4936 + QCA8K_EGREES_VLAN_PORT_MASK(i),
4937 + QCA8K_EGREES_VLAN_PORT(i, QCA8K_PORT_VID_DEF));
4941 + ret = qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(i),
4942 + QCA8K_PORT_VLAN_CVID(QCA8K_PORT_VID_DEF) |
4943 + QCA8K_PORT_VLAN_SVID(QCA8K_PORT_VID_DEF));
4948 + /* The port 5 of the qca8337 have some problem in flood condition. The
4949 + * original legacy driver had some specific buffer and priority settings
4950 + * for the different port suggested by the QCA switch team. Add this
4951 + * missing settings to improve switch stability under load condition.
4952 + * This problem is limited to qca8337 and other qca8k switch are not affected.
4954 + if (priv->switch_id == QCA8K_ID_QCA8337) {
4956 + /* The 2 CPU port and port 5 requires some different
4957 + * priority than any other ports.
4962 + mask = QCA8K_PORT_HOL_CTRL0_EG_PRI0(0x3) |
4963 + QCA8K_PORT_HOL_CTRL0_EG_PRI1(0x4) |
4964 + QCA8K_PORT_HOL_CTRL0_EG_PRI2(0x4) |
4965 + QCA8K_PORT_HOL_CTRL0_EG_PRI3(0x4) |
4966 + QCA8K_PORT_HOL_CTRL0_EG_PRI4(0x6) |
4967 + QCA8K_PORT_HOL_CTRL0_EG_PRI5(0x8) |
4968 + QCA8K_PORT_HOL_CTRL0_EG_PORT(0x1e);
4971 + mask = QCA8K_PORT_HOL_CTRL0_EG_PRI0(0x3) |
4972 + QCA8K_PORT_HOL_CTRL0_EG_PRI1(0x4) |
4973 + QCA8K_PORT_HOL_CTRL0_EG_PRI2(0x6) |
4974 + QCA8K_PORT_HOL_CTRL0_EG_PRI3(0x8) |
4975 + QCA8K_PORT_HOL_CTRL0_EG_PORT(0x19);
4977 + qca8k_write(priv, QCA8K_REG_PORT_HOL_CTRL0(i), mask);
4979 + mask = QCA8K_PORT_HOL_CTRL1_ING(0x6) |
4980 + QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN |
4981 + QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN |
4982 + QCA8K_PORT_HOL_CTRL1_WRED_EN;
4983 + qca8k_rmw(priv, QCA8K_REG_PORT_HOL_CTRL1(i),
4984 + QCA8K_PORT_HOL_CTRL1_ING_BUF_MASK |
4985 + QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN |
4986 + QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN |
4987 + QCA8K_PORT_HOL_CTRL1_WRED_EN,
4992 + /* Special GLOBAL_FC_THRESH value are needed for ar8327 switch */
4993 + if (priv->switch_id == QCA8K_ID_QCA8327) {
4994 + mask = QCA8K_GLOBAL_FC_GOL_XON_THRES(288) |
4995 + QCA8K_GLOBAL_FC_GOL_XOFF_THRES(496);
4996 + qca8k_rmw(priv, QCA8K_REG_GLOBAL_FC_THRESH,
4997 + QCA8K_GLOBAL_FC_GOL_XON_THRES_MASK |
4998 + QCA8K_GLOBAL_FC_GOL_XOFF_THRES_MASK,
5002 + /* Setup our port MTUs to match power on defaults */
5003 + ret = qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, ETH_FRAME_LEN + ETH_FCS_LEN);
5005 + dev_warn(priv->dev, "failed setting MTU settings");
5007 + /* Flush the FDB table */
5008 + qca8k_fdb_flush(priv);
5010 + /* We don't have interrupts for link changes, so we need to poll */
5011 + ds->pcs_poll = true;
5013 + /* Set min and max ageing values supported */
5014 + ds->ageing_time_min = 7000;
5015 + ds->ageing_time_max = 458745000;
5017 + /* Set max number of LAGs supported */
5018 + ds->num_lag_ids = QCA8K_NUM_LAGS;
5024 +qca8k_mac_config_setup_internal_delay(struct qca8k_priv *priv, int cpu_port_index,
5027 + u32 delay, val = 0;
5030 + /* Delay can be declared in 3 different way.
5031 + * Mode to rgmii and internal-delay standard binding defined
5032 + * rgmii-id or rgmii-tx/rx phy mode set.
5033 + * The parse logic set a delay different than 0 only when one
5034 + * of the 3 different way is used. In all other case delay is
5035 + * not enabled. With ID or TX/RXID delay is enabled and set
5036 + * to the default and recommended value.
5038 + if (priv->ports_config.rgmii_tx_delay[cpu_port_index]) {
5039 + delay = priv->ports_config.rgmii_tx_delay[cpu_port_index];
5041 + val |= QCA8K_PORT_PAD_RGMII_TX_DELAY(delay) |
5042 + QCA8K_PORT_PAD_RGMII_TX_DELAY_EN;
5045 + if (priv->ports_config.rgmii_rx_delay[cpu_port_index]) {
5046 + delay = priv->ports_config.rgmii_rx_delay[cpu_port_index];
5048 + val |= QCA8K_PORT_PAD_RGMII_RX_DELAY(delay) |
5049 + QCA8K_PORT_PAD_RGMII_RX_DELAY_EN;
5052 + /* Set RGMII delay based on the selected values */
5053 + ret = qca8k_rmw(priv, reg,
5054 + QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK |
5055 + QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK |
5056 + QCA8K_PORT_PAD_RGMII_TX_DELAY_EN |
5057 + QCA8K_PORT_PAD_RGMII_RX_DELAY_EN,
5060 + dev_err(priv->dev, "Failed to set internal delay for CPU port%d",
5061 + cpu_port_index == QCA8K_CPU_PORT0 ? 0 : 6);
5065 +qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
5066 + const struct phylink_link_state *state)
5068 + struct qca8k_priv *priv = ds->priv;
5069 + int cpu_port_index, ret;
5073 + case 0: /* 1st CPU port */
5074 + if (state->interface != PHY_INTERFACE_MODE_RGMII &&
5075 + state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
5076 + state->interface != PHY_INTERFACE_MODE_RGMII_TXID &&
5077 + state->interface != PHY_INTERFACE_MODE_RGMII_RXID &&
5078 + state->interface != PHY_INTERFACE_MODE_SGMII)
5081 + reg = QCA8K_REG_PORT0_PAD_CTRL;
5082 + cpu_port_index = QCA8K_CPU_PORT0;
5089 + /* Internal PHY, nothing to do */
5091 + case 6: /* 2nd CPU port / external PHY */
5092 + if (state->interface != PHY_INTERFACE_MODE_RGMII &&
5093 + state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
5094 + state->interface != PHY_INTERFACE_MODE_RGMII_TXID &&
5095 + state->interface != PHY_INTERFACE_MODE_RGMII_RXID &&
5096 + state->interface != PHY_INTERFACE_MODE_SGMII &&
5097 + state->interface != PHY_INTERFACE_MODE_1000BASEX)
5100 + reg = QCA8K_REG_PORT6_PAD_CTRL;
5101 + cpu_port_index = QCA8K_CPU_PORT6;
5104 + dev_err(ds->dev, "%s: unsupported port: %i\n", __func__, port);
5108 + if (port != 6 && phylink_autoneg_inband(mode)) {
5109 + dev_err(ds->dev, "%s: in-band negotiation unsupported\n",
5114 + switch (state->interface) {
5115 + case PHY_INTERFACE_MODE_RGMII:
5116 + case PHY_INTERFACE_MODE_RGMII_ID:
5117 + case PHY_INTERFACE_MODE_RGMII_TXID:
5118 + case PHY_INTERFACE_MODE_RGMII_RXID:
5119 + qca8k_write(priv, reg, QCA8K_PORT_PAD_RGMII_EN);
5121 + /* Configure rgmii delay */
5122 + qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg);
5124 + /* QCA8337 requires to set rgmii rx delay for all ports.
5125 + * This is enabled through PORT5_PAD_CTRL for all ports,
5126 + * rather than individual port registers.
5128 + if (priv->switch_id == QCA8K_ID_QCA8337)
5129 + qca8k_write(priv, QCA8K_REG_PORT5_PAD_CTRL,
5130 + QCA8K_PORT_PAD_RGMII_RX_DELAY_EN);
5132 + case PHY_INTERFACE_MODE_SGMII:
5133 + case PHY_INTERFACE_MODE_1000BASEX:
5134 + /* Enable SGMII on the port */
5135 + qca8k_write(priv, reg, QCA8K_PORT_PAD_SGMII_EN);
5137 + /* Enable/disable SerDes auto-negotiation as necessary */
5138 + ret = qca8k_read(priv, QCA8K_REG_PWS, &val);
5141 + if (phylink_autoneg_inband(mode))
5142 + val &= ~QCA8K_PWS_SERDES_AEN_DIS;
5144 + val |= QCA8K_PWS_SERDES_AEN_DIS;
5145 + qca8k_write(priv, QCA8K_REG_PWS, val);
5147 + /* Configure the SGMII parameters */
5148 + ret = qca8k_read(priv, QCA8K_REG_SGMII_CTRL, &val);
5152 + val |= QCA8K_SGMII_EN_SD;
5154 + if (priv->ports_config.sgmii_enable_pll)
5155 + val |= QCA8K_SGMII_EN_PLL | QCA8K_SGMII_EN_RX |
5156 + QCA8K_SGMII_EN_TX;
5158 + if (dsa_is_cpu_port(ds, port)) {
5159 + /* CPU port, we're talking to the CPU MAC, be a PHY */
5160 + val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
5161 + val |= QCA8K_SGMII_MODE_CTRL_PHY;
5162 + } else if (state->interface == PHY_INTERFACE_MODE_SGMII) {
5163 + val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
5164 + val |= QCA8K_SGMII_MODE_CTRL_MAC;
5165 + } else if (state->interface == PHY_INTERFACE_MODE_1000BASEX) {
5166 + val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
5167 + val |= QCA8K_SGMII_MODE_CTRL_BASEX;
5170 + qca8k_write(priv, QCA8K_REG_SGMII_CTRL, val);
5172 + /* From original code is reported port instability as SGMII also
5173 + * require delay set. Apply advised values here or take them from DT.
5175 + if (state->interface == PHY_INTERFACE_MODE_SGMII)
5176 + qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg);
5178 + /* For qca8327/qca8328/qca8334/qca8338 sgmii is unique and
5179 + * falling edge is set writing in the PORT0 PAD reg
5181 + if (priv->switch_id == QCA8K_ID_QCA8327 ||
5182 + priv->switch_id == QCA8K_ID_QCA8337)
5183 + reg = QCA8K_REG_PORT0_PAD_CTRL;
5187 + /* SGMII Clock phase configuration */
5188 + if (priv->ports_config.sgmii_rx_clk_falling_edge)
5189 + val |= QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE;
5191 + if (priv->ports_config.sgmii_tx_clk_falling_edge)
5192 + val |= QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE;
5195 + ret = qca8k_rmw(priv, reg,
5196 + QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE |
5197 + QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE,
5202 + dev_err(ds->dev, "xMII mode %s not supported for port %d\n",
5203 + phy_modes(state->interface), port);
5209 +qca8k_phylink_validate(struct dsa_switch *ds, int port,
5210 + unsigned long *supported,
5211 + struct phylink_link_state *state)
5213 + __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
5216 + case 0: /* 1st CPU port */
5217 + if (state->interface != PHY_INTERFACE_MODE_NA &&
5218 + state->interface != PHY_INTERFACE_MODE_RGMII &&
5219 + state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
5220 + state->interface != PHY_INTERFACE_MODE_RGMII_TXID &&
5221 + state->interface != PHY_INTERFACE_MODE_RGMII_RXID &&
5222 + state->interface != PHY_INTERFACE_MODE_SGMII)
5230 + /* Internal PHY */
5231 + if (state->interface != PHY_INTERFACE_MODE_NA &&
5232 + state->interface != PHY_INTERFACE_MODE_GMII &&
5233 + state->interface != PHY_INTERFACE_MODE_INTERNAL)
5236 + case 6: /* 2nd CPU port / external PHY */
5237 + if (state->interface != PHY_INTERFACE_MODE_NA &&
5238 + state->interface != PHY_INTERFACE_MODE_RGMII &&
5239 + state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
5240 + state->interface != PHY_INTERFACE_MODE_RGMII_TXID &&
5241 + state->interface != PHY_INTERFACE_MODE_RGMII_RXID &&
5242 + state->interface != PHY_INTERFACE_MODE_SGMII &&
5243 + state->interface != PHY_INTERFACE_MODE_1000BASEX)
5248 + linkmode_zero(supported);
5252 + phylink_set_port_modes(mask);
5253 + phylink_set(mask, Autoneg);
5255 + phylink_set(mask, 1000baseT_Full);
5256 + phylink_set(mask, 10baseT_Half);
5257 + phylink_set(mask, 10baseT_Full);
5258 + phylink_set(mask, 100baseT_Half);
5259 + phylink_set(mask, 100baseT_Full);
5261 + if (state->interface == PHY_INTERFACE_MODE_1000BASEX)
5262 + phylink_set(mask, 1000baseX_Full);
5264 + phylink_set(mask, Pause);
5265 + phylink_set(mask, Asym_Pause);
5267 + linkmode_and(supported, supported, mask);
5268 + linkmode_and(state->advertising, state->advertising, mask);
5272 +qca8k_phylink_mac_link_state(struct dsa_switch *ds, int port,
5273 + struct phylink_link_state *state)
5275 + struct qca8k_priv *priv = ds->priv;
5279 + ret = qca8k_read(priv, QCA8K_REG_PORT_STATUS(port), ®);
5283 + state->link = !!(reg & QCA8K_PORT_STATUS_LINK_UP);
5284 + state->an_complete = state->link;
5285 + state->an_enabled = !!(reg & QCA8K_PORT_STATUS_LINK_AUTO);
5286 + state->duplex = (reg & QCA8K_PORT_STATUS_DUPLEX) ? DUPLEX_FULL :
5289 + switch (reg & QCA8K_PORT_STATUS_SPEED) {
5290 + case QCA8K_PORT_STATUS_SPEED_10:
5291 + state->speed = SPEED_10;
5293 + case QCA8K_PORT_STATUS_SPEED_100:
5294 + state->speed = SPEED_100;
5296 + case QCA8K_PORT_STATUS_SPEED_1000:
5297 + state->speed = SPEED_1000;
5300 + state->speed = SPEED_UNKNOWN;
5304 + state->pause = MLO_PAUSE_NONE;
5305 + if (reg & QCA8K_PORT_STATUS_RXFLOW)
5306 + state->pause |= MLO_PAUSE_RX;
5307 + if (reg & QCA8K_PORT_STATUS_TXFLOW)
5308 + state->pause |= MLO_PAUSE_TX;
5314 +qca8k_phylink_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode,
5315 + phy_interface_t interface)
5317 + struct qca8k_priv *priv = ds->priv;
5319 + qca8k_port_set_status(priv, port, 0);
5323 +qca8k_phylink_mac_link_up(struct dsa_switch *ds, int port, unsigned int mode,
5324 + phy_interface_t interface, struct phy_device *phydev,
5325 + int speed, int duplex, bool tx_pause, bool rx_pause)
5327 + struct qca8k_priv *priv = ds->priv;
5330 + if (phylink_autoneg_inband(mode)) {
5331 + reg = QCA8K_PORT_STATUS_LINK_AUTO;
5335 + reg = QCA8K_PORT_STATUS_SPEED_10;
5338 + reg = QCA8K_PORT_STATUS_SPEED_100;
5341 + reg = QCA8K_PORT_STATUS_SPEED_1000;
5344 + reg = QCA8K_PORT_STATUS_LINK_AUTO;
5348 + if (duplex == DUPLEX_FULL)
5349 + reg |= QCA8K_PORT_STATUS_DUPLEX;
5351 + if (rx_pause || dsa_is_cpu_port(ds, port))
5352 + reg |= QCA8K_PORT_STATUS_RXFLOW;
5354 + if (tx_pause || dsa_is_cpu_port(ds, port))
5355 + reg |= QCA8K_PORT_STATUS_TXFLOW;
5358 + reg |= QCA8K_PORT_STATUS_TXMAC | QCA8K_PORT_STATUS_RXMAC;
5360 + qca8k_write(priv, QCA8K_REG_PORT_STATUS(port), reg);
5364 +qca8k_get_strings(struct dsa_switch *ds, int port, u32 stringset, uint8_t *data)
5366 + struct qca8k_priv *priv = ds->priv;
5369 + if (stringset != ETH_SS_STATS)
5372 + for (i = 0; i < priv->info->mib_count; i++)
5373 + strncpy(data + i * ETH_GSTRING_LEN, ar8327_mib[i].name,
5377 +static void qca8k_mib_autocast_handler(struct dsa_switch *ds, struct sk_buff *skb)
5379 + struct qca8k_mib_eth_data *mib_eth_data;
5380 + struct qca8k_priv *priv = ds->priv;
5381 + const struct qca8k_mib_desc *mib;
5382 + struct mib_ethhdr *mib_ethhdr;
5383 + int i, mib_len, offset = 0;
5387 + mib_ethhdr = (struct mib_ethhdr *)skb_mac_header(skb);
5388 + mib_eth_data = &priv->mib_eth_data;
5390 +	/* The switch autocasts every port. Ignore other packets and
5391 + * parse only the requested one.
5393 + port = FIELD_GET(QCA_HDR_RECV_SOURCE_PORT, ntohs(mib_ethhdr->hdr));
5394 + if (port != mib_eth_data->req_port)
5397 + data = mib_eth_data->data;
5399 + for (i = 0; i < priv->info->mib_count; i++) {
5400 + mib = &ar8327_mib[i];
5402 + /* First 3 mib are present in the skb head */
5404 + data[i] = mib_ethhdr->data[i];
5408 + mib_len = sizeof(uint32_t);
5410 + /* Some mib are 64 bit wide */
5411 + if (mib->size == 2)
5412 + mib_len = sizeof(uint64_t);
5414 +	/* Copy the mib value from packet to the data buffer */
5415 + memcpy(data + i, skb->data + offset, mib_len);
5417 + /* Set the offset for the next mib */
5418 + offset += mib_len;
5422 +	/* Complete on receiving all the mib packets */
5423 + if (refcount_dec_and_test(&mib_eth_data->port_parsed))
5424 + complete(&mib_eth_data->rw_done);
5428 +qca8k_get_ethtool_stats_eth(struct dsa_switch *ds, int port, u64 *data)
5430 + struct dsa_port *dp = dsa_to_port(ds, port);
5431 + struct qca8k_mib_eth_data *mib_eth_data;
5432 + struct qca8k_priv *priv = ds->priv;
5435 + mib_eth_data = &priv->mib_eth_data;
5437 + mutex_lock(&mib_eth_data->mutex);
5439 + reinit_completion(&mib_eth_data->rw_done);
5441 + mib_eth_data->req_port = dp->index;
5442 + mib_eth_data->data = data;
5443 + refcount_set(&mib_eth_data->port_parsed, QCA8K_NUM_PORTS);
5445 + mutex_lock(&priv->reg_mutex);
5447 + /* Send mib autocast request */
5448 + ret = regmap_update_bits(priv->regmap, QCA8K_REG_MIB,
5449 + QCA8K_MIB_FUNC | QCA8K_MIB_BUSY,
5450 + FIELD_PREP(QCA8K_MIB_FUNC, QCA8K_MIB_CAST) |
5453 + mutex_unlock(&priv->reg_mutex);
5458 + ret = wait_for_completion_timeout(&mib_eth_data->rw_done, QCA8K_ETHERNET_TIMEOUT);
5461 + mutex_unlock(&mib_eth_data->mutex);
5467 +qca8k_get_ethtool_stats(struct dsa_switch *ds, int port,
5470 + struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
5471 + const struct qca8k_mib_desc *mib;
5476 + if (priv->mgmt_master && priv->info->ops->autocast_mib &&
5477 + priv->info->ops->autocast_mib(ds, port, data) > 0)
5480 + for (i = 0; i < priv->info->mib_count; i++) {
5481 + mib = &ar8327_mib[i];
5482 + reg = QCA8K_PORT_MIB_COUNTER(port) + mib->offset;
5484 + ret = qca8k_read(priv, reg, &val);
5488 + if (mib->size == 2) {
5489 + ret = qca8k_read(priv, reg + 4, &hi);
5495 + if (mib->size == 2)
5496 + data[i] |= (u64)hi << 32;
5501 +qca8k_get_sset_count(struct dsa_switch *ds, int port, int sset)
5503 + struct qca8k_priv *priv = ds->priv;
5505 + if (sset != ETH_SS_STATS)
5508 + return priv->info->mib_count;
5512 +qca8k_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *eee)
5514 + struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
5515 + u32 lpi_en = QCA8K_REG_EEE_CTRL_LPI_EN(port);
5519 + mutex_lock(&priv->reg_mutex);
5520 + ret = qca8k_read(priv, QCA8K_REG_EEE_CTRL, ®);
5524 + if (eee->eee_enabled)
5528 + ret = qca8k_write(priv, QCA8K_REG_EEE_CTRL, reg);
5531 + mutex_unlock(&priv->reg_mutex);
5536 +qca8k_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
5538 + /* Nothing to do on the port's MAC */
5543 +qca8k_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
5545 + struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
5549 + case BR_STATE_DISABLED:
5550 + stp_state = QCA8K_PORT_LOOKUP_STATE_DISABLED;
5552 + case BR_STATE_BLOCKING:
5553 + stp_state = QCA8K_PORT_LOOKUP_STATE_BLOCKING;
5555 + case BR_STATE_LISTENING:
5556 + stp_state = QCA8K_PORT_LOOKUP_STATE_LISTENING;
5558 + case BR_STATE_LEARNING:
5559 + stp_state = QCA8K_PORT_LOOKUP_STATE_LEARNING;
5561 + case BR_STATE_FORWARDING:
5563 + stp_state = QCA8K_PORT_LOOKUP_STATE_FORWARD;
5567 + qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
5568 + QCA8K_PORT_LOOKUP_STATE_MASK, stp_state);
5572 +qca8k_port_bridge_join(struct dsa_switch *ds, int port, struct net_device *br)
5574 + struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
5575 + int port_mask, cpu_port;
5578 + cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
5579 + port_mask = BIT(cpu_port);
5581 + for (i = 0; i < QCA8K_NUM_PORTS; i++) {
5582 + if (dsa_is_cpu_port(ds, i))
5584 + if (dsa_to_port(ds, i)->bridge_dev != br)
5586 + /* Add this port to the portvlan mask of the other ports
5589 + ret = regmap_set_bits(priv->regmap,
5590 + QCA8K_PORT_LOOKUP_CTRL(i),
5595 + port_mask |= BIT(i);
5598 + /* Add all other ports to this ports portvlan mask */
5599 + ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
5600 + QCA8K_PORT_LOOKUP_MEMBER, port_mask);
5606 +qca8k_port_bridge_leave(struct dsa_switch *ds, int port, struct net_device *br)
5608 + struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
5611 + cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
5613 + for (i = 0; i < QCA8K_NUM_PORTS; i++) {
5614 + if (dsa_is_cpu_port(ds, i))
5616 + if (dsa_to_port(ds, i)->bridge_dev != br)
5618 +		/* Remove this port from the portvlan mask of the other ports
5621 + regmap_clear_bits(priv->regmap,
5622 + QCA8K_PORT_LOOKUP_CTRL(i),
5626 + /* Set the cpu port to be the only one in the portvlan mask of
5629 + qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
5630 + QCA8K_PORT_LOOKUP_MEMBER, BIT(cpu_port));
5634 +qca8k_port_fast_age(struct dsa_switch *ds, int port)
5636 + struct qca8k_priv *priv = ds->priv;
5638 + mutex_lock(&priv->reg_mutex);
5639 + qca8k_fdb_access(priv, QCA8K_FDB_FLUSH_PORT, port);
5640 + mutex_unlock(&priv->reg_mutex);
5644 +qca8k_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
5646 + struct qca8k_priv *priv = ds->priv;
5647 + unsigned int secs = msecs / 1000;
5650 + /* AGE_TIME reg is set in 7s step */
5653 + /* Handle case with 0 as val to NOT disable
5659 + return regmap_update_bits(priv->regmap, QCA8K_REG_ATU_CTRL, QCA8K_ATU_AGE_TIME_MASK,
5660 + QCA8K_ATU_AGE_TIME(val));
5664 +qca8k_port_enable(struct dsa_switch *ds, int port,
5665 + struct phy_device *phy)
5667 + struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
5669 + qca8k_port_set_status(priv, port, 1);
5670 + priv->port_enabled_map |= BIT(port);
5672 + if (dsa_is_user_port(ds, port))
5673 + phy_support_asym_pause(phy);
5679 +qca8k_port_disable(struct dsa_switch *ds, int port)
5681 + struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
5683 + qca8k_port_set_status(priv, port, 0);
5684 + priv->port_enabled_map &= ~BIT(port);
5688 +qca8k_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
5690 + struct qca8k_priv *priv = ds->priv;
5693 +	/* We only have a general MTU setting.
5694 +	 * DSA always sets the CPU port's MTU to the largest MTU of the slave
5696 + * Setting MTU just for the CPU port is sufficient to correctly set a
5697 + * value for every port.
5699 + if (!dsa_is_cpu_port(ds, port))
5702 + /* To change the MAX_FRAME_SIZE the cpu ports must be off or
5703 + * the switch panics.
5704 + * Turn off both cpu ports before applying the new value to prevent
5707 + if (priv->port_enabled_map & BIT(0))
5708 + qca8k_port_set_status(priv, 0, 0);
5710 + if (priv->port_enabled_map & BIT(6))
5711 + qca8k_port_set_status(priv, 6, 0);
5713 + /* Include L2 header / FCS length */
5714 + ret = qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, new_mtu + ETH_HLEN + ETH_FCS_LEN);
5716 + if (priv->port_enabled_map & BIT(0))
5717 + qca8k_port_set_status(priv, 0, 1);
5719 + if (priv->port_enabled_map & BIT(6))
5720 + qca8k_port_set_status(priv, 6, 1);
5726 +qca8k_port_max_mtu(struct dsa_switch *ds, int port)
5728 + return QCA8K_MAX_MTU;
5732 +qca8k_port_fdb_insert(struct qca8k_priv *priv, const u8 *addr,
5733 + u16 port_mask, u16 vid)
5735 + /* Set the vid to the port vlan id if no vid is set */
5737 + vid = QCA8K_PORT_VID_DEF;
5739 + return qca8k_fdb_add(priv, addr, port_mask, vid,
5740 + QCA8K_ATU_STATUS_STATIC);
5744 +qca8k_port_fdb_add(struct dsa_switch *ds, int port,
5745 + const unsigned char *addr, u16 vid)
5747 + struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
5748 + u16 port_mask = BIT(port);
5750 + return qca8k_port_fdb_insert(priv, addr, port_mask, vid);
5754 +qca8k_port_fdb_del(struct dsa_switch *ds, int port,
5755 + const unsigned char *addr, u16 vid)
5757 + struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
5758 + u16 port_mask = BIT(port);
5761 + vid = QCA8K_PORT_VID_DEF;
5763 + return qca8k_fdb_del(priv, addr, port_mask, vid);
5767 +qca8k_port_fdb_dump(struct dsa_switch *ds, int port,
5768 + dsa_fdb_dump_cb_t *cb, void *data)
5770 + struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
5771 + struct qca8k_fdb _fdb = { 0 };
5772 + int cnt = QCA8K_NUM_FDB_RECORDS;
5776 + mutex_lock(&priv->reg_mutex);
5777 + while (cnt-- && !qca8k_fdb_next(priv, &_fdb, port)) {
5780 + is_static = (_fdb.aging == QCA8K_ATU_STATUS_STATIC);
5781 + ret = cb(_fdb.mac, _fdb.vid, is_static, data);
5785 + mutex_unlock(&priv->reg_mutex);
5791 +qca8k_port_mdb_add(struct dsa_switch *ds, int port,
5792 + const struct switchdev_obj_port_mdb *mdb)
5794 + struct qca8k_priv *priv = ds->priv;
5795 + const u8 *addr = mdb->addr;
5796 + u16 vid = mdb->vid;
5798 + return qca8k_fdb_search_and_insert(priv, BIT(port), addr, vid);
5802 +qca8k_port_mdb_del(struct dsa_switch *ds, int port,
5803 + const struct switchdev_obj_port_mdb *mdb)
5805 + struct qca8k_priv *priv = ds->priv;
5806 + const u8 *addr = mdb->addr;
5807 + u16 vid = mdb->vid;
5809 + return qca8k_fdb_search_and_del(priv, BIT(port), addr, vid);
5813 +qca8k_port_mirror_add(struct dsa_switch *ds, int port,
5814 + struct dsa_mall_mirror_tc_entry *mirror,
5817 + struct qca8k_priv *priv = ds->priv;
5818 + int monitor_port, ret;
5821 +	/* Check for existing entry */
5822 + if ((ingress ? priv->mirror_rx : priv->mirror_tx) & BIT(port))
5825 + ret = regmap_read(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0, &val);
5829 + /* QCA83xx can have only one port set to mirror mode.
5830 + * Check that the correct port is requested and return error otherwise.
5831 +	 * When no mirror port is set, the value is set to 0xF
5833 + monitor_port = FIELD_GET(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
5834 + if (monitor_port != 0xF && monitor_port != mirror->to_local_port)
5837 + /* Set the monitor port */
5838 + val = FIELD_PREP(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM,
5839 + mirror->to_local_port);
5840 + ret = regmap_update_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
5841 + QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
5846 + reg = QCA8K_PORT_LOOKUP_CTRL(port);
5847 + val = QCA8K_PORT_LOOKUP_ING_MIRROR_EN;
5849 + reg = QCA8K_REG_PORT_HOL_CTRL1(port);
5850 + val = QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN;
5853 + ret = regmap_update_bits(priv->regmap, reg, val, val);
5857 + /* Track mirror port for tx and rx to decide when the
5858 + * mirror port has to be disabled.
5861 + priv->mirror_rx |= BIT(port);
5863 + priv->mirror_tx |= BIT(port);
5869 +qca8k_port_mirror_del(struct dsa_switch *ds, int port,
5870 + struct dsa_mall_mirror_tc_entry *mirror)
5872 + struct qca8k_priv *priv = ds->priv;
5876 + if (mirror->ingress) {
5877 + reg = QCA8K_PORT_LOOKUP_CTRL(port);
5878 + val = QCA8K_PORT_LOOKUP_ING_MIRROR_EN;
5880 + reg = QCA8K_REG_PORT_HOL_CTRL1(port);
5881 + val = QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN;
5884 + ret = regmap_clear_bits(priv->regmap, reg, val);
5888 + if (mirror->ingress)
5889 + priv->mirror_rx &= ~BIT(port);
5891 + priv->mirror_tx &= ~BIT(port);
5893 + /* No port set to send packet to mirror port. Disable mirror port */
5894 + if (!priv->mirror_rx && !priv->mirror_tx) {
5895 + val = FIELD_PREP(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, 0xF);
5896 + ret = regmap_update_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
5897 + QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
5902 + dev_err(priv->dev, "Failed to del mirror port from %d", port);
5906 +qca8k_port_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
5907 + struct netlink_ext_ack *extack)
5909 + struct qca8k_priv *priv = ds->priv;
5912 + if (vlan_filtering) {
5913 + ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
5914 + QCA8K_PORT_LOOKUP_VLAN_MODE_MASK,
5915 + QCA8K_PORT_LOOKUP_VLAN_MODE_SECURE);
5917 + ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
5918 + QCA8K_PORT_LOOKUP_VLAN_MODE_MASK,
5919 + QCA8K_PORT_LOOKUP_VLAN_MODE_NONE);
5926 +qca8k_port_vlan_add(struct dsa_switch *ds, int port,
5927 + const struct switchdev_obj_port_vlan *vlan,
5928 + struct netlink_ext_ack *extack)
5930 + bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
5931 + bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
5932 + struct qca8k_priv *priv = ds->priv;
5935 + ret = qca8k_vlan_add(priv, port, vlan->vid, untagged);
5937 + dev_err(priv->dev, "Failed to add VLAN to port %d (%d)", port, ret);
5942 + ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(port),
5943 + QCA8K_EGREES_VLAN_PORT_MASK(port),
5944 + QCA8K_EGREES_VLAN_PORT(port, vlan->vid));
5948 + ret = qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(port),
5949 + QCA8K_PORT_VLAN_CVID(vlan->vid) |
5950 + QCA8K_PORT_VLAN_SVID(vlan->vid));
5957 +qca8k_port_vlan_del(struct dsa_switch *ds, int port,
5958 + const struct switchdev_obj_port_vlan *vlan)
5960 + struct qca8k_priv *priv = ds->priv;
5963 + ret = qca8k_vlan_del(priv, port, vlan->vid);
5965 + dev_err(priv->dev, "Failed to delete VLAN from port %d (%d)", port, ret);
5970 +static u32 qca8k_get_phy_flags(struct dsa_switch *ds, int port)
5972 + struct qca8k_priv *priv = ds->priv;
5974 + /* Communicate to the phy internal driver the switch revision.
5975 + * Based on the switch revision different values needs to be
5976 + * set to the dbg and mmd reg on the phy.
5977 + * The first 2 bit are used to communicate the switch revision
5978 + * to the phy driver.
5980 + if (port > 0 && port < 6)
5981 + return priv->switch_revision;
5986 +static enum dsa_tag_protocol
5987 +qca8k_get_tag_protocol(struct dsa_switch *ds, int port,
5988 + enum dsa_tag_protocol mp)
5990 + return DSA_TAG_PROTO_QCA;
5994 +qca8k_lag_can_offload(struct dsa_switch *ds,
5995 + struct net_device *lag,
5996 + struct netdev_lag_upper_info *info)
5998 + struct dsa_port *dp;
5999 + int id, members = 0;
6001 + id = dsa_lag_id(ds->dst, lag);
6002 + if (id < 0 || id >= ds->num_lag_ids)
6005 + dsa_lag_foreach_port(dp, ds->dst, lag)
6006 + /* Includes the port joining the LAG */
6009 + if (members > QCA8K_NUM_PORTS_FOR_LAG)
6012 + if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
6015 + if (info->hash_type != NETDEV_LAG_HASH_L2 &&
6016 + info->hash_type != NETDEV_LAG_HASH_L23)
6023 +qca8k_lag_setup_hash(struct dsa_switch *ds,
6024 + struct net_device *lag,
6025 + struct netdev_lag_upper_info *info)
6027 + struct qca8k_priv *priv = ds->priv;
6028 + bool unique_lag = true;
6032 + id = dsa_lag_id(ds->dst, lag);
6034 + switch (info->hash_type) {
6035 + case NETDEV_LAG_HASH_L23:
6036 + hash |= QCA8K_TRUNK_HASH_SIP_EN;
6037 + hash |= QCA8K_TRUNK_HASH_DIP_EN;
6039 + case NETDEV_LAG_HASH_L2:
6040 + hash |= QCA8K_TRUNK_HASH_SA_EN;
6041 + hash |= QCA8K_TRUNK_HASH_DA_EN;
6043 + default: /* We should NEVER reach this */
6044 + return -EOPNOTSUPP;
6047 + /* Check if we are the unique configured LAG */
6048 + dsa_lags_foreach_id(i, ds->dst)
6049 + if (i != id && dsa_lag_dev(ds->dst, i)) {
6050 + unique_lag = false;
6054 + /* Hash Mode is global. Make sure the same Hash Mode
6055 +	 * is set to all the 4 possible lags.
6056 + * If we are the unique LAG we can set whatever hash
6058 + * To change hash mode it's needed to remove all LAG
6059 + * and change the mode with the latest.
6062 + priv->lag_hash_mode = hash;
6063 + } else if (priv->lag_hash_mode != hash) {
6064 + netdev_err(lag, "Error: Mismateched Hash Mode across different lag is not supported\n");
6065 + return -EOPNOTSUPP;
6068 + return regmap_update_bits(priv->regmap, QCA8K_TRUNK_HASH_EN_CTRL,
6069 + QCA8K_TRUNK_HASH_MASK, hash);
6073 +qca8k_lag_refresh_portmap(struct dsa_switch *ds, int port,
6074 + struct net_device *lag, bool delete)
6076 + struct qca8k_priv *priv = ds->priv;
6080 + id = dsa_lag_id(ds->dst, lag);
6082 + /* Read current port member */
6083 + ret = regmap_read(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL0, &val);
6087 + /* Shift val to the correct trunk */
6088 + val >>= QCA8K_REG_GOL_TRUNK_SHIFT(id);
6089 + val &= QCA8K_REG_GOL_TRUNK_MEMBER_MASK;
6091 + val &= ~BIT(port);
6095 + /* Update port member. With empty portmap disable trunk */
6096 + ret = regmap_update_bits(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL0,
6097 + QCA8K_REG_GOL_TRUNK_MEMBER(id) |
6098 + QCA8K_REG_GOL_TRUNK_EN(id),
6099 + !val << QCA8K_REG_GOL_TRUNK_SHIFT(id) |
6100 + val << QCA8K_REG_GOL_TRUNK_SHIFT(id));
6102 + /* Search empty member if adding or port on deleting */
6103 + for (i = 0; i < QCA8K_NUM_PORTS_FOR_LAG; i++) {
6104 + ret = regmap_read(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL(id), &val);
6108 + val >>= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i);
6109 + val &= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_MASK;
6112 + /* If port flagged to be disabled assume this member is
6115 + if (val != QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK)
6118 + val &= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT_MASK;
6122 + /* If port flagged to be enabled assume this member is
6125 + if (val == QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK)
6129 + /* We have found the member to add/remove */
6133 + /* Set port in the correct port mask or disable port if in delete mode */
6134 + return regmap_update_bits(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL(id),
6135 + QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN(id, i) |
6136 + QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT(id, i),
6137 + !delete << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i) |
6138 + port << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i));
6142 +qca8k_port_lag_join(struct dsa_switch *ds, int port,
6143 + struct net_device *lag,
6144 + struct netdev_lag_upper_info *info)
6148 + if (!qca8k_lag_can_offload(ds, lag, info))
6149 + return -EOPNOTSUPP;
6151 + ret = qca8k_lag_setup_hash(ds, lag, info);
6155 + return qca8k_lag_refresh_portmap(ds, port, lag, false);
6159 +qca8k_port_lag_leave(struct dsa_switch *ds, int port,
6160 + struct net_device *lag)
6162 + return qca8k_lag_refresh_portmap(ds, port, lag, true);
6166 +qca8k_master_change(struct dsa_switch *ds, const struct net_device *master,
6169 + struct dsa_port *dp = master->dsa_ptr;
6170 + struct qca8k_priv *priv = ds->priv;
6172 + /* Ethernet MIB/MDIO is only supported for CPU port 0 */
6173 + if (dp->index != 0)
6176 + mutex_lock(&priv->mgmt_eth_data.mutex);
6177 + mutex_lock(&priv->mib_eth_data.mutex);
6179 + priv->mgmt_master = operational ? (struct net_device *)master : NULL;
6181 + mutex_unlock(&priv->mib_eth_data.mutex);
6182 + mutex_unlock(&priv->mgmt_eth_data.mutex);
6185 +static int qca8k_connect_tag_protocol(struct dsa_switch *ds,
6186 + enum dsa_tag_protocol proto)
6188 + struct qca_tagger_data *tagger_data;
6191 + case DSA_TAG_PROTO_QCA:
6192 + tagger_data = ds->tagger_data;
6194 + tagger_data->rw_reg_ack_handler = qca8k_rw_reg_ack_handler;
6195 + tagger_data->mib_autocast_handler = qca8k_mib_autocast_handler;
6199 + return -EOPNOTSUPP;
6205 +static const struct dsa_switch_ops qca8k_switch_ops = {
6206 + .get_tag_protocol = qca8k_get_tag_protocol,
6207 + .setup = qca8k_setup,
6208 + .get_strings = qca8k_get_strings,
6209 + .get_ethtool_stats = qca8k_get_ethtool_stats,
6210 + .get_sset_count = qca8k_get_sset_count,
6211 + .set_ageing_time = qca8k_set_ageing_time,
6212 + .get_mac_eee = qca8k_get_mac_eee,
6213 + .set_mac_eee = qca8k_set_mac_eee,
6214 + .port_enable = qca8k_port_enable,
6215 + .port_disable = qca8k_port_disable,
6216 + .port_change_mtu = qca8k_port_change_mtu,
6217 + .port_max_mtu = qca8k_port_max_mtu,
6218 + .port_stp_state_set = qca8k_port_stp_state_set,
6219 + .port_bridge_join = qca8k_port_bridge_join,
6220 + .port_bridge_leave = qca8k_port_bridge_leave,
6221 + .port_fast_age = qca8k_port_fast_age,
6222 + .port_fdb_add = qca8k_port_fdb_add,
6223 + .port_fdb_del = qca8k_port_fdb_del,
6224 + .port_fdb_dump = qca8k_port_fdb_dump,
6225 + .port_mdb_add = qca8k_port_mdb_add,
6226 + .port_mdb_del = qca8k_port_mdb_del,
6227 + .port_mirror_add = qca8k_port_mirror_add,
6228 + .port_mirror_del = qca8k_port_mirror_del,
6229 + .port_vlan_filtering = qca8k_port_vlan_filtering,
6230 + .port_vlan_add = qca8k_port_vlan_add,
6231 + .port_vlan_del = qca8k_port_vlan_del,
6232 + .phylink_validate = qca8k_phylink_validate,
6233 + .phylink_mac_link_state = qca8k_phylink_mac_link_state,
6234 + .phylink_mac_config = qca8k_phylink_mac_config,
6235 + .phylink_mac_link_down = qca8k_phylink_mac_link_down,
6236 + .phylink_mac_link_up = qca8k_phylink_mac_link_up,
6237 + .get_phy_flags = qca8k_get_phy_flags,
6238 + .port_lag_join = qca8k_port_lag_join,
6239 + .port_lag_leave = qca8k_port_lag_leave,
6240 + .master_state_change = qca8k_master_change,
6241 + .connect_tag_protocol = qca8k_connect_tag_protocol,
6244 +static int qca8k_read_switch_id(struct qca8k_priv *priv)
6253 + ret = qca8k_read(priv, QCA8K_REG_MASK_CTRL, &val);
6257 + id = QCA8K_MASK_CTRL_DEVICE_ID(val);
6258 + if (id != priv->info->id) {
6259 + dev_err(priv->dev,
6260 + "Switch id detected %x but expected %x",
6261 + id, priv->info->id);
6265 + priv->switch_id = id;
6267 + /* Save revision to communicate to the internal PHY driver */
6268 + priv->switch_revision = QCA8K_MASK_CTRL_REV_ID(val);
6274 +qca8k_sw_probe(struct mdio_device *mdiodev)
6276 + struct qca8k_priv *priv;
6279 + /* allocate the private data struct so that we can probe the switches
6282 + priv = devm_kzalloc(&mdiodev->dev, sizeof(*priv), GFP_KERNEL);
6286 + priv->info = of_device_get_match_data(priv->dev);
6287 + priv->bus = mdiodev->bus;
6288 + priv->dev = &mdiodev->dev;
6290 + priv->reset_gpio = devm_gpiod_get_optional(priv->dev, "reset",
6292 + if (IS_ERR(priv->reset_gpio))
6293 + return PTR_ERR(priv->reset_gpio);
6295 + if (priv->reset_gpio) {
6296 + gpiod_set_value_cansleep(priv->reset_gpio, 1);
6297 + /* The active low duration must be greater than 10 ms
6298 + * and checkpatch.pl wants 20 ms.
6301 + gpiod_set_value_cansleep(priv->reset_gpio, 0);
6304 + /* Start by setting up the register mapping */
6305 + priv->regmap = devm_regmap_init(&mdiodev->dev, NULL, priv,
6306 + &qca8k_regmap_config);
6307 + if (IS_ERR(priv->regmap)) {
6308 + dev_err(priv->dev, "regmap initialization failed");
6309 + return PTR_ERR(priv->regmap);
6312 + priv->mdio_cache.page = 0xffff;
6313 + priv->mdio_cache.lo = 0xffff;
6314 + priv->mdio_cache.hi = 0xffff;
6316 + /* Check the detected switch id */
6317 + ret = qca8k_read_switch_id(priv);
6321 + priv->ds = devm_kzalloc(&mdiodev->dev, sizeof(*priv->ds), GFP_KERNEL);
6325 + mutex_init(&priv->mgmt_eth_data.mutex);
6326 + init_completion(&priv->mgmt_eth_data.rw_done);
6328 + mutex_init(&priv->mib_eth_data.mutex);
6329 + init_completion(&priv->mib_eth_data.rw_done);
6331 + priv->ds->dev = &mdiodev->dev;
6332 + priv->ds->num_ports = QCA8K_NUM_PORTS;
6333 + priv->ds->priv = priv;
6334 + priv->ds->ops = &qca8k_switch_ops;
6335 + mutex_init(&priv->reg_mutex);
6336 + dev_set_drvdata(&mdiodev->dev, priv);
6338 + return dsa_register_switch(priv->ds);
6342 +qca8k_sw_remove(struct mdio_device *mdiodev)
6344 + struct qca8k_priv *priv = dev_get_drvdata(&mdiodev->dev);
6350 + for (i = 0; i < QCA8K_NUM_PORTS; i++)
6351 + qca8k_port_set_status(priv, i, 0);
6353 + dsa_unregister_switch(priv->ds);
6355 + dev_set_drvdata(&mdiodev->dev, NULL);
6358 +static void qca8k_sw_shutdown(struct mdio_device *mdiodev)
6360 + struct qca8k_priv *priv = dev_get_drvdata(&mdiodev->dev);
6365 + dsa_switch_shutdown(priv->ds);
6367 + dev_set_drvdata(&mdiodev->dev, NULL);
6370 +#ifdef CONFIG_PM_SLEEP
6372 +qca8k_set_pm(struct qca8k_priv *priv, int enable)
6376 + for (port = 0; port < QCA8K_NUM_PORTS; port++) {
6377 + /* Do not enable on resume if the port was
6378 + * disabled before.
6380 + if (!(priv->port_enabled_map & BIT(port)))
6383 + qca8k_port_set_status(priv, port, enable);
6387 +static int qca8k_suspend(struct device *dev)
6389 + struct qca8k_priv *priv = dev_get_drvdata(dev);
6391 + qca8k_set_pm(priv, 0);
6393 + return dsa_switch_suspend(priv->ds);
6396 +static int qca8k_resume(struct device *dev)
6398 + struct qca8k_priv *priv = dev_get_drvdata(dev);
6400 + qca8k_set_pm(priv, 1);
6402 + return dsa_switch_resume(priv->ds);
6404 +#endif /* CONFIG_PM_SLEEP */
6406 +static SIMPLE_DEV_PM_OPS(qca8k_pm_ops,
6407 + qca8k_suspend, qca8k_resume);
6409 +static const struct qca8k_info_ops qca8xxx_ops = {
6410 + .autocast_mib = qca8k_get_ethtool_stats_eth,
6413 +static const struct qca8k_match_data qca8327 = {
6414 + .id = QCA8K_ID_QCA8327,
6415 + .reduced_package = true,
6416 + .mib_count = QCA8K_QCA832X_MIB_COUNT,
6417 + .ops = &qca8xxx_ops,
6420 +static const struct qca8k_match_data qca8328 = {
6421 + .id = QCA8K_ID_QCA8327,
6422 + .mib_count = QCA8K_QCA832X_MIB_COUNT,
6423 + .ops = &qca8xxx_ops,
6426 +static const struct qca8k_match_data qca833x = {
6427 + .id = QCA8K_ID_QCA8337,
6428 + .mib_count = QCA8K_QCA833X_MIB_COUNT,
6429 + .ops = &qca8xxx_ops,
6432 +static const struct of_device_id qca8k_of_match[] = {
6433 + { .compatible = "qca,qca8327", .data = &qca8327 },
6434 + { .compatible = "qca,qca8328", .data = &qca8328 },
6435 + { .compatible = "qca,qca8334", .data = &qca833x },
6436 + { .compatible = "qca,qca8337", .data = &qca833x },
6437 + { /* sentinel */ },
6440 +static struct mdio_driver qca8kmdio_driver = {
6441 + .probe = qca8k_sw_probe,
6442 + .remove = qca8k_sw_remove,
6443 + .shutdown = qca8k_sw_shutdown,
6444 + .mdiodrv.driver = {
6446 + .of_match_table = qca8k_of_match,
6447 + .pm = &qca8k_pm_ops,
6451 +mdio_module_driver(qca8kmdio_driver);
6453 +MODULE_AUTHOR("Mathieu Olivari, John Crispin <john@phrozen.org>");
6454 +MODULE_DESCRIPTION("Driver for QCA8K ethernet switch family");
6455 +MODULE_LICENSE("GPL v2");
6456 +MODULE_ALIAS("platform:qca8k");
6458 +++ b/drivers/net/dsa/qca/qca8k-common.c
6460 +// SPDX-License-Identifier: GPL-2.0
6462 + * Copyright (C) 2009 Felix Fietkau <nbd@nbd.name>
6463 + * Copyright (C) 2011-2012 Gabor Juhos <juhosg@openwrt.org>
6464 + * Copyright (c) 2015, 2019, The Linux Foundation. All rights reserved.
6465 + * Copyright (c) 2016 John Crispin <john@phrozen.org>
6468 +#include <linux/netdevice.h>
6469 +#include <net/dsa.h>
6473 +#define MIB_DESC(_s, _o, _n) \
6480 +const struct qca8k_mib_desc ar8327_mib[] = {
6481 + MIB_DESC(1, 0x00, "RxBroad"),
6482 + MIB_DESC(1, 0x04, "RxPause"),
6483 + MIB_DESC(1, 0x08, "RxMulti"),
6484 + MIB_DESC(1, 0x0c, "RxFcsErr"),
6485 + MIB_DESC(1, 0x10, "RxAlignErr"),
6486 + MIB_DESC(1, 0x14, "RxRunt"),
6487 + MIB_DESC(1, 0x18, "RxFragment"),
6488 + MIB_DESC(1, 0x1c, "Rx64Byte"),
6489 + MIB_DESC(1, 0x20, "Rx128Byte"),
6490 + MIB_DESC(1, 0x24, "Rx256Byte"),
6491 + MIB_DESC(1, 0x28, "Rx512Byte"),
6492 + MIB_DESC(1, 0x2c, "Rx1024Byte"),
6493 + MIB_DESC(1, 0x30, "Rx1518Byte"),
6494 + MIB_DESC(1, 0x34, "RxMaxByte"),
6495 + MIB_DESC(1, 0x38, "RxTooLong"),
6496 + MIB_DESC(2, 0x3c, "RxGoodByte"),
6497 + MIB_DESC(2, 0x44, "RxBadByte"),
6498 + MIB_DESC(1, 0x4c, "RxOverFlow"),
6499 + MIB_DESC(1, 0x50, "Filtered"),
6500 + MIB_DESC(1, 0x54, "TxBroad"),
6501 + MIB_DESC(1, 0x58, "TxPause"),
6502 + MIB_DESC(1, 0x5c, "TxMulti"),
6503 + MIB_DESC(1, 0x60, "TxUnderRun"),
6504 + MIB_DESC(1, 0x64, "Tx64Byte"),
6505 + MIB_DESC(1, 0x68, "Tx128Byte"),
6506 + MIB_DESC(1, 0x6c, "Tx256Byte"),
6507 + MIB_DESC(1, 0x70, "Tx512Byte"),
6508 + MIB_DESC(1, 0x74, "Tx1024Byte"),
6509 + MIB_DESC(1, 0x78, "Tx1518Byte"),
6510 + MIB_DESC(1, 0x7c, "TxMaxByte"),
6511 + MIB_DESC(1, 0x80, "TxOverSize"),
6512 + MIB_DESC(2, 0x84, "TxByte"),
6513 + MIB_DESC(1, 0x8c, "TxCollision"),
6514 + MIB_DESC(1, 0x90, "TxAbortCol"),
6515 + MIB_DESC(1, 0x94, "TxMultiCol"),
6516 + MIB_DESC(1, 0x98, "TxSingleCol"),
6517 + MIB_DESC(1, 0x9c, "TxExcDefer"),
6518 + MIB_DESC(1, 0xa0, "TxDefer"),
6519 + MIB_DESC(1, 0xa4, "TxLateCol"),
6520 + MIB_DESC(1, 0xa8, "RXUnicast"),
6521 + MIB_DESC(1, 0xac, "TXUnicast"),
6523 --- a/drivers/net/dsa/qca/qca8k.h
6524 +++ b/drivers/net/dsa/qca/qca8k.h
6525 @@ -414,4 +414,7 @@ struct qca8k_fdb {
6529 +/* Common setup function */
6530 +extern const struct qca8k_mib_desc ar8327_mib[];
6532 #endif /* __QCA8K_H */