6c823b3483400fbb8e332b8e5e604ab00f0e1297
[openwrt/openwrt.git] / target / linux / ipq40xx / patches-5.4 / 705-net-add-qualcomm-ar40xx-phy.patch
1 Index: linux-5.4.51/drivers/net/phy/Kconfig
2 ===================================================================
3 --- linux-5.4.51.orig/drivers/net/phy/Kconfig
4 +++ linux-5.4.51/drivers/net/phy/Kconfig
5 @@ -587,6 +587,13 @@ config MDIO_IPQ40XX
6 This driver supports the MDIO interface found in Qualcomm
7 Atheros ipq40xx Soc chip.
8
9 +config AR40XX_PHY
10 + tristate "Driver for Qualcomm Atheros IPQ40XX switches"
11 + depends on HAS_IOMEM && OF
12 + select SWCONFIG
13 + ---help---
14 + This is the driver for Qualcomm Atheros IPQ40XX ESS switches.
15 +
16 endif # PHYLIB
17
18 config MICREL_KS8995MA
19 Index: linux-5.4.51/drivers/net/phy/Makefile
20 ===================================================================
21 --- linux-5.4.51.orig/drivers/net/phy/Makefile
22 +++ linux-5.4.51/drivers/net/phy/Makefile
23 @@ -70,6 +70,7 @@ ifdef CONFIG_HWMON
24 aquantia-objs += aquantia_hwmon.o
25 endif
26 obj-$(CONFIG_AQUANTIA_PHY) += aquantia.o
27 +obj-$(CONFIG_AR40XX_PHY) += ar40xx.o
28 obj-$(CONFIG_AX88796B_PHY) += ax88796b.o
29 obj-$(CONFIG_AT803X_PHY) += at803x.o
30 obj-$(CONFIG_BCM63XX_PHY) += bcm63xx.o
31 Index: linux-5.4.51/drivers/net/phy/ar40xx.c
32 ===================================================================
33 --- /dev/null
34 +++ linux-5.4.51/drivers/net/phy/ar40xx.c
35 @@ -0,0 +1,2118 @@
36 +/*
37 + * Copyright (c) 2016, The Linux Foundation. All rights reserved.
38 + *
39 + * Permission to use, copy, modify, and/or distribute this software for
40 + * any purpose with or without fee is hereby granted, provided that the
41 + * above copyright notice and this permission notice appear in all copies.
42 + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
43 + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
44 + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
45 + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
46 + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
47 + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
48 + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
49 + */
50 +
51 +#include <linux/module.h>
52 +#include <linux/list.h>
53 +#include <linux/bitops.h>
54 +#include <linux/switch.h>
55 +#include <linux/delay.h>
56 +#include <linux/phy.h>
57 +#include <linux/clk.h>
58 +#include <linux/reset.h>
59 +#include <linux/lockdep.h>
60 +#include <linux/workqueue.h>
61 +#include <linux/of_device.h>
62 +#include <linux/of_address.h>
63 +#include <linux/mdio.h>
64 +#include <linux/gpio.h>
65 +
66 +#include "ar40xx.h"
67 +
68 +static struct ar40xx_priv *ar40xx_priv;
69 +
70 +#define MIB_DESC(_s , _o, _n) \
71 + { \
72 + .size = (_s), \
73 + .offset = (_o), \
74 + .name = (_n), \
75 + }
76 +
77 +static const struct ar40xx_mib_desc ar40xx_mibs[] = {
78 + MIB_DESC(1, AR40XX_STATS_RXBROAD, "RxBroad"),
79 + MIB_DESC(1, AR40XX_STATS_RXPAUSE, "RxPause"),
80 + MIB_DESC(1, AR40XX_STATS_RXMULTI, "RxMulti"),
81 + MIB_DESC(1, AR40XX_STATS_RXFCSERR, "RxFcsErr"),
82 + MIB_DESC(1, AR40XX_STATS_RXALIGNERR, "RxAlignErr"),
83 + MIB_DESC(1, AR40XX_STATS_RXRUNT, "RxRunt"),
84 + MIB_DESC(1, AR40XX_STATS_RXFRAGMENT, "RxFragment"),
85 + MIB_DESC(1, AR40XX_STATS_RX64BYTE, "Rx64Byte"),
86 + MIB_DESC(1, AR40XX_STATS_RX128BYTE, "Rx128Byte"),
87 + MIB_DESC(1, AR40XX_STATS_RX256BYTE, "Rx256Byte"),
88 + MIB_DESC(1, AR40XX_STATS_RX512BYTE, "Rx512Byte"),
89 + MIB_DESC(1, AR40XX_STATS_RX1024BYTE, "Rx1024Byte"),
90 + MIB_DESC(1, AR40XX_STATS_RX1518BYTE, "Rx1518Byte"),
91 + MIB_DESC(1, AR40XX_STATS_RXMAXBYTE, "RxMaxByte"),
92 + MIB_DESC(1, AR40XX_STATS_RXTOOLONG, "RxTooLong"),
93 + MIB_DESC(2, AR40XX_STATS_RXGOODBYTE, "RxGoodByte"),
94 + MIB_DESC(2, AR40XX_STATS_RXBADBYTE, "RxBadByte"),
95 + MIB_DESC(1, AR40XX_STATS_RXOVERFLOW, "RxOverFlow"),
96 + MIB_DESC(1, AR40XX_STATS_FILTERED, "Filtered"),
97 + MIB_DESC(1, AR40XX_STATS_TXBROAD, "TxBroad"),
98 + MIB_DESC(1, AR40XX_STATS_TXPAUSE, "TxPause"),
99 + MIB_DESC(1, AR40XX_STATS_TXMULTI, "TxMulti"),
100 + MIB_DESC(1, AR40XX_STATS_TXUNDERRUN, "TxUnderRun"),
101 + MIB_DESC(1, AR40XX_STATS_TX64BYTE, "Tx64Byte"),
102 + MIB_DESC(1, AR40XX_STATS_TX128BYTE, "Tx128Byte"),
103 + MIB_DESC(1, AR40XX_STATS_TX256BYTE, "Tx256Byte"),
104 + MIB_DESC(1, AR40XX_STATS_TX512BYTE, "Tx512Byte"),
105 + MIB_DESC(1, AR40XX_STATS_TX1024BYTE, "Tx1024Byte"),
106 + MIB_DESC(1, AR40XX_STATS_TX1518BYTE, "Tx1518Byte"),
107 + MIB_DESC(1, AR40XX_STATS_TXMAXBYTE, "TxMaxByte"),
108 + MIB_DESC(1, AR40XX_STATS_TXOVERSIZE, "TxOverSize"),
109 + MIB_DESC(2, AR40XX_STATS_TXBYTE, "TxByte"),
110 + MIB_DESC(1, AR40XX_STATS_TXCOLLISION, "TxCollision"),
111 + MIB_DESC(1, AR40XX_STATS_TXABORTCOL, "TxAbortCol"),
112 + MIB_DESC(1, AR40XX_STATS_TXMULTICOL, "TxMultiCol"),
113 + MIB_DESC(1, AR40XX_STATS_TXSINGLECOL, "TxSingleCol"),
114 + MIB_DESC(1, AR40XX_STATS_TXEXCDEFER, "TxExcDefer"),
115 + MIB_DESC(1, AR40XX_STATS_TXDEFER, "TxDefer"),
116 + MIB_DESC(1, AR40XX_STATS_TXLATECOL, "TxLateCol"),
117 +};
118 +
119 +static u32
120 +ar40xx_read(struct ar40xx_priv *priv, int reg)
121 +{
122 + return readl(priv->hw_addr + reg);
123 +}
124 +
125 +static u32
126 +ar40xx_psgmii_read(struct ar40xx_priv *priv, int reg)
127 +{
128 + return readl(priv->psgmii_hw_addr + reg);
129 +}
130 +
131 +static void
132 +ar40xx_write(struct ar40xx_priv *priv, int reg, u32 val)
133 +{
134 + writel(val, priv->hw_addr + reg);
135 +}
136 +
137 +static u32
138 +ar40xx_rmw(struct ar40xx_priv *priv, int reg, u32 mask, u32 val)
139 +{
140 + u32 ret;
141 +
142 + ret = ar40xx_read(priv, reg);
143 + ret &= ~mask;
144 + ret |= val;
145 + ar40xx_write(priv, reg, ret);
146 + return ret;
147 +}
148 +
149 +static void
150 +ar40xx_psgmii_write(struct ar40xx_priv *priv, int reg, u32 val)
151 +{
152 + writel(val, priv->psgmii_hw_addr + reg);
153 +}
154 +
155 +static void
156 +ar40xx_phy_dbg_write(struct ar40xx_priv *priv, int phy_addr,
157 + u16 dbg_addr, u16 dbg_data)
158 +{
159 + struct mii_bus *bus = priv->mii_bus;
160 +
161 + mutex_lock(&bus->mdio_lock);
162 + bus->write(bus, phy_addr, AR40XX_MII_ATH_DBG_ADDR, dbg_addr);
163 + bus->write(bus, phy_addr, AR40XX_MII_ATH_DBG_DATA, dbg_data);
164 + mutex_unlock(&bus->mdio_lock);
165 +}
166 +
167 +static void
168 +ar40xx_phy_dbg_read(struct ar40xx_priv *priv, int phy_addr,
169 + u16 dbg_addr, u16 *dbg_data)
170 +{
171 + struct mii_bus *bus = priv->mii_bus;
172 +
173 + mutex_lock(&bus->mdio_lock);
174 + bus->write(bus, phy_addr, AR40XX_MII_ATH_DBG_ADDR, dbg_addr);
175 + *dbg_data = bus->read(bus, phy_addr, AR40XX_MII_ATH_DBG_DATA);
176 + mutex_unlock(&bus->mdio_lock);
177 +}
178 +
179 +static void
180 +ar40xx_phy_mmd_write(struct ar40xx_priv *priv, u32 phy_id,
181 + u16 mmd_num, u16 reg_id, u16 reg_val)
182 +{
183 + struct mii_bus *bus = priv->mii_bus;
184 +
185 + mutex_lock(&bus->mdio_lock);
186 + bus->write(bus, phy_id,
187 + AR40XX_MII_ATH_MMD_ADDR, mmd_num);
188 + bus->write(bus, phy_id,
189 + AR40XX_MII_ATH_MMD_DATA, reg_id);
190 + bus->write(bus, phy_id,
191 + AR40XX_MII_ATH_MMD_ADDR,
192 + 0x4000 | mmd_num);
193 + bus->write(bus, phy_id,
194 + AR40XX_MII_ATH_MMD_DATA, reg_val);
195 + mutex_unlock(&bus->mdio_lock);
196 +}
197 +
198 +static u16
199 +ar40xx_phy_mmd_read(struct ar40xx_priv *priv, u32 phy_id,
200 + u16 mmd_num, u16 reg_id)
201 +{
202 + u16 value;
203 + struct mii_bus *bus = priv->mii_bus;
204 +
205 + mutex_lock(&bus->mdio_lock);
206 + bus->write(bus, phy_id,
207 + AR40XX_MII_ATH_MMD_ADDR, mmd_num);
208 + bus->write(bus, phy_id,
209 + AR40XX_MII_ATH_MMD_DATA, reg_id);
210 + bus->write(bus, phy_id,
211 + AR40XX_MII_ATH_MMD_ADDR,
212 + 0x4000 | mmd_num);
213 + value = bus->read(bus, phy_id, AR40XX_MII_ATH_MMD_DATA);
214 + mutex_unlock(&bus->mdio_lock);
215 + return value;
216 +}
217 +
218 +/* Start of swconfig support */
219 +
220 +static void
221 +ar40xx_phy_poll_reset(struct ar40xx_priv *priv)
222 +{
223 + u32 i, in_reset, retries = 500;
224 + struct mii_bus *bus = priv->mii_bus;
225 +
226 + /* Assume RESET was recently issued to some or all of the phys */
227 + in_reset = GENMASK(AR40XX_NUM_PHYS - 1, 0);
228 +
229 + while (retries--) {
230 + /* 1ms should be plenty of time.
231 + * 802.3 spec allows for a max wait time of 500ms
232 + */
233 + usleep_range(1000, 2000);
234 +
235 + for (i = 0; i < AR40XX_NUM_PHYS; i++) {
236 + int val;
237 +
238 + /* skip devices which have completed reset */
239 + if (!(in_reset & BIT(i)))
240 + continue;
241 +
242 + val = mdiobus_read(bus, i, MII_BMCR);
243 + if (val < 0)
244 + continue;
245 +
246 + /* mark when phy is no longer in reset state */
247 + if (!(val & BMCR_RESET))
248 + in_reset &= ~BIT(i);
249 + }
250 +
251 + if (!in_reset)
252 + return;
253 + }
254 +
255 + dev_warn(&bus->dev, "Failed to reset all phys! (in_reset: 0x%x)\n",
256 + in_reset);
257 +}
258 +
259 +static void
260 +ar40xx_phy_init(struct ar40xx_priv *priv)
261 +{
262 + int i;
263 + struct mii_bus *bus;
264 + u16 val;
265 +
266 + bus = priv->mii_bus;
267 + for (i = 0; i < AR40XX_NUM_PORTS - 1; i++) {
268 + ar40xx_phy_dbg_read(priv, i, AR40XX_PHY_DEBUG_0, &val);
269 + val &= ~AR40XX_PHY_MANU_CTRL_EN;
270 + ar40xx_phy_dbg_write(priv, i, AR40XX_PHY_DEBUG_0, val);
271 + mdiobus_write(bus, i,
272 + MII_ADVERTISE, ADVERTISE_ALL |
273 + ADVERTISE_PAUSE_CAP |
274 + ADVERTISE_PAUSE_ASYM);
275 + mdiobus_write(bus, i, MII_CTRL1000, ADVERTISE_1000FULL);
276 + mdiobus_write(bus, i, MII_BMCR, BMCR_RESET | BMCR_ANENABLE);
277 + }
278 +
279 + ar40xx_phy_poll_reset(priv);
280 +}
281 +
282 +static void
283 +ar40xx_port_phy_linkdown(struct ar40xx_priv *priv)
284 +{
285 + struct mii_bus *bus;
286 + int i;
287 + u16 val;
288 +
289 + bus = priv->mii_bus;
290 + for (i = 0; i < AR40XX_NUM_PORTS - 1; i++) {
291 + mdiobus_write(bus, i, MII_CTRL1000, 0);
292 + mdiobus_write(bus, i, MII_ADVERTISE, 0);
293 + mdiobus_write(bus, i, MII_BMCR, BMCR_RESET | BMCR_ANENABLE);
294 + ar40xx_phy_dbg_read(priv, i, AR40XX_PHY_DEBUG_0, &val);
295 + val |= AR40XX_PHY_MANU_CTRL_EN;
296 + ar40xx_phy_dbg_write(priv, i, AR40XX_PHY_DEBUG_0, val);
297 + /* disable transmit */
298 + ar40xx_phy_dbg_read(priv, i, AR40XX_PHY_DEBUG_2, &val);
299 + val &= 0xf00f;
300 + ar40xx_phy_dbg_write(priv, i, AR40XX_PHY_DEBUG_2, val);
301 + }
302 +}
303 +
304 +static void
305 +ar40xx_set_mirror_regs(struct ar40xx_priv *priv)
306 +{
307 + int port;
308 +
309 + /* reset all mirror registers */
310 + ar40xx_rmw(priv, AR40XX_REG_FWD_CTRL0,
311 + AR40XX_FWD_CTRL0_MIRROR_PORT,
312 + (0xF << AR40XX_FWD_CTRL0_MIRROR_PORT_S));
313 + for (port = 0; port < AR40XX_NUM_PORTS; port++) {
314 + ar40xx_rmw(priv, AR40XX_REG_PORT_LOOKUP(port),
315 + AR40XX_PORT_LOOKUP_ING_MIRROR_EN, 0);
316 +
317 + ar40xx_rmw(priv, AR40XX_REG_PORT_HOL_CTRL1(port),
318 + AR40XX_PORT_HOL_CTRL1_EG_MIRROR_EN, 0);
319 + }
320 +
321 + /* now enable mirroring if necessary */
322 + if (priv->source_port >= AR40XX_NUM_PORTS ||
323 + priv->monitor_port >= AR40XX_NUM_PORTS ||
324 + priv->source_port == priv->monitor_port) {
325 + return;
326 + }
327 +
328 + ar40xx_rmw(priv, AR40XX_REG_FWD_CTRL0,
329 + AR40XX_FWD_CTRL0_MIRROR_PORT,
330 + (priv->monitor_port << AR40XX_FWD_CTRL0_MIRROR_PORT_S));
331 +
332 + if (priv->mirror_rx)
333 + ar40xx_rmw(priv, AR40XX_REG_PORT_LOOKUP(priv->source_port), 0,
334 + AR40XX_PORT_LOOKUP_ING_MIRROR_EN);
335 +
336 + if (priv->mirror_tx)
337 + ar40xx_rmw(priv, AR40XX_REG_PORT_HOL_CTRL1(priv->source_port),
338 + 0, AR40XX_PORT_HOL_CTRL1_EG_MIRROR_EN);
339 +}
340 +
341 +static int
342 +ar40xx_sw_get_ports(struct switch_dev *dev, struct switch_val *val)
343 +{
344 + struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
345 + u8 ports = priv->vlan_table[val->port_vlan];
346 + int i;
347 +
348 + val->len = 0;
349 + for (i = 0; i < dev->ports; i++) {
350 + struct switch_port *p;
351 +
352 + if (!(ports & BIT(i)))
353 + continue;
354 +
355 + p = &val->value.ports[val->len++];
356 + p->id = i;
357 + if ((priv->vlan_tagged & BIT(i)) ||
358 + (priv->pvid[i] != val->port_vlan))
359 + p->flags = BIT(SWITCH_PORT_FLAG_TAGGED);
360 + else
361 + p->flags = 0;
362 + }
363 + return 0;
364 +}
365 +
366 +static int
367 +ar40xx_sw_set_ports(struct switch_dev *dev, struct switch_val *val)
368 +{
369 + struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
370 + u8 *vt = &priv->vlan_table[val->port_vlan];
371 + int i;
372 +
373 + *vt = 0;
374 + for (i = 0; i < val->len; i++) {
375 + struct switch_port *p = &val->value.ports[i];
376 +
377 + if (p->flags & BIT(SWITCH_PORT_FLAG_TAGGED)) {
378 + if (val->port_vlan == priv->pvid[p->id])
379 + priv->vlan_tagged |= BIT(p->id);
380 + } else {
381 + priv->vlan_tagged &= ~BIT(p->id);
382 + priv->pvid[p->id] = val->port_vlan;
383 + }
384 +
385 + *vt |= BIT(p->id);
386 + }
387 + return 0;
388 +}
389 +
390 +static int
391 +ar40xx_reg_wait(struct ar40xx_priv *priv, u32 reg, u32 mask, u32 val,
392 + unsigned timeout)
393 +{
394 + int i;
395 +
396 + for (i = 0; i < timeout; i++) {
397 + u32 t;
398 +
399 + t = ar40xx_read(priv, reg);
400 + if ((t & mask) == val)
401 + return 0;
402 +
403 + usleep_range(1000, 2000);
404 + }
405 +
406 + return -ETIMEDOUT;
407 +}
408 +
409 +static int
410 +ar40xx_mib_op(struct ar40xx_priv *priv, u32 op)
411 +{
412 + int ret;
413 +
414 + lockdep_assert_held(&priv->mib_lock);
415 +
416 + /* Capture the hardware statistics for all ports */
417 + ar40xx_rmw(priv, AR40XX_REG_MIB_FUNC,
418 + AR40XX_MIB_FUNC, (op << AR40XX_MIB_FUNC_S));
419 +
420 + /* Wait for the capturing to complete. */
421 + ret = ar40xx_reg_wait(priv, AR40XX_REG_MIB_FUNC,
422 + AR40XX_MIB_BUSY, 0, 10);
423 +
424 + return ret;
425 +}
426 +
427 +static void
428 +ar40xx_mib_fetch_port_stat(struct ar40xx_priv *priv, int port, bool flush)
429 +{
430 + unsigned int base;
431 + u64 *mib_stats;
432 + int i;
433 + u32 num_mibs = ARRAY_SIZE(ar40xx_mibs);
434 +
435 + WARN_ON(port >= priv->dev.ports);
436 +
437 + lockdep_assert_held(&priv->mib_lock);
438 +
439 + base = AR40XX_REG_PORT_STATS_START +
440 + AR40XX_REG_PORT_STATS_LEN * port;
441 +
442 + mib_stats = &priv->mib_stats[port * num_mibs];
443 + if (flush) {
444 + u32 len;
445 +
446 + len = num_mibs * sizeof(*mib_stats);
447 + memset(mib_stats, 0, len);
448 + return;
449 + }
450 + for (i = 0; i < num_mibs; i++) {
451 + const struct ar40xx_mib_desc *mib;
452 + u64 t;
453 +
454 + mib = &ar40xx_mibs[i];
455 + t = ar40xx_read(priv, base + mib->offset);
456 + if (mib->size == 2) {
457 + u64 hi;
458 +
459 + hi = ar40xx_read(priv, base + mib->offset + 4);
460 + t |= hi << 32;
461 + }
462 +
463 + mib_stats[i] += t;
464 + }
465 +}
466 +
467 +static int
468 +ar40xx_mib_capture(struct ar40xx_priv *priv)
469 +{
470 + return ar40xx_mib_op(priv, AR40XX_MIB_FUNC_CAPTURE);
471 +}
472 +
473 +static int
474 +ar40xx_mib_flush(struct ar40xx_priv *priv)
475 +{
476 + return ar40xx_mib_op(priv, AR40XX_MIB_FUNC_FLUSH);
477 +}
478 +
479 +static int
480 +ar40xx_sw_set_reset_mibs(struct switch_dev *dev,
481 + const struct switch_attr *attr,
482 + struct switch_val *val)
483 +{
484 + struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
485 + unsigned int len;
486 + int ret;
487 + u32 num_mibs = ARRAY_SIZE(ar40xx_mibs);
488 +
489 + mutex_lock(&priv->mib_lock);
490 +
491 + len = priv->dev.ports * num_mibs * sizeof(*priv->mib_stats);
492 + memset(priv->mib_stats, 0, len);
493 + ret = ar40xx_mib_flush(priv);
494 +
495 + mutex_unlock(&priv->mib_lock);
496 + return ret;
497 +}
498 +
499 +static int
500 +ar40xx_sw_set_vlan(struct switch_dev *dev, const struct switch_attr *attr,
501 + struct switch_val *val)
502 +{
503 + struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
504 +
505 + priv->vlan = !!val->value.i;
506 + return 0;
507 +}
508 +
509 +static int
510 +ar40xx_sw_get_vlan(struct switch_dev *dev, const struct switch_attr *attr,
511 + struct switch_val *val)
512 +{
513 + struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
514 +
515 + val->value.i = priv->vlan;
516 + return 0;
517 +}
518 +
519 +static int
520 +ar40xx_sw_set_mirror_rx_enable(struct switch_dev *dev,
521 + const struct switch_attr *attr,
522 + struct switch_val *val)
523 +{
524 + struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
525 +
526 + mutex_lock(&priv->reg_mutex);
527 + priv->mirror_rx = !!val->value.i;
528 + ar40xx_set_mirror_regs(priv);
529 + mutex_unlock(&priv->reg_mutex);
530 +
531 + return 0;
532 +}
533 +
534 +static int
535 +ar40xx_sw_get_mirror_rx_enable(struct switch_dev *dev,
536 + const struct switch_attr *attr,
537 + struct switch_val *val)
538 +{
539 + struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
540 +
541 + mutex_lock(&priv->reg_mutex);
542 + val->value.i = priv->mirror_rx;
543 + mutex_unlock(&priv->reg_mutex);
544 + return 0;
545 +}
546 +
547 +static int
548 +ar40xx_sw_set_mirror_tx_enable(struct switch_dev *dev,
549 + const struct switch_attr *attr,
550 + struct switch_val *val)
551 +{
552 + struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
553 +
554 + mutex_lock(&priv->reg_mutex);
555 + priv->mirror_tx = !!val->value.i;
556 + ar40xx_set_mirror_regs(priv);
557 + mutex_unlock(&priv->reg_mutex);
558 +
559 + return 0;
560 +}
561 +
562 +static int
563 +ar40xx_sw_get_mirror_tx_enable(struct switch_dev *dev,
564 + const struct switch_attr *attr,
565 + struct switch_val *val)
566 +{
567 + struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
568 +
569 + mutex_lock(&priv->reg_mutex);
570 + val->value.i = priv->mirror_tx;
571 + mutex_unlock(&priv->reg_mutex);
572 + return 0;
573 +}
574 +
575 +static int
576 +ar40xx_sw_set_mirror_monitor_port(struct switch_dev *dev,
577 + const struct switch_attr *attr,
578 + struct switch_val *val)
579 +{
580 + struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
581 +
582 + mutex_lock(&priv->reg_mutex);
583 + priv->monitor_port = val->value.i;
584 + ar40xx_set_mirror_regs(priv);
585 + mutex_unlock(&priv->reg_mutex);
586 +
587 + return 0;
588 +}
589 +
590 +static int
591 +ar40xx_sw_get_mirror_monitor_port(struct switch_dev *dev,
592 + const struct switch_attr *attr,
593 + struct switch_val *val)
594 +{
595 + struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
596 +
597 + mutex_lock(&priv->reg_mutex);
598 + val->value.i = priv->monitor_port;
599 + mutex_unlock(&priv->reg_mutex);
600 + return 0;
601 +}
602 +
603 +static int
604 +ar40xx_sw_set_mirror_source_port(struct switch_dev *dev,
605 + const struct switch_attr *attr,
606 + struct switch_val *val)
607 +{
608 + struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
609 +
610 + mutex_lock(&priv->reg_mutex);
611 + priv->source_port = val->value.i;
612 + ar40xx_set_mirror_regs(priv);
613 + mutex_unlock(&priv->reg_mutex);
614 +
615 + return 0;
616 +}
617 +
618 +static int
619 +ar40xx_sw_get_mirror_source_port(struct switch_dev *dev,
620 + const struct switch_attr *attr,
621 + struct switch_val *val)
622 +{
623 + struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
624 +
625 + mutex_lock(&priv->reg_mutex);
626 + val->value.i = priv->source_port;
627 + mutex_unlock(&priv->reg_mutex);
628 + return 0;
629 +}
630 +
631 +static int
632 +ar40xx_sw_set_linkdown(struct switch_dev *dev,
633 + const struct switch_attr *attr,
634 + struct switch_val *val)
635 +{
636 + struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
637 +
638 + if (val->value.i == 1)
639 + ar40xx_port_phy_linkdown(priv);
640 + else
641 + ar40xx_phy_init(priv);
642 +
643 + return 0;
644 +}
645 +
646 +static int
647 +ar40xx_sw_set_port_reset_mib(struct switch_dev *dev,
648 + const struct switch_attr *attr,
649 + struct switch_val *val)
650 +{
651 + struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
652 + int port;
653 + int ret;
654 +
655 + port = val->port_vlan;
656 + if (port >= dev->ports)
657 + return -EINVAL;
658 +
659 + mutex_lock(&priv->mib_lock);
660 + ret = ar40xx_mib_capture(priv);
661 + if (ret)
662 + goto unlock;
663 +
664 + ar40xx_mib_fetch_port_stat(priv, port, true);
665 +
666 +unlock:
667 + mutex_unlock(&priv->mib_lock);
668 + return ret;
669 +}
670 +
671 +static int
672 +ar40xx_sw_get_port_mib(struct switch_dev *dev,
673 + const struct switch_attr *attr,
674 + struct switch_val *val)
675 +{
676 + struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
677 + u64 *mib_stats;
678 + int port;
679 + int ret;
680 + char *buf = priv->buf;
681 + int i, len = 0;
682 + u32 num_mibs = ARRAY_SIZE(ar40xx_mibs);
683 +
684 + port = val->port_vlan;
685 + if (port >= dev->ports)
686 + return -EINVAL;
687 +
688 + mutex_lock(&priv->mib_lock);
689 + ret = ar40xx_mib_capture(priv);
690 + if (ret)
691 + goto unlock;
692 +
693 + ar40xx_mib_fetch_port_stat(priv, port, false);
694 +
695 + len += snprintf(buf + len, sizeof(priv->buf) - len,
696 + "Port %d MIB counters\n",
697 + port);
698 +
699 + mib_stats = &priv->mib_stats[port * num_mibs];
700 + for (i = 0; i < num_mibs; i++)
701 + len += snprintf(buf + len, sizeof(priv->buf) - len,
702 + "%-12s: %llu\n",
703 + ar40xx_mibs[i].name,
704 + mib_stats[i]);
705 +
706 + val->value.s = buf;
707 + val->len = len;
708 +
709 +unlock:
710 + mutex_unlock(&priv->mib_lock);
711 + return ret;
712 +}
713 +
714 +static int
715 +ar40xx_sw_set_vid(struct switch_dev *dev, const struct switch_attr *attr,
716 + struct switch_val *val)
717 +{
718 + struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
719 +
720 + priv->vlan_id[val->port_vlan] = val->value.i;
721 + return 0;
722 +}
723 +
724 +static int
725 +ar40xx_sw_get_vid(struct switch_dev *dev, const struct switch_attr *attr,
726 + struct switch_val *val)
727 +{
728 + struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
729 +
730 + val->value.i = priv->vlan_id[val->port_vlan];
731 + return 0;
732 +}
733 +
734 +static int
735 +ar40xx_sw_get_pvid(struct switch_dev *dev, int port, int *vlan)
736 +{
737 + struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
738 + *vlan = priv->pvid[port];
739 + return 0;
740 +}
741 +
742 +static int
743 +ar40xx_sw_set_pvid(struct switch_dev *dev, int port, int vlan)
744 +{
745 + struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
746 +
747 + /* make sure no invalid PVIDs get set */
748 + if (vlan >= dev->vlans)
749 + return -EINVAL;
750 +
751 + priv->pvid[port] = vlan;
752 + return 0;
753 +}
754 +
755 +static void
756 +ar40xx_read_port_link(struct ar40xx_priv *priv, int port,
757 + struct switch_port_link *link)
758 +{
759 + u32 status;
760 + u32 speed;
761 +
762 + memset(link, 0, sizeof(*link));
763 +
764 + status = ar40xx_read(priv, AR40XX_REG_PORT_STATUS(port));
765 +
766 + link->aneg = !!(status & AR40XX_PORT_AUTO_LINK_EN);
767 + if (link->aneg || (port != AR40XX_PORT_CPU))
768 + link->link = !!(status & AR40XX_PORT_STATUS_LINK_UP);
769 + else
770 + link->link = true;
771 +
772 + if (!link->link)
773 + return;
774 +
775 + link->duplex = !!(status & AR40XX_PORT_DUPLEX);
776 + link->tx_flow = !!(status & AR40XX_PORT_STATUS_TXFLOW);
777 + link->rx_flow = !!(status & AR40XX_PORT_STATUS_RXFLOW);
778 +
779 + speed = (status & AR40XX_PORT_SPEED) >>
780 + AR40XX_PORT_STATUS_SPEED_S;
781 +
782 + switch (speed) {
783 + case AR40XX_PORT_SPEED_10M:
784 + link->speed = SWITCH_PORT_SPEED_10;
785 + break;
786 + case AR40XX_PORT_SPEED_100M:
787 + link->speed = SWITCH_PORT_SPEED_100;
788 + break;
789 + case AR40XX_PORT_SPEED_1000M:
790 + link->speed = SWITCH_PORT_SPEED_1000;
791 + break;
792 + default:
793 + link->speed = SWITCH_PORT_SPEED_UNKNOWN;
794 + break;
795 + }
796 +}
797 +
798 +static int
799 +ar40xx_sw_get_port_link(struct switch_dev *dev, int port,
800 + struct switch_port_link *link)
801 +{
802 + struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
803 +
804 + ar40xx_read_port_link(priv, port, link);
805 + return 0;
806 +}
807 +
808 +static const struct switch_attr ar40xx_sw_attr_globals[] = {
809 + {
810 + .type = SWITCH_TYPE_INT,
811 + .name = "enable_vlan",
812 + .description = "Enable VLAN mode",
813 + .set = ar40xx_sw_set_vlan,
814 + .get = ar40xx_sw_get_vlan,
815 + .max = 1
816 + },
817 + {
818 + .type = SWITCH_TYPE_NOVAL,
819 + .name = "reset_mibs",
820 + .description = "Reset all MIB counters",
821 + .set = ar40xx_sw_set_reset_mibs,
822 + },
823 + {
824 + .type = SWITCH_TYPE_INT,
825 + .name = "enable_mirror_rx",
826 + .description = "Enable mirroring of RX packets",
827 + .set = ar40xx_sw_set_mirror_rx_enable,
828 + .get = ar40xx_sw_get_mirror_rx_enable,
829 + .max = 1
830 + },
831 + {
832 + .type = SWITCH_TYPE_INT,
833 + .name = "enable_mirror_tx",
834 + .description = "Enable mirroring of TX packets",
835 + .set = ar40xx_sw_set_mirror_tx_enable,
836 + .get = ar40xx_sw_get_mirror_tx_enable,
837 + .max = 1
838 + },
839 + {
840 + .type = SWITCH_TYPE_INT,
841 + .name = "mirror_monitor_port",
842 + .description = "Mirror monitor port",
843 + .set = ar40xx_sw_set_mirror_monitor_port,
844 + .get = ar40xx_sw_get_mirror_monitor_port,
845 + .max = AR40XX_NUM_PORTS - 1
846 + },
847 + {
848 + .type = SWITCH_TYPE_INT,
849 + .name = "mirror_source_port",
850 + .description = "Mirror source port",
851 + .set = ar40xx_sw_set_mirror_source_port,
852 + .get = ar40xx_sw_get_mirror_source_port,
853 + .max = AR40XX_NUM_PORTS - 1
854 + },
855 + {
856 + .type = SWITCH_TYPE_INT,
857 + .name = "linkdown",
858 + .description = "Link down all the PHYs",
859 + .set = ar40xx_sw_set_linkdown,
860 + .max = 1
861 + },
862 +};
863 +
864 +static const struct switch_attr ar40xx_sw_attr_port[] = {
865 + {
866 + .type = SWITCH_TYPE_NOVAL,
867 + .name = "reset_mib",
868 + .description = "Reset single port MIB counters",
869 + .set = ar40xx_sw_set_port_reset_mib,
870 + },
871 + {
872 + .type = SWITCH_TYPE_STRING,
873 + .name = "mib",
874 + .description = "Get port's MIB counters",
875 + .set = NULL,
876 + .get = ar40xx_sw_get_port_mib,
877 + },
878 +};
879 +
880 +const struct switch_attr ar40xx_sw_attr_vlan[] = {
881 + {
882 + .type = SWITCH_TYPE_INT,
883 + .name = "vid",
884 + .description = "VLAN ID (0-4094)",
885 + .set = ar40xx_sw_set_vid,
886 + .get = ar40xx_sw_get_vid,
887 + .max = 4094,
888 + },
889 +};
890 +
891 +/* End of swconfig support */
892 +
893 +static int
894 +ar40xx_wait_bit(struct ar40xx_priv *priv, int reg, u32 mask, u32 val)
895 +{
896 + int timeout = 20;
897 + u32 t;
898 +
899 + while (1) {
900 + t = ar40xx_read(priv, reg);
901 + if ((t & mask) == val)
902 + return 0;
903 +
904 + if (timeout-- <= 0)
905 + break;
906 +
907 + usleep_range(10, 20);
908 + }
909 +
910 + pr_err("ar40xx: timeout for reg %08x: %08x & %08x != %08x\n",
911 + (unsigned int)reg, t, mask, val);
912 + return -ETIMEDOUT;
913 +}
914 +
915 +static int
916 +ar40xx_atu_flush(struct ar40xx_priv *priv)
917 +{
918 + int ret;
919 +
920 + ret = ar40xx_wait_bit(priv, AR40XX_REG_ATU_FUNC,
921 + AR40XX_ATU_FUNC_BUSY, 0);
922 + if (!ret)
923 + ar40xx_write(priv, AR40XX_REG_ATU_FUNC,
924 + AR40XX_ATU_FUNC_OP_FLUSH |
925 + AR40XX_ATU_FUNC_BUSY);
926 +
927 + return ret;
928 +}
929 +
930 +static void
931 +ar40xx_ess_reset(struct ar40xx_priv *priv)
932 +{
933 + reset_control_assert(priv->ess_rst);
934 + mdelay(10);
935 + reset_control_deassert(priv->ess_rst);
936 + /* Waiting for all inner tables init done.
937 +	/* Waiting for all inner tables init done.
938 +	 * It costs 5~10ms.
939 + mdelay(10);
940 +
941 + pr_info("ESS reset ok!\n");
942 +}
943 +
944 +/* Start of psgmii self test */
945 +
946 +static void
947 +ar40xx_malibu_psgmii_ess_reset(struct ar40xx_priv *priv)
948 +{
949 + u32 n;
950 + struct mii_bus *bus = priv->mii_bus;
951 + /* reset phy psgmii */
952 + /* fix phy psgmii RX 20bit */
953 + mdiobus_write(bus, 5, 0x0, 0x005b);
954 + /* reset phy psgmii */
955 + mdiobus_write(bus, 5, 0x0, 0x001b);
956 + /* release reset phy psgmii */
957 + mdiobus_write(bus, 5, 0x0, 0x005b);
958 +
959 + for (n = 0; n < AR40XX_PSGMII_CALB_NUM; n++) {
960 + u16 status;
961 +
962 + status = ar40xx_phy_mmd_read(priv, 5, 1, 0x28);
963 + if (status & BIT(0))
964 + break;
965 + /* Polling interval to check PSGMII PLL in malibu is ready
966 + * the worst time is 8.67ms
967 + * for 25MHz reference clock
968 + * [512+(128+2048)*49]*80ns+100us
969 + */
970 + mdelay(2);
971 + }
972 +
973 + /*check malibu psgmii calibration done end..*/
974 +
975 + /*freeze phy psgmii RX CDR*/
976 + mdiobus_write(bus, 5, 0x1a, 0x2230);
977 +
978 + ar40xx_ess_reset(priv);
979 +
980 + /*check psgmii calibration done start*/
981 + for (n = 0; n < AR40XX_PSGMII_CALB_NUM; n++) {
982 + u32 status;
983 +
984 + status = ar40xx_psgmii_read(priv, 0xa0);
985 + if (status & BIT(0))
986 + break;
987 + /* Polling interval to check PSGMII PLL in ESS is ready */
988 + mdelay(2);
989 + }
990 +
991 + /* check dakota psgmii calibration done end..*/
992 +
993 +	/* release phy psgmii RX CDR */
994 + mdiobus_write(bus, 5, 0x1a, 0x3230);
995 + /* release phy psgmii RX 20bit */
996 + mdiobus_write(bus, 5, 0x0, 0x005f);
997 +}
998 +
999 +static void
1000 +ar40xx_psgmii_single_phy_testing(struct ar40xx_priv *priv, int phy)
1001 +{
1002 + int j;
1003 + u32 tx_ok, tx_error;
1004 + u32 rx_ok, rx_error;
1005 + u32 tx_ok_high16;
1006 + u32 rx_ok_high16;
1007 + u32 tx_all_ok, rx_all_ok;
1008 + struct mii_bus *bus = priv->mii_bus;
1009 +
1010 + mdiobus_write(bus, phy, 0x0, 0x9000);
1011 + mdiobus_write(bus, phy, 0x0, 0x4140);
1012 +
1013 + for (j = 0; j < AR40XX_PSGMII_CALB_NUM; j++) {
1014 + u16 status;
1015 +
1016 + status = mdiobus_read(bus, phy, 0x11);
1017 + if (status & AR40XX_PHY_SPEC_STATUS_LINK)
1018 + break;
1019 + /* the polling interval to check if the PHY link up or not
1020 + * maxwait_timer: 750 ms +/-10 ms
1021 + * minwait_timer : 1 us +/- 0.1us
1022 + * time resides in minwait_timer ~ maxwait_timer
1023 + * see IEEE 802.3 section 40.4.5.2
1024 + */
1025 + mdelay(8);
1026 + }
1027 +
1028 + /* enable check */
1029 + ar40xx_phy_mmd_write(priv, phy, 7, 0x8029, 0x0000);
1030 + ar40xx_phy_mmd_write(priv, phy, 7, 0x8029, 0x0003);
1031 +
1032 + /* start traffic */
1033 + ar40xx_phy_mmd_write(priv, phy, 7, 0x8020, 0xa000);
1034 + /* wait for all traffic end
1035 + * 4096(pkt num)*1524(size)*8ns(125MHz)=49.9ms
1036 + */
1037 + mdelay(50);
1038 +
1039 + /* check counter */
1040 + tx_ok = ar40xx_phy_mmd_read(priv, phy, 7, 0x802e);
1041 + tx_ok_high16 = ar40xx_phy_mmd_read(priv, phy, 7, 0x802d);
1042 + tx_error = ar40xx_phy_mmd_read(priv, phy, 7, 0x802f);
1043 + rx_ok = ar40xx_phy_mmd_read(priv, phy, 7, 0x802b);
1044 + rx_ok_high16 = ar40xx_phy_mmd_read(priv, phy, 7, 0x802a);
1045 + rx_error = ar40xx_phy_mmd_read(priv, phy, 7, 0x802c);
1046 + tx_all_ok = tx_ok + (tx_ok_high16 << 16);
1047 + rx_all_ok = rx_ok + (rx_ok_high16 << 16);
1048 + if (tx_all_ok == 0x1000 && tx_error == 0) {
1049 + /* success */
1050 + priv->phy_t_status &= (~BIT(phy));
1051 + } else {
1052 + pr_info("PHY %d single test PSGMII issue happen!\n", phy);
1053 + priv->phy_t_status |= BIT(phy);
1054 + }
1055 +
1056 + mdiobus_write(bus, phy, 0x0, 0x1840);
1057 +}
1058 +
1059 +static void
1060 +ar40xx_psgmii_all_phy_testing(struct ar40xx_priv *priv)
1061 +{
1062 + int phy, j;
1063 + struct mii_bus *bus = priv->mii_bus;
1064 +
1065 + mdiobus_write(bus, 0x1f, 0x0, 0x9000);
1066 + mdiobus_write(bus, 0x1f, 0x0, 0x4140);
1067 +
1068 + for (j = 0; j < AR40XX_PSGMII_CALB_NUM; j++) {
1069 + for (phy = 0; phy < AR40XX_NUM_PORTS - 1; phy++) {
1070 + u16 status;
1071 +
1072 + status = mdiobus_read(bus, phy, 0x11);
1073 + if (!(status & BIT(10)))
1074 + break;
1075 + }
1076 +
1077 + if (phy >= (AR40XX_NUM_PORTS - 1))
1078 + break;
1079 +		/* The polling interval to check if the PHY link is up or not */
1080 + mdelay(8);
1081 + }
1082 + /* enable check */
1083 + ar40xx_phy_mmd_write(priv, 0x1f, 7, 0x8029, 0x0000);
1084 + ar40xx_phy_mmd_write(priv, 0x1f, 7, 0x8029, 0x0003);
1085 +
1086 + /* start traffic */
1087 + ar40xx_phy_mmd_write(priv, 0x1f, 7, 0x8020, 0xa000);
1088 + /* wait for all traffic end
1089 + * 4096(pkt num)*1524(size)*8ns(125MHz)=49.9ms
1090 + */
1091 + mdelay(50);
1092 +
1093 + for (phy = 0; phy < AR40XX_NUM_PORTS - 1; phy++) {
1094 + u32 tx_ok, tx_error;
1095 + u32 rx_ok, rx_error;
1096 + u32 tx_ok_high16;
1097 + u32 rx_ok_high16;
1098 + u32 tx_all_ok, rx_all_ok;
1099 +
1100 + /* check counter */
1101 + tx_ok = ar40xx_phy_mmd_read(priv, phy, 7, 0x802e);
1102 + tx_ok_high16 = ar40xx_phy_mmd_read(priv, phy, 7, 0x802d);
1103 + tx_error = ar40xx_phy_mmd_read(priv, phy, 7, 0x802f);
1104 + rx_ok = ar40xx_phy_mmd_read(priv, phy, 7, 0x802b);
1105 + rx_ok_high16 = ar40xx_phy_mmd_read(priv, phy, 7, 0x802a);
1106 + rx_error = ar40xx_phy_mmd_read(priv, phy, 7, 0x802c);
1107 + tx_all_ok = tx_ok + (tx_ok_high16<<16);
1108 + rx_all_ok = rx_ok + (rx_ok_high16<<16);
1109 + if (tx_all_ok == 0x1000 && tx_error == 0) {
1110 + /* success */
1111 + priv->phy_t_status &= ~BIT(phy + 8);
1112 + } else {
1113 + pr_info("PHY%d test see issue!\n", phy);
1114 + priv->phy_t_status |= BIT(phy + 8);
1115 + }
1116 + }
1117 +
1118 + pr_debug("PHY all test 0x%x \r\n", priv->phy_t_status);
1119 +}
1120 +
1121 +void
1122 +ar40xx_psgmii_self_test(struct ar40xx_priv *priv)
1123 +{
1124 + u32 i, phy;
1125 + struct mii_bus *bus = priv->mii_bus;
1126 +
1127 + ar40xx_malibu_psgmii_ess_reset(priv);
1128 +
1129 + /* switch to access MII reg for copper */
1130 + mdiobus_write(bus, 4, 0x1f, 0x8500);
1131 + for (phy = 0; phy < AR40XX_NUM_PORTS - 1; phy++) {
1132 + /*enable phy mdio broadcast write*/
1133 + ar40xx_phy_mmd_write(priv, phy, 7, 0x8028, 0x801f);
1134 + }
1135 + /* force no link by power down */
1136 + mdiobus_write(bus, 0x1f, 0x0, 0x1840);
1137 + /*packet number*/
1138 + ar40xx_phy_mmd_write(priv, 0x1f, 7, 0x8021, 0x1000);
1139 + ar40xx_phy_mmd_write(priv, 0x1f, 7, 0x8062, 0x05e0);
1140 +
1141 + /*fix mdi status */
1142 + mdiobus_write(bus, 0x1f, 0x10, 0x6800);
1143 + for (i = 0; i < AR40XX_PSGMII_CALB_NUM; i++) {
1144 + priv->phy_t_status = 0;
1145 +
1146 + for (phy = 0; phy < AR40XX_NUM_PORTS - 1; phy++) {
1147 + ar40xx_rmw(priv, AR40XX_REG_PORT_LOOKUP(phy + 1),
1148 + AR40XX_PORT_LOOKUP_LOOPBACK,
1149 + AR40XX_PORT_LOOKUP_LOOPBACK);
1150 + }
1151 +
1152 + for (phy = 0; phy < AR40XX_NUM_PORTS - 1; phy++)
1153 + ar40xx_psgmii_single_phy_testing(priv, phy);
1154 +
1155 + ar40xx_psgmii_all_phy_testing(priv);
1156 +
1157 + if (priv->phy_t_status)
1158 + ar40xx_malibu_psgmii_ess_reset(priv);
1159 + else
1160 + break;
1161 + }
1162 +
1163 + if (i >= AR40XX_PSGMII_CALB_NUM)
1164 + pr_info("PSGMII cannot recover\n");
1165 + else
1166 + pr_debug("PSGMII recovered after %d times reset\n", i);
1167 +
1168 + /* configuration recover */
1169 + /* packet number */
1170 + ar40xx_phy_mmd_write(priv, 0x1f, 7, 0x8021, 0x0);
1171 + /* disable check */
1172 + ar40xx_phy_mmd_write(priv, 0x1f, 7, 0x8029, 0x0);
1173 + /* disable traffic */
1174 + ar40xx_phy_mmd_write(priv, 0x1f, 7, 0x8020, 0x0);
1175 +}
1176 +
1177 +void
1178 +ar40xx_psgmii_self_test_clean(struct ar40xx_priv *priv)
1179 +{
1180 + int phy;
1181 + struct mii_bus *bus = priv->mii_bus;
1182 +
1183 + /* disable phy internal loopback */
1184 + mdiobus_write(bus, 0x1f, 0x10, 0x6860);
1185 + mdiobus_write(bus, 0x1f, 0x0, 0x9040);
1186 +
1187 + for (phy = 0; phy < AR40XX_NUM_PORTS - 1; phy++) {
1188 + /* disable mac loop back */
1189 + ar40xx_rmw(priv, AR40XX_REG_PORT_LOOKUP(phy + 1),
1190 + AR40XX_PORT_LOOKUP_LOOPBACK, 0);
1191 + /* disable phy mdio broadcast write */
1192 + ar40xx_phy_mmd_write(priv, phy, 7, 0x8028, 0x001f);
1193 + }
1194 +
1195 + /* clear fdb entry */
1196 + ar40xx_atu_flush(priv);
1197 +}
1198 +
1199 +/* End of psgmii self test */
1200 +
1201 +static void
1202 +ar40xx_mac_mode_init(struct ar40xx_priv *priv, u32 mode)
1203 +{
1204 + if (mode == PORT_WRAPPER_PSGMII) {
1205 + ar40xx_psgmii_write(priv, AR40XX_PSGMII_MODE_CONTROL, 0x2200);
1206 + ar40xx_psgmii_write(priv, AR40XX_PSGMIIPHY_TX_CONTROL, 0x8380);
1207 + }
1208 +}
1209 +
1210 +static
1211 +int ar40xx_cpuport_setup(struct ar40xx_priv *priv)
1212 +{
1213 + u32 t;
1214 +
1215 + t = AR40XX_PORT_STATUS_TXFLOW |
1216 + AR40XX_PORT_STATUS_RXFLOW |
1217 + AR40XX_PORT_TXHALF_FLOW |
1218 + AR40XX_PORT_DUPLEX |
1219 + AR40XX_PORT_SPEED_1000M;
1220 + ar40xx_write(priv, AR40XX_REG_PORT_STATUS(0), t);
1221 + usleep_range(10, 20);
1222 +
1223 + t |= AR40XX_PORT_TX_EN |
1224 + AR40XX_PORT_RX_EN;
1225 + ar40xx_write(priv, AR40XX_REG_PORT_STATUS(0), t);
1226 +
1227 + return 0;
1228 +}
1229 +
1230 +static void
1231 +ar40xx_init_port(struct ar40xx_priv *priv, int port)
1232 +{
1233 + u32 t;
1234 +
1235 + ar40xx_rmw(priv, AR40XX_REG_PORT_STATUS(port),
1236 + AR40XX_PORT_AUTO_LINK_EN, 0);
1237 +
1238 + /* CPU port is setting headers to limit output ports */
1239 + if (port == 0)
1240 + ar40xx_write(priv, AR40XX_REG_PORT_HEADER(port), 0x8);
1241 + else
1242 + ar40xx_write(priv, AR40XX_REG_PORT_HEADER(port), 0);
1243 +
1244 + ar40xx_write(priv, AR40XX_REG_PORT_VLAN0(port), 0);
1245 +
1246 + t = AR40XX_PORT_VLAN1_OUT_MODE_UNTOUCH << AR40XX_PORT_VLAN1_OUT_MODE_S;
1247 + ar40xx_write(priv, AR40XX_REG_PORT_VLAN1(port), t);
1248 +
1249 + t = AR40XX_PORT_LOOKUP_LEARN;
1250 + t |= AR40XX_PORT_STATE_FORWARD << AR40XX_PORT_LOOKUP_STATE_S;
1251 + ar40xx_write(priv, AR40XX_REG_PORT_LOOKUP(port), t);
1252 +}
1253 +
1254 +void
1255 +ar40xx_init_globals(struct ar40xx_priv *priv)
1256 +{
1257 + u32 t;
1258 +
1259 + /* enable CPU port and disable mirror port */
1260 + t = AR40XX_FWD_CTRL0_CPU_PORT_EN |
1261 + AR40XX_FWD_CTRL0_MIRROR_PORT;
1262 + ar40xx_write(priv, AR40XX_REG_FWD_CTRL0, t);
1263 +
1264 + /* forward multicast and broadcast frames to CPU */
1265 + t = (AR40XX_PORTS_ALL << AR40XX_FWD_CTRL1_UC_FLOOD_S) |
1266 + (AR40XX_PORTS_ALL << AR40XX_FWD_CTRL1_MC_FLOOD_S) |
1267 + (AR40XX_PORTS_ALL << AR40XX_FWD_CTRL1_BC_FLOOD_S);
1268 + ar40xx_write(priv, AR40XX_REG_FWD_CTRL1, t);
1269 +
1270 + /* enable jumbo frames */
1271 + ar40xx_rmw(priv, AR40XX_REG_MAX_FRAME_SIZE,
1272 + AR40XX_MAX_FRAME_SIZE_MTU, 9018 + 8 + 2);
1273 +
1274 + /* Enable MIB counters */
1275 + ar40xx_rmw(priv, AR40XX_REG_MODULE_EN, 0,
1276 + AR40XX_MODULE_EN_MIB);
1277 +
1278 + /* Disable AZ */
1279 + ar40xx_write(priv, AR40XX_REG_EEE_CTRL, 0);
1280 +
1281 +	/* set flowctrl threshold for cpu port */
1282 + t = (AR40XX_PORT0_FC_THRESH_ON_DFLT << 16) |
1283 + AR40XX_PORT0_FC_THRESH_OFF_DFLT;
1284 + ar40xx_write(priv, AR40XX_REG_PORT_FLOWCTRL_THRESH(0), t);
1285 +
1286 + /* set service tag to 802.1q */
1287 + t = ETH_P_8021Q | AR40XX_ESS_SERVICE_TAG_STAG;
1288 + ar40xx_write(priv, AR40XX_ESS_SERVICE_TAG, t);
1289 +}
1290 +
1291 +static void
1292 +ar40xx_malibu_init(struct ar40xx_priv *priv)
1293 +{
1294 + int i;
1295 + struct mii_bus *bus;
1296 + u16 val;
1297 +
1298 + bus = priv->mii_bus;
1299 +
1300 + /* war to enable AZ transmitting ability */
1301 + ar40xx_phy_mmd_write(priv, AR40XX_PSGMII_ID, 1,
1302 + AR40XX_MALIBU_PSGMII_MODE_CTRL,
1303 + AR40XX_MALIBU_PHY_PSGMII_MODE_CTRL_ADJUST_VAL);
1304 + for (i = 0; i < AR40XX_NUM_PORTS - 1; i++) {
1305 + /* change malibu control_dac */
1306 + val = ar40xx_phy_mmd_read(priv, i, 7,
1307 + AR40XX_MALIBU_PHY_MMD7_DAC_CTRL);
1308 + val &= ~AR40XX_MALIBU_DAC_CTRL_MASK;
1309 + val |= AR40XX_MALIBU_DAC_CTRL_VALUE;
1310 + ar40xx_phy_mmd_write(priv, i, 7,
1311 + AR40XX_MALIBU_PHY_MMD7_DAC_CTRL, val);
1312 + if (i == AR40XX_MALIBU_PHY_LAST_ADDR) {
1313 + /* to avoid goes into hibernation */
1314 + val = ar40xx_phy_mmd_read(priv, i, 3,
1315 + AR40XX_MALIBU_PHY_RLP_CTRL);
1316 + val &= (~(1<<1));
1317 + ar40xx_phy_mmd_write(priv, i, 3,
1318 + AR40XX_MALIBU_PHY_RLP_CTRL, val);
1319 + }
1320 + }
1321 +
1322 + /* adjust psgmii serdes tx amp */
1323 + mdiobus_write(bus, AR40XX_PSGMII_ID, AR40XX_PSGMII_TX_DRIVER_1_CTRL,
1324 + AR40XX_MALIBU_PHY_PSGMII_REDUCE_SERDES_TX_AMP);
1325 +}
1326 +
1327 +static int
1328 +ar40xx_hw_init(struct ar40xx_priv *priv)
1329 +{
1330 + u32 i;
1331 +
1332 + ar40xx_ess_reset(priv);
1333 +
1334 + if (priv->mii_bus)
1335 + ar40xx_malibu_init(priv);
1336 + else
1337 + return -1;
1338 +
1339 + ar40xx_psgmii_self_test(priv);
1340 + ar40xx_psgmii_self_test_clean(priv);
1341 +
1342 + ar40xx_mac_mode_init(priv, priv->mac_mode);
1343 +
1344 + for (i = 0; i < priv->dev.ports; i++)
1345 + ar40xx_init_port(priv, i);
1346 +
1347 + ar40xx_init_globals(priv);
1348 +
1349 + return 0;
1350 +}
1351 +
1352 +/* Start of qm error WAR */
1353 +
1354 +static
1355 +int ar40xx_force_1g_full(struct ar40xx_priv *priv, u32 port_id)
1356 +{
1357 + u32 reg;
1358 +
1359 + if (port_id < 0 || port_id > 6)
1360 + return -1;
1361 +
1362 + reg = AR40XX_REG_PORT_STATUS(port_id);
1363 + return ar40xx_rmw(priv, reg, AR40XX_PORT_SPEED,
1364 + (AR40XX_PORT_SPEED_1000M | AR40XX_PORT_DUPLEX));
1365 +}
1366 +
1367 +static
1368 +int ar40xx_get_qm_status(struct ar40xx_priv *priv,
1369 + u32 port_id, u32 *qm_buffer_err)
1370 +{
1371 + u32 reg;
1372 + u32 qm_val;
1373 +
1374 + if (port_id < 1 || port_id > 5) {
1375 + *qm_buffer_err = 0;
1376 + return -1;
1377 + }
1378 +
1379 + if (port_id < 4) {
1380 + reg = AR40XX_REG_QM_PORT0_3_QNUM;
1381 + ar40xx_write(priv, AR40XX_REG_QM_DEBUG_ADDR, reg);
1382 + qm_val = ar40xx_read(priv, AR40XX_REG_QM_DEBUG_VALUE);
1383 + /* every 8 bits for each port */
1384 + *qm_buffer_err = (qm_val >> (port_id * 8)) & 0xFF;
1385 + } else {
1386 + reg = AR40XX_REG_QM_PORT4_6_QNUM;
1387 + ar40xx_write(priv, AR40XX_REG_QM_DEBUG_ADDR, reg);
1388 + qm_val = ar40xx_read(priv, AR40XX_REG_QM_DEBUG_VALUE);
1389 + /* every 8 bits for each port */
1390 + *qm_buffer_err = (qm_val >> ((port_id-4) * 8)) & 0xFF;
1391 + }
1392 +
1393 + return 0;
1394 +}
1395 +
1396 +static void
1397 +ar40xx_sw_mac_polling_task(struct ar40xx_priv *priv)
1398 +{
1399 + static int task_count;
1400 + u32 i;
1401 + u32 reg, value;
1402 + u32 link, speed, duplex;
1403 + u32 qm_buffer_err;
1404 + u16 port_phy_status[AR40XX_NUM_PORTS];
1405 + static u32 qm_err_cnt[AR40XX_NUM_PORTS] = {0, 0, 0, 0, 0, 0};
1406 + static u32 link_cnt[AR40XX_NUM_PORTS] = {0, 0, 0, 0, 0, 0};
1407 + struct mii_bus *bus = NULL;
1408 +
1409 + if (!priv || !priv->mii_bus)
1410 + return;
1411 +
1412 + bus = priv->mii_bus;
1413 +
1414 + ++task_count;
1415 +
1416 + for (i = 1; i < AR40XX_NUM_PORTS; ++i) {
1417 + port_phy_status[i] =
1418 + mdiobus_read(bus, i-1, AR40XX_PHY_SPEC_STATUS);
1419 + speed = link = duplex = port_phy_status[i];
1420 + speed &= AR40XX_PHY_SPEC_STATUS_SPEED;
1421 + speed >>= 14;
1422 + link &= AR40XX_PHY_SPEC_STATUS_LINK;
1423 + link >>= 10;
1424 + duplex &= AR40XX_PHY_SPEC_STATUS_DUPLEX;
1425 + duplex >>= 13;
1426 +
1427 + if (link != priv->ar40xx_port_old_link[i]) {
1428 + ++link_cnt[i];
1429 + /* Up --> Down */
1430 + if ((priv->ar40xx_port_old_link[i] ==
1431 + AR40XX_PORT_LINK_UP) &&
1432 + (link == AR40XX_PORT_LINK_DOWN)) {
1433 + /* LINK_EN disable(MAC force mode)*/
1434 + reg = AR40XX_REG_PORT_STATUS(i);
1435 + ar40xx_rmw(priv, reg,
1436 + AR40XX_PORT_AUTO_LINK_EN, 0);
1437 +
1438 + /* Check queue buffer */
1439 + qm_err_cnt[i] = 0;
1440 + ar40xx_get_qm_status(priv, i, &qm_buffer_err);
1441 + if (qm_buffer_err) {
1442 + priv->ar40xx_port_qm_buf[i] =
1443 + AR40XX_QM_NOT_EMPTY;
1444 + } else {
1445 + u16 phy_val = 0;
1446 +
1447 + priv->ar40xx_port_qm_buf[i] =
1448 + AR40XX_QM_EMPTY;
1449 + ar40xx_force_1g_full(priv, i);
1450 +				/* Ref: QCA8337 datasheet — clearing
1451 + * MENU_CTRL_EN prevents phy to
1452 + * stuck in 100BT mode when
1453 + * bringing up the link
1454 + */
1455 + ar40xx_phy_dbg_read(priv, i-1,
1456 + AR40XX_PHY_DEBUG_0,
1457 + &phy_val);
1458 + phy_val &= (~AR40XX_PHY_MANU_CTRL_EN);
1459 + ar40xx_phy_dbg_write(priv, i-1,
1460 + AR40XX_PHY_DEBUG_0,
1461 + phy_val);
1462 + }
1463 + priv->ar40xx_port_old_link[i] = link;
1464 + } else if ((priv->ar40xx_port_old_link[i] ==
1465 + AR40XX_PORT_LINK_DOWN) &&
1466 + (link == AR40XX_PORT_LINK_UP)) {
1467 + /* Down --> Up */
1468 + if (priv->port_link_up[i] < 1) {
1469 + ++priv->port_link_up[i];
1470 + } else {
1471 + /* Change port status */
1472 + reg = AR40XX_REG_PORT_STATUS(i);
1473 + value = ar40xx_read(priv, reg);
1474 + priv->port_link_up[i] = 0;
1475 +
1476 + value &= ~(AR40XX_PORT_DUPLEX |
1477 + AR40XX_PORT_SPEED);
1478 + value |= speed | (duplex ? BIT(6) : 0);
1479 + ar40xx_write(priv, reg, value);
1480 + /* clock switch need such time
1481 + * to avoid glitch
1482 + */
1483 + usleep_range(100, 200);
1484 +
1485 + value |= AR40XX_PORT_AUTO_LINK_EN;
1486 + ar40xx_write(priv, reg, value);
1487 + /* HW need such time to make sure link
1488 + * stable before enable MAC
1489 + */
1490 + usleep_range(100, 200);
1491 +
1492 + if (speed == AR40XX_PORT_SPEED_100M) {
1493 + u16 phy_val = 0;
1494 + /* Enable @100M, if down to 10M
1495 + * clock will change smoothly
1496 + */
1497 + ar40xx_phy_dbg_read(priv, i-1,
1498 + 0,
1499 + &phy_val);
1500 + phy_val |=
1501 + AR40XX_PHY_MANU_CTRL_EN;
1502 + ar40xx_phy_dbg_write(priv, i-1,
1503 + 0,
1504 + phy_val);
1505 + }
1506 + priv->ar40xx_port_old_link[i] = link;
1507 + }
1508 + }
1509 + }
1510 +
1511 + if (priv->ar40xx_port_qm_buf[i] == AR40XX_QM_NOT_EMPTY) {
1512 + /* Check QM */
1513 + ar40xx_get_qm_status(priv, i, &qm_buffer_err);
1514 + if (qm_buffer_err) {
1515 + ++qm_err_cnt[i];
1516 + } else {
1517 + priv->ar40xx_port_qm_buf[i] =
1518 + AR40XX_QM_EMPTY;
1519 + qm_err_cnt[i] = 0;
1520 + ar40xx_force_1g_full(priv, i);
1521 + }
1522 + }
1523 + }
1524 +}
1525 +
1526 +static void
1527 +ar40xx_qm_err_check_work_task(struct work_struct *work)
1528 +{
1529 + struct ar40xx_priv *priv = container_of(work, struct ar40xx_priv,
1530 + qm_dwork.work);
1531 +
1532 + mutex_lock(&priv->qm_lock);
1533 +
1534 + ar40xx_sw_mac_polling_task(priv);
1535 +
1536 + mutex_unlock(&priv->qm_lock);
1537 +
1538 + schedule_delayed_work(&priv->qm_dwork,
1539 + msecs_to_jiffies(AR40XX_QM_WORK_DELAY));
1540 +}
1541 +
1542 +static int
1543 +ar40xx_qm_err_check_work_start(struct ar40xx_priv *priv)
1544 +{
1545 + mutex_init(&priv->qm_lock);
1546 +
1547 + INIT_DELAYED_WORK(&priv->qm_dwork, ar40xx_qm_err_check_work_task);
1548 +
1549 + schedule_delayed_work(&priv->qm_dwork,
1550 + msecs_to_jiffies(AR40XX_QM_WORK_DELAY));
1551 +
1552 + return 0;
1553 +}
1554 +
1555 +/* End of qm error WAR */
1556 +
1557 +static int
1558 +ar40xx_vlan_init(struct ar40xx_priv *priv)
1559 +{
1560 + int port;
1561 + unsigned long bmp;
1562 +
1563 + /* By default Enable VLAN */
1564 + priv->vlan = 1;
1565 + priv->vlan_table[AR40XX_LAN_VLAN] = priv->cpu_bmp | priv->lan_bmp;
1566 + priv->vlan_table[AR40XX_WAN_VLAN] = priv->cpu_bmp | priv->wan_bmp;
1567 + priv->vlan_tagged = priv->cpu_bmp;
1568 + bmp = priv->lan_bmp;
1569 + for_each_set_bit(port, &bmp, AR40XX_NUM_PORTS)
1570 + priv->pvid[port] = AR40XX_LAN_VLAN;
1571 +
1572 + bmp = priv->wan_bmp;
1573 + for_each_set_bit(port, &bmp, AR40XX_NUM_PORTS)
1574 + priv->pvid[port] = AR40XX_WAN_VLAN;
1575 +
1576 + return 0;
1577 +}
1578 +
1579 +static void
1580 +ar40xx_mib_work_func(struct work_struct *work)
1581 +{
1582 + struct ar40xx_priv *priv;
1583 + int err;
1584 +
1585 + priv = container_of(work, struct ar40xx_priv, mib_work.work);
1586 +
1587 + mutex_lock(&priv->mib_lock);
1588 +
1589 + err = ar40xx_mib_capture(priv);
1590 + if (err)
1591 + goto next_port;
1592 +
1593 + ar40xx_mib_fetch_port_stat(priv, priv->mib_next_port, false);
1594 +
1595 +next_port:
1596 + priv->mib_next_port++;
1597 + if (priv->mib_next_port >= priv->dev.ports)
1598 + priv->mib_next_port = 0;
1599 +
1600 + mutex_unlock(&priv->mib_lock);
1601 +
1602 + schedule_delayed_work(&priv->mib_work,
1603 + msecs_to_jiffies(AR40XX_MIB_WORK_DELAY));
1604 +}
1605 +
1606 +static void
1607 +ar40xx_setup_port(struct ar40xx_priv *priv, int port, u32 members)
1608 +{
1609 + u32 t;
1610 + u32 egress, ingress;
1611 + u32 pvid = priv->vlan_id[priv->pvid[port]];
1612 +
1613 + if (priv->vlan) {
1614 + if (priv->vlan_tagged & BIT(port))
1615 + egress = AR40XX_PORT_VLAN1_OUT_MODE_TAG;
1616 + else
1617 + egress = AR40XX_PORT_VLAN1_OUT_MODE_UNMOD;
1618 +
1619 + ingress = AR40XX_IN_SECURE;
1620 + } else {
1621 + egress = AR40XX_PORT_VLAN1_OUT_MODE_UNTOUCH;
1622 + ingress = AR40XX_IN_PORT_ONLY;
1623 + }
1624 +
1625 + t = pvid << AR40XX_PORT_VLAN0_DEF_SVID_S;
1626 + t |= pvid << AR40XX_PORT_VLAN0_DEF_CVID_S;
1627 + ar40xx_write(priv, AR40XX_REG_PORT_VLAN0(port), t);
1628 +
1629 + t = egress << AR40XX_PORT_VLAN1_OUT_MODE_S;
1630 +
1631 + /* set CPU port to core port */
1632 + if (port == 0)
1633 + t |= AR40XX_PORT_VLAN1_CORE_PORT;
1634 +
1635 + if (priv->vlan_tagged & BIT(port))
1636 + t |= AR40XX_PORT_VLAN1_PORT_VLAN_PROP;
1637 + else
1638 + t |= AR40XX_PORT_VLAN1_PORT_TLS_MODE;
1639 +
1640 + ar40xx_write(priv, AR40XX_REG_PORT_VLAN1(port), t);
1641 +
1642 + t = members;
1643 + t |= AR40XX_PORT_LOOKUP_LEARN;
1644 + t |= ingress << AR40XX_PORT_LOOKUP_IN_MODE_S;
1645 + t |= AR40XX_PORT_STATE_FORWARD << AR40XX_PORT_LOOKUP_STATE_S;
1646 + ar40xx_write(priv, AR40XX_REG_PORT_LOOKUP(port), t);
1647 +}
1648 +
1649 +static void
1650 +ar40xx_vtu_op(struct ar40xx_priv *priv, u32 op, u32 val)
1651 +{
1652 + if (ar40xx_wait_bit(priv, AR40XX_REG_VTU_FUNC1,
1653 + AR40XX_VTU_FUNC1_BUSY, 0))
1654 + return;
1655 +
1656 + if ((op & AR40XX_VTU_FUNC1_OP) == AR40XX_VTU_FUNC1_OP_LOAD)
1657 + ar40xx_write(priv, AR40XX_REG_VTU_FUNC0, val);
1658 +
1659 + op |= AR40XX_VTU_FUNC1_BUSY;
1660 + ar40xx_write(priv, AR40XX_REG_VTU_FUNC1, op);
1661 +}
1662 +
1663 +static void
1664 +ar40xx_vtu_load_vlan(struct ar40xx_priv *priv, u32 vid, u32 port_mask)
1665 +{
1666 + u32 op;
1667 + u32 val;
1668 + int i;
1669 +
1670 + op = AR40XX_VTU_FUNC1_OP_LOAD | (vid << AR40XX_VTU_FUNC1_VID_S);
1671 + val = AR40XX_VTU_FUNC0_VALID | AR40XX_VTU_FUNC0_IVL;
1672 + for (i = 0; i < AR40XX_NUM_PORTS; i++) {
1673 + u32 mode;
1674 +
1675 + if ((port_mask & BIT(i)) == 0)
1676 + mode = AR40XX_VTU_FUNC0_EG_MODE_NOT;
1677 + else if (priv->vlan == 0)
1678 + mode = AR40XX_VTU_FUNC0_EG_MODE_KEEP;
1679 + else if ((priv->vlan_tagged & BIT(i)) ||
1680 + (priv->vlan_id[priv->pvid[i]] != vid))
1681 + mode = AR40XX_VTU_FUNC0_EG_MODE_TAG;
1682 + else
1683 + mode = AR40XX_VTU_FUNC0_EG_MODE_UNTAG;
1684 +
1685 + val |= mode << AR40XX_VTU_FUNC0_EG_MODE_S(i);
1686 + }
1687 + ar40xx_vtu_op(priv, op, val);
1688 +}
1689 +
1690 +static void
1691 +ar40xx_vtu_flush(struct ar40xx_priv *priv)
1692 +{
1693 + ar40xx_vtu_op(priv, AR40XX_VTU_FUNC1_OP_FLUSH, 0);
1694 +}
1695 +
1696 +static int
1697 +ar40xx_sw_hw_apply(struct switch_dev *dev)
1698 +{
1699 + struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
1700 + u8 portmask[AR40XX_NUM_PORTS];
1701 + int i, j;
1702 +
1703 + mutex_lock(&priv->reg_mutex);
1704 + /* flush all vlan entries */
1705 + ar40xx_vtu_flush(priv);
1706 +
1707 + memset(portmask, 0, sizeof(portmask));
1708 + if (priv->vlan) {
1709 + for (j = 0; j < AR40XX_MAX_VLANS; j++) {
1710 + u8 vp = priv->vlan_table[j];
1711 +
1712 + if (!vp)
1713 + continue;
1714 +
1715 + for (i = 0; i < dev->ports; i++) {
1716 + u8 mask = BIT(i);
1717 +
1718 + if (vp & mask)
1719 + portmask[i] |= vp & ~mask;
1720 + }
1721 +
1722 + ar40xx_vtu_load_vlan(priv, priv->vlan_id[j],
1723 + priv->vlan_table[j]);
1724 + }
1725 + } else {
1726 + /* 8021q vlan disabled */
1727 + for (i = 0; i < dev->ports; i++) {
1728 + if (i == AR40XX_PORT_CPU)
1729 + continue;
1730 +
1731 + portmask[i] = BIT(AR40XX_PORT_CPU);
1732 + portmask[AR40XX_PORT_CPU] |= BIT(i);
1733 + }
1734 + }
1735 +
1736 + /* update the port destination mask registers and tag settings */
1737 + for (i = 0; i < dev->ports; i++)
1738 + ar40xx_setup_port(priv, i, portmask[i]);
1739 +
1740 + ar40xx_set_mirror_regs(priv);
1741 +
1742 + mutex_unlock(&priv->reg_mutex);
1743 + return 0;
1744 +}
1745 +
1746 +static int
1747 +ar40xx_sw_reset_switch(struct switch_dev *dev)
1748 +{
1749 + struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
1750 + int i, rv;
1751 +
1752 + mutex_lock(&priv->reg_mutex);
1753 + memset(&priv->vlan, 0, sizeof(struct ar40xx_priv) -
1754 + offsetof(struct ar40xx_priv, vlan));
1755 +
1756 + for (i = 0; i < AR40XX_MAX_VLANS; i++)
1757 + priv->vlan_id[i] = i;
1758 +
1759 + ar40xx_vlan_init(priv);
1760 +
1761 + priv->mirror_rx = false;
1762 + priv->mirror_tx = false;
1763 + priv->source_port = 0;
1764 + priv->monitor_port = 0;
1765 +
1766 + mutex_unlock(&priv->reg_mutex);
1767 +
1768 + rv = ar40xx_sw_hw_apply(dev);
1769 + return rv;
1770 +}
1771 +
1772 +static int
1773 +ar40xx_start(struct ar40xx_priv *priv)
1774 +{
1775 + int ret;
1776 +
1777 + ret = ar40xx_hw_init(priv);
1778 + if (ret)
1779 + return ret;
1780 +
1781 + ret = ar40xx_sw_reset_switch(&priv->dev);
1782 + if (ret)
1783 + return ret;
1784 +
1785 + /* at last, setup cpu port */
1786 + ret = ar40xx_cpuport_setup(priv);
1787 + if (ret)
1788 + return ret;
1789 +
1790 + schedule_delayed_work(&priv->mib_work,
1791 + msecs_to_jiffies(AR40XX_MIB_WORK_DELAY));
1792 +
1793 + ar40xx_qm_err_check_work_start(priv);
1794 +
1795 + return 0;
1796 +}
1797 +
1798 +static const struct switch_dev_ops ar40xx_sw_ops = {
1799 + .attr_global = {
1800 + .attr = ar40xx_sw_attr_globals,
1801 + .n_attr = ARRAY_SIZE(ar40xx_sw_attr_globals),
1802 + },
1803 + .attr_port = {
1804 + .attr = ar40xx_sw_attr_port,
1805 + .n_attr = ARRAY_SIZE(ar40xx_sw_attr_port),
1806 + },
1807 + .attr_vlan = {
1808 + .attr = ar40xx_sw_attr_vlan,
1809 + .n_attr = ARRAY_SIZE(ar40xx_sw_attr_vlan),
1810 + },
1811 + .get_port_pvid = ar40xx_sw_get_pvid,
1812 + .set_port_pvid = ar40xx_sw_set_pvid,
1813 + .get_vlan_ports = ar40xx_sw_get_ports,
1814 + .set_vlan_ports = ar40xx_sw_set_ports,
1815 + .apply_config = ar40xx_sw_hw_apply,
1816 + .reset_switch = ar40xx_sw_reset_switch,
1817 + .get_port_link = ar40xx_sw_get_port_link,
1818 +};
1819 +
1820 +/* Start of phy driver support */
1821 +
1822 +static const u32 ar40xx_phy_ids[] = {
1823 + 0x004dd0b1,
1824 + 0x004dd0b2, /* AR40xx */
1825 +};
1826 +
1827 +static bool
1828 +ar40xx_phy_match(u32 phy_id)
1829 +{
1830 + int i;
1831 +
1832 + for (i = 0; i < ARRAY_SIZE(ar40xx_phy_ids); i++)
1833 + if (phy_id == ar40xx_phy_ids[i])
1834 + return true;
1835 +
1836 + return false;
1837 +}
1838 +
1839 +static bool
1840 +is_ar40xx_phy(struct mii_bus *bus)
1841 +{
1842 + unsigned i;
1843 +
1844 + for (i = 0; i < 4; i++) {
1845 + u32 phy_id;
1846 +
1847 + phy_id = mdiobus_read(bus, i, MII_PHYSID1) << 16;
1848 + phy_id |= mdiobus_read(bus, i, MII_PHYSID2);
1849 + if (!ar40xx_phy_match(phy_id))
1850 + return false;
1851 + }
1852 +
1853 + return true;
1854 +}
1855 +
1856 +static int
1857 +ar40xx_phy_probe(struct phy_device *phydev)
1858 +{
1859 + if (!is_ar40xx_phy(phydev->mdio.bus))
1860 + return -ENODEV;
1861 +
1862 + ar40xx_priv->mii_bus = phydev->mdio.bus;
1863 + phydev->priv = ar40xx_priv;
1864 + if (phydev->mdio.addr == 0)
1865 + ar40xx_priv->phy = phydev;
1866 +
1867 + linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, phydev->supported);
1868 + linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, phydev->advertising);
1869 + return 0;
1870 +}
1871 +
1872 +static void
1873 +ar40xx_phy_remove(struct phy_device *phydev)
1874 +{
1875 + ar40xx_priv->mii_bus = NULL;
1876 + phydev->priv = NULL;
1877 +}
1878 +
1879 +static int
1880 +ar40xx_phy_config_init(struct phy_device *phydev)
1881 +{
1882 + return 0;
1883 +}
1884 +
1885 +static int
1886 +ar40xx_phy_read_status(struct phy_device *phydev)
1887 +{
1888 + if (phydev->mdio.addr != 0)
1889 + return genphy_read_status(phydev);
1890 +
1891 + return 0;
1892 +}
1893 +
1894 +static int
1895 +ar40xx_phy_config_aneg(struct phy_device *phydev)
1896 +{
1897 + if (phydev->mdio.addr == 0)
1898 + return 0;
1899 +
1900 + return genphy_config_aneg(phydev);
1901 +}
1902 +
1903 +static struct phy_driver ar40xx_phy_driver = {
1904 + .phy_id = 0x004d0000,
1905 + .name = "QCA Malibu",
1906 + .phy_id_mask = 0xffff0000,
1907 + .features = PHY_GBIT_FEATURES,
1908 + .probe = ar40xx_phy_probe,
1909 + .remove = ar40xx_phy_remove,
1910 + .config_init = ar40xx_phy_config_init,
1911 + .config_aneg = ar40xx_phy_config_aneg,
1912 + .read_status = ar40xx_phy_read_status,
1913 +};
1914 +
1915 +static uint16_t ar40xx_gpio_get_phy(unsigned int offset)
1916 +{
1917 + return offset / 4;
1918 +}
1919 +
1920 +static uint16_t ar40xx_gpio_get_reg(unsigned int offset)
1921 +{
1922 + return 0x8074 + offset % 4;
1923 +}
1924 +
1925 +static void ar40xx_gpio_set(struct gpio_chip *gc, unsigned int offset,
1926 + int value)
1927 +{
1928 + struct ar40xx_priv *priv = gpiochip_get_data(gc);
1929 +
1930 + ar40xx_phy_mmd_write(priv, ar40xx_gpio_get_phy(offset), 0x7,
1931 + ar40xx_gpio_get_reg(offset),
1932 + value ? 0xA000 : 0x8000);
1933 +}
1934 +
1935 +static int ar40xx_gpio_get(struct gpio_chip *gc, unsigned offset)
1936 +{
1937 + struct ar40xx_priv *priv = gpiochip_get_data(gc);
1938 +
1939 + return ar40xx_phy_mmd_read(priv, ar40xx_gpio_get_phy(offset), 0x7,
1940 + ar40xx_gpio_get_reg(offset)) == 0xA000;
1941 +}
1942 +
1943 +static int ar40xx_gpio_get_dir(struct gpio_chip *gc, unsigned offset)
1944 +{
1945 + return 0; /* only out direction */
1946 +}
1947 +
1948 +static int ar40xx_gpio_dir_out(struct gpio_chip *gc, unsigned offset,
1949 + int value)
1950 +{
1951 + /*
1952 + * the direction out value is used to set the initial value.
1953 + * support of this function is required by leds-gpio.c
1954 + */
1955 + ar40xx_gpio_set(gc, offset, value);
1956 + return 0;
1957 +}
1958 +
1959 +static void ar40xx_register_gpio(struct device *pdev,
1960 + struct ar40xx_priv *priv,
1961 + struct device_node *switch_node)
1962 +{
1963 + struct gpio_chip *gc;
1964 + int err;
1965 +
1966 + gc = devm_kzalloc(pdev, sizeof(*gc), GFP_KERNEL);
1967 + if (!gc)
1968 + return;
1969 +
1970 + gc->label = "ar40xx_gpio",
1971 + gc->base = -1,
1972 + gc->ngpio = 5 /* mmd 0 - 4 */ * 4 /* 0x8074 - 0x8077 */,
1973 + gc->parent = pdev;
1974 + gc->owner = THIS_MODULE;
1975 +
1976 + gc->get_direction = ar40xx_gpio_get_dir;
1977 + gc->direction_output = ar40xx_gpio_dir_out;
1978 + gc->get = ar40xx_gpio_get;
1979 + gc->set = ar40xx_gpio_set;
1980 + gc->can_sleep = true;
1981 + gc->label = priv->dev.name;
1982 + gc->of_node = switch_node;
1983 +
1984 + err = devm_gpiochip_add_data(pdev, gc, priv);
1985 + if (err != 0)
1986 + dev_err(pdev, "Failed to register gpio %d.\n", err);
1987 +}
1988 +
1989 +/* End of phy driver support */
1990 +
1991 +/* Platform driver probe function */
1992 +
1993 +static int ar40xx_probe(struct platform_device *pdev)
1994 +{
1995 + struct device_node *switch_node;
1996 + struct device_node *psgmii_node;
1997 + const __be32 *mac_mode;
1998 + struct clk *ess_clk;
1999 + struct switch_dev *swdev;
2000 + struct ar40xx_priv *priv;
2001 + u32 len;
2002 + u32 num_mibs;
2003 + struct resource psgmii_base = {0};
2004 + struct resource switch_base = {0};
2005 + int ret;
2006 +
2007 + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
2008 + if (!priv)
2009 + return -ENOMEM;
2010 +
2011 + platform_set_drvdata(pdev, priv);
2012 + ar40xx_priv = priv;
2013 +
2014 + switch_node = of_node_get(pdev->dev.of_node);
2015 + if (of_address_to_resource(switch_node, 0, &switch_base) != 0)
2016 + return -EIO;
2017 +
2018 + priv->hw_addr = devm_ioremap_resource(&pdev->dev, &switch_base);
2019 + if (IS_ERR(priv->hw_addr)) {
2020 + dev_err(&pdev->dev, "Failed to ioremap switch_base!\n");
2021 + return PTR_ERR(priv->hw_addr);
2022 + }
2023 +
2024 + /*psgmii dts get*/
2025 + psgmii_node = of_find_node_by_name(NULL, "ess-psgmii");
2026 + if (!psgmii_node) {
2027 + dev_err(&pdev->dev, "Failed to find ess-psgmii node!\n");
2028 + return -EINVAL;
2029 + }
2030 +
2031 + if (of_address_to_resource(psgmii_node, 0, &psgmii_base) != 0)
2032 + return -EIO;
2033 +
2034 + priv->psgmii_hw_addr = devm_ioremap_resource(&pdev->dev, &psgmii_base);
2035 + if (IS_ERR(priv->psgmii_hw_addr)) {
2036 + dev_err(&pdev->dev, "psgmii ioremap fail!\n");
2037 + return PTR_ERR(priv->psgmii_hw_addr);
2038 + }
2039 +
2040 + mac_mode = of_get_property(switch_node, "switch_mac_mode", &len);
2041 + if (!mac_mode) {
2042 + dev_err(&pdev->dev, "Failed to read switch_mac_mode\n");
2043 + return -EINVAL;
2044 + }
2045 + priv->mac_mode = be32_to_cpup(mac_mode);
2046 +
2047 + ess_clk = of_clk_get_by_name(switch_node, "ess_clk");
2048 + if (ess_clk)
2049 + clk_prepare_enable(ess_clk);
2050 +
2051 + priv->ess_rst = devm_reset_control_get(&pdev->dev, "ess_rst");
2052 + if (IS_ERR(priv->ess_rst)) {
2053 + dev_err(&pdev->dev, "Failed to get ess_rst control!\n");
2054 + return PTR_ERR(priv->ess_rst);
2055 + }
2056 +
2057 + if (of_property_read_u32(switch_node, "switch_cpu_bmp",
2058 + &priv->cpu_bmp) ||
2059 + of_property_read_u32(switch_node, "switch_lan_bmp",
2060 + &priv->lan_bmp) ||
2061 + of_property_read_u32(switch_node, "switch_wan_bmp",
2062 + &priv->wan_bmp)) {
2063 + dev_err(&pdev->dev, "Failed to read port properties\n");
2064 + return -EIO;
2065 + }
2066 +
2067 + ret = phy_driver_register(&ar40xx_phy_driver, THIS_MODULE);
2068 + if (ret) {
2069 + dev_err(&pdev->dev, "Failed to register ar40xx phy driver!\n");
2070 + return -EIO;
2071 + }
2072 +
2073 + mutex_init(&priv->reg_mutex);
2074 + mutex_init(&priv->mib_lock);
2075 + INIT_DELAYED_WORK(&priv->mib_work, ar40xx_mib_work_func);
2076 +
2077 + /* register switch */
2078 + swdev = &priv->dev;
2079 +
2080 + if (priv->mii_bus == NULL) {
2081 + dev_err(&pdev->dev, "Probe failed - Missing PHYs!\n");
2082 + ret = -ENODEV;
2083 + goto err_missing_phy;
2084 + }
2085 +
2086 + swdev->alias = dev_name(&priv->mii_bus->dev);
2087 +
2088 + swdev->cpu_port = AR40XX_PORT_CPU;
2089 + swdev->name = "QCA AR40xx";
2090 + swdev->vlans = AR40XX_MAX_VLANS;
2091 + swdev->ports = AR40XX_NUM_PORTS;
2092 + swdev->ops = &ar40xx_sw_ops;
2093 + ret = register_switch(swdev, NULL);
2094 + if (ret)
2095 + goto err_unregister_phy;
2096 +
2097 + num_mibs = ARRAY_SIZE(ar40xx_mibs);
2098 + len = priv->dev.ports * num_mibs *
2099 + sizeof(*priv->mib_stats);
2100 + priv->mib_stats = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
2101 + if (!priv->mib_stats) {
2102 + ret = -ENOMEM;
2103 + goto err_unregister_switch;
2104 + }
2105 +
2106 + ar40xx_start(priv);
2107 +
2108 + if (of_property_read_bool(switch_node, "gpio-controller"))
2109 + ar40xx_register_gpio(&pdev->dev, ar40xx_priv, switch_node);
2110 +
2111 + return 0;
2112 +
2113 +err_unregister_switch:
2114 + unregister_switch(&priv->dev);
2115 +err_unregister_phy:
2116 + phy_driver_unregister(&ar40xx_phy_driver);
2117 +err_missing_phy:
2118 + platform_set_drvdata(pdev, NULL);
2119 + return ret;
2120 +}
2121 +
2122 +static int ar40xx_remove(struct platform_device *pdev)
2123 +{
2124 + struct ar40xx_priv *priv = platform_get_drvdata(pdev);
2125 +
2126 + cancel_delayed_work_sync(&priv->qm_dwork);
2127 + cancel_delayed_work_sync(&priv->mib_work);
2128 +
2129 + unregister_switch(&priv->dev);
2130 +
2131 + phy_driver_unregister(&ar40xx_phy_driver);
2132 +
2133 + return 0;
2134 +}
2135 +
2136 +static const struct of_device_id ar40xx_of_mtable[] = {
2137 + {.compatible = "qcom,ess-switch" },
2138 + {}
2139 +};
2140 +
2141 +struct platform_driver ar40xx_drv = {
2142 + .probe = ar40xx_probe,
2143 + .remove = ar40xx_remove,
2144 + .driver = {
2145 + .name = "ar40xx",
2146 + .of_match_table = ar40xx_of_mtable,
2147 + },
2148 +};
2149 +
2150 +module_platform_driver(ar40xx_drv);
2151 +
2152 +MODULE_DESCRIPTION("IPQ40XX ESS driver");
2153 +MODULE_LICENSE("Dual BSD/GPL");
2154 Index: linux-5.4.51/drivers/net/phy/ar40xx.h
2155 ===================================================================
2156 --- /dev/null
2157 +++ linux-5.4.51/drivers/net/phy/ar40xx.h
2158 @@ -0,0 +1,342 @@
2159 +/*
2160 + * Copyright (c) 2016, The Linux Foundation. All rights reserved.
2161 + *
2162 + * Permission to use, copy, modify, and/or distribute this software for
2163 + * any purpose with or without fee is hereby granted, provided that the
2164 + * above copyright notice and this permission notice appear in all copies.
2165 + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
2166 + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
2167 + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
2168 + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
2169 + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
2170 + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
2171 + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
2172 + */
2173 +
2174 + #ifndef __AR40XX_H
2175 +#define __AR40XX_H
2176 +
2177 +#define AR40XX_MAX_VLANS 128
2178 +#define AR40XX_NUM_PORTS 6
2179 +#define AR40XX_NUM_PHYS 5
2180 +
2181 +#define BITS(_s, _n) (((1UL << (_n)) - 1) << (_s))
2182 +
2183 +struct ar40xx_priv {
2184 + struct switch_dev dev;
2185 +
2186 + u8 __iomem *hw_addr;
2187 + u8 __iomem *psgmii_hw_addr;
2188 + u32 mac_mode;
2189 + struct reset_control *ess_rst;
2190 + u32 cpu_bmp;
2191 + u32 lan_bmp;
2192 + u32 wan_bmp;
2193 +
2194 + struct mii_bus *mii_bus;
2195 + struct phy_device *phy;
2196 +
2197 + /* mutex for qm task */
2198 + struct mutex qm_lock;
2199 + struct delayed_work qm_dwork;
2200 + u32 port_link_up[AR40XX_NUM_PORTS];
2201 + u32 ar40xx_port_old_link[AR40XX_NUM_PORTS];
2202 + u32 ar40xx_port_qm_buf[AR40XX_NUM_PORTS];
2203 +
2204 + u32 phy_t_status;
2205 +
2206 + /* mutex for switch reg access */
2207 + struct mutex reg_mutex;
2208 +
2209 + /* mutex for mib task */
2210 + struct mutex mib_lock;
2211 + struct delayed_work mib_work;
2212 + int mib_next_port;
2213 + u64 *mib_stats;
2214 +
2215 + char buf[2048];
2216 +
2217 + /* all fields below will be cleared on reset */
2218 + bool vlan;
2219 + u16 vlan_id[AR40XX_MAX_VLANS];
2220 + u8 vlan_table[AR40XX_MAX_VLANS];
2221 + u8 vlan_tagged;
2222 + u16 pvid[AR40XX_NUM_PORTS];
2223 +
2224 + /* mirror */
2225 + bool mirror_rx;
2226 + bool mirror_tx;
2227 + int source_port;
2228 + int monitor_port;
2229 +};
2230 +
2231 +#define AR40XX_PORT_LINK_UP 1
2232 +#define AR40XX_PORT_LINK_DOWN 0
2233 +#define AR40XX_QM_NOT_EMPTY 1
2234 +#define AR40XX_QM_EMPTY 0
2235 +
2236 +#define AR40XX_LAN_VLAN 1
2237 +#define AR40XX_WAN_VLAN 2
2238 +
2239 +enum ar40xx_port_wrapper_cfg {
2240 + PORT_WRAPPER_PSGMII = 0,
2241 +};
2242 +
2243 +struct ar40xx_mib_desc {
2244 + u32 size;
2245 + u32 offset;
2246 + const char *name;
2247 +};
2248 +
2249 +#define AR40XX_PORT_CPU 0
2250 +
2251 +#define AR40XX_PSGMII_MODE_CONTROL 0x1b4
2252 +#define AR40XX_PSGMII_ATHR_CSCO_MODE_25M BIT(0)
2253 +
2254 +#define AR40XX_PSGMIIPHY_TX_CONTROL 0x288
2255 +
2256 +#define AR40XX_MII_ATH_MMD_ADDR 0x0d
2257 +#define AR40XX_MII_ATH_MMD_DATA 0x0e
2258 +#define AR40XX_MII_ATH_DBG_ADDR 0x1d
2259 +#define AR40XX_MII_ATH_DBG_DATA 0x1e
2260 +
2261 +#define AR40XX_STATS_RXBROAD 0x00
2262 +#define AR40XX_STATS_RXPAUSE 0x04
2263 +#define AR40XX_STATS_RXMULTI 0x08
2264 +#define AR40XX_STATS_RXFCSERR 0x0c
2265 +#define AR40XX_STATS_RXALIGNERR 0x10
2266 +#define AR40XX_STATS_RXRUNT 0x14
2267 +#define AR40XX_STATS_RXFRAGMENT 0x18
2268 +#define AR40XX_STATS_RX64BYTE 0x1c
2269 +#define AR40XX_STATS_RX128BYTE 0x20
2270 +#define AR40XX_STATS_RX256BYTE 0x24
2271 +#define AR40XX_STATS_RX512BYTE 0x28
2272 +#define AR40XX_STATS_RX1024BYTE 0x2c
2273 +#define AR40XX_STATS_RX1518BYTE 0x30
2274 +#define AR40XX_STATS_RXMAXBYTE 0x34
2275 +#define AR40XX_STATS_RXTOOLONG 0x38
2276 +#define AR40XX_STATS_RXGOODBYTE 0x3c
2277 +#define AR40XX_STATS_RXBADBYTE 0x44
2278 +#define AR40XX_STATS_RXOVERFLOW 0x4c
2279 +#define AR40XX_STATS_FILTERED 0x50
2280 +#define AR40XX_STATS_TXBROAD 0x54
2281 +#define AR40XX_STATS_TXPAUSE 0x58
2282 +#define AR40XX_STATS_TXMULTI 0x5c
2283 +#define AR40XX_STATS_TXUNDERRUN 0x60
2284 +#define AR40XX_STATS_TX64BYTE 0x64
2285 +#define AR40XX_STATS_TX128BYTE 0x68
2286 +#define AR40XX_STATS_TX256BYTE 0x6c
2287 +#define AR40XX_STATS_TX512BYTE 0x70
2288 +#define AR40XX_STATS_TX1024BYTE 0x74
2289 +#define AR40XX_STATS_TX1518BYTE 0x78
2290 +#define AR40XX_STATS_TXMAXBYTE 0x7c
2291 +#define AR40XX_STATS_TXOVERSIZE 0x80
2292 +#define AR40XX_STATS_TXBYTE 0x84
2293 +#define AR40XX_STATS_TXCOLLISION 0x8c
2294 +#define AR40XX_STATS_TXABORTCOL 0x90
2295 +#define AR40XX_STATS_TXMULTICOL 0x94
2296 +#define AR40XX_STATS_TXSINGLECOL 0x98
2297 +#define AR40XX_STATS_TXEXCDEFER 0x9c
2298 +#define AR40XX_STATS_TXDEFER 0xa0
2299 +#define AR40XX_STATS_TXLATECOL 0xa4
2300 +
2301 +#define AR40XX_REG_MODULE_EN 0x030
2302 +#define AR40XX_MODULE_EN_MIB BIT(0)
2303 +
2304 +#define AR40XX_REG_MIB_FUNC 0x034
2305 +#define AR40XX_MIB_BUSY BIT(17)
2306 +#define AR40XX_MIB_CPU_KEEP BIT(20)
2307 +#define AR40XX_MIB_FUNC BITS(24, 3)
2308 +#define AR40XX_MIB_FUNC_S 24
2309 +#define AR40XX_MIB_FUNC_NO_OP 0x0
2310 +#define AR40XX_MIB_FUNC_FLUSH 0x1
2311 +
2312 +#define AR40XX_ESS_SERVICE_TAG 0x48
2313 +#define AR40XX_ESS_SERVICE_TAG_STAG BIT(17)
2314 +
2315 +#define AR40XX_REG_PORT_STATUS(_i) (0x07c + (_i) * 4)
2316 +#define AR40XX_PORT_SPEED BITS(0, 2)
2317 +#define AR40XX_PORT_STATUS_SPEED_S 0
2318 +#define AR40XX_PORT_TX_EN BIT(2)
2319 +#define AR40XX_PORT_RX_EN BIT(3)
2320 +#define AR40XX_PORT_STATUS_TXFLOW BIT(4)
2321 +#define AR40XX_PORT_STATUS_RXFLOW BIT(5)
2322 +#define AR40XX_PORT_DUPLEX BIT(6)
2323 +#define AR40XX_PORT_TXHALF_FLOW BIT(7)
2324 +#define AR40XX_PORT_STATUS_LINK_UP BIT(8)
2325 +#define AR40XX_PORT_AUTO_LINK_EN BIT(9)
2326 +#define AR40XX_PORT_STATUS_FLOW_CONTROL BIT(12)
2327 +
2328 +#define AR40XX_REG_MAX_FRAME_SIZE 0x078
2329 +#define AR40XX_MAX_FRAME_SIZE_MTU BITS(0, 14)
2330 +
2331 +#define AR40XX_REG_PORT_HEADER(_i) (0x09c + (_i) * 4)
2332 +
2333 +#define AR40XX_REG_EEE_CTRL 0x100
2334 +#define AR40XX_EEE_CTRL_DISABLE_PHY(_i) BIT(4 + (_i) * 2)
2335 +
2336 +#define AR40XX_REG_PORT_VLAN0(_i) (0x420 + (_i) * 0x8)
2337 +#define AR40XX_PORT_VLAN0_DEF_SVID BITS(0, 12)
2338 +#define AR40XX_PORT_VLAN0_DEF_SVID_S 0
2339 +#define AR40XX_PORT_VLAN0_DEF_CVID BITS(16, 12)
2340 +#define AR40XX_PORT_VLAN0_DEF_CVID_S 16
2341 +
2342 +#define AR40XX_REG_PORT_VLAN1(_i) (0x424 + (_i) * 0x8)
2343 +#define AR40XX_PORT_VLAN1_CORE_PORT BIT(9)
2344 +#define AR40XX_PORT_VLAN1_PORT_TLS_MODE BIT(7)
2345 +#define AR40XX_PORT_VLAN1_PORT_VLAN_PROP BIT(6)
2346 +#define AR40XX_PORT_VLAN1_OUT_MODE BITS(12, 2)
2347 +#define AR40XX_PORT_VLAN1_OUT_MODE_S 12
2348 +#define AR40XX_PORT_VLAN1_OUT_MODE_UNMOD 0
2349 +#define AR40XX_PORT_VLAN1_OUT_MODE_UNTAG 1
2350 +#define AR40XX_PORT_VLAN1_OUT_MODE_TAG 2
2351 +#define AR40XX_PORT_VLAN1_OUT_MODE_UNTOUCH 3
2352 +
2353 +#define AR40XX_REG_VTU_FUNC0 0x0610
2354 +#define AR40XX_VTU_FUNC0_EG_MODE BITS(4, 14)
2355 +#define AR40XX_VTU_FUNC0_EG_MODE_S(_i) (4 + (_i) * 2)
2356 +#define AR40XX_VTU_FUNC0_EG_MODE_KEEP 0
2357 +#define AR40XX_VTU_FUNC0_EG_MODE_UNTAG 1
2358 +#define AR40XX_VTU_FUNC0_EG_MODE_TAG 2
2359 +#define AR40XX_VTU_FUNC0_EG_MODE_NOT 3
2360 +#define AR40XX_VTU_FUNC0_IVL BIT(19)
2361 +#define AR40XX_VTU_FUNC0_VALID BIT(20)
2362 +
2363 +#define AR40XX_REG_VTU_FUNC1 0x0614
2364 +#define AR40XX_VTU_FUNC1_OP BITS(0, 3)
2365 +#define AR40XX_VTU_FUNC1_OP_NOOP 0
2366 +#define AR40XX_VTU_FUNC1_OP_FLUSH 1
2367 +#define AR40XX_VTU_FUNC1_OP_LOAD 2
2368 +#define AR40XX_VTU_FUNC1_OP_PURGE 3
2369 +#define AR40XX_VTU_FUNC1_OP_REMOVE_PORT 4
2370 +#define AR40XX_VTU_FUNC1_OP_GET_NEXT 5
2371 +#define AR40XX7_VTU_FUNC1_OP_GET_ONE 6
2372 +#define AR40XX_VTU_FUNC1_FULL BIT(4)
2373 +#define AR40XX_VTU_FUNC1_PORT BITS(8, 4)
2374 +#define AR40XX_VTU_FUNC1_PORT_S 8
2375 +#define AR40XX_VTU_FUNC1_VID BITS(16, 12)
2376 +#define AR40XX_VTU_FUNC1_VID_S 16
2377 +#define AR40XX_VTU_FUNC1_BUSY BIT(31)
2378 +
2379 +#define AR40XX_REG_FWD_CTRL0 0x620
2380 +#define AR40XX_FWD_CTRL0_CPU_PORT_EN BIT(10)
2381 +#define AR40XX_FWD_CTRL0_MIRROR_PORT BITS(4, 4)
2382 +#define AR40XX_FWD_CTRL0_MIRROR_PORT_S 4
2383 +
2384 +#define AR40XX_REG_FWD_CTRL1 0x624
2385 +#define AR40XX_FWD_CTRL1_UC_FLOOD BITS(0, 7)
2386 +#define AR40XX_FWD_CTRL1_UC_FLOOD_S 0
2387 +#define AR40XX_FWD_CTRL1_MC_FLOOD BITS(8, 7)
2388 +#define AR40XX_FWD_CTRL1_MC_FLOOD_S 8
2389 +#define AR40XX_FWD_CTRL1_BC_FLOOD BITS(16, 7)
2390 +#define AR40XX_FWD_CTRL1_BC_FLOOD_S 16
2391 +#define AR40XX_FWD_CTRL1_IGMP BITS(24, 7)
2392 +#define AR40XX_FWD_CTRL1_IGMP_S 24
2393 +
2394 +#define AR40XX_REG_PORT_LOOKUP(_i) (0x660 + (_i) * 0xc)
2395 +#define AR40XX_PORT_LOOKUP_MEMBER BITS(0, 7)
2396 +#define AR40XX_PORT_LOOKUP_IN_MODE BITS(8, 2)
2397 +#define AR40XX_PORT_LOOKUP_IN_MODE_S 8
2398 +#define AR40XX_PORT_LOOKUP_STATE BITS(16, 3)
2399 +#define AR40XX_PORT_LOOKUP_STATE_S 16
2400 +#define AR40XX_PORT_LOOKUP_LEARN BIT(20)
2401 +#define AR40XX_PORT_LOOKUP_LOOPBACK BIT(21)
2402 +#define AR40XX_PORT_LOOKUP_ING_MIRROR_EN BIT(25)
2403 +
2404 +#define AR40XX_REG_ATU_FUNC 0x60c
2405 +#define AR40XX_ATU_FUNC_OP BITS(0, 4)
2406 +#define AR40XX_ATU_FUNC_OP_NOOP 0x0
2407 +#define AR40XX_ATU_FUNC_OP_FLUSH 0x1
2408 +#define AR40XX_ATU_FUNC_OP_LOAD 0x2
2409 +#define AR40XX_ATU_FUNC_OP_PURGE 0x3
2410 +#define AR40XX_ATU_FUNC_OP_FLUSH_LOCKED 0x4
2411 +#define AR40XX_ATU_FUNC_OP_FLUSH_UNICAST 0x5
2412 +#define AR40XX_ATU_FUNC_OP_GET_NEXT 0x6
2413 +#define AR40XX_ATU_FUNC_OP_SEARCH_MAC 0x7
2414 +#define AR40XX_ATU_FUNC_OP_CHANGE_TRUNK 0x8
2415 +#define AR40XX_ATU_FUNC_BUSY BIT(31)
2416 +
2417 +#define AR40XX_REG_QM_DEBUG_ADDR 0x820
2418 +#define AR40XX_REG_QM_DEBUG_VALUE 0x824
2419 +#define AR40XX_REG_QM_PORT0_3_QNUM 0x1d
2420 +#define AR40XX_REG_QM_PORT4_6_QNUM 0x1e
2421 +
2422 +#define AR40XX_REG_PORT_HOL_CTRL1(_i) (0x974 + (_i) * 0x8)
2423 +#define AR40XX_PORT_HOL_CTRL1_EG_MIRROR_EN BIT(16)
2424 +
2425 +#define AR40XX_REG_PORT_FLOWCTRL_THRESH(_i) (0x9b0 + (_i) * 0x4)
2426 +#define AR40XX_PORT0_FC_THRESH_ON_DFLT 0x60
2427 +#define AR40XX_PORT0_FC_THRESH_OFF_DFLT 0x90
2428 +
2429 +#define AR40XX_PHY_DEBUG_0 0
2430 +#define AR40XX_PHY_MANU_CTRL_EN BIT(12)
2431 +
2432 +#define AR40XX_PHY_DEBUG_2 2
2433 +
2434 +#define AR40XX_PHY_SPEC_STATUS 0x11
2435 +#define AR40XX_PHY_SPEC_STATUS_LINK BIT(10)
2436 +#define AR40XX_PHY_SPEC_STATUS_DUPLEX BIT(13)
2437 +#define AR40XX_PHY_SPEC_STATUS_SPEED BITS(14, 2)
2438 +
2439 +/* port forwarding state */
2440 +enum {
2441 + AR40XX_PORT_STATE_DISABLED = 0,
2442 + AR40XX_PORT_STATE_BLOCK = 1,
2443 + AR40XX_PORT_STATE_LISTEN = 2,
2444 + AR40XX_PORT_STATE_LEARN = 3,
2445 + AR40XX_PORT_STATE_FORWARD = 4
2446 +};
2447 +
2448 +/* ingress 802.1q mode */
2449 +enum {
2450 + AR40XX_IN_PORT_ONLY = 0,
2451 + AR40XX_IN_PORT_FALLBACK = 1,
2452 + AR40XX_IN_VLAN_ONLY = 2,
2453 + AR40XX_IN_SECURE = 3
2454 +};
2455 +
2456 +/* egress 802.1q mode */
2457 +enum {
2458 + AR40XX_OUT_KEEP = 0,
2459 + AR40XX_OUT_STRIP_VLAN = 1,
2460 + AR40XX_OUT_ADD_VLAN = 2
2461 +};
2462 +
2463 +/* port speed */
2464 +enum {
2465 + AR40XX_PORT_SPEED_10M = 0,
2466 + AR40XX_PORT_SPEED_100M = 1,
2467 + AR40XX_PORT_SPEED_1000M = 2,
2468 + AR40XX_PORT_SPEED_ERR = 3,
2469 +};
2470 +
2471 +#define AR40XX_MIB_WORK_DELAY 2000 /* msecs */
2472 +
2473 +#define AR40XX_QM_WORK_DELAY 100
2474 +
2475 +#define AR40XX_MIB_FUNC_CAPTURE 0x3
2476 +
2477 +#define AR40XX_REG_PORT_STATS_START 0x1000
2478 +#define AR40XX_REG_PORT_STATS_LEN 0x100
2479 +
2480 +#define AR40XX_PORTS_ALL 0x3f
2481 +
2482 +#define AR40XX_PSGMII_ID 5
2483 +#define AR40XX_PSGMII_CALB_NUM 100
2484 +#define AR40XX_MALIBU_PSGMII_MODE_CTRL 0x6d
2485 +#define AR40XX_MALIBU_PHY_PSGMII_MODE_CTRL_ADJUST_VAL 0x220c
2486 +#define AR40XX_MALIBU_PHY_MMD7_DAC_CTRL 0x801a
2487 +#define AR40XX_MALIBU_DAC_CTRL_MASK 0x380
2488 +#define AR40XX_MALIBU_DAC_CTRL_VALUE 0x280
2489 +#define AR40XX_MALIBU_PHY_RLP_CTRL 0x805a
2490 +#define AR40XX_PSGMII_TX_DRIVER_1_CTRL 0xb
2491 +#define AR40XX_MALIBU_PHY_PSGMII_REDUCE_SERDES_TX_AMP 0x8a
2492 +#define AR40XX_MALIBU_PHY_LAST_ADDR 4
2493 +
2494 +static inline struct ar40xx_priv *
2495 +swdev_to_ar40xx(struct switch_dev *swdev)
2496 +{
2497 + return container_of(swdev, struct ar40xx_priv, dev);
2498 +}
2499 +
2500 +#endif /* __AR40XX_H */