kernel: bump 5.4 to 5.4.52
[openwrt/staging/hauke.git] / target / linux / ipq40xx / patches-5.4 / 705-net-add-qualcomm-ar40xx-phy.patch
1 --- a/drivers/net/phy/Kconfig
2 +++ b/drivers/net/phy/Kconfig
3 @@ -587,6 +587,13 @@ config MDIO_IPQ40XX
4 This driver supports the MDIO interface found in Qualcomm
5 Atheros ipq40xx Soc chip.
6
7 +config AR40XX_PHY
8 + tristate "Driver for Qualcomm Atheros IPQ40XX switches"
9 + depends on HAS_IOMEM && OF
10 + select SWCONFIG
11 + ---help---
12 + This is the driver for Qualcomm Atheros IPQ40XX ESS switches.
13 +
14 endif # PHYLIB
15
16 config MICREL_KS8995MA
17 --- a/drivers/net/phy/Makefile
18 +++ b/drivers/net/phy/Makefile
19 @@ -70,6 +70,7 @@ ifdef CONFIG_HWMON
20 aquantia-objs += aquantia_hwmon.o
21 endif
22 obj-$(CONFIG_AQUANTIA_PHY) += aquantia.o
23 +obj-$(CONFIG_AR40XX_PHY) += ar40xx.o
24 obj-$(CONFIG_AX88796B_PHY) += ax88796b.o
25 obj-$(CONFIG_AT803X_PHY) += at803x.o
26 obj-$(CONFIG_BCM63XX_PHY) += bcm63xx.o
27 --- /dev/null
28 +++ b/drivers/net/phy/ar40xx.c
29 @@ -0,0 +1,2118 @@
30 +/*
31 + * Copyright (c) 2016, The Linux Foundation. All rights reserved.
32 + *
33 + * Permission to use, copy, modify, and/or distribute this software for
34 + * any purpose with or without fee is hereby granted, provided that the
35 + * above copyright notice and this permission notice appear in all copies.
36 + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
37 + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
38 + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
39 + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
40 + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
41 + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
42 + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
43 + */
44 +
45 +#include <linux/module.h>
46 +#include <linux/list.h>
47 +#include <linux/bitops.h>
48 +#include <linux/switch.h>
49 +#include <linux/delay.h>
50 +#include <linux/phy.h>
51 +#include <linux/clk.h>
52 +#include <linux/reset.h>
53 +#include <linux/lockdep.h>
54 +#include <linux/workqueue.h>
55 +#include <linux/of_device.h>
56 +#include <linux/of_address.h>
57 +#include <linux/mdio.h>
58 +#include <linux/gpio.h>
59 +
60 +#include "ar40xx.h"
61 +
62 +static struct ar40xx_priv *ar40xx_priv;
63 +
64 +#define MIB_DESC(_s , _o, _n) \
65 + { \
66 + .size = (_s), \
67 + .offset = (_o), \
68 + .name = (_n), \
69 + }
70 +
71 +static const struct ar40xx_mib_desc ar40xx_mibs[] = {
72 + MIB_DESC(1, AR40XX_STATS_RXBROAD, "RxBroad"),
73 + MIB_DESC(1, AR40XX_STATS_RXPAUSE, "RxPause"),
74 + MIB_DESC(1, AR40XX_STATS_RXMULTI, "RxMulti"),
75 + MIB_DESC(1, AR40XX_STATS_RXFCSERR, "RxFcsErr"),
76 + MIB_DESC(1, AR40XX_STATS_RXALIGNERR, "RxAlignErr"),
77 + MIB_DESC(1, AR40XX_STATS_RXRUNT, "RxRunt"),
78 + MIB_DESC(1, AR40XX_STATS_RXFRAGMENT, "RxFragment"),
79 + MIB_DESC(1, AR40XX_STATS_RX64BYTE, "Rx64Byte"),
80 + MIB_DESC(1, AR40XX_STATS_RX128BYTE, "Rx128Byte"),
81 + MIB_DESC(1, AR40XX_STATS_RX256BYTE, "Rx256Byte"),
82 + MIB_DESC(1, AR40XX_STATS_RX512BYTE, "Rx512Byte"),
83 + MIB_DESC(1, AR40XX_STATS_RX1024BYTE, "Rx1024Byte"),
84 + MIB_DESC(1, AR40XX_STATS_RX1518BYTE, "Rx1518Byte"),
85 + MIB_DESC(1, AR40XX_STATS_RXMAXBYTE, "RxMaxByte"),
86 + MIB_DESC(1, AR40XX_STATS_RXTOOLONG, "RxTooLong"),
87 + MIB_DESC(2, AR40XX_STATS_RXGOODBYTE, "RxGoodByte"),
88 + MIB_DESC(2, AR40XX_STATS_RXBADBYTE, "RxBadByte"),
89 + MIB_DESC(1, AR40XX_STATS_RXOVERFLOW, "RxOverFlow"),
90 + MIB_DESC(1, AR40XX_STATS_FILTERED, "Filtered"),
91 + MIB_DESC(1, AR40XX_STATS_TXBROAD, "TxBroad"),
92 + MIB_DESC(1, AR40XX_STATS_TXPAUSE, "TxPause"),
93 + MIB_DESC(1, AR40XX_STATS_TXMULTI, "TxMulti"),
94 + MIB_DESC(1, AR40XX_STATS_TXUNDERRUN, "TxUnderRun"),
95 + MIB_DESC(1, AR40XX_STATS_TX64BYTE, "Tx64Byte"),
96 + MIB_DESC(1, AR40XX_STATS_TX128BYTE, "Tx128Byte"),
97 + MIB_DESC(1, AR40XX_STATS_TX256BYTE, "Tx256Byte"),
98 + MIB_DESC(1, AR40XX_STATS_TX512BYTE, "Tx512Byte"),
99 + MIB_DESC(1, AR40XX_STATS_TX1024BYTE, "Tx1024Byte"),
100 + MIB_DESC(1, AR40XX_STATS_TX1518BYTE, "Tx1518Byte"),
101 + MIB_DESC(1, AR40XX_STATS_TXMAXBYTE, "TxMaxByte"),
102 + MIB_DESC(1, AR40XX_STATS_TXOVERSIZE, "TxOverSize"),
103 + MIB_DESC(2, AR40XX_STATS_TXBYTE, "TxByte"),
104 + MIB_DESC(1, AR40XX_STATS_TXCOLLISION, "TxCollision"),
105 + MIB_DESC(1, AR40XX_STATS_TXABORTCOL, "TxAbortCol"),
106 + MIB_DESC(1, AR40XX_STATS_TXMULTICOL, "TxMultiCol"),
107 + MIB_DESC(1, AR40XX_STATS_TXSINGLECOL, "TxSingleCol"),
108 + MIB_DESC(1, AR40XX_STATS_TXEXCDEFER, "TxExcDefer"),
109 + MIB_DESC(1, AR40XX_STATS_TXDEFER, "TxDefer"),
110 + MIB_DESC(1, AR40XX_STATS_TXLATECOL, "TxLateCol"),
111 +};
112 +
113 +static u32
114 +ar40xx_read(struct ar40xx_priv *priv, int reg)
115 +{
116 + return readl(priv->hw_addr + reg);
117 +}
118 +
119 +static u32
120 +ar40xx_psgmii_read(struct ar40xx_priv *priv, int reg)
121 +{
122 + return readl(priv->psgmii_hw_addr + reg);
123 +}
124 +
125 +static void
126 +ar40xx_write(struct ar40xx_priv *priv, int reg, u32 val)
127 +{
128 + writel(val, priv->hw_addr + reg);
129 +}
130 +
131 +static u32
132 +ar40xx_rmw(struct ar40xx_priv *priv, int reg, u32 mask, u32 val)
133 +{
134 + u32 ret;
135 +
136 + ret = ar40xx_read(priv, reg);
137 + ret &= ~mask;
138 + ret |= val;
139 + ar40xx_write(priv, reg, ret);
140 + return ret;
141 +}
142 +
143 +static void
144 +ar40xx_psgmii_write(struct ar40xx_priv *priv, int reg, u32 val)
145 +{
146 + writel(val, priv->psgmii_hw_addr + reg);
147 +}
148 +
149 +static void
150 +ar40xx_phy_dbg_write(struct ar40xx_priv *priv, int phy_addr,
151 + u16 dbg_addr, u16 dbg_data)
152 +{
153 + struct mii_bus *bus = priv->mii_bus;
154 +
155 + mutex_lock(&bus->mdio_lock);
156 + bus->write(bus, phy_addr, AR40XX_MII_ATH_DBG_ADDR, dbg_addr);
157 + bus->write(bus, phy_addr, AR40XX_MII_ATH_DBG_DATA, dbg_data);
158 + mutex_unlock(&bus->mdio_lock);
159 +}
160 +
161 +static void
162 +ar40xx_phy_dbg_read(struct ar40xx_priv *priv, int phy_addr,
163 + u16 dbg_addr, u16 *dbg_data)
164 +{
165 + struct mii_bus *bus = priv->mii_bus;
166 +
167 + mutex_lock(&bus->mdio_lock);
168 + bus->write(bus, phy_addr, AR40XX_MII_ATH_DBG_ADDR, dbg_addr);
169 + *dbg_data = bus->read(bus, phy_addr, AR40XX_MII_ATH_DBG_DATA);
170 + mutex_unlock(&bus->mdio_lock);
171 +}
172 +
173 +static void
174 +ar40xx_phy_mmd_write(struct ar40xx_priv *priv, u32 phy_id,
175 + u16 mmd_num, u16 reg_id, u16 reg_val)
176 +{
177 + struct mii_bus *bus = priv->mii_bus;
178 +
179 + mutex_lock(&bus->mdio_lock);
180 + bus->write(bus, phy_id,
181 + AR40XX_MII_ATH_MMD_ADDR, mmd_num);
182 + bus->write(bus, phy_id,
183 + AR40XX_MII_ATH_MMD_DATA, reg_id);
184 + bus->write(bus, phy_id,
185 + AR40XX_MII_ATH_MMD_ADDR,
186 + 0x4000 | mmd_num);
187 + bus->write(bus, phy_id,
188 + AR40XX_MII_ATH_MMD_DATA, reg_val);
189 + mutex_unlock(&bus->mdio_lock);
190 +}
191 +
192 +static u16
193 +ar40xx_phy_mmd_read(struct ar40xx_priv *priv, u32 phy_id,
194 + u16 mmd_num, u16 reg_id)
195 +{
196 + u16 value;
197 + struct mii_bus *bus = priv->mii_bus;
198 +
199 + mutex_lock(&bus->mdio_lock);
200 + bus->write(bus, phy_id,
201 + AR40XX_MII_ATH_MMD_ADDR, mmd_num);
202 + bus->write(bus, phy_id,
203 + AR40XX_MII_ATH_MMD_DATA, reg_id);
204 + bus->write(bus, phy_id,
205 + AR40XX_MII_ATH_MMD_ADDR,
206 + 0x4000 | mmd_num);
207 + value = bus->read(bus, phy_id, AR40XX_MII_ATH_MMD_DATA);
208 + mutex_unlock(&bus->mdio_lock);
209 + return value;
210 +}
211 +
212 +/* Start of swconfig support */
213 +
214 +static void
215 +ar40xx_phy_poll_reset(struct ar40xx_priv *priv)
216 +{
217 + u32 i, in_reset, retries = 500;
218 + struct mii_bus *bus = priv->mii_bus;
219 +
220 + /* Assume RESET was recently issued to some or all of the phys */
221 + in_reset = GENMASK(AR40XX_NUM_PHYS - 1, 0);
222 +
223 + while (retries--) {
224 + /* 1ms should be plenty of time.
225 + * 802.3 spec allows for a max wait time of 500ms
226 + */
227 + usleep_range(1000, 2000);
228 +
229 + for (i = 0; i < AR40XX_NUM_PHYS; i++) {
230 + int val;
231 +
232 + /* skip devices which have completed reset */
233 + if (!(in_reset & BIT(i)))
234 + continue;
235 +
236 + val = mdiobus_read(bus, i, MII_BMCR);
237 + if (val < 0)
238 + continue;
239 +
240 + /* mark when phy is no longer in reset state */
241 + if (!(val & BMCR_RESET))
242 + in_reset &= ~BIT(i);
243 + }
244 +
245 + if (!in_reset)
246 + return;
247 + }
248 +
249 + dev_warn(&bus->dev, "Failed to reset all phys! (in_reset: 0x%x)\n",
250 + in_reset);
251 +}
252 +
253 +static void
254 +ar40xx_phy_init(struct ar40xx_priv *priv)
255 +{
256 + int i;
257 + struct mii_bus *bus;
258 + u16 val;
259 +
260 + bus = priv->mii_bus;
261 + for (i = 0; i < AR40XX_NUM_PORTS - 1; i++) {
262 + ar40xx_phy_dbg_read(priv, i, AR40XX_PHY_DEBUG_0, &val);
263 + val &= ~AR40XX_PHY_MANU_CTRL_EN;
264 + ar40xx_phy_dbg_write(priv, i, AR40XX_PHY_DEBUG_0, val);
265 + mdiobus_write(bus, i,
266 + MII_ADVERTISE, ADVERTISE_ALL |
267 + ADVERTISE_PAUSE_CAP |
268 + ADVERTISE_PAUSE_ASYM);
269 + mdiobus_write(bus, i, MII_CTRL1000, ADVERTISE_1000FULL);
270 + mdiobus_write(bus, i, MII_BMCR, BMCR_RESET | BMCR_ANENABLE);
271 + }
272 +
273 + ar40xx_phy_poll_reset(priv);
274 +}
275 +
276 +static void
277 +ar40xx_port_phy_linkdown(struct ar40xx_priv *priv)
278 +{
279 + struct mii_bus *bus;
280 + int i;
281 + u16 val;
282 +
283 + bus = priv->mii_bus;
284 + for (i = 0; i < AR40XX_NUM_PORTS - 1; i++) {
285 + mdiobus_write(bus, i, MII_CTRL1000, 0);
286 + mdiobus_write(bus, i, MII_ADVERTISE, 0);
287 + mdiobus_write(bus, i, MII_BMCR, BMCR_RESET | BMCR_ANENABLE);
288 + ar40xx_phy_dbg_read(priv, i, AR40XX_PHY_DEBUG_0, &val);
289 + val |= AR40XX_PHY_MANU_CTRL_EN;
290 + ar40xx_phy_dbg_write(priv, i, AR40XX_PHY_DEBUG_0, val);
291 + /* disable transmit */
292 + ar40xx_phy_dbg_read(priv, i, AR40XX_PHY_DEBUG_2, &val);
293 + val &= 0xf00f;
294 + ar40xx_phy_dbg_write(priv, i, AR40XX_PHY_DEBUG_2, val);
295 + }
296 +}
297 +
298 +static void
299 +ar40xx_set_mirror_regs(struct ar40xx_priv *priv)
300 +{
301 + int port;
302 +
303 + /* reset all mirror registers */
304 + ar40xx_rmw(priv, AR40XX_REG_FWD_CTRL0,
305 + AR40XX_FWD_CTRL0_MIRROR_PORT,
306 + (0xF << AR40XX_FWD_CTRL0_MIRROR_PORT_S));
307 + for (port = 0; port < AR40XX_NUM_PORTS; port++) {
308 + ar40xx_rmw(priv, AR40XX_REG_PORT_LOOKUP(port),
309 + AR40XX_PORT_LOOKUP_ING_MIRROR_EN, 0);
310 +
311 + ar40xx_rmw(priv, AR40XX_REG_PORT_HOL_CTRL1(port),
312 + AR40XX_PORT_HOL_CTRL1_EG_MIRROR_EN, 0);
313 + }
314 +
315 + /* now enable mirroring if necessary */
316 + if (priv->source_port >= AR40XX_NUM_PORTS ||
317 + priv->monitor_port >= AR40XX_NUM_PORTS ||
318 + priv->source_port == priv->monitor_port) {
319 + return;
320 + }
321 +
322 + ar40xx_rmw(priv, AR40XX_REG_FWD_CTRL0,
323 + AR40XX_FWD_CTRL0_MIRROR_PORT,
324 + (priv->monitor_port << AR40XX_FWD_CTRL0_MIRROR_PORT_S));
325 +
326 + if (priv->mirror_rx)
327 + ar40xx_rmw(priv, AR40XX_REG_PORT_LOOKUP(priv->source_port), 0,
328 + AR40XX_PORT_LOOKUP_ING_MIRROR_EN);
329 +
330 + if (priv->mirror_tx)
331 + ar40xx_rmw(priv, AR40XX_REG_PORT_HOL_CTRL1(priv->source_port),
332 + 0, AR40XX_PORT_HOL_CTRL1_EG_MIRROR_EN);
333 +}
334 +
335 +static int
336 +ar40xx_sw_get_ports(struct switch_dev *dev, struct switch_val *val)
337 +{
338 + struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
339 + u8 ports = priv->vlan_table[val->port_vlan];
340 + int i;
341 +
342 + val->len = 0;
343 + for (i = 0; i < dev->ports; i++) {
344 + struct switch_port *p;
345 +
346 + if (!(ports & BIT(i)))
347 + continue;
348 +
349 + p = &val->value.ports[val->len++];
350 + p->id = i;
351 + if ((priv->vlan_tagged & BIT(i)) ||
352 + (priv->pvid[i] != val->port_vlan))
353 + p->flags = BIT(SWITCH_PORT_FLAG_TAGGED);
354 + else
355 + p->flags = 0;
356 + }
357 + return 0;
358 +}
359 +
360 +static int
361 +ar40xx_sw_set_ports(struct switch_dev *dev, struct switch_val *val)
362 +{
363 + struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
364 + u8 *vt = &priv->vlan_table[val->port_vlan];
365 + int i;
366 +
367 + *vt = 0;
368 + for (i = 0; i < val->len; i++) {
369 + struct switch_port *p = &val->value.ports[i];
370 +
371 + if (p->flags & BIT(SWITCH_PORT_FLAG_TAGGED)) {
372 + if (val->port_vlan == priv->pvid[p->id])
373 + priv->vlan_tagged |= BIT(p->id);
374 + } else {
375 + priv->vlan_tagged &= ~BIT(p->id);
376 + priv->pvid[p->id] = val->port_vlan;
377 + }
378 +
379 + *vt |= BIT(p->id);
380 + }
381 + return 0;
382 +}
383 +
384 +static int
385 +ar40xx_reg_wait(struct ar40xx_priv *priv, u32 reg, u32 mask, u32 val,
386 + unsigned timeout)
387 +{
388 + int i;
389 +
390 + for (i = 0; i < timeout; i++) {
391 + u32 t;
392 +
393 + t = ar40xx_read(priv, reg);
394 + if ((t & mask) == val)
395 + return 0;
396 +
397 + usleep_range(1000, 2000);
398 + }
399 +
400 + return -ETIMEDOUT;
401 +}
402 +
403 +static int
404 +ar40xx_mib_op(struct ar40xx_priv *priv, u32 op)
405 +{
406 + int ret;
407 +
408 + lockdep_assert_held(&priv->mib_lock);
409 +
410 + /* Capture the hardware statistics for all ports */
411 + ar40xx_rmw(priv, AR40XX_REG_MIB_FUNC,
412 + AR40XX_MIB_FUNC, (op << AR40XX_MIB_FUNC_S));
413 +
414 + /* Wait for the capturing to complete. */
415 + ret = ar40xx_reg_wait(priv, AR40XX_REG_MIB_FUNC,
416 + AR40XX_MIB_BUSY, 0, 10);
417 +
418 + return ret;
419 +}
420 +
421 +static void
422 +ar40xx_mib_fetch_port_stat(struct ar40xx_priv *priv, int port, bool flush)
423 +{
424 + unsigned int base;
425 + u64 *mib_stats;
426 + int i;
427 + u32 num_mibs = ARRAY_SIZE(ar40xx_mibs);
428 +
429 + WARN_ON(port >= priv->dev.ports);
430 +
431 + lockdep_assert_held(&priv->mib_lock);
432 +
433 + base = AR40XX_REG_PORT_STATS_START +
434 + AR40XX_REG_PORT_STATS_LEN * port;
435 +
436 + mib_stats = &priv->mib_stats[port * num_mibs];
437 + if (flush) {
438 + u32 len;
439 +
440 + len = num_mibs * sizeof(*mib_stats);
441 + memset(mib_stats, 0, len);
442 + return;
443 + }
444 + for (i = 0; i < num_mibs; i++) {
445 + const struct ar40xx_mib_desc *mib;
446 + u64 t;
447 +
448 + mib = &ar40xx_mibs[i];
449 + t = ar40xx_read(priv, base + mib->offset);
450 + if (mib->size == 2) {
451 + u64 hi;
452 +
453 + hi = ar40xx_read(priv, base + mib->offset + 4);
454 + t |= hi << 32;
455 + }
456 +
457 + mib_stats[i] += t;
458 + }
459 +}
460 +
461 +static int
462 +ar40xx_mib_capture(struct ar40xx_priv *priv)
463 +{
464 + return ar40xx_mib_op(priv, AR40XX_MIB_FUNC_CAPTURE);
465 +}
466 +
467 +static int
468 +ar40xx_mib_flush(struct ar40xx_priv *priv)
469 +{
470 + return ar40xx_mib_op(priv, AR40XX_MIB_FUNC_FLUSH);
471 +}
472 +
473 +static int
474 +ar40xx_sw_set_reset_mibs(struct switch_dev *dev,
475 + const struct switch_attr *attr,
476 + struct switch_val *val)
477 +{
478 + struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
479 + unsigned int len;
480 + int ret;
481 + u32 num_mibs = ARRAY_SIZE(ar40xx_mibs);
482 +
483 + mutex_lock(&priv->mib_lock);
484 +
485 + len = priv->dev.ports * num_mibs * sizeof(*priv->mib_stats);
486 + memset(priv->mib_stats, 0, len);
487 + ret = ar40xx_mib_flush(priv);
488 +
489 + mutex_unlock(&priv->mib_lock);
490 + return ret;
491 +}
492 +
493 +static int
494 +ar40xx_sw_set_vlan(struct switch_dev *dev, const struct switch_attr *attr,
495 + struct switch_val *val)
496 +{
497 + struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
498 +
499 + priv->vlan = !!val->value.i;
500 + return 0;
501 +}
502 +
503 +static int
504 +ar40xx_sw_get_vlan(struct switch_dev *dev, const struct switch_attr *attr,
505 + struct switch_val *val)
506 +{
507 + struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
508 +
509 + val->value.i = priv->vlan;
510 + return 0;
511 +}
512 +
513 +static int
514 +ar40xx_sw_set_mirror_rx_enable(struct switch_dev *dev,
515 + const struct switch_attr *attr,
516 + struct switch_val *val)
517 +{
518 + struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
519 +
520 + mutex_lock(&priv->reg_mutex);
521 + priv->mirror_rx = !!val->value.i;
522 + ar40xx_set_mirror_regs(priv);
523 + mutex_unlock(&priv->reg_mutex);
524 +
525 + return 0;
526 +}
527 +
528 +static int
529 +ar40xx_sw_get_mirror_rx_enable(struct switch_dev *dev,
530 + const struct switch_attr *attr,
531 + struct switch_val *val)
532 +{
533 + struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
534 +
535 + mutex_lock(&priv->reg_mutex);
536 + val->value.i = priv->mirror_rx;
537 + mutex_unlock(&priv->reg_mutex);
538 + return 0;
539 +}
540 +
541 +static int
542 +ar40xx_sw_set_mirror_tx_enable(struct switch_dev *dev,
543 + const struct switch_attr *attr,
544 + struct switch_val *val)
545 +{
546 + struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
547 +
548 + mutex_lock(&priv->reg_mutex);
549 + priv->mirror_tx = !!val->value.i;
550 + ar40xx_set_mirror_regs(priv);
551 + mutex_unlock(&priv->reg_mutex);
552 +
553 + return 0;
554 +}
555 +
556 +static int
557 +ar40xx_sw_get_mirror_tx_enable(struct switch_dev *dev,
558 + const struct switch_attr *attr,
559 + struct switch_val *val)
560 +{
561 + struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
562 +
563 + mutex_lock(&priv->reg_mutex);
564 + val->value.i = priv->mirror_tx;
565 + mutex_unlock(&priv->reg_mutex);
566 + return 0;
567 +}
568 +
569 +static int
570 +ar40xx_sw_set_mirror_monitor_port(struct switch_dev *dev,
571 + const struct switch_attr *attr,
572 + struct switch_val *val)
573 +{
574 + struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
575 +
576 + mutex_lock(&priv->reg_mutex);
577 + priv->monitor_port = val->value.i;
578 + ar40xx_set_mirror_regs(priv);
579 + mutex_unlock(&priv->reg_mutex);
580 +
581 + return 0;
582 +}
583 +
584 +static int
585 +ar40xx_sw_get_mirror_monitor_port(struct switch_dev *dev,
586 + const struct switch_attr *attr,
587 + struct switch_val *val)
588 +{
589 + struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
590 +
591 + mutex_lock(&priv->reg_mutex);
592 + val->value.i = priv->monitor_port;
593 + mutex_unlock(&priv->reg_mutex);
594 + return 0;
595 +}
596 +
597 +static int
598 +ar40xx_sw_set_mirror_source_port(struct switch_dev *dev,
599 + const struct switch_attr *attr,
600 + struct switch_val *val)
601 +{
602 + struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
603 +
604 + mutex_lock(&priv->reg_mutex);
605 + priv->source_port = val->value.i;
606 + ar40xx_set_mirror_regs(priv);
607 + mutex_unlock(&priv->reg_mutex);
608 +
609 + return 0;
610 +}
611 +
612 +static int
613 +ar40xx_sw_get_mirror_source_port(struct switch_dev *dev,
614 + const struct switch_attr *attr,
615 + struct switch_val *val)
616 +{
617 + struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
618 +
619 + mutex_lock(&priv->reg_mutex);
620 + val->value.i = priv->source_port;
621 + mutex_unlock(&priv->reg_mutex);
622 + return 0;
623 +}
624 +
625 +static int
626 +ar40xx_sw_set_linkdown(struct switch_dev *dev,
627 + const struct switch_attr *attr,
628 + struct switch_val *val)
629 +{
630 + struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
631 +
632 + if (val->value.i == 1)
633 + ar40xx_port_phy_linkdown(priv);
634 + else
635 + ar40xx_phy_init(priv);
636 +
637 + return 0;
638 +}
639 +
640 +static int
641 +ar40xx_sw_set_port_reset_mib(struct switch_dev *dev,
642 + const struct switch_attr *attr,
643 + struct switch_val *val)
644 +{
645 + struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
646 + int port;
647 + int ret;
648 +
649 + port = val->port_vlan;
650 + if (port >= dev->ports)
651 + return -EINVAL;
652 +
653 + mutex_lock(&priv->mib_lock);
654 + ret = ar40xx_mib_capture(priv);
655 + if (ret)
656 + goto unlock;
657 +
658 + ar40xx_mib_fetch_port_stat(priv, port, true);
659 +
660 +unlock:
661 + mutex_unlock(&priv->mib_lock);
662 + return ret;
663 +}
664 +
665 +static int
666 +ar40xx_sw_get_port_mib(struct switch_dev *dev,
667 + const struct switch_attr *attr,
668 + struct switch_val *val)
669 +{
670 + struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
671 + u64 *mib_stats;
672 + int port;
673 + int ret;
674 + char *buf = priv->buf;
675 + int i, len = 0;
676 + u32 num_mibs = ARRAY_SIZE(ar40xx_mibs);
677 +
678 + port = val->port_vlan;
679 + if (port >= dev->ports)
680 + return -EINVAL;
681 +
682 + mutex_lock(&priv->mib_lock);
683 + ret = ar40xx_mib_capture(priv);
684 + if (ret)
685 + goto unlock;
686 +
687 + ar40xx_mib_fetch_port_stat(priv, port, false);
688 +
689 + len += snprintf(buf + len, sizeof(priv->buf) - len,
690 + "Port %d MIB counters\n",
691 + port);
692 +
693 + mib_stats = &priv->mib_stats[port * num_mibs];
694 + for (i = 0; i < num_mibs; i++)
695 + len += snprintf(buf + len, sizeof(priv->buf) - len,
696 + "%-12s: %llu\n",
697 + ar40xx_mibs[i].name,
698 + mib_stats[i]);
699 +
700 + val->value.s = buf;
701 + val->len = len;
702 +
703 +unlock:
704 + mutex_unlock(&priv->mib_lock);
705 + return ret;
706 +}
707 +
708 +static int
709 +ar40xx_sw_set_vid(struct switch_dev *dev, const struct switch_attr *attr,
710 + struct switch_val *val)
711 +{
712 + struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
713 +
714 + priv->vlan_id[val->port_vlan] = val->value.i;
715 + return 0;
716 +}
717 +
718 +static int
719 +ar40xx_sw_get_vid(struct switch_dev *dev, const struct switch_attr *attr,
720 + struct switch_val *val)
721 +{
722 + struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
723 +
724 + val->value.i = priv->vlan_id[val->port_vlan];
725 + return 0;
726 +}
727 +
728 +static int
729 +ar40xx_sw_get_pvid(struct switch_dev *dev, int port, int *vlan)
730 +{
731 + struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
732 + *vlan = priv->pvid[port];
733 + return 0;
734 +}
735 +
736 +static int
737 +ar40xx_sw_set_pvid(struct switch_dev *dev, int port, int vlan)
738 +{
739 + struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
740 +
741 + /* make sure no invalid PVIDs get set */
742 + if (vlan >= dev->vlans)
743 + return -EINVAL;
744 +
745 + priv->pvid[port] = vlan;
746 + return 0;
747 +}
748 +
749 +static void
750 +ar40xx_read_port_link(struct ar40xx_priv *priv, int port,
751 + struct switch_port_link *link)
752 +{
753 + u32 status;
754 + u32 speed;
755 +
756 + memset(link, 0, sizeof(*link));
757 +
758 + status = ar40xx_read(priv, AR40XX_REG_PORT_STATUS(port));
759 +
760 + link->aneg = !!(status & AR40XX_PORT_AUTO_LINK_EN);
761 + if (link->aneg || (port != AR40XX_PORT_CPU))
762 + link->link = !!(status & AR40XX_PORT_STATUS_LINK_UP);
763 + else
764 + link->link = true;
765 +
766 + if (!link->link)
767 + return;
768 +
769 + link->duplex = !!(status & AR40XX_PORT_DUPLEX);
770 + link->tx_flow = !!(status & AR40XX_PORT_STATUS_TXFLOW);
771 + link->rx_flow = !!(status & AR40XX_PORT_STATUS_RXFLOW);
772 +
773 + speed = (status & AR40XX_PORT_SPEED) >>
774 + AR40XX_PORT_STATUS_SPEED_S;
775 +
776 + switch (speed) {
777 + case AR40XX_PORT_SPEED_10M:
778 + link->speed = SWITCH_PORT_SPEED_10;
779 + break;
780 + case AR40XX_PORT_SPEED_100M:
781 + link->speed = SWITCH_PORT_SPEED_100;
782 + break;
783 + case AR40XX_PORT_SPEED_1000M:
784 + link->speed = SWITCH_PORT_SPEED_1000;
785 + break;
786 + default:
787 + link->speed = SWITCH_PORT_SPEED_UNKNOWN;
788 + break;
789 + }
790 +}
791 +
792 +static int
793 +ar40xx_sw_get_port_link(struct switch_dev *dev, int port,
794 + struct switch_port_link *link)
795 +{
796 + struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
797 +
798 + ar40xx_read_port_link(priv, port, link);
799 + return 0;
800 +}
801 +
802 +static const struct switch_attr ar40xx_sw_attr_globals[] = {
803 + {
804 + .type = SWITCH_TYPE_INT,
805 + .name = "enable_vlan",
806 + .description = "Enable VLAN mode",
807 + .set = ar40xx_sw_set_vlan,
808 + .get = ar40xx_sw_get_vlan,
809 + .max = 1
810 + },
811 + {
812 + .type = SWITCH_TYPE_NOVAL,
813 + .name = "reset_mibs",
814 + .description = "Reset all MIB counters",
815 + .set = ar40xx_sw_set_reset_mibs,
816 + },
817 + {
818 + .type = SWITCH_TYPE_INT,
819 + .name = "enable_mirror_rx",
820 + .description = "Enable mirroring of RX packets",
821 + .set = ar40xx_sw_set_mirror_rx_enable,
822 + .get = ar40xx_sw_get_mirror_rx_enable,
823 + .max = 1
824 + },
825 + {
826 + .type = SWITCH_TYPE_INT,
827 + .name = "enable_mirror_tx",
828 + .description = "Enable mirroring of TX packets",
829 + .set = ar40xx_sw_set_mirror_tx_enable,
830 + .get = ar40xx_sw_get_mirror_tx_enable,
831 + .max = 1
832 + },
833 + {
834 + .type = SWITCH_TYPE_INT,
835 + .name = "mirror_monitor_port",
836 + .description = "Mirror monitor port",
837 + .set = ar40xx_sw_set_mirror_monitor_port,
838 + .get = ar40xx_sw_get_mirror_monitor_port,
839 + .max = AR40XX_NUM_PORTS - 1
840 + },
841 + {
842 + .type = SWITCH_TYPE_INT,
843 + .name = "mirror_source_port",
844 + .description = "Mirror source port",
845 + .set = ar40xx_sw_set_mirror_source_port,
846 + .get = ar40xx_sw_get_mirror_source_port,
847 + .max = AR40XX_NUM_PORTS - 1
848 + },
849 + {
850 + .type = SWITCH_TYPE_INT,
851 + .name = "linkdown",
852 + .description = "Link down all the PHYs",
853 + .set = ar40xx_sw_set_linkdown,
854 + .max = 1
855 + },
856 +};
857 +
858 +static const struct switch_attr ar40xx_sw_attr_port[] = {
859 + {
860 + .type = SWITCH_TYPE_NOVAL,
861 + .name = "reset_mib",
862 + .description = "Reset single port MIB counters",
863 + .set = ar40xx_sw_set_port_reset_mib,
864 + },
865 + {
866 + .type = SWITCH_TYPE_STRING,
867 + .name = "mib",
868 + .description = "Get port's MIB counters",
869 + .set = NULL,
870 + .get = ar40xx_sw_get_port_mib,
871 + },
872 +};
873 +
874 +const struct switch_attr ar40xx_sw_attr_vlan[] = {
875 + {
876 + .type = SWITCH_TYPE_INT,
877 + .name = "vid",
878 + .description = "VLAN ID (0-4094)",
879 + .set = ar40xx_sw_set_vid,
880 + .get = ar40xx_sw_get_vid,
881 + .max = 4094,
882 + },
883 +};
884 +
885 +/* End of swconfig support */
886 +
887 +static int
888 +ar40xx_wait_bit(struct ar40xx_priv *priv, int reg, u32 mask, u32 val)
889 +{
890 + int timeout = 20;
891 + u32 t;
892 +
893 + while (1) {
894 + t = ar40xx_read(priv, reg);
895 + if ((t & mask) == val)
896 + return 0;
897 +
898 + if (timeout-- <= 0)
899 + break;
900 +
901 + usleep_range(10, 20);
902 + }
903 +
904 + pr_err("ar40xx: timeout for reg %08x: %08x & %08x != %08x\n",
905 + (unsigned int)reg, t, mask, val);
906 + return -ETIMEDOUT;
907 +}
908 +
909 +static int
910 +ar40xx_atu_flush(struct ar40xx_priv *priv)
911 +{
912 + int ret;
913 +
914 + ret = ar40xx_wait_bit(priv, AR40XX_REG_ATU_FUNC,
915 + AR40XX_ATU_FUNC_BUSY, 0);
916 + if (!ret)
917 + ar40xx_write(priv, AR40XX_REG_ATU_FUNC,
918 + AR40XX_ATU_FUNC_OP_FLUSH |
919 + AR40XX_ATU_FUNC_BUSY);
920 +
921 + return ret;
922 +}
923 +
924 +static void
925 +ar40xx_ess_reset(struct ar40xx_priv *priv)
926 +{
927 + reset_control_assert(priv->ess_rst);
928 + mdelay(10);
929 + reset_control_deassert(priv->ess_rst);
930 + /* Waiting for all inner tables init done.
931 + * It costs 5~10 ms.
932 + */
933 + mdelay(10);
934 +
935 + pr_info("ESS reset ok!\n");
936 +}
937 +
938 +/* Start of psgmii self test */
939 +
940 +static void
941 +ar40xx_malibu_psgmii_ess_reset(struct ar40xx_priv *priv)
942 +{
943 + u32 n;
944 + struct mii_bus *bus = priv->mii_bus;
945 + /* reset phy psgmii */
946 + /* fix phy psgmii RX 20bit */
947 + mdiobus_write(bus, 5, 0x0, 0x005b);
948 + /* reset phy psgmii */
949 + mdiobus_write(bus, 5, 0x0, 0x001b);
950 + /* release reset phy psgmii */
951 + mdiobus_write(bus, 5, 0x0, 0x005b);
952 +
953 + for (n = 0; n < AR40XX_PSGMII_CALB_NUM; n++) {
954 + u16 status;
955 +
956 + status = ar40xx_phy_mmd_read(priv, 5, 1, 0x28);
957 + if (status & BIT(0))
958 + break;
959 + /* Polling interval to check PSGMII PLL in malibu is ready
960 + * the worst time is 8.67ms
961 + * for 25MHz reference clock
962 + * [512+(128+2048)*49]*80ns+100us
963 + */
964 + mdelay(2);
965 + }
966 +
967 + /*check malibu psgmii calibration done end..*/
968 +
969 + /*freeze phy psgmii RX CDR*/
970 + mdiobus_write(bus, 5, 0x1a, 0x2230);
971 +
972 + ar40xx_ess_reset(priv);
973 +
974 + /*check psgmii calibration done start*/
975 + for (n = 0; n < AR40XX_PSGMII_CALB_NUM; n++) {
976 + u32 status;
977 +
978 + status = ar40xx_psgmii_read(priv, 0xa0);
979 + if (status & BIT(0))
980 + break;
981 + /* Polling interval to check PSGMII PLL in ESS is ready */
982 + mdelay(2);
983 + }
984 +
985 + /* check dakota psgmii calibration done end..*/
986 +
987 + /* release phy psgmii RX CDR */
988 + mdiobus_write(bus, 5, 0x1a, 0x3230);
989 + /* release phy psgmii RX 20bit */
990 + mdiobus_write(bus, 5, 0x0, 0x005f);
991 +}
992 +
993 +static void
994 +ar40xx_psgmii_single_phy_testing(struct ar40xx_priv *priv, int phy)
995 +{
996 + int j;
997 + u32 tx_ok, tx_error;
998 + u32 rx_ok, rx_error;
999 + u32 tx_ok_high16;
1000 + u32 rx_ok_high16;
1001 + u32 tx_all_ok, rx_all_ok;
1002 + struct mii_bus *bus = priv->mii_bus;
1003 +
1004 + mdiobus_write(bus, phy, 0x0, 0x9000);
1005 + mdiobus_write(bus, phy, 0x0, 0x4140);
1006 +
1007 + for (j = 0; j < AR40XX_PSGMII_CALB_NUM; j++) {
1008 + u16 status;
1009 +
1010 + status = mdiobus_read(bus, phy, 0x11);
1011 + if (status & AR40XX_PHY_SPEC_STATUS_LINK)
1012 + break;
1013 + /* the polling interval to check whether the PHY link is up
1014 + * maxwait_timer: 750 ms +/-10 ms
1015 + * minwait_timer : 1 us +/- 0.1us
1016 + * time resides in minwait_timer ~ maxwait_timer
1017 + * see IEEE 802.3 section 40.4.5.2
1018 + */
1019 + mdelay(8);
1020 + }
1021 +
1022 + /* enable check */
1023 + ar40xx_phy_mmd_write(priv, phy, 7, 0x8029, 0x0000);
1024 + ar40xx_phy_mmd_write(priv, phy, 7, 0x8029, 0x0003);
1025 +
1026 + /* start traffic */
1027 + ar40xx_phy_mmd_write(priv, phy, 7, 0x8020, 0xa000);
1028 + /* wait for all traffic end
1029 + * 4096(pkt num)*1524(size)*8ns(125MHz)=49.9ms
1030 + */
1031 + mdelay(50);
1032 +
1033 + /* check counter */
1034 + tx_ok = ar40xx_phy_mmd_read(priv, phy, 7, 0x802e);
1035 + tx_ok_high16 = ar40xx_phy_mmd_read(priv, phy, 7, 0x802d);
1036 + tx_error = ar40xx_phy_mmd_read(priv, phy, 7, 0x802f);
1037 + rx_ok = ar40xx_phy_mmd_read(priv, phy, 7, 0x802b);
1038 + rx_ok_high16 = ar40xx_phy_mmd_read(priv, phy, 7, 0x802a);
1039 + rx_error = ar40xx_phy_mmd_read(priv, phy, 7, 0x802c);
1040 + tx_all_ok = tx_ok + (tx_ok_high16 << 16);
1041 + rx_all_ok = rx_ok + (rx_ok_high16 << 16);
1042 + if (tx_all_ok == 0x1000 && tx_error == 0) {
1043 + /* success */
1044 + priv->phy_t_status &= (~BIT(phy));
1045 + } else {
1046 + pr_info("PHY %d single test PSGMII issue happen!\n", phy);
1047 + priv->phy_t_status |= BIT(phy);
1048 + }
1049 +
1050 + mdiobus_write(bus, phy, 0x0, 0x1840);
1051 +}
1052 +
1053 +static void
1054 +ar40xx_psgmii_all_phy_testing(struct ar40xx_priv *priv)
1055 +{
1056 + int phy, j;
1057 + struct mii_bus *bus = priv->mii_bus;
1058 +
1059 + mdiobus_write(bus, 0x1f, 0x0, 0x9000);
1060 + mdiobus_write(bus, 0x1f, 0x0, 0x4140);
1061 +
1062 + for (j = 0; j < AR40XX_PSGMII_CALB_NUM; j++) {
1063 + for (phy = 0; phy < AR40XX_NUM_PORTS - 1; phy++) {
1064 + u16 status;
1065 +
1066 + status = mdiobus_read(bus, phy, 0x11);
1067 + if (!(status & BIT(10)))
1068 + break;
1069 + }
1070 +
1071 + if (phy >= (AR40XX_NUM_PORTS - 1))
1072 + break;
1073 + /* The polling interval to check whether the PHY link is up */
1074 + mdelay(8);
1075 + }
1076 + /* enable check */
1077 + ar40xx_phy_mmd_write(priv, 0x1f, 7, 0x8029, 0x0000);
1078 + ar40xx_phy_mmd_write(priv, 0x1f, 7, 0x8029, 0x0003);
1079 +
1080 + /* start traffic */
1081 + ar40xx_phy_mmd_write(priv, 0x1f, 7, 0x8020, 0xa000);
1082 + /* wait for all traffic end
1083 + * 4096(pkt num)*1524(size)*8ns(125MHz)=49.9ms
1084 + */
1085 + mdelay(50);
1086 +
1087 + for (phy = 0; phy < AR40XX_NUM_PORTS - 1; phy++) {
1088 + u32 tx_ok, tx_error;
1089 + u32 rx_ok, rx_error;
1090 + u32 tx_ok_high16;
1091 + u32 rx_ok_high16;
1092 + u32 tx_all_ok, rx_all_ok;
1093 +
1094 + /* check counter */
1095 + tx_ok = ar40xx_phy_mmd_read(priv, phy, 7, 0x802e);
1096 + tx_ok_high16 = ar40xx_phy_mmd_read(priv, phy, 7, 0x802d);
1097 + tx_error = ar40xx_phy_mmd_read(priv, phy, 7, 0x802f);
1098 + rx_ok = ar40xx_phy_mmd_read(priv, phy, 7, 0x802b);
1099 + rx_ok_high16 = ar40xx_phy_mmd_read(priv, phy, 7, 0x802a);
1100 + rx_error = ar40xx_phy_mmd_read(priv, phy, 7, 0x802c);
1101 + tx_all_ok = tx_ok + (tx_ok_high16<<16);
1102 + rx_all_ok = rx_ok + (rx_ok_high16<<16);
1103 + if (tx_all_ok == 0x1000 && tx_error == 0) {
1104 + /* success */
1105 + priv->phy_t_status &= ~BIT(phy + 8);
1106 + } else {
1107 + pr_info("PHY%d test see issue!\n", phy);
1108 + priv->phy_t_status |= BIT(phy + 8);
1109 + }
1110 + }
1111 +
1112 + pr_debug("PHY all test 0x%x \r\n", priv->phy_t_status);
1113 +}
1114 +
1115 +void
1116 +ar40xx_psgmii_self_test(struct ar40xx_priv *priv)
1117 +{
1118 + u32 i, phy;
1119 + struct mii_bus *bus = priv->mii_bus;
1120 +
1121 + ar40xx_malibu_psgmii_ess_reset(priv);
1122 +
1123 + /* switch to access MII reg for copper */
1124 + mdiobus_write(bus, 4, 0x1f, 0x8500);
1125 + for (phy = 0; phy < AR40XX_NUM_PORTS - 1; phy++) {
1126 + /*enable phy mdio broadcast write*/
1127 + ar40xx_phy_mmd_write(priv, phy, 7, 0x8028, 0x801f);
1128 + }
1129 + /* force no link by power down */
1130 + mdiobus_write(bus, 0x1f, 0x0, 0x1840);
1131 + /*packet number*/
1132 + ar40xx_phy_mmd_write(priv, 0x1f, 7, 0x8021, 0x1000);
1133 + ar40xx_phy_mmd_write(priv, 0x1f, 7, 0x8062, 0x05e0);
1134 +
1135 + /*fix mdi status */
1136 + mdiobus_write(bus, 0x1f, 0x10, 0x6800);
1137 + for (i = 0; i < AR40XX_PSGMII_CALB_NUM; i++) {
1138 + priv->phy_t_status = 0;
1139 +
1140 + for (phy = 0; phy < AR40XX_NUM_PORTS - 1; phy++) {
1141 + ar40xx_rmw(priv, AR40XX_REG_PORT_LOOKUP(phy + 1),
1142 + AR40XX_PORT_LOOKUP_LOOPBACK,
1143 + AR40XX_PORT_LOOKUP_LOOPBACK);
1144 + }
1145 +
1146 + for (phy = 0; phy < AR40XX_NUM_PORTS - 1; phy++)
1147 + ar40xx_psgmii_single_phy_testing(priv, phy);
1148 +
1149 + ar40xx_psgmii_all_phy_testing(priv);
1150 +
1151 + if (priv->phy_t_status)
1152 + ar40xx_malibu_psgmii_ess_reset(priv);
1153 + else
1154 + break;
1155 + }
1156 +
1157 + if (i >= AR40XX_PSGMII_CALB_NUM)
1158 + pr_info("PSGMII cannot recover\n");
1159 + else
1160 + pr_debug("PSGMII recovered after %d times reset\n", i);
1161 +
1162 + /* configuration recover */
1163 + /* packet number */
1164 + ar40xx_phy_mmd_write(priv, 0x1f, 7, 0x8021, 0x0);
1165 + /* disable check */
1166 + ar40xx_phy_mmd_write(priv, 0x1f, 7, 0x8029, 0x0);
1167 + /* disable traffic */
1168 + ar40xx_phy_mmd_write(priv, 0x1f, 7, 0x8020, 0x0);
1169 +}
1170 +
1171 +void
1172 +ar40xx_psgmii_self_test_clean(struct ar40xx_priv *priv)
1173 +{
1174 + int phy;
1175 + struct mii_bus *bus = priv->mii_bus;
1176 +
1177 + /* disable phy internal loopback */
1178 + mdiobus_write(bus, 0x1f, 0x10, 0x6860);
1179 + mdiobus_write(bus, 0x1f, 0x0, 0x9040);
1180 +
1181 + for (phy = 0; phy < AR40XX_NUM_PORTS - 1; phy++) {
1182 + /* disable mac loop back */
1183 + ar40xx_rmw(priv, AR40XX_REG_PORT_LOOKUP(phy + 1),
1184 + AR40XX_PORT_LOOKUP_LOOPBACK, 0);
1185 + /* disable phy mdio broadcast write */
1186 + ar40xx_phy_mmd_write(priv, phy, 7, 0x8028, 0x001f);
1187 + }
1188 +
1189 + /* clear fdb entry */
1190 + ar40xx_atu_flush(priv);
1191 +}
1192 +
1193 +/* End of psgmii self test */
1194 +
1195 +static void
1196 +ar40xx_mac_mode_init(struct ar40xx_priv *priv, u32 mode)
1197 +{
1198 + if (mode == PORT_WRAPPER_PSGMII) {
1199 + ar40xx_psgmii_write(priv, AR40XX_PSGMII_MODE_CONTROL, 0x2200);
1200 + ar40xx_psgmii_write(priv, AR40XX_PSGMIIPHY_TX_CONTROL, 0x8380);
1201 + }
1202 +}
1203 +
1204 +static
1205 +int ar40xx_cpuport_setup(struct ar40xx_priv *priv)
1206 +{
1207 + u32 t;
1208 +
1209 + t = AR40XX_PORT_STATUS_TXFLOW |
1210 + AR40XX_PORT_STATUS_RXFLOW |
1211 + AR40XX_PORT_TXHALF_FLOW |
1212 + AR40XX_PORT_DUPLEX |
1213 + AR40XX_PORT_SPEED_1000M;
1214 + ar40xx_write(priv, AR40XX_REG_PORT_STATUS(0), t);
1215 + usleep_range(10, 20);
1216 +
1217 + t |= AR40XX_PORT_TX_EN |
1218 + AR40XX_PORT_RX_EN;
1219 + ar40xx_write(priv, AR40XX_REG_PORT_STATUS(0), t);
1220 +
1221 + return 0;
1222 +}
1223 +
1224 +static void
1225 +ar40xx_init_port(struct ar40xx_priv *priv, int port)
1226 +{
1227 + u32 t;
1228 +
1229 + ar40xx_rmw(priv, AR40XX_REG_PORT_STATUS(port),
1230 + AR40XX_PORT_AUTO_LINK_EN, 0);
1231 +
1232 + /* CPU port is setting headers to limit output ports */
1233 + if (port == 0)
1234 + ar40xx_write(priv, AR40XX_REG_PORT_HEADER(port), 0x8);
1235 + else
1236 + ar40xx_write(priv, AR40XX_REG_PORT_HEADER(port), 0);
1237 +
1238 + ar40xx_write(priv, AR40XX_REG_PORT_VLAN0(port), 0);
1239 +
1240 + t = AR40XX_PORT_VLAN1_OUT_MODE_UNTOUCH << AR40XX_PORT_VLAN1_OUT_MODE_S;
1241 + ar40xx_write(priv, AR40XX_REG_PORT_VLAN1(port), t);
1242 +
1243 + t = AR40XX_PORT_LOOKUP_LEARN;
1244 + t |= AR40XX_PORT_STATE_FORWARD << AR40XX_PORT_LOOKUP_STATE_S;
1245 + ar40xx_write(priv, AR40XX_REG_PORT_LOOKUP(port), t);
1246 +}
1247 +
1248 +void
1249 +ar40xx_init_globals(struct ar40xx_priv *priv)
1250 +{
1251 + u32 t;
1252 +
1253 + /* enable CPU port and disable mirror port */
1254 + t = AR40XX_FWD_CTRL0_CPU_PORT_EN |
1255 + AR40XX_FWD_CTRL0_MIRROR_PORT;
1256 + ar40xx_write(priv, AR40XX_REG_FWD_CTRL0, t);
1257 +
1258 + /* forward multicast and broadcast frames to CPU */
1259 + t = (AR40XX_PORTS_ALL << AR40XX_FWD_CTRL1_UC_FLOOD_S) |
1260 + (AR40XX_PORTS_ALL << AR40XX_FWD_CTRL1_MC_FLOOD_S) |
1261 + (AR40XX_PORTS_ALL << AR40XX_FWD_CTRL1_BC_FLOOD_S);
1262 + ar40xx_write(priv, AR40XX_REG_FWD_CTRL1, t);
1263 +
1264 + /* enable jumbo frames */
1265 + ar40xx_rmw(priv, AR40XX_REG_MAX_FRAME_SIZE,
1266 + AR40XX_MAX_FRAME_SIZE_MTU, 9018 + 8 + 2);
1267 +
1268 + /* Enable MIB counters */
1269 + ar40xx_rmw(priv, AR40XX_REG_MODULE_EN, 0,
1270 + AR40XX_MODULE_EN_MIB);
1271 +
1272 + /* Disable AZ */
1273 + ar40xx_write(priv, AR40XX_REG_EEE_CTRL, 0);
1274 +
1275 + /* set flowctrl threshold for cpu port */
1276 + t = (AR40XX_PORT0_FC_THRESH_ON_DFLT << 16) |
1277 + AR40XX_PORT0_FC_THRESH_OFF_DFLT;
1278 + ar40xx_write(priv, AR40XX_REG_PORT_FLOWCTRL_THRESH(0), t);
1279 +
1280 + /* set service tag to 802.1q */
1281 + t = ETH_P_8021Q | AR40XX_ESS_SERVICE_TAG_STAG;
1282 + ar40xx_write(priv, AR40XX_ESS_SERVICE_TAG, t);
1283 +}
1284 +
1285 +static void
1286 +ar40xx_malibu_init(struct ar40xx_priv *priv)
1287 +{
1288 + int i;
1289 + struct mii_bus *bus;
1290 + u16 val;
1291 +
1292 + bus = priv->mii_bus;
1293 +
1294 + /* WAR (workaround) to enable AZ transmitting ability */
1295 + ar40xx_phy_mmd_write(priv, AR40XX_PSGMII_ID, 1,
1296 + AR40XX_MALIBU_PSGMII_MODE_CTRL,
1297 + AR40XX_MALIBU_PHY_PSGMII_MODE_CTRL_ADJUST_VAL);
1298 + for (i = 0; i < AR40XX_NUM_PORTS - 1; i++) {
1299 + /* change malibu control_dac */
1300 + val = ar40xx_phy_mmd_read(priv, i, 7,
1301 + AR40XX_MALIBU_PHY_MMD7_DAC_CTRL);
1302 + val &= ~AR40XX_MALIBU_DAC_CTRL_MASK;
1303 + val |= AR40XX_MALIBU_DAC_CTRL_VALUE;
1304 + ar40xx_phy_mmd_write(priv, i, 7,
1305 + AR40XX_MALIBU_PHY_MMD7_DAC_CTRL, val);
1306 + if (i == AR40XX_MALIBU_PHY_LAST_ADDR) {
1307 + /* to avoid going into hibernation */
1308 + val = ar40xx_phy_mmd_read(priv, i, 3,
1309 + AR40XX_MALIBU_PHY_RLP_CTRL);
1310 + val &= (~(1<<1));
1311 + ar40xx_phy_mmd_write(priv, i, 3,
1312 + AR40XX_MALIBU_PHY_RLP_CTRL, val);
1313 + }
1314 + }
1315 +
1316 + /* adjust psgmii serdes tx amp */
1317 + mdiobus_write(bus, AR40XX_PSGMII_ID, AR40XX_PSGMII_TX_DRIVER_1_CTRL,
1318 + AR40XX_MALIBU_PHY_PSGMII_REDUCE_SERDES_TX_AMP);
1319 +}
1320 +
1321 +static int
1322 +ar40xx_hw_init(struct ar40xx_priv *priv)
1323 +{
1324 + u32 i;
1325 +
1326 + ar40xx_ess_reset(priv);
1327 +
1328 + if (priv->mii_bus)
1329 + ar40xx_malibu_init(priv);
1330 + else
1331 + return -1;
1332 +
1333 + ar40xx_psgmii_self_test(priv);
1334 + ar40xx_psgmii_self_test_clean(priv);
1335 +
1336 + ar40xx_mac_mode_init(priv, priv->mac_mode);
1337 +
1338 + for (i = 0; i < priv->dev.ports; i++)
1339 + ar40xx_init_port(priv, i);
1340 +
1341 + ar40xx_init_globals(priv);
1342 +
1343 + return 0;
1344 +}
1345 +
1346 +/* Start of qm error WAR */
1347 +
1348 +static
1349 +int ar40xx_force_1g_full(struct ar40xx_priv *priv, u32 port_id)
1350 +{
1351 + u32 reg;
1352 +
1353 + if (port_id < 0 || port_id > 6)
1354 + return -1;
1355 +
1356 + reg = AR40XX_REG_PORT_STATUS(port_id);
1357 + return ar40xx_rmw(priv, reg, AR40XX_PORT_SPEED,
1358 + (AR40XX_PORT_SPEED_1000M | AR40XX_PORT_DUPLEX));
1359 +}
1360 +
1361 +static
1362 +int ar40xx_get_qm_status(struct ar40xx_priv *priv,
1363 + u32 port_id, u32 *qm_buffer_err)
1364 +{
1365 + u32 reg;
1366 + u32 qm_val;
1367 +
1368 + if (port_id < 1 || port_id > 5) {
1369 + *qm_buffer_err = 0;
1370 + return -1;
1371 + }
1372 +
1373 + if (port_id < 4) {
1374 + reg = AR40XX_REG_QM_PORT0_3_QNUM;
1375 + ar40xx_write(priv, AR40XX_REG_QM_DEBUG_ADDR, reg);
1376 + qm_val = ar40xx_read(priv, AR40XX_REG_QM_DEBUG_VALUE);
1377 + /* every 8 bits for each port */
1378 + *qm_buffer_err = (qm_val >> (port_id * 8)) & 0xFF;
1379 + } else {
1380 + reg = AR40XX_REG_QM_PORT4_6_QNUM;
1381 + ar40xx_write(priv, AR40XX_REG_QM_DEBUG_ADDR, reg);
1382 + qm_val = ar40xx_read(priv, AR40XX_REG_QM_DEBUG_VALUE);
1383 + /* every 8 bits for each port */
1384 + *qm_buffer_err = (qm_val >> ((port_id-4) * 8)) & 0xFF;
1385 + }
1386 +
1387 + return 0;
1388 +}
1389 +
1390 +static void
1391 +ar40xx_sw_mac_polling_task(struct ar40xx_priv *priv)
1392 +{
1393 + static int task_count;
1394 + u32 i;
1395 + u32 reg, value;
1396 + u32 link, speed, duplex;
1397 + u32 qm_buffer_err;
1398 + u16 port_phy_status[AR40XX_NUM_PORTS];
1399 + static u32 qm_err_cnt[AR40XX_NUM_PORTS] = {0, 0, 0, 0, 0, 0};
1400 + static u32 link_cnt[AR40XX_NUM_PORTS] = {0, 0, 0, 0, 0, 0};
1401 + struct mii_bus *bus = NULL;
1402 +
1403 + if (!priv || !priv->mii_bus)
1404 + return;
1405 +
1406 + bus = priv->mii_bus;
1407 +
1408 + ++task_count;
1409 +
1410 + for (i = 1; i < AR40XX_NUM_PORTS; ++i) {
1411 + port_phy_status[i] =
1412 + mdiobus_read(bus, i-1, AR40XX_PHY_SPEC_STATUS);
1413 + speed = link = duplex = port_phy_status[i];
1414 + speed &= AR40XX_PHY_SPEC_STATUS_SPEED;
1415 + speed >>= 14;
1416 + link &= AR40XX_PHY_SPEC_STATUS_LINK;
1417 + link >>= 10;
1418 + duplex &= AR40XX_PHY_SPEC_STATUS_DUPLEX;
1419 + duplex >>= 13;
1420 +
1421 + if (link != priv->ar40xx_port_old_link[i]) {
1422 + ++link_cnt[i];
1423 + /* Up --> Down */
1424 + if ((priv->ar40xx_port_old_link[i] ==
1425 + AR40XX_PORT_LINK_UP) &&
1426 + (link == AR40XX_PORT_LINK_DOWN)) {
1427 + /* LINK_EN disable(MAC force mode)*/
1428 + reg = AR40XX_REG_PORT_STATUS(i);
1429 + ar40xx_rmw(priv, reg,
1430 + AR40XX_PORT_AUTO_LINK_EN, 0);
1431 +
1432 + /* Check queue buffer */
1433 + qm_err_cnt[i] = 0;
1434 + ar40xx_get_qm_status(priv, i, &qm_buffer_err);
1435 + if (qm_buffer_err) {
1436 + priv->ar40xx_port_qm_buf[i] =
1437 + AR40XX_QM_NOT_EMPTY;
1438 + } else {
1439 + u16 phy_val = 0;
1440 +
1441 + priv->ar40xx_port_qm_buf[i] =
1442 + AR40XX_QM_EMPTY;
1443 + ar40xx_force_1g_full(priv, i);
1444 + /* Ref: QCA8337 datasheet. Clearing
1445 + * MANU_CTRL_EN prevents the phy from
1446 + * getting stuck in 100BT mode when
1447 + * bringing up the link
1448 + */
1449 + ar40xx_phy_dbg_read(priv, i-1,
1450 + AR40XX_PHY_DEBUG_0,
1451 + &phy_val);
1452 + phy_val &= (~AR40XX_PHY_MANU_CTRL_EN);
1453 + ar40xx_phy_dbg_write(priv, i-1,
1454 + AR40XX_PHY_DEBUG_0,
1455 + phy_val);
1456 + }
1457 + priv->ar40xx_port_old_link[i] = link;
1458 + } else if ((priv->ar40xx_port_old_link[i] ==
1459 + AR40XX_PORT_LINK_DOWN) &&
1460 + (link == AR40XX_PORT_LINK_UP)) {
1461 + /* Down --> Up */
1462 + if (priv->port_link_up[i] < 1) {
1463 + ++priv->port_link_up[i];
1464 + } else {
1465 + /* Change port status */
1466 + reg = AR40XX_REG_PORT_STATUS(i);
1467 + value = ar40xx_read(priv, reg);
1468 + priv->port_link_up[i] = 0;
1469 +
1470 + value &= ~(AR40XX_PORT_DUPLEX |
1471 + AR40XX_PORT_SPEED);
1472 + value |= speed | (duplex ? BIT(6) : 0);
1473 + ar40xx_write(priv, reg, value);
1474 + /* clock switch need such time
1475 + * to avoid glitch
1476 + */
1477 + usleep_range(100, 200);
1478 +
1479 + value |= AR40XX_PORT_AUTO_LINK_EN;
1480 + ar40xx_write(priv, reg, value);
1481 + /* HW need such time to make sure link
1482 + * stable before enable MAC
1483 + */
1484 + usleep_range(100, 200);
1485 +
1486 + if (speed == AR40XX_PORT_SPEED_100M) {
1487 + u16 phy_val = 0;
1488 + /* Enable @100M, if down to 10M
1489 + * clock will change smoothly
1490 + */
1491 + ar40xx_phy_dbg_read(priv, i-1,
1492 + 0,
1493 + &phy_val);
1494 + phy_val |=
1495 + AR40XX_PHY_MANU_CTRL_EN;
1496 + ar40xx_phy_dbg_write(priv, i-1,
1497 + 0,
1498 + phy_val);
1499 + }
1500 + priv->ar40xx_port_old_link[i] = link;
1501 + }
1502 + }
1503 + }
1504 +
1505 + if (priv->ar40xx_port_qm_buf[i] == AR40XX_QM_NOT_EMPTY) {
1506 + /* Check QM */
1507 + ar40xx_get_qm_status(priv, i, &qm_buffer_err);
1508 + if (qm_buffer_err) {
1509 + ++qm_err_cnt[i];
1510 + } else {
1511 + priv->ar40xx_port_qm_buf[i] =
1512 + AR40XX_QM_EMPTY;
1513 + qm_err_cnt[i] = 0;
1514 + ar40xx_force_1g_full(priv, i);
1515 + }
1516 + }
1517 + }
1518 +}
1519 +
1520 +static void
1521 +ar40xx_qm_err_check_work_task(struct work_struct *work)
1522 +{
1523 + struct ar40xx_priv *priv = container_of(work, struct ar40xx_priv,
1524 + qm_dwork.work);
1525 +
1526 + mutex_lock(&priv->qm_lock);
1527 +
1528 + ar40xx_sw_mac_polling_task(priv);
1529 +
1530 + mutex_unlock(&priv->qm_lock);
1531 +
1532 + schedule_delayed_work(&priv->qm_dwork,
1533 + msecs_to_jiffies(AR40XX_QM_WORK_DELAY));
1534 +}
1535 +
1536 +static int
1537 +ar40xx_qm_err_check_work_start(struct ar40xx_priv *priv)
1538 +{
1539 + mutex_init(&priv->qm_lock);
1540 +
1541 + INIT_DELAYED_WORK(&priv->qm_dwork, ar40xx_qm_err_check_work_task);
1542 +
1543 + schedule_delayed_work(&priv->qm_dwork,
1544 + msecs_to_jiffies(AR40XX_QM_WORK_DELAY));
1545 +
1546 + return 0;
1547 +}
1548 +
1549 +/* End of qm error WAR */
1550 +
1551 +static int
1552 +ar40xx_vlan_init(struct ar40xx_priv *priv)
1553 +{
1554 + int port;
1555 + unsigned long bmp;
1556 +
1557 + /* By default Enable VLAN */
1558 + priv->vlan = 1;
1559 + priv->vlan_table[AR40XX_LAN_VLAN] = priv->cpu_bmp | priv->lan_bmp;
1560 + priv->vlan_table[AR40XX_WAN_VLAN] = priv->cpu_bmp | priv->wan_bmp;
1561 + priv->vlan_tagged = priv->cpu_bmp;
1562 + bmp = priv->lan_bmp;
1563 + for_each_set_bit(port, &bmp, AR40XX_NUM_PORTS)
1564 + priv->pvid[port] = AR40XX_LAN_VLAN;
1565 +
1566 + bmp = priv->wan_bmp;
1567 + for_each_set_bit(port, &bmp, AR40XX_NUM_PORTS)
1568 + priv->pvid[port] = AR40XX_WAN_VLAN;
1569 +
1570 + return 0;
1571 +}
1572 +
1573 +static void
1574 +ar40xx_mib_work_func(struct work_struct *work)
1575 +{
1576 + struct ar40xx_priv *priv;
1577 + int err;
1578 +
1579 + priv = container_of(work, struct ar40xx_priv, mib_work.work);
1580 +
1581 + mutex_lock(&priv->mib_lock);
1582 +
1583 + err = ar40xx_mib_capture(priv);
1584 + if (err)
1585 + goto next_port;
1586 +
1587 + ar40xx_mib_fetch_port_stat(priv, priv->mib_next_port, false);
1588 +
1589 +next_port:
1590 + priv->mib_next_port++;
1591 + if (priv->mib_next_port >= priv->dev.ports)
1592 + priv->mib_next_port = 0;
1593 +
1594 + mutex_unlock(&priv->mib_lock);
1595 +
1596 + schedule_delayed_work(&priv->mib_work,
1597 + msecs_to_jiffies(AR40XX_MIB_WORK_DELAY));
1598 +}
1599 +
1600 +static void
1601 +ar40xx_setup_port(struct ar40xx_priv *priv, int port, u32 members)
1602 +{
1603 + u32 t;
1604 + u32 egress, ingress;
1605 + u32 pvid = priv->vlan_id[priv->pvid[port]];
1606 +
1607 + if (priv->vlan) {
1608 + if (priv->vlan_tagged & BIT(port))
1609 + egress = AR40XX_PORT_VLAN1_OUT_MODE_TAG;
1610 + else
1611 + egress = AR40XX_PORT_VLAN1_OUT_MODE_UNMOD;
1612 +
1613 + ingress = AR40XX_IN_SECURE;
1614 + } else {
1615 + egress = AR40XX_PORT_VLAN1_OUT_MODE_UNTOUCH;
1616 + ingress = AR40XX_IN_PORT_ONLY;
1617 + }
1618 +
1619 + t = pvid << AR40XX_PORT_VLAN0_DEF_SVID_S;
1620 + t |= pvid << AR40XX_PORT_VLAN0_DEF_CVID_S;
1621 + ar40xx_write(priv, AR40XX_REG_PORT_VLAN0(port), t);
1622 +
1623 + t = egress << AR40XX_PORT_VLAN1_OUT_MODE_S;
1624 +
1625 + /* set CPU port to core port */
1626 + if (port == 0)
1627 + t |= AR40XX_PORT_VLAN1_CORE_PORT;
1628 +
1629 + if (priv->vlan_tagged & BIT(port))
1630 + t |= AR40XX_PORT_VLAN1_PORT_VLAN_PROP;
1631 + else
1632 + t |= AR40XX_PORT_VLAN1_PORT_TLS_MODE;
1633 +
1634 + ar40xx_write(priv, AR40XX_REG_PORT_VLAN1(port), t);
1635 +
1636 + t = members;
1637 + t |= AR40XX_PORT_LOOKUP_LEARN;
1638 + t |= ingress << AR40XX_PORT_LOOKUP_IN_MODE_S;
1639 + t |= AR40XX_PORT_STATE_FORWARD << AR40XX_PORT_LOOKUP_STATE_S;
1640 + ar40xx_write(priv, AR40XX_REG_PORT_LOOKUP(port), t);
1641 +}
1642 +
1643 +static void
1644 +ar40xx_vtu_op(struct ar40xx_priv *priv, u32 op, u32 val)
1645 +{
1646 + if (ar40xx_wait_bit(priv, AR40XX_REG_VTU_FUNC1,
1647 + AR40XX_VTU_FUNC1_BUSY, 0))
1648 + return;
1649 +
1650 + if ((op & AR40XX_VTU_FUNC1_OP) == AR40XX_VTU_FUNC1_OP_LOAD)
1651 + ar40xx_write(priv, AR40XX_REG_VTU_FUNC0, val);
1652 +
1653 + op |= AR40XX_VTU_FUNC1_BUSY;
1654 + ar40xx_write(priv, AR40XX_REG_VTU_FUNC1, op);
1655 +}
1656 +
1657 +static void
1658 +ar40xx_vtu_load_vlan(struct ar40xx_priv *priv, u32 vid, u32 port_mask)
1659 +{
1660 + u32 op;
1661 + u32 val;
1662 + int i;
1663 +
1664 + op = AR40XX_VTU_FUNC1_OP_LOAD | (vid << AR40XX_VTU_FUNC1_VID_S);
1665 + val = AR40XX_VTU_FUNC0_VALID | AR40XX_VTU_FUNC0_IVL;
1666 + for (i = 0; i < AR40XX_NUM_PORTS; i++) {
1667 + u32 mode;
1668 +
1669 + if ((port_mask & BIT(i)) == 0)
1670 + mode = AR40XX_VTU_FUNC0_EG_MODE_NOT;
1671 + else if (priv->vlan == 0)
1672 + mode = AR40XX_VTU_FUNC0_EG_MODE_KEEP;
1673 + else if ((priv->vlan_tagged & BIT(i)) ||
1674 + (priv->vlan_id[priv->pvid[i]] != vid))
1675 + mode = AR40XX_VTU_FUNC0_EG_MODE_TAG;
1676 + else
1677 + mode = AR40XX_VTU_FUNC0_EG_MODE_UNTAG;
1678 +
1679 + val |= mode << AR40XX_VTU_FUNC0_EG_MODE_S(i);
1680 + }
1681 + ar40xx_vtu_op(priv, op, val);
1682 +}
1683 +
1684 +static void
1685 +ar40xx_vtu_flush(struct ar40xx_priv *priv)
1686 +{
1687 + ar40xx_vtu_op(priv, AR40XX_VTU_FUNC1_OP_FLUSH, 0);
1688 +}
1689 +
1690 +static int
1691 +ar40xx_sw_hw_apply(struct switch_dev *dev)
1692 +{
1693 + struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
1694 + u8 portmask[AR40XX_NUM_PORTS];
1695 + int i, j;
1696 +
1697 + mutex_lock(&priv->reg_mutex);
1698 + /* flush all vlan entries */
1699 + ar40xx_vtu_flush(priv);
1700 +
1701 + memset(portmask, 0, sizeof(portmask));
1702 + if (priv->vlan) {
1703 + for (j = 0; j < AR40XX_MAX_VLANS; j++) {
1704 + u8 vp = priv->vlan_table[j];
1705 +
1706 + if (!vp)
1707 + continue;
1708 +
1709 + for (i = 0; i < dev->ports; i++) {
1710 + u8 mask = BIT(i);
1711 +
1712 + if (vp & mask)
1713 + portmask[i] |= vp & ~mask;
1714 + }
1715 +
1716 + ar40xx_vtu_load_vlan(priv, priv->vlan_id[j],
1717 + priv->vlan_table[j]);
1718 + }
1719 + } else {
1720 + /* 8021q vlan disabled */
1721 + for (i = 0; i < dev->ports; i++) {
1722 + if (i == AR40XX_PORT_CPU)
1723 + continue;
1724 +
1725 + portmask[i] = BIT(AR40XX_PORT_CPU);
1726 + portmask[AR40XX_PORT_CPU] |= BIT(i);
1727 + }
1728 + }
1729 +
1730 + /* update the port destination mask registers and tag settings */
1731 + for (i = 0; i < dev->ports; i++)
1732 + ar40xx_setup_port(priv, i, portmask[i]);
1733 +
1734 + ar40xx_set_mirror_regs(priv);
1735 +
1736 + mutex_unlock(&priv->reg_mutex);
1737 + return 0;
1738 +}
1739 +
1740 +static int
1741 +ar40xx_sw_reset_switch(struct switch_dev *dev)
1742 +{
1743 + struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
1744 + int i, rv;
1745 +
1746 + mutex_lock(&priv->reg_mutex);
1747 + memset(&priv->vlan, 0, sizeof(struct ar40xx_priv) -
1748 + offsetof(struct ar40xx_priv, vlan));
1749 +
1750 + for (i = 0; i < AR40XX_MAX_VLANS; i++)
1751 + priv->vlan_id[i] = i;
1752 +
1753 + ar40xx_vlan_init(priv);
1754 +
1755 + priv->mirror_rx = false;
1756 + priv->mirror_tx = false;
1757 + priv->source_port = 0;
1758 + priv->monitor_port = 0;
1759 +
1760 + mutex_unlock(&priv->reg_mutex);
1761 +
1762 + rv = ar40xx_sw_hw_apply(dev);
1763 + return rv;
1764 +}
1765 +
1766 +static int
1767 +ar40xx_start(struct ar40xx_priv *priv)
1768 +{
1769 + int ret;
1770 +
1771 + ret = ar40xx_hw_init(priv);
1772 + if (ret)
1773 + return ret;
1774 +
1775 + ret = ar40xx_sw_reset_switch(&priv->dev);
1776 + if (ret)
1777 + return ret;
1778 +
1779 + /* at last, setup cpu port */
1780 + ret = ar40xx_cpuport_setup(priv);
1781 + if (ret)
1782 + return ret;
1783 +
1784 + schedule_delayed_work(&priv->mib_work,
1785 + msecs_to_jiffies(AR40XX_MIB_WORK_DELAY));
1786 +
1787 + ar40xx_qm_err_check_work_start(priv);
1788 +
1789 + return 0;
1790 +}
1791 +
1792 +static const struct switch_dev_ops ar40xx_sw_ops = {
1793 + .attr_global = {
1794 + .attr = ar40xx_sw_attr_globals,
1795 + .n_attr = ARRAY_SIZE(ar40xx_sw_attr_globals),
1796 + },
1797 + .attr_port = {
1798 + .attr = ar40xx_sw_attr_port,
1799 + .n_attr = ARRAY_SIZE(ar40xx_sw_attr_port),
1800 + },
1801 + .attr_vlan = {
1802 + .attr = ar40xx_sw_attr_vlan,
1803 + .n_attr = ARRAY_SIZE(ar40xx_sw_attr_vlan),
1804 + },
1805 + .get_port_pvid = ar40xx_sw_get_pvid,
1806 + .set_port_pvid = ar40xx_sw_set_pvid,
1807 + .get_vlan_ports = ar40xx_sw_get_ports,
1808 + .set_vlan_ports = ar40xx_sw_set_ports,
1809 + .apply_config = ar40xx_sw_hw_apply,
1810 + .reset_switch = ar40xx_sw_reset_switch,
1811 + .get_port_link = ar40xx_sw_get_port_link,
1812 +};
1813 +
1814 +/* Start of phy driver support */
1815 +
1816 +static const u32 ar40xx_phy_ids[] = {
1817 + 0x004dd0b1,
1818 + 0x004dd0b2, /* AR40xx */
1819 +};
1820 +
1821 +static bool
1822 +ar40xx_phy_match(u32 phy_id)
1823 +{
1824 + int i;
1825 +
1826 + for (i = 0; i < ARRAY_SIZE(ar40xx_phy_ids); i++)
1827 + if (phy_id == ar40xx_phy_ids[i])
1828 + return true;
1829 +
1830 + return false;
1831 +}
1832 +
1833 +static bool
1834 +is_ar40xx_phy(struct mii_bus *bus)
1835 +{
1836 + unsigned i;
1837 +
1838 + for (i = 0; i < 4; i++) {
1839 + u32 phy_id;
1840 +
1841 + phy_id = mdiobus_read(bus, i, MII_PHYSID1) << 16;
1842 + phy_id |= mdiobus_read(bus, i, MII_PHYSID2);
1843 + if (!ar40xx_phy_match(phy_id))
1844 + return false;
1845 + }
1846 +
1847 + return true;
1848 +}
1849 +
1850 +static int
1851 +ar40xx_phy_probe(struct phy_device *phydev)
1852 +{
1853 + if (!is_ar40xx_phy(phydev->mdio.bus))
1854 + return -ENODEV;
1855 +
1856 + ar40xx_priv->mii_bus = phydev->mdio.bus;
1857 + phydev->priv = ar40xx_priv;
1858 + if (phydev->mdio.addr == 0)
1859 + ar40xx_priv->phy = phydev;
1860 +
1861 + linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, phydev->supported);
1862 + linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, phydev->advertising);
1863 + return 0;
1864 +}
1865 +
1866 +static void
1867 +ar40xx_phy_remove(struct phy_device *phydev)
1868 +{
1869 + ar40xx_priv->mii_bus = NULL;
1870 + phydev->priv = NULL;
1871 +}
1872 +
1873 +static int
1874 +ar40xx_phy_config_init(struct phy_device *phydev)
1875 +{
1876 + return 0;
1877 +}
1878 +
1879 +static int
1880 +ar40xx_phy_read_status(struct phy_device *phydev)
1881 +{
1882 + if (phydev->mdio.addr != 0)
1883 + return genphy_read_status(phydev);
1884 +
1885 + return 0;
1886 +}
1887 +
1888 +static int
1889 +ar40xx_phy_config_aneg(struct phy_device *phydev)
1890 +{
1891 + if (phydev->mdio.addr == 0)
1892 + return 0;
1893 +
1894 + return genphy_config_aneg(phydev);
1895 +}
1896 +
1897 +static struct phy_driver ar40xx_phy_driver = {
1898 + .phy_id = 0x004d0000,
1899 + .name = "QCA Malibu",
1900 + .phy_id_mask = 0xffff0000,
1901 + .features = PHY_GBIT_FEATURES,
1902 + .probe = ar40xx_phy_probe,
1903 + .remove = ar40xx_phy_remove,
1904 + .config_init = ar40xx_phy_config_init,
1905 + .config_aneg = ar40xx_phy_config_aneg,
1906 + .read_status = ar40xx_phy_read_status,
1907 +};
1908 +
1909 +static uint16_t ar40xx_gpio_get_phy(unsigned int offset)
1910 +{
1911 + return offset / 4;
1912 +}
1913 +
1914 +static uint16_t ar40xx_gpio_get_reg(unsigned int offset)
1915 +{
1916 + return 0x8074 + offset % 4;
1917 +}
1918 +
1919 +static void ar40xx_gpio_set(struct gpio_chip *gc, unsigned int offset,
1920 + int value)
1921 +{
1922 + struct ar40xx_priv *priv = gpiochip_get_data(gc);
1923 +
1924 + ar40xx_phy_mmd_write(priv, ar40xx_gpio_get_phy(offset), 0x7,
1925 + ar40xx_gpio_get_reg(offset),
1926 + value ? 0xA000 : 0x8000);
1927 +}
1928 +
1929 +static int ar40xx_gpio_get(struct gpio_chip *gc, unsigned offset)
1930 +{
1931 + struct ar40xx_priv *priv = gpiochip_get_data(gc);
1932 +
1933 + return ar40xx_phy_mmd_read(priv, ar40xx_gpio_get_phy(offset), 0x7,
1934 + ar40xx_gpio_get_reg(offset)) == 0xA000;
1935 +}
1936 +
1937 +static int ar40xx_gpio_get_dir(struct gpio_chip *gc, unsigned offset)
1938 +{
1939 + return 0; /* only out direction */
1940 +}
1941 +
1942 +static int ar40xx_gpio_dir_out(struct gpio_chip *gc, unsigned offset,
1943 + int value)
1944 +{
1945 + /*
1946 + * the direction out value is used to set the initial value.
1947 + * support of this function is required by leds-gpio.c
1948 + */
1949 + ar40xx_gpio_set(gc, offset, value);
1950 + return 0;
1951 +}
1952 +
1953 +static void ar40xx_register_gpio(struct device *pdev,
1954 + struct ar40xx_priv *priv,
1955 + struct device_node *switch_node)
1956 +{
1957 + struct gpio_chip *gc;
1958 + int err;
1959 +
1960 + gc = devm_kzalloc(pdev, sizeof(*gc), GFP_KERNEL);
1961 + if (!gc)
1962 + return;
1963 +
1964 + gc->label = "ar40xx_gpio",
1965 + gc->base = -1,
1966 + gc->ngpio = 5 /* mmd 0 - 4 */ * 4 /* 0x8074 - 0x8077 */,
1967 + gc->parent = pdev;
1968 + gc->owner = THIS_MODULE;
1969 +
1970 + gc->get_direction = ar40xx_gpio_get_dir;
1971 + gc->direction_output = ar40xx_gpio_dir_out;
1972 + gc->get = ar40xx_gpio_get;
1973 + gc->set = ar40xx_gpio_set;
1974 + gc->can_sleep = true;
1975 + gc->label = priv->dev.name;
1976 + gc->of_node = switch_node;
1977 +
1978 + err = devm_gpiochip_add_data(pdev, gc, priv);
1979 + if (err != 0)
1980 + dev_err(pdev, "Failed to register gpio %d.\n", err);
1981 +}
1982 +
1983 +/* End of phy driver support */
1984 +
1985 +/* Platform driver probe function */
1986 +
1987 +static int ar40xx_probe(struct platform_device *pdev)
1988 +{
1989 + struct device_node *switch_node;
1990 + struct device_node *psgmii_node;
1991 + const __be32 *mac_mode;
1992 + struct clk *ess_clk;
1993 + struct switch_dev *swdev;
1994 + struct ar40xx_priv *priv;
1995 + u32 len;
1996 + u32 num_mibs;
1997 + struct resource psgmii_base = {0};
1998 + struct resource switch_base = {0};
1999 + int ret;
2000 +
2001 + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
2002 + if (!priv)
2003 + return -ENOMEM;
2004 +
2005 + platform_set_drvdata(pdev, priv);
2006 + ar40xx_priv = priv;
2007 +
2008 + switch_node = of_node_get(pdev->dev.of_node);
2009 + if (of_address_to_resource(switch_node, 0, &switch_base) != 0)
2010 + return -EIO;
2011 +
2012 + priv->hw_addr = devm_ioremap_resource(&pdev->dev, &switch_base);
2013 + if (IS_ERR(priv->hw_addr)) {
2014 + dev_err(&pdev->dev, "Failed to ioremap switch_base!\n");
2015 + return PTR_ERR(priv->hw_addr);
2016 + }
2017 +
2018 + /*psgmii dts get*/
2019 + psgmii_node = of_find_node_by_name(NULL, "ess-psgmii");
2020 + if (!psgmii_node) {
2021 + dev_err(&pdev->dev, "Failed to find ess-psgmii node!\n");
2022 + return -EINVAL;
2023 + }
2024 +
2025 + if (of_address_to_resource(psgmii_node, 0, &psgmii_base) != 0)
2026 + return -EIO;
2027 +
2028 + priv->psgmii_hw_addr = devm_ioremap_resource(&pdev->dev, &psgmii_base);
2029 + if (IS_ERR(priv->psgmii_hw_addr)) {
2030 + dev_err(&pdev->dev, "psgmii ioremap fail!\n");
2031 + return PTR_ERR(priv->psgmii_hw_addr);
2032 + }
2033 +
2034 + mac_mode = of_get_property(switch_node, "switch_mac_mode", &len);
2035 + if (!mac_mode) {
2036 + dev_err(&pdev->dev, "Failed to read switch_mac_mode\n");
2037 + return -EINVAL;
2038 + }
2039 + priv->mac_mode = be32_to_cpup(mac_mode);
2040 +
2041 + ess_clk = of_clk_get_by_name(switch_node, "ess_clk");
2042 + if (ess_clk)
2043 + clk_prepare_enable(ess_clk);
2044 +
2045 + priv->ess_rst = devm_reset_control_get(&pdev->dev, "ess_rst");
2046 + if (IS_ERR(priv->ess_rst)) {
2047 + dev_err(&pdev->dev, "Failed to get ess_rst control!\n");
2048 + return PTR_ERR(priv->ess_rst);
2049 + }
2050 +
2051 + if (of_property_read_u32(switch_node, "switch_cpu_bmp",
2052 + &priv->cpu_bmp) ||
2053 + of_property_read_u32(switch_node, "switch_lan_bmp",
2054 + &priv->lan_bmp) ||
2055 + of_property_read_u32(switch_node, "switch_wan_bmp",
2056 + &priv->wan_bmp)) {
2057 + dev_err(&pdev->dev, "Failed to read port properties\n");
2058 + return -EIO;
2059 + }
2060 +
2061 + ret = phy_driver_register(&ar40xx_phy_driver, THIS_MODULE);
2062 + if (ret) {
2063 + dev_err(&pdev->dev, "Failed to register ar40xx phy driver!\n");
2064 + return -EIO;
2065 + }
2066 +
2067 + mutex_init(&priv->reg_mutex);
2068 + mutex_init(&priv->mib_lock);
2069 + INIT_DELAYED_WORK(&priv->mib_work, ar40xx_mib_work_func);
2070 +
2071 + /* register switch */
2072 + swdev = &priv->dev;
2073 +
2074 + if (priv->mii_bus == NULL) {
2075 + dev_err(&pdev->dev, "Probe failed - Missing PHYs!\n");
2076 + ret = -ENODEV;
2077 + goto err_missing_phy;
2078 + }
2079 +
2080 + swdev->alias = dev_name(&priv->mii_bus->dev);
2081 +
2082 + swdev->cpu_port = AR40XX_PORT_CPU;
2083 + swdev->name = "QCA AR40xx";
2084 + swdev->vlans = AR40XX_MAX_VLANS;
2085 + swdev->ports = AR40XX_NUM_PORTS;
2086 + swdev->ops = &ar40xx_sw_ops;
2087 + ret = register_switch(swdev, NULL);
2088 + if (ret)
2089 + goto err_unregister_phy;
2090 +
2091 + num_mibs = ARRAY_SIZE(ar40xx_mibs);
2092 + len = priv->dev.ports * num_mibs *
2093 + sizeof(*priv->mib_stats);
2094 + priv->mib_stats = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
2095 + if (!priv->mib_stats) {
2096 + ret = -ENOMEM;
2097 + goto err_unregister_switch;
2098 + }
2099 +
2100 + ar40xx_start(priv);
2101 +
2102 + if (of_property_read_bool(switch_node, "gpio-controller"))
2103 + ar40xx_register_gpio(&pdev->dev, ar40xx_priv, switch_node);
2104 +
2105 + return 0;
2106 +
2107 +err_unregister_switch:
2108 + unregister_switch(&priv->dev);
2109 +err_unregister_phy:
2110 + phy_driver_unregister(&ar40xx_phy_driver);
2111 +err_missing_phy:
2112 + platform_set_drvdata(pdev, NULL);
2113 + return ret;
2114 +}
2115 +
2116 +static int ar40xx_remove(struct platform_device *pdev)
2117 +{
2118 + struct ar40xx_priv *priv = platform_get_drvdata(pdev);
2119 +
2120 + cancel_delayed_work_sync(&priv->qm_dwork);
2121 + cancel_delayed_work_sync(&priv->mib_work);
2122 +
2123 + unregister_switch(&priv->dev);
2124 +
2125 + phy_driver_unregister(&ar40xx_phy_driver);
2126 +
2127 + return 0;
2128 +}
2129 +
2130 +static const struct of_device_id ar40xx_of_mtable[] = {
2131 + {.compatible = "qcom,ess-switch" },
2132 + {}
2133 +};
2134 +
2135 +struct platform_driver ar40xx_drv = {
2136 + .probe = ar40xx_probe,
2137 + .remove = ar40xx_remove,
2138 + .driver = {
2139 + .name = "ar40xx",
2140 + .of_match_table = ar40xx_of_mtable,
2141 + },
2142 +};
2143 +
2144 +module_platform_driver(ar40xx_drv);
2145 +
2146 +MODULE_DESCRIPTION("IPQ40XX ESS driver");
2147 +MODULE_LICENSE("Dual BSD/GPL");
2148 --- /dev/null
2149 +++ b/drivers/net/phy/ar40xx.h
2150 @@ -0,0 +1,342 @@
2151 +/*
2152 + * Copyright (c) 2016, The Linux Foundation. All rights reserved.
2153 + *
2154 + * Permission to use, copy, modify, and/or distribute this software for
2155 + * any purpose with or without fee is hereby granted, provided that the
2156 + * above copyright notice and this permission notice appear in all copies.
2157 + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
2158 + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
2159 + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
2160 + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
2161 + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
2162 + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
2163 + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
2164 + */
2165 +
2166 +#ifndef __AR40XX_H
2167 +#define __AR40XX_H
2168 +
2169 +#define AR40XX_MAX_VLANS 128
2170 +#define AR40XX_NUM_PORTS 6
2171 +#define AR40XX_NUM_PHYS 5
2172 +
2173 +#define BITS(_s, _n) (((1UL << (_n)) - 1) << _s)
2174 +
2175 +struct ar40xx_priv {
2176 + struct switch_dev dev;
2177 +
2178 + u8 __iomem *hw_addr;
2179 + u8 __iomem *psgmii_hw_addr;
2180 + u32 mac_mode;
2181 + struct reset_control *ess_rst;
2182 + u32 cpu_bmp;
2183 + u32 lan_bmp;
2184 + u32 wan_bmp;
2185 +
2186 + struct mii_bus *mii_bus;
2187 + struct phy_device *phy;
2188 +
2189 + /* mutex for qm task */
2190 + struct mutex qm_lock;
2191 + struct delayed_work qm_dwork;
2192 + u32 port_link_up[AR40XX_NUM_PORTS];
2193 + u32 ar40xx_port_old_link[AR40XX_NUM_PORTS];
2194 + u32 ar40xx_port_qm_buf[AR40XX_NUM_PORTS];
2195 +
2196 + u32 phy_t_status;
2197 +
2198 + /* mutex for switch reg access */
2199 + struct mutex reg_mutex;
2200 +
2201 + /* mutex for mib task */
2202 + struct mutex mib_lock;
2203 + struct delayed_work mib_work;
2204 + int mib_next_port;
2205 + u64 *mib_stats;
2206 +
2207 + char buf[2048];
2208 +
2209 + /* all fields below will be cleared on reset */
2210 + bool vlan;
2211 + u16 vlan_id[AR40XX_MAX_VLANS];
2212 + u8 vlan_table[AR40XX_MAX_VLANS];
2213 + u8 vlan_tagged;
2214 + u16 pvid[AR40XX_NUM_PORTS];
2215 +
2216 + /* mirror */
2217 + bool mirror_rx;
2218 + bool mirror_tx;
2219 + int source_port;
2220 + int monitor_port;
2221 +};
2222 +
2223 +#define AR40XX_PORT_LINK_UP 1
2224 +#define AR40XX_PORT_LINK_DOWN 0
2225 +#define AR40XX_QM_NOT_EMPTY 1
2226 +#define AR40XX_QM_EMPTY 0
2227 +
2228 +#define AR40XX_LAN_VLAN 1
2229 +#define AR40XX_WAN_VLAN 2
2230 +
2231 +enum ar40xx_port_wrapper_cfg {
2232 + PORT_WRAPPER_PSGMII = 0,
2233 +};
2234 +
2235 +struct ar40xx_mib_desc {
2236 + u32 size;
2237 + u32 offset;
2238 + const char *name;
2239 +};
2240 +
2241 +#define AR40XX_PORT_CPU 0
2242 +
2243 +#define AR40XX_PSGMII_MODE_CONTROL 0x1b4
2244 +#define AR40XX_PSGMII_ATHR_CSCO_MODE_25M BIT(0)
2245 +
2246 +#define AR40XX_PSGMIIPHY_TX_CONTROL 0x288
2247 +
2248 +#define AR40XX_MII_ATH_MMD_ADDR 0x0d
2249 +#define AR40XX_MII_ATH_MMD_DATA 0x0e
2250 +#define AR40XX_MII_ATH_DBG_ADDR 0x1d
2251 +#define AR40XX_MII_ATH_DBG_DATA 0x1e
2252 +
2253 +#define AR40XX_STATS_RXBROAD 0x00
2254 +#define AR40XX_STATS_RXPAUSE 0x04
2255 +#define AR40XX_STATS_RXMULTI 0x08
2256 +#define AR40XX_STATS_RXFCSERR 0x0c
2257 +#define AR40XX_STATS_RXALIGNERR 0x10
2258 +#define AR40XX_STATS_RXRUNT 0x14
2259 +#define AR40XX_STATS_RXFRAGMENT 0x18
2260 +#define AR40XX_STATS_RX64BYTE 0x1c
2261 +#define AR40XX_STATS_RX128BYTE 0x20
2262 +#define AR40XX_STATS_RX256BYTE 0x24
2263 +#define AR40XX_STATS_RX512BYTE 0x28
2264 +#define AR40XX_STATS_RX1024BYTE 0x2c
2265 +#define AR40XX_STATS_RX1518BYTE 0x30
2266 +#define AR40XX_STATS_RXMAXBYTE 0x34
2267 +#define AR40XX_STATS_RXTOOLONG 0x38
2268 +#define AR40XX_STATS_RXGOODBYTE 0x3c
2269 +#define AR40XX_STATS_RXBADBYTE 0x44
2270 +#define AR40XX_STATS_RXOVERFLOW 0x4c
2271 +#define AR40XX_STATS_FILTERED 0x50
2272 +#define AR40XX_STATS_TXBROAD 0x54
2273 +#define AR40XX_STATS_TXPAUSE 0x58
2274 +#define AR40XX_STATS_TXMULTI 0x5c
2275 +#define AR40XX_STATS_TXUNDERRUN 0x60
2276 +#define AR40XX_STATS_TX64BYTE 0x64
2277 +#define AR40XX_STATS_TX128BYTE 0x68
2278 +#define AR40XX_STATS_TX256BYTE 0x6c
2279 +#define AR40XX_STATS_TX512BYTE 0x70
2280 +#define AR40XX_STATS_TX1024BYTE 0x74
2281 +#define AR40XX_STATS_TX1518BYTE 0x78
2282 +#define AR40XX_STATS_TXMAXBYTE 0x7c
2283 +#define AR40XX_STATS_TXOVERSIZE 0x80
2284 +#define AR40XX_STATS_TXBYTE 0x84
2285 +#define AR40XX_STATS_TXCOLLISION 0x8c
2286 +#define AR40XX_STATS_TXABORTCOL 0x90
2287 +#define AR40XX_STATS_TXMULTICOL 0x94
2288 +#define AR40XX_STATS_TXSINGLECOL 0x98
2289 +#define AR40XX_STATS_TXEXCDEFER 0x9c
2290 +#define AR40XX_STATS_TXDEFER 0xa0
2291 +#define AR40XX_STATS_TXLATECOL 0xa4
2292 +
2293 +#define AR40XX_REG_MODULE_EN 0x030
2294 +#define AR40XX_MODULE_EN_MIB BIT(0)
2295 +
2296 +#define AR40XX_REG_MIB_FUNC 0x034
2297 +#define AR40XX_MIB_BUSY BIT(17)
2298 +#define AR40XX_MIB_CPU_KEEP BIT(20)
2299 +#define AR40XX_MIB_FUNC BITS(24, 3)
2300 +#define AR40XX_MIB_FUNC_S 24
2301 +#define AR40XX_MIB_FUNC_NO_OP 0x0
2302 +#define AR40XX_MIB_FUNC_FLUSH 0x1
2303 +
2304 +#define AR40XX_ESS_SERVICE_TAG 0x48
2305 +#define AR40XX_ESS_SERVICE_TAG_STAG BIT(17)
2306 +
2307 +#define AR40XX_REG_PORT_STATUS(_i) (0x07c + (_i) * 4)
2308 +#define AR40XX_PORT_SPEED BITS(0, 2)
2309 +#define AR40XX_PORT_STATUS_SPEED_S 0
2310 +#define AR40XX_PORT_TX_EN BIT(2)
2311 +#define AR40XX_PORT_RX_EN BIT(3)
2312 +#define AR40XX_PORT_STATUS_TXFLOW BIT(4)
2313 +#define AR40XX_PORT_STATUS_RXFLOW BIT(5)
2314 +#define AR40XX_PORT_DUPLEX BIT(6)
2315 +#define AR40XX_PORT_TXHALF_FLOW BIT(7)
2316 +#define AR40XX_PORT_STATUS_LINK_UP BIT(8)
2317 +#define AR40XX_PORT_AUTO_LINK_EN BIT(9)
2318 +#define AR40XX_PORT_STATUS_FLOW_CONTROL BIT(12)
2319 +
2320 +#define AR40XX_REG_MAX_FRAME_SIZE 0x078
2321 +#define AR40XX_MAX_FRAME_SIZE_MTU BITS(0, 14)
2322 +
2323 +#define AR40XX_REG_PORT_HEADER(_i) (0x09c + (_i) * 4)
2324 +
2325 +#define AR40XX_REG_EEE_CTRL 0x100
2326 +#define AR40XX_EEE_CTRL_DISABLE_PHY(_i) BIT(4 + (_i) * 2)
2327 +
2328 +#define AR40XX_REG_PORT_VLAN0(_i) (0x420 + (_i) * 0x8)
2329 +#define AR40XX_PORT_VLAN0_DEF_SVID BITS(0, 12)
2330 +#define AR40XX_PORT_VLAN0_DEF_SVID_S 0
2331 +#define AR40XX_PORT_VLAN0_DEF_CVID BITS(16, 12)
2332 +#define AR40XX_PORT_VLAN0_DEF_CVID_S 16
2333 +
2334 +#define AR40XX_REG_PORT_VLAN1(_i) (0x424 + (_i) * 0x8)
2335 +#define AR40XX_PORT_VLAN1_CORE_PORT BIT(9)
2336 +#define AR40XX_PORT_VLAN1_PORT_TLS_MODE BIT(7)
2337 +#define AR40XX_PORT_VLAN1_PORT_VLAN_PROP BIT(6)
2338 +#define AR40XX_PORT_VLAN1_OUT_MODE BITS(12, 2)
2339 +#define AR40XX_PORT_VLAN1_OUT_MODE_S 12
2340 +#define AR40XX_PORT_VLAN1_OUT_MODE_UNMOD 0
2341 +#define AR40XX_PORT_VLAN1_OUT_MODE_UNTAG 1
2342 +#define AR40XX_PORT_VLAN1_OUT_MODE_TAG 2
2343 +#define AR40XX_PORT_VLAN1_OUT_MODE_UNTOUCH 3
2344 +
2345 +#define AR40XX_REG_VTU_FUNC0 0x0610
2346 +#define AR40XX_VTU_FUNC0_EG_MODE BITS(4, 14)
2347 +#define AR40XX_VTU_FUNC0_EG_MODE_S(_i) (4 + (_i) * 2)
2348 +#define AR40XX_VTU_FUNC0_EG_MODE_KEEP 0
2349 +#define AR40XX_VTU_FUNC0_EG_MODE_UNTAG 1
2350 +#define AR40XX_VTU_FUNC0_EG_MODE_TAG 2
2351 +#define AR40XX_VTU_FUNC0_EG_MODE_NOT 3
2352 +#define AR40XX_VTU_FUNC0_IVL BIT(19)
2353 +#define AR40XX_VTU_FUNC0_VALID BIT(20)
2354 +
2355 +#define AR40XX_REG_VTU_FUNC1 0x0614
2356 +#define AR40XX_VTU_FUNC1_OP BITS(0, 3)
2357 +#define AR40XX_VTU_FUNC1_OP_NOOP 0
2358 +#define AR40XX_VTU_FUNC1_OP_FLUSH 1
2359 +#define AR40XX_VTU_FUNC1_OP_LOAD 2
2360 +#define AR40XX_VTU_FUNC1_OP_PURGE 3
2361 +#define AR40XX_VTU_FUNC1_OP_REMOVE_PORT 4
2362 +#define AR40XX_VTU_FUNC1_OP_GET_NEXT 5
2363 +#define AR40XX7_VTU_FUNC1_OP_GET_ONE 6
2364 +#define AR40XX_VTU_FUNC1_FULL BIT(4)
2365 +#define AR40XX_VTU_FUNC1_PORT BITS(8, 4)
2366 +#define AR40XX_VTU_FUNC1_PORT_S 8
2367 +#define AR40XX_VTU_FUNC1_VID BITS(16, 12)
2368 +#define AR40XX_VTU_FUNC1_VID_S 16
2369 +#define AR40XX_VTU_FUNC1_BUSY BIT(31)
2370 +
2371 +#define AR40XX_REG_FWD_CTRL0 0x620
2372 +#define AR40XX_FWD_CTRL0_CPU_PORT_EN BIT(10)
2373 +#define AR40XX_FWD_CTRL0_MIRROR_PORT BITS(4, 4)
2374 +#define AR40XX_FWD_CTRL0_MIRROR_PORT_S 4
2375 +
2376 +#define AR40XX_REG_FWD_CTRL1 0x624
2377 +#define AR40XX_FWD_CTRL1_UC_FLOOD BITS(0, 7)
2378 +#define AR40XX_FWD_CTRL1_UC_FLOOD_S 0
2379 +#define AR40XX_FWD_CTRL1_MC_FLOOD BITS(8, 7)
2380 +#define AR40XX_FWD_CTRL1_MC_FLOOD_S 8
2381 +#define AR40XX_FWD_CTRL1_BC_FLOOD BITS(16, 7)
2382 +#define AR40XX_FWD_CTRL1_BC_FLOOD_S 16
2383 +#define AR40XX_FWD_CTRL1_IGMP BITS(24, 7)
2384 +#define AR40XX_FWD_CTRL1_IGMP_S 24
2385 +
2386 +#define AR40XX_REG_PORT_LOOKUP(_i) (0x660 + (_i) * 0xc)
2387 +#define AR40XX_PORT_LOOKUP_MEMBER BITS(0, 7)
2388 +#define AR40XX_PORT_LOOKUP_IN_MODE BITS(8, 2)
2389 +#define AR40XX_PORT_LOOKUP_IN_MODE_S 8
2390 +#define AR40XX_PORT_LOOKUP_STATE BITS(16, 3)
2391 +#define AR40XX_PORT_LOOKUP_STATE_S 16
2392 +#define AR40XX_PORT_LOOKUP_LEARN BIT(20)
2393 +#define AR40XX_PORT_LOOKUP_LOOPBACK BIT(21)
2394 +#define AR40XX_PORT_LOOKUP_ING_MIRROR_EN BIT(25)
2395 +
2396 +#define AR40XX_REG_ATU_FUNC 0x60c
2397 +#define AR40XX_ATU_FUNC_OP BITS(0, 4)
2398 +#define AR40XX_ATU_FUNC_OP_NOOP 0x0
2399 +#define AR40XX_ATU_FUNC_OP_FLUSH 0x1
2400 +#define AR40XX_ATU_FUNC_OP_LOAD 0x2
2401 +#define AR40XX_ATU_FUNC_OP_PURGE 0x3
2402 +#define AR40XX_ATU_FUNC_OP_FLUSH_LOCKED 0x4
2403 +#define AR40XX_ATU_FUNC_OP_FLUSH_UNICAST 0x5
2404 +#define AR40XX_ATU_FUNC_OP_GET_NEXT 0x6
2405 +#define AR40XX_ATU_FUNC_OP_SEARCH_MAC 0x7
2406 +#define AR40XX_ATU_FUNC_OP_CHANGE_TRUNK 0x8
2407 +#define AR40XX_ATU_FUNC_BUSY BIT(31)
2408 +
2409 +#define AR40XX_REG_QM_DEBUG_ADDR 0x820
2410 +#define AR40XX_REG_QM_DEBUG_VALUE 0x824
2411 +#define AR40XX_REG_QM_PORT0_3_QNUM 0x1d
2412 +#define AR40XX_REG_QM_PORT4_6_QNUM 0x1e
2413 +
2414 +#define AR40XX_REG_PORT_HOL_CTRL1(_i) (0x974 + (_i) * 0x8)
2415 +#define AR40XX_PORT_HOL_CTRL1_EG_MIRROR_EN BIT(16)
2416 +
2417 +#define AR40XX_REG_PORT_FLOWCTRL_THRESH(_i) (0x9b0 + (_i) * 0x4)
2418 +#define AR40XX_PORT0_FC_THRESH_ON_DFLT 0x60
2419 +#define AR40XX_PORT0_FC_THRESH_OFF_DFLT 0x90
2420 +
2421 +#define AR40XX_PHY_DEBUG_0 0
2422 +#define AR40XX_PHY_MANU_CTRL_EN BIT(12)
2423 +
2424 +#define AR40XX_PHY_DEBUG_2 2
2425 +
2426 +#define AR40XX_PHY_SPEC_STATUS 0x11
2427 +#define AR40XX_PHY_SPEC_STATUS_LINK BIT(10)
2428 +#define AR40XX_PHY_SPEC_STATUS_DUPLEX BIT(13)
2429 +#define AR40XX_PHY_SPEC_STATUS_SPEED BITS(14, 2)
2430 +
2431 +/* port forwarding state */
2432 +enum {
2433 + AR40XX_PORT_STATE_DISABLED = 0,
2434 + AR40XX_PORT_STATE_BLOCK = 1,
2435 + AR40XX_PORT_STATE_LISTEN = 2,
2436 + AR40XX_PORT_STATE_LEARN = 3,
2437 + AR40XX_PORT_STATE_FORWARD = 4
2438 +};
2439 +
2440 +/* ingress 802.1q mode */
2441 +enum {
2442 + AR40XX_IN_PORT_ONLY = 0,
2443 + AR40XX_IN_PORT_FALLBACK = 1,
2444 + AR40XX_IN_VLAN_ONLY = 2,
2445 + AR40XX_IN_SECURE = 3
2446 +};
2447 +
2448 +/* egress 802.1q mode */
2449 +enum {
2450 + AR40XX_OUT_KEEP = 0,
2451 + AR40XX_OUT_STRIP_VLAN = 1,
2452 + AR40XX_OUT_ADD_VLAN = 2
2453 +};
2454 +
2455 +/* port speed */
2456 +enum {
2457 + AR40XX_PORT_SPEED_10M = 0,
2458 + AR40XX_PORT_SPEED_100M = 1,
2459 + AR40XX_PORT_SPEED_1000M = 2,
2460 + AR40XX_PORT_SPEED_ERR = 3,
2461 +};
2462 +
2463 +#define AR40XX_MIB_WORK_DELAY 2000 /* msecs */
2464 +
2465 +#define AR40XX_QM_WORK_DELAY 100
2466 +
2467 +#define AR40XX_MIB_FUNC_CAPTURE 0x3
2468 +
2469 +#define AR40XX_REG_PORT_STATS_START 0x1000
2470 +#define AR40XX_REG_PORT_STATS_LEN 0x100
2471 +
2472 +#define AR40XX_PORTS_ALL 0x3f
2473 +
2474 +#define AR40XX_PSGMII_ID 5
2475 +#define AR40XX_PSGMII_CALB_NUM 100
2476 +#define AR40XX_MALIBU_PSGMII_MODE_CTRL 0x6d
2477 +#define AR40XX_MALIBU_PHY_PSGMII_MODE_CTRL_ADJUST_VAL 0x220c
2478 +#define AR40XX_MALIBU_PHY_MMD7_DAC_CTRL 0x801a
2479 +#define AR40XX_MALIBU_DAC_CTRL_MASK 0x380
2480 +#define AR40XX_MALIBU_DAC_CTRL_VALUE 0x280
2481 +#define AR40XX_MALIBU_PHY_RLP_CTRL 0x805a
2482 +#define AR40XX_PSGMII_TX_DRIVER_1_CTRL 0xb
2483 +#define AR40XX_MALIBU_PHY_PSGMII_REDUCE_SERDES_TX_AMP 0x8a
2484 +#define AR40XX_MALIBU_PHY_LAST_ADDR 4
2485 +
2486 +static inline struct ar40xx_priv *
2487 +swdev_to_ar40xx(struct switch_dev *swdev)
2488 +{
2489 + return container_of(swdev, struct ar40xx_priv, dev);
2490 +}
2491 +
2492 +#endif