ipq40xx: add target
[openwrt/staging/mkresin.git] / target / linux / ipq40xx / patches-4.14 / 700-net-add-qualcomm-mdio-and-phy.patch
1 From 5a71a2005a2e1e6bbe36f00386c495ad6626beb2 Mon Sep 17 00:00:00 2001
2 From: Christian Lamparter <chunkeey@googlemail.com>
3 Date: Thu, 19 Jan 2017 01:59:43 +0100
4 Subject: [PATCH 30/38] NET: add qualcomm mdio and PHY
5
6 ---
7 drivers/net/phy/Kconfig | 14 ++++++++++++++
8 drivers/net/phy/Makefile | 2 ++
9 2 files changed, 16 insertions(+)
10
11 --- a/drivers/net/phy/Kconfig
12 +++ b/drivers/net/phy/Kconfig
13 @@ -481,6 +481,20 @@ config XILINX_GMII2RGMII
14 the Reduced Gigabit Media Independent Interface(RGMII) between
15 Ethernet physical media devices and the Gigabit Ethernet controller.
16
17 +config MDIO_IPQ40XX
18 + tristate "Qualcomm Atheros ipq40xx MDIO interface"
19 + depends on HAS_IOMEM && OF
20 + ---help---
21 + This driver supports the MDIO interface found in Qualcomm
22 +	  Atheros ipq40xx SoC chip.
23 +
24 +config AR40XX_PHY
25 + tristate "Driver for Qualcomm Atheros IPQ40XX switches"
26 + depends on HAS_IOMEM && OF
27 + select SWCONFIG
28 + ---help---
29 + This is the driver for Qualcomm Atheros IPQ40XX ESS switches.
30 +
31 endif # PHYLIB
32
33 config MICREL_KS8995MA
34 --- a/drivers/net/phy/Makefile
35 +++ b/drivers/net/phy/Makefile
36 @@ -48,6 +48,7 @@ obj-$(CONFIG_MDIO_CAVIUM) += mdio-cavium
37 obj-$(CONFIG_MDIO_GPIO) += mdio-gpio.o
38 obj-$(CONFIG_MDIO_HISI_FEMAC) += mdio-hisi-femac.o
39 obj-$(CONFIG_MDIO_I2C) += mdio-i2c.o
40 +obj-$(CONFIG_MDIO_IPQ40XX) += mdio-ipq40xx.o
41 obj-$(CONFIG_MDIO_MOXART) += mdio-moxart.o
42 obj-$(CONFIG_MDIO_OCTEON) += mdio-octeon.o
43 obj-$(CONFIG_MDIO_SUN4I) += mdio-sun4i.o
44 @@ -60,6 +61,7 @@ obj-y += $(sfp-obj-y) $(sfp-obj-m)
45
46 obj-$(CONFIG_AMD_PHY) += amd.o
47 obj-$(CONFIG_AQUANTIA_PHY) += aquantia.o
48 +obj-$(CONFIG_AR40XX_PHY) += ar40xx.o
49 obj-$(CONFIG_AT803X_PHY) += at803x.o
50 obj-$(CONFIG_BCM63XX_PHY) += bcm63xx.o
51 obj-$(CONFIG_BCM7XXX_PHY) += bcm7xxx.o
52 --- /dev/null
53 +++ b/drivers/net/phy/ar40xx.c
54 @@ -0,0 +1,2090 @@
55 +/*
56 + * Copyright (c) 2016, The Linux Foundation. All rights reserved.
57 + *
58 + * Permission to use, copy, modify, and/or distribute this software for
59 + * any purpose with or without fee is hereby granted, provided that the
60 + * above copyright notice and this permission notice appear in all copies.
61 + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
62 + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
63 + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
64 + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
65 + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
66 + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
67 + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
68 + */
69 +
70 +#include <linux/module.h>
71 +#include <linux/list.h>
72 +#include <linux/bitops.h>
73 +#include <linux/switch.h>
74 +#include <linux/delay.h>
75 +#include <linux/phy.h>
76 +#include <linux/clk.h>
77 +#include <linux/reset.h>
78 +#include <linux/lockdep.h>
79 +#include <linux/workqueue.h>
80 +#include <linux/of_device.h>
81 +#include <linux/of_address.h>
82 +#include <linux/mdio.h>
83 +#include <linux/gpio.h>
84 +
85 +#include "ar40xx.h"
86 +
87 +static struct ar40xx_priv *ar40xx_priv;
88 +
89 +#define MIB_DESC(_s , _o, _n) \
90 + { \
91 + .size = (_s), \
92 + .offset = (_o), \
93 + .name = (_n), \
94 + }
95 +
96 +static const struct ar40xx_mib_desc ar40xx_mibs[] = {
97 + MIB_DESC(1, AR40XX_STATS_RXBROAD, "RxBroad"),
98 + MIB_DESC(1, AR40XX_STATS_RXPAUSE, "RxPause"),
99 + MIB_DESC(1, AR40XX_STATS_RXMULTI, "RxMulti"),
100 + MIB_DESC(1, AR40XX_STATS_RXFCSERR, "RxFcsErr"),
101 + MIB_DESC(1, AR40XX_STATS_RXALIGNERR, "RxAlignErr"),
102 + MIB_DESC(1, AR40XX_STATS_RXRUNT, "RxRunt"),
103 + MIB_DESC(1, AR40XX_STATS_RXFRAGMENT, "RxFragment"),
104 + MIB_DESC(1, AR40XX_STATS_RX64BYTE, "Rx64Byte"),
105 + MIB_DESC(1, AR40XX_STATS_RX128BYTE, "Rx128Byte"),
106 + MIB_DESC(1, AR40XX_STATS_RX256BYTE, "Rx256Byte"),
107 + MIB_DESC(1, AR40XX_STATS_RX512BYTE, "Rx512Byte"),
108 + MIB_DESC(1, AR40XX_STATS_RX1024BYTE, "Rx1024Byte"),
109 + MIB_DESC(1, AR40XX_STATS_RX1518BYTE, "Rx1518Byte"),
110 + MIB_DESC(1, AR40XX_STATS_RXMAXBYTE, "RxMaxByte"),
111 + MIB_DESC(1, AR40XX_STATS_RXTOOLONG, "RxTooLong"),
112 + MIB_DESC(2, AR40XX_STATS_RXGOODBYTE, "RxGoodByte"),
113 + MIB_DESC(2, AR40XX_STATS_RXBADBYTE, "RxBadByte"),
114 + MIB_DESC(1, AR40XX_STATS_RXOVERFLOW, "RxOverFlow"),
115 + MIB_DESC(1, AR40XX_STATS_FILTERED, "Filtered"),
116 + MIB_DESC(1, AR40XX_STATS_TXBROAD, "TxBroad"),
117 + MIB_DESC(1, AR40XX_STATS_TXPAUSE, "TxPause"),
118 + MIB_DESC(1, AR40XX_STATS_TXMULTI, "TxMulti"),
119 + MIB_DESC(1, AR40XX_STATS_TXUNDERRUN, "TxUnderRun"),
120 + MIB_DESC(1, AR40XX_STATS_TX64BYTE, "Tx64Byte"),
121 + MIB_DESC(1, AR40XX_STATS_TX128BYTE, "Tx128Byte"),
122 + MIB_DESC(1, AR40XX_STATS_TX256BYTE, "Tx256Byte"),
123 + MIB_DESC(1, AR40XX_STATS_TX512BYTE, "Tx512Byte"),
124 + MIB_DESC(1, AR40XX_STATS_TX1024BYTE, "Tx1024Byte"),
125 + MIB_DESC(1, AR40XX_STATS_TX1518BYTE, "Tx1518Byte"),
126 + MIB_DESC(1, AR40XX_STATS_TXMAXBYTE, "TxMaxByte"),
127 + MIB_DESC(1, AR40XX_STATS_TXOVERSIZE, "TxOverSize"),
128 + MIB_DESC(2, AR40XX_STATS_TXBYTE, "TxByte"),
129 + MIB_DESC(1, AR40XX_STATS_TXCOLLISION, "TxCollision"),
130 + MIB_DESC(1, AR40XX_STATS_TXABORTCOL, "TxAbortCol"),
131 + MIB_DESC(1, AR40XX_STATS_TXMULTICOL, "TxMultiCol"),
132 + MIB_DESC(1, AR40XX_STATS_TXSINGLECOL, "TxSingleCol"),
133 + MIB_DESC(1, AR40XX_STATS_TXEXCDEFER, "TxExcDefer"),
134 + MIB_DESC(1, AR40XX_STATS_TXDEFER, "TxDefer"),
135 + MIB_DESC(1, AR40XX_STATS_TXLATECOL, "TxLateCol"),
136 +};
137 +
138 +static u32
139 +ar40xx_read(struct ar40xx_priv *priv, int reg)
140 +{
141 + return readl(priv->hw_addr + reg);
142 +}
143 +
144 +static u32
145 +ar40xx_psgmii_read(struct ar40xx_priv *priv, int reg)
146 +{
147 + return readl(priv->psgmii_hw_addr + reg);
148 +}
149 +
150 +static void
151 +ar40xx_write(struct ar40xx_priv *priv, int reg, u32 val)
152 +{
153 + writel(val, priv->hw_addr + reg);
154 +}
155 +
156 +static u32
157 +ar40xx_rmw(struct ar40xx_priv *priv, int reg, u32 mask, u32 val)
158 +{
159 + u32 ret;
160 +
161 + ret = ar40xx_read(priv, reg);
162 + ret &= ~mask;
163 + ret |= val;
164 + ar40xx_write(priv, reg, ret);
165 + return ret;
166 +}
167 +
168 +static void
169 +ar40xx_psgmii_write(struct ar40xx_priv *priv, int reg, u32 val)
170 +{
171 + writel(val, priv->psgmii_hw_addr + reg);
172 +}
173 +
174 +static void
175 +ar40xx_phy_dbg_write(struct ar40xx_priv *priv, int phy_addr,
176 + u16 dbg_addr, u16 dbg_data)
177 +{
178 + struct mii_bus *bus = priv->mii_bus;
179 +
180 + mutex_lock(&bus->mdio_lock);
181 + bus->write(bus, phy_addr, AR40XX_MII_ATH_DBG_ADDR, dbg_addr);
182 + bus->write(bus, phy_addr, AR40XX_MII_ATH_DBG_DATA, dbg_data);
183 + mutex_unlock(&bus->mdio_lock);
184 +}
185 +
186 +static void
187 +ar40xx_phy_dbg_read(struct ar40xx_priv *priv, int phy_addr,
188 + u16 dbg_addr, u16 *dbg_data)
189 +{
190 + struct mii_bus *bus = priv->mii_bus;
191 +
192 + mutex_lock(&bus->mdio_lock);
193 + bus->write(bus, phy_addr, AR40XX_MII_ATH_DBG_ADDR, dbg_addr);
194 + *dbg_data = bus->read(bus, phy_addr, AR40XX_MII_ATH_DBG_DATA);
195 + mutex_unlock(&bus->mdio_lock);
196 +}
197 +
198 +static void
199 +ar40xx_phy_mmd_write(struct ar40xx_priv *priv, u32 phy_id,
200 + u16 mmd_num, u16 reg_id, u16 reg_val)
201 +{
202 + struct mii_bus *bus = priv->mii_bus;
203 +
204 + mutex_lock(&bus->mdio_lock);
205 + bus->write(bus, phy_id,
206 + AR40XX_MII_ATH_MMD_ADDR, mmd_num);
207 + bus->write(bus, phy_id,
208 + AR40XX_MII_ATH_MMD_DATA, reg_id);
209 + bus->write(bus, phy_id,
210 + AR40XX_MII_ATH_MMD_ADDR,
211 + 0x4000 | mmd_num);
212 + bus->write(bus, phy_id,
213 + AR40XX_MII_ATH_MMD_DATA, reg_val);
214 + mutex_unlock(&bus->mdio_lock);
215 +}
216 +
217 +static u16
218 +ar40xx_phy_mmd_read(struct ar40xx_priv *priv, u32 phy_id,
219 + u16 mmd_num, u16 reg_id)
220 +{
221 + u16 value;
222 + struct mii_bus *bus = priv->mii_bus;
223 +
224 + mutex_lock(&bus->mdio_lock);
225 + bus->write(bus, phy_id,
226 + AR40XX_MII_ATH_MMD_ADDR, mmd_num);
227 + bus->write(bus, phy_id,
228 + AR40XX_MII_ATH_MMD_DATA, reg_id);
229 + bus->write(bus, phy_id,
230 + AR40XX_MII_ATH_MMD_ADDR,
231 + 0x4000 | mmd_num);
232 + value = bus->read(bus, phy_id, AR40XX_MII_ATH_MMD_DATA);
233 + mutex_unlock(&bus->mdio_lock);
234 + return value;
235 +}
236 +
237 +/* Start of swconfig support */
238 +
239 +static void
240 +ar40xx_phy_poll_reset(struct ar40xx_priv *priv)
241 +{
242 + u32 i, in_reset, retries = 500;
243 + struct mii_bus *bus = priv->mii_bus;
244 +
245 + /* Assume RESET was recently issued to some or all of the phys */
246 + in_reset = GENMASK(AR40XX_NUM_PHYS - 1, 0);
247 +
248 + while (retries--) {
249 + /* 1ms should be plenty of time.
250 + * 802.3 spec allows for a max wait time of 500ms
251 + */
252 + usleep_range(1000, 2000);
253 +
254 + for (i = 0; i < AR40XX_NUM_PHYS; i++) {
255 + int val;
256 +
257 + /* skip devices which have completed reset */
258 + if (!(in_reset & BIT(i)))
259 + continue;
260 +
261 + val = mdiobus_read(bus, i, MII_BMCR);
262 + if (val < 0)
263 + continue;
264 +
265 + /* mark when phy is no longer in reset state */
266 + if (!(val & BMCR_RESET))
267 + in_reset &= ~BIT(i);
268 + }
269 +
270 + if (!in_reset)
271 + return;
272 + }
273 +
274 + dev_warn(&bus->dev, "Failed to reset all phys! (in_reset: 0x%x)\n",
275 + in_reset);
276 +}
277 +
278 +static void
279 +ar40xx_phy_init(struct ar40xx_priv *priv)
280 +{
281 + int i;
282 + struct mii_bus *bus;
283 + u16 val;
284 +
285 + bus = priv->mii_bus;
286 + for (i = 0; i < AR40XX_NUM_PORTS - 1; i++) {
287 + ar40xx_phy_dbg_read(priv, i, AR40XX_PHY_DEBUG_0, &val);
288 + val &= ~AR40XX_PHY_MANU_CTRL_EN;
289 + ar40xx_phy_dbg_write(priv, i, AR40XX_PHY_DEBUG_0, val);
290 + mdiobus_write(bus, i,
291 + MII_ADVERTISE, ADVERTISE_ALL |
292 + ADVERTISE_PAUSE_CAP |
293 + ADVERTISE_PAUSE_ASYM);
294 + mdiobus_write(bus, i, MII_CTRL1000, ADVERTISE_1000FULL);
295 + mdiobus_write(bus, i, MII_BMCR, BMCR_RESET | BMCR_ANENABLE);
296 + }
297 +
298 + ar40xx_phy_poll_reset(priv);
299 +}
300 +
301 +static void
302 +ar40xx_port_phy_linkdown(struct ar40xx_priv *priv)
303 +{
304 + struct mii_bus *bus;
305 + int i;
306 + u16 val;
307 +
308 + bus = priv->mii_bus;
309 + for (i = 0; i < AR40XX_NUM_PORTS - 1; i++) {
310 + mdiobus_write(bus, i, MII_CTRL1000, 0);
311 + mdiobus_write(bus, i, MII_ADVERTISE, 0);
312 + mdiobus_write(bus, i, MII_BMCR, BMCR_RESET | BMCR_ANENABLE);
313 + ar40xx_phy_dbg_read(priv, i, AR40XX_PHY_DEBUG_0, &val);
314 + val |= AR40XX_PHY_MANU_CTRL_EN;
315 + ar40xx_phy_dbg_write(priv, i, AR40XX_PHY_DEBUG_0, val);
316 + /* disable transmit */
317 + ar40xx_phy_dbg_read(priv, i, AR40XX_PHY_DEBUG_2, &val);
318 + val &= 0xf00f;
319 + ar40xx_phy_dbg_write(priv, i, AR40XX_PHY_DEBUG_2, val);
320 + }
321 +}
322 +
323 +static void
324 +ar40xx_set_mirror_regs(struct ar40xx_priv *priv)
325 +{
326 + int port;
327 +
328 + /* reset all mirror registers */
329 + ar40xx_rmw(priv, AR40XX_REG_FWD_CTRL0,
330 + AR40XX_FWD_CTRL0_MIRROR_PORT,
331 + (0xF << AR40XX_FWD_CTRL0_MIRROR_PORT_S));
332 + for (port = 0; port < AR40XX_NUM_PORTS; port++) {
333 + ar40xx_rmw(priv, AR40XX_REG_PORT_LOOKUP(port),
334 + AR40XX_PORT_LOOKUP_ING_MIRROR_EN, 0);
335 +
336 + ar40xx_rmw(priv, AR40XX_REG_PORT_HOL_CTRL1(port),
337 + AR40XX_PORT_HOL_CTRL1_EG_MIRROR_EN, 0);
338 + }
339 +
340 + /* now enable mirroring if necessary */
341 + if (priv->source_port >= AR40XX_NUM_PORTS ||
342 + priv->monitor_port >= AR40XX_NUM_PORTS ||
343 + priv->source_port == priv->monitor_port) {
344 + return;
345 + }
346 +
347 + ar40xx_rmw(priv, AR40XX_REG_FWD_CTRL0,
348 + AR40XX_FWD_CTRL0_MIRROR_PORT,
349 + (priv->monitor_port << AR40XX_FWD_CTRL0_MIRROR_PORT_S));
350 +
351 + if (priv->mirror_rx)
352 + ar40xx_rmw(priv, AR40XX_REG_PORT_LOOKUP(priv->source_port), 0,
353 + AR40XX_PORT_LOOKUP_ING_MIRROR_EN);
354 +
355 + if (priv->mirror_tx)
356 + ar40xx_rmw(priv, AR40XX_REG_PORT_HOL_CTRL1(priv->source_port),
357 + 0, AR40XX_PORT_HOL_CTRL1_EG_MIRROR_EN);
358 +}
359 +
360 +static int
361 +ar40xx_sw_get_ports(struct switch_dev *dev, struct switch_val *val)
362 +{
363 + struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
364 + u8 ports = priv->vlan_table[val->port_vlan];
365 + int i;
366 +
367 + val->len = 0;
368 + for (i = 0; i < dev->ports; i++) {
369 + struct switch_port *p;
370 +
371 + if (!(ports & BIT(i)))
372 + continue;
373 +
374 + p = &val->value.ports[val->len++];
375 + p->id = i;
376 + if ((priv->vlan_tagged & BIT(i)) ||
377 + (priv->pvid[i] != val->port_vlan))
378 + p->flags = BIT(SWITCH_PORT_FLAG_TAGGED);
379 + else
380 + p->flags = 0;
381 + }
382 + return 0;
383 +}
384 +
385 +static int
386 +ar40xx_sw_set_ports(struct switch_dev *dev, struct switch_val *val)
387 +{
388 + struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
389 + u8 *vt = &priv->vlan_table[val->port_vlan];
390 + int i;
391 +
392 + *vt = 0;
393 + for (i = 0; i < val->len; i++) {
394 + struct switch_port *p = &val->value.ports[i];
395 +
396 + if (p->flags & BIT(SWITCH_PORT_FLAG_TAGGED)) {
397 + if (val->port_vlan == priv->pvid[p->id])
398 + priv->vlan_tagged |= BIT(p->id);
399 + } else {
400 + priv->vlan_tagged &= ~BIT(p->id);
401 + priv->pvid[p->id] = val->port_vlan;
402 + }
403 +
404 + *vt |= BIT(p->id);
405 + }
406 + return 0;
407 +}
408 +
409 +static int
410 +ar40xx_reg_wait(struct ar40xx_priv *priv, u32 reg, u32 mask, u32 val,
411 + unsigned timeout)
412 +{
413 + int i;
414 +
415 + for (i = 0; i < timeout; i++) {
416 + u32 t;
417 +
418 + t = ar40xx_read(priv, reg);
419 + if ((t & mask) == val)
420 + return 0;
421 +
422 + usleep_range(1000, 2000);
423 + }
424 +
425 + return -ETIMEDOUT;
426 +}
427 +
428 +static int
429 +ar40xx_mib_op(struct ar40xx_priv *priv, u32 op)
430 +{
431 + int ret;
432 +
433 + lockdep_assert_held(&priv->mib_lock);
434 +
435 + /* Capture the hardware statistics for all ports */
436 + ar40xx_rmw(priv, AR40XX_REG_MIB_FUNC,
437 + AR40XX_MIB_FUNC, (op << AR40XX_MIB_FUNC_S));
438 +
439 + /* Wait for the capturing to complete. */
440 + ret = ar40xx_reg_wait(priv, AR40XX_REG_MIB_FUNC,
441 + AR40XX_MIB_BUSY, 0, 10);
442 +
443 + return ret;
444 +}
445 +
446 +static void
447 +ar40xx_mib_fetch_port_stat(struct ar40xx_priv *priv, int port, bool flush)
448 +{
449 + unsigned int base;
450 + u64 *mib_stats;
451 + int i;
452 + u32 num_mibs = ARRAY_SIZE(ar40xx_mibs);
453 +
454 + WARN_ON(port >= priv->dev.ports);
455 +
456 + lockdep_assert_held(&priv->mib_lock);
457 +
458 + base = AR40XX_REG_PORT_STATS_START +
459 + AR40XX_REG_PORT_STATS_LEN * port;
460 +
461 + mib_stats = &priv->mib_stats[port * num_mibs];
462 + if (flush) {
463 + u32 len;
464 +
465 + len = num_mibs * sizeof(*mib_stats);
466 + memset(mib_stats, 0, len);
467 + return;
468 + }
469 + for (i = 0; i < num_mibs; i++) {
470 + const struct ar40xx_mib_desc *mib;
471 + u64 t;
472 +
473 + mib = &ar40xx_mibs[i];
474 + t = ar40xx_read(priv, base + mib->offset);
475 + if (mib->size == 2) {
476 + u64 hi;
477 +
478 + hi = ar40xx_read(priv, base + mib->offset + 4);
479 + t |= hi << 32;
480 + }
481 +
482 + mib_stats[i] += t;
483 + }
484 +}
485 +
486 +static int
487 +ar40xx_mib_capture(struct ar40xx_priv *priv)
488 +{
489 + return ar40xx_mib_op(priv, AR40XX_MIB_FUNC_CAPTURE);
490 +}
491 +
492 +static int
493 +ar40xx_mib_flush(struct ar40xx_priv *priv)
494 +{
495 + return ar40xx_mib_op(priv, AR40XX_MIB_FUNC_FLUSH);
496 +}
497 +
498 +static int
499 +ar40xx_sw_set_reset_mibs(struct switch_dev *dev,
500 + const struct switch_attr *attr,
501 + struct switch_val *val)
502 +{
503 + struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
504 + unsigned int len;
505 + int ret;
506 + u32 num_mibs = ARRAY_SIZE(ar40xx_mibs);
507 +
508 + mutex_lock(&priv->mib_lock);
509 +
510 + len = priv->dev.ports * num_mibs * sizeof(*priv->mib_stats);
511 + memset(priv->mib_stats, 0, len);
512 + ret = ar40xx_mib_flush(priv);
513 +
514 + mutex_unlock(&priv->mib_lock);
515 + return ret;
516 +}
517 +
518 +static int
519 +ar40xx_sw_set_vlan(struct switch_dev *dev, const struct switch_attr *attr,
520 + struct switch_val *val)
521 +{
522 + struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
523 +
524 + priv->vlan = !!val->value.i;
525 + return 0;
526 +}
527 +
528 +static int
529 +ar40xx_sw_get_vlan(struct switch_dev *dev, const struct switch_attr *attr,
530 + struct switch_val *val)
531 +{
532 + struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
533 +
534 + val->value.i = priv->vlan;
535 + return 0;
536 +}
537 +
538 +static int
539 +ar40xx_sw_set_mirror_rx_enable(struct switch_dev *dev,
540 + const struct switch_attr *attr,
541 + struct switch_val *val)
542 +{
543 + struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
544 +
545 + mutex_lock(&priv->reg_mutex);
546 + priv->mirror_rx = !!val->value.i;
547 + ar40xx_set_mirror_regs(priv);
548 + mutex_unlock(&priv->reg_mutex);
549 +
550 + return 0;
551 +}
552 +
553 +static int
554 +ar40xx_sw_get_mirror_rx_enable(struct switch_dev *dev,
555 + const struct switch_attr *attr,
556 + struct switch_val *val)
557 +{
558 + struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
559 +
560 + mutex_lock(&priv->reg_mutex);
561 + val->value.i = priv->mirror_rx;
562 + mutex_unlock(&priv->reg_mutex);
563 + return 0;
564 +}
565 +
566 +static int
567 +ar40xx_sw_set_mirror_tx_enable(struct switch_dev *dev,
568 + const struct switch_attr *attr,
569 + struct switch_val *val)
570 +{
571 + struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
572 +
573 + mutex_lock(&priv->reg_mutex);
574 + priv->mirror_tx = !!val->value.i;
575 + ar40xx_set_mirror_regs(priv);
576 + mutex_unlock(&priv->reg_mutex);
577 +
578 + return 0;
579 +}
580 +
581 +static int
582 +ar40xx_sw_get_mirror_tx_enable(struct switch_dev *dev,
583 + const struct switch_attr *attr,
584 + struct switch_val *val)
585 +{
586 + struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
587 +
588 + mutex_lock(&priv->reg_mutex);
589 + val->value.i = priv->mirror_tx;
590 + mutex_unlock(&priv->reg_mutex);
591 + return 0;
592 +}
593 +
594 +static int
595 +ar40xx_sw_set_mirror_monitor_port(struct switch_dev *dev,
596 + const struct switch_attr *attr,
597 + struct switch_val *val)
598 +{
599 + struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
600 +
601 + mutex_lock(&priv->reg_mutex);
602 + priv->monitor_port = val->value.i;
603 + ar40xx_set_mirror_regs(priv);
604 + mutex_unlock(&priv->reg_mutex);
605 +
606 + return 0;
607 +}
608 +
609 +static int
610 +ar40xx_sw_get_mirror_monitor_port(struct switch_dev *dev,
611 + const struct switch_attr *attr,
612 + struct switch_val *val)
613 +{
614 + struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
615 +
616 + mutex_lock(&priv->reg_mutex);
617 + val->value.i = priv->monitor_port;
618 + mutex_unlock(&priv->reg_mutex);
619 + return 0;
620 +}
621 +
622 +static int
623 +ar40xx_sw_set_mirror_source_port(struct switch_dev *dev,
624 + const struct switch_attr *attr,
625 + struct switch_val *val)
626 +{
627 + struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
628 +
629 + mutex_lock(&priv->reg_mutex);
630 + priv->source_port = val->value.i;
631 + ar40xx_set_mirror_regs(priv);
632 + mutex_unlock(&priv->reg_mutex);
633 +
634 + return 0;
635 +}
636 +
637 +static int
638 +ar40xx_sw_get_mirror_source_port(struct switch_dev *dev,
639 + const struct switch_attr *attr,
640 + struct switch_val *val)
641 +{
642 + struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
643 +
644 + mutex_lock(&priv->reg_mutex);
645 + val->value.i = priv->source_port;
646 + mutex_unlock(&priv->reg_mutex);
647 + return 0;
648 +}
649 +
650 +static int
651 +ar40xx_sw_set_linkdown(struct switch_dev *dev,
652 + const struct switch_attr *attr,
653 + struct switch_val *val)
654 +{
655 + struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
656 +
657 + if (val->value.i == 1)
658 + ar40xx_port_phy_linkdown(priv);
659 + else
660 + ar40xx_phy_init(priv);
661 +
662 + return 0;
663 +}
664 +
665 +static int
666 +ar40xx_sw_set_port_reset_mib(struct switch_dev *dev,
667 + const struct switch_attr *attr,
668 + struct switch_val *val)
669 +{
670 + struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
671 + int port;
672 + int ret;
673 +
674 + port = val->port_vlan;
675 + if (port >= dev->ports)
676 + return -EINVAL;
677 +
678 + mutex_lock(&priv->mib_lock);
679 + ret = ar40xx_mib_capture(priv);
680 + if (ret)
681 + goto unlock;
682 +
683 + ar40xx_mib_fetch_port_stat(priv, port, true);
684 +
685 +unlock:
686 + mutex_unlock(&priv->mib_lock);
687 + return ret;
688 +}
689 +
690 +static int
691 +ar40xx_sw_get_port_mib(struct switch_dev *dev,
692 + const struct switch_attr *attr,
693 + struct switch_val *val)
694 +{
695 + struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
696 + u64 *mib_stats;
697 + int port;
698 + int ret;
699 + char *buf = priv->buf;
700 + int i, len = 0;
701 + u32 num_mibs = ARRAY_SIZE(ar40xx_mibs);
702 +
703 + port = val->port_vlan;
704 + if (port >= dev->ports)
705 + return -EINVAL;
706 +
707 + mutex_lock(&priv->mib_lock);
708 + ret = ar40xx_mib_capture(priv);
709 + if (ret)
710 + goto unlock;
711 +
712 + ar40xx_mib_fetch_port_stat(priv, port, false);
713 +
714 + len += snprintf(buf + len, sizeof(priv->buf) - len,
715 + "Port %d MIB counters\n",
716 + port);
717 +
718 + mib_stats = &priv->mib_stats[port * num_mibs];
719 + for (i = 0; i < num_mibs; i++)
720 + len += snprintf(buf + len, sizeof(priv->buf) - len,
721 + "%-12s: %llu\n",
722 + ar40xx_mibs[i].name,
723 + mib_stats[i]);
724 +
725 + val->value.s = buf;
726 + val->len = len;
727 +
728 +unlock:
729 + mutex_unlock(&priv->mib_lock);
730 + return ret;
731 +}
732 +
733 +static int
734 +ar40xx_sw_set_vid(struct switch_dev *dev, const struct switch_attr *attr,
735 + struct switch_val *val)
736 +{
737 + struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
738 +
739 + priv->vlan_id[val->port_vlan] = val->value.i;
740 + return 0;
741 +}
742 +
743 +static int
744 +ar40xx_sw_get_vid(struct switch_dev *dev, const struct switch_attr *attr,
745 + struct switch_val *val)
746 +{
747 + struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
748 +
749 + val->value.i = priv->vlan_id[val->port_vlan];
750 + return 0;
751 +}
752 +
753 +static int
754 +ar40xx_sw_get_pvid(struct switch_dev *dev, int port, int *vlan)
755 +{
756 + struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
757 + *vlan = priv->pvid[port];
758 + return 0;
759 +}
760 +
761 +static int
762 +ar40xx_sw_set_pvid(struct switch_dev *dev, int port, int vlan)
763 +{
764 + struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
765 +
766 + /* make sure no invalid PVIDs get set */
767 + if (vlan >= dev->vlans)
768 + return -EINVAL;
769 +
770 + priv->pvid[port] = vlan;
771 + return 0;
772 +}
773 +
774 +static void
775 +ar40xx_read_port_link(struct ar40xx_priv *priv, int port,
776 + struct switch_port_link *link)
777 +{
778 + u32 status;
779 + u32 speed;
780 +
781 + memset(link, 0, sizeof(*link));
782 +
783 + status = ar40xx_read(priv, AR40XX_REG_PORT_STATUS(port));
784 +
785 + link->aneg = !!(status & AR40XX_PORT_AUTO_LINK_EN);
786 + if (link->aneg || (port != AR40XX_PORT_CPU))
787 + link->link = !!(status & AR40XX_PORT_STATUS_LINK_UP);
788 + else
789 + link->link = true;
790 +
791 + if (!link->link)
792 + return;
793 +
794 + link->duplex = !!(status & AR40XX_PORT_DUPLEX);
795 + link->tx_flow = !!(status & AR40XX_PORT_STATUS_TXFLOW);
796 + link->rx_flow = !!(status & AR40XX_PORT_STATUS_RXFLOW);
797 +
798 + speed = (status & AR40XX_PORT_SPEED) >>
799 + AR40XX_PORT_STATUS_SPEED_S;
800 +
801 + switch (speed) {
802 + case AR40XX_PORT_SPEED_10M:
803 + link->speed = SWITCH_PORT_SPEED_10;
804 + break;
805 + case AR40XX_PORT_SPEED_100M:
806 + link->speed = SWITCH_PORT_SPEED_100;
807 + break;
808 + case AR40XX_PORT_SPEED_1000M:
809 + link->speed = SWITCH_PORT_SPEED_1000;
810 + break;
811 + default:
812 + link->speed = SWITCH_PORT_SPEED_UNKNOWN;
813 + break;
814 + }
815 +}
816 +
817 +static int
818 +ar40xx_sw_get_port_link(struct switch_dev *dev, int port,
819 + struct switch_port_link *link)
820 +{
821 + struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
822 +
823 + ar40xx_read_port_link(priv, port, link);
824 + return 0;
825 +}
826 +
827 +static const struct switch_attr ar40xx_sw_attr_globals[] = {
828 + {
829 + .type = SWITCH_TYPE_INT,
830 + .name = "enable_vlan",
831 + .description = "Enable VLAN mode",
832 + .set = ar40xx_sw_set_vlan,
833 + .get = ar40xx_sw_get_vlan,
834 + .max = 1
835 + },
836 + {
837 + .type = SWITCH_TYPE_NOVAL,
838 + .name = "reset_mibs",
839 + .description = "Reset all MIB counters",
840 + .set = ar40xx_sw_set_reset_mibs,
841 + },
842 + {
843 + .type = SWITCH_TYPE_INT,
844 + .name = "enable_mirror_rx",
845 + .description = "Enable mirroring of RX packets",
846 + .set = ar40xx_sw_set_mirror_rx_enable,
847 + .get = ar40xx_sw_get_mirror_rx_enable,
848 + .max = 1
849 + },
850 + {
851 + .type = SWITCH_TYPE_INT,
852 + .name = "enable_mirror_tx",
853 + .description = "Enable mirroring of TX packets",
854 + .set = ar40xx_sw_set_mirror_tx_enable,
855 + .get = ar40xx_sw_get_mirror_tx_enable,
856 + .max = 1
857 + },
858 + {
859 + .type = SWITCH_TYPE_INT,
860 + .name = "mirror_monitor_port",
861 + .description = "Mirror monitor port",
862 + .set = ar40xx_sw_set_mirror_monitor_port,
863 + .get = ar40xx_sw_get_mirror_monitor_port,
864 + .max = AR40XX_NUM_PORTS - 1
865 + },
866 + {
867 + .type = SWITCH_TYPE_INT,
868 + .name = "mirror_source_port",
869 + .description = "Mirror source port",
870 + .set = ar40xx_sw_set_mirror_source_port,
871 + .get = ar40xx_sw_get_mirror_source_port,
872 + .max = AR40XX_NUM_PORTS - 1
873 + },
874 + {
875 + .type = SWITCH_TYPE_INT,
876 + .name = "linkdown",
877 + .description = "Link down all the PHYs",
878 + .set = ar40xx_sw_set_linkdown,
879 + .max = 1
880 + },
881 +};
882 +
883 +static const struct switch_attr ar40xx_sw_attr_port[] = {
884 + {
885 + .type = SWITCH_TYPE_NOVAL,
886 + .name = "reset_mib",
887 + .description = "Reset single port MIB counters",
888 + .set = ar40xx_sw_set_port_reset_mib,
889 + },
890 + {
891 + .type = SWITCH_TYPE_STRING,
892 + .name = "mib",
893 + .description = "Get port's MIB counters",
894 + .set = NULL,
895 + .get = ar40xx_sw_get_port_mib,
896 + },
897 +};
898 +
899 +const struct switch_attr ar40xx_sw_attr_vlan[] = {
900 + {
901 + .type = SWITCH_TYPE_INT,
902 + .name = "vid",
903 + .description = "VLAN ID (0-4094)",
904 + .set = ar40xx_sw_set_vid,
905 + .get = ar40xx_sw_get_vid,
906 + .max = 4094,
907 + },
908 +};
909 +
910 +/* End of swconfig support */
911 +
912 +static int
913 +ar40xx_wait_bit(struct ar40xx_priv *priv, int reg, u32 mask, u32 val)
914 +{
915 + int timeout = 20;
916 + u32 t;
917 +
918 + while (1) {
919 + t = ar40xx_read(priv, reg);
920 + if ((t & mask) == val)
921 + return 0;
922 +
923 + if (timeout-- <= 0)
924 + break;
925 +
926 + usleep_range(10, 20);
927 + }
928 +
929 + pr_err("ar40xx: timeout for reg %08x: %08x & %08x != %08x\n",
930 + (unsigned int)reg, t, mask, val);
931 + return -ETIMEDOUT;
932 +}
933 +
934 +static int
935 +ar40xx_atu_flush(struct ar40xx_priv *priv)
936 +{
937 + int ret;
938 +
939 + ret = ar40xx_wait_bit(priv, AR40XX_REG_ATU_FUNC,
940 + AR40XX_ATU_FUNC_BUSY, 0);
941 + if (!ret)
942 + ar40xx_write(priv, AR40XX_REG_ATU_FUNC,
943 + AR40XX_ATU_FUNC_OP_FLUSH |
944 + AR40XX_ATU_FUNC_BUSY);
945 +
946 + return ret;
947 +}
948 +
949 +static void
950 +ar40xx_ess_reset(struct ar40xx_priv *priv)
951 +{
952 + reset_control_assert(priv->ess_rst);
953 + mdelay(10);
954 + reset_control_deassert(priv->ess_rst);
955 + /* Waiting for all inner tables init done.
956 + * It cost 5~10ms.
957 + */
958 + mdelay(10);
959 +
960 + pr_info("ESS reset ok!\n");
961 +}
962 +
963 +/* Start of psgmii self test */
964 +
965 +static void
966 +ar40xx_malibu_psgmii_ess_reset(struct ar40xx_priv *priv)
967 +{
968 + u32 n;
969 + struct mii_bus *bus = priv->mii_bus;
970 + /* reset phy psgmii */
971 + /* fix phy psgmii RX 20bit */
972 + mdiobus_write(bus, 5, 0x0, 0x005b);
973 + /* reset phy psgmii */
974 + mdiobus_write(bus, 5, 0x0, 0x001b);
975 + /* release reset phy psgmii */
976 + mdiobus_write(bus, 5, 0x0, 0x005b);
977 +
978 + for (n = 0; n < AR40XX_PSGMII_CALB_NUM; n++) {
979 + u16 status;
980 +
981 + status = ar40xx_phy_mmd_read(priv, 5, 1, 0x28);
982 + if (status & BIT(0))
983 + break;
984 + /* Polling interval to check PSGMII PLL in malibu is ready
985 + * the worst time is 8.67ms
986 + * for 25MHz reference clock
987 + * [512+(128+2048)*49]*80ns+100us
988 + */
989 + mdelay(2);
990 + }
991 +
992 + /*check malibu psgmii calibration done end..*/
993 +
994 + /*freeze phy psgmii RX CDR*/
995 + mdiobus_write(bus, 5, 0x1a, 0x2230);
996 +
997 + ar40xx_ess_reset(priv);
998 +
999 + /*check psgmii calibration done start*/
1000 + for (n = 0; n < AR40XX_PSGMII_CALB_NUM; n++) {
1001 + u32 status;
1002 +
1003 + status = ar40xx_psgmii_read(priv, 0xa0);
1004 + if (status & BIT(0))
1005 + break;
1006 + /* Polling interval to check PSGMII PLL in ESS is ready */
1007 + mdelay(2);
1008 + }
1009 +
1010 + /* check dakota psgmii calibration done end..*/
1011 +
1012 +	/* release phy psgmii RX CDR */
1013 + mdiobus_write(bus, 5, 0x1a, 0x3230);
1014 + /* release phy psgmii RX 20bit */
1015 + mdiobus_write(bus, 5, 0x0, 0x005f);
1016 +}
1017 +
1018 +static void
1019 +ar40xx_psgmii_single_phy_testing(struct ar40xx_priv *priv, int phy)
1020 +{
1021 + int j;
1022 + u32 tx_ok, tx_error;
1023 + u32 rx_ok, rx_error;
1024 + u32 tx_ok_high16;
1025 + u32 rx_ok_high16;
1026 + u32 tx_all_ok, rx_all_ok;
1027 + struct mii_bus *bus = priv->mii_bus;
1028 +
1029 + mdiobus_write(bus, phy, 0x0, 0x9000);
1030 + mdiobus_write(bus, phy, 0x0, 0x4140);
1031 +
1032 + for (j = 0; j < AR40XX_PSGMII_CALB_NUM; j++) {
1033 + u16 status;
1034 +
1035 + status = mdiobus_read(bus, phy, 0x11);
1036 + if (status & AR40XX_PHY_SPEC_STATUS_LINK)
1037 + break;
1038 + /* the polling interval to check if the PHY link up or not
1039 + * maxwait_timer: 750 ms +/-10 ms
1040 + * minwait_timer : 1 us +/- 0.1us
1041 + * time resides in minwait_timer ~ maxwait_timer
1042 + * see IEEE 802.3 section 40.4.5.2
1043 + */
1044 + mdelay(8);
1045 + }
1046 +
1047 + /* enable check */
1048 + ar40xx_phy_mmd_write(priv, phy, 7, 0x8029, 0x0000);
1049 + ar40xx_phy_mmd_write(priv, phy, 7, 0x8029, 0x0003);
1050 +
1051 + /* start traffic */
1052 + ar40xx_phy_mmd_write(priv, phy, 7, 0x8020, 0xa000);
1053 + /* wait for all traffic end
1054 + * 4096(pkt num)*1524(size)*8ns(125MHz)=49.9ms
1055 + */
1056 + mdelay(50);
1057 +
1058 + /* check counter */
1059 + tx_ok = ar40xx_phy_mmd_read(priv, phy, 7, 0x802e);
1060 + tx_ok_high16 = ar40xx_phy_mmd_read(priv, phy, 7, 0x802d);
1061 + tx_error = ar40xx_phy_mmd_read(priv, phy, 7, 0x802f);
1062 + rx_ok = ar40xx_phy_mmd_read(priv, phy, 7, 0x802b);
1063 + rx_ok_high16 = ar40xx_phy_mmd_read(priv, phy, 7, 0x802a);
1064 + rx_error = ar40xx_phy_mmd_read(priv, phy, 7, 0x802c);
1065 + tx_all_ok = tx_ok + (tx_ok_high16 << 16);
1066 + rx_all_ok = rx_ok + (rx_ok_high16 << 16);
1067 + if (tx_all_ok == 0x1000 && tx_error == 0) {
1068 + /* success */
1069 + priv->phy_t_status &= (~BIT(phy));
1070 + } else {
1071 + pr_info("PHY %d single test PSGMII issue happen!\n", phy);
1072 + priv->phy_t_status |= BIT(phy);
1073 + }
1074 +
1075 + mdiobus_write(bus, phy, 0x0, 0x1840);
1076 +}
1077 +
1078 +static void
1079 +ar40xx_psgmii_all_phy_testing(struct ar40xx_priv *priv)
1080 +{
1081 + int phy, j;
1082 + struct mii_bus *bus = priv->mii_bus;
1083 +
1084 + mdiobus_write(bus, 0x1f, 0x0, 0x9000);
1085 + mdiobus_write(bus, 0x1f, 0x0, 0x4140);
1086 +
1087 + for (j = 0; j < AR40XX_PSGMII_CALB_NUM; j++) {
1088 + for (phy = 0; phy < AR40XX_NUM_PORTS - 1; phy++) {
1089 + u16 status;
1090 +
1091 + status = mdiobus_read(bus, phy, 0x11);
1092 + if (!(status & BIT(10)))
1093 + break;
1094 + }
1095 +
1096 + if (phy >= (AR40XX_NUM_PORTS - 1))
1097 + break;
1098 +		/* The polling interval to check if the PHY link is up or not */
1099 + mdelay(8);
1100 + }
1101 + /* enable check */
1102 + ar40xx_phy_mmd_write(priv, 0x1f, 7, 0x8029, 0x0000);
1103 + ar40xx_phy_mmd_write(priv, 0x1f, 7, 0x8029, 0x0003);
1104 +
1105 + /* start traffic */
1106 + ar40xx_phy_mmd_write(priv, 0x1f, 7, 0x8020, 0xa000);
1107 + /* wait for all traffic end
1108 + * 4096(pkt num)*1524(size)*8ns(125MHz)=49.9ms
1109 + */
1110 + mdelay(50);
1111 +
1112 + for (phy = 0; phy < AR40XX_NUM_PORTS - 1; phy++) {
1113 + u32 tx_ok, tx_error;
1114 + u32 rx_ok, rx_error;
1115 + u32 tx_ok_high16;
1116 + u32 rx_ok_high16;
1117 + u32 tx_all_ok, rx_all_ok;
1118 +
1119 + /* check counter */
1120 + tx_ok = ar40xx_phy_mmd_read(priv, phy, 7, 0x802e);
1121 + tx_ok_high16 = ar40xx_phy_mmd_read(priv, phy, 7, 0x802d);
1122 + tx_error = ar40xx_phy_mmd_read(priv, phy, 7, 0x802f);
1123 + rx_ok = ar40xx_phy_mmd_read(priv, phy, 7, 0x802b);
1124 + rx_ok_high16 = ar40xx_phy_mmd_read(priv, phy, 7, 0x802a);
1125 + rx_error = ar40xx_phy_mmd_read(priv, phy, 7, 0x802c);
1126 + tx_all_ok = tx_ok + (tx_ok_high16<<16);
1127 + rx_all_ok = rx_ok + (rx_ok_high16<<16);
1128 + if (tx_all_ok == 0x1000 && tx_error == 0) {
1129 + /* success */
1130 + priv->phy_t_status &= ~BIT(phy + 8);
1131 + } else {
1132 + pr_info("PHY%d test see issue!\n", phy);
1133 + priv->phy_t_status |= BIT(phy + 8);
1134 + }
1135 + }
1136 +
1137 + pr_debug("PHY all test 0x%x \r\n", priv->phy_t_status);
1138 +}
1139 +
1140 +void
1141 +ar40xx_psgmii_self_test(struct ar40xx_priv *priv)
1142 +{
1143 + u32 i, phy;
1144 + struct mii_bus *bus = priv->mii_bus;
1145 +
1146 + ar40xx_malibu_psgmii_ess_reset(priv);
1147 +
1148 + /* switch to access MII reg for copper */
1149 + mdiobus_write(bus, 4, 0x1f, 0x8500);
1150 + for (phy = 0; phy < AR40XX_NUM_PORTS - 1; phy++) {
1151 + /*enable phy mdio broadcast write*/
1152 + ar40xx_phy_mmd_write(priv, phy, 7, 0x8028, 0x801f);
1153 + }
1154 + /* force no link by power down */
1155 + mdiobus_write(bus, 0x1f, 0x0, 0x1840);
1156 + /*packet number*/
1157 + ar40xx_phy_mmd_write(priv, 0x1f, 7, 0x8021, 0x1000);
1158 + ar40xx_phy_mmd_write(priv, 0x1f, 7, 0x8062, 0x05e0);
1159 +
1160 + /*fix mdi status */
1161 + mdiobus_write(bus, 0x1f, 0x10, 0x6800);
1162 + for (i = 0; i < AR40XX_PSGMII_CALB_NUM; i++) {
1163 + priv->phy_t_status = 0;
1164 +
1165 + for (phy = 0; phy < AR40XX_NUM_PORTS - 1; phy++) {
1166 + ar40xx_rmw(priv, AR40XX_REG_PORT_LOOKUP(phy + 1),
1167 + AR40XX_PORT_LOOKUP_LOOPBACK,
1168 + AR40XX_PORT_LOOKUP_LOOPBACK);
1169 + }
1170 +
1171 + for (phy = 0; phy < AR40XX_NUM_PORTS - 1; phy++)
1172 + ar40xx_psgmii_single_phy_testing(priv, phy);
1173 +
1174 + ar40xx_psgmii_all_phy_testing(priv);
1175 +
1176 + if (priv->phy_t_status)
1177 + ar40xx_malibu_psgmii_ess_reset(priv);
1178 + else
1179 + break;
1180 + }
1181 +
1182 + if (i >= AR40XX_PSGMII_CALB_NUM)
1183 + pr_info("PSGMII cannot recover\n");
1184 + else
1185 + pr_debug("PSGMII recovered after %d times reset\n", i);
1186 +
1187 + /* configuration recover */
1188 + /* packet number */
1189 + ar40xx_phy_mmd_write(priv, 0x1f, 7, 0x8021, 0x0);
1190 + /* disable check */
1191 + ar40xx_phy_mmd_write(priv, 0x1f, 7, 0x8029, 0x0);
1192 + /* disable traffic */
1193 + ar40xx_phy_mmd_write(priv, 0x1f, 7, 0x8020, 0x0);
1194 +}
1195 +
1196 +void
1197 +ar40xx_psgmii_self_test_clean(struct ar40xx_priv *priv)
1198 +{
1199 + int phy;
1200 + struct mii_bus *bus = priv->mii_bus;
1201 +
1202 + /* disable phy internal loopback */
1203 + mdiobus_write(bus, 0x1f, 0x10, 0x6860);
1204 + mdiobus_write(bus, 0x1f, 0x0, 0x9040);
1205 +
1206 + for (phy = 0; phy < AR40XX_NUM_PORTS - 1; phy++) {
1207 + /* disable mac loop back */
1208 + ar40xx_rmw(priv, AR40XX_REG_PORT_LOOKUP(phy + 1),
1209 + AR40XX_PORT_LOOKUP_LOOPBACK, 0);
1210 + /* disable phy mdio broadcast write */
1211 + ar40xx_phy_mmd_write(priv, phy, 7, 0x8028, 0x001f);
1212 + }
1213 +
1214 + /* clear fdb entry */
1215 + ar40xx_atu_flush(priv);
1216 +}
1217 +
1218 +/* End of psgmii self test */
1219 +
1220 +static void
1221 +ar40xx_mac_mode_init(struct ar40xx_priv *priv, u32 mode)
1222 +{
1223 + if (mode == PORT_WRAPPER_PSGMII) {
1224 + ar40xx_psgmii_write(priv, AR40XX_PSGMII_MODE_CONTROL, 0x2200);
1225 + ar40xx_psgmii_write(priv, AR40XX_PSGMIIPHY_TX_CONTROL, 0x8380);
1226 + }
1227 +}
1228 +
1229 +static
1230 +int ar40xx_cpuport_setup(struct ar40xx_priv *priv)
1231 +{
1232 + u32 t;
1233 +
1234 + t = AR40XX_PORT_STATUS_TXFLOW |
1235 + AR40XX_PORT_STATUS_RXFLOW |
1236 + AR40XX_PORT_TXHALF_FLOW |
1237 + AR40XX_PORT_DUPLEX |
1238 + AR40XX_PORT_SPEED_1000M;
1239 + ar40xx_write(priv, AR40XX_REG_PORT_STATUS(0), t);
1240 + usleep_range(10, 20);
1241 +
1242 + t |= AR40XX_PORT_TX_EN |
1243 + AR40XX_PORT_RX_EN;
1244 + ar40xx_write(priv, AR40XX_REG_PORT_STATUS(0), t);
1245 +
1246 + return 0;
1247 +}
1248 +
1249 +static void
1250 +ar40xx_init_port(struct ar40xx_priv *priv, int port)
1251 +{
1252 + u32 t;
1253 +
1254 + ar40xx_rmw(priv, AR40XX_REG_PORT_STATUS(port),
1255 + AR40XX_PORT_AUTO_LINK_EN, 0);
1256 +
1257 + ar40xx_write(priv, AR40XX_REG_PORT_HEADER(port), 0);
1258 +
1259 + ar40xx_write(priv, AR40XX_REG_PORT_VLAN0(port), 0);
1260 +
1261 + t = AR40XX_PORT_VLAN1_OUT_MODE_UNTOUCH << AR40XX_PORT_VLAN1_OUT_MODE_S;
1262 + ar40xx_write(priv, AR40XX_REG_PORT_VLAN1(port), t);
1263 +
1264 + t = AR40XX_PORT_LOOKUP_LEARN;
1265 + t |= AR40XX_PORT_STATE_FORWARD << AR40XX_PORT_LOOKUP_STATE_S;
1266 + ar40xx_write(priv, AR40XX_REG_PORT_LOOKUP(port), t);
1267 +}
1268 +
1269 +void
1270 +ar40xx_init_globals(struct ar40xx_priv *priv)
1271 +{
1272 + u32 t;
1273 +
1274 + /* enable CPU port and disable mirror port */
1275 + t = AR40XX_FWD_CTRL0_CPU_PORT_EN |
1276 + AR40XX_FWD_CTRL0_MIRROR_PORT;
1277 + ar40xx_write(priv, AR40XX_REG_FWD_CTRL0, t);
1278 +
1279 + /* forward multicast and broadcast frames to CPU */
1280 + t = (AR40XX_PORTS_ALL << AR40XX_FWD_CTRL1_UC_FLOOD_S) |
1281 + (AR40XX_PORTS_ALL << AR40XX_FWD_CTRL1_MC_FLOOD_S) |
1282 + (AR40XX_PORTS_ALL << AR40XX_FWD_CTRL1_BC_FLOOD_S);
1283 + ar40xx_write(priv, AR40XX_REG_FWD_CTRL1, t);
1284 +
1285 + /* enable jumbo frames */
1286 + ar40xx_rmw(priv, AR40XX_REG_MAX_FRAME_SIZE,
1287 + AR40XX_MAX_FRAME_SIZE_MTU, 9018 + 8 + 2);
1288 +
1289 + /* Enable MIB counters */
1290 + ar40xx_rmw(priv, AR40XX_REG_MODULE_EN, 0,
1291 + AR40XX_MODULE_EN_MIB);
1292 +
1293 + /* Disable AZ */
1294 + ar40xx_write(priv, AR40XX_REG_EEE_CTRL, 0);
1295 +
1296 + /* set flowctrl threshold for cpu port */
1297 + t = (AR40XX_PORT0_FC_THRESH_ON_DFLT << 16) |
1298 + AR40XX_PORT0_FC_THRESH_OFF_DFLT;
1299 + ar40xx_write(priv, AR40XX_REG_PORT_FLOWCTRL_THRESH(0), t);
1300 +}
1301 +
1302 +static void
1303 +ar40xx_malibu_init(struct ar40xx_priv *priv)
1304 +{
1305 + int i;
1306 + struct mii_bus *bus;
1307 + u16 val;
1308 +
1309 + bus = priv->mii_bus;
1310 +
1311 + /* war to enable AZ transmitting ability */
1312 + ar40xx_phy_mmd_write(priv, AR40XX_PSGMII_ID, 1,
1313 + AR40XX_MALIBU_PSGMII_MODE_CTRL,
1314 + AR40XX_MALIBU_PHY_PSGMII_MODE_CTRL_ADJUST_VAL);
1315 + for (i = 0; i < AR40XX_NUM_PORTS - 1; i++) {
1316 + /* change malibu control_dac */
1317 + val = ar40xx_phy_mmd_read(priv, i, 7,
1318 + AR40XX_MALIBU_PHY_MMD7_DAC_CTRL);
1319 + val &= ~AR40XX_MALIBU_DAC_CTRL_MASK;
1320 + val |= AR40XX_MALIBU_DAC_CTRL_VALUE;
1321 + ar40xx_phy_mmd_write(priv, i, 7,
1322 + AR40XX_MALIBU_PHY_MMD7_DAC_CTRL, val);
1323 + if (i == AR40XX_MALIBU_PHY_LAST_ADDR) {
1324 + /* to avoid going into hibernation */
1325 + val = ar40xx_phy_mmd_read(priv, i, 3,
1326 + AR40XX_MALIBU_PHY_RLP_CTRL);
1327 + val &= (~(1<<1));
1328 + ar40xx_phy_mmd_write(priv, i, 3,
1329 + AR40XX_MALIBU_PHY_RLP_CTRL, val);
1330 + }
1331 + }
1332 +
1333 + /* adjust psgmii serdes tx amp */
1334 + mdiobus_write(bus, AR40XX_PSGMII_ID, AR40XX_PSGMII_TX_DRIVER_1_CTRL,
1335 + AR40XX_MALIBU_PHY_PSGMII_REDUCE_SERDES_TX_AMP);
1336 +}
1337 +
1338 +static int
1339 +ar40xx_hw_init(struct ar40xx_priv *priv)
1340 +{
1341 + u32 i;
1342 +
1343 + ar40xx_ess_reset(priv);
1344 +
1345 + if (priv->mii_bus)
1346 + ar40xx_malibu_init(priv);
1347 + else
1348 + return -1;
1349 +
1350 + ar40xx_psgmii_self_test(priv);
1351 + ar40xx_psgmii_self_test_clean(priv);
1352 +
1353 + ar40xx_mac_mode_init(priv, priv->mac_mode);
1354 +
1355 + for (i = 0; i < priv->dev.ports; i++)
1356 + ar40xx_init_port(priv, i);
1357 +
1358 + ar40xx_init_globals(priv);
1359 +
1360 + return 0;
1361 +}
1362 +
1363 +/* Start of qm error WAR */
1364 +
1365 +static
1366 +int ar40xx_force_1g_full(struct ar40xx_priv *priv, u32 port_id)
1367 +{
1368 + u32 reg;
1369 +
1370 + if (port_id < 0 || port_id > 6)
1371 + return -1;
1372 +
1373 + reg = AR40XX_REG_PORT_STATUS(port_id);
1374 + return ar40xx_rmw(priv, reg, AR40XX_PORT_SPEED,
1375 + (AR40XX_PORT_SPEED_1000M | AR40XX_PORT_DUPLEX));
1376 +}
1377 +
1378 +static
1379 +int ar40xx_get_qm_status(struct ar40xx_priv *priv,
1380 + u32 port_id, u32 *qm_buffer_err)
1381 +{
1382 + u32 reg;
1383 + u32 qm_val;
1384 +
1385 + if (port_id < 1 || port_id > 5) {
1386 + *qm_buffer_err = 0;
1387 + return -1;
1388 + }
1389 +
1390 + if (port_id < 4) {
1391 + reg = AR40XX_REG_QM_PORT0_3_QNUM;
1392 + ar40xx_write(priv, AR40XX_REG_QM_DEBUG_ADDR, reg);
1393 + qm_val = ar40xx_read(priv, AR40XX_REG_QM_DEBUG_VALUE);
1394 + /* every 8 bits for each port */
1395 + *qm_buffer_err = (qm_val >> (port_id * 8)) & 0xFF;
1396 + } else {
1397 + reg = AR40XX_REG_QM_PORT4_6_QNUM;
1398 + ar40xx_write(priv, AR40XX_REG_QM_DEBUG_ADDR, reg);
1399 + qm_val = ar40xx_read(priv, AR40XX_REG_QM_DEBUG_VALUE);
1400 + /* every 8 bits for each port */
1401 + *qm_buffer_err = (qm_val >> ((port_id-4) * 8)) & 0xFF;
1402 + }
1403 +
1404 + return 0;
1405 +}
1406 +
1407 +static void
1408 +ar40xx_sw_mac_polling_task(struct ar40xx_priv *priv)
1409 +{
1410 + static int task_count;
1411 + u32 i;
1412 + u32 reg, value;
1413 + u32 link, speed, duplex;
1414 + u32 qm_buffer_err;
1415 + u16 port_phy_status[AR40XX_NUM_PORTS];
1416 + static u32 qm_err_cnt[AR40XX_NUM_PORTS] = {0, 0, 0, 0, 0, 0};
1417 + static u32 link_cnt[AR40XX_NUM_PORTS] = {0, 0, 0, 0, 0, 0};
1418 + struct mii_bus *bus = NULL;
1419 +
1420 + if (!priv || !priv->mii_bus)
1421 + return;
1422 +
1423 + bus = priv->mii_bus;
1424 +
1425 + ++task_count;
1426 +
1427 + for (i = 1; i < AR40XX_NUM_PORTS; ++i) {
1428 + port_phy_status[i] =
1429 + mdiobus_read(bus, i-1, AR40XX_PHY_SPEC_STATUS);
1430 + speed = link = duplex = port_phy_status[i];
1431 + speed &= AR40XX_PHY_SPEC_STATUS_SPEED;
1432 + speed >>= 14;
1433 + link &= AR40XX_PHY_SPEC_STATUS_LINK;
1434 + link >>= 10;
1435 + duplex &= AR40XX_PHY_SPEC_STATUS_DUPLEX;
1436 + duplex >>= 13;
1437 +
1438 + if (link != priv->ar40xx_port_old_link[i]) {
1439 + ++link_cnt[i];
1440 + /* Up --> Down */
1441 + if ((priv->ar40xx_port_old_link[i] ==
1442 + AR40XX_PORT_LINK_UP) &&
1443 + (link == AR40XX_PORT_LINK_DOWN)) {
1444 + /* LINK_EN disable(MAC force mode)*/
1445 + reg = AR40XX_REG_PORT_STATUS(i);
1446 + ar40xx_rmw(priv, reg,
1447 + AR40XX_PORT_AUTO_LINK_EN, 0);
1448 +
1449 + /* Check queue buffer */
1450 + qm_err_cnt[i] = 0;
1451 + ar40xx_get_qm_status(priv, i, &qm_buffer_err);
1452 + if (qm_buffer_err) {
1453 + priv->ar40xx_port_qm_buf[i] =
1454 + AR40XX_QM_NOT_EMPTY;
1455 + } else {
1456 + u16 phy_val = 0;
1457 +
1458 + priv->ar40xx_port_qm_buf[i] =
1459 + AR40XX_QM_EMPTY;
1460 + ar40xx_force_1g_full(priv, i);
1461 + /* Ref: QCA8337 Datasheet, clearing
1462 + * MANU_CTRL_EN prevents the PHY
1463 + * from getting stuck in 100BT mode
1464 + * when bringing up the link
1465 + */
1466 + ar40xx_phy_dbg_read(priv, i-1,
1467 + AR40XX_PHY_DEBUG_0,
1468 + &phy_val);
1469 + phy_val &= (~AR40XX_PHY_MANU_CTRL_EN);
1470 + ar40xx_phy_dbg_write(priv, i-1,
1471 + AR40XX_PHY_DEBUG_0,
1472 + phy_val);
1473 + }
1474 + priv->ar40xx_port_old_link[i] = link;
1475 + } else if ((priv->ar40xx_port_old_link[i] ==
1476 + AR40XX_PORT_LINK_DOWN) &&
1477 + (link == AR40XX_PORT_LINK_UP)) {
1478 + /* Down --> Up */
1479 + if (priv->port_link_up[i] < 1) {
1480 + ++priv->port_link_up[i];
1481 + } else {
1482 + /* Change port status */
1483 + reg = AR40XX_REG_PORT_STATUS(i);
1484 + value = ar40xx_read(priv, reg);
1485 + priv->port_link_up[i] = 0;
1486 +
1487 + value &= ~(AR40XX_PORT_DUPLEX |
1488 + AR40XX_PORT_SPEED);
1489 + value |= speed | (duplex ? BIT(6) : 0);
1490 + ar40xx_write(priv, reg, value);
1491 + /* clock switch need such time
1492 + * to avoid glitch
1493 + */
1494 + usleep_range(100, 200);
1495 +
1496 + value |= AR40XX_PORT_AUTO_LINK_EN;
1497 + ar40xx_write(priv, reg, value);
1498 + /* HW need such time to make sure link
1499 + * stable before enable MAC
1500 + */
1501 + usleep_range(100, 200);
1502 +
1503 + if (speed == AR40XX_PORT_SPEED_100M) {
1504 + u16 phy_val = 0;
1505 + /* Enable @100M, if down to 10M
1506 + * clock will change smoothly
1507 + */
1508 + ar40xx_phy_dbg_read(priv, i-1,
1509 + 0,
1510 + &phy_val);
1511 + phy_val |=
1512 + AR40XX_PHY_MANU_CTRL_EN;
1513 + ar40xx_phy_dbg_write(priv, i-1,
1514 + 0,
1515 + phy_val);
1516 + }
1517 + priv->ar40xx_port_old_link[i] = link;
1518 + }
1519 + }
1520 + }
1521 +
1522 + if (priv->ar40xx_port_qm_buf[i] == AR40XX_QM_NOT_EMPTY) {
1523 + /* Check QM */
1524 + ar40xx_get_qm_status(priv, i, &qm_buffer_err);
1525 + if (qm_buffer_err) {
1526 + ++qm_err_cnt[i];
1527 + } else {
1528 + priv->ar40xx_port_qm_buf[i] =
1529 + AR40XX_QM_EMPTY;
1530 + qm_err_cnt[i] = 0;
1531 + ar40xx_force_1g_full(priv, i);
1532 + }
1533 + }
1534 + }
1535 +}
1536 +
1537 +static void
1538 +ar40xx_qm_err_check_work_task(struct work_struct *work)
1539 +{
1540 + struct ar40xx_priv *priv = container_of(work, struct ar40xx_priv,
1541 + qm_dwork.work);
1542 +
1543 + mutex_lock(&priv->qm_lock);
1544 +
1545 + ar40xx_sw_mac_polling_task(priv);
1546 +
1547 + mutex_unlock(&priv->qm_lock);
1548 +
1549 + schedule_delayed_work(&priv->qm_dwork,
1550 + msecs_to_jiffies(AR40XX_QM_WORK_DELAY));
1551 +}
1552 +
1553 +static int
1554 +ar40xx_qm_err_check_work_start(struct ar40xx_priv *priv)
1555 +{
1556 + mutex_init(&priv->qm_lock);
1557 +
1558 + INIT_DELAYED_WORK(&priv->qm_dwork, ar40xx_qm_err_check_work_task);
1559 +
1560 + schedule_delayed_work(&priv->qm_dwork,
1561 + msecs_to_jiffies(AR40XX_QM_WORK_DELAY));
1562 +
1563 + return 0;
1564 +}
1565 +
1566 +/* End of qm error WAR */
1567 +
1568 +static int
1569 +ar40xx_vlan_init(struct ar40xx_priv *priv)
1570 +{
1571 + int port;
1572 + unsigned long bmp;
1573 +
1574 + /* By default Enable VLAN */
1575 + priv->vlan = 1;
1576 + priv->vlan_table[AR40XX_LAN_VLAN] = priv->cpu_bmp | priv->lan_bmp;
1577 + priv->vlan_table[AR40XX_WAN_VLAN] = priv->cpu_bmp | priv->wan_bmp;
1578 + priv->vlan_tagged = priv->cpu_bmp;
1579 + bmp = priv->lan_bmp;
1580 + for_each_set_bit(port, &bmp, AR40XX_NUM_PORTS)
1581 + priv->pvid[port] = AR40XX_LAN_VLAN;
1582 +
1583 + bmp = priv->wan_bmp;
1584 + for_each_set_bit(port, &bmp, AR40XX_NUM_PORTS)
1585 + priv->pvid[port] = AR40XX_WAN_VLAN;
1586 +
1587 + return 0;
1588 +}
1589 +
1590 +static void
1591 +ar40xx_mib_work_func(struct work_struct *work)
1592 +{
1593 + struct ar40xx_priv *priv;
1594 + int err;
1595 +
1596 + priv = container_of(work, struct ar40xx_priv, mib_work.work);
1597 +
1598 + mutex_lock(&priv->mib_lock);
1599 +
1600 + err = ar40xx_mib_capture(priv);
1601 + if (err)
1602 + goto next_port;
1603 +
1604 + ar40xx_mib_fetch_port_stat(priv, priv->mib_next_port, false);
1605 +
1606 +next_port:
1607 + priv->mib_next_port++;
1608 + if (priv->mib_next_port >= priv->dev.ports)
1609 + priv->mib_next_port = 0;
1610 +
1611 + mutex_unlock(&priv->mib_lock);
1612 +
1613 + schedule_delayed_work(&priv->mib_work,
1614 + msecs_to_jiffies(AR40XX_MIB_WORK_DELAY));
1615 +}
1616 +
1617 +static void
1618 +ar40xx_setup_port(struct ar40xx_priv *priv, int port, u32 members)
1619 +{
1620 + u32 t;
1621 + u32 egress, ingress;
1622 + u32 pvid = priv->vlan_id[priv->pvid[port]];
1623 +
1624 + if (priv->vlan) {
1625 + egress = AR40XX_PORT_VLAN1_OUT_MODE_UNMOD;
1626 + ingress = AR40XX_IN_SECURE;
1627 + } else {
1628 + egress = AR40XX_PORT_VLAN1_OUT_MODE_UNTOUCH;
1629 + ingress = AR40XX_IN_PORT_ONLY;
1630 + }
1631 +
1632 + t = pvid << AR40XX_PORT_VLAN0_DEF_SVID_S;
1633 + t |= pvid << AR40XX_PORT_VLAN0_DEF_CVID_S;
1634 + ar40xx_write(priv, AR40XX_REG_PORT_VLAN0(port), t);
1635 +
1636 + t = AR40XX_PORT_VLAN1_PORT_VLAN_PROP;
1637 + t |= egress << AR40XX_PORT_VLAN1_OUT_MODE_S;
1638 + ar40xx_write(priv, AR40XX_REG_PORT_VLAN1(port), t);
1639 +
1640 + t = members;
1641 + t |= AR40XX_PORT_LOOKUP_LEARN;
1642 + t |= ingress << AR40XX_PORT_LOOKUP_IN_MODE_S;
1643 + t |= AR40XX_PORT_STATE_FORWARD << AR40XX_PORT_LOOKUP_STATE_S;
1644 + ar40xx_write(priv, AR40XX_REG_PORT_LOOKUP(port), t);
1645 +}
1646 +
1647 +static void
1648 +ar40xx_vtu_op(struct ar40xx_priv *priv, u32 op, u32 val)
1649 +{
1650 + if (ar40xx_wait_bit(priv, AR40XX_REG_VTU_FUNC1,
1651 + AR40XX_VTU_FUNC1_BUSY, 0))
1652 + return;
1653 +
1654 + if ((op & AR40XX_VTU_FUNC1_OP) == AR40XX_VTU_FUNC1_OP_LOAD)
1655 + ar40xx_write(priv, AR40XX_REG_VTU_FUNC0, val);
1656 +
1657 + op |= AR40XX_VTU_FUNC1_BUSY;
1658 + ar40xx_write(priv, AR40XX_REG_VTU_FUNC1, op);
1659 +}
1660 +
1661 +static void
1662 +ar40xx_vtu_load_vlan(struct ar40xx_priv *priv, u32 vid, u32 port_mask)
1663 +{
1664 + u32 op;
1665 + u32 val;
1666 + int i;
1667 +
1668 + op = AR40XX_VTU_FUNC1_OP_LOAD | (vid << AR40XX_VTU_FUNC1_VID_S);
1669 + val = AR40XX_VTU_FUNC0_VALID | AR40XX_VTU_FUNC0_IVL;
1670 + for (i = 0; i < AR40XX_NUM_PORTS; i++) {
1671 + u32 mode;
1672 +
1673 + if ((port_mask & BIT(i)) == 0)
1674 + mode = AR40XX_VTU_FUNC0_EG_MODE_NOT;
1675 + else if (priv->vlan == 0)
1676 + mode = AR40XX_VTU_FUNC0_EG_MODE_KEEP;
1677 + else if ((priv->vlan_tagged & BIT(i)) ||
1678 + (priv->vlan_id[priv->pvid[i]] != vid))
1679 + mode = AR40XX_VTU_FUNC0_EG_MODE_TAG;
1680 + else
1681 + mode = AR40XX_VTU_FUNC0_EG_MODE_UNTAG;
1682 +
1683 + val |= mode << AR40XX_VTU_FUNC0_EG_MODE_S(i);
1684 + }
1685 + ar40xx_vtu_op(priv, op, val);
1686 +}
1687 +
1688 +static void
1689 +ar40xx_vtu_flush(struct ar40xx_priv *priv)
1690 +{
1691 + ar40xx_vtu_op(priv, AR40XX_VTU_FUNC1_OP_FLUSH, 0);
1692 +}
1693 +
1694 +static int
1695 +ar40xx_sw_hw_apply(struct switch_dev *dev)
1696 +{
1697 + struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
1698 + u8 portmask[AR40XX_NUM_PORTS];
1699 + int i, j;
1700 +
1701 + mutex_lock(&priv->reg_mutex);
1702 + /* flush all vlan entries */
1703 + ar40xx_vtu_flush(priv);
1704 +
1705 + memset(portmask, 0, sizeof(portmask));
1706 + if (priv->vlan) {
1707 + for (j = 0; j < AR40XX_MAX_VLANS; j++) {
1708 + u8 vp = priv->vlan_table[j];
1709 +
1710 + if (!vp)
1711 + continue;
1712 +
1713 + for (i = 0; i < dev->ports; i++) {
1714 + u8 mask = BIT(i);
1715 +
1716 + if (vp & mask)
1717 + portmask[i] |= vp & ~mask;
1718 + }
1719 +
1720 + ar40xx_vtu_load_vlan(priv, priv->vlan_id[j],
1721 + priv->vlan_table[j]);
1722 + }
1723 + } else {
1724 + /* 8021q vlan disabled */
1725 + for (i = 0; i < dev->ports; i++) {
1726 + if (i == AR40XX_PORT_CPU)
1727 + continue;
1728 +
1729 + portmask[i] = BIT(AR40XX_PORT_CPU);
1730 + portmask[AR40XX_PORT_CPU] |= BIT(i);
1731 + }
1732 + }
1733 +
1734 + /* update the port destination mask registers and tag settings */
1735 + for (i = 0; i < dev->ports; i++)
1736 + ar40xx_setup_port(priv, i, portmask[i]);
1737 +
1738 + ar40xx_set_mirror_regs(priv);
1739 +
1740 + mutex_unlock(&priv->reg_mutex);
1741 + return 0;
1742 +}
1743 +
1744 +static int
1745 +ar40xx_sw_reset_switch(struct switch_dev *dev)
1746 +{
1747 + struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
1748 + int i, rv;
1749 +
1750 + mutex_lock(&priv->reg_mutex);
1751 + memset(&priv->vlan, 0, sizeof(struct ar40xx_priv) -
1752 + offsetof(struct ar40xx_priv, vlan));
1753 +
1754 + for (i = 0; i < AR40XX_MAX_VLANS; i++)
1755 + priv->vlan_id[i] = i;
1756 +
1757 + ar40xx_vlan_init(priv);
1758 +
1759 + priv->mirror_rx = false;
1760 + priv->mirror_tx = false;
1761 + priv->source_port = 0;
1762 + priv->monitor_port = 0;
1763 +
1764 + mutex_unlock(&priv->reg_mutex);
1765 +
1766 + rv = ar40xx_sw_hw_apply(dev);
1767 + return rv;
1768 +}
1769 +
1770 +static int
1771 +ar40xx_start(struct ar40xx_priv *priv)
1772 +{
1773 + int ret;
1774 +
1775 + ret = ar40xx_hw_init(priv);
1776 + if (ret)
1777 + return ret;
1778 +
1779 + ret = ar40xx_sw_reset_switch(&priv->dev);
1780 + if (ret)
1781 + return ret;
1782 +
1783 + /* at last, setup cpu port */
1784 + ret = ar40xx_cpuport_setup(priv);
1785 + if (ret)
1786 + return ret;
1787 +
1788 + schedule_delayed_work(&priv->mib_work,
1789 + msecs_to_jiffies(AR40XX_MIB_WORK_DELAY));
1790 +
1791 + ar40xx_qm_err_check_work_start(priv);
1792 +
1793 + return 0;
1794 +}
1795 +
1796 +static const struct switch_dev_ops ar40xx_sw_ops = {
1797 + .attr_global = {
1798 + .attr = ar40xx_sw_attr_globals,
1799 + .n_attr = ARRAY_SIZE(ar40xx_sw_attr_globals),
1800 + },
1801 + .attr_port = {
1802 + .attr = ar40xx_sw_attr_port,
1803 + .n_attr = ARRAY_SIZE(ar40xx_sw_attr_port),
1804 + },
1805 + .attr_vlan = {
1806 + .attr = ar40xx_sw_attr_vlan,
1807 + .n_attr = ARRAY_SIZE(ar40xx_sw_attr_vlan),
1808 + },
1809 + .get_port_pvid = ar40xx_sw_get_pvid,
1810 + .set_port_pvid = ar40xx_sw_set_pvid,
1811 + .get_vlan_ports = ar40xx_sw_get_ports,
1812 + .set_vlan_ports = ar40xx_sw_set_ports,
1813 + .apply_config = ar40xx_sw_hw_apply,
1814 + .reset_switch = ar40xx_sw_reset_switch,
1815 + .get_port_link = ar40xx_sw_get_port_link,
1816 +};
1817 +
1818 +/* Start of phy driver support */
1819 +
1820 +static const u32 ar40xx_phy_ids[] = {
1821 + 0x004dd0b1,
1822 + 0x004dd0b2, /* AR40xx */
1823 +};
1824 +
1825 +static bool
1826 +ar40xx_phy_match(u32 phy_id)
1827 +{
1828 + int i;
1829 +
1830 + for (i = 0; i < ARRAY_SIZE(ar40xx_phy_ids); i++)
1831 + if (phy_id == ar40xx_phy_ids[i])
1832 + return true;
1833 +
1834 + return false;
1835 +}
1836 +
1837 +static bool
1838 +is_ar40xx_phy(struct mii_bus *bus)
1839 +{
1840 + unsigned i;
1841 +
1842 + for (i = 0; i < 4; i++) {
1843 + u32 phy_id;
1844 +
1845 + phy_id = mdiobus_read(bus, i, MII_PHYSID1) << 16;
1846 + phy_id |= mdiobus_read(bus, i, MII_PHYSID2);
1847 + if (!ar40xx_phy_match(phy_id))
1848 + return false;
1849 + }
1850 +
1851 + return true;
1852 +}
1853 +
1854 +static int
1855 +ar40xx_phy_probe(struct phy_device *phydev)
1856 +{
1857 + if (!is_ar40xx_phy(phydev->mdio.bus))
1858 + return -ENODEV;
1859 +
1860 + ar40xx_priv->mii_bus = phydev->mdio.bus;
1861 + phydev->priv = ar40xx_priv;
1862 + if (phydev->mdio.addr == 0)
1863 + ar40xx_priv->phy = phydev;
1864 +
1865 + phydev->supported |= SUPPORTED_1000baseT_Full;
1866 + phydev->advertising |= ADVERTISED_1000baseT_Full;
1867 + return 0;
1868 +}
1869 +
1870 +static void
1871 +ar40xx_phy_remove(struct phy_device *phydev)
1872 +{
1873 + ar40xx_priv->mii_bus = NULL;
1874 + phydev->priv = NULL;
1875 +}
1876 +
1877 +static int
1878 +ar40xx_phy_config_init(struct phy_device *phydev)
1879 +{
1880 + return 0;
1881 +}
1882 +
1883 +static int
1884 +ar40xx_phy_read_status(struct phy_device *phydev)
1885 +{
1886 + if (phydev->mdio.addr != 0)
1887 + return genphy_read_status(phydev);
1888 +
1889 + return 0;
1890 +}
1891 +
1892 +static int
1893 +ar40xx_phy_config_aneg(struct phy_device *phydev)
1894 +{
1895 + if (phydev->mdio.addr == 0)
1896 + return 0;
1897 +
1898 + return genphy_config_aneg(phydev);
1899 +}
1900 +
1901 +static struct phy_driver ar40xx_phy_driver = {
1902 + .phy_id = 0x004d0000,
1903 + .name = "QCA Malibu",
1904 + .phy_id_mask = 0xffff0000,
1905 + .features = PHY_BASIC_FEATURES,
1906 + .probe = ar40xx_phy_probe,
1907 + .remove = ar40xx_phy_remove,
1908 + .config_init = ar40xx_phy_config_init,
1909 + .config_aneg = ar40xx_phy_config_aneg,
1910 + .read_status = ar40xx_phy_read_status,
1911 +};
1912 +
1913 +static uint16_t ar40xx_gpio_get_phy(unsigned int offset)
1914 +{
1915 + return offset / 4;
1916 +}
1917 +
1918 +static uint16_t ar40xx_gpio_get_reg(unsigned int offset)
1919 +{
1920 + return 0x8074 + offset % 4;
1921 +}
1922 +
1923 +static void ar40xx_gpio_set(struct gpio_chip *gc, unsigned int offset,
1924 + int value)
1925 +{
1926 + struct ar40xx_priv *priv = gpiochip_get_data(gc);
1927 +
1928 + ar40xx_phy_mmd_write(priv, ar40xx_gpio_get_phy(offset), 0x7,
1929 + ar40xx_gpio_get_reg(offset),
1930 + value ? 0xA000 : 0x8000);
1931 +}
1932 +
1933 +static int ar40xx_gpio_get(struct gpio_chip *gc, unsigned offset)
1934 +{
1935 + struct ar40xx_priv *priv = gpiochip_get_data(gc);
1936 +
1937 + return ar40xx_phy_mmd_read(priv, ar40xx_gpio_get_phy(offset), 0x7,
1938 + ar40xx_gpio_get_reg(offset)) == 0xA000;
1939 +}
1940 +
1941 +static int ar40xx_gpio_get_dir(struct gpio_chip *gc, unsigned offset)
1942 +{
1943 + return 0; /* only out direction */
1944 +}
1945 +
1946 +static int ar40xx_gpio_dir_out(struct gpio_chip *gc, unsigned offset,
1947 + int value)
1948 +{
1949 + /*
1950 + * the direction out value is used to set the initial value.
1951 + * support of this function is required by leds-gpio.c
1952 + */
1953 + ar40xx_gpio_set(gc, offset, value);
1954 + return 0;
1955 +}
1956 +
1957 +static void ar40xx_register_gpio(struct device *pdev,
1958 + struct ar40xx_priv *priv,
1959 + struct device_node *switch_node)
1960 +{
1961 + struct gpio_chip *gc;
1962 + int err;
1963 +
1964 + gc = devm_kzalloc(pdev, sizeof(*gc), GFP_KERNEL);
1965 + if (!gc)
1966 + return;
1967 +
1968 + gc->label = "ar40xx_gpio",
1969 + gc->base = -1,
1970 + gc->ngpio = 5 /* mmd 0 - 4 */ * 4 /* 0x8074 - 0x8077 */,
1971 + gc->parent = pdev;
1972 + gc->owner = THIS_MODULE;
1973 +
1974 + gc->get_direction = ar40xx_gpio_get_dir;
1975 + gc->direction_output = ar40xx_gpio_dir_out;
1976 + gc->get = ar40xx_gpio_get;
1977 + gc->set = ar40xx_gpio_set;
1978 + gc->can_sleep = true;
1979 + gc->label = priv->dev.name;
1980 + gc->of_node = switch_node;
1981 +
1982 + err = devm_gpiochip_add_data(pdev, gc, priv);
1983 + if (err != 0)
1984 + dev_err(pdev, "Failed to register gpio %d.\n", err);
1985 +}
1986 +
1987 +/* End of phy driver support */
1988 +
1989 +/* Platform driver probe function */
1990 +
1991 +static int ar40xx_probe(struct platform_device *pdev)
1992 +{
1993 + struct device_node *switch_node;
1994 + struct device_node *psgmii_node;
1995 + const __be32 *mac_mode;
1996 + struct clk *ess_clk;
1997 + struct switch_dev *swdev;
1998 + struct ar40xx_priv *priv;
1999 + u32 len;
2000 + u32 num_mibs;
2001 + struct resource psgmii_base = {0};
2002 + struct resource switch_base = {0};
2003 + int ret;
2004 +
2005 + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
2006 + if (!priv)
2007 + return -ENOMEM;
2008 +
2009 + platform_set_drvdata(pdev, priv);
2010 + ar40xx_priv = priv;
2011 +
2012 + switch_node = of_node_get(pdev->dev.of_node);
2013 + if (of_address_to_resource(switch_node, 0, &switch_base) != 0)
2014 + return -EIO;
2015 +
2016 + priv->hw_addr = devm_ioremap_resource(&pdev->dev, &switch_base);
2017 + if (IS_ERR(priv->hw_addr)) {
2018 + dev_err(&pdev->dev, "Failed to ioremap switch_base!\n");
2019 + return PTR_ERR(priv->hw_addr);
2020 + }
2021 +
2022 + /*psgmii dts get*/
2023 + psgmii_node = of_find_node_by_name(NULL, "ess-psgmii");
2024 + if (!psgmii_node) {
2025 + dev_err(&pdev->dev, "Failed to find ess-psgmii node!\n");
2026 + return -EINVAL;
2027 + }
2028 +
2029 + if (of_address_to_resource(psgmii_node, 0, &psgmii_base) != 0)
2030 + return -EIO;
2031 +
2032 + priv->psgmii_hw_addr = devm_ioremap_resource(&pdev->dev, &psgmii_base);
2033 + if (IS_ERR(priv->psgmii_hw_addr)) {
2034 + dev_err(&pdev->dev, "psgmii ioremap fail!\n");
2035 + return PTR_ERR(priv->psgmii_hw_addr);
2036 + }
2037 +
2038 + mac_mode = of_get_property(switch_node, "switch_mac_mode", &len);
2039 + if (!mac_mode) {
2040 + dev_err(&pdev->dev, "Failed to read switch_mac_mode\n");
2041 + return -EINVAL;
2042 + }
2043 + priv->mac_mode = be32_to_cpup(mac_mode);
2044 +
2045 + ess_clk = of_clk_get_by_name(switch_node, "ess_clk");
2046 + if (ess_clk)
2047 + clk_prepare_enable(ess_clk);
2048 +
2049 + priv->ess_rst = devm_reset_control_get(&pdev->dev, "ess_rst");
2050 + if (IS_ERR(priv->ess_rst)) {
2051 + dev_err(&pdev->dev, "Failed to get ess_rst control!\n");
2052 + return PTR_ERR(priv->ess_rst);
2053 + }
2054 +
2055 + if (of_property_read_u32(switch_node, "switch_cpu_bmp",
2056 + &priv->cpu_bmp) ||
2057 + of_property_read_u32(switch_node, "switch_lan_bmp",
2058 + &priv->lan_bmp) ||
2059 + of_property_read_u32(switch_node, "switch_wan_bmp",
2060 + &priv->wan_bmp)) {
2061 + dev_err(&pdev->dev, "Failed to read port properties\n");
2062 + return -EIO;
2063 + }
2064 +
2065 + ret = phy_driver_register(&ar40xx_phy_driver, THIS_MODULE);
2066 + if (ret) {
2067 + dev_err(&pdev->dev, "Failed to register ar40xx phy driver!\n");
2068 + return -EIO;
2069 + }
2070 +
2071 + mutex_init(&priv->reg_mutex);
2072 + mutex_init(&priv->mib_lock);
2073 + INIT_DELAYED_WORK(&priv->mib_work, ar40xx_mib_work_func);
2074 +
2075 + /* register switch */
2076 + swdev = &priv->dev;
2077 +
2078 + swdev->alias = dev_name(&priv->mii_bus->dev);
2079 +
2080 + swdev->cpu_port = AR40XX_PORT_CPU;
2081 + swdev->name = "QCA AR40xx";
2082 + swdev->vlans = AR40XX_MAX_VLANS;
2083 + swdev->ports = AR40XX_NUM_PORTS;
2084 + swdev->ops = &ar40xx_sw_ops;
2085 + ret = register_switch(swdev, NULL);
2086 + if (ret)
2087 + goto err_unregister_phy;
2088 +
2089 + num_mibs = ARRAY_SIZE(ar40xx_mibs);
2090 + len = priv->dev.ports * num_mibs *
2091 + sizeof(*priv->mib_stats);
2092 + priv->mib_stats = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
2093 + if (!priv->mib_stats) {
2094 + ret = -ENOMEM;
2095 + goto err_unregister_switch;
2096 + }
2097 +
2098 + ar40xx_start(priv);
2099 +
2100 + if (of_property_read_bool(switch_node, "gpio-controller"))
2101 + ar40xx_register_gpio(&pdev->dev, ar40xx_priv, switch_node);
2102 +
2103 + return 0;
2104 +
2105 +err_unregister_switch:
2106 + unregister_switch(&priv->dev);
2107 +err_unregister_phy:
2108 + phy_driver_unregister(&ar40xx_phy_driver);
2109 + platform_set_drvdata(pdev, NULL);
2110 + return ret;
2111 +}
2112 +
2113 +static int ar40xx_remove(struct platform_device *pdev)
2114 +{
2115 + struct ar40xx_priv *priv = platform_get_drvdata(pdev);
2116 +
2117 + cancel_delayed_work_sync(&priv->qm_dwork);
2118 + cancel_delayed_work_sync(&priv->mib_work);
2119 +
2120 + unregister_switch(&priv->dev);
2121 +
2122 + phy_driver_unregister(&ar40xx_phy_driver);
2123 +
2124 + return 0;
2125 +}
2126 +
2127 +static const struct of_device_id ar40xx_of_mtable[] = {
2128 + {.compatible = "qcom,ess-switch" },
2129 + {}
2130 +};
2131 +
2132 +struct platform_driver ar40xx_drv = {
2133 + .probe = ar40xx_probe,
2134 + .remove = ar40xx_remove,
2135 + .driver = {
2136 + .name = "ar40xx",
2137 + .of_match_table = ar40xx_of_mtable,
2138 + },
2139 +};
2140 +
2141 +module_platform_driver(ar40xx_drv);
2142 +
2143 +MODULE_DESCRIPTION("IPQ40XX ESS driver");
2144 +MODULE_LICENSE("Dual BSD/GPL");
2145 --- /dev/null
2146 +++ b/drivers/net/phy/ar40xx.h
2147 @@ -0,0 +1,337 @@
2148 +/*
2149 + * Copyright (c) 2016, The Linux Foundation. All rights reserved.
2150 + *
2151 + * Permission to use, copy, modify, and/or distribute this software for
2152 + * any purpose with or without fee is hereby granted, provided that the
2153 + * above copyright notice and this permission notice appear in all copies.
2154 + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
2155 + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
2156 + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
2157 + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
2158 + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
2159 + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
2160 + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
2161 + */
2162 +
2163 +#ifndef __AR40XX_H
2164 +#define __AR40XX_H
2165 +
2166 +#define AR40XX_MAX_VLANS 128
2167 +#define AR40XX_NUM_PORTS 6
2168 +#define AR40XX_NUM_PHYS 5
2169 +
2170 +#define BITS(_s, _n) (((1UL << (_n)) - 1) << (_s))
2171 +
2172 +struct ar40xx_priv {
2173 + struct switch_dev dev;
2174 +
2175 + u8 __iomem *hw_addr;
2176 + u8 __iomem *psgmii_hw_addr;
2177 + u32 mac_mode;
2178 + struct reset_control *ess_rst;
2179 + u32 cpu_bmp;
2180 + u32 lan_bmp;
2181 + u32 wan_bmp;
2182 +
2183 + struct mii_bus *mii_bus;
2184 + struct phy_device *phy;
2185 +
2186 + /* mutex for qm task */
2187 + struct mutex qm_lock;
2188 + struct delayed_work qm_dwork;
2189 + u32 port_link_up[AR40XX_NUM_PORTS];
2190 + u32 ar40xx_port_old_link[AR40XX_NUM_PORTS];
2191 + u32 ar40xx_port_qm_buf[AR40XX_NUM_PORTS];
2192 +
2193 + u32 phy_t_status;
2194 +
2195 + /* mutex for switch reg access */
2196 + struct mutex reg_mutex;
2197 +
2198 + /* mutex for mib task */
2199 + struct mutex mib_lock;
2200 + struct delayed_work mib_work;
2201 + int mib_next_port;
2202 + u64 *mib_stats;
2203 +
2204 + char buf[2048];
2205 +
2206 + /* all fields below will be cleared on reset */
2207 + bool vlan;
2208 + u16 vlan_id[AR40XX_MAX_VLANS];
2209 + u8 vlan_table[AR40XX_MAX_VLANS];
2210 + u8 vlan_tagged;
2211 + u16 pvid[AR40XX_NUM_PORTS];
2212 +
2213 + /* mirror */
2214 + bool mirror_rx;
2215 + bool mirror_tx;
2216 + int source_port;
2217 + int monitor_port;
2218 +};
2219 +
2220 +#define AR40XX_PORT_LINK_UP 1
2221 +#define AR40XX_PORT_LINK_DOWN 0
2222 +#define AR40XX_QM_NOT_EMPTY 1
2223 +#define AR40XX_QM_EMPTY 0
2224 +
2225 +#define AR40XX_LAN_VLAN 1
2226 +#define AR40XX_WAN_VLAN 2
2227 +
2228 +enum ar40xx_port_wrapper_cfg {
2229 + PORT_WRAPPER_PSGMII = 0,
2230 +};
2231 +
2232 +struct ar40xx_mib_desc {
2233 + u32 size;
2234 + u32 offset;
2235 + const char *name;
2236 +};
2237 +
2238 +#define AR40XX_PORT_CPU 0
2239 +
2240 +#define AR40XX_PSGMII_MODE_CONTROL 0x1b4
2241 +#define AR40XX_PSGMII_ATHR_CSCO_MODE_25M BIT(0)
2242 +
2243 +#define AR40XX_PSGMIIPHY_TX_CONTROL 0x288
2244 +
2245 +#define AR40XX_MII_ATH_MMD_ADDR 0x0d
2246 +#define AR40XX_MII_ATH_MMD_DATA 0x0e
2247 +#define AR40XX_MII_ATH_DBG_ADDR 0x1d
2248 +#define AR40XX_MII_ATH_DBG_DATA 0x1e
2249 +
2250 +#define AR40XX_STATS_RXBROAD 0x00
2251 +#define AR40XX_STATS_RXPAUSE 0x04
2252 +#define AR40XX_STATS_RXMULTI 0x08
2253 +#define AR40XX_STATS_RXFCSERR 0x0c
2254 +#define AR40XX_STATS_RXALIGNERR 0x10
2255 +#define AR40XX_STATS_RXRUNT 0x14
2256 +#define AR40XX_STATS_RXFRAGMENT 0x18
2257 +#define AR40XX_STATS_RX64BYTE 0x1c
2258 +#define AR40XX_STATS_RX128BYTE 0x20
2259 +#define AR40XX_STATS_RX256BYTE 0x24
2260 +#define AR40XX_STATS_RX512BYTE 0x28
2261 +#define AR40XX_STATS_RX1024BYTE 0x2c
2262 +#define AR40XX_STATS_RX1518BYTE 0x30
2263 +#define AR40XX_STATS_RXMAXBYTE 0x34
2264 +#define AR40XX_STATS_RXTOOLONG 0x38
2265 +#define AR40XX_STATS_RXGOODBYTE 0x3c
2266 +#define AR40XX_STATS_RXBADBYTE 0x44
2267 +#define AR40XX_STATS_RXOVERFLOW 0x4c
2268 +#define AR40XX_STATS_FILTERED 0x50
2269 +#define AR40XX_STATS_TXBROAD 0x54
2270 +#define AR40XX_STATS_TXPAUSE 0x58
2271 +#define AR40XX_STATS_TXMULTI 0x5c
2272 +#define AR40XX_STATS_TXUNDERRUN 0x60
2273 +#define AR40XX_STATS_TX64BYTE 0x64
2274 +#define AR40XX_STATS_TX128BYTE 0x68
2275 +#define AR40XX_STATS_TX256BYTE 0x6c
2276 +#define AR40XX_STATS_TX512BYTE 0x70
2277 +#define AR40XX_STATS_TX1024BYTE 0x74
2278 +#define AR40XX_STATS_TX1518BYTE 0x78
2279 +#define AR40XX_STATS_TXMAXBYTE 0x7c
2280 +#define AR40XX_STATS_TXOVERSIZE 0x80
2281 +#define AR40XX_STATS_TXBYTE 0x84
2282 +#define AR40XX_STATS_TXCOLLISION 0x8c
2283 +#define AR40XX_STATS_TXABORTCOL 0x90
2284 +#define AR40XX_STATS_TXMULTICOL 0x94
2285 +#define AR40XX_STATS_TXSINGLECOL 0x98
2286 +#define AR40XX_STATS_TXEXCDEFER 0x9c
2287 +#define AR40XX_STATS_TXDEFER 0xa0
2288 +#define AR40XX_STATS_TXLATECOL 0xa4
2289 +
2290 +#define AR40XX_REG_MODULE_EN 0x030
2291 +#define AR40XX_MODULE_EN_MIB BIT(0)
2292 +
2293 +#define AR40XX_REG_MIB_FUNC 0x034
2294 +#define AR40XX_MIB_BUSY BIT(17)
2295 +#define AR40XX_MIB_CPU_KEEP BIT(20)
2296 +#define AR40XX_MIB_FUNC BITS(24, 3)
2297 +#define AR40XX_MIB_FUNC_S 24
2298 +#define AR40XX_MIB_FUNC_NO_OP 0x0
2299 +#define AR40XX_MIB_FUNC_FLUSH 0x1
2300 +
2301 +#define AR40XX_REG_PORT_STATUS(_i) (0x07c + (_i) * 4)
2302 +#define AR40XX_PORT_SPEED BITS(0, 2)
2303 +#define AR40XX_PORT_STATUS_SPEED_S 0
2304 +#define AR40XX_PORT_TX_EN BIT(2)
2305 +#define AR40XX_PORT_RX_EN BIT(3)
2306 +#define AR40XX_PORT_STATUS_TXFLOW BIT(4)
2307 +#define AR40XX_PORT_STATUS_RXFLOW BIT(5)
2308 +#define AR40XX_PORT_DUPLEX BIT(6)
2309 +#define AR40XX_PORT_TXHALF_FLOW BIT(7)
2310 +#define AR40XX_PORT_STATUS_LINK_UP BIT(8)
2311 +#define AR40XX_PORT_AUTO_LINK_EN BIT(9)
2312 +#define AR40XX_PORT_STATUS_FLOW_CONTROL BIT(12)
2313 +
2314 +#define AR40XX_REG_MAX_FRAME_SIZE 0x078
2315 +#define AR40XX_MAX_FRAME_SIZE_MTU BITS(0, 14)
2316 +
2317 +#define AR40XX_REG_PORT_HEADER(_i) (0x09c + (_i) * 4)
2318 +
2319 +#define AR40XX_REG_EEE_CTRL 0x100
2320 +#define AR40XX_EEE_CTRL_DISABLE_PHY(_i) BIT(4 + (_i) * 2)
2321 +
2322 +#define AR40XX_REG_PORT_VLAN0(_i) (0x420 + (_i) * 0x8)
2323 +#define AR40XX_PORT_VLAN0_DEF_SVID BITS(0, 12)
2324 +#define AR40XX_PORT_VLAN0_DEF_SVID_S 0
2325 +#define AR40XX_PORT_VLAN0_DEF_CVID BITS(16, 12)
2326 +#define AR40XX_PORT_VLAN0_DEF_CVID_S 16
2327 +
2328 +#define AR40XX_REG_PORT_VLAN1(_i) (0x424 + (_i) * 0x8)
2329 +#define AR40XX_PORT_VLAN1_PORT_VLAN_PROP BIT(6)
2330 +#define AR40XX_PORT_VLAN1_OUT_MODE BITS(12, 2)
2331 +#define AR40XX_PORT_VLAN1_OUT_MODE_S 12
2332 +#define AR40XX_PORT_VLAN1_OUT_MODE_UNMOD 0
2333 +#define AR40XX_PORT_VLAN1_OUT_MODE_UNTAG 1
2334 +#define AR40XX_PORT_VLAN1_OUT_MODE_TAG 2
2335 +#define AR40XX_PORT_VLAN1_OUT_MODE_UNTOUCH 3
2336 +
2337 +#define AR40XX_REG_VTU_FUNC0 0x0610
2338 +#define AR40XX_VTU_FUNC0_EG_MODE BITS(4, 14)
2339 +#define AR40XX_VTU_FUNC0_EG_MODE_S(_i) (4 + (_i) * 2)
2340 +#define AR40XX_VTU_FUNC0_EG_MODE_KEEP 0
2341 +#define AR40XX_VTU_FUNC0_EG_MODE_UNTAG 1
2342 +#define AR40XX_VTU_FUNC0_EG_MODE_TAG 2
2343 +#define AR40XX_VTU_FUNC0_EG_MODE_NOT 3
2344 +#define AR40XX_VTU_FUNC0_IVL BIT(19)
2345 +#define AR40XX_VTU_FUNC0_VALID BIT(20)
2346 +
2347 +#define AR40XX_REG_VTU_FUNC1 0x0614
2348 +#define AR40XX_VTU_FUNC1_OP BITS(0, 3)
2349 +#define AR40XX_VTU_FUNC1_OP_NOOP 0
2350 +#define AR40XX_VTU_FUNC1_OP_FLUSH 1
2351 +#define AR40XX_VTU_FUNC1_OP_LOAD 2
2352 +#define AR40XX_VTU_FUNC1_OP_PURGE 3
2353 +#define AR40XX_VTU_FUNC1_OP_REMOVE_PORT 4
2354 +#define AR40XX_VTU_FUNC1_OP_GET_NEXT 5
2355 +#define AR40XX7_VTU_FUNC1_OP_GET_ONE 6
2356 +#define AR40XX_VTU_FUNC1_FULL BIT(4)
2357 +#define AR40XX_VTU_FUNC1_PORT		BITS(8, 4)
2358 +#define AR40XX_VTU_FUNC1_PORT_S 8
2359 +#define AR40XX_VTU_FUNC1_VID		BITS(16, 12)
2360 +#define AR40XX_VTU_FUNC1_VID_S 16
2361 +#define AR40XX_VTU_FUNC1_BUSY BIT(31)
2362 +
2363 +#define AR40XX_REG_FWD_CTRL0 0x620
2364 +#define AR40XX_FWD_CTRL0_CPU_PORT_EN BIT(10)
2365 +#define AR40XX_FWD_CTRL0_MIRROR_PORT BITS(4, 4)
2366 +#define AR40XX_FWD_CTRL0_MIRROR_PORT_S 4
2367 +
2368 +#define AR40XX_REG_FWD_CTRL1 0x624
2369 +#define AR40XX_FWD_CTRL1_UC_FLOOD BITS(0, 7)
2370 +#define AR40XX_FWD_CTRL1_UC_FLOOD_S 0
2371 +#define AR40XX_FWD_CTRL1_MC_FLOOD BITS(8, 7)
2372 +#define AR40XX_FWD_CTRL1_MC_FLOOD_S 8
2373 +#define AR40XX_FWD_CTRL1_BC_FLOOD BITS(16, 7)
2374 +#define AR40XX_FWD_CTRL1_BC_FLOOD_S 16
2375 +#define AR40XX_FWD_CTRL1_IGMP BITS(24, 7)
2376 +#define AR40XX_FWD_CTRL1_IGMP_S 24
2377 +
2378 +#define AR40XX_REG_PORT_LOOKUP(_i) (0x660 + (_i) * 0xc)
2379 +#define AR40XX_PORT_LOOKUP_MEMBER BITS(0, 7)
2380 +#define AR40XX_PORT_LOOKUP_IN_MODE BITS(8, 2)
2381 +#define AR40XX_PORT_LOOKUP_IN_MODE_S 8
2382 +#define AR40XX_PORT_LOOKUP_STATE BITS(16, 3)
2383 +#define AR40XX_PORT_LOOKUP_STATE_S 16
2384 +#define AR40XX_PORT_LOOKUP_LEARN BIT(20)
2385 +#define AR40XX_PORT_LOOKUP_LOOPBACK BIT(21)
2386 +#define AR40XX_PORT_LOOKUP_ING_MIRROR_EN BIT(25)
2387 +
2388 +#define AR40XX_REG_ATU_FUNC 0x60c
2389 +#define AR40XX_ATU_FUNC_OP BITS(0, 4)
2390 +#define AR40XX_ATU_FUNC_OP_NOOP 0x0
2391 +#define AR40XX_ATU_FUNC_OP_FLUSH 0x1
2392 +#define AR40XX_ATU_FUNC_OP_LOAD 0x2
2393 +#define AR40XX_ATU_FUNC_OP_PURGE 0x3
2394 +#define AR40XX_ATU_FUNC_OP_FLUSH_LOCKED 0x4
2395 +#define AR40XX_ATU_FUNC_OP_FLUSH_UNICAST 0x5
2396 +#define AR40XX_ATU_FUNC_OP_GET_NEXT 0x6
2397 +#define AR40XX_ATU_FUNC_OP_SEARCH_MAC 0x7
2398 +#define AR40XX_ATU_FUNC_OP_CHANGE_TRUNK 0x8
2399 +#define AR40XX_ATU_FUNC_BUSY BIT(31)
2400 +
2401 +#define AR40XX_REG_QM_DEBUG_ADDR 0x820
2402 +#define AR40XX_REG_QM_DEBUG_VALUE 0x824
2403 +#define AR40XX_REG_QM_PORT0_3_QNUM 0x1d
2404 +#define AR40XX_REG_QM_PORT4_6_QNUM 0x1e
2405 +
2406 +#define AR40XX_REG_PORT_HOL_CTRL1(_i) (0x974 + (_i) * 0x8)
2407 +#define AR40XX_PORT_HOL_CTRL1_EG_MIRROR_EN BIT(16)
2408 +
2409 +#define AR40XX_REG_PORT_FLOWCTRL_THRESH(_i) (0x9b0 + (_i) * 0x4)
2410 +#define AR40XX_PORT0_FC_THRESH_ON_DFLT 0x60
2411 +#define AR40XX_PORT0_FC_THRESH_OFF_DFLT 0x90
2412 +
2413 +#define AR40XX_PHY_DEBUG_0 0
2414 +#define AR40XX_PHY_MANU_CTRL_EN BIT(12)
2415 +
2416 +#define AR40XX_PHY_DEBUG_2 2
2417 +
2418 +#define AR40XX_PHY_SPEC_STATUS 0x11
2419 +#define AR40XX_PHY_SPEC_STATUS_LINK BIT(10)
2420 +#define AR40XX_PHY_SPEC_STATUS_DUPLEX BIT(13)
2421 +#define AR40XX_PHY_SPEC_STATUS_SPEED BITS(14, 2)
2422 +
2423 +/* port forwarding state */
2424 +enum {
2425 + AR40XX_PORT_STATE_DISABLED = 0,
2426 + AR40XX_PORT_STATE_BLOCK = 1,
2427 + AR40XX_PORT_STATE_LISTEN = 2,
2428 + AR40XX_PORT_STATE_LEARN = 3,
2429 + AR40XX_PORT_STATE_FORWARD = 4
2430 +};
2431 +
2432 +/* ingress 802.1q mode */
2433 +enum {
2434 + AR40XX_IN_PORT_ONLY = 0,
2435 + AR40XX_IN_PORT_FALLBACK = 1,
2436 + AR40XX_IN_VLAN_ONLY = 2,
2437 + AR40XX_IN_SECURE = 3
2438 +};
2439 +
2440 +/* egress 802.1q mode */
2441 +enum {
2442 + AR40XX_OUT_KEEP = 0,
2443 + AR40XX_OUT_STRIP_VLAN = 1,
2444 + AR40XX_OUT_ADD_VLAN = 2
2445 +};
2446 +
2447 +/* port speed */
2448 +enum {
2449 + AR40XX_PORT_SPEED_10M = 0,
2450 + AR40XX_PORT_SPEED_100M = 1,
2451 + AR40XX_PORT_SPEED_1000M = 2,
2452 + AR40XX_PORT_SPEED_ERR = 3,
2453 +};
2454 +
2455 +#define AR40XX_MIB_WORK_DELAY 2000 /* msecs */
2456 +
2457 +#define AR40XX_QM_WORK_DELAY 100
2458 +
2459 +#define AR40XX_MIB_FUNC_CAPTURE 0x3
2460 +
2461 +#define AR40XX_REG_PORT_STATS_START 0x1000
2462 +#define AR40XX_REG_PORT_STATS_LEN 0x100
2463 +
2464 +#define AR40XX_PORTS_ALL 0x3f
2465 +
2466 +#define AR40XX_PSGMII_ID 5
2467 +#define AR40XX_PSGMII_CALB_NUM 100
2468 +#define AR40XX_MALIBU_PSGMII_MODE_CTRL 0x6d
2469 +#define AR40XX_MALIBU_PHY_PSGMII_MODE_CTRL_ADJUST_VAL 0x220c
2470 +#define AR40XX_MALIBU_PHY_MMD7_DAC_CTRL 0x801a
2471 +#define AR40XX_MALIBU_DAC_CTRL_MASK 0x380
2472 +#define AR40XX_MALIBU_DAC_CTRL_VALUE 0x280
2473 +#define AR40XX_MALIBU_PHY_RLP_CTRL 0x805a
2474 +#define AR40XX_PSGMII_TX_DRIVER_1_CTRL 0xb
2475 +#define AR40XX_MALIBU_PHY_PSGMII_REDUCE_SERDES_TX_AMP 0x8a
2476 +#define AR40XX_MALIBU_PHY_LAST_ADDR 4
2477 +
2478 +static inline struct ar40xx_priv *
2479 +swdev_to_ar40xx(struct switch_dev *swdev)
2480 +{
2481 + return container_of(swdev, struct ar40xx_priv, dev);
2482 +}
2483 +
2484 +#endif
2485 --- /dev/null
2486 +++ b/drivers/net/phy/mdio-ipq40xx.c
2487 @@ -0,0 +1,203 @@
2488 +/*
2489 + * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
2490 + *
2491 + * Permission to use, copy, modify, and/or distribute this software for
2492 + * any purpose with or without fee is hereby granted, provided that the
2493 + * above copyright notice and this permission notice appear in all copies.
2494 + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
2495 + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
2496 + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
2497 + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
2498 + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
2499 + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
2500 + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
2501 + */
2502 +
2503 +#include <linux/delay.h>
2504 +#include <linux/kernel.h>
2505 +#include <linux/module.h>
2506 +#include <linux/mutex.h>
2507 +#include <linux/io.h>
2508 +#include <linux/of_address.h>
2509 +#include <linux/of_mdio.h>
2510 +#include <linux/phy.h>
2511 +#include <linux/platform_device.h>
2512 +
2513 +#define MDIO_CTRL_0_REG 0x40
2514 +#define MDIO_CTRL_1_REG 0x44
2515 +#define MDIO_CTRL_2_REG 0x48
2516 +#define MDIO_CTRL_3_REG 0x4c
2517 +#define MDIO_CTRL_4_REG 0x50
2518 +#define MDIO_CTRL_4_ACCESS_BUSY BIT(16)
2519 +#define MDIO_CTRL_4_ACCESS_START BIT(8)
2520 +#define MDIO_CTRL_4_ACCESS_CODE_READ 0
2521 +#define MDIO_CTRL_4_ACCESS_CODE_WRITE 1
2522 +#define CTRL_0_REG_DEFAULT_VALUE 0x150FF
2523 +
2524 +#define IPQ40XX_MDIO_RETRY 1000
2525 +#define IPQ40XX_MDIO_DELAY 10
2526 +
2527 +struct ipq40xx_mdio_data {
2528 + struct mii_bus *mii_bus;
2529 + void __iomem *membase;
2530 + int phy_irq[PHY_MAX_ADDR];
2531 + struct device *dev;
2532 +};
2533 +
2534 +static int ipq40xx_mdio_wait_busy(struct ipq40xx_mdio_data *am)
2535 +{
2536 + int i;
2537 +
2538 + for (i = 0; i < IPQ40XX_MDIO_RETRY; i++) {
2539 + unsigned int busy;
2540 +
2541 + busy = readl(am->membase + MDIO_CTRL_4_REG) &
2542 + MDIO_CTRL_4_ACCESS_BUSY;
2543 + if (!busy)
2544 + return 0;
2545 +
2546 +		/* BUSY may take 15~20 loop iterations to clear */
2547 + udelay(IPQ40XX_MDIO_DELAY);
2548 + }
2549 +
2550 + dev_err(am->dev, "%s: MDIO operation timed out\n", am->mii_bus->name);
2551 +
2552 + return -ETIMEDOUT;
2553 +}
2554 +
2555 +static int ipq40xx_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
2556 +{
2557 + struct ipq40xx_mdio_data *am = bus->priv;
2558 + int value = 0;
2559 + unsigned int cmd = 0;
2560 +
2561 + lockdep_assert_held(&bus->mdio_lock);
2562 +
2563 + if (ipq40xx_mdio_wait_busy(am))
2564 + return -ETIMEDOUT;
2565 +
2566 + /* issue the phy address and reg */
2567 + writel((mii_id << 8) | regnum, am->membase + MDIO_CTRL_1_REG);
2568 +
2569 + cmd = MDIO_CTRL_4_ACCESS_START|MDIO_CTRL_4_ACCESS_CODE_READ;
2570 +
2571 + /* issue read command */
2572 + writel(cmd, am->membase + MDIO_CTRL_4_REG);
2573 +
2574 + /* Wait read complete */
2575 + if (ipq40xx_mdio_wait_busy(am))
2576 + return -ETIMEDOUT;
2577 +
2578 + /* Read data */
2579 + value = readl(am->membase + MDIO_CTRL_3_REG);
2580 +
2581 + return value;
2582 +}
2583 +
2584 +static int ipq40xx_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
2585 + u16 value)
2586 +{
2587 + struct ipq40xx_mdio_data *am = bus->priv;
2588 + unsigned int cmd = 0;
2589 +
2590 + lockdep_assert_held(&bus->mdio_lock);
2591 +
2592 + if (ipq40xx_mdio_wait_busy(am))
2593 + return -ETIMEDOUT;
2594 +
2595 + /* issue the phy address and reg */
2596 + writel((mii_id << 8) | regnum, am->membase + MDIO_CTRL_1_REG);
2597 +
2598 + /* issue write data */
2599 + writel(value, am->membase + MDIO_CTRL_2_REG);
2600 +
2601 + cmd = MDIO_CTRL_4_ACCESS_START|MDIO_CTRL_4_ACCESS_CODE_WRITE;
2602 + /* issue write command */
2603 + writel(cmd, am->membase + MDIO_CTRL_4_REG);
2604 +
2605 + /* Wait write complete */
2606 + if (ipq40xx_mdio_wait_busy(am))
2607 + return -ETIMEDOUT;
2608 +
2609 + return 0;
2610 +}
2611 +
2612 +static int ipq40xx_mdio_probe(struct platform_device *pdev)
2613 +{
2614 + struct ipq40xx_mdio_data *am;
2615 + struct resource *res;
2616 + int i;
2617 +
2618 + am = devm_kzalloc(&pdev->dev, sizeof(*am), GFP_KERNEL);
2619 + if (!am)
2620 + return -ENOMEM;
2621 +
2622 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2623 + if (!res) {
2624 + dev_err(&pdev->dev, "no iomem resource found\n");
2625 + return -ENXIO;
2626 + }
2627 +
2628 + am->membase = devm_ioremap_resource(&pdev->dev, res);
2629 + if (IS_ERR(am->membase)) {
2630 + dev_err(&pdev->dev, "unable to ioremap registers\n");
2631 + return PTR_ERR(am->membase);
2632 + }
2633 +
2634 + am->mii_bus = devm_mdiobus_alloc(&pdev->dev);
2635 + if (!am->mii_bus)
2636 + return -ENOMEM;
2637 +
2638 +	writel(CTRL_0_REG_DEFAULT_VALUE, am->membase + MDIO_CTRL_0_REG);
2639 +
2640 +	/* fill phy_irq[] BEFORE copying it into mii_bus->irq, otherwise the
2641 +	 * bus irq table is overwritten with zeros instead of PHY_POLL */
2642 +	for (i = 0; i < PHY_MAX_ADDR; i++)
2643 +		am->phy_irq[i] = PHY_POLL;
2644 +
2645 +	am->mii_bus->name = "ipq40xx_mdio";
2646 +	am->mii_bus->read = ipq40xx_mdio_read;
2647 +	am->mii_bus->write = ipq40xx_mdio_write;
2648 +	memcpy(am->mii_bus->irq, am->phy_irq, sizeof(am->phy_irq));
2649 +	am->mii_bus->priv = am;
2650 +	am->mii_bus->parent = &pdev->dev;
2651 +	snprintf(am->mii_bus->id, MII_BUS_ID_SIZE, "%s", dev_name(&pdev->dev));
2650 +
2651 + am->dev = &pdev->dev;
2652 + platform_set_drvdata(pdev, am);
2653 +
2654 +	/* edma_axi_probe() uses this "am" drvdata, so
2655 +	 * ipq40xx_mdio_probe() must be called before it.
2656 +	 */
2657 + return of_mdiobus_register(am->mii_bus, pdev->dev.of_node);
2658 +}
2659 +
2660 +static int ipq40xx_mdio_remove(struct platform_device *pdev)
2661 +{
2662 + struct ipq40xx_mdio_data *am = platform_get_drvdata(pdev);
2663 +
2664 + mdiobus_unregister(am->mii_bus);
2665 + return 0;
2666 +}
2667 +
2668 +static const struct of_device_id ipq40xx_mdio_dt_ids[] = {
2669 + { .compatible = "qcom,ipq4019-mdio" },
2670 + { }
2671 +};
2672 +MODULE_DEVICE_TABLE(of, ipq40xx_mdio_dt_ids);
2673 +
2674 +static struct platform_driver ipq40xx_mdio_driver = {
2675 + .probe = ipq40xx_mdio_probe,
2676 + .remove = ipq40xx_mdio_remove,
2677 + .driver = {
2678 + .name = "ipq40xx-mdio",
2679 + .of_match_table = ipq40xx_mdio_dt_ids,
2680 + },
2681 +};
2682 +
2683 +module_platform_driver(ipq40xx_mdio_driver);
2684 +
2685 +#define DRV_VERSION "1.0"
2686 +
2687 +MODULE_DESCRIPTION("IPQ40XX MDIO interface driver");
2688 +MODULE_AUTHOR("Qualcomm Atheros");
2689 +MODULE_VERSION(DRV_VERSION);
2690 +MODULE_LICENSE("Dual BSD/GPL");