2 * Copyright (c) 2016, The Linux Foundation. All rights reserved.
4 * Permission to use, copy, modify, and/or distribute this software for
5 * any purpose with or without fee is hereby granted, provided that the
6 * above copyright notice and this permission notice appear in all copies.
7 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
8 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
9 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
10 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
11 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
12 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
13 * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 #include <linux/bitfield.h>
17 #include <linux/module.h>
18 #include <linux/list.h>
19 #include <linux/bitops.h>
20 #include <linux/switch.h>
21 #include <linux/delay.h>
22 #include <linux/phy.h>
23 #include <linux/clk.h>
24 #include <linux/reset.h>
25 #include <linux/lockdep.h>
26 #include <linux/workqueue.h>
27 #include <linux/of_device.h>
28 #include <linux/of_address.h>
29 #include <linux/of_mdio.h>
30 #include <linux/mdio.h>
31 #include <linux/gpio.h>
/* Singleton driver state; NOTE(review): presumably set once at probe — confirm */
static struct ar40xx_priv *ar40xx_priv;
37 #define MIB_DESC(_s , _o, _n) \
44 static const struct ar40xx_mib_desc ar40xx_mibs
[] = {
45 MIB_DESC(1, AR40XX_STATS_RXBROAD
, "RxBroad"),
46 MIB_DESC(1, AR40XX_STATS_RXPAUSE
, "RxPause"),
47 MIB_DESC(1, AR40XX_STATS_RXMULTI
, "RxMulti"),
48 MIB_DESC(1, AR40XX_STATS_RXFCSERR
, "RxFcsErr"),
49 MIB_DESC(1, AR40XX_STATS_RXALIGNERR
, "RxAlignErr"),
50 MIB_DESC(1, AR40XX_STATS_RXRUNT
, "RxRunt"),
51 MIB_DESC(1, AR40XX_STATS_RXFRAGMENT
, "RxFragment"),
52 MIB_DESC(1, AR40XX_STATS_RX64BYTE
, "Rx64Byte"),
53 MIB_DESC(1, AR40XX_STATS_RX128BYTE
, "Rx128Byte"),
54 MIB_DESC(1, AR40XX_STATS_RX256BYTE
, "Rx256Byte"),
55 MIB_DESC(1, AR40XX_STATS_RX512BYTE
, "Rx512Byte"),
56 MIB_DESC(1, AR40XX_STATS_RX1024BYTE
, "Rx1024Byte"),
57 MIB_DESC(1, AR40XX_STATS_RX1518BYTE
, "Rx1518Byte"),
58 MIB_DESC(1, AR40XX_STATS_RXMAXBYTE
, "RxMaxByte"),
59 MIB_DESC(1, AR40XX_STATS_RXTOOLONG
, "RxTooLong"),
60 MIB_DESC(2, AR40XX_STATS_RXGOODBYTE
, "RxGoodByte"),
61 MIB_DESC(2, AR40XX_STATS_RXBADBYTE
, "RxBadByte"),
62 MIB_DESC(1, AR40XX_STATS_RXOVERFLOW
, "RxOverFlow"),
63 MIB_DESC(1, AR40XX_STATS_FILTERED
, "Filtered"),
64 MIB_DESC(1, AR40XX_STATS_TXBROAD
, "TxBroad"),
65 MIB_DESC(1, AR40XX_STATS_TXPAUSE
, "TxPause"),
66 MIB_DESC(1, AR40XX_STATS_TXMULTI
, "TxMulti"),
67 MIB_DESC(1, AR40XX_STATS_TXUNDERRUN
, "TxUnderRun"),
68 MIB_DESC(1, AR40XX_STATS_TX64BYTE
, "Tx64Byte"),
69 MIB_DESC(1, AR40XX_STATS_TX128BYTE
, "Tx128Byte"),
70 MIB_DESC(1, AR40XX_STATS_TX256BYTE
, "Tx256Byte"),
71 MIB_DESC(1, AR40XX_STATS_TX512BYTE
, "Tx512Byte"),
72 MIB_DESC(1, AR40XX_STATS_TX1024BYTE
, "Tx1024Byte"),
73 MIB_DESC(1, AR40XX_STATS_TX1518BYTE
, "Tx1518Byte"),
74 MIB_DESC(1, AR40XX_STATS_TXMAXBYTE
, "TxMaxByte"),
75 MIB_DESC(1, AR40XX_STATS_TXOVERSIZE
, "TxOverSize"),
76 MIB_DESC(2, AR40XX_STATS_TXBYTE
, "TxByte"),
77 MIB_DESC(1, AR40XX_STATS_TXCOLLISION
, "TxCollision"),
78 MIB_DESC(1, AR40XX_STATS_TXABORTCOL
, "TxAbortCol"),
79 MIB_DESC(1, AR40XX_STATS_TXMULTICOL
, "TxMultiCol"),
80 MIB_DESC(1, AR40XX_STATS_TXSINGLECOL
, "TxSingleCol"),
81 MIB_DESC(1, AR40XX_STATS_TXEXCDEFER
, "TxExcDefer"),
82 MIB_DESC(1, AR40XX_STATS_TXDEFER
, "TxDefer"),
83 MIB_DESC(1, AR40XX_STATS_TXLATECOL
, "TxLateCol"),
87 ar40xx_read(struct ar40xx_priv
*priv
, int reg
)
89 return readl(priv
->hw_addr
+ reg
);
93 ar40xx_psgmii_read(struct ar40xx_priv
*priv
, int reg
)
95 return readl(priv
->psgmii_hw_addr
+ reg
);
99 ar40xx_write(struct ar40xx_priv
*priv
, int reg
, u32 val
)
101 writel(val
, priv
->hw_addr
+ reg
);
105 ar40xx_rmw(struct ar40xx_priv
*priv
, int reg
, u32 mask
, u32 val
)
109 ret
= ar40xx_read(priv
, reg
);
112 ar40xx_write(priv
, reg
, ret
);
117 ar40xx_psgmii_write(struct ar40xx_priv
*priv
, int reg
, u32 val
)
119 writel(val
, priv
->psgmii_hw_addr
+ reg
);
123 ar40xx_phy_dbg_write(struct ar40xx_priv
*priv
, int phy_addr
,
124 u16 dbg_addr
, u16 dbg_data
)
126 struct mii_bus
*bus
= priv
->mii_bus
;
128 mutex_lock(&bus
->mdio_lock
);
129 bus
->write(bus
, phy_addr
, AR40XX_MII_ATH_DBG_ADDR
, dbg_addr
);
130 bus
->write(bus
, phy_addr
, AR40XX_MII_ATH_DBG_DATA
, dbg_data
);
131 mutex_unlock(&bus
->mdio_lock
);
135 ar40xx_phy_dbg_read(struct ar40xx_priv
*priv
, int phy_addr
,
136 u16 dbg_addr
, u16
*dbg_data
)
138 struct mii_bus
*bus
= priv
->mii_bus
;
140 mutex_lock(&bus
->mdio_lock
);
141 bus
->write(bus
, phy_addr
, AR40XX_MII_ATH_DBG_ADDR
, dbg_addr
);
142 *dbg_data
= bus
->read(bus
, phy_addr
, AR40XX_MII_ATH_DBG_DATA
);
143 mutex_unlock(&bus
->mdio_lock
);
147 ar40xx_phy_mmd_write(struct ar40xx_priv
*priv
, u32 phy_id
,
148 u16 mmd_num
, u16 reg_id
, u16 reg_val
)
150 struct mii_bus
*bus
= priv
->mii_bus
;
152 mutex_lock(&bus
->mdio_lock
);
153 bus
->write(bus
, phy_id
,
154 AR40XX_MII_ATH_MMD_ADDR
, mmd_num
);
155 bus
->write(bus
, phy_id
,
156 AR40XX_MII_ATH_MMD_DATA
, reg_id
);
157 bus
->write(bus
, phy_id
,
158 AR40XX_MII_ATH_MMD_ADDR
,
160 bus
->write(bus
, phy_id
,
161 AR40XX_MII_ATH_MMD_DATA
, reg_val
);
162 mutex_unlock(&bus
->mdio_lock
);
166 ar40xx_phy_mmd_read(struct ar40xx_priv
*priv
, u32 phy_id
,
167 u16 mmd_num
, u16 reg_id
)
170 struct mii_bus
*bus
= priv
->mii_bus
;
172 mutex_lock(&bus
->mdio_lock
);
173 bus
->write(bus
, phy_id
,
174 AR40XX_MII_ATH_MMD_ADDR
, mmd_num
);
175 bus
->write(bus
, phy_id
,
176 AR40XX_MII_ATH_MMD_DATA
, reg_id
);
177 bus
->write(bus
, phy_id
,
178 AR40XX_MII_ATH_MMD_ADDR
,
180 value
= bus
->read(bus
, phy_id
, AR40XX_MII_ATH_MMD_DATA
);
181 mutex_unlock(&bus
->mdio_lock
);
185 /* Start of swconfig support */
188 ar40xx_phy_poll_reset(struct ar40xx_priv
*priv
)
190 u32 i
, in_reset
, retries
= 500;
191 struct mii_bus
*bus
= priv
->mii_bus
;
193 /* Assume RESET was recently issued to some or all of the phys */
194 in_reset
= GENMASK(AR40XX_NUM_PHYS
- 1, 0);
197 /* 1ms should be plenty of time.
198 * 802.3 spec allows for a max wait time of 500ms
200 usleep_range(1000, 2000);
202 for (i
= 0; i
< AR40XX_NUM_PHYS
; i
++) {
205 /* skip devices which have completed reset */
206 if (!(in_reset
& BIT(i
)))
209 val
= mdiobus_read(bus
, i
, MII_BMCR
);
213 /* mark when phy is no longer in reset state */
214 if (!(val
& BMCR_RESET
))
222 dev_warn(&bus
->dev
, "Failed to reset all phys! (in_reset: 0x%x)\n",
227 ar40xx_phy_init(struct ar40xx_priv
*priv
)
234 for (i
= 0; i
< AR40XX_NUM_PORTS
- 1; i
++) {
235 ar40xx_phy_dbg_read(priv
, i
, AR40XX_PHY_DEBUG_0
, &val
);
236 val
&= ~AR40XX_PHY_MANU_CTRL_EN
;
237 ar40xx_phy_dbg_write(priv
, i
, AR40XX_PHY_DEBUG_0
, val
);
238 mdiobus_write(bus
, i
,
239 MII_ADVERTISE
, ADVERTISE_ALL
|
240 ADVERTISE_PAUSE_CAP
|
241 ADVERTISE_PAUSE_ASYM
);
242 mdiobus_write(bus
, i
, MII_CTRL1000
, ADVERTISE_1000FULL
);
243 mdiobus_write(bus
, i
, MII_BMCR
, BMCR_RESET
| BMCR_ANENABLE
);
246 ar40xx_phy_poll_reset(priv
);
250 ar40xx_port_phy_linkdown(struct ar40xx_priv
*priv
)
257 for (i
= 0; i
< AR40XX_NUM_PORTS
- 1; i
++) {
258 mdiobus_write(bus
, i
, MII_CTRL1000
, 0);
259 mdiobus_write(bus
, i
, MII_ADVERTISE
, 0);
260 mdiobus_write(bus
, i
, MII_BMCR
, BMCR_RESET
| BMCR_ANENABLE
);
261 ar40xx_phy_dbg_read(priv
, i
, AR40XX_PHY_DEBUG_0
, &val
);
262 val
|= AR40XX_PHY_MANU_CTRL_EN
;
263 ar40xx_phy_dbg_write(priv
, i
, AR40XX_PHY_DEBUG_0
, val
);
264 /* disable transmit */
265 ar40xx_phy_dbg_read(priv
, i
, AR40XX_PHY_DEBUG_2
, &val
);
267 ar40xx_phy_dbg_write(priv
, i
, AR40XX_PHY_DEBUG_2
, val
);
272 ar40xx_set_mirror_regs(struct ar40xx_priv
*priv
)
276 /* reset all mirror registers */
277 ar40xx_rmw(priv
, AR40XX_REG_FWD_CTRL0
,
278 AR40XX_FWD_CTRL0_MIRROR_PORT
,
279 (0xF << AR40XX_FWD_CTRL0_MIRROR_PORT_S
));
280 for (port
= 0; port
< AR40XX_NUM_PORTS
; port
++) {
281 ar40xx_rmw(priv
, AR40XX_REG_PORT_LOOKUP(port
),
282 AR40XX_PORT_LOOKUP_ING_MIRROR_EN
, 0);
284 ar40xx_rmw(priv
, AR40XX_REG_PORT_HOL_CTRL1(port
),
285 AR40XX_PORT_HOL_CTRL1_EG_MIRROR_EN
, 0);
288 /* now enable mirroring if necessary */
289 if (priv
->source_port
>= AR40XX_NUM_PORTS
||
290 priv
->monitor_port
>= AR40XX_NUM_PORTS
||
291 priv
->source_port
== priv
->monitor_port
) {
295 ar40xx_rmw(priv
, AR40XX_REG_FWD_CTRL0
,
296 AR40XX_FWD_CTRL0_MIRROR_PORT
,
297 (priv
->monitor_port
<< AR40XX_FWD_CTRL0_MIRROR_PORT_S
));
300 ar40xx_rmw(priv
, AR40XX_REG_PORT_LOOKUP(priv
->source_port
), 0,
301 AR40XX_PORT_LOOKUP_ING_MIRROR_EN
);
304 ar40xx_rmw(priv
, AR40XX_REG_PORT_HOL_CTRL1(priv
->source_port
),
305 0, AR40XX_PORT_HOL_CTRL1_EG_MIRROR_EN
);
309 ar40xx_sw_get_ports(struct switch_dev
*dev
, struct switch_val
*val
)
311 struct ar40xx_priv
*priv
= swdev_to_ar40xx(dev
);
312 u8 ports
= priv
->vlan_table
[val
->port_vlan
];
316 for (i
= 0; i
< dev
->ports
; i
++) {
317 struct switch_port
*p
;
319 if (!(ports
& BIT(i
)))
322 p
= &val
->value
.ports
[val
->len
++];
324 if ((priv
->vlan_tagged
& BIT(i
)) ||
325 (priv
->pvid
[i
] != val
->port_vlan
))
326 p
->flags
= BIT(SWITCH_PORT_FLAG_TAGGED
);
334 ar40xx_sw_set_ports(struct switch_dev
*dev
, struct switch_val
*val
)
336 struct ar40xx_priv
*priv
= swdev_to_ar40xx(dev
);
337 u8
*vt
= &priv
->vlan_table
[val
->port_vlan
];
341 for (i
= 0; i
< val
->len
; i
++) {
342 struct switch_port
*p
= &val
->value
.ports
[i
];
344 if (p
->flags
& BIT(SWITCH_PORT_FLAG_TAGGED
)) {
345 if (val
->port_vlan
== priv
->pvid
[p
->id
])
346 priv
->vlan_tagged
|= BIT(p
->id
);
348 priv
->vlan_tagged
&= ~BIT(p
->id
);
349 priv
->pvid
[p
->id
] = val
->port_vlan
;
358 ar40xx_reg_wait(struct ar40xx_priv
*priv
, u32 reg
, u32 mask
, u32 val
,
363 for (i
= 0; i
< timeout
; i
++) {
366 t
= ar40xx_read(priv
, reg
);
367 if ((t
& mask
) == val
)
370 usleep_range(1000, 2000);
377 ar40xx_mib_op(struct ar40xx_priv
*priv
, u32 op
)
381 lockdep_assert_held(&priv
->mib_lock
);
383 /* Capture the hardware statistics for all ports */
384 ar40xx_rmw(priv
, AR40XX_REG_MIB_FUNC
,
385 AR40XX_MIB_FUNC
, (op
<< AR40XX_MIB_FUNC_S
));
387 /* Wait for the capturing to complete. */
388 ret
= ar40xx_reg_wait(priv
, AR40XX_REG_MIB_FUNC
,
389 AR40XX_MIB_BUSY
, 0, 10);
395 ar40xx_mib_fetch_port_stat(struct ar40xx_priv
*priv
, int port
, bool flush
)
400 u32 num_mibs
= ARRAY_SIZE(ar40xx_mibs
);
402 WARN_ON(port
>= priv
->dev
.ports
);
404 lockdep_assert_held(&priv
->mib_lock
);
406 base
= AR40XX_REG_PORT_STATS_START
+
407 AR40XX_REG_PORT_STATS_LEN
* port
;
409 mib_stats
= &priv
->mib_stats
[port
* num_mibs
];
413 len
= num_mibs
* sizeof(*mib_stats
);
414 memset(mib_stats
, 0, len
);
417 for (i
= 0; i
< num_mibs
; i
++) {
418 const struct ar40xx_mib_desc
*mib
;
421 mib
= &ar40xx_mibs
[i
];
422 t
= ar40xx_read(priv
, base
+ mib
->offset
);
423 if (mib
->size
== 2) {
426 hi
= ar40xx_read(priv
, base
+ mib
->offset
+ 4);
435 ar40xx_mib_capture(struct ar40xx_priv
*priv
)
437 return ar40xx_mib_op(priv
, AR40XX_MIB_FUNC_CAPTURE
);
441 ar40xx_mib_flush(struct ar40xx_priv
*priv
)
443 return ar40xx_mib_op(priv
, AR40XX_MIB_FUNC_FLUSH
);
447 ar40xx_sw_set_reset_mibs(struct switch_dev
*dev
,
448 const struct switch_attr
*attr
,
449 struct switch_val
*val
)
451 struct ar40xx_priv
*priv
= swdev_to_ar40xx(dev
);
454 u32 num_mibs
= ARRAY_SIZE(ar40xx_mibs
);
456 mutex_lock(&priv
->mib_lock
);
458 len
= priv
->dev
.ports
* num_mibs
* sizeof(*priv
->mib_stats
);
459 memset(priv
->mib_stats
, 0, len
);
460 ret
= ar40xx_mib_flush(priv
);
462 mutex_unlock(&priv
->mib_lock
);
467 ar40xx_sw_set_vlan(struct switch_dev
*dev
, const struct switch_attr
*attr
,
468 struct switch_val
*val
)
470 struct ar40xx_priv
*priv
= swdev_to_ar40xx(dev
);
472 priv
->vlan
= !!val
->value
.i
;
477 ar40xx_sw_get_vlan(struct switch_dev
*dev
, const struct switch_attr
*attr
,
478 struct switch_val
*val
)
480 struct ar40xx_priv
*priv
= swdev_to_ar40xx(dev
);
482 val
->value
.i
= priv
->vlan
;
487 ar40xx_sw_set_mirror_rx_enable(struct switch_dev
*dev
,
488 const struct switch_attr
*attr
,
489 struct switch_val
*val
)
491 struct ar40xx_priv
*priv
= swdev_to_ar40xx(dev
);
493 mutex_lock(&priv
->reg_mutex
);
494 priv
->mirror_rx
= !!val
->value
.i
;
495 ar40xx_set_mirror_regs(priv
);
496 mutex_unlock(&priv
->reg_mutex
);
502 ar40xx_sw_get_mirror_rx_enable(struct switch_dev
*dev
,
503 const struct switch_attr
*attr
,
504 struct switch_val
*val
)
506 struct ar40xx_priv
*priv
= swdev_to_ar40xx(dev
);
508 mutex_lock(&priv
->reg_mutex
);
509 val
->value
.i
= priv
->mirror_rx
;
510 mutex_unlock(&priv
->reg_mutex
);
515 ar40xx_sw_set_mirror_tx_enable(struct switch_dev
*dev
,
516 const struct switch_attr
*attr
,
517 struct switch_val
*val
)
519 struct ar40xx_priv
*priv
= swdev_to_ar40xx(dev
);
521 mutex_lock(&priv
->reg_mutex
);
522 priv
->mirror_tx
= !!val
->value
.i
;
523 ar40xx_set_mirror_regs(priv
);
524 mutex_unlock(&priv
->reg_mutex
);
530 ar40xx_sw_get_mirror_tx_enable(struct switch_dev
*dev
,
531 const struct switch_attr
*attr
,
532 struct switch_val
*val
)
534 struct ar40xx_priv
*priv
= swdev_to_ar40xx(dev
);
536 mutex_lock(&priv
->reg_mutex
);
537 val
->value
.i
= priv
->mirror_tx
;
538 mutex_unlock(&priv
->reg_mutex
);
543 ar40xx_sw_set_mirror_monitor_port(struct switch_dev
*dev
,
544 const struct switch_attr
*attr
,
545 struct switch_val
*val
)
547 struct ar40xx_priv
*priv
= swdev_to_ar40xx(dev
);
549 mutex_lock(&priv
->reg_mutex
);
550 priv
->monitor_port
= val
->value
.i
;
551 ar40xx_set_mirror_regs(priv
);
552 mutex_unlock(&priv
->reg_mutex
);
558 ar40xx_sw_get_mirror_monitor_port(struct switch_dev
*dev
,
559 const struct switch_attr
*attr
,
560 struct switch_val
*val
)
562 struct ar40xx_priv
*priv
= swdev_to_ar40xx(dev
);
564 mutex_lock(&priv
->reg_mutex
);
565 val
->value
.i
= priv
->monitor_port
;
566 mutex_unlock(&priv
->reg_mutex
);
571 ar40xx_sw_set_mirror_source_port(struct switch_dev
*dev
,
572 const struct switch_attr
*attr
,
573 struct switch_val
*val
)
575 struct ar40xx_priv
*priv
= swdev_to_ar40xx(dev
);
577 mutex_lock(&priv
->reg_mutex
);
578 priv
->source_port
= val
->value
.i
;
579 ar40xx_set_mirror_regs(priv
);
580 mutex_unlock(&priv
->reg_mutex
);
586 ar40xx_sw_get_mirror_source_port(struct switch_dev
*dev
,
587 const struct switch_attr
*attr
,
588 struct switch_val
*val
)
590 struct ar40xx_priv
*priv
= swdev_to_ar40xx(dev
);
592 mutex_lock(&priv
->reg_mutex
);
593 val
->value
.i
= priv
->source_port
;
594 mutex_unlock(&priv
->reg_mutex
);
599 ar40xx_sw_set_linkdown(struct switch_dev
*dev
,
600 const struct switch_attr
*attr
,
601 struct switch_val
*val
)
603 struct ar40xx_priv
*priv
= swdev_to_ar40xx(dev
);
605 if (val
->value
.i
== 1)
606 ar40xx_port_phy_linkdown(priv
);
608 ar40xx_phy_init(priv
);
614 ar40xx_sw_set_port_reset_mib(struct switch_dev
*dev
,
615 const struct switch_attr
*attr
,
616 struct switch_val
*val
)
618 struct ar40xx_priv
*priv
= swdev_to_ar40xx(dev
);
622 port
= val
->port_vlan
;
623 if (port
>= dev
->ports
)
626 mutex_lock(&priv
->mib_lock
);
627 ret
= ar40xx_mib_capture(priv
);
631 ar40xx_mib_fetch_port_stat(priv
, port
, true);
634 mutex_unlock(&priv
->mib_lock
);
639 ar40xx_sw_get_port_mib(struct switch_dev
*dev
,
640 const struct switch_attr
*attr
,
641 struct switch_val
*val
)
643 struct ar40xx_priv
*priv
= swdev_to_ar40xx(dev
);
647 char *buf
= priv
->buf
;
649 u32 num_mibs
= ARRAY_SIZE(ar40xx_mibs
);
651 port
= val
->port_vlan
;
652 if (port
>= dev
->ports
)
655 mutex_lock(&priv
->mib_lock
);
656 ret
= ar40xx_mib_capture(priv
);
660 ar40xx_mib_fetch_port_stat(priv
, port
, false);
662 len
+= snprintf(buf
+ len
, sizeof(priv
->buf
) - len
,
663 "Port %d MIB counters\n",
666 mib_stats
= &priv
->mib_stats
[port
* num_mibs
];
667 for (i
= 0; i
< num_mibs
; i
++)
668 len
+= snprintf(buf
+ len
, sizeof(priv
->buf
) - len
,
677 mutex_unlock(&priv
->mib_lock
);
682 ar40xx_sw_set_vid(struct switch_dev
*dev
, const struct switch_attr
*attr
,
683 struct switch_val
*val
)
685 struct ar40xx_priv
*priv
= swdev_to_ar40xx(dev
);
687 priv
->vlan_id
[val
->port_vlan
] = val
->value
.i
;
692 ar40xx_sw_get_vid(struct switch_dev
*dev
, const struct switch_attr
*attr
,
693 struct switch_val
*val
)
695 struct ar40xx_priv
*priv
= swdev_to_ar40xx(dev
);
697 val
->value
.i
= priv
->vlan_id
[val
->port_vlan
];
702 ar40xx_sw_get_pvid(struct switch_dev
*dev
, int port
, int *vlan
)
704 struct ar40xx_priv
*priv
= swdev_to_ar40xx(dev
);
705 *vlan
= priv
->pvid
[port
];
710 ar40xx_sw_set_pvid(struct switch_dev
*dev
, int port
, int vlan
)
712 struct ar40xx_priv
*priv
= swdev_to_ar40xx(dev
);
714 /* make sure no invalid PVIDs get set */
715 if (vlan
>= dev
->vlans
)
718 priv
->pvid
[port
] = vlan
;
723 ar40xx_read_port_link(struct ar40xx_priv
*priv
, int port
,
724 struct switch_port_link
*link
)
729 memset(link
, 0, sizeof(*link
));
731 status
= ar40xx_read(priv
, AR40XX_REG_PORT_STATUS(port
));
733 link
->aneg
= !!(status
& AR40XX_PORT_AUTO_LINK_EN
);
734 if (link
->aneg
|| (port
!= AR40XX_PORT_CPU
))
735 link
->link
= !!(status
& AR40XX_PORT_STATUS_LINK_UP
);
742 link
->duplex
= !!(status
& AR40XX_PORT_DUPLEX
);
743 link
->tx_flow
= !!(status
& AR40XX_PORT_STATUS_TXFLOW
);
744 link
->rx_flow
= !!(status
& AR40XX_PORT_STATUS_RXFLOW
);
746 speed
= (status
& AR40XX_PORT_SPEED
) >>
747 AR40XX_PORT_STATUS_SPEED_S
;
750 case AR40XX_PORT_SPEED_10M
:
751 link
->speed
= SWITCH_PORT_SPEED_10
;
753 case AR40XX_PORT_SPEED_100M
:
754 link
->speed
= SWITCH_PORT_SPEED_100
;
756 case AR40XX_PORT_SPEED_1000M
:
757 link
->speed
= SWITCH_PORT_SPEED_1000
;
760 link
->speed
= SWITCH_PORT_SPEED_UNKNOWN
;
/* swconfig: report link state for @port. */
static int
ar40xx_sw_get_port_link(struct switch_dev *dev, int port,
			struct switch_port_link *link)
{
	struct ar40xx_priv *priv = swdev_to_ar40xx(dev);

	ar40xx_read_port_link(priv, port, link);
	return 0;
}
775 static const struct switch_attr ar40xx_sw_attr_globals
[] = {
777 .type
= SWITCH_TYPE_INT
,
778 .name
= "enable_vlan",
779 .description
= "Enable VLAN mode",
780 .set
= ar40xx_sw_set_vlan
,
781 .get
= ar40xx_sw_get_vlan
,
785 .type
= SWITCH_TYPE_NOVAL
,
786 .name
= "reset_mibs",
787 .description
= "Reset all MIB counters",
788 .set
= ar40xx_sw_set_reset_mibs
,
791 .type
= SWITCH_TYPE_INT
,
792 .name
= "enable_mirror_rx",
793 .description
= "Enable mirroring of RX packets",
794 .set
= ar40xx_sw_set_mirror_rx_enable
,
795 .get
= ar40xx_sw_get_mirror_rx_enable
,
799 .type
= SWITCH_TYPE_INT
,
800 .name
= "enable_mirror_tx",
801 .description
= "Enable mirroring of TX packets",
802 .set
= ar40xx_sw_set_mirror_tx_enable
,
803 .get
= ar40xx_sw_get_mirror_tx_enable
,
807 .type
= SWITCH_TYPE_INT
,
808 .name
= "mirror_monitor_port",
809 .description
= "Mirror monitor port",
810 .set
= ar40xx_sw_set_mirror_monitor_port
,
811 .get
= ar40xx_sw_get_mirror_monitor_port
,
812 .max
= AR40XX_NUM_PORTS
- 1
815 .type
= SWITCH_TYPE_INT
,
816 .name
= "mirror_source_port",
817 .description
= "Mirror source port",
818 .set
= ar40xx_sw_set_mirror_source_port
,
819 .get
= ar40xx_sw_get_mirror_source_port
,
820 .max
= AR40XX_NUM_PORTS
- 1
823 .type
= SWITCH_TYPE_INT
,
825 .description
= "Link down all the PHYs",
826 .set
= ar40xx_sw_set_linkdown
,
831 static const struct switch_attr ar40xx_sw_attr_port
[] = {
833 .type
= SWITCH_TYPE_NOVAL
,
835 .description
= "Reset single port MIB counters",
836 .set
= ar40xx_sw_set_port_reset_mib
,
839 .type
= SWITCH_TYPE_STRING
,
841 .description
= "Get port's MIB counters",
843 .get
= ar40xx_sw_get_port_mib
,
847 const struct switch_attr ar40xx_sw_attr_vlan
[] = {
849 .type
= SWITCH_TYPE_INT
,
851 .description
= "VLAN ID (0-4094)",
852 .set
= ar40xx_sw_set_vid
,
853 .get
= ar40xx_sw_get_vid
,
858 /* End of swconfig support */
861 ar40xx_wait_bit(struct ar40xx_priv
*priv
, int reg
, u32 mask
, u32 val
)
867 t
= ar40xx_read(priv
, reg
);
868 if ((t
& mask
) == val
)
874 usleep_range(10, 20);
877 pr_err("ar40xx: timeout for reg %08x: %08x & %08x != %08x\n",
878 (unsigned int)reg
, t
, mask
, val
);
883 ar40xx_atu_flush(struct ar40xx_priv
*priv
)
887 ret
= ar40xx_wait_bit(priv
, AR40XX_REG_ATU_FUNC
,
888 AR40XX_ATU_FUNC_BUSY
, 0);
890 ar40xx_write(priv
, AR40XX_REG_ATU_FUNC
,
891 AR40XX_ATU_FUNC_OP_FLUSH
|
892 AR40XX_ATU_FUNC_BUSY
);
898 ar40xx_ess_reset(struct ar40xx_priv
*priv
)
900 reset_control_assert(priv
->ess_rst
);
902 reset_control_deassert(priv
->ess_rst
);
903 /* Waiting for all inner tables init done.
908 pr_info("ESS reset ok!\n");
911 /* Start of psgmii self test */
914 ar40xx_malibu_psgmii_ess_reset(struct ar40xx_priv
*priv
)
917 struct mii_bus
*bus
= priv
->mii_bus
;
918 /* reset phy psgmii */
919 /* fix phy psgmii RX 20bit */
920 mdiobus_write(bus
, 5, 0x0, 0x005b);
921 /* reset phy psgmii */
922 mdiobus_write(bus
, 5, 0x0, 0x001b);
923 /* release reset phy psgmii */
924 mdiobus_write(bus
, 5, 0x0, 0x005b);
926 for (n
= 0; n
< AR40XX_PSGMII_CALB_NUM
; n
++) {
929 status
= ar40xx_phy_mmd_read(priv
, 5, 1, 0x28);
932 /* Polling interval to check PSGMII PLL in malibu is ready
933 * the worst time is 8.67ms
934 * for 25MHz reference clock
935 * [512+(128+2048)*49]*80ns+100us
941 /*check malibu psgmii calibration done end..*/
943 /*freeze phy psgmii RX CDR*/
944 mdiobus_write(bus
, 5, 0x1a, 0x2230);
946 ar40xx_ess_reset(priv
);
948 /*check psgmii calibration done start*/
949 for (n
= 0; n
< AR40XX_PSGMII_CALB_NUM
; n
++) {
952 status
= ar40xx_psgmii_read(priv
, 0xa0);
955 /* Polling interval to check PSGMII PLL in ESS is ready */
960 /* check dakota psgmii calibration done end..*/
962 /* relesae phy psgmii RX CDR */
963 mdiobus_write(bus
, 5, 0x1a, 0x3230);
964 /* release phy psgmii RX 20bit */
965 mdiobus_write(bus
, 5, 0x0, 0x005f);
970 ar40xx_psgmii_single_phy_testing(struct ar40xx_priv
*priv
, int phy
)
977 u32 tx_all_ok
, rx_all_ok
;
978 struct mii_bus
*bus
= priv
->mii_bus
;
980 mdiobus_write(bus
, phy
, 0x0, 0x9000);
981 mdiobus_write(bus
, phy
, 0x0, 0x4140);
983 for (j
= 0; j
< AR40XX_PSGMII_CALB_NUM
; j
++) {
986 status
= mdiobus_read(bus
, phy
, 0x11);
987 if (status
& AR40XX_PHY_SPEC_STATUS_LINK
)
989 /* the polling interval to check if the PHY link up or not
990 * maxwait_timer: 750 ms +/-10 ms
991 * minwait_timer : 1 us +/- 0.1us
992 * time resides in minwait_timer ~ maxwait_timer
993 * see IEEE 802.3 section 40.4.5.2
999 ar40xx_phy_mmd_write(priv
, phy
, 7, 0x8029, 0x0000);
1000 ar40xx_phy_mmd_write(priv
, phy
, 7, 0x8029, 0x0003);
1003 ar40xx_phy_mmd_write(priv
, phy
, 7, 0x8020, 0xa000);
1004 /* wait for all traffic end
1005 * 4096(pkt num)*1524(size)*8ns(125MHz)=49.9ms
1010 tx_ok
= ar40xx_phy_mmd_read(priv
, phy
, 7, 0x802e);
1011 tx_ok_high16
= ar40xx_phy_mmd_read(priv
, phy
, 7, 0x802d);
1012 tx_error
= ar40xx_phy_mmd_read(priv
, phy
, 7, 0x802f);
1013 rx_ok
= ar40xx_phy_mmd_read(priv
, phy
, 7, 0x802b);
1014 rx_ok_high16
= ar40xx_phy_mmd_read(priv
, phy
, 7, 0x802a);
1015 rx_error
= ar40xx_phy_mmd_read(priv
, phy
, 7, 0x802c);
1016 tx_all_ok
= tx_ok
+ (tx_ok_high16
<< 16);
1017 rx_all_ok
= rx_ok
+ (rx_ok_high16
<< 16);
1018 if (tx_all_ok
== 0x1000 && tx_error
== 0) {
1020 priv
->phy_t_status
&= (~BIT(phy
));
1022 pr_info("PHY %d single test PSGMII issue happen!\n", phy
);
1023 priv
->phy_t_status
|= BIT(phy
);
1026 mdiobus_write(bus
, phy
, 0x0, 0x1840);
1030 ar40xx_psgmii_all_phy_testing(struct ar40xx_priv
*priv
)
1033 struct mii_bus
*bus
= priv
->mii_bus
;
1035 mdiobus_write(bus
, 0x1f, 0x0, 0x9000);
1036 mdiobus_write(bus
, 0x1f, 0x0, 0x4140);
1038 for (j
= 0; j
< AR40XX_PSGMII_CALB_NUM
; j
++) {
1039 for (phy
= 0; phy
< AR40XX_NUM_PORTS
- 1; phy
++) {
1042 status
= mdiobus_read(bus
, phy
, 0x11);
1043 if (!(status
& BIT(10)))
1047 if (phy
>= (AR40XX_NUM_PORTS
- 1))
1049 /* The polling interva to check if the PHY link up or not */
1053 ar40xx_phy_mmd_write(priv
, 0x1f, 7, 0x8029, 0x0000);
1054 ar40xx_phy_mmd_write(priv
, 0x1f, 7, 0x8029, 0x0003);
1057 ar40xx_phy_mmd_write(priv
, 0x1f, 7, 0x8020, 0xa000);
1058 /* wait for all traffic end
1059 * 4096(pkt num)*1524(size)*8ns(125MHz)=49.9ms
1063 for (phy
= 0; phy
< AR40XX_NUM_PORTS
- 1; phy
++) {
1064 u32 tx_ok
, tx_error
;
1065 u32 rx_ok
, rx_error
;
1068 u32 tx_all_ok
, rx_all_ok
;
1071 tx_ok
= ar40xx_phy_mmd_read(priv
, phy
, 7, 0x802e);
1072 tx_ok_high16
= ar40xx_phy_mmd_read(priv
, phy
, 7, 0x802d);
1073 tx_error
= ar40xx_phy_mmd_read(priv
, phy
, 7, 0x802f);
1074 rx_ok
= ar40xx_phy_mmd_read(priv
, phy
, 7, 0x802b);
1075 rx_ok_high16
= ar40xx_phy_mmd_read(priv
, phy
, 7, 0x802a);
1076 rx_error
= ar40xx_phy_mmd_read(priv
, phy
, 7, 0x802c);
1077 tx_all_ok
= tx_ok
+ (tx_ok_high16
<<16);
1078 rx_all_ok
= rx_ok
+ (rx_ok_high16
<<16);
1079 if (tx_all_ok
== 0x1000 && tx_error
== 0) {
1081 priv
->phy_t_status
&= ~BIT(phy
+ 8);
1083 pr_info("PHY%d test see issue!\n", phy
);
1084 priv
->phy_t_status
|= BIT(phy
+ 8);
1088 pr_debug("PHY all test 0x%x \r\n", priv
->phy_t_status
);
1092 ar40xx_psgmii_self_test(struct ar40xx_priv
*priv
)
1095 struct mii_bus
*bus
= priv
->mii_bus
;
1097 ar40xx_malibu_psgmii_ess_reset(priv
);
1099 /* switch to access MII reg for copper */
1100 mdiobus_write(bus
, 4, 0x1f, 0x8500);
1101 for (phy
= 0; phy
< AR40XX_NUM_PORTS
- 1; phy
++) {
1102 /*enable phy mdio broadcast write*/
1103 ar40xx_phy_mmd_write(priv
, phy
, 7, 0x8028, 0x801f);
1105 /* force no link by power down */
1106 mdiobus_write(bus
, 0x1f, 0x0, 0x1840);
1108 ar40xx_phy_mmd_write(priv
, 0x1f, 7, 0x8021, 0x1000);
1109 ar40xx_phy_mmd_write(priv
, 0x1f, 7, 0x8062, 0x05e0);
1112 mdiobus_write(bus
, 0x1f, 0x10, 0x6800);
1113 for (i
= 0; i
< AR40XX_PSGMII_CALB_NUM
; i
++) {
1114 priv
->phy_t_status
= 0;
1116 for (phy
= 0; phy
< AR40XX_NUM_PORTS
- 1; phy
++) {
1117 ar40xx_rmw(priv
, AR40XX_REG_PORT_LOOKUP(phy
+ 1),
1118 AR40XX_PORT_LOOKUP_LOOPBACK
,
1119 AR40XX_PORT_LOOKUP_LOOPBACK
);
1122 for (phy
= 0; phy
< AR40XX_NUM_PORTS
- 1; phy
++)
1123 ar40xx_psgmii_single_phy_testing(priv
, phy
);
1125 ar40xx_psgmii_all_phy_testing(priv
);
1127 if (priv
->phy_t_status
)
1128 ar40xx_malibu_psgmii_ess_reset(priv
);
1133 if (i
>= AR40XX_PSGMII_CALB_NUM
)
1134 pr_info("PSGMII cannot recover\n");
1136 pr_debug("PSGMII recovered after %d times reset\n", i
);
1138 /* configuration recover */
1140 ar40xx_phy_mmd_write(priv
, 0x1f, 7, 0x8021, 0x0);
1142 ar40xx_phy_mmd_write(priv
, 0x1f, 7, 0x8029, 0x0);
1143 /* disable traffic */
1144 ar40xx_phy_mmd_write(priv
, 0x1f, 7, 0x8020, 0x0);
1148 ar40xx_psgmii_self_test_clean(struct ar40xx_priv
*priv
)
1151 struct mii_bus
*bus
= priv
->mii_bus
;
1153 /* disable phy internal loopback */
1154 mdiobus_write(bus
, 0x1f, 0x10, 0x6860);
1155 mdiobus_write(bus
, 0x1f, 0x0, 0x9040);
1157 for (phy
= 0; phy
< AR40XX_NUM_PORTS
- 1; phy
++) {
1158 /* disable mac loop back */
1159 ar40xx_rmw(priv
, AR40XX_REG_PORT_LOOKUP(phy
+ 1),
1160 AR40XX_PORT_LOOKUP_LOOPBACK
, 0);
1161 /* disable phy mdio broadcast write */
1162 ar40xx_phy_mmd_write(priv
, phy
, 7, 0x8028, 0x001f);
1165 /* clear fdb entry */
1166 ar40xx_atu_flush(priv
);
1169 /* End of psgmii self test */
1172 ar40xx_mac_mode_init(struct ar40xx_priv
*priv
, u32 mode
)
1174 if (mode
== PORT_WRAPPER_PSGMII
) {
1175 ar40xx_psgmii_write(priv
, AR40XX_PSGMII_MODE_CONTROL
, 0x2200);
1176 ar40xx_psgmii_write(priv
, AR40XX_PSGMIIPHY_TX_CONTROL
, 0x8380);
1181 int ar40xx_cpuport_setup(struct ar40xx_priv
*priv
)
1185 t
= AR40XX_PORT_STATUS_TXFLOW
|
1186 AR40XX_PORT_STATUS_RXFLOW
|
1187 AR40XX_PORT_TXHALF_FLOW
|
1188 AR40XX_PORT_DUPLEX
|
1189 AR40XX_PORT_SPEED_1000M
;
1190 ar40xx_write(priv
, AR40XX_REG_PORT_STATUS(0), t
);
1191 usleep_range(10, 20);
1193 t
|= AR40XX_PORT_TX_EN
|
1195 ar40xx_write(priv
, AR40XX_REG_PORT_STATUS(0), t
);
1201 ar40xx_init_port(struct ar40xx_priv
*priv
, int port
)
1205 ar40xx_write(priv
, AR40XX_REG_PORT_STATUS(port
), 0);
1207 ar40xx_write(priv
, AR40XX_REG_PORT_HEADER(port
), 0);
1209 ar40xx_write(priv
, AR40XX_REG_PORT_VLAN0(port
), 0);
1211 t
= AR40XX_PORT_VLAN1_OUT_MODE_UNTOUCH
<< AR40XX_PORT_VLAN1_OUT_MODE_S
;
1212 ar40xx_write(priv
, AR40XX_REG_PORT_VLAN1(port
), t
);
1214 t
= AR40XX_PORT_LOOKUP_LEARN
;
1215 t
|= AR40XX_PORT_STATE_FORWARD
<< AR40XX_PORT_LOOKUP_STATE_S
;
1216 ar40xx_write(priv
, AR40XX_REG_PORT_LOOKUP(port
), t
);
1220 ar40xx_init_globals(struct ar40xx_priv
*priv
)
1224 /* enable CPU port and disable mirror port */
1225 t
= AR40XX_FWD_CTRL0_CPU_PORT_EN
|
1226 AR40XX_FWD_CTRL0_MIRROR_PORT
;
1227 ar40xx_write(priv
, AR40XX_REG_FWD_CTRL0
, t
);
1229 /* forward multicast and broadcast frames to CPU */
1230 t
= (AR40XX_PORTS_ALL
<< AR40XX_FWD_CTRL1_UC_FLOOD_S
) |
1231 (AR40XX_PORTS_ALL
<< AR40XX_FWD_CTRL1_MC_FLOOD_S
) |
1232 (AR40XX_PORTS_ALL
<< AR40XX_FWD_CTRL1_BC_FLOOD_S
);
1233 ar40xx_write(priv
, AR40XX_REG_FWD_CTRL1
, t
);
1235 /* enable jumbo frames */
1236 ar40xx_rmw(priv
, AR40XX_REG_MAX_FRAME_SIZE
,
1237 AR40XX_MAX_FRAME_SIZE_MTU
, 9018 + 8 + 2);
1239 /* Enable MIB counters */
1240 ar40xx_rmw(priv
, AR40XX_REG_MODULE_EN
, 0,
1241 AR40XX_MODULE_EN_MIB
);
1244 ar40xx_write(priv
, AR40XX_REG_EEE_CTRL
, 0);
1246 /* set flowctrl thershold for cpu port */
1247 t
= (AR40XX_PORT0_FC_THRESH_ON_DFLT
<< 16) |
1248 AR40XX_PORT0_FC_THRESH_OFF_DFLT
;
1249 ar40xx_write(priv
, AR40XX_REG_PORT_FLOWCTRL_THRESH(0), t
);
1253 ar40xx_hw_init(struct ar40xx_priv
*priv
)
1257 ar40xx_ess_reset(priv
);
1262 ar40xx_psgmii_self_test(priv
);
1263 ar40xx_psgmii_self_test_clean(priv
);
1265 ar40xx_mac_mode_init(priv
, priv
->mac_mode
);
1267 for (i
= 0; i
< priv
->dev
.ports
; i
++)
1268 ar40xx_init_port(priv
, i
);
1270 ar40xx_init_globals(priv
);
1275 /* Start of qm error WAR */
1278 int ar40xx_force_1g_full(struct ar40xx_priv
*priv
, u32 port_id
)
1282 if (port_id
< 0 || port_id
> 6)
1285 reg
= AR40XX_REG_PORT_STATUS(port_id
);
1286 return ar40xx_rmw(priv
, reg
, AR40XX_PORT_SPEED
,
1287 (AR40XX_PORT_SPEED_1000M
| AR40XX_PORT_DUPLEX
));
1291 int ar40xx_get_qm_status(struct ar40xx_priv
*priv
,
1292 u32 port_id
, u32
*qm_buffer_err
)
1297 if (port_id
< 1 || port_id
> 5) {
1303 reg
= AR40XX_REG_QM_PORT0_3_QNUM
;
1304 ar40xx_write(priv
, AR40XX_REG_QM_DEBUG_ADDR
, reg
);
1305 qm_val
= ar40xx_read(priv
, AR40XX_REG_QM_DEBUG_VALUE
);
1306 /* every 8 bits for each port */
1307 *qm_buffer_err
= (qm_val
>> (port_id
* 8)) & 0xFF;
1309 reg
= AR40XX_REG_QM_PORT4_6_QNUM
;
1310 ar40xx_write(priv
, AR40XX_REG_QM_DEBUG_ADDR
, reg
);
1311 qm_val
= ar40xx_read(priv
, AR40XX_REG_QM_DEBUG_VALUE
);
1312 /* every 8 bits for each port */
1313 *qm_buffer_err
= (qm_val
>> ((port_id
-4) * 8)) & 0xFF;
1320 ar40xx_sw_mac_polling_task(struct ar40xx_priv
*priv
)
1322 static int task_count
;
1325 u32 link
, speed
, duplex
;
1327 u16 port_phy_status
[AR40XX_NUM_PORTS
];
1328 static u32 qm_err_cnt
[AR40XX_NUM_PORTS
] = {0, 0, 0, 0, 0, 0};
1329 static u32 link_cnt
[AR40XX_NUM_PORTS
] = {0, 0, 0, 0, 0, 0};
1330 struct mii_bus
*bus
= NULL
;
1332 if (!priv
|| !priv
->mii_bus
)
1335 bus
= priv
->mii_bus
;
1339 for (i
= 1; i
< AR40XX_NUM_PORTS
; ++i
) {
1340 port_phy_status
[i
] =
1341 mdiobus_read(bus
, i
-1, AR40XX_PHY_SPEC_STATUS
);
1343 speed
= FIELD_GET(AR40XX_PHY_SPEC_STATUS_SPEED
,
1344 port_phy_status
[i
]);
1345 link
= FIELD_GET(AR40XX_PHY_SPEC_STATUS_LINK
,
1346 port_phy_status
[i
]);
1347 duplex
= FIELD_GET(AR40XX_PHY_SPEC_STATUS_DUPLEX
,
1348 port_phy_status
[i
]);
1350 if (link
!= priv
->ar40xx_port_old_link
[i
]) {
1353 if ((priv
->ar40xx_port_old_link
[i
] ==
1354 AR40XX_PORT_LINK_UP
) &&
1355 (link
== AR40XX_PORT_LINK_DOWN
)) {
1356 /* LINK_EN disable(MAC force mode)*/
1357 reg
= AR40XX_REG_PORT_STATUS(i
);
1358 ar40xx_rmw(priv
, reg
,
1359 AR40XX_PORT_AUTO_LINK_EN
, 0);
1361 /* Check queue buffer */
1363 ar40xx_get_qm_status(priv
, i
, &qm_buffer_err
);
1364 if (qm_buffer_err
) {
1365 priv
->ar40xx_port_qm_buf
[i
] =
1366 AR40XX_QM_NOT_EMPTY
;
1370 priv
->ar40xx_port_qm_buf
[i
] =
1372 ar40xx_force_1g_full(priv
, i
);
1373 /* Ref:QCA8337 Datasheet,Clearing
1374 * MENU_CTRL_EN prevents phy to
1375 * stuck in 100BT mode when
1376 * bringing up the link
1378 ar40xx_phy_dbg_read(priv
, i
-1,
1381 phy_val
&= (~AR40XX_PHY_MANU_CTRL_EN
);
1382 ar40xx_phy_dbg_write(priv
, i
-1,
1386 priv
->ar40xx_port_old_link
[i
] = link
;
1387 } else if ((priv
->ar40xx_port_old_link
[i
] ==
1388 AR40XX_PORT_LINK_DOWN
) &&
1389 (link
== AR40XX_PORT_LINK_UP
)) {
1391 if (priv
->port_link_up
[i
] < 1) {
1392 ++priv
->port_link_up
[i
];
1394 /* Change port status */
1395 reg
= AR40XX_REG_PORT_STATUS(i
);
1396 value
= ar40xx_read(priv
, reg
);
1397 priv
->port_link_up
[i
] = 0;
1399 value
&= ~(AR40XX_PORT_DUPLEX
|
1401 value
|= speed
| (duplex
? BIT(6) : 0);
1402 ar40xx_write(priv
, reg
, value
);
1403 /* clock switch need such time
1406 usleep_range(100, 200);
1408 value
|= AR40XX_PORT_AUTO_LINK_EN
;
1409 ar40xx_write(priv
, reg
, value
);
1410 /* HW need such time to make sure link
1411 * stable before enable MAC
1413 usleep_range(100, 200);
1415 if (speed
== AR40XX_PORT_SPEED_100M
) {
1417 /* Enable @100M, if down to 10M
1418 * clock will change smoothly
1420 ar40xx_phy_dbg_read(priv
, i
-1,
1424 AR40XX_PHY_MANU_CTRL_EN
;
1425 ar40xx_phy_dbg_write(priv
, i
-1,
1429 priv
->ar40xx_port_old_link
[i
] = link
;
1434 if (priv
->ar40xx_port_qm_buf
[i
] == AR40XX_QM_NOT_EMPTY
) {
1436 ar40xx_get_qm_status(priv
, i
, &qm_buffer_err
);
1437 if (qm_buffer_err
) {
1440 priv
->ar40xx_port_qm_buf
[i
] =
1443 ar40xx_force_1g_full(priv
, i
);
1450 ar40xx_qm_err_check_work_task(struct work_struct
*work
)
1452 struct ar40xx_priv
*priv
= container_of(work
, struct ar40xx_priv
,
1455 mutex_lock(&priv
->qm_lock
);
1457 ar40xx_sw_mac_polling_task(priv
);
1459 mutex_unlock(&priv
->qm_lock
);
1461 schedule_delayed_work(&priv
->qm_dwork
,
1462 msecs_to_jiffies(AR40XX_QM_WORK_DELAY
));
1466 ar40xx_qm_err_check_work_start(struct ar40xx_priv
*priv
)
1468 mutex_init(&priv
->qm_lock
);
1470 INIT_DELAYED_WORK(&priv
->qm_dwork
, ar40xx_qm_err_check_work_task
);
1472 schedule_delayed_work(&priv
->qm_dwork
,
1473 msecs_to_jiffies(AR40XX_QM_WORK_DELAY
));
1478 /* End of qm error WAR */
1481 ar40xx_vlan_init(struct ar40xx_priv
*priv
)
1486 /* By default Enable VLAN */
1488 priv
->vlan_table
[AR40XX_LAN_VLAN
] = priv
->cpu_bmp
| priv
->lan_bmp
;
1489 priv
->vlan_table
[AR40XX_WAN_VLAN
] = priv
->cpu_bmp
| priv
->wan_bmp
;
1490 priv
->vlan_tagged
= priv
->cpu_bmp
;
1491 bmp
= priv
->lan_bmp
;
1492 for_each_set_bit(port
, &bmp
, AR40XX_NUM_PORTS
)
1493 priv
->pvid
[port
] = AR40XX_LAN_VLAN
;
1495 bmp
= priv
->wan_bmp
;
1496 for_each_set_bit(port
, &bmp
, AR40XX_NUM_PORTS
)
1497 priv
->pvid
[port
] = AR40XX_WAN_VLAN
;
1503 ar40xx_mib_work_func(struct work_struct
*work
)
1505 struct ar40xx_priv
*priv
;
1508 priv
= container_of(work
, struct ar40xx_priv
, mib_work
.work
);
1510 mutex_lock(&priv
->mib_lock
);
1512 err
= ar40xx_mib_capture(priv
);
1516 ar40xx_mib_fetch_port_stat(priv
, priv
->mib_next_port
, false);
1519 priv
->mib_next_port
++;
1520 if (priv
->mib_next_port
>= priv
->dev
.ports
)
1521 priv
->mib_next_port
= 0;
1523 mutex_unlock(&priv
->mib_lock
);
1525 schedule_delayed_work(&priv
->mib_work
,
1526 msecs_to_jiffies(AR40XX_MIB_WORK_DELAY
));
1530 ar40xx_setup_port(struct ar40xx_priv
*priv
, int port
, u32 members
)
1533 u32 egress
, ingress
;
1534 u32 pvid
= priv
->vlan_id
[priv
->pvid
[port
]];
1537 egress
= AR40XX_PORT_VLAN1_OUT_MODE_UNMOD
;
1539 ingress
= AR40XX_IN_SECURE
;
1541 egress
= AR40XX_PORT_VLAN1_OUT_MODE_UNTOUCH
;
1542 ingress
= AR40XX_IN_PORT_ONLY
;
1545 t
= pvid
<< AR40XX_PORT_VLAN0_DEF_SVID_S
;
1546 t
|= pvid
<< AR40XX_PORT_VLAN0_DEF_CVID_S
;
1547 ar40xx_write(priv
, AR40XX_REG_PORT_VLAN0(port
), t
);
1549 t
= AR40XX_PORT_VLAN1_PORT_VLAN_PROP
;
1550 t
|= egress
<< AR40XX_PORT_VLAN1_OUT_MODE_S
;
1552 ar40xx_write(priv
, AR40XX_REG_PORT_VLAN1(port
), t
);
1555 t
|= AR40XX_PORT_LOOKUP_LEARN
;
1556 t
|= ingress
<< AR40XX_PORT_LOOKUP_IN_MODE_S
;
1557 t
|= AR40XX_PORT_STATE_FORWARD
<< AR40XX_PORT_LOOKUP_STATE_S
;
1558 ar40xx_write(priv
, AR40XX_REG_PORT_LOOKUP(port
), t
);
1562 ar40xx_vtu_op(struct ar40xx_priv
*priv
, u32 op
, u32 val
)
1564 if (ar40xx_wait_bit(priv
, AR40XX_REG_VTU_FUNC1
,
1565 AR40XX_VTU_FUNC1_BUSY
, 0))
1568 if ((op
& AR40XX_VTU_FUNC1_OP
) == AR40XX_VTU_FUNC1_OP_LOAD
)
1569 ar40xx_write(priv
, AR40XX_REG_VTU_FUNC0
, val
);
1571 op
|= AR40XX_VTU_FUNC1_BUSY
;
1572 ar40xx_write(priv
, AR40XX_REG_VTU_FUNC1
, op
);
1576 ar40xx_vtu_load_vlan(struct ar40xx_priv
*priv
, u32 vid
, u32 port_mask
)
1582 op
= AR40XX_VTU_FUNC1_OP_LOAD
| (vid
<< AR40XX_VTU_FUNC1_VID_S
);
1583 val
= AR40XX_VTU_FUNC0_VALID
| AR40XX_VTU_FUNC0_IVL
;
1584 for (i
= 0; i
< AR40XX_NUM_PORTS
; i
++) {
1587 if ((port_mask
& BIT(i
)) == 0)
1588 mode
= AR40XX_VTU_FUNC0_EG_MODE_NOT
;
1589 else if (priv
->vlan
== 0)
1590 mode
= AR40XX_VTU_FUNC0_EG_MODE_KEEP
;
1591 else if ((priv
->vlan_tagged
& BIT(i
)) ||
1592 (priv
->vlan_id
[priv
->pvid
[i
]] != vid
))
1593 mode
= AR40XX_VTU_FUNC0_EG_MODE_TAG
;
1595 mode
= AR40XX_VTU_FUNC0_EG_MODE_UNTAG
;
1597 val
|= mode
<< AR40XX_VTU_FUNC0_EG_MODE_S(i
);
1599 ar40xx_vtu_op(priv
, op
, val
);
1603 ar40xx_vtu_flush(struct ar40xx_priv
*priv
)
1605 ar40xx_vtu_op(priv
, AR40XX_VTU_FUNC1_OP_FLUSH
, 0);
1609 ar40xx_sw_hw_apply(struct switch_dev
*dev
)
1611 struct ar40xx_priv
*priv
= swdev_to_ar40xx(dev
);
1612 u8 portmask
[AR40XX_NUM_PORTS
];
1615 mutex_lock(&priv
->reg_mutex
);
1616 /* flush all vlan entries */
1617 ar40xx_vtu_flush(priv
);
1619 memset(portmask
, 0, sizeof(portmask
));
1621 for (j
= 0; j
< AR40XX_MAX_VLANS
; j
++) {
1622 u8 vp
= priv
->vlan_table
[j
];
1627 for (i
= 0; i
< dev
->ports
; i
++) {
1631 portmask
[i
] |= vp
& ~mask
;
1634 ar40xx_vtu_load_vlan(priv
, priv
->vlan_id
[j
],
1635 priv
->vlan_table
[j
]);
1638 /* 8021q vlan disabled */
1639 for (i
= 0; i
< dev
->ports
; i
++) {
1640 if (i
== AR40XX_PORT_CPU
)
1643 portmask
[i
] = BIT(AR40XX_PORT_CPU
);
1644 portmask
[AR40XX_PORT_CPU
] |= BIT(i
);
1648 /* update the port destination mask registers and tag settings */
1649 for (i
= 0; i
< dev
->ports
; i
++)
1650 ar40xx_setup_port(priv
, i
, portmask
[i
]);
1652 ar40xx_set_mirror_regs(priv
);
1654 mutex_unlock(&priv
->reg_mutex
);
1659 ar40xx_sw_reset_switch(struct switch_dev
*dev
)
1661 struct ar40xx_priv
*priv
= swdev_to_ar40xx(dev
);
1664 mutex_lock(&priv
->reg_mutex
);
1665 memset(&priv
->vlan
, 0, sizeof(struct ar40xx_priv
) -
1666 offsetof(struct ar40xx_priv
, vlan
));
1668 for (i
= 0; i
< AR40XX_MAX_VLANS
; i
++)
1669 priv
->vlan_id
[i
] = i
;
1671 ar40xx_vlan_init(priv
);
1673 priv
->mirror_rx
= false;
1674 priv
->mirror_tx
= false;
1675 priv
->source_port
= 0;
1676 priv
->monitor_port
= 0;
1678 mutex_unlock(&priv
->reg_mutex
);
1680 rv
= ar40xx_sw_hw_apply(dev
);
1685 ar40xx_start(struct ar40xx_priv
*priv
)
1689 ret
= ar40xx_hw_init(priv
);
1693 ret
= ar40xx_sw_reset_switch(&priv
->dev
);
1697 /* at last, setup cpu port */
1698 ret
= ar40xx_cpuport_setup(priv
);
1702 schedule_delayed_work(&priv
->mib_work
,
1703 msecs_to_jiffies(AR40XX_MIB_WORK_DELAY
));
1705 ar40xx_qm_err_check_work_start(priv
);
1710 static const struct switch_dev_ops ar40xx_sw_ops
= {
1712 .attr
= ar40xx_sw_attr_globals
,
1713 .n_attr
= ARRAY_SIZE(ar40xx_sw_attr_globals
),
1716 .attr
= ar40xx_sw_attr_port
,
1717 .n_attr
= ARRAY_SIZE(ar40xx_sw_attr_port
),
1720 .attr
= ar40xx_sw_attr_vlan
,
1721 .n_attr
= ARRAY_SIZE(ar40xx_sw_attr_vlan
),
1723 .get_port_pvid
= ar40xx_sw_get_pvid
,
1724 .set_port_pvid
= ar40xx_sw_set_pvid
,
1725 .get_vlan_ports
= ar40xx_sw_get_ports
,
1726 .set_vlan_ports
= ar40xx_sw_set_ports
,
1727 .apply_config
= ar40xx_sw_hw_apply
,
1728 .reset_switch
= ar40xx_sw_reset_switch
,
1729 .get_port_link
= ar40xx_sw_get_port_link
,
1732 /* Platform driver probe function */
1734 static int ar40xx_probe(struct platform_device
*pdev
)
1736 struct device_node
*switch_node
;
1737 struct device_node
*psgmii_node
;
1738 struct device_node
*mdio_node
;
1739 const __be32
*mac_mode
;
1740 struct clk
*ess_clk
;
1741 struct switch_dev
*swdev
;
1742 struct ar40xx_priv
*priv
;
1745 struct resource psgmii_base
= {0};
1746 struct resource switch_base
= {0};
1749 priv
= devm_kzalloc(&pdev
->dev
, sizeof(*priv
), GFP_KERNEL
);
1753 platform_set_drvdata(pdev
, priv
);
1756 switch_node
= of_node_get(pdev
->dev
.of_node
);
1757 if (of_address_to_resource(switch_node
, 0, &switch_base
) != 0)
1760 priv
->hw_addr
= devm_ioremap_resource(&pdev
->dev
, &switch_base
);
1761 if (IS_ERR(priv
->hw_addr
)) {
1762 dev_err(&pdev
->dev
, "Failed to ioremap switch_base!\n");
1763 return PTR_ERR(priv
->hw_addr
);
1767 psgmii_node
= of_find_node_by_name(NULL
, "ess-psgmii");
1769 dev_err(&pdev
->dev
, "Failed to find ess-psgmii node!\n");
1773 if (of_address_to_resource(psgmii_node
, 0, &psgmii_base
) != 0)
1776 priv
->psgmii_hw_addr
= devm_ioremap_resource(&pdev
->dev
, &psgmii_base
);
1777 if (IS_ERR(priv
->psgmii_hw_addr
)) {
1778 dev_err(&pdev
->dev
, "psgmii ioremap fail!\n");
1779 return PTR_ERR(priv
->psgmii_hw_addr
);
1782 mac_mode
= of_get_property(switch_node
, "switch_mac_mode", &len
);
1784 dev_err(&pdev
->dev
, "Failed to read switch_mac_mode\n");
1787 priv
->mac_mode
= be32_to_cpup(mac_mode
);
1789 ess_clk
= of_clk_get_by_name(switch_node
, "ess_clk");
1791 clk_prepare_enable(ess_clk
);
1793 priv
->ess_rst
= devm_reset_control_get(&pdev
->dev
, "ess_rst");
1794 if (IS_ERR(priv
->ess_rst
)) {
1795 dev_err(&pdev
->dev
, "Failed to get ess_rst control!\n");
1796 return PTR_ERR(priv
->ess_rst
);
1799 if (of_property_read_u32(switch_node
, "switch_cpu_bmp",
1801 of_property_read_u32(switch_node
, "switch_lan_bmp",
1803 of_property_read_u32(switch_node
, "switch_wan_bmp",
1805 dev_err(&pdev
->dev
, "Failed to read port properties\n");
1809 mutex_init(&priv
->reg_mutex
);
1810 mutex_init(&priv
->mib_lock
);
1811 INIT_DELAYED_WORK(&priv
->mib_work
, ar40xx_mib_work_func
);
1813 /* register switch */
1816 mdio_node
= of_find_compatible_node(NULL
, NULL
, "qcom,ipq4019-mdio");
1818 dev_err(&pdev
->dev
, "Probe failed - Cannot find mdio node by phandle!\n");
1820 goto err_missing_phy
;
1823 priv
->mii_bus
= of_mdio_find_bus(mdio_node
);
1825 if (priv
->mii_bus
== NULL
) {
1826 dev_err(&pdev
->dev
, "Probe failed - Missing PHYs!\n");
1828 goto err_missing_phy
;
1831 swdev
->alias
= dev_name(&priv
->mii_bus
->dev
);
1833 swdev
->cpu_port
= AR40XX_PORT_CPU
;
1834 swdev
->name
= "QCA AR40xx";
1835 swdev
->vlans
= AR40XX_MAX_VLANS
;
1836 swdev
->ports
= AR40XX_NUM_PORTS
;
1837 swdev
->ops
= &ar40xx_sw_ops
;
1838 ret
= register_switch(swdev
, NULL
);
1840 dev_err(&pdev
->dev
, "Switch registration failed!\n");
1844 num_mibs
= ARRAY_SIZE(ar40xx_mibs
);
1845 len
= priv
->dev
.ports
* num_mibs
*
1846 sizeof(*priv
->mib_stats
);
1847 priv
->mib_stats
= devm_kzalloc(&pdev
->dev
, len
, GFP_KERNEL
);
1848 if (!priv
->mib_stats
) {
1850 goto err_unregister_switch
;
1857 err_unregister_switch
:
1858 unregister_switch(&priv
->dev
);
1860 platform_set_drvdata(pdev
, NULL
);
1864 static int ar40xx_remove(struct platform_device
*pdev
)
1866 struct ar40xx_priv
*priv
= platform_get_drvdata(pdev
);
1868 cancel_delayed_work_sync(&priv
->qm_dwork
);
1869 cancel_delayed_work_sync(&priv
->mib_work
);
1871 unregister_switch(&priv
->dev
);
1876 static const struct of_device_id ar40xx_of_mtable
[] = {
1877 {.compatible
= "qcom,ess-switch" },
1881 struct platform_driver ar40xx_drv
= {
1882 .probe
= ar40xx_probe
,
1883 .remove
= ar40xx_remove
,
1886 .of_match_table
= ar40xx_of_mtable
,
1890 module_platform_driver(ar40xx_drv
);
1892 MODULE_DESCRIPTION("IPQ40XX ESS driver");
1893 MODULE_LICENSE("Dual BSD/GPL");