/*
 * Copyright (c) 2016, The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
 * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
16 #include <linux/module.h>
17 #include <linux/list.h>
18 #include <linux/bitops.h>
19 #include <linux/switch.h>
20 #include <linux/delay.h>
21 #include <linux/phy.h>
22 #include <linux/clk.h>
23 #include <linux/reset.h>
24 #include <linux/lockdep.h>
25 #include <linux/workqueue.h>
26 #include <linux/of_device.h>
27 #include <linux/of_address.h>
28 #include <linux/mdio.h>
29 #include <linux/gpio.h>
/* Singleton driver state; set at probe time, read by the swconfig hooks. */
static struct ar40xx_priv *ar40xx_priv;
35 #define MIB_DESC(_s , _o, _n) \
42 static const struct ar40xx_mib_desc ar40xx_mibs
[] = {
43 MIB_DESC(1, AR40XX_STATS_RXBROAD
, "RxBroad"),
44 MIB_DESC(1, AR40XX_STATS_RXPAUSE
, "RxPause"),
45 MIB_DESC(1, AR40XX_STATS_RXMULTI
, "RxMulti"),
46 MIB_DESC(1, AR40XX_STATS_RXFCSERR
, "RxFcsErr"),
47 MIB_DESC(1, AR40XX_STATS_RXALIGNERR
, "RxAlignErr"),
48 MIB_DESC(1, AR40XX_STATS_RXRUNT
, "RxRunt"),
49 MIB_DESC(1, AR40XX_STATS_RXFRAGMENT
, "RxFragment"),
50 MIB_DESC(1, AR40XX_STATS_RX64BYTE
, "Rx64Byte"),
51 MIB_DESC(1, AR40XX_STATS_RX128BYTE
, "Rx128Byte"),
52 MIB_DESC(1, AR40XX_STATS_RX256BYTE
, "Rx256Byte"),
53 MIB_DESC(1, AR40XX_STATS_RX512BYTE
, "Rx512Byte"),
54 MIB_DESC(1, AR40XX_STATS_RX1024BYTE
, "Rx1024Byte"),
55 MIB_DESC(1, AR40XX_STATS_RX1518BYTE
, "Rx1518Byte"),
56 MIB_DESC(1, AR40XX_STATS_RXMAXBYTE
, "RxMaxByte"),
57 MIB_DESC(1, AR40XX_STATS_RXTOOLONG
, "RxTooLong"),
58 MIB_DESC(2, AR40XX_STATS_RXGOODBYTE
, "RxGoodByte"),
59 MIB_DESC(2, AR40XX_STATS_RXBADBYTE
, "RxBadByte"),
60 MIB_DESC(1, AR40XX_STATS_RXOVERFLOW
, "RxOverFlow"),
61 MIB_DESC(1, AR40XX_STATS_FILTERED
, "Filtered"),
62 MIB_DESC(1, AR40XX_STATS_TXBROAD
, "TxBroad"),
63 MIB_DESC(1, AR40XX_STATS_TXPAUSE
, "TxPause"),
64 MIB_DESC(1, AR40XX_STATS_TXMULTI
, "TxMulti"),
65 MIB_DESC(1, AR40XX_STATS_TXUNDERRUN
, "TxUnderRun"),
66 MIB_DESC(1, AR40XX_STATS_TX64BYTE
, "Tx64Byte"),
67 MIB_DESC(1, AR40XX_STATS_TX128BYTE
, "Tx128Byte"),
68 MIB_DESC(1, AR40XX_STATS_TX256BYTE
, "Tx256Byte"),
69 MIB_DESC(1, AR40XX_STATS_TX512BYTE
, "Tx512Byte"),
70 MIB_DESC(1, AR40XX_STATS_TX1024BYTE
, "Tx1024Byte"),
71 MIB_DESC(1, AR40XX_STATS_TX1518BYTE
, "Tx1518Byte"),
72 MIB_DESC(1, AR40XX_STATS_TXMAXBYTE
, "TxMaxByte"),
73 MIB_DESC(1, AR40XX_STATS_TXOVERSIZE
, "TxOverSize"),
74 MIB_DESC(2, AR40XX_STATS_TXBYTE
, "TxByte"),
75 MIB_DESC(1, AR40XX_STATS_TXCOLLISION
, "TxCollision"),
76 MIB_DESC(1, AR40XX_STATS_TXABORTCOL
, "TxAbortCol"),
77 MIB_DESC(1, AR40XX_STATS_TXMULTICOL
, "TxMultiCol"),
78 MIB_DESC(1, AR40XX_STATS_TXSINGLECOL
, "TxSingleCol"),
79 MIB_DESC(1, AR40XX_STATS_TXEXCDEFER
, "TxExcDefer"),
80 MIB_DESC(1, AR40XX_STATS_TXDEFER
, "TxDefer"),
81 MIB_DESC(1, AR40XX_STATS_TXLATECOL
, "TxLateCol"),
85 ar40xx_read(struct ar40xx_priv
*priv
, int reg
)
87 return readl(priv
->hw_addr
+ reg
);
91 ar40xx_psgmii_read(struct ar40xx_priv
*priv
, int reg
)
93 return readl(priv
->psgmii_hw_addr
+ reg
);
97 ar40xx_write(struct ar40xx_priv
*priv
, int reg
, u32 val
)
99 writel(val
, priv
->hw_addr
+ reg
);
103 ar40xx_rmw(struct ar40xx_priv
*priv
, int reg
, u32 mask
, u32 val
)
107 ret
= ar40xx_read(priv
, reg
);
110 ar40xx_write(priv
, reg
, ret
);
115 ar40xx_psgmii_write(struct ar40xx_priv
*priv
, int reg
, u32 val
)
117 writel(val
, priv
->psgmii_hw_addr
+ reg
);
121 ar40xx_phy_dbg_write(struct ar40xx_priv
*priv
, int phy_addr
,
122 u16 dbg_addr
, u16 dbg_data
)
124 struct mii_bus
*bus
= priv
->mii_bus
;
126 mutex_lock(&bus
->mdio_lock
);
127 bus
->write(bus
, phy_addr
, AR40XX_MII_ATH_DBG_ADDR
, dbg_addr
);
128 bus
->write(bus
, phy_addr
, AR40XX_MII_ATH_DBG_DATA
, dbg_data
);
129 mutex_unlock(&bus
->mdio_lock
);
133 ar40xx_phy_dbg_read(struct ar40xx_priv
*priv
, int phy_addr
,
134 u16 dbg_addr
, u16
*dbg_data
)
136 struct mii_bus
*bus
= priv
->mii_bus
;
138 mutex_lock(&bus
->mdio_lock
);
139 bus
->write(bus
, phy_addr
, AR40XX_MII_ATH_DBG_ADDR
, dbg_addr
);
140 *dbg_data
= bus
->read(bus
, phy_addr
, AR40XX_MII_ATH_DBG_DATA
);
141 mutex_unlock(&bus
->mdio_lock
);
145 ar40xx_phy_mmd_write(struct ar40xx_priv
*priv
, u32 phy_id
,
146 u16 mmd_num
, u16 reg_id
, u16 reg_val
)
148 struct mii_bus
*bus
= priv
->mii_bus
;
150 mutex_lock(&bus
->mdio_lock
);
151 bus
->write(bus
, phy_id
,
152 AR40XX_MII_ATH_MMD_ADDR
, mmd_num
);
153 bus
->write(bus
, phy_id
,
154 AR40XX_MII_ATH_MMD_DATA
, reg_id
);
155 bus
->write(bus
, phy_id
,
156 AR40XX_MII_ATH_MMD_ADDR
,
158 bus
->write(bus
, phy_id
,
159 AR40XX_MII_ATH_MMD_DATA
, reg_val
);
160 mutex_unlock(&bus
->mdio_lock
);
164 ar40xx_phy_mmd_read(struct ar40xx_priv
*priv
, u32 phy_id
,
165 u16 mmd_num
, u16 reg_id
)
168 struct mii_bus
*bus
= priv
->mii_bus
;
170 mutex_lock(&bus
->mdio_lock
);
171 bus
->write(bus
, phy_id
,
172 AR40XX_MII_ATH_MMD_ADDR
, mmd_num
);
173 bus
->write(bus
, phy_id
,
174 AR40XX_MII_ATH_MMD_DATA
, reg_id
);
175 bus
->write(bus
, phy_id
,
176 AR40XX_MII_ATH_MMD_ADDR
,
178 value
= bus
->read(bus
, phy_id
, AR40XX_MII_ATH_MMD_DATA
);
179 mutex_unlock(&bus
->mdio_lock
);
183 /* Start of swconfig support */
186 ar40xx_phy_poll_reset(struct ar40xx_priv
*priv
)
188 u32 i
, in_reset
, retries
= 500;
189 struct mii_bus
*bus
= priv
->mii_bus
;
191 /* Assume RESET was recently issued to some or all of the phys */
192 in_reset
= GENMASK(AR40XX_NUM_PHYS
- 1, 0);
195 /* 1ms should be plenty of time.
196 * 802.3 spec allows for a max wait time of 500ms
198 usleep_range(1000, 2000);
200 for (i
= 0; i
< AR40XX_NUM_PHYS
; i
++) {
203 /* skip devices which have completed reset */
204 if (!(in_reset
& BIT(i
)))
207 val
= mdiobus_read(bus
, i
, MII_BMCR
);
211 /* mark when phy is no longer in reset state */
212 if (!(val
& BMCR_RESET
))
220 dev_warn(&bus
->dev
, "Failed to reset all phys! (in_reset: 0x%x)\n",
225 ar40xx_phy_init(struct ar40xx_priv
*priv
)
232 for (i
= 0; i
< AR40XX_NUM_PORTS
- 1; i
++) {
233 ar40xx_phy_dbg_read(priv
, i
, AR40XX_PHY_DEBUG_0
, &val
);
234 val
&= ~AR40XX_PHY_MANU_CTRL_EN
;
235 ar40xx_phy_dbg_write(priv
, i
, AR40XX_PHY_DEBUG_0
, val
);
236 mdiobus_write(bus
, i
,
237 MII_ADVERTISE
, ADVERTISE_ALL
|
238 ADVERTISE_PAUSE_CAP
|
239 ADVERTISE_PAUSE_ASYM
);
240 mdiobus_write(bus
, i
, MII_CTRL1000
, ADVERTISE_1000FULL
);
241 mdiobus_write(bus
, i
, MII_BMCR
, BMCR_RESET
| BMCR_ANENABLE
);
244 ar40xx_phy_poll_reset(priv
);
248 ar40xx_port_phy_linkdown(struct ar40xx_priv
*priv
)
255 for (i
= 0; i
< AR40XX_NUM_PORTS
- 1; i
++) {
256 mdiobus_write(bus
, i
, MII_CTRL1000
, 0);
257 mdiobus_write(bus
, i
, MII_ADVERTISE
, 0);
258 mdiobus_write(bus
, i
, MII_BMCR
, BMCR_RESET
| BMCR_ANENABLE
);
259 ar40xx_phy_dbg_read(priv
, i
, AR40XX_PHY_DEBUG_0
, &val
);
260 val
|= AR40XX_PHY_MANU_CTRL_EN
;
261 ar40xx_phy_dbg_write(priv
, i
, AR40XX_PHY_DEBUG_0
, val
);
262 /* disable transmit */
263 ar40xx_phy_dbg_read(priv
, i
, AR40XX_PHY_DEBUG_2
, &val
);
265 ar40xx_phy_dbg_write(priv
, i
, AR40XX_PHY_DEBUG_2
, val
);
270 ar40xx_set_mirror_regs(struct ar40xx_priv
*priv
)
274 /* reset all mirror registers */
275 ar40xx_rmw(priv
, AR40XX_REG_FWD_CTRL0
,
276 AR40XX_FWD_CTRL0_MIRROR_PORT
,
277 (0xF << AR40XX_FWD_CTRL0_MIRROR_PORT_S
));
278 for (port
= 0; port
< AR40XX_NUM_PORTS
; port
++) {
279 ar40xx_rmw(priv
, AR40XX_REG_PORT_LOOKUP(port
),
280 AR40XX_PORT_LOOKUP_ING_MIRROR_EN
, 0);
282 ar40xx_rmw(priv
, AR40XX_REG_PORT_HOL_CTRL1(port
),
283 AR40XX_PORT_HOL_CTRL1_EG_MIRROR_EN
, 0);
286 /* now enable mirroring if necessary */
287 if (priv
->source_port
>= AR40XX_NUM_PORTS
||
288 priv
->monitor_port
>= AR40XX_NUM_PORTS
||
289 priv
->source_port
== priv
->monitor_port
) {
293 ar40xx_rmw(priv
, AR40XX_REG_FWD_CTRL0
,
294 AR40XX_FWD_CTRL0_MIRROR_PORT
,
295 (priv
->monitor_port
<< AR40XX_FWD_CTRL0_MIRROR_PORT_S
));
298 ar40xx_rmw(priv
, AR40XX_REG_PORT_LOOKUP(priv
->source_port
), 0,
299 AR40XX_PORT_LOOKUP_ING_MIRROR_EN
);
302 ar40xx_rmw(priv
, AR40XX_REG_PORT_HOL_CTRL1(priv
->source_port
),
303 0, AR40XX_PORT_HOL_CTRL1_EG_MIRROR_EN
);
307 ar40xx_sw_get_ports(struct switch_dev
*dev
, struct switch_val
*val
)
309 struct ar40xx_priv
*priv
= swdev_to_ar40xx(dev
);
310 u8 ports
= priv
->vlan_table
[val
->port_vlan
];
314 for (i
= 0; i
< dev
->ports
; i
++) {
315 struct switch_port
*p
;
317 if (!(ports
& BIT(i
)))
320 p
= &val
->value
.ports
[val
->len
++];
322 if ((priv
->vlan_tagged
& BIT(i
)) ||
323 (priv
->pvid
[i
] != val
->port_vlan
))
324 p
->flags
= BIT(SWITCH_PORT_FLAG_TAGGED
);
332 ar40xx_sw_set_ports(struct switch_dev
*dev
, struct switch_val
*val
)
334 struct ar40xx_priv
*priv
= swdev_to_ar40xx(dev
);
335 u8
*vt
= &priv
->vlan_table
[val
->port_vlan
];
339 for (i
= 0; i
< val
->len
; i
++) {
340 struct switch_port
*p
= &val
->value
.ports
[i
];
342 if (p
->flags
& BIT(SWITCH_PORT_FLAG_TAGGED
)) {
343 if (val
->port_vlan
== priv
->pvid
[p
->id
])
344 priv
->vlan_tagged
|= BIT(p
->id
);
346 priv
->vlan_tagged
&= ~BIT(p
->id
);
347 priv
->pvid
[p
->id
] = val
->port_vlan
;
356 ar40xx_reg_wait(struct ar40xx_priv
*priv
, u32 reg
, u32 mask
, u32 val
,
361 for (i
= 0; i
< timeout
; i
++) {
364 t
= ar40xx_read(priv
, reg
);
365 if ((t
& mask
) == val
)
368 usleep_range(1000, 2000);
375 ar40xx_mib_op(struct ar40xx_priv
*priv
, u32 op
)
379 lockdep_assert_held(&priv
->mib_lock
);
381 /* Capture the hardware statistics for all ports */
382 ar40xx_rmw(priv
, AR40XX_REG_MIB_FUNC
,
383 AR40XX_MIB_FUNC
, (op
<< AR40XX_MIB_FUNC_S
));
385 /* Wait for the capturing to complete. */
386 ret
= ar40xx_reg_wait(priv
, AR40XX_REG_MIB_FUNC
,
387 AR40XX_MIB_BUSY
, 0, 10);
393 ar40xx_mib_fetch_port_stat(struct ar40xx_priv
*priv
, int port
, bool flush
)
398 u32 num_mibs
= ARRAY_SIZE(ar40xx_mibs
);
400 WARN_ON(port
>= priv
->dev
.ports
);
402 lockdep_assert_held(&priv
->mib_lock
);
404 base
= AR40XX_REG_PORT_STATS_START
+
405 AR40XX_REG_PORT_STATS_LEN
* port
;
407 mib_stats
= &priv
->mib_stats
[port
* num_mibs
];
411 len
= num_mibs
* sizeof(*mib_stats
);
412 memset(mib_stats
, 0, len
);
415 for (i
= 0; i
< num_mibs
; i
++) {
416 const struct ar40xx_mib_desc
*mib
;
419 mib
= &ar40xx_mibs
[i
];
420 t
= ar40xx_read(priv
, base
+ mib
->offset
);
421 if (mib
->size
== 2) {
424 hi
= ar40xx_read(priv
, base
+ mib
->offset
+ 4);
433 ar40xx_mib_capture(struct ar40xx_priv
*priv
)
435 return ar40xx_mib_op(priv
, AR40XX_MIB_FUNC_CAPTURE
);
439 ar40xx_mib_flush(struct ar40xx_priv
*priv
)
441 return ar40xx_mib_op(priv
, AR40XX_MIB_FUNC_FLUSH
);
445 ar40xx_sw_set_reset_mibs(struct switch_dev
*dev
,
446 const struct switch_attr
*attr
,
447 struct switch_val
*val
)
449 struct ar40xx_priv
*priv
= swdev_to_ar40xx(dev
);
452 u32 num_mibs
= ARRAY_SIZE(ar40xx_mibs
);
454 mutex_lock(&priv
->mib_lock
);
456 len
= priv
->dev
.ports
* num_mibs
* sizeof(*priv
->mib_stats
);
457 memset(priv
->mib_stats
, 0, len
);
458 ret
= ar40xx_mib_flush(priv
);
460 mutex_unlock(&priv
->mib_lock
);
465 ar40xx_sw_set_vlan(struct switch_dev
*dev
, const struct switch_attr
*attr
,
466 struct switch_val
*val
)
468 struct ar40xx_priv
*priv
= swdev_to_ar40xx(dev
);
470 priv
->vlan
= !!val
->value
.i
;
475 ar40xx_sw_get_vlan(struct switch_dev
*dev
, const struct switch_attr
*attr
,
476 struct switch_val
*val
)
478 struct ar40xx_priv
*priv
= swdev_to_ar40xx(dev
);
480 val
->value
.i
= priv
->vlan
;
485 ar40xx_sw_set_mirror_rx_enable(struct switch_dev
*dev
,
486 const struct switch_attr
*attr
,
487 struct switch_val
*val
)
489 struct ar40xx_priv
*priv
= swdev_to_ar40xx(dev
);
491 mutex_lock(&priv
->reg_mutex
);
492 priv
->mirror_rx
= !!val
->value
.i
;
493 ar40xx_set_mirror_regs(priv
);
494 mutex_unlock(&priv
->reg_mutex
);
500 ar40xx_sw_get_mirror_rx_enable(struct switch_dev
*dev
,
501 const struct switch_attr
*attr
,
502 struct switch_val
*val
)
504 struct ar40xx_priv
*priv
= swdev_to_ar40xx(dev
);
506 mutex_lock(&priv
->reg_mutex
);
507 val
->value
.i
= priv
->mirror_rx
;
508 mutex_unlock(&priv
->reg_mutex
);
513 ar40xx_sw_set_mirror_tx_enable(struct switch_dev
*dev
,
514 const struct switch_attr
*attr
,
515 struct switch_val
*val
)
517 struct ar40xx_priv
*priv
= swdev_to_ar40xx(dev
);
519 mutex_lock(&priv
->reg_mutex
);
520 priv
->mirror_tx
= !!val
->value
.i
;
521 ar40xx_set_mirror_regs(priv
);
522 mutex_unlock(&priv
->reg_mutex
);
528 ar40xx_sw_get_mirror_tx_enable(struct switch_dev
*dev
,
529 const struct switch_attr
*attr
,
530 struct switch_val
*val
)
532 struct ar40xx_priv
*priv
= swdev_to_ar40xx(dev
);
534 mutex_lock(&priv
->reg_mutex
);
535 val
->value
.i
= priv
->mirror_tx
;
536 mutex_unlock(&priv
->reg_mutex
);
541 ar40xx_sw_set_mirror_monitor_port(struct switch_dev
*dev
,
542 const struct switch_attr
*attr
,
543 struct switch_val
*val
)
545 struct ar40xx_priv
*priv
= swdev_to_ar40xx(dev
);
547 mutex_lock(&priv
->reg_mutex
);
548 priv
->monitor_port
= val
->value
.i
;
549 ar40xx_set_mirror_regs(priv
);
550 mutex_unlock(&priv
->reg_mutex
);
556 ar40xx_sw_get_mirror_monitor_port(struct switch_dev
*dev
,
557 const struct switch_attr
*attr
,
558 struct switch_val
*val
)
560 struct ar40xx_priv
*priv
= swdev_to_ar40xx(dev
);
562 mutex_lock(&priv
->reg_mutex
);
563 val
->value
.i
= priv
->monitor_port
;
564 mutex_unlock(&priv
->reg_mutex
);
569 ar40xx_sw_set_mirror_source_port(struct switch_dev
*dev
,
570 const struct switch_attr
*attr
,
571 struct switch_val
*val
)
573 struct ar40xx_priv
*priv
= swdev_to_ar40xx(dev
);
575 mutex_lock(&priv
->reg_mutex
);
576 priv
->source_port
= val
->value
.i
;
577 ar40xx_set_mirror_regs(priv
);
578 mutex_unlock(&priv
->reg_mutex
);
584 ar40xx_sw_get_mirror_source_port(struct switch_dev
*dev
,
585 const struct switch_attr
*attr
,
586 struct switch_val
*val
)
588 struct ar40xx_priv
*priv
= swdev_to_ar40xx(dev
);
590 mutex_lock(&priv
->reg_mutex
);
591 val
->value
.i
= priv
->source_port
;
592 mutex_unlock(&priv
->reg_mutex
);
597 ar40xx_sw_set_linkdown(struct switch_dev
*dev
,
598 const struct switch_attr
*attr
,
599 struct switch_val
*val
)
601 struct ar40xx_priv
*priv
= swdev_to_ar40xx(dev
);
603 if (val
->value
.i
== 1)
604 ar40xx_port_phy_linkdown(priv
);
606 ar40xx_phy_init(priv
);
612 ar40xx_sw_set_port_reset_mib(struct switch_dev
*dev
,
613 const struct switch_attr
*attr
,
614 struct switch_val
*val
)
616 struct ar40xx_priv
*priv
= swdev_to_ar40xx(dev
);
620 port
= val
->port_vlan
;
621 if (port
>= dev
->ports
)
624 mutex_lock(&priv
->mib_lock
);
625 ret
= ar40xx_mib_capture(priv
);
629 ar40xx_mib_fetch_port_stat(priv
, port
, true);
632 mutex_unlock(&priv
->mib_lock
);
637 ar40xx_sw_get_port_mib(struct switch_dev
*dev
,
638 const struct switch_attr
*attr
,
639 struct switch_val
*val
)
641 struct ar40xx_priv
*priv
= swdev_to_ar40xx(dev
);
645 char *buf
= priv
->buf
;
647 u32 num_mibs
= ARRAY_SIZE(ar40xx_mibs
);
649 port
= val
->port_vlan
;
650 if (port
>= dev
->ports
)
653 mutex_lock(&priv
->mib_lock
);
654 ret
= ar40xx_mib_capture(priv
);
658 ar40xx_mib_fetch_port_stat(priv
, port
, false);
660 len
+= snprintf(buf
+ len
, sizeof(priv
->buf
) - len
,
661 "Port %d MIB counters\n",
664 mib_stats
= &priv
->mib_stats
[port
* num_mibs
];
665 for (i
= 0; i
< num_mibs
; i
++)
666 len
+= snprintf(buf
+ len
, sizeof(priv
->buf
) - len
,
675 mutex_unlock(&priv
->mib_lock
);
680 ar40xx_sw_set_vid(struct switch_dev
*dev
, const struct switch_attr
*attr
,
681 struct switch_val
*val
)
683 struct ar40xx_priv
*priv
= swdev_to_ar40xx(dev
);
685 priv
->vlan_id
[val
->port_vlan
] = val
->value
.i
;
690 ar40xx_sw_get_vid(struct switch_dev
*dev
, const struct switch_attr
*attr
,
691 struct switch_val
*val
)
693 struct ar40xx_priv
*priv
= swdev_to_ar40xx(dev
);
695 val
->value
.i
= priv
->vlan_id
[val
->port_vlan
];
700 ar40xx_sw_get_pvid(struct switch_dev
*dev
, int port
, int *vlan
)
702 struct ar40xx_priv
*priv
= swdev_to_ar40xx(dev
);
703 *vlan
= priv
->pvid
[port
];
708 ar40xx_sw_set_pvid(struct switch_dev
*dev
, int port
, int vlan
)
710 struct ar40xx_priv
*priv
= swdev_to_ar40xx(dev
);
712 /* make sure no invalid PVIDs get set */
713 if (vlan
>= dev
->vlans
)
716 priv
->pvid
[port
] = vlan
;
721 ar40xx_read_port_link(struct ar40xx_priv
*priv
, int port
,
722 struct switch_port_link
*link
)
727 memset(link
, 0, sizeof(*link
));
729 status
= ar40xx_read(priv
, AR40XX_REG_PORT_STATUS(port
));
731 link
->aneg
= !!(status
& AR40XX_PORT_AUTO_LINK_EN
);
732 if (link
->aneg
|| (port
!= AR40XX_PORT_CPU
))
733 link
->link
= !!(status
& AR40XX_PORT_STATUS_LINK_UP
);
740 link
->duplex
= !!(status
& AR40XX_PORT_DUPLEX
);
741 link
->tx_flow
= !!(status
& AR40XX_PORT_STATUS_TXFLOW
);
742 link
->rx_flow
= !!(status
& AR40XX_PORT_STATUS_RXFLOW
);
744 speed
= (status
& AR40XX_PORT_SPEED
) >>
745 AR40XX_PORT_STATUS_SPEED_S
;
748 case AR40XX_PORT_SPEED_10M
:
749 link
->speed
= SWITCH_PORT_SPEED_10
;
751 case AR40XX_PORT_SPEED_100M
:
752 link
->speed
= SWITCH_PORT_SPEED_100
;
754 case AR40XX_PORT_SPEED_1000M
:
755 link
->speed
= SWITCH_PORT_SPEED_1000
;
758 link
->speed
= SWITCH_PORT_SPEED_UNKNOWN
;
/* swconfig hook: fill @link with the current hardware link status of
 * @port (delegates to ar40xx_read_port_link).
 */
static int
ar40xx_sw_get_port_link(struct switch_dev *dev, int port,
			struct switch_port_link *link)
{
	struct ar40xx_priv *priv = swdev_to_ar40xx(dev);

	ar40xx_read_port_link(priv, port, link);

	return 0;
}
773 static const struct switch_attr ar40xx_sw_attr_globals
[] = {
775 .type
= SWITCH_TYPE_INT
,
776 .name
= "enable_vlan",
777 .description
= "Enable VLAN mode",
778 .set
= ar40xx_sw_set_vlan
,
779 .get
= ar40xx_sw_get_vlan
,
783 .type
= SWITCH_TYPE_NOVAL
,
784 .name
= "reset_mibs",
785 .description
= "Reset all MIB counters",
786 .set
= ar40xx_sw_set_reset_mibs
,
789 .type
= SWITCH_TYPE_INT
,
790 .name
= "enable_mirror_rx",
791 .description
= "Enable mirroring of RX packets",
792 .set
= ar40xx_sw_set_mirror_rx_enable
,
793 .get
= ar40xx_sw_get_mirror_rx_enable
,
797 .type
= SWITCH_TYPE_INT
,
798 .name
= "enable_mirror_tx",
799 .description
= "Enable mirroring of TX packets",
800 .set
= ar40xx_sw_set_mirror_tx_enable
,
801 .get
= ar40xx_sw_get_mirror_tx_enable
,
805 .type
= SWITCH_TYPE_INT
,
806 .name
= "mirror_monitor_port",
807 .description
= "Mirror monitor port",
808 .set
= ar40xx_sw_set_mirror_monitor_port
,
809 .get
= ar40xx_sw_get_mirror_monitor_port
,
810 .max
= AR40XX_NUM_PORTS
- 1
813 .type
= SWITCH_TYPE_INT
,
814 .name
= "mirror_source_port",
815 .description
= "Mirror source port",
816 .set
= ar40xx_sw_set_mirror_source_port
,
817 .get
= ar40xx_sw_get_mirror_source_port
,
818 .max
= AR40XX_NUM_PORTS
- 1
821 .type
= SWITCH_TYPE_INT
,
823 .description
= "Link down all the PHYs",
824 .set
= ar40xx_sw_set_linkdown
,
829 static const struct switch_attr ar40xx_sw_attr_port
[] = {
831 .type
= SWITCH_TYPE_NOVAL
,
833 .description
= "Reset single port MIB counters",
834 .set
= ar40xx_sw_set_port_reset_mib
,
837 .type
= SWITCH_TYPE_STRING
,
839 .description
= "Get port's MIB counters",
841 .get
= ar40xx_sw_get_port_mib
,
845 const struct switch_attr ar40xx_sw_attr_vlan
[] = {
847 .type
= SWITCH_TYPE_INT
,
849 .description
= "VLAN ID (0-4094)",
850 .set
= ar40xx_sw_set_vid
,
851 .get
= ar40xx_sw_get_vid
,
856 /* End of swconfig support */
859 ar40xx_wait_bit(struct ar40xx_priv
*priv
, int reg
, u32 mask
, u32 val
)
865 t
= ar40xx_read(priv
, reg
);
866 if ((t
& mask
) == val
)
872 usleep_range(10, 20);
875 pr_err("ar40xx: timeout for reg %08x: %08x & %08x != %08x\n",
876 (unsigned int)reg
, t
, mask
, val
);
881 ar40xx_atu_flush(struct ar40xx_priv
*priv
)
885 ret
= ar40xx_wait_bit(priv
, AR40XX_REG_ATU_FUNC
,
886 AR40XX_ATU_FUNC_BUSY
, 0);
888 ar40xx_write(priv
, AR40XX_REG_ATU_FUNC
,
889 AR40XX_ATU_FUNC_OP_FLUSH
|
890 AR40XX_ATU_FUNC_BUSY
);
896 ar40xx_ess_reset(struct ar40xx_priv
*priv
)
898 reset_control_assert(priv
->ess_rst
);
900 reset_control_deassert(priv
->ess_rst
);
901 /* Waiting for all inner tables init done.
906 pr_info("ESS reset ok!\n");
909 /* Start of psgmii self test */
912 ar40xx_malibu_psgmii_ess_reset(struct ar40xx_priv
*priv
)
915 struct mii_bus
*bus
= priv
->mii_bus
;
916 /* reset phy psgmii */
917 /* fix phy psgmii RX 20bit */
918 mdiobus_write(bus
, 5, 0x0, 0x005b);
919 /* reset phy psgmii */
920 mdiobus_write(bus
, 5, 0x0, 0x001b);
921 /* release reset phy psgmii */
922 mdiobus_write(bus
, 5, 0x0, 0x005b);
924 for (n
= 0; n
< AR40XX_PSGMII_CALB_NUM
; n
++) {
927 status
= ar40xx_phy_mmd_read(priv
, 5, 1, 0x28);
930 /* Polling interval to check PSGMII PLL in malibu is ready
931 * the worst time is 8.67ms
932 * for 25MHz reference clock
933 * [512+(128+2048)*49]*80ns+100us
938 /*check malibu psgmii calibration done end..*/
940 /*freeze phy psgmii RX CDR*/
941 mdiobus_write(bus
, 5, 0x1a, 0x2230);
943 ar40xx_ess_reset(priv
);
945 /*check psgmii calibration done start*/
946 for (n
= 0; n
< AR40XX_PSGMII_CALB_NUM
; n
++) {
949 status
= ar40xx_psgmii_read(priv
, 0xa0);
952 /* Polling interval to check PSGMII PLL in ESS is ready */
956 /* check dakota psgmii calibration done end..*/
958 /* relesae phy psgmii RX CDR */
959 mdiobus_write(bus
, 5, 0x1a, 0x3230);
960 /* release phy psgmii RX 20bit */
961 mdiobus_write(bus
, 5, 0x0, 0x005f);
965 ar40xx_psgmii_single_phy_testing(struct ar40xx_priv
*priv
, int phy
)
972 u32 tx_all_ok
, rx_all_ok
;
973 struct mii_bus
*bus
= priv
->mii_bus
;
975 mdiobus_write(bus
, phy
, 0x0, 0x9000);
976 mdiobus_write(bus
, phy
, 0x0, 0x4140);
978 for (j
= 0; j
< AR40XX_PSGMII_CALB_NUM
; j
++) {
981 status
= mdiobus_read(bus
, phy
, 0x11);
982 if (status
& AR40XX_PHY_SPEC_STATUS_LINK
)
984 /* the polling interval to check if the PHY link up or not
985 * maxwait_timer: 750 ms +/-10 ms
986 * minwait_timer : 1 us +/- 0.1us
987 * time resides in minwait_timer ~ maxwait_timer
988 * see IEEE 802.3 section 40.4.5.2
994 ar40xx_phy_mmd_write(priv
, phy
, 7, 0x8029, 0x0000);
995 ar40xx_phy_mmd_write(priv
, phy
, 7, 0x8029, 0x0003);
998 ar40xx_phy_mmd_write(priv
, phy
, 7, 0x8020, 0xa000);
999 /* wait for all traffic end
1000 * 4096(pkt num)*1524(size)*8ns(125MHz)=49.9ms
1005 tx_ok
= ar40xx_phy_mmd_read(priv
, phy
, 7, 0x802e);
1006 tx_ok_high16
= ar40xx_phy_mmd_read(priv
, phy
, 7, 0x802d);
1007 tx_error
= ar40xx_phy_mmd_read(priv
, phy
, 7, 0x802f);
1008 rx_ok
= ar40xx_phy_mmd_read(priv
, phy
, 7, 0x802b);
1009 rx_ok_high16
= ar40xx_phy_mmd_read(priv
, phy
, 7, 0x802a);
1010 rx_error
= ar40xx_phy_mmd_read(priv
, phy
, 7, 0x802c);
1011 tx_all_ok
= tx_ok
+ (tx_ok_high16
<< 16);
1012 rx_all_ok
= rx_ok
+ (rx_ok_high16
<< 16);
1013 if (tx_all_ok
== 0x1000 && tx_error
== 0) {
1015 priv
->phy_t_status
&= (~BIT(phy
));
1017 pr_info("PHY %d single test PSGMII issue happen!\n", phy
);
1018 priv
->phy_t_status
|= BIT(phy
);
1021 mdiobus_write(bus
, phy
, 0x0, 0x1840);
1025 ar40xx_psgmii_all_phy_testing(struct ar40xx_priv
*priv
)
1028 struct mii_bus
*bus
= priv
->mii_bus
;
1030 mdiobus_write(bus
, 0x1f, 0x0, 0x9000);
1031 mdiobus_write(bus
, 0x1f, 0x0, 0x4140);
1033 for (j
= 0; j
< AR40XX_PSGMII_CALB_NUM
; j
++) {
1034 for (phy
= 0; phy
< AR40XX_NUM_PORTS
- 1; phy
++) {
1037 status
= mdiobus_read(bus
, phy
, 0x11);
1038 if (!(status
& BIT(10)))
1042 if (phy
>= (AR40XX_NUM_PORTS
- 1))
1044 /* The polling interva to check if the PHY link up or not */
1048 ar40xx_phy_mmd_write(priv
, 0x1f, 7, 0x8029, 0x0000);
1049 ar40xx_phy_mmd_write(priv
, 0x1f, 7, 0x8029, 0x0003);
1052 ar40xx_phy_mmd_write(priv
, 0x1f, 7, 0x8020, 0xa000);
1053 /* wait for all traffic end
1054 * 4096(pkt num)*1524(size)*8ns(125MHz)=49.9ms
1058 for (phy
= 0; phy
< AR40XX_NUM_PORTS
- 1; phy
++) {
1059 u32 tx_ok
, tx_error
;
1060 u32 rx_ok
, rx_error
;
1063 u32 tx_all_ok
, rx_all_ok
;
1066 tx_ok
= ar40xx_phy_mmd_read(priv
, phy
, 7, 0x802e);
1067 tx_ok_high16
= ar40xx_phy_mmd_read(priv
, phy
, 7, 0x802d);
1068 tx_error
= ar40xx_phy_mmd_read(priv
, phy
, 7, 0x802f);
1069 rx_ok
= ar40xx_phy_mmd_read(priv
, phy
, 7, 0x802b);
1070 rx_ok_high16
= ar40xx_phy_mmd_read(priv
, phy
, 7, 0x802a);
1071 rx_error
= ar40xx_phy_mmd_read(priv
, phy
, 7, 0x802c);
1072 tx_all_ok
= tx_ok
+ (tx_ok_high16
<<16);
1073 rx_all_ok
= rx_ok
+ (rx_ok_high16
<<16);
1074 if (tx_all_ok
== 0x1000 && tx_error
== 0) {
1076 priv
->phy_t_status
&= ~BIT(phy
+ 8);
1078 pr_info("PHY%d test see issue!\n", phy
);
1079 priv
->phy_t_status
|= BIT(phy
+ 8);
1083 pr_debug("PHY all test 0x%x \r\n", priv
->phy_t_status
);
1087 ar40xx_psgmii_self_test(struct ar40xx_priv
*priv
)
1090 struct mii_bus
*bus
= priv
->mii_bus
;
1092 ar40xx_malibu_psgmii_ess_reset(priv
);
1094 /* switch to access MII reg for copper */
1095 mdiobus_write(bus
, 4, 0x1f, 0x8500);
1096 for (phy
= 0; phy
< AR40XX_NUM_PORTS
- 1; phy
++) {
1097 /*enable phy mdio broadcast write*/
1098 ar40xx_phy_mmd_write(priv
, phy
, 7, 0x8028, 0x801f);
1100 /* force no link by power down */
1101 mdiobus_write(bus
, 0x1f, 0x0, 0x1840);
1103 ar40xx_phy_mmd_write(priv
, 0x1f, 7, 0x8021, 0x1000);
1104 ar40xx_phy_mmd_write(priv
, 0x1f, 7, 0x8062, 0x05e0);
1107 mdiobus_write(bus
, 0x1f, 0x10, 0x6800);
1108 for (i
= 0; i
< AR40XX_PSGMII_CALB_NUM
; i
++) {
1109 priv
->phy_t_status
= 0;
1111 for (phy
= 0; phy
< AR40XX_NUM_PORTS
- 1; phy
++) {
1112 ar40xx_rmw(priv
, AR40XX_REG_PORT_LOOKUP(phy
+ 1),
1113 AR40XX_PORT_LOOKUP_LOOPBACK
,
1114 AR40XX_PORT_LOOKUP_LOOPBACK
);
1117 for (phy
= 0; phy
< AR40XX_NUM_PORTS
- 1; phy
++)
1118 ar40xx_psgmii_single_phy_testing(priv
, phy
);
1120 ar40xx_psgmii_all_phy_testing(priv
);
1122 if (priv
->phy_t_status
)
1123 ar40xx_malibu_psgmii_ess_reset(priv
);
1128 if (i
>= AR40XX_PSGMII_CALB_NUM
)
1129 pr_info("PSGMII cannot recover\n");
1131 pr_debug("PSGMII recovered after %d times reset\n", i
);
1133 /* configuration recover */
1135 ar40xx_phy_mmd_write(priv
, 0x1f, 7, 0x8021, 0x0);
1137 ar40xx_phy_mmd_write(priv
, 0x1f, 7, 0x8029, 0x0);
1138 /* disable traffic */
1139 ar40xx_phy_mmd_write(priv
, 0x1f, 7, 0x8020, 0x0);
1143 ar40xx_psgmii_self_test_clean(struct ar40xx_priv
*priv
)
1146 struct mii_bus
*bus
= priv
->mii_bus
;
1148 /* disable phy internal loopback */
1149 mdiobus_write(bus
, 0x1f, 0x10, 0x6860);
1150 mdiobus_write(bus
, 0x1f, 0x0, 0x9040);
1152 for (phy
= 0; phy
< AR40XX_NUM_PORTS
- 1; phy
++) {
1153 /* disable mac loop back */
1154 ar40xx_rmw(priv
, AR40XX_REG_PORT_LOOKUP(phy
+ 1),
1155 AR40XX_PORT_LOOKUP_LOOPBACK
, 0);
1156 /* disable phy mdio broadcast write */
1157 ar40xx_phy_mmd_write(priv
, phy
, 7, 0x8028, 0x001f);
1160 /* clear fdb entry */
1161 ar40xx_atu_flush(priv
);
1164 /* End of psgmii self test */
1167 ar40xx_mac_mode_init(struct ar40xx_priv
*priv
, u32 mode
)
1169 if (mode
== PORT_WRAPPER_PSGMII
) {
1170 ar40xx_psgmii_write(priv
, AR40XX_PSGMII_MODE_CONTROL
, 0x2200);
1171 ar40xx_psgmii_write(priv
, AR40XX_PSGMIIPHY_TX_CONTROL
, 0x8380);
1176 int ar40xx_cpuport_setup(struct ar40xx_priv
*priv
)
1180 t
= AR40XX_PORT_STATUS_TXFLOW
|
1181 AR40XX_PORT_STATUS_RXFLOW
|
1182 AR40XX_PORT_TXHALF_FLOW
|
1183 AR40XX_PORT_DUPLEX
|
1184 AR40XX_PORT_SPEED_1000M
;
1185 ar40xx_write(priv
, AR40XX_REG_PORT_STATUS(0), t
);
1186 usleep_range(10, 20);
1188 t
|= AR40XX_PORT_TX_EN
|
1190 ar40xx_write(priv
, AR40XX_REG_PORT_STATUS(0), t
);
1196 ar40xx_init_port(struct ar40xx_priv
*priv
, int port
)
1200 ar40xx_rmw(priv
, AR40XX_REG_PORT_STATUS(port
),
1201 AR40XX_PORT_AUTO_LINK_EN
, 0);
1203 /* CPU port is setting headers to limit output ports */
1205 ar40xx_write(priv
, AR40XX_REG_PORT_HEADER(port
), 0x8);
1207 ar40xx_write(priv
, AR40XX_REG_PORT_HEADER(port
), 0);
1209 ar40xx_write(priv
, AR40XX_REG_PORT_VLAN0(port
), 0);
1211 t
= AR40XX_PORT_VLAN1_OUT_MODE_UNTOUCH
<< AR40XX_PORT_VLAN1_OUT_MODE_S
;
1212 ar40xx_write(priv
, AR40XX_REG_PORT_VLAN1(port
), t
);
1214 t
= AR40XX_PORT_LOOKUP_LEARN
;
1215 t
|= AR40XX_PORT_STATE_FORWARD
<< AR40XX_PORT_LOOKUP_STATE_S
;
1216 ar40xx_write(priv
, AR40XX_REG_PORT_LOOKUP(port
), t
);
1220 ar40xx_init_globals(struct ar40xx_priv
*priv
)
1224 /* enable CPU port and disable mirror port */
1225 t
= AR40XX_FWD_CTRL0_CPU_PORT_EN
|
1226 AR40XX_FWD_CTRL0_MIRROR_PORT
;
1227 ar40xx_write(priv
, AR40XX_REG_FWD_CTRL0
, t
);
1229 /* forward multicast and broadcast frames to CPU */
1230 t
= (AR40XX_PORTS_ALL
<< AR40XX_FWD_CTRL1_UC_FLOOD_S
) |
1231 (AR40XX_PORTS_ALL
<< AR40XX_FWD_CTRL1_MC_FLOOD_S
) |
1232 (AR40XX_PORTS_ALL
<< AR40XX_FWD_CTRL1_BC_FLOOD_S
);
1233 ar40xx_write(priv
, AR40XX_REG_FWD_CTRL1
, t
);
1235 /* enable jumbo frames */
1236 ar40xx_rmw(priv
, AR40XX_REG_MAX_FRAME_SIZE
,
1237 AR40XX_MAX_FRAME_SIZE_MTU
, 9018 + 8 + 2);
1239 /* Enable MIB counters */
1240 ar40xx_rmw(priv
, AR40XX_REG_MODULE_EN
, 0,
1241 AR40XX_MODULE_EN_MIB
);
1244 ar40xx_write(priv
, AR40XX_REG_EEE_CTRL
, 0);
1246 /* set flowctrl thershold for cpu port */
1247 t
= (AR40XX_PORT0_FC_THRESH_ON_DFLT
<< 16) |
1248 AR40XX_PORT0_FC_THRESH_OFF_DFLT
;
1249 ar40xx_write(priv
, AR40XX_REG_PORT_FLOWCTRL_THRESH(0), t
);
1251 /* set service tag to 802.1q */
1252 t
= ETH_P_8021Q
| AR40XX_ESS_SERVICE_TAG_STAG
;
1253 ar40xx_write(priv
, AR40XX_ESS_SERVICE_TAG
, t
);
1257 ar40xx_malibu_init(struct ar40xx_priv
*priv
)
1260 struct mii_bus
*bus
;
1263 bus
= priv
->mii_bus
;
1265 /* war to enable AZ transmitting ability */
1266 ar40xx_phy_mmd_write(priv
, AR40XX_PSGMII_ID
, 1,
1267 AR40XX_MALIBU_PSGMII_MODE_CTRL
,
1268 AR40XX_MALIBU_PHY_PSGMII_MODE_CTRL_ADJUST_VAL
);
1269 for (i
= 0; i
< AR40XX_NUM_PORTS
- 1; i
++) {
1270 /* change malibu control_dac */
1271 val
= ar40xx_phy_mmd_read(priv
, i
, 7,
1272 AR40XX_MALIBU_PHY_MMD7_DAC_CTRL
);
1273 val
&= ~AR40XX_MALIBU_DAC_CTRL_MASK
;
1274 val
|= AR40XX_MALIBU_DAC_CTRL_VALUE
;
1275 ar40xx_phy_mmd_write(priv
, i
, 7,
1276 AR40XX_MALIBU_PHY_MMD7_DAC_CTRL
, val
);
1277 if (i
== AR40XX_MALIBU_PHY_LAST_ADDR
) {
1278 /* to avoid goes into hibernation */
1279 val
= ar40xx_phy_mmd_read(priv
, i
, 3,
1280 AR40XX_MALIBU_PHY_RLP_CTRL
);
1282 ar40xx_phy_mmd_write(priv
, i
, 3,
1283 AR40XX_MALIBU_PHY_RLP_CTRL
, val
);
1287 /* adjust psgmii serdes tx amp */
1288 mdiobus_write(bus
, AR40XX_PSGMII_ID
, AR40XX_PSGMII_TX_DRIVER_1_CTRL
,
1289 AR40XX_MALIBU_PHY_PSGMII_REDUCE_SERDES_TX_AMP
);
1293 ar40xx_hw_init(struct ar40xx_priv
*priv
)
1297 ar40xx_ess_reset(priv
);
1300 ar40xx_malibu_init(priv
);
1304 ar40xx_psgmii_self_test(priv
);
1305 ar40xx_psgmii_self_test_clean(priv
);
1307 ar40xx_mac_mode_init(priv
, priv
->mac_mode
);
1309 for (i
= 0; i
< priv
->dev
.ports
; i
++)
1310 ar40xx_init_port(priv
, i
);
1312 ar40xx_init_globals(priv
);
1317 /* Start of qm error WAR */
1320 int ar40xx_force_1g_full(struct ar40xx_priv
*priv
, u32 port_id
)
1324 if (port_id
< 0 || port_id
> 6)
1327 reg
= AR40XX_REG_PORT_STATUS(port_id
);
1328 return ar40xx_rmw(priv
, reg
, AR40XX_PORT_SPEED
,
1329 (AR40XX_PORT_SPEED_1000M
| AR40XX_PORT_DUPLEX
));
1333 int ar40xx_get_qm_status(struct ar40xx_priv
*priv
,
1334 u32 port_id
, u32
*qm_buffer_err
)
1339 if (port_id
< 1 || port_id
> 5) {
1345 reg
= AR40XX_REG_QM_PORT0_3_QNUM
;
1346 ar40xx_write(priv
, AR40XX_REG_QM_DEBUG_ADDR
, reg
);
1347 qm_val
= ar40xx_read(priv
, AR40XX_REG_QM_DEBUG_VALUE
);
1348 /* every 8 bits for each port */
1349 *qm_buffer_err
= (qm_val
>> (port_id
* 8)) & 0xFF;
1351 reg
= AR40XX_REG_QM_PORT4_6_QNUM
;
1352 ar40xx_write(priv
, AR40XX_REG_QM_DEBUG_ADDR
, reg
);
1353 qm_val
= ar40xx_read(priv
, AR40XX_REG_QM_DEBUG_VALUE
);
1354 /* every 8 bits for each port */
1355 *qm_buffer_err
= (qm_val
>> ((port_id
-4) * 8)) & 0xFF;
1362 ar40xx_sw_mac_polling_task(struct ar40xx_priv
*priv
)
1364 static int task_count
;
1367 u32 link
, speed
, duplex
;
1369 u16 port_phy_status
[AR40XX_NUM_PORTS
];
1370 static u32 qm_err_cnt
[AR40XX_NUM_PORTS
] = {0, 0, 0, 0, 0, 0};
1371 static u32 link_cnt
[AR40XX_NUM_PORTS
] = {0, 0, 0, 0, 0, 0};
1372 struct mii_bus
*bus
= NULL
;
1374 if (!priv
|| !priv
->mii_bus
)
1377 bus
= priv
->mii_bus
;
1381 for (i
= 1; i
< AR40XX_NUM_PORTS
; ++i
) {
1382 port_phy_status
[i
] =
1383 mdiobus_read(bus
, i
-1, AR40XX_PHY_SPEC_STATUS
);
1384 speed
= link
= duplex
= port_phy_status
[i
];
1385 speed
&= AR40XX_PHY_SPEC_STATUS_SPEED
;
1387 link
&= AR40XX_PHY_SPEC_STATUS_LINK
;
1389 duplex
&= AR40XX_PHY_SPEC_STATUS_DUPLEX
;
1392 if (link
!= priv
->ar40xx_port_old_link
[i
]) {
1395 if ((priv
->ar40xx_port_old_link
[i
] ==
1396 AR40XX_PORT_LINK_UP
) &&
1397 (link
== AR40XX_PORT_LINK_DOWN
)) {
1398 /* LINK_EN disable(MAC force mode)*/
1399 reg
= AR40XX_REG_PORT_STATUS(i
);
1400 ar40xx_rmw(priv
, reg
,
1401 AR40XX_PORT_AUTO_LINK_EN
, 0);
1403 /* Check queue buffer */
1405 ar40xx_get_qm_status(priv
, i
, &qm_buffer_err
);
1406 if (qm_buffer_err
) {
1407 priv
->ar40xx_port_qm_buf
[i
] =
1408 AR40XX_QM_NOT_EMPTY
;
1412 priv
->ar40xx_port_qm_buf
[i
] =
1414 ar40xx_force_1g_full(priv
, i
);
1415 /* Ref:QCA8337 Datasheet,Clearing
1416 * MENU_CTRL_EN prevents phy to
1417 * stuck in 100BT mode when
1418 * bringing up the link
1420 ar40xx_phy_dbg_read(priv
, i
-1,
1423 phy_val
&= (~AR40XX_PHY_MANU_CTRL_EN
);
1424 ar40xx_phy_dbg_write(priv
, i
-1,
1428 priv
->ar40xx_port_old_link
[i
] = link
;
1429 } else if ((priv
->ar40xx_port_old_link
[i
] ==
1430 AR40XX_PORT_LINK_DOWN
) &&
1431 (link
== AR40XX_PORT_LINK_UP
)) {
1433 if (priv
->port_link_up
[i
] < 1) {
1434 ++priv
->port_link_up
[i
];
1436 /* Change port status */
1437 reg
= AR40XX_REG_PORT_STATUS(i
);
1438 value
= ar40xx_read(priv
, reg
);
1439 priv
->port_link_up
[i
] = 0;
1441 value
&= ~(AR40XX_PORT_DUPLEX
|
1443 value
|= speed
| (duplex
? BIT(6) : 0);
1444 ar40xx_write(priv
, reg
, value
);
1445 /* clock switch need such time
1448 usleep_range(100, 200);
1450 value
|= AR40XX_PORT_AUTO_LINK_EN
;
1451 ar40xx_write(priv
, reg
, value
);
1452 /* HW need such time to make sure link
1453 * stable before enable MAC
1455 usleep_range(100, 200);
1457 if (speed
== AR40XX_PORT_SPEED_100M
) {
1459 /* Enable @100M, if down to 10M
1460 * clock will change smoothly
1462 ar40xx_phy_dbg_read(priv
, i
-1,
1466 AR40XX_PHY_MANU_CTRL_EN
;
1467 ar40xx_phy_dbg_write(priv
, i
-1,
1471 priv
->ar40xx_port_old_link
[i
] = link
;
1476 if (priv
->ar40xx_port_qm_buf
[i
] == AR40XX_QM_NOT_EMPTY
) {
1478 ar40xx_get_qm_status(priv
, i
, &qm_buffer_err
);
1479 if (qm_buffer_err
) {
1482 priv
->ar40xx_port_qm_buf
[i
] =
1485 ar40xx_force_1g_full(priv
, i
);
1492 ar40xx_qm_err_check_work_task(struct work_struct
*work
)
1494 struct ar40xx_priv
*priv
= container_of(work
, struct ar40xx_priv
,
1497 mutex_lock(&priv
->qm_lock
);
1499 ar40xx_sw_mac_polling_task(priv
);
1501 mutex_unlock(&priv
->qm_lock
);
1503 schedule_delayed_work(&priv
->qm_dwork
,
1504 msecs_to_jiffies(AR40XX_QM_WORK_DELAY
));
1508 ar40xx_qm_err_check_work_start(struct ar40xx_priv
*priv
)
1510 mutex_init(&priv
->qm_lock
);
1512 INIT_DELAYED_WORK(&priv
->qm_dwork
, ar40xx_qm_err_check_work_task
);
1514 schedule_delayed_work(&priv
->qm_dwork
,
1515 msecs_to_jiffies(AR40XX_QM_WORK_DELAY
));
1520 /* End of qm error WAR */
1523 ar40xx_vlan_init(struct ar40xx_priv
*priv
)
1528 /* By default Enable VLAN */
1530 priv
->vlan_table
[AR40XX_LAN_VLAN
] = priv
->cpu_bmp
| priv
->lan_bmp
;
1531 priv
->vlan_table
[AR40XX_WAN_VLAN
] = priv
->cpu_bmp
| priv
->wan_bmp
;
1532 priv
->vlan_tagged
= priv
->cpu_bmp
;
1533 bmp
= priv
->lan_bmp
;
1534 for_each_set_bit(port
, &bmp
, AR40XX_NUM_PORTS
)
1535 priv
->pvid
[port
] = AR40XX_LAN_VLAN
;
1537 bmp
= priv
->wan_bmp
;
1538 for_each_set_bit(port
, &bmp
, AR40XX_NUM_PORTS
)
1539 priv
->pvid
[port
] = AR40XX_WAN_VLAN
;
1545 ar40xx_mib_work_func(struct work_struct
*work
)
1547 struct ar40xx_priv
*priv
;
1550 priv
= container_of(work
, struct ar40xx_priv
, mib_work
.work
);
1552 mutex_lock(&priv
->mib_lock
);
1554 err
= ar40xx_mib_capture(priv
);
1558 ar40xx_mib_fetch_port_stat(priv
, priv
->mib_next_port
, false);
1561 priv
->mib_next_port
++;
1562 if (priv
->mib_next_port
>= priv
->dev
.ports
)
1563 priv
->mib_next_port
= 0;
1565 mutex_unlock(&priv
->mib_lock
);
1567 schedule_delayed_work(&priv
->mib_work
,
1568 msecs_to_jiffies(AR40XX_MIB_WORK_DELAY
));
1572 ar40xx_setup_port(struct ar40xx_priv
*priv
, int port
, u32 members
)
1575 u32 egress
, ingress
;
1576 u32 pvid
= priv
->vlan_id
[priv
->pvid
[port
]];
1579 if (priv
->vlan_tagged
& BIT(port
))
1580 egress
= AR40XX_PORT_VLAN1_OUT_MODE_TAG
;
1582 egress
= AR40XX_PORT_VLAN1_OUT_MODE_UNMOD
;
1584 ingress
= AR40XX_IN_SECURE
;
1586 egress
= AR40XX_PORT_VLAN1_OUT_MODE_UNTOUCH
;
1587 ingress
= AR40XX_IN_PORT_ONLY
;
1590 t
= pvid
<< AR40XX_PORT_VLAN0_DEF_SVID_S
;
1591 t
|= pvid
<< AR40XX_PORT_VLAN0_DEF_CVID_S
;
1592 ar40xx_write(priv
, AR40XX_REG_PORT_VLAN0(port
), t
);
1594 t
= egress
<< AR40XX_PORT_VLAN1_OUT_MODE_S
;
1596 /* set CPU port to core port */
1598 t
|= AR40XX_PORT_VLAN1_CORE_PORT
;
1600 if (priv
->vlan_tagged
& BIT(port
))
1601 t
|= AR40XX_PORT_VLAN1_PORT_VLAN_PROP
;
1603 t
|= AR40XX_PORT_VLAN1_PORT_TLS_MODE
;
1605 ar40xx_write(priv
, AR40XX_REG_PORT_VLAN1(port
), t
);
1608 t
|= AR40XX_PORT_LOOKUP_LEARN
;
1609 t
|= ingress
<< AR40XX_PORT_LOOKUP_IN_MODE_S
;
1610 t
|= AR40XX_PORT_STATE_FORWARD
<< AR40XX_PORT_LOOKUP_STATE_S
;
1611 ar40xx_write(priv
, AR40XX_REG_PORT_LOOKUP(port
), t
);
1615 ar40xx_vtu_op(struct ar40xx_priv
*priv
, u32 op
, u32 val
)
1617 if (ar40xx_wait_bit(priv
, AR40XX_REG_VTU_FUNC1
,
1618 AR40XX_VTU_FUNC1_BUSY
, 0))
1621 if ((op
& AR40XX_VTU_FUNC1_OP
) == AR40XX_VTU_FUNC1_OP_LOAD
)
1622 ar40xx_write(priv
, AR40XX_REG_VTU_FUNC0
, val
);
1624 op
|= AR40XX_VTU_FUNC1_BUSY
;
1625 ar40xx_write(priv
, AR40XX_REG_VTU_FUNC1
, op
);
1629 ar40xx_vtu_load_vlan(struct ar40xx_priv
*priv
, u32 vid
, u32 port_mask
)
1635 op
= AR40XX_VTU_FUNC1_OP_LOAD
| (vid
<< AR40XX_VTU_FUNC1_VID_S
);
1636 val
= AR40XX_VTU_FUNC0_VALID
| AR40XX_VTU_FUNC0_IVL
;
1637 for (i
= 0; i
< AR40XX_NUM_PORTS
; i
++) {
1640 if ((port_mask
& BIT(i
)) == 0)
1641 mode
= AR40XX_VTU_FUNC0_EG_MODE_NOT
;
1642 else if (priv
->vlan
== 0)
1643 mode
= AR40XX_VTU_FUNC0_EG_MODE_KEEP
;
1644 else if ((priv
->vlan_tagged
& BIT(i
)) ||
1645 (priv
->vlan_id
[priv
->pvid
[i
]] != vid
))
1646 mode
= AR40XX_VTU_FUNC0_EG_MODE_TAG
;
1648 mode
= AR40XX_VTU_FUNC0_EG_MODE_UNTAG
;
1650 val
|= mode
<< AR40XX_VTU_FUNC0_EG_MODE_S(i
);
1652 ar40xx_vtu_op(priv
, op
, val
);
1656 ar40xx_vtu_flush(struct ar40xx_priv
*priv
)
1658 ar40xx_vtu_op(priv
, AR40XX_VTU_FUNC1_OP_FLUSH
, 0);
1662 ar40xx_sw_hw_apply(struct switch_dev
*dev
)
1664 struct ar40xx_priv
*priv
= swdev_to_ar40xx(dev
);
1665 u8 portmask
[AR40XX_NUM_PORTS
];
1668 mutex_lock(&priv
->reg_mutex
);
1669 /* flush all vlan entries */
1670 ar40xx_vtu_flush(priv
);
1672 memset(portmask
, 0, sizeof(portmask
));
1674 for (j
= 0; j
< AR40XX_MAX_VLANS
; j
++) {
1675 u8 vp
= priv
->vlan_table
[j
];
1680 for (i
= 0; i
< dev
->ports
; i
++) {
1684 portmask
[i
] |= vp
& ~mask
;
1687 ar40xx_vtu_load_vlan(priv
, priv
->vlan_id
[j
],
1688 priv
->vlan_table
[j
]);
1691 /* 8021q vlan disabled */
1692 for (i
= 0; i
< dev
->ports
; i
++) {
1693 if (i
== AR40XX_PORT_CPU
)
1696 portmask
[i
] = BIT(AR40XX_PORT_CPU
);
1697 portmask
[AR40XX_PORT_CPU
] |= BIT(i
);
1701 /* update the port destination mask registers and tag settings */
1702 for (i
= 0; i
< dev
->ports
; i
++)
1703 ar40xx_setup_port(priv
, i
, portmask
[i
]);
1705 ar40xx_set_mirror_regs(priv
);
1707 mutex_unlock(&priv
->reg_mutex
);
1712 ar40xx_sw_reset_switch(struct switch_dev
*dev
)
1714 struct ar40xx_priv
*priv
= swdev_to_ar40xx(dev
);
1717 mutex_lock(&priv
->reg_mutex
);
1718 memset(&priv
->vlan
, 0, sizeof(struct ar40xx_priv
) -
1719 offsetof(struct ar40xx_priv
, vlan
));
1721 for (i
= 0; i
< AR40XX_MAX_VLANS
; i
++)
1722 priv
->vlan_id
[i
] = i
;
1724 ar40xx_vlan_init(priv
);
1726 priv
->mirror_rx
= false;
1727 priv
->mirror_tx
= false;
1728 priv
->source_port
= 0;
1729 priv
->monitor_port
= 0;
1731 mutex_unlock(&priv
->reg_mutex
);
1733 rv
= ar40xx_sw_hw_apply(dev
);
1738 ar40xx_start(struct ar40xx_priv
*priv
)
1742 ret
= ar40xx_hw_init(priv
);
1746 ret
= ar40xx_sw_reset_switch(&priv
->dev
);
1750 /* at last, setup cpu port */
1751 ret
= ar40xx_cpuport_setup(priv
);
1755 schedule_delayed_work(&priv
->mib_work
,
1756 msecs_to_jiffies(AR40XX_MIB_WORK_DELAY
));
1758 ar40xx_qm_err_check_work_start(priv
);
1763 static const struct switch_dev_ops ar40xx_sw_ops
= {
1765 .attr
= ar40xx_sw_attr_globals
,
1766 .n_attr
= ARRAY_SIZE(ar40xx_sw_attr_globals
),
1769 .attr
= ar40xx_sw_attr_port
,
1770 .n_attr
= ARRAY_SIZE(ar40xx_sw_attr_port
),
1773 .attr
= ar40xx_sw_attr_vlan
,
1774 .n_attr
= ARRAY_SIZE(ar40xx_sw_attr_vlan
),
1776 .get_port_pvid
= ar40xx_sw_get_pvid
,
1777 .set_port_pvid
= ar40xx_sw_set_pvid
,
1778 .get_vlan_ports
= ar40xx_sw_get_ports
,
1779 .set_vlan_ports
= ar40xx_sw_set_ports
,
1780 .apply_config
= ar40xx_sw_hw_apply
,
1781 .reset_switch
= ar40xx_sw_reset_switch
,
1782 .get_port_link
= ar40xx_sw_get_port_link
,
1785 /* Start of phy driver support */
1787 static const u32 ar40xx_phy_ids
[] = {
1789 0x004dd0b2, /* AR40xx */
1793 ar40xx_phy_match(u32 phy_id
)
1797 for (i
= 0; i
< ARRAY_SIZE(ar40xx_phy_ids
); i
++)
1798 if (phy_id
== ar40xx_phy_ids
[i
])
1805 is_ar40xx_phy(struct mii_bus
*bus
)
1809 for (i
= 0; i
< 4; i
++) {
1812 phy_id
= mdiobus_read(bus
, i
, MII_PHYSID1
) << 16;
1813 phy_id
|= mdiobus_read(bus
, i
, MII_PHYSID2
);
1814 if (!ar40xx_phy_match(phy_id
))
1822 ar40xx_phy_probe(struct phy_device
*phydev
)
1824 if (!is_ar40xx_phy(phydev
->mdio
.bus
))
1827 ar40xx_priv
->mii_bus
= phydev
->mdio
.bus
;
1828 phydev
->priv
= ar40xx_priv
;
1829 if (phydev
->mdio
.addr
== 0)
1830 ar40xx_priv
->phy
= phydev
;
1832 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT
, phydev
->supported
);
1833 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT
, phydev
->advertising
);
1838 ar40xx_phy_remove(struct phy_device
*phydev
)
1840 ar40xx_priv
->mii_bus
= NULL
;
1841 phydev
->priv
= NULL
;
/* No per-PHY init is required; the switch driver configures everything. */
static int
ar40xx_phy_config_init(struct phy_device *phydev)
{
	return 0;
}
1851 ar40xx_phy_read_status(struct phy_device
*phydev
)
1853 if (phydev
->mdio
.addr
!= 0)
1854 return genphy_read_status(phydev
);
1860 ar40xx_phy_config_aneg(struct phy_device
*phydev
)
1862 if (phydev
->mdio
.addr
== 0)
1865 return genphy_config_aneg(phydev
);
1868 static struct phy_driver ar40xx_phy_driver
= {
1869 .phy_id
= 0x004d0000,
1870 .name
= "QCA Malibu",
1871 .phy_id_mask
= 0xffff0000,
1872 .features
= PHY_GBIT_FEATURES
,
1873 .probe
= ar40xx_phy_probe
,
1874 .remove
= ar40xx_phy_remove
,
1875 .config_init
= ar40xx_phy_config_init
,
1876 .config_aneg
= ar40xx_phy_config_aneg
,
1877 .read_status
= ar40xx_phy_read_status
,
1880 static uint16_t ar40xx_gpio_get_phy(unsigned int offset
)
1885 static uint16_t ar40xx_gpio_get_reg(unsigned int offset
)
1887 return 0x8074 + offset
% 4;
/* gpio_chip .set: drive the LED-control MMD register (device 7) of the
 * PHY selected by @offset; 0xA000 = high, 0x8000 = low.
 */
static void ar40xx_gpio_set(struct gpio_chip *gc, unsigned int offset,
			    int value)
{
	struct ar40xx_priv *priv = gpiochip_get_data(gc);

	ar40xx_phy_mmd_write(priv, ar40xx_gpio_get_phy(offset), 0x7,
			     ar40xx_gpio_get_reg(offset),
			     value ? 0xA000 : 0x8000);
}
/* gpio_chip .get: read back the MMD register and report high only for
 * the exact 0xA000 value written by ar40xx_gpio_set().
 */
static int ar40xx_gpio_get(struct gpio_chip *gc, unsigned offset)
{
	struct ar40xx_priv *priv = gpiochip_get_data(gc);

	return ar40xx_phy_mmd_read(priv, ar40xx_gpio_get_phy(offset), 0x7,
				   ar40xx_gpio_get_reg(offset)) == 0xA000;
}
/* gpio_chip .get_direction: every line is output-only. */
static int ar40xx_gpio_get_dir(struct gpio_chip *gc, unsigned offset)
{
	return 0; /* only out direction */
}
/* gpio_chip .direction_output:
 * the direction out value is used to set the initial value.
 * support of this function is required by leds-gpio.c
 */
static int ar40xx_gpio_dir_out(struct gpio_chip *gc, unsigned offset,
			       int value)
{
	ar40xx_gpio_set(gc, offset, value);

	return 0;
}
1924 static void ar40xx_register_gpio(struct device
*pdev
,
1925 struct ar40xx_priv
*priv
,
1926 struct device_node
*switch_node
)
1928 struct gpio_chip
*gc
;
1931 gc
= devm_kzalloc(pdev
, sizeof(*gc
), GFP_KERNEL
);
1935 gc
->label
= "ar40xx_gpio",
1937 gc
->ngpio
= 5 /* mmd 0 - 4 */ * 4 /* 0x8074 - 0x8077 */,
1939 gc
->owner
= THIS_MODULE
;
1941 gc
->get_direction
= ar40xx_gpio_get_dir
;
1942 gc
->direction_output
= ar40xx_gpio_dir_out
;
1943 gc
->get
= ar40xx_gpio_get
;
1944 gc
->set
= ar40xx_gpio_set
;
1945 gc
->can_sleep
= true;
1946 gc
->label
= priv
->dev
.name
;
1947 gc
->of_node
= switch_node
;
1949 err
= devm_gpiochip_add_data(pdev
, gc
, priv
);
1951 dev_err(pdev
, "Failed to register gpio %d.\n", err
);
1954 /* End of phy driver support */
1956 /* Platform driver probe function */
1958 static int ar40xx_probe(struct platform_device
*pdev
)
1960 struct device_node
*switch_node
;
1961 struct device_node
*psgmii_node
;
1962 const __be32
*mac_mode
;
1963 struct clk
*ess_clk
;
1964 struct switch_dev
*swdev
;
1965 struct ar40xx_priv
*priv
;
1968 struct resource psgmii_base
= {0};
1969 struct resource switch_base
= {0};
1972 priv
= devm_kzalloc(&pdev
->dev
, sizeof(*priv
), GFP_KERNEL
);
1976 platform_set_drvdata(pdev
, priv
);
1979 switch_node
= of_node_get(pdev
->dev
.of_node
);
1980 if (of_address_to_resource(switch_node
, 0, &switch_base
) != 0)
1983 priv
->hw_addr
= devm_ioremap_resource(&pdev
->dev
, &switch_base
);
1984 if (IS_ERR(priv
->hw_addr
)) {
1985 dev_err(&pdev
->dev
, "Failed to ioremap switch_base!\n");
1986 return PTR_ERR(priv
->hw_addr
);
1990 psgmii_node
= of_find_node_by_name(NULL
, "ess-psgmii");
1992 dev_err(&pdev
->dev
, "Failed to find ess-psgmii node!\n");
1996 if (of_address_to_resource(psgmii_node
, 0, &psgmii_base
) != 0)
1999 priv
->psgmii_hw_addr
= devm_ioremap_resource(&pdev
->dev
, &psgmii_base
);
2000 if (IS_ERR(priv
->psgmii_hw_addr
)) {
2001 dev_err(&pdev
->dev
, "psgmii ioremap fail!\n");
2002 return PTR_ERR(priv
->psgmii_hw_addr
);
2005 mac_mode
= of_get_property(switch_node
, "switch_mac_mode", &len
);
2007 dev_err(&pdev
->dev
, "Failed to read switch_mac_mode\n");
2010 priv
->mac_mode
= be32_to_cpup(mac_mode
);
2012 ess_clk
= of_clk_get_by_name(switch_node
, "ess_clk");
2014 clk_prepare_enable(ess_clk
);
2016 priv
->ess_rst
= devm_reset_control_get(&pdev
->dev
, "ess_rst");
2017 if (IS_ERR(priv
->ess_rst
)) {
2018 dev_err(&pdev
->dev
, "Failed to get ess_rst control!\n");
2019 return PTR_ERR(priv
->ess_rst
);
2022 if (of_property_read_u32(switch_node
, "switch_cpu_bmp",
2024 of_property_read_u32(switch_node
, "switch_lan_bmp",
2026 of_property_read_u32(switch_node
, "switch_wan_bmp",
2028 dev_err(&pdev
->dev
, "Failed to read port properties\n");
2032 ret
= phy_driver_register(&ar40xx_phy_driver
, THIS_MODULE
);
2034 dev_err(&pdev
->dev
, "Failed to register ar40xx phy driver!\n");
2038 mutex_init(&priv
->reg_mutex
);
2039 mutex_init(&priv
->mib_lock
);
2040 INIT_DELAYED_WORK(&priv
->mib_work
, ar40xx_mib_work_func
);
2042 /* register switch */
2045 if (priv
->mii_bus
== NULL
) {
2046 dev_err(&pdev
->dev
, "Probe failed - Missing PHYs!\n");
2048 goto err_missing_phy
;
2051 swdev
->alias
= dev_name(&priv
->mii_bus
->dev
);
2053 swdev
->cpu_port
= AR40XX_PORT_CPU
;
2054 swdev
->name
= "QCA AR40xx";
2055 swdev
->vlans
= AR40XX_MAX_VLANS
;
2056 swdev
->ports
= AR40XX_NUM_PORTS
;
2057 swdev
->ops
= &ar40xx_sw_ops
;
2058 ret
= register_switch(swdev
, NULL
);
2060 goto err_unregister_phy
;
2062 num_mibs
= ARRAY_SIZE(ar40xx_mibs
);
2063 len
= priv
->dev
.ports
* num_mibs
*
2064 sizeof(*priv
->mib_stats
);
2065 priv
->mib_stats
= devm_kzalloc(&pdev
->dev
, len
, GFP_KERNEL
);
2066 if (!priv
->mib_stats
) {
2068 goto err_unregister_switch
;
2073 if (of_property_read_bool(switch_node
, "gpio-controller"))
2074 ar40xx_register_gpio(&pdev
->dev
, ar40xx_priv
, switch_node
);
2078 err_unregister_switch
:
2079 unregister_switch(&priv
->dev
);
2081 phy_driver_unregister(&ar40xx_phy_driver
);
2083 platform_set_drvdata(pdev
, NULL
);
2087 static int ar40xx_remove(struct platform_device
*pdev
)
2089 struct ar40xx_priv
*priv
= platform_get_drvdata(pdev
);
2091 cancel_delayed_work_sync(&priv
->qm_dwork
);
2092 cancel_delayed_work_sync(&priv
->mib_work
);
2094 unregister_switch(&priv
->dev
);
2096 phy_driver_unregister(&ar40xx_phy_driver
);
2101 static const struct of_device_id ar40xx_of_mtable
[] = {
2102 {.compatible
= "qcom,ess-switch" },
2106 struct platform_driver ar40xx_drv
= {
2107 .probe
= ar40xx_probe
,
2108 .remove
= ar40xx_remove
,
2111 .of_match_table
= ar40xx_of_mtable
,
2115 module_platform_driver(ar40xx_drv
);
2117 MODULE_DESCRIPTION("IPQ40XX ESS driver");
2118 MODULE_LICENSE("Dual BSD/GPL");