ipq40xx: 5.15: fix ar40xx driver
[openwrt/openwrt.git] / target / linux / ipq40xx / files-5.15 / drivers / net / mdio / ar40xx.c
1 /*
2 * Copyright (c) 2016, The Linux Foundation. All rights reserved.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for
5 * any purpose with or without fee is hereby granted, provided that the
6 * above copyright notice and this permission notice appear in all copies.
7 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
8 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
9 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
10 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
11 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
12 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
13 * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
14 */
15
16 #include <linux/bitfield.h>
17 #include <linux/module.h>
18 #include <linux/list.h>
19 #include <linux/bitops.h>
20 #include <linux/switch.h>
21 #include <linux/delay.h>
22 #include <linux/phy.h>
23 #include <linux/clk.h>
24 #include <linux/reset.h>
25 #include <linux/lockdep.h>
26 #include <linux/workqueue.h>
27 #include <linux/of_device.h>
28 #include <linux/of_address.h>
29 #include <linux/of_mdio.h>
30 #include <linux/mdio.h>
31 #include <linux/gpio.h>
32
33 #include "ar40xx.h"
34
/* Singleton driver state; there is only one ESS switch per SoC. */
static struct ar40xx_priv *ar40xx_priv;

/* Build one ar40xx_mib_desc entry:
 * _s = counter width in 32-bit words (1 or 2),
 * _o = register offset within the per-port stats window,
 * _n = human-readable counter name reported via swconfig.
 */
#define MIB_DESC(_s , _o, _n) \
	{ \
		.size = (_s), \
		.offset = (_o), \
		.name = (_n), \
	}
43
/* Hardware MIB counter layout: offsets are relative to each port's
 * stats window (see ar40xx_mib_fetch_port_stat).  Entries with size 2
 * are 64-bit counters read as two consecutive 32-bit registers.
 */
static const struct ar40xx_mib_desc ar40xx_mibs[] = {
	MIB_DESC(1, AR40XX_STATS_RXBROAD, "RxBroad"),
	MIB_DESC(1, AR40XX_STATS_RXPAUSE, "RxPause"),
	MIB_DESC(1, AR40XX_STATS_RXMULTI, "RxMulti"),
	MIB_DESC(1, AR40XX_STATS_RXFCSERR, "RxFcsErr"),
	MIB_DESC(1, AR40XX_STATS_RXALIGNERR, "RxAlignErr"),
	MIB_DESC(1, AR40XX_STATS_RXRUNT, "RxRunt"),
	MIB_DESC(1, AR40XX_STATS_RXFRAGMENT, "RxFragment"),
	MIB_DESC(1, AR40XX_STATS_RX64BYTE, "Rx64Byte"),
	MIB_DESC(1, AR40XX_STATS_RX128BYTE, "Rx128Byte"),
	MIB_DESC(1, AR40XX_STATS_RX256BYTE, "Rx256Byte"),
	MIB_DESC(1, AR40XX_STATS_RX512BYTE, "Rx512Byte"),
	MIB_DESC(1, AR40XX_STATS_RX1024BYTE, "Rx1024Byte"),
	MIB_DESC(1, AR40XX_STATS_RX1518BYTE, "Rx1518Byte"),
	MIB_DESC(1, AR40XX_STATS_RXMAXBYTE, "RxMaxByte"),
	MIB_DESC(1, AR40XX_STATS_RXTOOLONG, "RxTooLong"),
	MIB_DESC(2, AR40XX_STATS_RXGOODBYTE, "RxGoodByte"),
	MIB_DESC(2, AR40XX_STATS_RXBADBYTE, "RxBadByte"),
	MIB_DESC(1, AR40XX_STATS_RXOVERFLOW, "RxOverFlow"),
	MIB_DESC(1, AR40XX_STATS_FILTERED, "Filtered"),
	MIB_DESC(1, AR40XX_STATS_TXBROAD, "TxBroad"),
	MIB_DESC(1, AR40XX_STATS_TXPAUSE, "TxPause"),
	MIB_DESC(1, AR40XX_STATS_TXMULTI, "TxMulti"),
	MIB_DESC(1, AR40XX_STATS_TXUNDERRUN, "TxUnderRun"),
	MIB_DESC(1, AR40XX_STATS_TX64BYTE, "Tx64Byte"),
	MIB_DESC(1, AR40XX_STATS_TX128BYTE, "Tx128Byte"),
	MIB_DESC(1, AR40XX_STATS_TX256BYTE, "Tx256Byte"),
	MIB_DESC(1, AR40XX_STATS_TX512BYTE, "Tx512Byte"),
	MIB_DESC(1, AR40XX_STATS_TX1024BYTE, "Tx1024Byte"),
	MIB_DESC(1, AR40XX_STATS_TX1518BYTE, "Tx1518Byte"),
	MIB_DESC(1, AR40XX_STATS_TXMAXBYTE, "TxMaxByte"),
	MIB_DESC(1, AR40XX_STATS_TXOVERSIZE, "TxOverSize"),
	MIB_DESC(2, AR40XX_STATS_TXBYTE, "TxByte"),
	MIB_DESC(1, AR40XX_STATS_TXCOLLISION, "TxCollision"),
	MIB_DESC(1, AR40XX_STATS_TXABORTCOL, "TxAbortCol"),
	MIB_DESC(1, AR40XX_STATS_TXMULTICOL, "TxMultiCol"),
	MIB_DESC(1, AR40XX_STATS_TXSINGLECOL, "TxSingleCol"),
	MIB_DESC(1, AR40XX_STATS_TXEXCDEFER, "TxExcDefer"),
	MIB_DESC(1, AR40XX_STATS_TXDEFER, "TxDefer"),
	MIB_DESC(1, AR40XX_STATS_TXLATECOL, "TxLateCol"),
};
85
86 static u32
87 ar40xx_read(struct ar40xx_priv *priv, int reg)
88 {
89 return readl(priv->hw_addr + reg);
90 }
91
92 static u32
93 ar40xx_psgmii_read(struct ar40xx_priv *priv, int reg)
94 {
95 return readl(priv->psgmii_hw_addr + reg);
96 }
97
98 static void
99 ar40xx_write(struct ar40xx_priv *priv, int reg, u32 val)
100 {
101 writel(val, priv->hw_addr + reg);
102 }
103
104 static u32
105 ar40xx_rmw(struct ar40xx_priv *priv, int reg, u32 mask, u32 val)
106 {
107 u32 ret;
108
109 ret = ar40xx_read(priv, reg);
110 ret &= ~mask;
111 ret |= val;
112 ar40xx_write(priv, reg, ret);
113 return ret;
114 }
115
116 static void
117 ar40xx_psgmii_write(struct ar40xx_priv *priv, int reg, u32 val)
118 {
119 writel(val, priv->psgmii_hw_addr + reg);
120 }
121
/* Write @dbg_data to PHY debug register @dbg_addr on PHY @phy_addr.
 * The Atheros debug space is reached indirectly: first the debug
 * address, then the data, each via a normal MDIO write.  The bus lock
 * keeps the two-step sequence atomic against other MDIO users.
 */
static void
ar40xx_phy_dbg_write(struct ar40xx_priv *priv, int phy_addr,
		     u16 dbg_addr, u16 dbg_data)
{
	struct mii_bus *bus = priv->mii_bus;

	mutex_lock(&bus->mdio_lock);
	bus->write(bus, phy_addr, AR40XX_MII_ATH_DBG_ADDR, dbg_addr);
	bus->write(bus, phy_addr, AR40XX_MII_ATH_DBG_DATA, dbg_data);
	mutex_unlock(&bus->mdio_lock);
}
133
/* Read PHY debug register @dbg_addr on PHY @phy_addr into *@dbg_data.
 * Mirror of ar40xx_phy_dbg_write(): address write followed by a data
 * read, both under the MDIO bus lock so the pair stays atomic.
 */
static void
ar40xx_phy_dbg_read(struct ar40xx_priv *priv, int phy_addr,
		    u16 dbg_addr, u16 *dbg_data)
{
	struct mii_bus *bus = priv->mii_bus;

	mutex_lock(&bus->mdio_lock);
	bus->write(bus, phy_addr, AR40XX_MII_ATH_DBG_ADDR, dbg_addr);
	*dbg_data = bus->read(bus, phy_addr, AR40XX_MII_ATH_DBG_DATA);
	mutex_unlock(&bus->mdio_lock);
}
145
/* Write @reg_val to MMD register @reg_id in device @mmd_num of PHY
 * @phy_id, using the Clause 22 indirect-access registers (13/14):
 * set function=address + device, write the register number, then set
 * function=data (the 0x4000 bit) + device and write the value.
 * The whole four-write sequence is serialized by the MDIO bus lock.
 */
static void
ar40xx_phy_mmd_write(struct ar40xx_priv *priv, u32 phy_id,
		     u16 mmd_num, u16 reg_id, u16 reg_val)
{
	struct mii_bus *bus = priv->mii_bus;

	mutex_lock(&bus->mdio_lock);
	bus->write(bus, phy_id,
		   AR40XX_MII_ATH_MMD_ADDR, mmd_num);
	bus->write(bus, phy_id,
		   AR40XX_MII_ATH_MMD_DATA, reg_id);
	bus->write(bus, phy_id,
		   AR40XX_MII_ATH_MMD_ADDR,
		   0x4000 | mmd_num);
	bus->write(bus, phy_id,
		   AR40XX_MII_ATH_MMD_DATA, reg_val);
	mutex_unlock(&bus->mdio_lock);
}
164
/* Read MMD register @reg_id in device @mmd_num of PHY @phy_id via the
 * Clause 22 indirect-access registers; same sequence as
 * ar40xx_phy_mmd_write() with the final access being a data read.
 * Returns the 16-bit register value.
 */
static u16
ar40xx_phy_mmd_read(struct ar40xx_priv *priv, u32 phy_id,
		    u16 mmd_num, u16 reg_id)
{
	u16 value;
	struct mii_bus *bus = priv->mii_bus;

	mutex_lock(&bus->mdio_lock);
	bus->write(bus, phy_id,
		   AR40XX_MII_ATH_MMD_ADDR, mmd_num);
	bus->write(bus, phy_id,
		   AR40XX_MII_ATH_MMD_DATA, reg_id);
	bus->write(bus, phy_id,
		   AR40XX_MII_ATH_MMD_ADDR,
		   0x4000 | mmd_num);
	value = bus->read(bus, phy_id, AR40XX_MII_ATH_MMD_DATA);
	mutex_unlock(&bus->mdio_lock);
	return value;
}
184
185 /* Start of swconfig support */
186
/* Wait for all AR40XX_NUM_PHYS PHYs to clear their BMCR_RESET bit.
 * Polls every 1-2 ms for up to 500 iterations (comfortably above the
 * 500 ms maximum reset time allowed by 802.3); warns if any PHY is
 * still in reset after that.
 */
static void
ar40xx_phy_poll_reset(struct ar40xx_priv *priv)
{
	u32 i, in_reset, retries = 500;
	struct mii_bus *bus = priv->mii_bus;

	/* Assume RESET was recently issued to some or all of the phys */
	in_reset = GENMASK(AR40XX_NUM_PHYS - 1, 0);

	while (retries--) {
		/* 1ms should be plenty of time.
		 * 802.3 spec allows for a max wait time of 500ms
		 */
		usleep_range(1000, 2000);

		for (i = 0; i < AR40XX_NUM_PHYS; i++) {
			int val;

			/* skip devices which have completed reset */
			if (!(in_reset & BIT(i)))
				continue;

			/* read failures are retried on the next pass */
			val = mdiobus_read(bus, i, MII_BMCR);
			if (val < 0)
				continue;

			/* mark when phy is no longer in reset state */
			if (!(val & BMCR_RESET))
				in_reset &= ~BIT(i);
		}

		if (!in_reset)
			return;
	}

	dev_warn(&bus->dev, "Failed to reset all phys! (in_reset: 0x%x)\n",
		 in_reset);
}
225
/* Re-initialize all front-panel PHYs (ports 1..N map to PHYs 0..N-1):
 * drop manual control, advertise 10/100/1000 with pause, then issue a
 * soft reset with autoneg enabled and wait for the resets to finish.
 * Counterpart of ar40xx_port_phy_linkdown().
 */
static void
ar40xx_phy_init(struct ar40xx_priv *priv)
{
	int i;
	struct mii_bus *bus;
	u16 val;

	bus = priv->mii_bus;
	for (i = 0; i < AR40XX_NUM_PORTS - 1; i++) {
		/* clear the manual-control override set by linkdown */
		ar40xx_phy_dbg_read(priv, i, AR40XX_PHY_DEBUG_0, &val);
		val &= ~AR40XX_PHY_MANU_CTRL_EN;
		ar40xx_phy_dbg_write(priv, i, AR40XX_PHY_DEBUG_0, val);
		mdiobus_write(bus, i,
			      MII_ADVERTISE, ADVERTISE_ALL |
			      ADVERTISE_PAUSE_CAP |
			      ADVERTISE_PAUSE_ASYM);
		mdiobus_write(bus, i, MII_CTRL1000, ADVERTISE_1000FULL);
		mdiobus_write(bus, i, MII_BMCR, BMCR_RESET | BMCR_ANENABLE);
	}

	ar40xx_phy_poll_reset(priv);
}
248
/* Force link down on all front-panel PHYs: clear all advertisement so
 * autoneg cannot complete, reset the PHY, enable manual control and
 * disable the transmitter via the debug registers.  Reverted by
 * ar40xx_phy_init().
 */
static void
ar40xx_port_phy_linkdown(struct ar40xx_priv *priv)
{
	struct mii_bus *bus;
	int i;
	u16 val;

	bus = priv->mii_bus;
	for (i = 0; i < AR40XX_NUM_PORTS - 1; i++) {
		mdiobus_write(bus, i, MII_CTRL1000, 0);
		mdiobus_write(bus, i, MII_ADVERTISE, 0);
		mdiobus_write(bus, i, MII_BMCR, BMCR_RESET | BMCR_ANENABLE);
		ar40xx_phy_dbg_read(priv, i, AR40XX_PHY_DEBUG_0, &val);
		val |= AR40XX_PHY_MANU_CTRL_EN;
		ar40xx_phy_dbg_write(priv, i, AR40XX_PHY_DEBUG_0, val);
		/* disable transmit */
		ar40xx_phy_dbg_read(priv, i, AR40XX_PHY_DEBUG_2, &val);
		val &= 0xf00f;
		ar40xx_phy_dbg_write(priv, i, AR40XX_PHY_DEBUG_2, val);
	}
}
270
/* Apply the current mirroring config (priv->source_port/monitor_port/
 * mirror_rx/mirror_tx) to the hardware.  Always clears all mirror
 * state first, then re-enables it only if the configuration is valid
 * (distinct, in-range source and monitor ports).
 * Caller must hold priv->reg_mutex.
 */
static void
ar40xx_set_mirror_regs(struct ar40xx_priv *priv)
{
	int port;

	/* reset all mirror registers */
	ar40xx_rmw(priv, AR40XX_REG_FWD_CTRL0,
		   AR40XX_FWD_CTRL0_MIRROR_PORT,
		   (0xF << AR40XX_FWD_CTRL0_MIRROR_PORT_S));
	for (port = 0; port < AR40XX_NUM_PORTS; port++) {
		ar40xx_rmw(priv, AR40XX_REG_PORT_LOOKUP(port),
			   AR40XX_PORT_LOOKUP_ING_MIRROR_EN, 0);

		ar40xx_rmw(priv, AR40XX_REG_PORT_HOL_CTRL1(port),
			   AR40XX_PORT_HOL_CTRL1_EG_MIRROR_EN, 0);
	}

	/* now enable mirroring if necessary */
	if (priv->source_port >= AR40XX_NUM_PORTS ||
	    priv->monitor_port >= AR40XX_NUM_PORTS ||
	    priv->source_port == priv->monitor_port) {
		return;
	}

	ar40xx_rmw(priv, AR40XX_REG_FWD_CTRL0,
		   AR40XX_FWD_CTRL0_MIRROR_PORT,
		   (priv->monitor_port << AR40XX_FWD_CTRL0_MIRROR_PORT_S));

	if (priv->mirror_rx)
		ar40xx_rmw(priv, AR40XX_REG_PORT_LOOKUP(priv->source_port), 0,
			   AR40XX_PORT_LOOKUP_ING_MIRROR_EN);

	if (priv->mirror_tx)
		ar40xx_rmw(priv, AR40XX_REG_PORT_HOL_CTRL1(priv->source_port),
			   0, AR40XX_PORT_HOL_CTRL1_EG_MIRROR_EN);
}
307
/* swconfig .get_ports handler: report the member ports of VLAN
 * val->port_vlan from the cached vlan_table.  A port is reported
 * tagged when it is in vlan_tagged or when this VLAN is not its PVID.
 */
static int
ar40xx_sw_get_ports(struct switch_dev *dev, struct switch_val *val)
{
	struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
	u8 ports = priv->vlan_table[val->port_vlan];
	int i;

	val->len = 0;
	for (i = 0; i < dev->ports; i++) {
		struct switch_port *p;

		if (!(ports & BIT(i)))
			continue;

		p = &val->value.ports[val->len++];
		p->id = i;
		if ((priv->vlan_tagged & BIT(i)) ||
		    (priv->pvid[i] != val->port_vlan))
			p->flags = BIT(SWITCH_PORT_FLAG_TAGGED);
		else
			p->flags = 0;
	}
	return 0;
}
332
/* swconfig .set_ports handler: rebuild the membership mask for VLAN
 * val->port_vlan.  An untagged member gets this VLAN as its PVID; a
 * port marked tagged only keeps the vlan_tagged bit when this VLAN is
 * currently its PVID (otherwise the tagged state is implied by the
 * PVID mismatch — see ar40xx_sw_get_ports()).
 */
static int
ar40xx_sw_set_ports(struct switch_dev *dev, struct switch_val *val)
{
	struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
	u8 *vt = &priv->vlan_table[val->port_vlan];
	int i;

	*vt = 0;
	for (i = 0; i < val->len; i++) {
		struct switch_port *p = &val->value.ports[i];

		if (p->flags & BIT(SWITCH_PORT_FLAG_TAGGED)) {
			if (val->port_vlan == priv->pvid[p->id])
				priv->vlan_tagged |= BIT(p->id);
		} else {
			priv->vlan_tagged &= ~BIT(p->id);
			priv->pvid[p->id] = val->port_vlan;
		}

		*vt |= BIT(p->id);
	}
	return 0;
}
356
357 static int
358 ar40xx_reg_wait(struct ar40xx_priv *priv, u32 reg, u32 mask, u32 val,
359 unsigned timeout)
360 {
361 int i;
362
363 for (i = 0; i < timeout; i++) {
364 u32 t;
365
366 t = ar40xx_read(priv, reg);
367 if ((t & mask) == val)
368 return 0;
369
370 usleep_range(1000, 2000);
371 }
372
373 return -ETIMEDOUT;
374 }
375
/* Issue MIB operation @op (capture or flush) and wait for the hardware
 * to clear the busy bit (up to ~10 ms).  Returns 0 on success or
 * -ETIMEDOUT.  Caller must hold priv->mib_lock.
 */
static int
ar40xx_mib_op(struct ar40xx_priv *priv, u32 op)
{
	int ret;

	lockdep_assert_held(&priv->mib_lock);

	/* Capture the hardware statistics for all ports */
	ar40xx_rmw(priv, AR40XX_REG_MIB_FUNC,
		   AR40XX_MIB_FUNC, (op << AR40XX_MIB_FUNC_S));

	/* Wait for the capturing to complete. */
	ret = ar40xx_reg_wait(priv, AR40XX_REG_MIB_FUNC,
			      AR40XX_MIB_BUSY, 0, 10);

	return ret;
}
393
/* Accumulate (or, with @flush, zero) the software MIB counters for
 * @port.  Reads each hardware counter from the port's stats window and
 * adds it to priv->mib_stats; size-2 descriptors are 64-bit counters
 * whose high word lives 4 bytes after the low word.
 * Caller must hold priv->mib_lock and should have run a MIB capture
 * first so the registers hold a consistent snapshot.
 */
static void
ar40xx_mib_fetch_port_stat(struct ar40xx_priv *priv, int port, bool flush)
{
	unsigned int base;
	u64 *mib_stats;
	int i;
	u32 num_mibs = ARRAY_SIZE(ar40xx_mibs);

	WARN_ON(port >= priv->dev.ports);

	lockdep_assert_held(&priv->mib_lock);

	base = AR40XX_REG_PORT_STATS_START +
	       AR40XX_REG_PORT_STATS_LEN * port;

	mib_stats = &priv->mib_stats[port * num_mibs];
	if (flush) {
		u32 len;

		len = num_mibs * sizeof(*mib_stats);
		memset(mib_stats, 0, len);
		return;
	}
	for (i = 0; i < num_mibs; i++) {
		const struct ar40xx_mib_desc *mib;
		u64 t;

		mib = &ar40xx_mibs[i];
		t = ar40xx_read(priv, base + mib->offset);
		if (mib->size == 2) {
			u64 hi;

			hi = ar40xx_read(priv, base + mib->offset + 4);
			t |= hi << 32;
		}

		mib_stats[i] += t;
	}
}
433
434 static int
435 ar40xx_mib_capture(struct ar40xx_priv *priv)
436 {
437 return ar40xx_mib_op(priv, AR40XX_MIB_FUNC_CAPTURE);
438 }
439
440 static int
441 ar40xx_mib_flush(struct ar40xx_priv *priv)
442 {
443 return ar40xx_mib_op(priv, AR40XX_MIB_FUNC_FLUSH);
444 }
445
/* swconfig "reset_mibs" handler: zero the software accumulators for
 * every port and flush the hardware counters, under mib_lock.
 * Returns the result of the hardware flush.
 */
static int
ar40xx_sw_set_reset_mibs(struct switch_dev *dev,
			 const struct switch_attr *attr,
			 struct switch_val *val)
{
	struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
	unsigned int len;
	int ret;
	u32 num_mibs = ARRAY_SIZE(ar40xx_mibs);

	mutex_lock(&priv->mib_lock);

	len = priv->dev.ports * num_mibs * sizeof(*priv->mib_stats);
	memset(priv->mib_stats, 0, len);
	ret = ar40xx_mib_flush(priv);

	mutex_unlock(&priv->mib_lock);
	return ret;
}
465
466 static int
467 ar40xx_sw_set_vlan(struct switch_dev *dev, const struct switch_attr *attr,
468 struct switch_val *val)
469 {
470 struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
471
472 priv->vlan = !!val->value.i;
473 return 0;
474 }
475
476 static int
477 ar40xx_sw_get_vlan(struct switch_dev *dev, const struct switch_attr *attr,
478 struct switch_val *val)
479 {
480 struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
481
482 val->value.i = priv->vlan;
483 return 0;
484 }
485
486 static int
487 ar40xx_sw_set_mirror_rx_enable(struct switch_dev *dev,
488 const struct switch_attr *attr,
489 struct switch_val *val)
490 {
491 struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
492
493 mutex_lock(&priv->reg_mutex);
494 priv->mirror_rx = !!val->value.i;
495 ar40xx_set_mirror_regs(priv);
496 mutex_unlock(&priv->reg_mutex);
497
498 return 0;
499 }
500
501 static int
502 ar40xx_sw_get_mirror_rx_enable(struct switch_dev *dev,
503 const struct switch_attr *attr,
504 struct switch_val *val)
505 {
506 struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
507
508 mutex_lock(&priv->reg_mutex);
509 val->value.i = priv->mirror_rx;
510 mutex_unlock(&priv->reg_mutex);
511 return 0;
512 }
513
514 static int
515 ar40xx_sw_set_mirror_tx_enable(struct switch_dev *dev,
516 const struct switch_attr *attr,
517 struct switch_val *val)
518 {
519 struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
520
521 mutex_lock(&priv->reg_mutex);
522 priv->mirror_tx = !!val->value.i;
523 ar40xx_set_mirror_regs(priv);
524 mutex_unlock(&priv->reg_mutex);
525
526 return 0;
527 }
528
529 static int
530 ar40xx_sw_get_mirror_tx_enable(struct switch_dev *dev,
531 const struct switch_attr *attr,
532 struct switch_val *val)
533 {
534 struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
535
536 mutex_lock(&priv->reg_mutex);
537 val->value.i = priv->mirror_tx;
538 mutex_unlock(&priv->reg_mutex);
539 return 0;
540 }
541
542 static int
543 ar40xx_sw_set_mirror_monitor_port(struct switch_dev *dev,
544 const struct switch_attr *attr,
545 struct switch_val *val)
546 {
547 struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
548
549 mutex_lock(&priv->reg_mutex);
550 priv->monitor_port = val->value.i;
551 ar40xx_set_mirror_regs(priv);
552 mutex_unlock(&priv->reg_mutex);
553
554 return 0;
555 }
556
557 static int
558 ar40xx_sw_get_mirror_monitor_port(struct switch_dev *dev,
559 const struct switch_attr *attr,
560 struct switch_val *val)
561 {
562 struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
563
564 mutex_lock(&priv->reg_mutex);
565 val->value.i = priv->monitor_port;
566 mutex_unlock(&priv->reg_mutex);
567 return 0;
568 }
569
570 static int
571 ar40xx_sw_set_mirror_source_port(struct switch_dev *dev,
572 const struct switch_attr *attr,
573 struct switch_val *val)
574 {
575 struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
576
577 mutex_lock(&priv->reg_mutex);
578 priv->source_port = val->value.i;
579 ar40xx_set_mirror_regs(priv);
580 mutex_unlock(&priv->reg_mutex);
581
582 return 0;
583 }
584
585 static int
586 ar40xx_sw_get_mirror_source_port(struct switch_dev *dev,
587 const struct switch_attr *attr,
588 struct switch_val *val)
589 {
590 struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
591
592 mutex_lock(&priv->reg_mutex);
593 val->value.i = priv->source_port;
594 mutex_unlock(&priv->reg_mutex);
595 return 0;
596 }
597
598 static int
599 ar40xx_sw_set_linkdown(struct switch_dev *dev,
600 const struct switch_attr *attr,
601 struct switch_val *val)
602 {
603 struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
604
605 if (val->value.i == 1)
606 ar40xx_port_phy_linkdown(priv);
607 else
608 ar40xx_phy_init(priv);
609
610 return 0;
611 }
612
/* swconfig per-port "reset_mib" handler: capture the hardware counters
 * (so the snapshot is current), then zero the software accumulators
 * for @port via the flush path of ar40xx_mib_fetch_port_stat().
 * Returns -EINVAL for out-of-range ports, else the capture result.
 */
static int
ar40xx_sw_set_port_reset_mib(struct switch_dev *dev,
			     const struct switch_attr *attr,
			     struct switch_val *val)
{
	struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
	int port;
	int ret;

	port = val->port_vlan;
	if (port >= dev->ports)
		return -EINVAL;

	mutex_lock(&priv->mib_lock);
	ret = ar40xx_mib_capture(priv);
	if (ret)
		goto unlock;

	ar40xx_mib_fetch_port_stat(priv, port, true);

unlock:
	mutex_unlock(&priv->mib_lock);
	return ret;
}
637
/* swconfig per-port "mib" handler: capture and accumulate the port's
 * counters, then format them into priv->buf as "name: value" lines and
 * hand the string back via val->value.s / val->len.
 * NOTE(review): the snprintf size argument is sizeof(priv->buf) - len;
 * this assumes priv->buf is large enough that len never exceeds the
 * buffer size (otherwise the size_t subtraction would wrap) — confirm
 * the buffer is sized for all ~40 counters.
 */
static int
ar40xx_sw_get_port_mib(struct switch_dev *dev,
		       const struct switch_attr *attr,
		       struct switch_val *val)
{
	struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
	u64 *mib_stats;
	int port;
	int ret;
	char *buf = priv->buf;
	int i, len = 0;
	u32 num_mibs = ARRAY_SIZE(ar40xx_mibs);

	port = val->port_vlan;
	if (port >= dev->ports)
		return -EINVAL;

	mutex_lock(&priv->mib_lock);
	ret = ar40xx_mib_capture(priv);
	if (ret)
		goto unlock;

	ar40xx_mib_fetch_port_stat(priv, port, false);

	len += snprintf(buf + len, sizeof(priv->buf) - len,
			"Port %d MIB counters\n",
			port);

	mib_stats = &priv->mib_stats[port * num_mibs];
	for (i = 0; i < num_mibs; i++)
		len += snprintf(buf + len, sizeof(priv->buf) - len,
				"%-12s: %llu\n",
				ar40xx_mibs[i].name,
				mib_stats[i]);

	val->value.s = buf;
	val->len = len;

unlock:
	mutex_unlock(&priv->mib_lock);
	return ret;
}
680
681 static int
682 ar40xx_sw_set_vid(struct switch_dev *dev, const struct switch_attr *attr,
683 struct switch_val *val)
684 {
685 struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
686
687 priv->vlan_id[val->port_vlan] = val->value.i;
688 return 0;
689 }
690
691 static int
692 ar40xx_sw_get_vid(struct switch_dev *dev, const struct switch_attr *attr,
693 struct switch_val *val)
694 {
695 struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
696
697 val->value.i = priv->vlan_id[val->port_vlan];
698 return 0;
699 }
700
701 static int
702 ar40xx_sw_get_pvid(struct switch_dev *dev, int port, int *vlan)
703 {
704 struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
705 *vlan = priv->pvid[port];
706 return 0;
707 }
708
709 static int
710 ar40xx_sw_set_pvid(struct switch_dev *dev, int port, int vlan)
711 {
712 struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
713
714 /* make sure no invalid PVIDs get set */
715 if (vlan >= dev->vlans)
716 return -EINVAL;
717
718 priv->pvid[port] = vlan;
719 return 0;
720 }
721
/* Fill *@link from the PORT_STATUS register of @port.  The CPU port
 * with autoneg disabled is always reported up (it has no PHY).  Speed,
 * duplex and flow-control fields are only decoded when the link is up.
 */
static void
ar40xx_read_port_link(struct ar40xx_priv *priv, int port,
		      struct switch_port_link *link)
{
	u32 status;
	u32 speed;

	memset(link, 0, sizeof(*link));

	status = ar40xx_read(priv, AR40XX_REG_PORT_STATUS(port));

	link->aneg = !!(status & AR40XX_PORT_AUTO_LINK_EN);
	if (link->aneg || (port != AR40XX_PORT_CPU))
		link->link = !!(status & AR40XX_PORT_STATUS_LINK_UP);
	else
		link->link = true;

	if (!link->link)
		return;

	link->duplex = !!(status & AR40XX_PORT_DUPLEX);
	link->tx_flow = !!(status & AR40XX_PORT_STATUS_TXFLOW);
	link->rx_flow = !!(status & AR40XX_PORT_STATUS_RXFLOW);

	speed = (status & AR40XX_PORT_SPEED) >>
		AR40XX_PORT_STATUS_SPEED_S;

	switch (speed) {
	case AR40XX_PORT_SPEED_10M:
		link->speed = SWITCH_PORT_SPEED_10;
		break;
	case AR40XX_PORT_SPEED_100M:
		link->speed = SWITCH_PORT_SPEED_100;
		break;
	case AR40XX_PORT_SPEED_1000M:
		link->speed = SWITCH_PORT_SPEED_1000;
		break;
	default:
		link->speed = SWITCH_PORT_SPEED_UNKNOWN;
		break;
	}
}
764
/* swconfig .get_port_link handler: thin wrapper around
 * ar40xx_read_port_link().
 */
static int
ar40xx_sw_get_port_link(struct switch_dev *dev, int port,
			struct switch_port_link *link)
{
	ar40xx_read_port_link(swdev_to_ar40xx(dev), port, link);

	return 0;
}
774
/* Global (switch-wide) swconfig attributes. */
static const struct switch_attr ar40xx_sw_attr_globals[] = {
	{
		.type = SWITCH_TYPE_INT,
		.name = "enable_vlan",
		.description = "Enable VLAN mode",
		.set = ar40xx_sw_set_vlan,
		.get = ar40xx_sw_get_vlan,
		.max = 1
	},
	{
		.type = SWITCH_TYPE_NOVAL,
		.name = "reset_mibs",
		.description = "Reset all MIB counters",
		.set = ar40xx_sw_set_reset_mibs,
	},
	{
		.type = SWITCH_TYPE_INT,
		.name = "enable_mirror_rx",
		.description = "Enable mirroring of RX packets",
		.set = ar40xx_sw_set_mirror_rx_enable,
		.get = ar40xx_sw_get_mirror_rx_enable,
		.max = 1
	},
	{
		.type = SWITCH_TYPE_INT,
		.name = "enable_mirror_tx",
		.description = "Enable mirroring of TX packets",
		.set = ar40xx_sw_set_mirror_tx_enable,
		.get = ar40xx_sw_get_mirror_tx_enable,
		.max = 1
	},
	{
		.type = SWITCH_TYPE_INT,
		.name = "mirror_monitor_port",
		.description = "Mirror monitor port",
		.set = ar40xx_sw_set_mirror_monitor_port,
		.get = ar40xx_sw_get_mirror_monitor_port,
		.max = AR40XX_NUM_PORTS - 1
	},
	{
		.type = SWITCH_TYPE_INT,
		.name = "mirror_source_port",
		.description = "Mirror source port",
		.set = ar40xx_sw_set_mirror_source_port,
		.get = ar40xx_sw_get_mirror_source_port,
		.max = AR40XX_NUM_PORTS - 1
	},
	{
		/* write-only: no .get handler */
		.type = SWITCH_TYPE_INT,
		.name = "linkdown",
		.description = "Link down all the PHYs",
		.set = ar40xx_sw_set_linkdown,
		.max = 1
	},
};
830
/* Per-port swconfig attributes. */
static const struct switch_attr ar40xx_sw_attr_port[] = {
	{
		.type = SWITCH_TYPE_NOVAL,
		.name = "reset_mib",
		.description = "Reset single port MIB counters",
		.set = ar40xx_sw_set_port_reset_mib,
	},
	{
		/* read-only counter dump */
		.type = SWITCH_TYPE_STRING,
		.name = "mib",
		.description = "Get port's MIB counters",
		.set = NULL,
		.get = ar40xx_sw_get_port_mib,
	},
};
846
/* Per-VLAN swconfig attributes.
 * NOTE(review): unlike the tables above this one is not static —
 * presumably referenced from another file/header; confirm before
 * changing linkage.
 */
const struct switch_attr ar40xx_sw_attr_vlan[] = {
	{
		.type = SWITCH_TYPE_INT,
		.name = "vid",
		.description = "VLAN ID (0-4094)",
		.set = ar40xx_sw_set_vid,
		.get = ar40xx_sw_get_vid,
		.max = 4094,
	},
};
857
858 /* End of swconfig support */
859
/* Busy-poll switch register @reg until (value & @mask) == @val, with a
 * 10-20 us sleep between reads and ~21 attempts.  Logs an error and
 * returns -ETIMEDOUT on failure, 0 on success.  Shorter-interval
 * sibling of ar40xx_reg_wait().
 */
static int
ar40xx_wait_bit(struct ar40xx_priv *priv, int reg, u32 mask, u32 val)
{
	int timeout = 20;
	u32 t;

	while (1) {
		t = ar40xx_read(priv, reg);
		if ((t & mask) == val)
			return 0;

		if (timeout-- <= 0)
			break;

		usleep_range(10, 20);
	}

	pr_err("ar40xx: timeout for reg %08x: %08x & %08x != %08x\n",
	       (unsigned int)reg, t, mask, val);
	return -ETIMEDOUT;
}
881
/* Flush the entire address (FDB) table.  Waits for any in-flight ATU
 * operation to finish, then issues FLUSH with the busy bit set; does
 * not wait for the flush itself to complete.  Returns the result of
 * the initial busy-wait.
 */
static int
ar40xx_atu_flush(struct ar40xx_priv *priv)
{
	int ret;

	ret = ar40xx_wait_bit(priv, AR40XX_REG_ATU_FUNC,
			      AR40XX_ATU_FUNC_BUSY, 0);
	if (!ret)
		ar40xx_write(priv, AR40XX_REG_ATU_FUNC,
			     AR40XX_ATU_FUNC_OP_FLUSH |
			     AR40XX_ATU_FUNC_BUSY);

	return ret;
}
896
/* Hard-reset the ESS switch block via its reset controller, then wait
 * for the internal tables to re-initialize (takes 5-10 ms per the
 * original comment).
 */
static void
ar40xx_ess_reset(struct ar40xx_priv *priv)
{
	reset_control_assert(priv->ess_rst);
	mdelay(10);
	reset_control_deassert(priv->ess_rst);
	/* Waiting for all inner tables init done.
	 * It cost 5~10ms.
	 */
	mdelay(10);

	pr_info("ESS reset ok!\n");
}
910
911 /* Start of psgmii self test */
912
/* Reset and recalibrate the PSGMII link between the Malibu (QCA8075)
 * PHY package and the ESS switch.  PHY address 5 carries the PSGMII
 * control registers; the raw register values written here come from
 * the vendor calibration sequence — presumably Malibu-specific, do not
 * change without hardware documentation.
 */
static void
ar40xx_malibu_psgmii_ess_reset(struct ar40xx_priv *priv)
{
	u32 n;
	struct mii_bus *bus = priv->mii_bus;
	/* reset phy psgmii */
	/* fix phy psgmii RX 20bit */
	mdiobus_write(bus, 5, 0x0, 0x005b);
	/* reset phy psgmii */
	mdiobus_write(bus, 5, 0x0, 0x001b);
	/* release reset phy psgmii */
	mdiobus_write(bus, 5, 0x0, 0x005b);

	/* wait for the Malibu-side PSGMII PLL lock (MMD1 reg 0x28 bit 0) */
	for (n = 0; n < AR40XX_PSGMII_CALB_NUM; n++) {
		u16 status;

		status = ar40xx_phy_mmd_read(priv, 5, 1, 0x28);
		if (status & BIT(0))
			break;
		/* Polling interval to check PSGMII PLL in malibu is ready
		 * the worst time is 8.67ms
		 * for 25MHz reference clock
		 * [512+(128+2048)*49]*80ns+100us
		 */
		mdelay(2);
	}
	mdelay(50);

	/*check malibu psgmii calibration done end..*/

	/*freeze phy psgmii RX CDR*/
	mdiobus_write(bus, 5, 0x1a, 0x2230);

	ar40xx_ess_reset(priv);

	/*check psgmii calibration done start*/
	for (n = 0; n < AR40XX_PSGMII_CALB_NUM; n++) {
		u32 status;

		status = ar40xx_psgmii_read(priv, 0xa0);
		if (status & BIT(0))
			break;
		/* Polling interval to check PSGMII PLL in ESS is ready */
		mdelay(2);
	}
	mdelay(50);

	/* check dakota psgmii calibration done end..*/

	/* relesae phy psgmii RX CDR */
	mdiobus_write(bus, 5, 0x1a, 0x3230);
	/* release phy psgmii RX 20bit */
	mdiobus_write(bus, 5, 0x0, 0x005f);
	mdelay(200);
}
968
/* Loopback-test one PHY over the PSGMII link: reset it into forced
 * 1000M full duplex, wait for link, run the PHY's built-in traffic
 * generator (vendor MMD7 registers 0x8020..0x802f — presumably
 * Malibu-specific), then compare the TX counters against the expected
 * 0x1000 packets.  Sets/clears BIT(phy) in priv->phy_t_status to
 * report failure/success, and finally powers the PHY down.
 * NOTE(review): rx_all_ok/rx_error are read but not used in the
 * pass/fail check — only the TX side is verified here.
 */
static void
ar40xx_psgmii_single_phy_testing(struct ar40xx_priv *priv, int phy)
{
	int j;
	u32 tx_ok, tx_error;
	u32 rx_ok, rx_error;
	u32 tx_ok_high16;
	u32 rx_ok_high16;
	u32 tx_all_ok, rx_all_ok;
	struct mii_bus *bus = priv->mii_bus;

	/* soft reset, then force 1000M full duplex */
	mdiobus_write(bus, phy, 0x0, 0x9000);
	mdiobus_write(bus, phy, 0x0, 0x4140);

	for (j = 0; j < AR40XX_PSGMII_CALB_NUM; j++) {
		u16 status;

		status = mdiobus_read(bus, phy, 0x11);
		if (status & AR40XX_PHY_SPEC_STATUS_LINK)
			break;
		/* the polling interval to check if the PHY link up or not
		 * maxwait_timer: 750 ms +/-10 ms
		 * minwait_timer : 1 us +/- 0.1us
		 * time resides in minwait_timer ~ maxwait_timer
		 * see IEEE 802.3 section 40.4.5.2
		 */
		mdelay(8);
	}

	/* enable check */
	ar40xx_phy_mmd_write(priv, phy, 7, 0x8029, 0x0000);
	ar40xx_phy_mmd_write(priv, phy, 7, 0x8029, 0x0003);

	/* start traffic */
	ar40xx_phy_mmd_write(priv, phy, 7, 0x8020, 0xa000);
	/* wait for all traffic end
	 * 4096(pkt num)*1524(size)*8ns(125MHz)=49.9ms
	 */
	mdelay(50);

	/* check counter */
	tx_ok = ar40xx_phy_mmd_read(priv, phy, 7, 0x802e);
	tx_ok_high16 = ar40xx_phy_mmd_read(priv, phy, 7, 0x802d);
	tx_error = ar40xx_phy_mmd_read(priv, phy, 7, 0x802f);
	rx_ok = ar40xx_phy_mmd_read(priv, phy, 7, 0x802b);
	rx_ok_high16 = ar40xx_phy_mmd_read(priv, phy, 7, 0x802a);
	rx_error = ar40xx_phy_mmd_read(priv, phy, 7, 0x802c);
	tx_all_ok = tx_ok + (tx_ok_high16 << 16);
	rx_all_ok = rx_ok + (rx_ok_high16 << 16);
	if (tx_all_ok == 0x1000 && tx_error == 0) {
		/* success */
		priv->phy_t_status &= (~BIT(phy));
	} else {
		pr_info("PHY %d single test PSGMII issue happen!\n", phy);
		priv->phy_t_status |= BIT(phy);
	}

	/* power the PHY back down */
	mdiobus_write(bus, phy, 0x0, 0x1840);
}
1028
/* Loopback-test all PHYs simultaneously via broadcast MDIO address
 * 0x1f: force 1000M full duplex, wait until every PHY reports link
 * (bit 10 of register 0x11), run the built-in traffic generators on
 * all PHYs at once, then verify each PHY's TX counters.  Failures set
 * BIT(phy + 8) in priv->phy_t_status (high byte = all-PHY test,
 * low byte = single-PHY test).
 */
static void
ar40xx_psgmii_all_phy_testing(struct ar40xx_priv *priv)
{
	int phy, j;
	struct mii_bus *bus = priv->mii_bus;

	mdiobus_write(bus, 0x1f, 0x0, 0x9000);
	mdiobus_write(bus, 0x1f, 0x0, 0x4140);

	for (j = 0; j < AR40XX_PSGMII_CALB_NUM; j++) {
		/* inner loop breaks at the first PHY without link;
		 * all PHYs are up when the scan runs to completion
		 */
		for (phy = 0; phy < AR40XX_NUM_PORTS - 1; phy++) {
			u16 status;

			status = mdiobus_read(bus, phy, 0x11);
			if (!(status & BIT(10)))
				break;
		}

		if (phy >= (AR40XX_NUM_PORTS - 1))
			break;
		/* The polling interva to check if the PHY link up or not */
		mdelay(8);
	}
	/* enable check */
	ar40xx_phy_mmd_write(priv, 0x1f, 7, 0x8029, 0x0000);
	ar40xx_phy_mmd_write(priv, 0x1f, 7, 0x8029, 0x0003);

	/* start traffic */
	ar40xx_phy_mmd_write(priv, 0x1f, 7, 0x8020, 0xa000);
	/* wait for all traffic end
	 * 4096(pkt num)*1524(size)*8ns(125MHz)=49.9ms
	 */
	mdelay(50);

	for (phy = 0; phy < AR40XX_NUM_PORTS - 1; phy++) {
		u32 tx_ok, tx_error;
		u32 rx_ok, rx_error;
		u32 tx_ok_high16;
		u32 rx_ok_high16;
		u32 tx_all_ok, rx_all_ok;

		/* check counter */
		tx_ok = ar40xx_phy_mmd_read(priv, phy, 7, 0x802e);
		tx_ok_high16 = ar40xx_phy_mmd_read(priv, phy, 7, 0x802d);
		tx_error = ar40xx_phy_mmd_read(priv, phy, 7, 0x802f);
		rx_ok = ar40xx_phy_mmd_read(priv, phy, 7, 0x802b);
		rx_ok_high16 = ar40xx_phy_mmd_read(priv, phy, 7, 0x802a);
		rx_error = ar40xx_phy_mmd_read(priv, phy, 7, 0x802c);
		tx_all_ok = tx_ok + (tx_ok_high16<<16);
		rx_all_ok = rx_ok + (rx_ok_high16<<16);
		if (tx_all_ok == 0x1000 && tx_error == 0) {
			/* success */
			priv->phy_t_status &= ~BIT(phy + 8);
		} else {
			pr_info("PHY%d test see issue!\n", phy);
			priv->phy_t_status |= BIT(phy + 8);
		}
	}

	pr_debug("PHY all test 0x%x \r\n", priv->phy_t_status);
}
1090
/* PSGMII link self-test and recovery loop.  Resets/calibrates the
 * PSGMII link, puts all PHYs into MAC loopback, and runs the single-
 * and all-PHY traffic tests; on any failure it re-resets the link and
 * retries, up to AR40XX_PSGMII_CALB_NUM times.  Leaves the traffic
 * generator disabled afterwards; ar40xx_psgmii_self_test_clean() must
 * be called to undo the loopback setup.
 * Exported (non-static) — presumably called from the probe path in
 * another file.
 */
void
ar40xx_psgmii_self_test(struct ar40xx_priv *priv)
{
	u32 i, phy;
	struct mii_bus *bus = priv->mii_bus;

	ar40xx_malibu_psgmii_ess_reset(priv);

	/* switch to access MII reg for copper */
	mdiobus_write(bus, 4, 0x1f, 0x8500);
	for (phy = 0; phy < AR40XX_NUM_PORTS - 1; phy++) {
		/*enable phy mdio broadcast write*/
		ar40xx_phy_mmd_write(priv, phy, 7, 0x8028, 0x801f);
	}
	/* force no link by power down */
	mdiobus_write(bus, 0x1f, 0x0, 0x1840);
	/*packet number*/
	ar40xx_phy_mmd_write(priv, 0x1f, 7, 0x8021, 0x1000);
	ar40xx_phy_mmd_write(priv, 0x1f, 7, 0x8062, 0x05e0);

	/*fix mdi status */
	mdiobus_write(bus, 0x1f, 0x10, 0x6800);
	for (i = 0; i < AR40XX_PSGMII_CALB_NUM; i++) {
		priv->phy_t_status = 0;

		/* MAC loopback on the ports facing the tested PHYs */
		for (phy = 0; phy < AR40XX_NUM_PORTS - 1; phy++) {
			ar40xx_rmw(priv, AR40XX_REG_PORT_LOOKUP(phy + 1),
				   AR40XX_PORT_LOOKUP_LOOPBACK,
				   AR40XX_PORT_LOOKUP_LOOPBACK);
		}

		for (phy = 0; phy < AR40XX_NUM_PORTS - 1; phy++)
			ar40xx_psgmii_single_phy_testing(priv, phy);

		ar40xx_psgmii_all_phy_testing(priv);

		if (priv->phy_t_status)
			ar40xx_malibu_psgmii_ess_reset(priv);
		else
			break;
	}

	if (i >= AR40XX_PSGMII_CALB_NUM)
		pr_info("PSGMII cannot recover\n");
	else
		pr_debug("PSGMII recovered after %d times reset\n", i);

	/* configuration recover */
	/* packet number */
	ar40xx_phy_mmd_write(priv, 0x1f, 7, 0x8021, 0x0);
	/* disable check */
	ar40xx_phy_mmd_write(priv, 0x1f, 7, 0x8029, 0x0);
	/* disable traffic */
	ar40xx_phy_mmd_write(priv, 0x1f, 7, 0x8020, 0x0);
}
1146
/* Undo the self-test setup: take the PHYs out of loopback/power-down,
 * clear the MAC loopback bits on the switch ports, disable broadcast
 * MDIO writes, and flush the FDB entries learned during the test.
 * Exported companion of ar40xx_psgmii_self_test().
 */
void
ar40xx_psgmii_self_test_clean(struct ar40xx_priv *priv)
{
	int phy;
	struct mii_bus *bus = priv->mii_bus;

	/* disable phy internal loopback */
	mdiobus_write(bus, 0x1f, 0x10, 0x6860);
	mdiobus_write(bus, 0x1f, 0x0, 0x9040);

	for (phy = 0; phy < AR40XX_NUM_PORTS - 1; phy++) {
		/* disable mac loop back */
		ar40xx_rmw(priv, AR40XX_REG_PORT_LOOKUP(phy + 1),
			   AR40XX_PORT_LOOKUP_LOOPBACK, 0);
		/* disable phy mdio broadcast write */
		ar40xx_phy_mmd_write(priv, phy, 7, 0x8028, 0x001f);
	}

	/* clear fdb entry */
	ar40xx_atu_flush(priv);
}
1168
1169 /* End of psgmii self test */
1170
1171 static void
1172 ar40xx_mac_mode_init(struct ar40xx_priv *priv, u32 mode)
1173 {
1174 if (mode == PORT_WRAPPER_PSGMII) {
1175 ar40xx_psgmii_write(priv, AR40XX_PSGMII_MODE_CONTROL, 0x2200);
1176 ar40xx_psgmii_write(priv, AR40XX_PSGMIIPHY_TX_CONTROL, 0x8380);
1177 }
1178 }
1179
1180 static
1181 int ar40xx_cpuport_setup(struct ar40xx_priv *priv)
1182 {
1183 u32 t;
1184
1185 t = AR40XX_PORT_STATUS_TXFLOW |
1186 AR40XX_PORT_STATUS_RXFLOW |
1187 AR40XX_PORT_TXHALF_FLOW |
1188 AR40XX_PORT_DUPLEX |
1189 AR40XX_PORT_SPEED_1000M;
1190 ar40xx_write(priv, AR40XX_REG_PORT_STATUS(0), t);
1191 usleep_range(10, 20);
1192
1193 t |= AR40XX_PORT_TX_EN |
1194 AR40XX_PORT_RX_EN;
1195 ar40xx_write(priv, AR40XX_REG_PORT_STATUS(0), t);
1196
1197 return 0;
1198 }
1199
1200 static void
1201 ar40xx_init_port(struct ar40xx_priv *priv, int port)
1202 {
1203 u32 t;
1204
1205 ar40xx_write(priv, AR40XX_REG_PORT_STATUS(port), 0);
1206
1207 ar40xx_write(priv, AR40XX_REG_PORT_HEADER(port), 0);
1208
1209 ar40xx_write(priv, AR40XX_REG_PORT_VLAN0(port), 0);
1210
1211 t = AR40XX_PORT_VLAN1_OUT_MODE_UNTOUCH << AR40XX_PORT_VLAN1_OUT_MODE_S;
1212 ar40xx_write(priv, AR40XX_REG_PORT_VLAN1(port), t);
1213
1214 t = AR40XX_PORT_LOOKUP_LEARN;
1215 t |= AR40XX_PORT_STATE_FORWARD << AR40XX_PORT_LOOKUP_STATE_S;
1216 ar40xx_write(priv, AR40XX_REG_PORT_LOOKUP(port), t);
1217 }
1218
1219 void
1220 ar40xx_init_globals(struct ar40xx_priv *priv)
1221 {
1222 u32 t;
1223
1224 /* enable CPU port and disable mirror port */
1225 t = AR40XX_FWD_CTRL0_CPU_PORT_EN |
1226 AR40XX_FWD_CTRL0_MIRROR_PORT;
1227 ar40xx_write(priv, AR40XX_REG_FWD_CTRL0, t);
1228
1229 /* forward multicast and broadcast frames to CPU */
1230 t = (AR40XX_PORTS_ALL << AR40XX_FWD_CTRL1_UC_FLOOD_S) |
1231 (AR40XX_PORTS_ALL << AR40XX_FWD_CTRL1_MC_FLOOD_S) |
1232 (AR40XX_PORTS_ALL << AR40XX_FWD_CTRL1_BC_FLOOD_S);
1233 ar40xx_write(priv, AR40XX_REG_FWD_CTRL1, t);
1234
1235 /* enable jumbo frames */
1236 ar40xx_rmw(priv, AR40XX_REG_MAX_FRAME_SIZE,
1237 AR40XX_MAX_FRAME_SIZE_MTU, 9018 + 8 + 2);
1238
1239 /* Enable MIB counters */
1240 ar40xx_rmw(priv, AR40XX_REG_MODULE_EN, 0,
1241 AR40XX_MODULE_EN_MIB);
1242
1243 /* Disable AZ */
1244 ar40xx_write(priv, AR40XX_REG_EEE_CTRL, 0);
1245
1246 /* set flowctrl thershold for cpu port */
1247 t = (AR40XX_PORT0_FC_THRESH_ON_DFLT << 16) |
1248 AR40XX_PORT0_FC_THRESH_OFF_DFLT;
1249 ar40xx_write(priv, AR40XX_REG_PORT_FLOWCTRL_THRESH(0), t);
1250 }
1251
1252 static int
1253 ar40xx_hw_init(struct ar40xx_priv *priv)
1254 {
1255 u32 i;
1256
1257 ar40xx_ess_reset(priv);
1258
1259 if (!priv->mii_bus)
1260 return -1;
1261
1262 ar40xx_psgmii_self_test(priv);
1263 ar40xx_psgmii_self_test_clean(priv);
1264
1265 ar40xx_mac_mode_init(priv, priv->mac_mode);
1266
1267 for (i = 0; i < priv->dev.ports; i++)
1268 ar40xx_init_port(priv, i);
1269
1270 ar40xx_init_globals(priv);
1271
1272 return 0;
1273 }
1274
1275 /* Start of qm error WAR */
1276
1277 static
1278 int ar40xx_force_1g_full(struct ar40xx_priv *priv, u32 port_id)
1279 {
1280 u32 reg;
1281
1282 if (port_id < 0 || port_id > 6)
1283 return -1;
1284
1285 reg = AR40XX_REG_PORT_STATUS(port_id);
1286 return ar40xx_rmw(priv, reg, AR40XX_PORT_SPEED,
1287 (AR40XX_PORT_SPEED_1000M | AR40XX_PORT_DUPLEX));
1288 }
1289
1290 static
1291 int ar40xx_get_qm_status(struct ar40xx_priv *priv,
1292 u32 port_id, u32 *qm_buffer_err)
1293 {
1294 u32 reg;
1295 u32 qm_val;
1296
1297 if (port_id < 1 || port_id > 5) {
1298 *qm_buffer_err = 0;
1299 return -1;
1300 }
1301
1302 if (port_id < 4) {
1303 reg = AR40XX_REG_QM_PORT0_3_QNUM;
1304 ar40xx_write(priv, AR40XX_REG_QM_DEBUG_ADDR, reg);
1305 qm_val = ar40xx_read(priv, AR40XX_REG_QM_DEBUG_VALUE);
1306 /* every 8 bits for each port */
1307 *qm_buffer_err = (qm_val >> (port_id * 8)) & 0xFF;
1308 } else {
1309 reg = AR40XX_REG_QM_PORT4_6_QNUM;
1310 ar40xx_write(priv, AR40XX_REG_QM_DEBUG_ADDR, reg);
1311 qm_val = ar40xx_read(priv, AR40XX_REG_QM_DEBUG_VALUE);
1312 /* every 8 bits for each port */
1313 *qm_buffer_err = (qm_val >> ((port_id-4) * 8)) & 0xFF;
1314 }
1315
1316 return 0;
1317 }
1318
/* Periodic link/QM polling state machine (QM error workaround).
 *
 * For every user port (1..AR40XX_NUM_PORTS-1) the PHY status is read
 * and compared against the cached link state:
 *  - on link Up -> Down, the MAC is put into force mode and the port's
 *    queue manager buffer is checked; if it is empty the MAC is forced
 *    to 1G full and the PHY's manual-control bit is cleared;
 *  - on link Down -> Up, the event is debounced for one poll interval,
 *    then the MAC speed/duplex is programmed to match the PHY before
 *    auto link is re-enabled;
 *  - while a port's QM buffer is flagged non-empty, it is re-checked
 *    every pass until it drains, at which point the MAC is forced to
 *    1G full again.
 *
 * Runs from the qm_dwork delayed work under priv->qm_lock.
 */
static void
ar40xx_sw_mac_polling_task(struct ar40xx_priv *priv)
{
	static int task_count;
	u32 i;
	u32 reg, value;
	u32 link, speed, duplex;
	u32 qm_buffer_err;
	u16 port_phy_status[AR40XX_NUM_PORTS];
	/* persistent per-port counters across polling passes */
	static u32 qm_err_cnt[AR40XX_NUM_PORTS] = {0, 0, 0, 0, 0, 0};
	static u32 link_cnt[AR40XX_NUM_PORTS] = {0, 0, 0, 0, 0, 0};
	struct mii_bus *bus = NULL;

	if (!priv || !priv->mii_bus)
		return;

	bus = priv->mii_bus;

	++task_count;

	/* port i is served by PHY address i-1 on the MDIO bus */
	for (i = 1; i < AR40XX_NUM_PORTS; ++i) {
		port_phy_status[i] =
			mdiobus_read(bus, i-1, AR40XX_PHY_SPEC_STATUS);

		speed = FIELD_GET(AR40XX_PHY_SPEC_STATUS_SPEED,
				  port_phy_status[i]);
		link = FIELD_GET(AR40XX_PHY_SPEC_STATUS_LINK,
				 port_phy_status[i]);
		duplex = FIELD_GET(AR40XX_PHY_SPEC_STATUS_DUPLEX,
				   port_phy_status[i]);

		if (link != priv->ar40xx_port_old_link[i]) {
			++link_cnt[i];
			/* Up --> Down */
			if ((priv->ar40xx_port_old_link[i] ==
					AR40XX_PORT_LINK_UP) &&
			    (link == AR40XX_PORT_LINK_DOWN)) {
				/* LINK_EN disable(MAC force mode)*/
				reg = AR40XX_REG_PORT_STATUS(i);
				ar40xx_rmw(priv, reg,
						AR40XX_PORT_AUTO_LINK_EN, 0);

				/* Check queue buffer */
				qm_err_cnt[i] = 0;
				ar40xx_get_qm_status(priv, i, &qm_buffer_err);
				if (qm_buffer_err) {
					priv->ar40xx_port_qm_buf[i] =
						AR40XX_QM_NOT_EMPTY;
				} else {
					u16 phy_val = 0;

					priv->ar40xx_port_qm_buf[i] =
						AR40XX_QM_EMPTY;
					ar40xx_force_1g_full(priv, i);
					/* Ref:QCA8337 Datasheet,Clearing
					 * MENU_CTRL_EN prevents phy to
					 * stuck in 100BT mode when
					 * bringing up the link
					 */
					ar40xx_phy_dbg_read(priv, i-1,
							    AR40XX_PHY_DEBUG_0,
							    &phy_val);
					phy_val &= (~AR40XX_PHY_MANU_CTRL_EN);
					ar40xx_phy_dbg_write(priv, i-1,
							     AR40XX_PHY_DEBUG_0,
							     phy_val);
				}
				priv->ar40xx_port_old_link[i] = link;
			} else if ((priv->ar40xx_port_old_link[i] ==
						AR40XX_PORT_LINK_DOWN) &&
					(link == AR40XX_PORT_LINK_UP)) {
				/* Down --> Up */
				/* debounce: require the link to stay up for
				 * one extra poll before touching the MAC
				 */
				if (priv->port_link_up[i] < 1) {
					++priv->port_link_up[i];
				} else {
					/* Change port status */
					reg = AR40XX_REG_PORT_STATUS(i);
					value = ar40xx_read(priv, reg);
					priv->port_link_up[i] = 0;

					/* mirror the PHY's negotiated
					 * speed/duplex into the MAC
					 */
					value &= ~(AR40XX_PORT_DUPLEX |
						   AR40XX_PORT_SPEED);
					value |= speed | (duplex ? BIT(6) : 0);
					ar40xx_write(priv, reg, value);
					/* clock switch need such time
					 * to avoid glitch
					 */
					usleep_range(100, 200);

					value |= AR40XX_PORT_AUTO_LINK_EN;
					ar40xx_write(priv, reg, value);
					/* HW need such time to make sure link
					 * stable before enable MAC
					 */
					usleep_range(100, 200);

					if (speed == AR40XX_PORT_SPEED_100M) {
						u16 phy_val = 0;
						/* Enable @100M, if down to 10M
						 * clock will change smoothly
						 */
						ar40xx_phy_dbg_read(priv, i-1,
								    0,
								    &phy_val);
						phy_val |=
							AR40XX_PHY_MANU_CTRL_EN;
						ar40xx_phy_dbg_write(priv, i-1,
								     0,
								     phy_val);
					}
					priv->ar40xx_port_old_link[i] = link;
				}
			}
		}

		/* keep re-checking a flagged port until its QM drains */
		if (priv->ar40xx_port_qm_buf[i] == AR40XX_QM_NOT_EMPTY) {
			/* Check QM */
			ar40xx_get_qm_status(priv, i, &qm_buffer_err);
			if (qm_buffer_err) {
				++qm_err_cnt[i];
			} else {
				priv->ar40xx_port_qm_buf[i] =
					AR40XX_QM_EMPTY;
				qm_err_cnt[i] = 0;
				ar40xx_force_1g_full(priv, i);
			}
		}
	}
}
1448
1449 static void
1450 ar40xx_qm_err_check_work_task(struct work_struct *work)
1451 {
1452 struct ar40xx_priv *priv = container_of(work, struct ar40xx_priv,
1453 qm_dwork.work);
1454
1455 mutex_lock(&priv->qm_lock);
1456
1457 ar40xx_sw_mac_polling_task(priv);
1458
1459 mutex_unlock(&priv->qm_lock);
1460
1461 schedule_delayed_work(&priv->qm_dwork,
1462 msecs_to_jiffies(AR40XX_QM_WORK_DELAY));
1463 }
1464
1465 static int
1466 ar40xx_qm_err_check_work_start(struct ar40xx_priv *priv)
1467 {
1468 mutex_init(&priv->qm_lock);
1469
1470 INIT_DELAYED_WORK(&priv->qm_dwork, ar40xx_qm_err_check_work_task);
1471
1472 schedule_delayed_work(&priv->qm_dwork,
1473 msecs_to_jiffies(AR40XX_QM_WORK_DELAY));
1474
1475 return 0;
1476 }
1477
1478 /* End of qm error WAR */
1479
1480 static int
1481 ar40xx_vlan_init(struct ar40xx_priv *priv)
1482 {
1483 int port;
1484 unsigned long bmp;
1485
1486 /* By default Enable VLAN */
1487 priv->vlan = 1;
1488 priv->vlan_table[AR40XX_LAN_VLAN] = priv->cpu_bmp | priv->lan_bmp;
1489 priv->vlan_table[AR40XX_WAN_VLAN] = priv->cpu_bmp | priv->wan_bmp;
1490 priv->vlan_tagged = priv->cpu_bmp;
1491 bmp = priv->lan_bmp;
1492 for_each_set_bit(port, &bmp, AR40XX_NUM_PORTS)
1493 priv->pvid[port] = AR40XX_LAN_VLAN;
1494
1495 bmp = priv->wan_bmp;
1496 for_each_set_bit(port, &bmp, AR40XX_NUM_PORTS)
1497 priv->pvid[port] = AR40XX_WAN_VLAN;
1498
1499 return 0;
1500 }
1501
1502 static void
1503 ar40xx_mib_work_func(struct work_struct *work)
1504 {
1505 struct ar40xx_priv *priv;
1506 int err;
1507
1508 priv = container_of(work, struct ar40xx_priv, mib_work.work);
1509
1510 mutex_lock(&priv->mib_lock);
1511
1512 err = ar40xx_mib_capture(priv);
1513 if (err)
1514 goto next_port;
1515
1516 ar40xx_mib_fetch_port_stat(priv, priv->mib_next_port, false);
1517
1518 next_port:
1519 priv->mib_next_port++;
1520 if (priv->mib_next_port >= priv->dev.ports)
1521 priv->mib_next_port = 0;
1522
1523 mutex_unlock(&priv->mib_lock);
1524
1525 schedule_delayed_work(&priv->mib_work,
1526 msecs_to_jiffies(AR40XX_MIB_WORK_DELAY));
1527 }
1528
1529 static void
1530 ar40xx_setup_port(struct ar40xx_priv *priv, int port, u32 members)
1531 {
1532 u32 t;
1533 u32 egress, ingress;
1534 u32 pvid = priv->vlan_id[priv->pvid[port]];
1535
1536 if (priv->vlan) {
1537 egress = AR40XX_PORT_VLAN1_OUT_MODE_UNMOD;
1538
1539 ingress = AR40XX_IN_SECURE;
1540 } else {
1541 egress = AR40XX_PORT_VLAN1_OUT_MODE_UNTOUCH;
1542 ingress = AR40XX_IN_PORT_ONLY;
1543 }
1544
1545 t = pvid << AR40XX_PORT_VLAN0_DEF_SVID_S;
1546 t |= pvid << AR40XX_PORT_VLAN0_DEF_CVID_S;
1547 ar40xx_write(priv, AR40XX_REG_PORT_VLAN0(port), t);
1548
1549 t = AR40XX_PORT_VLAN1_PORT_VLAN_PROP;
1550 t |= egress << AR40XX_PORT_VLAN1_OUT_MODE_S;
1551
1552 ar40xx_write(priv, AR40XX_REG_PORT_VLAN1(port), t);
1553
1554 t = members;
1555 t |= AR40XX_PORT_LOOKUP_LEARN;
1556 t |= ingress << AR40XX_PORT_LOOKUP_IN_MODE_S;
1557 t |= AR40XX_PORT_STATE_FORWARD << AR40XX_PORT_LOOKUP_STATE_S;
1558 ar40xx_write(priv, AR40XX_REG_PORT_LOOKUP(port), t);
1559 }
1560
1561 static void
1562 ar40xx_vtu_op(struct ar40xx_priv *priv, u32 op, u32 val)
1563 {
1564 if (ar40xx_wait_bit(priv, AR40XX_REG_VTU_FUNC1,
1565 AR40XX_VTU_FUNC1_BUSY, 0))
1566 return;
1567
1568 if ((op & AR40XX_VTU_FUNC1_OP) == AR40XX_VTU_FUNC1_OP_LOAD)
1569 ar40xx_write(priv, AR40XX_REG_VTU_FUNC0, val);
1570
1571 op |= AR40XX_VTU_FUNC1_BUSY;
1572 ar40xx_write(priv, AR40XX_REG_VTU_FUNC1, op);
1573 }
1574
1575 static void
1576 ar40xx_vtu_load_vlan(struct ar40xx_priv *priv, u32 vid, u32 port_mask)
1577 {
1578 u32 op;
1579 u32 val;
1580 int i;
1581
1582 op = AR40XX_VTU_FUNC1_OP_LOAD | (vid << AR40XX_VTU_FUNC1_VID_S);
1583 val = AR40XX_VTU_FUNC0_VALID | AR40XX_VTU_FUNC0_IVL;
1584 for (i = 0; i < AR40XX_NUM_PORTS; i++) {
1585 u32 mode;
1586
1587 if ((port_mask & BIT(i)) == 0)
1588 mode = AR40XX_VTU_FUNC0_EG_MODE_NOT;
1589 else if (priv->vlan == 0)
1590 mode = AR40XX_VTU_FUNC0_EG_MODE_KEEP;
1591 else if ((priv->vlan_tagged & BIT(i)) ||
1592 (priv->vlan_id[priv->pvid[i]] != vid))
1593 mode = AR40XX_VTU_FUNC0_EG_MODE_TAG;
1594 else
1595 mode = AR40XX_VTU_FUNC0_EG_MODE_UNTAG;
1596
1597 val |= mode << AR40XX_VTU_FUNC0_EG_MODE_S(i);
1598 }
1599 ar40xx_vtu_op(priv, op, val);
1600 }
1601
/* Flush (invalidate) all entries in the VLAN table. */
static void
ar40xx_vtu_flush(struct ar40xx_priv *priv)
{
	ar40xx_vtu_op(priv, AR40XX_VTU_FUNC1_OP_FLUSH, 0);
}
1607
1608 static int
1609 ar40xx_sw_hw_apply(struct switch_dev *dev)
1610 {
1611 struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
1612 u8 portmask[AR40XX_NUM_PORTS];
1613 int i, j;
1614
1615 mutex_lock(&priv->reg_mutex);
1616 /* flush all vlan entries */
1617 ar40xx_vtu_flush(priv);
1618
1619 memset(portmask, 0, sizeof(portmask));
1620 if (priv->vlan) {
1621 for (j = 0; j < AR40XX_MAX_VLANS; j++) {
1622 u8 vp = priv->vlan_table[j];
1623
1624 if (!vp)
1625 continue;
1626
1627 for (i = 0; i < dev->ports; i++) {
1628 u8 mask = BIT(i);
1629
1630 if (vp & mask)
1631 portmask[i] |= vp & ~mask;
1632 }
1633
1634 ar40xx_vtu_load_vlan(priv, priv->vlan_id[j],
1635 priv->vlan_table[j]);
1636 }
1637 } else {
1638 /* 8021q vlan disabled */
1639 for (i = 0; i < dev->ports; i++) {
1640 if (i == AR40XX_PORT_CPU)
1641 continue;
1642
1643 portmask[i] = BIT(AR40XX_PORT_CPU);
1644 portmask[AR40XX_PORT_CPU] |= BIT(i);
1645 }
1646 }
1647
1648 /* update the port destination mask registers and tag settings */
1649 for (i = 0; i < dev->ports; i++)
1650 ar40xx_setup_port(priv, i, portmask[i]);
1651
1652 ar40xx_set_mirror_regs(priv);
1653
1654 mutex_unlock(&priv->reg_mutex);
1655 return 0;
1656 }
1657
1658 static int
1659 ar40xx_sw_reset_switch(struct switch_dev *dev)
1660 {
1661 struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
1662 int i, rv;
1663
1664 mutex_lock(&priv->reg_mutex);
1665 memset(&priv->vlan, 0, sizeof(struct ar40xx_priv) -
1666 offsetof(struct ar40xx_priv, vlan));
1667
1668 for (i = 0; i < AR40XX_MAX_VLANS; i++)
1669 priv->vlan_id[i] = i;
1670
1671 ar40xx_vlan_init(priv);
1672
1673 priv->mirror_rx = false;
1674 priv->mirror_tx = false;
1675 priv->source_port = 0;
1676 priv->monitor_port = 0;
1677
1678 mutex_unlock(&priv->reg_mutex);
1679
1680 rv = ar40xx_sw_hw_apply(dev);
1681 return rv;
1682 }
1683
/* Bring the whole switch up: hardware init, configuration reset, CPU
 * port setup (which must come last), then start the periodic MIB and
 * QM maintenance work.
 *
 * Returns 0 on success or the first failing step's error code.
 */
static int
ar40xx_start(struct ar40xx_priv *priv)
{
	int ret;

	ret = ar40xx_hw_init(priv);
	if (ret)
		return ret;

	ret = ar40xx_sw_reset_switch(&priv->dev);
	if (ret)
		return ret;

	/* at last, setup cpu port */
	ret = ar40xx_cpuport_setup(priv);
	if (ret)
		return ret;

	schedule_delayed_work(&priv->mib_work,
			      msecs_to_jiffies(AR40XX_MIB_WORK_DELAY));

	ar40xx_qm_err_check_work_start(priv);

	return 0;
}
1709
/* swconfig operations exported by this driver; the attribute tables
 * and handlers are defined elsewhere in this file.
 */
static const struct switch_dev_ops ar40xx_sw_ops = {
	.attr_global = {
		.attr = ar40xx_sw_attr_globals,
		.n_attr = ARRAY_SIZE(ar40xx_sw_attr_globals),
	},
	.attr_port = {
		.attr = ar40xx_sw_attr_port,
		.n_attr = ARRAY_SIZE(ar40xx_sw_attr_port),
	},
	.attr_vlan = {
		.attr = ar40xx_sw_attr_vlan,
		.n_attr = ARRAY_SIZE(ar40xx_sw_attr_vlan),
	},
	.get_port_pvid = ar40xx_sw_get_pvid,
	.set_port_pvid = ar40xx_sw_set_pvid,
	.get_vlan_ports = ar40xx_sw_get_ports,
	.set_vlan_ports = ar40xx_sw_set_ports,
	.apply_config = ar40xx_sw_hw_apply,
	.reset_switch = ar40xx_sw_reset_switch,
	.get_port_link = ar40xx_sw_get_port_link,
};
1731
1732 /* Platform driver probe function */
1733
1734 static int ar40xx_probe(struct platform_device *pdev)
1735 {
1736 struct device_node *switch_node;
1737 struct device_node *psgmii_node;
1738 struct device_node *mdio_node;
1739 const __be32 *mac_mode;
1740 struct clk *ess_clk;
1741 struct switch_dev *swdev;
1742 struct ar40xx_priv *priv;
1743 u32 len;
1744 u32 num_mibs;
1745 struct resource psgmii_base = {0};
1746 struct resource switch_base = {0};
1747 int ret;
1748
1749 priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
1750 if (!priv)
1751 return -ENOMEM;
1752
1753 platform_set_drvdata(pdev, priv);
1754 ar40xx_priv = priv;
1755
1756 switch_node = of_node_get(pdev->dev.of_node);
1757 if (of_address_to_resource(switch_node, 0, &switch_base) != 0)
1758 return -EIO;
1759
1760 priv->hw_addr = devm_ioremap_resource(&pdev->dev, &switch_base);
1761 if (IS_ERR(priv->hw_addr)) {
1762 dev_err(&pdev->dev, "Failed to ioremap switch_base!\n");
1763 return PTR_ERR(priv->hw_addr);
1764 }
1765
1766 /*psgmii dts get*/
1767 psgmii_node = of_find_node_by_name(NULL, "ess-psgmii");
1768 if (!psgmii_node) {
1769 dev_err(&pdev->dev, "Failed to find ess-psgmii node!\n");
1770 return -EINVAL;
1771 }
1772
1773 if (of_address_to_resource(psgmii_node, 0, &psgmii_base) != 0)
1774 return -EIO;
1775
1776 priv->psgmii_hw_addr = devm_ioremap_resource(&pdev->dev, &psgmii_base);
1777 if (IS_ERR(priv->psgmii_hw_addr)) {
1778 dev_err(&pdev->dev, "psgmii ioremap fail!\n");
1779 return PTR_ERR(priv->psgmii_hw_addr);
1780 }
1781
1782 mac_mode = of_get_property(switch_node, "switch_mac_mode", &len);
1783 if (!mac_mode) {
1784 dev_err(&pdev->dev, "Failed to read switch_mac_mode\n");
1785 return -EINVAL;
1786 }
1787 priv->mac_mode = be32_to_cpup(mac_mode);
1788
1789 ess_clk = of_clk_get_by_name(switch_node, "ess_clk");
1790 if (ess_clk)
1791 clk_prepare_enable(ess_clk);
1792
1793 priv->ess_rst = devm_reset_control_get(&pdev->dev, "ess_rst");
1794 if (IS_ERR(priv->ess_rst)) {
1795 dev_err(&pdev->dev, "Failed to get ess_rst control!\n");
1796 return PTR_ERR(priv->ess_rst);
1797 }
1798
1799 if (of_property_read_u32(switch_node, "switch_cpu_bmp",
1800 &priv->cpu_bmp) ||
1801 of_property_read_u32(switch_node, "switch_lan_bmp",
1802 &priv->lan_bmp) ||
1803 of_property_read_u32(switch_node, "switch_wan_bmp",
1804 &priv->wan_bmp)) {
1805 dev_err(&pdev->dev, "Failed to read port properties\n");
1806 return -EIO;
1807 }
1808
1809 mutex_init(&priv->reg_mutex);
1810 mutex_init(&priv->mib_lock);
1811 INIT_DELAYED_WORK(&priv->mib_work, ar40xx_mib_work_func);
1812
1813 /* register switch */
1814 swdev = &priv->dev;
1815
1816 mdio_node = of_find_compatible_node(NULL, NULL, "qcom,ipq4019-mdio");
1817 if (!mdio_node) {
1818 dev_err(&pdev->dev, "Probe failed - Cannot find mdio node by phandle!\n");
1819 ret = -ENODEV;
1820 goto err_missing_phy;
1821 }
1822
1823 priv->mii_bus = of_mdio_find_bus(mdio_node);
1824
1825 if (priv->mii_bus == NULL) {
1826 dev_err(&pdev->dev, "Probe failed - Missing PHYs!\n");
1827 ret = -ENODEV;
1828 goto err_missing_phy;
1829 }
1830
1831 swdev->alias = dev_name(&priv->mii_bus->dev);
1832
1833 swdev->cpu_port = AR40XX_PORT_CPU;
1834 swdev->name = "QCA AR40xx";
1835 swdev->vlans = AR40XX_MAX_VLANS;
1836 swdev->ports = AR40XX_NUM_PORTS;
1837 swdev->ops = &ar40xx_sw_ops;
1838 ret = register_switch(swdev, NULL);
1839 if (ret < 0) {
1840 dev_err(&pdev->dev, "Switch registration failed!\n");
1841 return ret;
1842 }
1843
1844 num_mibs = ARRAY_SIZE(ar40xx_mibs);
1845 len = priv->dev.ports * num_mibs *
1846 sizeof(*priv->mib_stats);
1847 priv->mib_stats = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
1848 if (!priv->mib_stats) {
1849 ret = -ENOMEM;
1850 goto err_unregister_switch;
1851 }
1852
1853 ar40xx_start(priv);
1854
1855 return 0;
1856
1857 err_unregister_switch:
1858 unregister_switch(&priv->dev);
1859 err_missing_phy:
1860 platform_set_drvdata(pdev, NULL);
1861 return ret;
1862 }
1863
/* Platform driver remove: stop the deferred work and unregister the
 * swconfig device.
 */
static int ar40xx_remove(struct platform_device *pdev)
{
	struct ar40xx_priv *priv = platform_get_drvdata(pdev);

	/* ensure neither work item re-arms itself after this point */
	cancel_delayed_work_sync(&priv->qm_dwork);
	cancel_delayed_work_sync(&priv->mib_work);

	unregister_switch(&priv->dev);

	return 0;
}
1875
/* Device-tree match table: binds this driver to the ESS switch node. */
static const struct of_device_id ar40xx_of_mtable[] = {
	{.compatible = "qcom,ess-switch" },
	{}
};
1880
/* Platform driver definition; registered below via
 * module_platform_driver().
 */
struct platform_driver ar40xx_drv = {
	.probe = ar40xx_probe,
	.remove = ar40xx_remove,
	.driver = {
		.name = "ar40xx",
		.of_match_table = ar40xx_of_mtable,
	},
};

module_platform_driver(ar40xx_drv);

MODULE_DESCRIPTION("IPQ40XX ESS driver");
MODULE_LICENSE("Dual BSD/GPL");