ipq40xx: net: phy: ar40xx: remove PHY handling
[openwrt/staging/chunkeey.git] / target / linux / ipq40xx / files / drivers / net / phy / ar40xx.c
1 /*
2 * Copyright (c) 2016, The Linux Foundation. All rights reserved.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for
5 * any purpose with or without fee is hereby granted, provided that the
6 * above copyright notice and this permission notice appear in all copies.
7 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
8 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
9 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
10 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
11 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
12 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
13 * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
14 */
15
16 #include <linux/module.h>
17 #include <linux/list.h>
18 #include <linux/bitops.h>
19 #include <linux/switch.h>
20 #include <linux/delay.h>
21 #include <linux/phy.h>
22 #include <linux/clk.h>
23 #include <linux/reset.h>
24 #include <linux/lockdep.h>
25 #include <linux/workqueue.h>
26 #include <linux/of_device.h>
27 #include <linux/of_address.h>
28 #include <linux/of_mdio.h>
29 #include <linux/mdio.h>
30 #include <linux/gpio.h>
31
32 #include "ar40xx.h"
33
/* Single driver-wide instance pointer; presumably there is only one
 * AR40xx switch per system — confirm against probe code. */
static struct ar40xx_priv *ar40xx_priv;

/* Build one MIB counter descriptor:
 * _s = counter width in 32-bit words (2 for 64-bit byte counters),
 * _o = register offset within a port's statistics block,
 * _n = counter name reported through swconfig. */
#define MIB_DESC(_s , _o, _n) \
	{ \
		.size = (_s), \
		.offset = (_o), \
		.name = (_n), \
	}

/* Hardware MIB counters, in register-offset order. Indexed in lockstep
 * with priv->mib_stats (num_mibs entries per port). */
static const struct ar40xx_mib_desc ar40xx_mibs[] = {
	MIB_DESC(1, AR40XX_STATS_RXBROAD, "RxBroad"),
	MIB_DESC(1, AR40XX_STATS_RXPAUSE, "RxPause"),
	MIB_DESC(1, AR40XX_STATS_RXMULTI, "RxMulti"),
	MIB_DESC(1, AR40XX_STATS_RXFCSERR, "RxFcsErr"),
	MIB_DESC(1, AR40XX_STATS_RXALIGNERR, "RxAlignErr"),
	MIB_DESC(1, AR40XX_STATS_RXRUNT, "RxRunt"),
	MIB_DESC(1, AR40XX_STATS_RXFRAGMENT, "RxFragment"),
	MIB_DESC(1, AR40XX_STATS_RX64BYTE, "Rx64Byte"),
	MIB_DESC(1, AR40XX_STATS_RX128BYTE, "Rx128Byte"),
	MIB_DESC(1, AR40XX_STATS_RX256BYTE, "Rx256Byte"),
	MIB_DESC(1, AR40XX_STATS_RX512BYTE, "Rx512Byte"),
	MIB_DESC(1, AR40XX_STATS_RX1024BYTE, "Rx1024Byte"),
	MIB_DESC(1, AR40XX_STATS_RX1518BYTE, "Rx1518Byte"),
	MIB_DESC(1, AR40XX_STATS_RXMAXBYTE, "RxMaxByte"),
	MIB_DESC(1, AR40XX_STATS_RXTOOLONG, "RxTooLong"),
	MIB_DESC(2, AR40XX_STATS_RXGOODBYTE, "RxGoodByte"),
	MIB_DESC(2, AR40XX_STATS_RXBADBYTE, "RxBadByte"),
	MIB_DESC(1, AR40XX_STATS_RXOVERFLOW, "RxOverFlow"),
	MIB_DESC(1, AR40XX_STATS_FILTERED, "Filtered"),
	MIB_DESC(1, AR40XX_STATS_TXBROAD, "TxBroad"),
	MIB_DESC(1, AR40XX_STATS_TXPAUSE, "TxPause"),
	MIB_DESC(1, AR40XX_STATS_TXMULTI, "TxMulti"),
	MIB_DESC(1, AR40XX_STATS_TXUNDERRUN, "TxUnderRun"),
	MIB_DESC(1, AR40XX_STATS_TX64BYTE, "Tx64Byte"),
	MIB_DESC(1, AR40XX_STATS_TX128BYTE, "Tx128Byte"),
	MIB_DESC(1, AR40XX_STATS_TX256BYTE, "Tx256Byte"),
	MIB_DESC(1, AR40XX_STATS_TX512BYTE, "Tx512Byte"),
	MIB_DESC(1, AR40XX_STATS_TX1024BYTE, "Tx1024Byte"),
	MIB_DESC(1, AR40XX_STATS_TX1518BYTE, "Tx1518Byte"),
	MIB_DESC(1, AR40XX_STATS_TXMAXBYTE, "TxMaxByte"),
	MIB_DESC(1, AR40XX_STATS_TXOVERSIZE, "TxOverSize"),
	MIB_DESC(2, AR40XX_STATS_TXBYTE, "TxByte"),
	MIB_DESC(1, AR40XX_STATS_TXCOLLISION, "TxCollision"),
	MIB_DESC(1, AR40XX_STATS_TXABORTCOL, "TxAbortCol"),
	MIB_DESC(1, AR40XX_STATS_TXMULTICOL, "TxMultiCol"),
	MIB_DESC(1, AR40XX_STATS_TXSINGLECOL, "TxSingleCol"),
	MIB_DESC(1, AR40XX_STATS_TXEXCDEFER, "TxExcDefer"),
	MIB_DESC(1, AR40XX_STATS_TXDEFER, "TxDefer"),
	MIB_DESC(1, AR40XX_STATS_TXLATECOL, "TxLateCol"),
};
84
85 static u32
86 ar40xx_read(struct ar40xx_priv *priv, int reg)
87 {
88 return readl(priv->hw_addr + reg);
89 }
90
91 static u32
92 ar40xx_psgmii_read(struct ar40xx_priv *priv, int reg)
93 {
94 return readl(priv->psgmii_hw_addr + reg);
95 }
96
/* Write a 32-bit value to an ESS switch register. */
static void
ar40xx_write(struct ar40xx_priv *priv, int reg, u32 val)
{
	writel(val, priv->hw_addr + reg);
}
102
103 static u32
104 ar40xx_rmw(struct ar40xx_priv *priv, int reg, u32 mask, u32 val)
105 {
106 u32 ret;
107
108 ret = ar40xx_read(priv, reg);
109 ret &= ~mask;
110 ret |= val;
111 ar40xx_write(priv, reg, ret);
112 return ret;
113 }
114
/* Write a 32-bit value to a PSGMII PHY wrapper register. */
static void
ar40xx_psgmii_write(struct ar40xx_priv *priv, int reg, u32 val)
{
	writel(val, priv->psgmii_hw_addr + reg);
}
120
/* Write a PHY debug register via the Atheros two-step debug port:
 * latch the debug register address, then write the data word.
 * Both MDIO writes are held under the bus lock so the address/data
 * pair cannot be interleaved with other bus users. */
static void
ar40xx_phy_dbg_write(struct ar40xx_priv *priv, int phy_addr,
		     u16 dbg_addr, u16 dbg_data)
{
	struct mii_bus *bus = priv->mii_bus;

	mutex_lock(&bus->mdio_lock);
	bus->write(bus, phy_addr, AR40XX_MII_ATH_DBG_ADDR, dbg_addr);
	bus->write(bus, phy_addr, AR40XX_MII_ATH_DBG_DATA, dbg_data);
	mutex_unlock(&bus->mdio_lock);
}
132
/* Read a PHY debug register via the Atheros two-step debug port:
 * latch the debug register address, then read the data word back
 * into *dbg_data. Held under the MDIO bus lock for atomicity. */
static void
ar40xx_phy_dbg_read(struct ar40xx_priv *priv, int phy_addr,
		    u16 dbg_addr, u16 *dbg_data)
{
	struct mii_bus *bus = priv->mii_bus;

	mutex_lock(&bus->mdio_lock);
	bus->write(bus, phy_addr, AR40XX_MII_ATH_DBG_ADDR, dbg_addr);
	*dbg_data = bus->read(bus, phy_addr, AR40XX_MII_ATH_DBG_DATA);
	mutex_unlock(&bus->mdio_lock);
}
144
/* Write an MMD (MDIO Manageable Device) register through the Clause 22
 * indirect access registers: select the MMD, latch the register id,
 * switch to data mode (0x4000 | mmd_num selects "data, no post
 * increment"), then write the value. The whole four-write sequence
 * is held under the MDIO bus lock. */
static void
ar40xx_phy_mmd_write(struct ar40xx_priv *priv, u32 phy_id,
		     u16 mmd_num, u16 reg_id, u16 reg_val)
{
	struct mii_bus *bus = priv->mii_bus;

	mutex_lock(&bus->mdio_lock);
	bus->write(bus, phy_id,
		   AR40XX_MII_ATH_MMD_ADDR, mmd_num);
	bus->write(bus, phy_id,
		   AR40XX_MII_ATH_MMD_DATA, reg_id);
	bus->write(bus, phy_id,
		   AR40XX_MII_ATH_MMD_ADDR,
		   0x4000 | mmd_num);
	bus->write(bus, phy_id,
		   AR40XX_MII_ATH_MMD_DATA, reg_val);
	mutex_unlock(&bus->mdio_lock);
}
163
/* Read an MMD register through the Clause 22 indirect access
 * registers; same sequence as ar40xx_phy_mmd_write() but the final
 * data access is a read. Held under the MDIO bus lock. */
static u16
ar40xx_phy_mmd_read(struct ar40xx_priv *priv, u32 phy_id,
		    u16 mmd_num, u16 reg_id)
{
	u16 value;
	struct mii_bus *bus = priv->mii_bus;

	mutex_lock(&bus->mdio_lock);
	bus->write(bus, phy_id,
		   AR40XX_MII_ATH_MMD_ADDR, mmd_num);
	bus->write(bus, phy_id,
		   AR40XX_MII_ATH_MMD_DATA, reg_id);
	bus->write(bus, phy_id,
		   AR40XX_MII_ATH_MMD_ADDR,
		   0x4000 | mmd_num);
	value = bus->read(bus, phy_id, AR40XX_MII_ATH_MMD_DATA);
	mutex_unlock(&bus->mdio_lock);
	return value;
}
183
184 /* Start of swconfig support */
185
/* Poll all front-panel PHYs until each one clears BMCR_RESET, or warn
 * after ~500ms (500 iterations of >= 1ms). Called right after issuing
 * BMCR_RESET to the PHYs in ar40xx_phy_init(). */
static void
ar40xx_phy_poll_reset(struct ar40xx_priv *priv)
{
	u32 i, in_reset, retries = 500;
	struct mii_bus *bus = priv->mii_bus;

	/* Assume RESET was recently issued to some or all of the phys */
	in_reset = GENMASK(AR40XX_NUM_PHYS - 1, 0);

	while (retries--) {
		/* 1ms should be plenty of time.
		 * 802.3 spec allows for a max wait time of 500ms
		 */
		usleep_range(1000, 2000);

		for (i = 0; i < AR40XX_NUM_PHYS; i++) {
			int val;

			/* skip devices which have completed reset */
			if (!(in_reset & BIT(i)))
				continue;

			val = mdiobus_read(bus, i, MII_BMCR);
			/* read failures are retried on the next pass */
			if (val < 0)
				continue;

			/* mark when phy is no longer in reset state */
			if (!(val & BMCR_RESET))
				in_reset &= ~BIT(i);
		}

		if (!in_reset)
			return;
	}

	dev_warn(&bus->dev, "Failed to reset all phys! (in_reset: 0x%x)\n",
		 in_reset);
}
224
/* Re-enable normal operation on all front-panel PHYs: clear the manual
 * control override in debug register 0, advertise 10/100 (all modes,
 * plus pause) and 1000FULL, then soft-reset with autoneg enabled and
 * wait for the resets to complete. Iterates AR40XX_NUM_PORTS - 1 PHYs;
 * port 0 is the CPU port and has no PHY. Counterpart of
 * ar40xx_port_phy_linkdown(). */
static void
ar40xx_phy_init(struct ar40xx_priv *priv)
{
	int i;
	struct mii_bus *bus;
	u16 val;

	bus = priv->mii_bus;
	for (i = 0; i < AR40XX_NUM_PORTS - 1; i++) {
		ar40xx_phy_dbg_read(priv, i, AR40XX_PHY_DEBUG_0, &val);
		val &= ~AR40XX_PHY_MANU_CTRL_EN;
		ar40xx_phy_dbg_write(priv, i, AR40XX_PHY_DEBUG_0, val);
		mdiobus_write(bus, i,
			      MII_ADVERTISE, ADVERTISE_ALL |
			      ADVERTISE_PAUSE_CAP |
			      ADVERTISE_PAUSE_ASYM);
		mdiobus_write(bus, i, MII_CTRL1000, ADVERTISE_1000FULL);
		mdiobus_write(bus, i, MII_BMCR, BMCR_RESET | BMCR_ANENABLE);
	}

	ar40xx_phy_poll_reset(priv);
}
247
/* Force link down on all front-panel PHYs: clear all autoneg
 * advertisements (so renegotiation finds no common mode), soft-reset,
 * set the manual control override in debug register 0, and clear the
 * middle bits of debug register 2 to disable transmit.
 * Counterpart of ar40xx_phy_init(). */
static void
ar40xx_port_phy_linkdown(struct ar40xx_priv *priv)
{
	struct mii_bus *bus;
	int i;
	u16 val;

	bus = priv->mii_bus;
	for (i = 0; i < AR40XX_NUM_PORTS - 1; i++) {
		mdiobus_write(bus, i, MII_CTRL1000, 0);
		mdiobus_write(bus, i, MII_ADVERTISE, 0);
		mdiobus_write(bus, i, MII_BMCR, BMCR_RESET | BMCR_ANENABLE);
		ar40xx_phy_dbg_read(priv, i, AR40XX_PHY_DEBUG_0, &val);
		val |= AR40XX_PHY_MANU_CTRL_EN;
		ar40xx_phy_dbg_write(priv, i, AR40XX_PHY_DEBUG_0, val);
		/* disable transmit */
		ar40xx_phy_dbg_read(priv, i, AR40XX_PHY_DEBUG_2, &val);
		val &= 0xf00f;
		ar40xx_phy_dbg_write(priv, i, AR40XX_PHY_DEBUG_2, val);
	}
}
269
/* Program the hardware port-mirroring registers from the cached
 * priv->mirror_rx/mirror_tx/source_port/monitor_port state.
 * First resets all mirror configuration (monitor port = 0xF means
 * "none"), then re-enables it only if the cached configuration is
 * valid. Caller must hold priv->reg_mutex. */
static void
ar40xx_set_mirror_regs(struct ar40xx_priv *priv)
{
	int port;

	/* reset all mirror registers */
	ar40xx_rmw(priv, AR40XX_REG_FWD_CTRL0,
		   AR40XX_FWD_CTRL0_MIRROR_PORT,
		   (0xF << AR40XX_FWD_CTRL0_MIRROR_PORT_S));
	for (port = 0; port < AR40XX_NUM_PORTS; port++) {
		ar40xx_rmw(priv, AR40XX_REG_PORT_LOOKUP(port),
			   AR40XX_PORT_LOOKUP_ING_MIRROR_EN, 0);

		ar40xx_rmw(priv, AR40XX_REG_PORT_HOL_CTRL1(port),
			   AR40XX_PORT_HOL_CTRL1_EG_MIRROR_EN, 0);
	}

	/* now enable mirroring if necessary */
	if (priv->source_port >= AR40XX_NUM_PORTS ||
	    priv->monitor_port >= AR40XX_NUM_PORTS ||
	    priv->source_port == priv->monitor_port) {
		return;
	}

	ar40xx_rmw(priv, AR40XX_REG_FWD_CTRL0,
		   AR40XX_FWD_CTRL0_MIRROR_PORT,
		   (priv->monitor_port << AR40XX_FWD_CTRL0_MIRROR_PORT_S));

	if (priv->mirror_rx)
		ar40xx_rmw(priv, AR40XX_REG_PORT_LOOKUP(priv->source_port), 0,
			   AR40XX_PORT_LOOKUP_ING_MIRROR_EN);

	if (priv->mirror_tx)
		ar40xx_rmw(priv, AR40XX_REG_PORT_HOL_CTRL1(priv->source_port),
			   0, AR40XX_PORT_HOL_CTRL1_EG_MIRROR_EN);
}
306
307 static int
308 ar40xx_sw_get_ports(struct switch_dev *dev, struct switch_val *val)
309 {
310 struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
311 u8 ports = priv->vlan_table[val->port_vlan];
312 int i;
313
314 val->len = 0;
315 for (i = 0; i < dev->ports; i++) {
316 struct switch_port *p;
317
318 if (!(ports & BIT(i)))
319 continue;
320
321 p = &val->value.ports[val->len++];
322 p->id = i;
323 if ((priv->vlan_tagged & BIT(i)) ||
324 (priv->pvid[i] != val->port_vlan))
325 p->flags = BIT(SWITCH_PORT_FLAG_TAGGED);
326 else
327 p->flags = 0;
328 }
329 return 0;
330 }
331
332 static int
333 ar40xx_sw_set_ports(struct switch_dev *dev, struct switch_val *val)
334 {
335 struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
336 u8 *vt = &priv->vlan_table[val->port_vlan];
337 int i;
338
339 *vt = 0;
340 for (i = 0; i < val->len; i++) {
341 struct switch_port *p = &val->value.ports[i];
342
343 if (p->flags & BIT(SWITCH_PORT_FLAG_TAGGED)) {
344 if (val->port_vlan == priv->pvid[p->id])
345 priv->vlan_tagged |= BIT(p->id);
346 } else {
347 priv->vlan_tagged &= ~BIT(p->id);
348 priv->pvid[p->id] = val->port_vlan;
349 }
350
351 *vt |= BIT(p->id);
352 }
353 return 0;
354 }
355
356 static int
357 ar40xx_reg_wait(struct ar40xx_priv *priv, u32 reg, u32 mask, u32 val,
358 unsigned timeout)
359 {
360 int i;
361
362 for (i = 0; i < timeout; i++) {
363 u32 t;
364
365 t = ar40xx_read(priv, reg);
366 if ((t & mask) == val)
367 return 0;
368
369 usleep_range(1000, 2000);
370 }
371
372 return -ETIMEDOUT;
373 }
374
/* Kick off a MIB operation (@op is AR40XX_MIB_FUNC_CAPTURE or
 * AR40XX_MIB_FUNC_FLUSH) and wait up to ~10-20ms for the busy bit to
 * clear. Caller must hold priv->mib_lock.
 * Returns 0 on completion, -ETIMEDOUT otherwise. */
static int
ar40xx_mib_op(struct ar40xx_priv *priv, u32 op)
{
	int ret;

	lockdep_assert_held(&priv->mib_lock);

	/* Capture the hardware statistics for all ports */
	ar40xx_rmw(priv, AR40XX_REG_MIB_FUNC,
		   AR40XX_MIB_FUNC, (op << AR40XX_MIB_FUNC_S));

	/* Wait for the capturing to complete. */
	ret = ar40xx_reg_wait(priv, AR40XX_REG_MIB_FUNC,
			      AR40XX_MIB_BUSY, 0, 10);

	return ret;
}
392
/* Accumulate a port's captured hardware MIB counters into the software
 * totals at priv->mib_stats[port * num_mibs ...]; if @flush is true,
 * just zero the software totals instead. 2-word counters are read as
 * low word at mib->offset, high word at offset + 4.
 * Caller must hold priv->mib_lock and should have run a MIB capture
 * first (see ar40xx_mib_capture()). */
static void
ar40xx_mib_fetch_port_stat(struct ar40xx_priv *priv, int port, bool flush)
{
	unsigned int base;
	u64 *mib_stats;
	int i;
	u32 num_mibs = ARRAY_SIZE(ar40xx_mibs);

	WARN_ON(port >= priv->dev.ports);

	lockdep_assert_held(&priv->mib_lock);

	base = AR40XX_REG_PORT_STATS_START +
	       AR40XX_REG_PORT_STATS_LEN * port;

	mib_stats = &priv->mib_stats[port * num_mibs];
	if (flush) {
		u32 len;

		len = num_mibs * sizeof(*mib_stats);
		memset(mib_stats, 0, len);
		return;
	}
	for (i = 0; i < num_mibs; i++) {
		const struct ar40xx_mib_desc *mib;
		u64 t;

		mib = &ar40xx_mibs[i];
		t = ar40xx_read(priv, base + mib->offset);
		if (mib->size == 2) {
			u64 hi;

			hi = ar40xx_read(priv, base + mib->offset + 4);
			t |= hi << 32;
		}

		/* counters are cumulative across captures */
		mib_stats[i] += t;
	}
}
432
/* Snapshot all port counters into the MIB register block.
 * Caller must hold priv->mib_lock. */
static int
ar40xx_mib_capture(struct ar40xx_priv *priv)
{
	return ar40xx_mib_op(priv, AR40XX_MIB_FUNC_CAPTURE);
}

/* Clear all hardware MIB counters. Caller must hold priv->mib_lock. */
static int
ar40xx_mib_flush(struct ar40xx_priv *priv)
{
	return ar40xx_mib_op(priv, AR40XX_MIB_FUNC_FLUSH);
}
444
445 static int
446 ar40xx_sw_set_reset_mibs(struct switch_dev *dev,
447 const struct switch_attr *attr,
448 struct switch_val *val)
449 {
450 struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
451 unsigned int len;
452 int ret;
453 u32 num_mibs = ARRAY_SIZE(ar40xx_mibs);
454
455 mutex_lock(&priv->mib_lock);
456
457 len = priv->dev.ports * num_mibs * sizeof(*priv->mib_stats);
458 memset(priv->mib_stats, 0, len);
459 ret = ar40xx_mib_flush(priv);
460
461 mutex_unlock(&priv->mib_lock);
462 return ret;
463 }
464
465 static int
466 ar40xx_sw_set_vlan(struct switch_dev *dev, const struct switch_attr *attr,
467 struct switch_val *val)
468 {
469 struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
470
471 priv->vlan = !!val->value.i;
472 return 0;
473 }
474
475 static int
476 ar40xx_sw_get_vlan(struct switch_dev *dev, const struct switch_attr *attr,
477 struct switch_val *val)
478 {
479 struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
480
481 val->value.i = priv->vlan;
482 return 0;
483 }
484
485 static int
486 ar40xx_sw_set_mirror_rx_enable(struct switch_dev *dev,
487 const struct switch_attr *attr,
488 struct switch_val *val)
489 {
490 struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
491
492 mutex_lock(&priv->reg_mutex);
493 priv->mirror_rx = !!val->value.i;
494 ar40xx_set_mirror_regs(priv);
495 mutex_unlock(&priv->reg_mutex);
496
497 return 0;
498 }
499
500 static int
501 ar40xx_sw_get_mirror_rx_enable(struct switch_dev *dev,
502 const struct switch_attr *attr,
503 struct switch_val *val)
504 {
505 struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
506
507 mutex_lock(&priv->reg_mutex);
508 val->value.i = priv->mirror_rx;
509 mutex_unlock(&priv->reg_mutex);
510 return 0;
511 }
512
513 static int
514 ar40xx_sw_set_mirror_tx_enable(struct switch_dev *dev,
515 const struct switch_attr *attr,
516 struct switch_val *val)
517 {
518 struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
519
520 mutex_lock(&priv->reg_mutex);
521 priv->mirror_tx = !!val->value.i;
522 ar40xx_set_mirror_regs(priv);
523 mutex_unlock(&priv->reg_mutex);
524
525 return 0;
526 }
527
528 static int
529 ar40xx_sw_get_mirror_tx_enable(struct switch_dev *dev,
530 const struct switch_attr *attr,
531 struct switch_val *val)
532 {
533 struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
534
535 mutex_lock(&priv->reg_mutex);
536 val->value.i = priv->mirror_tx;
537 mutex_unlock(&priv->reg_mutex);
538 return 0;
539 }
540
541 static int
542 ar40xx_sw_set_mirror_monitor_port(struct switch_dev *dev,
543 const struct switch_attr *attr,
544 struct switch_val *val)
545 {
546 struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
547
548 mutex_lock(&priv->reg_mutex);
549 priv->monitor_port = val->value.i;
550 ar40xx_set_mirror_regs(priv);
551 mutex_unlock(&priv->reg_mutex);
552
553 return 0;
554 }
555
556 static int
557 ar40xx_sw_get_mirror_monitor_port(struct switch_dev *dev,
558 const struct switch_attr *attr,
559 struct switch_val *val)
560 {
561 struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
562
563 mutex_lock(&priv->reg_mutex);
564 val->value.i = priv->monitor_port;
565 mutex_unlock(&priv->reg_mutex);
566 return 0;
567 }
568
569 static int
570 ar40xx_sw_set_mirror_source_port(struct switch_dev *dev,
571 const struct switch_attr *attr,
572 struct switch_val *val)
573 {
574 struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
575
576 mutex_lock(&priv->reg_mutex);
577 priv->source_port = val->value.i;
578 ar40xx_set_mirror_regs(priv);
579 mutex_unlock(&priv->reg_mutex);
580
581 return 0;
582 }
583
584 static int
585 ar40xx_sw_get_mirror_source_port(struct switch_dev *dev,
586 const struct switch_attr *attr,
587 struct switch_val *val)
588 {
589 struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
590
591 mutex_lock(&priv->reg_mutex);
592 val->value.i = priv->source_port;
593 mutex_unlock(&priv->reg_mutex);
594 return 0;
595 }
596
597 static int
598 ar40xx_sw_set_linkdown(struct switch_dev *dev,
599 const struct switch_attr *attr,
600 struct switch_val *val)
601 {
602 struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
603
604 if (val->value.i == 1)
605 ar40xx_port_phy_linkdown(priv);
606 else
607 ar40xx_phy_init(priv);
608
609 return 0;
610 }
611
612 static int
613 ar40xx_sw_set_port_reset_mib(struct switch_dev *dev,
614 const struct switch_attr *attr,
615 struct switch_val *val)
616 {
617 struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
618 int port;
619 int ret;
620
621 port = val->port_vlan;
622 if (port >= dev->ports)
623 return -EINVAL;
624
625 mutex_lock(&priv->mib_lock);
626 ret = ar40xx_mib_capture(priv);
627 if (ret)
628 goto unlock;
629
630 ar40xx_mib_fetch_port_stat(priv, port, true);
631
632 unlock:
633 mutex_unlock(&priv->mib_lock);
634 return ret;
635 }
636
637 static int
638 ar40xx_sw_get_port_mib(struct switch_dev *dev,
639 const struct switch_attr *attr,
640 struct switch_val *val)
641 {
642 struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
643 u64 *mib_stats;
644 int port;
645 int ret;
646 char *buf = priv->buf;
647 int i, len = 0;
648 u32 num_mibs = ARRAY_SIZE(ar40xx_mibs);
649
650 port = val->port_vlan;
651 if (port >= dev->ports)
652 return -EINVAL;
653
654 mutex_lock(&priv->mib_lock);
655 ret = ar40xx_mib_capture(priv);
656 if (ret)
657 goto unlock;
658
659 ar40xx_mib_fetch_port_stat(priv, port, false);
660
661 len += snprintf(buf + len, sizeof(priv->buf) - len,
662 "Port %d MIB counters\n",
663 port);
664
665 mib_stats = &priv->mib_stats[port * num_mibs];
666 for (i = 0; i < num_mibs; i++)
667 len += snprintf(buf + len, sizeof(priv->buf) - len,
668 "%-12s: %llu\n",
669 ar40xx_mibs[i].name,
670 mib_stats[i]);
671
672 val->value.s = buf;
673 val->len = len;
674
675 unlock:
676 mutex_unlock(&priv->mib_lock);
677 return ret;
678 }
679
680 static int
681 ar40xx_sw_set_vid(struct switch_dev *dev, const struct switch_attr *attr,
682 struct switch_val *val)
683 {
684 struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
685
686 priv->vlan_id[val->port_vlan] = val->value.i;
687 return 0;
688 }
689
690 static int
691 ar40xx_sw_get_vid(struct switch_dev *dev, const struct switch_attr *attr,
692 struct switch_val *val)
693 {
694 struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
695
696 val->value.i = priv->vlan_id[val->port_vlan];
697 return 0;
698 }
699
700 static int
701 ar40xx_sw_get_pvid(struct switch_dev *dev, int port, int *vlan)
702 {
703 struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
704 *vlan = priv->pvid[port];
705 return 0;
706 }
707
708 static int
709 ar40xx_sw_set_pvid(struct switch_dev *dev, int port, int vlan)
710 {
711 struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
712
713 /* make sure no invalid PVIDs get set */
714 if (vlan >= dev->vlans)
715 return -EINVAL;
716
717 priv->pvid[port] = vlan;
718 return 0;
719 }
720
721 static void
722 ar40xx_read_port_link(struct ar40xx_priv *priv, int port,
723 struct switch_port_link *link)
724 {
725 u32 status;
726 u32 speed;
727
728 memset(link, 0, sizeof(*link));
729
730 status = ar40xx_read(priv, AR40XX_REG_PORT_STATUS(port));
731
732 link->aneg = !!(status & AR40XX_PORT_AUTO_LINK_EN);
733 if (link->aneg || (port != AR40XX_PORT_CPU))
734 link->link = !!(status & AR40XX_PORT_STATUS_LINK_UP);
735 else
736 link->link = true;
737
738 if (!link->link)
739 return;
740
741 link->duplex = !!(status & AR40XX_PORT_DUPLEX);
742 link->tx_flow = !!(status & AR40XX_PORT_STATUS_TXFLOW);
743 link->rx_flow = !!(status & AR40XX_PORT_STATUS_RXFLOW);
744
745 speed = (status & AR40XX_PORT_SPEED) >>
746 AR40XX_PORT_STATUS_SPEED_S;
747
748 switch (speed) {
749 case AR40XX_PORT_SPEED_10M:
750 link->speed = SWITCH_PORT_SPEED_10;
751 break;
752 case AR40XX_PORT_SPEED_100M:
753 link->speed = SWITCH_PORT_SPEED_100;
754 break;
755 case AR40XX_PORT_SPEED_1000M:
756 link->speed = SWITCH_PORT_SPEED_1000;
757 break;
758 default:
759 link->speed = SWITCH_PORT_SPEED_UNKNOWN;
760 break;
761 }
762 }
763
/* swconfig get_port_link: thin wrapper around
 * ar40xx_read_port_link(). */
static int
ar40xx_sw_get_port_link(struct switch_dev *dev, int port,
			struct switch_port_link *link)
{
	ar40xx_read_port_link(swdev_to_ar40xx(dev), port, link);

	return 0;
}
773
/* Global (switch-wide) swconfig attributes. */
static const struct switch_attr ar40xx_sw_attr_globals[] = {
	{
		.type = SWITCH_TYPE_INT,
		.name = "enable_vlan",
		.description = "Enable VLAN mode",
		.set = ar40xx_sw_set_vlan,
		.get = ar40xx_sw_get_vlan,
		.max = 1
	},
	{
		.type = SWITCH_TYPE_NOVAL,
		.name = "reset_mibs",
		.description = "Reset all MIB counters",
		.set = ar40xx_sw_set_reset_mibs,
	},
	{
		.type = SWITCH_TYPE_INT,
		.name = "enable_mirror_rx",
		.description = "Enable mirroring of RX packets",
		.set = ar40xx_sw_set_mirror_rx_enable,
		.get = ar40xx_sw_get_mirror_rx_enable,
		.max = 1
	},
	{
		.type = SWITCH_TYPE_INT,
		.name = "enable_mirror_tx",
		.description = "Enable mirroring of TX packets",
		.set = ar40xx_sw_set_mirror_tx_enable,
		.get = ar40xx_sw_get_mirror_tx_enable,
		.max = 1
	},
	{
		.type = SWITCH_TYPE_INT,
		.name = "mirror_monitor_port",
		.description = "Mirror monitor port",
		.set = ar40xx_sw_set_mirror_monitor_port,
		.get = ar40xx_sw_get_mirror_monitor_port,
		.max = AR40XX_NUM_PORTS - 1
	},
	{
		.type = SWITCH_TYPE_INT,
		.name = "mirror_source_port",
		.description = "Mirror source port",
		.set = ar40xx_sw_set_mirror_source_port,
		.get = ar40xx_sw_get_mirror_source_port,
		.max = AR40XX_NUM_PORTS - 1
	},
	{
		/* write-only: no getter, reading is not meaningful */
		.type = SWITCH_TYPE_INT,
		.name = "linkdown",
		.description = "Link down all the PHYs",
		.set = ar40xx_sw_set_linkdown,
		.max = 1
	},
};

/* Per-port swconfig attributes. */
static const struct switch_attr ar40xx_sw_attr_port[] = {
	{
		.type = SWITCH_TYPE_NOVAL,
		.name = "reset_mib",
		.description = "Reset single port MIB counters",
		.set = ar40xx_sw_set_port_reset_mib,
	},
	{
		.type = SWITCH_TYPE_STRING,
		.name = "mib",
		.description = "Get port's MIB counters",
		.set = NULL,
		.get = ar40xx_sw_get_port_mib,
	},
};

/* Per-VLAN swconfig attributes.
 * NOTE(review): unlike the two tables above this one is not static —
 * confirm whether anything outside this file references it; if not, it
 * should be made static for consistency. */
const struct switch_attr ar40xx_sw_attr_vlan[] = {
	{
		.type = SWITCH_TYPE_INT,
		.name = "vid",
		.description = "VLAN ID (0-4094)",
		.set = ar40xx_sw_set_vid,
		.get = ar40xx_sw_get_vid,
		.max = 4094,
	},
};
856
857 /* End of swconfig support */
858
859 static int
860 ar40xx_wait_bit(struct ar40xx_priv *priv, int reg, u32 mask, u32 val)
861 {
862 int timeout = 20;
863 u32 t;
864
865 while (1) {
866 t = ar40xx_read(priv, reg);
867 if ((t & mask) == val)
868 return 0;
869
870 if (timeout-- <= 0)
871 break;
872
873 usleep_range(10, 20);
874 }
875
876 pr_err("ar40xx: timeout for reg %08x: %08x & %08x != %08x\n",
877 (unsigned int)reg, t, mask, val);
878 return -ETIMEDOUT;
879 }
880
881 static int
882 ar40xx_atu_flush(struct ar40xx_priv *priv)
883 {
884 int ret;
885
886 ret = ar40xx_wait_bit(priv, AR40XX_REG_ATU_FUNC,
887 AR40XX_ATU_FUNC_BUSY, 0);
888 if (!ret)
889 ar40xx_write(priv, AR40XX_REG_ATU_FUNC,
890 AR40XX_ATU_FUNC_OP_FLUSH |
891 AR40XX_ATU_FUNC_BUSY);
892
893 return ret;
894 }
895
/* Pulse the ESS switch core reset line, then wait for the switch's
 * internal tables to finish initialising (takes 5-10ms per the
 * original vendor comment). */
static void
ar40xx_ess_reset(struct ar40xx_priv *priv)
{
	reset_control_assert(priv->ess_rst);
	mdelay(10);
	reset_control_deassert(priv->ess_rst);
	/* Waiting for all inner tables init done.
	 * It cost 5~10ms.
	 */
	mdelay(10);

	pr_info("ESS reset ok!\n");
}
909
910 /* Start of psgmii self test */
911
/* Reset and recalibrate the PSGMII link between the Malibu PHY package
 * and the ESS (Dakota) switch. The sequence and its magic register
 * values come from the vendor; the MDIO op order is significant.
 * NOTE(review): MDIO address 5 is presumably the Malibu PSGMII control
 * PHY — confirm against the board's MDIO layout. */
static void
ar40xx_malibu_psgmii_ess_reset(struct ar40xx_priv *priv)
{
	u32 n;
	struct mii_bus *bus = priv->mii_bus;
	/* reset phy psgmii */
	/* fix phy psgmii RX 20bit */
	mdiobus_write(bus, 5, 0x0, 0x005b);
	/* reset phy psgmii */
	mdiobus_write(bus, 5, 0x0, 0x001b);
	/* release reset phy psgmii */
	mdiobus_write(bus, 5, 0x0, 0x005b);

	/* poll until the Malibu-side PSGMII PLL reports calibration done */
	for (n = 0; n < AR40XX_PSGMII_CALB_NUM; n++) {
		u16 status;

		status = ar40xx_phy_mmd_read(priv, 5, 1, 0x28);
		if (status & BIT(0))
			break;
		/* Polling interval to check PSGMII PLL in malibu is ready
		 * the worst time is 8.67ms
		 * for 25MHz reference clock
		 * [512+(128+2048)*49]*80ns+100us
		 */
		mdelay(2);
	}

	/* check malibu psgmii calibration done end.. */

	/* freeze phy psgmii RX CDR */
	mdiobus_write(bus, 5, 0x1a, 0x2230);

	ar40xx_ess_reset(priv);

	/* poll until the ESS-side PSGMII PLL reports calibration done */
	for (n = 0; n < AR40XX_PSGMII_CALB_NUM; n++) {
		u32 status;

		status = ar40xx_psgmii_read(priv, 0xa0);
		if (status & BIT(0))
			break;
		/* Polling interval to check PSGMII PLL in ESS is ready */
		mdelay(2);
	}

	/* check dakota psgmii calibration done end.. */

	/* release phy psgmii RX CDR */
	mdiobus_write(bus, 5, 0x1a, 0x3230);
	/* release phy psgmii RX 20bit */
	mdiobus_write(bus, 5, 0x0, 0x005f);
}
964
/* Run the vendor's per-PHY PSGMII loopback traffic test: reset the PHY,
 * wait for link, have its built-in packet generator send 4096 frames,
 * then check the MMD7 TX counters. Clears/sets bit @phy of
 * priv->phy_t_status to record pass/fail, and powers the PHY down
 * again on exit (0x1840). The MMD register numbers are vendor magic.
 * NOTE(review): rx_all_ok/rx_error are computed but never checked —
 * presumably only the TX direction matters for this test; confirm
 * against vendor documentation. */
static void
ar40xx_psgmii_single_phy_testing(struct ar40xx_priv *priv, int phy)
{
	int j;
	u32 tx_ok, tx_error;
	u32 rx_ok, rx_error;
	u32 tx_ok_high16;
	u32 rx_ok_high16;
	u32 tx_all_ok, rx_all_ok;
	struct mii_bus *bus = priv->mii_bus;

	/* soft-reset, then force 1000FULL (loopback test speed) */
	mdiobus_write(bus, phy, 0x0, 0x9000);
	mdiobus_write(bus, phy, 0x0, 0x4140);

	for (j = 0; j < AR40XX_PSGMII_CALB_NUM; j++) {
		u16 status;

		status = mdiobus_read(bus, phy, 0x11);
		if (status & AR40XX_PHY_SPEC_STATUS_LINK)
			break;
		/* the polling interval to check if the PHY link up or not
		 * maxwait_timer: 750 ms +/-10 ms
		 * minwait_timer : 1 us +/- 0.1us
		 * time resides in minwait_timer ~ maxwait_timer
		 * see IEEE 802.3 section 40.4.5.2
		 */
		mdelay(8);
	}

	/* enable check */
	ar40xx_phy_mmd_write(priv, phy, 7, 0x8029, 0x0000);
	ar40xx_phy_mmd_write(priv, phy, 7, 0x8029, 0x0003);

	/* start traffic */
	ar40xx_phy_mmd_write(priv, phy, 7, 0x8020, 0xa000);
	/* wait for all traffic end
	 * 4096(pkt num)*1524(size)*8ns(125MHz)=49.9ms
	 */
	mdelay(50);

	/* check counter */
	tx_ok = ar40xx_phy_mmd_read(priv, phy, 7, 0x802e);
	tx_ok_high16 = ar40xx_phy_mmd_read(priv, phy, 7, 0x802d);
	tx_error = ar40xx_phy_mmd_read(priv, phy, 7, 0x802f);
	rx_ok = ar40xx_phy_mmd_read(priv, phy, 7, 0x802b);
	rx_ok_high16 = ar40xx_phy_mmd_read(priv, phy, 7, 0x802a);
	rx_error = ar40xx_phy_mmd_read(priv, phy, 7, 0x802c);
	tx_all_ok = tx_ok + (tx_ok_high16 << 16);
	rx_all_ok = rx_ok + (rx_ok_high16 << 16);
	if (tx_all_ok == 0x1000 && tx_error == 0) {
		/* success: all 4096 frames sent without error */
		priv->phy_t_status &= (~BIT(phy));
	} else {
		pr_info("PHY %d single test PSGMII issue happen!\n", phy);
		priv->phy_t_status |= BIT(phy);
	}

	/* power the PHY back down until the combined test runs */
	mdiobus_write(bus, phy, 0x0, 0x1840);
}
1024
/* Run the vendor's combined PSGMII traffic test on all front-panel
 * PHYs at once, using MDIO broadcast address 0x1f (enabled earlier in
 * ar40xx_psgmii_self_test()). Results are recorded per PHY in bits
 * 8.. of priv->phy_t_status (single-test results occupy bits 0..7). */
static void
ar40xx_psgmii_all_phy_testing(struct ar40xx_priv *priv)
{
	int phy, j;
	struct mii_bus *bus = priv->mii_bus;

	/* broadcast: soft-reset, then force 1000FULL */
	mdiobus_write(bus, 0x1f, 0x0, 0x9000);
	mdiobus_write(bus, 0x1f, 0x0, 0x4140);

	/* wait until every PHY reports link (bit 10 of reg 0x11) */
	for (j = 0; j < AR40XX_PSGMII_CALB_NUM; j++) {
		for (phy = 0; phy < AR40XX_NUM_PORTS - 1; phy++) {
			u16 status;

			status = mdiobus_read(bus, phy, 0x11);
			if (!(status & BIT(10)))
				break;
		}

		if (phy >= (AR40XX_NUM_PORTS - 1))
			break;
		/* The polling interval to check if the PHY link up or not */
		mdelay(8);
	}
	/* enable check */
	ar40xx_phy_mmd_write(priv, 0x1f, 7, 0x8029, 0x0000);
	ar40xx_phy_mmd_write(priv, 0x1f, 7, 0x8029, 0x0003);

	/* start traffic */
	ar40xx_phy_mmd_write(priv, 0x1f, 7, 0x8020, 0xa000);
	/* wait for all traffic end
	 * 4096(pkt num)*1524(size)*8ns(125MHz)=49.9ms
	 */
	mdelay(50);

	for (phy = 0; phy < AR40XX_NUM_PORTS - 1; phy++) {
		u32 tx_ok, tx_error;
		u32 rx_ok, rx_error;
		u32 tx_ok_high16;
		u32 rx_ok_high16;
		u32 tx_all_ok, rx_all_ok;

		/* check counter */
		tx_ok = ar40xx_phy_mmd_read(priv, phy, 7, 0x802e);
		tx_ok_high16 = ar40xx_phy_mmd_read(priv, phy, 7, 0x802d);
		tx_error = ar40xx_phy_mmd_read(priv, phy, 7, 0x802f);
		rx_ok = ar40xx_phy_mmd_read(priv, phy, 7, 0x802b);
		rx_ok_high16 = ar40xx_phy_mmd_read(priv, phy, 7, 0x802a);
		rx_error = ar40xx_phy_mmd_read(priv, phy, 7, 0x802c);
		tx_all_ok = tx_ok + (tx_ok_high16<<16);
		rx_all_ok = rx_ok + (rx_ok_high16<<16);
		if (tx_all_ok == 0x1000 && tx_error == 0) {
			/* success: all 4096 frames sent without error */
			priv->phy_t_status &= ~BIT(phy + 8);
		} else {
			pr_info("PHY%d test see issue!\n", phy);
			priv->phy_t_status |= BIT(phy + 8);
		}
	}

	pr_debug("PHY all test 0x%x \r\n", priv->phy_t_status);
}
1086
/* Full PSGMII link self test: reset/recalibrate the link, then repeat
 * (up to AR40XX_PSGMII_CALB_NUM times) a per-PHY loopback traffic test
 * followed by an all-PHY broadcast test, re-running the calibration
 * whenever any PHY reports errors. Uses MDIO broadcast address 0x1f
 * after enabling broadcast writes on every PHY. Non-static: called
 * from elsewhere in the driver. Pair with
 * ar40xx_psgmii_self_test_clean() afterwards. */
void
ar40xx_psgmii_self_test(struct ar40xx_priv *priv)
{
	u32 i, phy;
	struct mii_bus *bus = priv->mii_bus;

	ar40xx_malibu_psgmii_ess_reset(priv);

	/* switch to access MII reg for copper */
	mdiobus_write(bus, 4, 0x1f, 0x8500);
	for (phy = 0; phy < AR40XX_NUM_PORTS - 1; phy++) {
		/* enable phy mdio broadcast write */
		ar40xx_phy_mmd_write(priv, phy, 7, 0x8028, 0x801f);
	}
	/* force no link by power down */
	mdiobus_write(bus, 0x1f, 0x0, 0x1840);
	/* packet number */
	ar40xx_phy_mmd_write(priv, 0x1f, 7, 0x8021, 0x1000);
	ar40xx_phy_mmd_write(priv, 0x1f, 7, 0x8062, 0x05e0);

	/* fix mdi status */
	mdiobus_write(bus, 0x1f, 0x10, 0x6800);
	for (i = 0; i < AR40XX_PSGMII_CALB_NUM; i++) {
		priv->phy_t_status = 0;

		/* put every front-panel port's MAC into loopback */
		for (phy = 0; phy < AR40XX_NUM_PORTS - 1; phy++) {
			ar40xx_rmw(priv, AR40XX_REG_PORT_LOOKUP(phy + 1),
				   AR40XX_PORT_LOOKUP_LOOPBACK,
				   AR40XX_PORT_LOOKUP_LOOPBACK);
		}

		for (phy = 0; phy < AR40XX_NUM_PORTS - 1; phy++)
			ar40xx_psgmii_single_phy_testing(priv, phy);

		ar40xx_psgmii_all_phy_testing(priv);

		if (priv->phy_t_status)
			ar40xx_malibu_psgmii_ess_reset(priv);
		else
			break;
	}

	if (i >= AR40XX_PSGMII_CALB_NUM)
		pr_info("PSGMII cannot recover\n");
	else
		pr_debug("PSGMII recovered after %d times reset\n", i);

	/* configuration recover */
	/* packet number */
	ar40xx_phy_mmd_write(priv, 0x1f, 7, 0x8021, 0x0);
	/* disable check */
	ar40xx_phy_mmd_write(priv, 0x1f, 7, 0x8029, 0x0);
	/* disable traffic */
	ar40xx_phy_mmd_write(priv, 0x1f, 7, 0x8020, 0x0);
}
1142
/* Undo the self-test configuration: restore PHY MDI/power state,
 * take every port MAC out of loopback, disable MDIO broadcast writes,
 * and flush the FDB entries learned during the looped traffic.
 * Non-static: called from elsewhere in the driver after
 * ar40xx_psgmii_self_test(). */
void
ar40xx_psgmii_self_test_clean(struct ar40xx_priv *priv)
{
	int phy;
	struct mii_bus *bus = priv->mii_bus;

	/* disable phy internal loopback */
	mdiobus_write(bus, 0x1f, 0x10, 0x6860);
	mdiobus_write(bus, 0x1f, 0x0, 0x9040);

	for (phy = 0; phy < AR40XX_NUM_PORTS - 1; phy++) {
		/* disable mac loop back */
		ar40xx_rmw(priv, AR40XX_REG_PORT_LOOKUP(phy + 1),
			   AR40XX_PORT_LOOKUP_LOOPBACK, 0);
		/* disable phy mdio broadcast write */
		ar40xx_phy_mmd_write(priv, phy, 7, 0x8028, 0x001f);
	}

	/* clear fdb entry */
	ar40xx_atu_flush(priv);
}
1164
1165 /* End of psgmii self test */
1166
1167 static void
1168 ar40xx_mac_mode_init(struct ar40xx_priv *priv, u32 mode)
1169 {
1170 if (mode == PORT_WRAPPER_PSGMII) {
1171 ar40xx_psgmii_write(priv, AR40XX_PSGMII_MODE_CONTROL, 0x2200);
1172 ar40xx_psgmii_write(priv, AR40XX_PSGMIIPHY_TX_CONTROL, 0x8380);
1173 }
1174 }
1175
1176 static
1177 int ar40xx_cpuport_setup(struct ar40xx_priv *priv)
1178 {
1179 u32 t;
1180
1181 t = AR40XX_PORT_STATUS_TXFLOW |
1182 AR40XX_PORT_STATUS_RXFLOW |
1183 AR40XX_PORT_TXHALF_FLOW |
1184 AR40XX_PORT_DUPLEX |
1185 AR40XX_PORT_SPEED_1000M;
1186 ar40xx_write(priv, AR40XX_REG_PORT_STATUS(0), t);
1187 usleep_range(10, 20);
1188
1189 t |= AR40XX_PORT_TX_EN |
1190 AR40XX_PORT_RX_EN;
1191 ar40xx_write(priv, AR40XX_REG_PORT_STATUS(0), t);
1192
1193 return 0;
1194 }
1195
1196 static void
1197 ar40xx_init_port(struct ar40xx_priv *priv, int port)
1198 {
1199 u32 t;
1200
1201 ar40xx_rmw(priv, AR40XX_REG_PORT_STATUS(port),
1202 AR40XX_PORT_AUTO_LINK_EN, 0);
1203
1204 ar40xx_write(priv, AR40XX_REG_PORT_HEADER(port), 0);
1205
1206 ar40xx_write(priv, AR40XX_REG_PORT_VLAN0(port), 0);
1207
1208 t = AR40XX_PORT_VLAN1_OUT_MODE_UNTOUCH << AR40XX_PORT_VLAN1_OUT_MODE_S;
1209 ar40xx_write(priv, AR40XX_REG_PORT_VLAN1(port), t);
1210
1211 t = AR40XX_PORT_LOOKUP_LEARN;
1212 t |= AR40XX_PORT_STATE_FORWARD << AR40XX_PORT_LOOKUP_STATE_S;
1213 ar40xx_write(priv, AR40XX_REG_PORT_LOOKUP(port), t);
1214 }
1215
1216 void
1217 ar40xx_init_globals(struct ar40xx_priv *priv)
1218 {
1219 u32 t;
1220
1221 /* enable CPU port and disable mirror port */
1222 t = AR40XX_FWD_CTRL0_CPU_PORT_EN |
1223 AR40XX_FWD_CTRL0_MIRROR_PORT;
1224 ar40xx_write(priv, AR40XX_REG_FWD_CTRL0, t);
1225
1226 /* forward multicast and broadcast frames to CPU */
1227 t = (AR40XX_PORTS_ALL << AR40XX_FWD_CTRL1_UC_FLOOD_S) |
1228 (AR40XX_PORTS_ALL << AR40XX_FWD_CTRL1_MC_FLOOD_S) |
1229 (AR40XX_PORTS_ALL << AR40XX_FWD_CTRL1_BC_FLOOD_S);
1230 ar40xx_write(priv, AR40XX_REG_FWD_CTRL1, t);
1231
1232 /* enable jumbo frames */
1233 ar40xx_rmw(priv, AR40XX_REG_MAX_FRAME_SIZE,
1234 AR40XX_MAX_FRAME_SIZE_MTU, 9018 + 8 + 2);
1235
1236 /* Enable MIB counters */
1237 ar40xx_rmw(priv, AR40XX_REG_MODULE_EN, 0,
1238 AR40XX_MODULE_EN_MIB);
1239
1240 /* Disable AZ */
1241 ar40xx_write(priv, AR40XX_REG_EEE_CTRL, 0);
1242
1243 /* set flowctrl thershold for cpu port */
1244 t = (AR40XX_PORT0_FC_THRESH_ON_DFLT << 16) |
1245 AR40XX_PORT0_FC_THRESH_OFF_DFLT;
1246 ar40xx_write(priv, AR40XX_REG_PORT_FLOWCTRL_THRESH(0), t);
1247 }
1248
1249 static int
1250 ar40xx_hw_init(struct ar40xx_priv *priv)
1251 {
1252 u32 i;
1253
1254 ar40xx_ess_reset(priv);
1255
1256 if (!priv->mii_bus)
1257 return -1;
1258
1259 ar40xx_psgmii_self_test(priv);
1260 ar40xx_psgmii_self_test_clean(priv);
1261
1262 ar40xx_mac_mode_init(priv, priv->mac_mode);
1263
1264 for (i = 0; i < priv->dev.ports; i++)
1265 ar40xx_init_port(priv, i);
1266
1267 ar40xx_init_globals(priv);
1268
1269 return 0;
1270 }
1271
1272 /* Start of qm error WAR */
1273
1274 static
1275 int ar40xx_force_1g_full(struct ar40xx_priv *priv, u32 port_id)
1276 {
1277 u32 reg;
1278
1279 if (port_id < 0 || port_id > 6)
1280 return -1;
1281
1282 reg = AR40XX_REG_PORT_STATUS(port_id);
1283 return ar40xx_rmw(priv, reg, AR40XX_PORT_SPEED,
1284 (AR40XX_PORT_SPEED_1000M | AR40XX_PORT_DUPLEX));
1285 }
1286
1287 static
1288 int ar40xx_get_qm_status(struct ar40xx_priv *priv,
1289 u32 port_id, u32 *qm_buffer_err)
1290 {
1291 u32 reg;
1292 u32 qm_val;
1293
1294 if (port_id < 1 || port_id > 5) {
1295 *qm_buffer_err = 0;
1296 return -1;
1297 }
1298
1299 if (port_id < 4) {
1300 reg = AR40XX_REG_QM_PORT0_3_QNUM;
1301 ar40xx_write(priv, AR40XX_REG_QM_DEBUG_ADDR, reg);
1302 qm_val = ar40xx_read(priv, AR40XX_REG_QM_DEBUG_VALUE);
1303 /* every 8 bits for each port */
1304 *qm_buffer_err = (qm_val >> (port_id * 8)) & 0xFF;
1305 } else {
1306 reg = AR40XX_REG_QM_PORT4_6_QNUM;
1307 ar40xx_write(priv, AR40XX_REG_QM_DEBUG_ADDR, reg);
1308 qm_val = ar40xx_read(priv, AR40XX_REG_QM_DEBUG_VALUE);
1309 /* every 8 bits for each port */
1310 *qm_buffer_err = (qm_val >> ((port_id-4) * 8)) & 0xFF;
1311 }
1312
1313 return 0;
1314 }
1315
/* Periodic software link poller and QM-hang workaround.
 *
 * For every front-panel port this reads the PHY's specific-status
 * register, tracks link transitions against the cached state in
 * priv->ar40xx_port_old_link[], and on link-down forces the MAC to
 * 1G-full while the queue manager drains. Called from the qm_dwork
 * delayed work with priv->qm_lock held.
 */
static void
ar40xx_sw_mac_polling_task(struct ar40xx_priv *priv)
{
	static int task_count;
	u32 i;
	u32 reg, value;
	u32 link, speed, duplex;
	u32 qm_buffer_err;
	u16 port_phy_status[AR40XX_NUM_PORTS];
	static u32 qm_err_cnt[AR40XX_NUM_PORTS] = {0, 0, 0, 0, 0, 0};
	static u32 link_cnt[AR40XX_NUM_PORTS] = {0, 0, 0, 0, 0, 0};
	struct mii_bus *bus = NULL;

	if (!priv || !priv->mii_bus)
		return;

	bus = priv->mii_bus;

	++task_count;

	/* Port i (1..5) is wired to PHY address i-1 */
	for (i = 1; i < AR40XX_NUM_PORTS; ++i) {
		port_phy_status[i] =
			mdiobus_read(bus, i-1, AR40XX_PHY_SPEC_STATUS);
		/* Extract the speed/link/duplex fields from the status word */
		speed = link = duplex = port_phy_status[i];
		speed &= AR40XX_PHY_SPEC_STATUS_SPEED;
		speed >>= 14;
		link &= AR40XX_PHY_SPEC_STATUS_LINK;
		link >>= 10;
		duplex &= AR40XX_PHY_SPEC_STATUS_DUPLEX;
		duplex >>= 13;

		if (link != priv->ar40xx_port_old_link[i]) {
			++link_cnt[i];
			/* Up --> Down */
			if ((priv->ar40xx_port_old_link[i] ==
					AR40XX_PORT_LINK_UP) &&
			    (link == AR40XX_PORT_LINK_DOWN)) {
				/* LINK_EN disable(MAC force mode)*/
				reg = AR40XX_REG_PORT_STATUS(i);
				ar40xx_rmw(priv, reg,
						AR40XX_PORT_AUTO_LINK_EN, 0);

				/* Check queue buffer */
				qm_err_cnt[i] = 0;
				ar40xx_get_qm_status(priv, i, &qm_buffer_err);
				if (qm_buffer_err) {
					/* Queue not drained yet; keep polling
					 * it below until it empties.
					 */
					priv->ar40xx_port_qm_buf[i] =
						AR40XX_QM_NOT_EMPTY;
				} else {
					u16 phy_val = 0;

					priv->ar40xx_port_qm_buf[i] =
						AR40XX_QM_EMPTY;
					ar40xx_force_1g_full(priv, i);
					/* Ref:QCA8337 Datasheet,Clearing
					 * MENU_CTRL_EN prevents phy to
					 * stuck in 100BT mode when
					 * bringing up the link
					 */
					ar40xx_phy_dbg_read(priv, i-1,
							    AR40XX_PHY_DEBUG_0,
							    &phy_val);
					phy_val &= (~AR40XX_PHY_MANU_CTRL_EN);
					ar40xx_phy_dbg_write(priv, i-1,
							     AR40XX_PHY_DEBUG_0,
							     phy_val);
				}
				priv->ar40xx_port_old_link[i] = link;
			} else if ((priv->ar40xx_port_old_link[i] ==
						AR40XX_PORT_LINK_DOWN) &&
					(link == AR40XX_PORT_LINK_UP)) {
				/* Down --> Up */
				/* Debounce: require the link up for two
				 * consecutive polls before acting on it.
				 */
				if (priv->port_link_up[i] < 1) {
					++priv->port_link_up[i];
				} else {
					/* Change port status */
					reg = AR40XX_REG_PORT_STATUS(i);
					value = ar40xx_read(priv, reg);
					priv->port_link_up[i] = 0;

					value &= ~(AR40XX_PORT_DUPLEX |
						   AR40XX_PORT_SPEED);
					value |= speed | (duplex ? BIT(6) : 0);
					ar40xx_write(priv, reg, value);
					/* clock switch need such time
					 * to avoid glitch
					 */
					usleep_range(100, 200);

					value |= AR40XX_PORT_AUTO_LINK_EN;
					ar40xx_write(priv, reg, value);
					/* HW need such time to make sure link
					 * stable before enable MAC
					 */
					usleep_range(100, 200);

					if (speed == AR40XX_PORT_SPEED_100M) {
						u16 phy_val = 0;
						/* Enable @100M, if down to 10M
						 * clock will change smoothly
						 */
						ar40xx_phy_dbg_read(priv, i-1,
								    0,
								    &phy_val);
						phy_val |=
							AR40XX_PHY_MANU_CTRL_EN;
						ar40xx_phy_dbg_write(priv, i-1,
								     0,
								     phy_val);
					}
					priv->ar40xx_port_old_link[i] = link;
				}
			}
		}

		if (priv->ar40xx_port_qm_buf[i] == AR40XX_QM_NOT_EMPTY) {
			/* Check QM */
			ar40xx_get_qm_status(priv, i, &qm_buffer_err);
			if (qm_buffer_err) {
				++qm_err_cnt[i];
			} else {
				/* Queue finally drained; force 1G-full now */
				priv->ar40xx_port_qm_buf[i] =
						AR40XX_QM_EMPTY;
				qm_err_cnt[i] = 0;
				ar40xx_force_1g_full(priv, i);
			}
		}
	}
}
1445
1446 static void
1447 ar40xx_qm_err_check_work_task(struct work_struct *work)
1448 {
1449 struct ar40xx_priv *priv = container_of(work, struct ar40xx_priv,
1450 qm_dwork.work);
1451
1452 mutex_lock(&priv->qm_lock);
1453
1454 ar40xx_sw_mac_polling_task(priv);
1455
1456 mutex_unlock(&priv->qm_lock);
1457
1458 schedule_delayed_work(&priv->qm_dwork,
1459 msecs_to_jiffies(AR40XX_QM_WORK_DELAY));
1460 }
1461
1462 static int
1463 ar40xx_qm_err_check_work_start(struct ar40xx_priv *priv)
1464 {
1465 mutex_init(&priv->qm_lock);
1466
1467 INIT_DELAYED_WORK(&priv->qm_dwork, ar40xx_qm_err_check_work_task);
1468
1469 schedule_delayed_work(&priv->qm_dwork,
1470 msecs_to_jiffies(AR40XX_QM_WORK_DELAY));
1471
1472 return 0;
1473 }
1474
1475 /* End of qm error WAR */
1476
1477 static int
1478 ar40xx_vlan_init(struct ar40xx_priv *priv)
1479 {
1480 int port;
1481 unsigned long bmp;
1482
1483 /* By default Enable VLAN */
1484 priv->vlan = 1;
1485 priv->vlan_table[AR40XX_LAN_VLAN] = priv->cpu_bmp | priv->lan_bmp;
1486 priv->vlan_table[AR40XX_WAN_VLAN] = priv->cpu_bmp | priv->wan_bmp;
1487 priv->vlan_tagged = priv->cpu_bmp;
1488 bmp = priv->lan_bmp;
1489 for_each_set_bit(port, &bmp, AR40XX_NUM_PORTS)
1490 priv->pvid[port] = AR40XX_LAN_VLAN;
1491
1492 bmp = priv->wan_bmp;
1493 for_each_set_bit(port, &bmp, AR40XX_NUM_PORTS)
1494 priv->pvid[port] = AR40XX_WAN_VLAN;
1495
1496 return 0;
1497 }
1498
1499 static void
1500 ar40xx_mib_work_func(struct work_struct *work)
1501 {
1502 struct ar40xx_priv *priv;
1503 int err;
1504
1505 priv = container_of(work, struct ar40xx_priv, mib_work.work);
1506
1507 mutex_lock(&priv->mib_lock);
1508
1509 err = ar40xx_mib_capture(priv);
1510 if (err)
1511 goto next_port;
1512
1513 ar40xx_mib_fetch_port_stat(priv, priv->mib_next_port, false);
1514
1515 next_port:
1516 priv->mib_next_port++;
1517 if (priv->mib_next_port >= priv->dev.ports)
1518 priv->mib_next_port = 0;
1519
1520 mutex_unlock(&priv->mib_lock);
1521
1522 schedule_delayed_work(&priv->mib_work,
1523 msecs_to_jiffies(AR40XX_MIB_WORK_DELAY));
1524 }
1525
1526 static void
1527 ar40xx_setup_port(struct ar40xx_priv *priv, int port, u32 members)
1528 {
1529 u32 t;
1530 u32 egress, ingress;
1531 u32 pvid = priv->vlan_id[priv->pvid[port]];
1532
1533 if (priv->vlan) {
1534 egress = AR40XX_PORT_VLAN1_OUT_MODE_UNMOD;
1535
1536 ingress = AR40XX_IN_SECURE;
1537 } else {
1538 egress = AR40XX_PORT_VLAN1_OUT_MODE_UNTOUCH;
1539 ingress = AR40XX_IN_PORT_ONLY;
1540 }
1541
1542 t = pvid << AR40XX_PORT_VLAN0_DEF_SVID_S;
1543 t |= pvid << AR40XX_PORT_VLAN0_DEF_CVID_S;
1544 ar40xx_write(priv, AR40XX_REG_PORT_VLAN0(port), t);
1545
1546 t = AR40XX_PORT_VLAN1_PORT_VLAN_PROP;
1547 t |= egress << AR40XX_PORT_VLAN1_OUT_MODE_S;
1548
1549 ar40xx_write(priv, AR40XX_REG_PORT_VLAN1(port), t);
1550
1551 t = members;
1552 t |= AR40XX_PORT_LOOKUP_LEARN;
1553 t |= ingress << AR40XX_PORT_LOOKUP_IN_MODE_S;
1554 t |= AR40XX_PORT_STATE_FORWARD << AR40XX_PORT_LOOKUP_STATE_S;
1555 ar40xx_write(priv, AR40XX_REG_PORT_LOOKUP(port), t);
1556 }
1557
1558 static void
1559 ar40xx_vtu_op(struct ar40xx_priv *priv, u32 op, u32 val)
1560 {
1561 if (ar40xx_wait_bit(priv, AR40XX_REG_VTU_FUNC1,
1562 AR40XX_VTU_FUNC1_BUSY, 0))
1563 return;
1564
1565 if ((op & AR40XX_VTU_FUNC1_OP) == AR40XX_VTU_FUNC1_OP_LOAD)
1566 ar40xx_write(priv, AR40XX_REG_VTU_FUNC0, val);
1567
1568 op |= AR40XX_VTU_FUNC1_BUSY;
1569 ar40xx_write(priv, AR40XX_REG_VTU_FUNC1, op);
1570 }
1571
1572 static void
1573 ar40xx_vtu_load_vlan(struct ar40xx_priv *priv, u32 vid, u32 port_mask)
1574 {
1575 u32 op;
1576 u32 val;
1577 int i;
1578
1579 op = AR40XX_VTU_FUNC1_OP_LOAD | (vid << AR40XX_VTU_FUNC1_VID_S);
1580 val = AR40XX_VTU_FUNC0_VALID | AR40XX_VTU_FUNC0_IVL;
1581 for (i = 0; i < AR40XX_NUM_PORTS; i++) {
1582 u32 mode;
1583
1584 if ((port_mask & BIT(i)) == 0)
1585 mode = AR40XX_VTU_FUNC0_EG_MODE_NOT;
1586 else if (priv->vlan == 0)
1587 mode = AR40XX_VTU_FUNC0_EG_MODE_KEEP;
1588 else if ((priv->vlan_tagged & BIT(i)) ||
1589 (priv->vlan_id[priv->pvid[i]] != vid))
1590 mode = AR40XX_VTU_FUNC0_EG_MODE_TAG;
1591 else
1592 mode = AR40XX_VTU_FUNC0_EG_MODE_UNTAG;
1593
1594 val |= mode << AR40XX_VTU_FUNC0_EG_MODE_S(i);
1595 }
1596 ar40xx_vtu_op(priv, op, val);
1597 }
1598
/* Remove every entry from the VLAN translation table. */
static void
ar40xx_vtu_flush(struct ar40xx_priv *priv)
{
	ar40xx_vtu_op(priv, AR40XX_VTU_FUNC1_OP_FLUSH, 0);
}
1604
/* swconfig apply hook: rebuild the VTU and every port's destination
 * mask / VLAN registers from the cached software configuration.
 * Holds reg_mutex for the whole update. Returns 0.
 */
static int
ar40xx_sw_hw_apply(struct switch_dev *dev)
{
	struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
	u8 portmask[AR40XX_NUM_PORTS];
	int i, j;

	mutex_lock(&priv->reg_mutex);
	/* flush all vlan entries */
	ar40xx_vtu_flush(priv);

	memset(portmask, 0, sizeof(portmask));
	if (priv->vlan) {
		/* Derive each port's destination mask from VLAN
		 * membership: a port may forward to every other member
		 * of any VLAN it belongs to (its own bit excluded).
		 */
		for (j = 0; j < AR40XX_MAX_VLANS; j++) {
			u8 vp = priv->vlan_table[j];

			if (!vp)
				continue;

			for (i = 0; i < dev->ports; i++) {
				u8 mask = BIT(i);

				if (vp & mask)
					portmask[i] |= vp & ~mask;
			}

			ar40xx_vtu_load_vlan(priv, priv->vlan_id[j],
					     priv->vlan_table[j]);
		}
	} else {
		/* 8021q vlan disabled */
		/* star topology: every port talks only to the CPU port */
		for (i = 0; i < dev->ports; i++) {
			if (i == AR40XX_PORT_CPU)
				continue;

			portmask[i] = BIT(AR40XX_PORT_CPU);
			portmask[AR40XX_PORT_CPU] |= BIT(i);
		}
	}

	/* update the port destination mask registers and tag settings */
	for (i = 0; i < dev->ports; i++)
		ar40xx_setup_port(priv, i, portmask[i]);

	ar40xx_set_mirror_regs(priv);

	mutex_unlock(&priv->reg_mutex);
	return 0;
}
1654
/* swconfig reset hook: return the cached configuration to defaults
 * and push it to the hardware. Returns the ar40xx_sw_hw_apply()
 * result.
 */
static int
ar40xx_sw_reset_switch(struct switch_dev *dev)
{
	struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
	int i, rv;

	mutex_lock(&priv->reg_mutex);
	/* Zero everything from the 'vlan' member to the end of the
	 * struct; fields laid out before it must survive the reset.
	 */
	memset(&priv->vlan, 0, sizeof(struct ar40xx_priv) -
	       offsetof(struct ar40xx_priv, vlan));

	/* identity VID mapping by default */
	for (i = 0; i < AR40XX_MAX_VLANS; i++)
		priv->vlan_id[i] = i;

	ar40xx_vlan_init(priv);

	priv->mirror_rx = false;
	priv->mirror_tx = false;
	priv->source_port = 0;
	priv->monitor_port = 0;

	mutex_unlock(&priv->reg_mutex);

	/* apply outside the lock: hw_apply takes reg_mutex itself */
	rv = ar40xx_sw_hw_apply(dev);
	return rv;
}
1680
1681 static int
1682 ar40xx_start(struct ar40xx_priv *priv)
1683 {
1684 int ret;
1685
1686 ret = ar40xx_hw_init(priv);
1687 if (ret)
1688 return ret;
1689
1690 ret = ar40xx_sw_reset_switch(&priv->dev);
1691 if (ret)
1692 return ret;
1693
1694 /* at last, setup cpu port */
1695 ret = ar40xx_cpuport_setup(priv);
1696 if (ret)
1697 return ret;
1698
1699 schedule_delayed_work(&priv->mib_work,
1700 msecs_to_jiffies(AR40XX_MIB_WORK_DELAY));
1701
1702 ar40xx_qm_err_check_work_start(priv);
1703
1704 return 0;
1705 }
1706
/* swconfig operations implemented by this driver; attribute tables
 * are defined earlier in the file.
 */
static const struct switch_dev_ops ar40xx_sw_ops = {
	.attr_global = {
		.attr = ar40xx_sw_attr_globals,
		.n_attr = ARRAY_SIZE(ar40xx_sw_attr_globals),
	},
	.attr_port = {
		.attr = ar40xx_sw_attr_port,
		.n_attr = ARRAY_SIZE(ar40xx_sw_attr_port),
	},
	.attr_vlan = {
		.attr = ar40xx_sw_attr_vlan,
		.n_attr = ARRAY_SIZE(ar40xx_sw_attr_vlan),
	},
	.get_port_pvid = ar40xx_sw_get_pvid,
	.set_port_pvid = ar40xx_sw_set_pvid,
	.get_vlan_ports = ar40xx_sw_get_ports,
	.set_vlan_ports = ar40xx_sw_set_ports,
	.apply_config = ar40xx_sw_hw_apply,
	.reset_switch = ar40xx_sw_reset_switch,
	.get_port_link = ar40xx_sw_get_port_link,
};
1728
1729 /* Platform driver probe function */
1730
1731 static int ar40xx_probe(struct platform_device *pdev)
1732 {
1733 struct device_node *switch_node;
1734 struct device_node *psgmii_node;
1735 struct device_node *mdio_node;
1736 const __be32 *mac_mode;
1737 struct clk *ess_clk;
1738 struct switch_dev *swdev;
1739 struct ar40xx_priv *priv;
1740 u32 len;
1741 u32 num_mibs;
1742 struct resource psgmii_base = {0};
1743 struct resource switch_base = {0};
1744 int ret;
1745
1746 priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
1747 if (!priv)
1748 return -ENOMEM;
1749
1750 platform_set_drvdata(pdev, priv);
1751 ar40xx_priv = priv;
1752
1753 switch_node = of_node_get(pdev->dev.of_node);
1754 if (of_address_to_resource(switch_node, 0, &switch_base) != 0)
1755 return -EIO;
1756
1757 priv->hw_addr = devm_ioremap_resource(&pdev->dev, &switch_base);
1758 if (IS_ERR(priv->hw_addr)) {
1759 dev_err(&pdev->dev, "Failed to ioremap switch_base!\n");
1760 return PTR_ERR(priv->hw_addr);
1761 }
1762
1763 /*psgmii dts get*/
1764 psgmii_node = of_find_node_by_name(NULL, "ess-psgmii");
1765 if (!psgmii_node) {
1766 dev_err(&pdev->dev, "Failed to find ess-psgmii node!\n");
1767 return -EINVAL;
1768 }
1769
1770 if (of_address_to_resource(psgmii_node, 0, &psgmii_base) != 0)
1771 return -EIO;
1772
1773 priv->psgmii_hw_addr = devm_ioremap_resource(&pdev->dev, &psgmii_base);
1774 if (IS_ERR(priv->psgmii_hw_addr)) {
1775 dev_err(&pdev->dev, "psgmii ioremap fail!\n");
1776 return PTR_ERR(priv->psgmii_hw_addr);
1777 }
1778
1779 mac_mode = of_get_property(switch_node, "switch_mac_mode", &len);
1780 if (!mac_mode) {
1781 dev_err(&pdev->dev, "Failed to read switch_mac_mode\n");
1782 return -EINVAL;
1783 }
1784 priv->mac_mode = be32_to_cpup(mac_mode);
1785
1786 ess_clk = of_clk_get_by_name(switch_node, "ess_clk");
1787 if (ess_clk)
1788 clk_prepare_enable(ess_clk);
1789
1790 priv->ess_rst = devm_reset_control_get(&pdev->dev, "ess_rst");
1791 if (IS_ERR(priv->ess_rst)) {
1792 dev_err(&pdev->dev, "Failed to get ess_rst control!\n");
1793 return PTR_ERR(priv->ess_rst);
1794 }
1795
1796 if (of_property_read_u32(switch_node, "switch_cpu_bmp",
1797 &priv->cpu_bmp) ||
1798 of_property_read_u32(switch_node, "switch_lan_bmp",
1799 &priv->lan_bmp) ||
1800 of_property_read_u32(switch_node, "switch_wan_bmp",
1801 &priv->wan_bmp)) {
1802 dev_err(&pdev->dev, "Failed to read port properties\n");
1803 return -EIO;
1804 }
1805
1806 mutex_init(&priv->reg_mutex);
1807 mutex_init(&priv->mib_lock);
1808 INIT_DELAYED_WORK(&priv->mib_work, ar40xx_mib_work_func);
1809
1810 /* register switch */
1811 swdev = &priv->dev;
1812
1813 mdio_node = of_find_compatible_node(NULL, NULL, "qcom,ipq4019-mdio");
1814 if (!mdio_node) {
1815 dev_err(&pdev->dev, "Probe failed - Cannot find mdio node by phandle!\n");
1816 ret = -ENODEV;
1817 goto err_missing_phy;
1818 }
1819
1820 priv->mii_bus = of_mdio_find_bus(mdio_node);
1821
1822 if (priv->mii_bus == NULL) {
1823 dev_err(&pdev->dev, "Probe failed - Missing PHYs!\n");
1824 ret = -ENODEV;
1825 goto err_missing_phy;
1826 }
1827
1828 swdev->alias = dev_name(&priv->mii_bus->dev);
1829
1830 swdev->cpu_port = AR40XX_PORT_CPU;
1831 swdev->name = "QCA AR40xx";
1832 swdev->vlans = AR40XX_MAX_VLANS;
1833 swdev->ports = AR40XX_NUM_PORTS;
1834 swdev->ops = &ar40xx_sw_ops;
1835 ret = register_switch(swdev, NULL);
1836 if (ret < 0) {
1837 dev_err(&pdev->dev, "Switch registration failed!\n");
1838 return ret;
1839 }
1840
1841 num_mibs = ARRAY_SIZE(ar40xx_mibs);
1842 len = priv->dev.ports * num_mibs *
1843 sizeof(*priv->mib_stats);
1844 priv->mib_stats = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
1845 if (!priv->mib_stats) {
1846 ret = -ENOMEM;
1847 goto err_unregister_switch;
1848 }
1849
1850 ar40xx_start(priv);
1851
1852 return 0;
1853
1854 err_unregister_switch:
1855 unregister_switch(&priv->dev);
1856 err_missing_phy:
1857 platform_set_drvdata(pdev, NULL);
1858 return ret;
1859 }
1860
/* Platform driver remove: stop the deferred work and tear down the
 * swconfig device. Memory and iomem are devm-managed.
 */
static int ar40xx_remove(struct platform_device *pdev)
{
	struct ar40xx_priv *priv = platform_get_drvdata(pdev);

	/* both work items re-arm themselves; _sync guarantees neither
	 * is running nor pending after this returns
	 */
	cancel_delayed_work_sync(&priv->qm_dwork);
	cancel_delayed_work_sync(&priv->mib_work);

	unregister_switch(&priv->dev);

	return 0;
}
1872
/* Device tree match table for the ESS switch node. */
static const struct of_device_id ar40xx_of_mtable[] = {
	{.compatible = "qcom,ess-switch" },
	{}
};
1877
/* NOTE(review): deliberately non-static — presumably referenced from
 * other ipq40xx code; confirm before narrowing the linkage.
 */
struct platform_driver ar40xx_drv = {
	.probe = ar40xx_probe,
	.remove = ar40xx_remove,
	.driver = {
		.name = "ar40xx",
		.of_match_table = ar40xx_of_mtable,
	},
};

module_platform_driver(ar40xx_drv);

MODULE_DESCRIPTION("IPQ40XX ESS driver");
MODULE_LICENSE("Dual BSD/GPL");