db21547a03dc8d51536af3cf88a682c014a18e08
[openwrt/openwrt.git] / target / linux / ipq40xx / files-5.4 / drivers / net / phy / ar40xx.c
1 /*
2 * Copyright (c) 2016, The Linux Foundation. All rights reserved.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for
5 * any purpose with or without fee is hereby granted, provided that the
6 * above copyright notice and this permission notice appear in all copies.
7 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
8 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
9 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
10 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
11 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
12 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
13 * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
14 */
15
16 #include <linux/module.h>
17 #include <linux/list.h>
18 #include <linux/bitops.h>
19 #include <linux/switch.h>
20 #include <linux/delay.h>
21 #include <linux/phy.h>
22 #include <linux/clk.h>
23 #include <linux/reset.h>
24 #include <linux/lockdep.h>
25 #include <linux/workqueue.h>
26 #include <linux/of_device.h>
27 #include <linux/of_address.h>
28 #include <linux/mdio.h>
29 #include <linux/gpio.h>
30
31 #include "ar40xx.h"
32
33 static struct ar40xx_priv *ar40xx_priv;
34
/* Shorthand for one MIB counter descriptor: counter width in 32-bit
 * words, register offset within a port's stats window, display name.
 */
#define MIB_DESC(_s , _o, _n)	\
	{			\
		.size = (_s),	\
		.offset = (_o),	\
		.name = (_n),	\
	}
41
/* Hardware MIB counters, in register-offset order.  Entries with
 * size 2 are 64-bit counters split across two consecutive registers.
 */
static const struct ar40xx_mib_desc ar40xx_mibs[] = {
	MIB_DESC(1, AR40XX_STATS_RXBROAD, "RxBroad"),
	MIB_DESC(1, AR40XX_STATS_RXPAUSE, "RxPause"),
	MIB_DESC(1, AR40XX_STATS_RXMULTI, "RxMulti"),
	MIB_DESC(1, AR40XX_STATS_RXFCSERR, "RxFcsErr"),
	MIB_DESC(1, AR40XX_STATS_RXALIGNERR, "RxAlignErr"),
	MIB_DESC(1, AR40XX_STATS_RXRUNT, "RxRunt"),
	MIB_DESC(1, AR40XX_STATS_RXFRAGMENT, "RxFragment"),
	MIB_DESC(1, AR40XX_STATS_RX64BYTE, "Rx64Byte"),
	MIB_DESC(1, AR40XX_STATS_RX128BYTE, "Rx128Byte"),
	MIB_DESC(1, AR40XX_STATS_RX256BYTE, "Rx256Byte"),
	MIB_DESC(1, AR40XX_STATS_RX512BYTE, "Rx512Byte"),
	MIB_DESC(1, AR40XX_STATS_RX1024BYTE, "Rx1024Byte"),
	MIB_DESC(1, AR40XX_STATS_RX1518BYTE, "Rx1518Byte"),
	MIB_DESC(1, AR40XX_STATS_RXMAXBYTE, "RxMaxByte"),
	MIB_DESC(1, AR40XX_STATS_RXTOOLONG, "RxTooLong"),
	MIB_DESC(2, AR40XX_STATS_RXGOODBYTE, "RxGoodByte"),
	MIB_DESC(2, AR40XX_STATS_RXBADBYTE, "RxBadByte"),
	MIB_DESC(1, AR40XX_STATS_RXOVERFLOW, "RxOverFlow"),
	MIB_DESC(1, AR40XX_STATS_FILTERED, "Filtered"),
	MIB_DESC(1, AR40XX_STATS_TXBROAD, "TxBroad"),
	MIB_DESC(1, AR40XX_STATS_TXPAUSE, "TxPause"),
	MIB_DESC(1, AR40XX_STATS_TXMULTI, "TxMulti"),
	MIB_DESC(1, AR40XX_STATS_TXUNDERRUN, "TxUnderRun"),
	MIB_DESC(1, AR40XX_STATS_TX64BYTE, "Tx64Byte"),
	MIB_DESC(1, AR40XX_STATS_TX128BYTE, "Tx128Byte"),
	MIB_DESC(1, AR40XX_STATS_TX256BYTE, "Tx256Byte"),
	MIB_DESC(1, AR40XX_STATS_TX512BYTE, "Tx512Byte"),
	MIB_DESC(1, AR40XX_STATS_TX1024BYTE, "Tx1024Byte"),
	MIB_DESC(1, AR40XX_STATS_TX1518BYTE, "Tx1518Byte"),
	MIB_DESC(1, AR40XX_STATS_TXMAXBYTE, "TxMaxByte"),
	MIB_DESC(1, AR40XX_STATS_TXOVERSIZE, "TxOverSize"),
	MIB_DESC(2, AR40XX_STATS_TXBYTE, "TxByte"),
	MIB_DESC(1, AR40XX_STATS_TXCOLLISION, "TxCollision"),
	MIB_DESC(1, AR40XX_STATS_TXABORTCOL, "TxAbortCol"),
	MIB_DESC(1, AR40XX_STATS_TXMULTICOL, "TxMultiCol"),
	MIB_DESC(1, AR40XX_STATS_TXSINGLECOL, "TxSingleCol"),
	MIB_DESC(1, AR40XX_STATS_TXEXCDEFER, "TxExcDefer"),
	MIB_DESC(1, AR40XX_STATS_TXDEFER, "TxDefer"),
	MIB_DESC(1, AR40XX_STATS_TXLATECOL, "TxLateCol"),
};
83
84 static u32
85 ar40xx_read(struct ar40xx_priv *priv, int reg)
86 {
87 return readl(priv->hw_addr + reg);
88 }
89
90 static u32
91 ar40xx_psgmii_read(struct ar40xx_priv *priv, int reg)
92 {
93 return readl(priv->psgmii_hw_addr + reg);
94 }
95
96 static void
97 ar40xx_write(struct ar40xx_priv *priv, int reg, u32 val)
98 {
99 writel(val, priv->hw_addr + reg);
100 }
101
102 static u32
103 ar40xx_rmw(struct ar40xx_priv *priv, int reg, u32 mask, u32 val)
104 {
105 u32 ret;
106
107 ret = ar40xx_read(priv, reg);
108 ret &= ~mask;
109 ret |= val;
110 ar40xx_write(priv, reg, ret);
111 return ret;
112 }
113
114 static void
115 ar40xx_psgmii_write(struct ar40xx_priv *priv, int reg, u32 val)
116 {
117 writel(val, priv->psgmii_hw_addr + reg);
118 }
119
/* Write an Atheros PHY debug register.
 *
 * Two-step indirect access: latch the debug register address, then
 * write the data.  The bus lock keeps other MDIO traffic from being
 * interleaved between the two writes.
 */
static void
ar40xx_phy_dbg_write(struct ar40xx_priv *priv, int phy_addr,
		     u16 dbg_addr, u16 dbg_data)
{
	struct mii_bus *bus = priv->mii_bus;

	mutex_lock(&bus->mdio_lock);
	bus->write(bus, phy_addr, AR40XX_MII_ATH_DBG_ADDR, dbg_addr);
	bus->write(bus, phy_addr, AR40XX_MII_ATH_DBG_DATA, dbg_data);
	mutex_unlock(&bus->mdio_lock);
}
131
/* Read an Atheros PHY debug register.
 *
 * Same indirect address/data sequence as ar40xx_phy_dbg_write(); the
 * result is returned through @dbg_data.
 */
static void
ar40xx_phy_dbg_read(struct ar40xx_priv *priv, int phy_addr,
		    u16 dbg_addr, u16 *dbg_data)
{
	struct mii_bus *bus = priv->mii_bus;

	mutex_lock(&bus->mdio_lock);
	bus->write(bus, phy_addr, AR40XX_MII_ATH_DBG_ADDR, dbg_addr);
	*dbg_data = bus->read(bus, phy_addr, AR40XX_MII_ATH_DBG_DATA);
	mutex_unlock(&bus->mdio_lock);
}
143
/* Write a PHY MMD (MDIO Manageable Device) register via the Clause 22
 * indirect access registers: select the MMD, write the register
 * address, switch to data mode (0x4000 = data, no post-increment),
 * then write the value.  Statement order is mandated by the access
 * protocol; the bus lock keeps the sequence atomic.
 */
static void
ar40xx_phy_mmd_write(struct ar40xx_priv *priv, u32 phy_id,
		     u16 mmd_num, u16 reg_id, u16 reg_val)
{
	struct mii_bus *bus = priv->mii_bus;

	mutex_lock(&bus->mdio_lock);
	bus->write(bus, phy_id,
		   AR40XX_MII_ATH_MMD_ADDR, mmd_num);
	bus->write(bus, phy_id,
		   AR40XX_MII_ATH_MMD_DATA, reg_id);
	bus->write(bus, phy_id,
		   AR40XX_MII_ATH_MMD_ADDR,
		   0x4000 | mmd_num);
	bus->write(bus, phy_id,
		   AR40XX_MII_ATH_MMD_DATA, reg_val);
	mutex_unlock(&bus->mdio_lock);
}
162
/* Read a PHY MMD register.  Same indirect sequence as
 * ar40xx_phy_mmd_write(), with a final data-register read instead of
 * a write.  Returns the 16-bit register value.
 */
static u16
ar40xx_phy_mmd_read(struct ar40xx_priv *priv, u32 phy_id,
		    u16 mmd_num, u16 reg_id)
{
	u16 value;
	struct mii_bus *bus = priv->mii_bus;

	mutex_lock(&bus->mdio_lock);
	bus->write(bus, phy_id,
		   AR40XX_MII_ATH_MMD_ADDR, mmd_num);
	bus->write(bus, phy_id,
		   AR40XX_MII_ATH_MMD_DATA, reg_id);
	bus->write(bus, phy_id,
		   AR40XX_MII_ATH_MMD_ADDR,
		   0x4000 | mmd_num);
	value = bus->read(bus, phy_id, AR40XX_MII_ATH_MMD_DATA);
	mutex_unlock(&bus->mdio_lock);
	return value;
}
182
183 /* Start of swconfig support */
184
185 static void
186 ar40xx_phy_poll_reset(struct ar40xx_priv *priv)
187 {
188 u32 i, in_reset, retries = 500;
189 struct mii_bus *bus = priv->mii_bus;
190
191 /* Assume RESET was recently issued to some or all of the phys */
192 in_reset = GENMASK(AR40XX_NUM_PHYS - 1, 0);
193
194 while (retries--) {
195 /* 1ms should be plenty of time.
196 * 802.3 spec allows for a max wait time of 500ms
197 */
198 usleep_range(1000, 2000);
199
200 for (i = 0; i < AR40XX_NUM_PHYS; i++) {
201 int val;
202
203 /* skip devices which have completed reset */
204 if (!(in_reset & BIT(i)))
205 continue;
206
207 val = mdiobus_read(bus, i, MII_BMCR);
208 if (val < 0)
209 continue;
210
211 /* mark when phy is no longer in reset state */
212 if (!(val & BMCR_RESET))
213 in_reset &= ~BIT(i);
214 }
215
216 if (!in_reset)
217 return;
218 }
219
220 dev_warn(&bus->dev, "Failed to reset all phys! (in_reset: 0x%x)\n",
221 in_reset);
222 }
223
/* Re-initialize all front-panel PHYs: drop the manual-control
 * override set by ar40xx_port_phy_linkdown(), advertise all 10/100
 * modes plus pause, advertise 1000FULL, then soft-reset each PHY with
 * autonegotiation enabled and wait for the resets to complete.
 */
static void
ar40xx_phy_init(struct ar40xx_priv *priv)
{
	int i;
	struct mii_bus *bus;
	u16 val;

	bus = priv->mii_bus;
	for (i = 0; i < AR40XX_NUM_PORTS - 1; i++) {
		/* clear the manual-control bit in debug register 0 */
		ar40xx_phy_dbg_read(priv, i, AR40XX_PHY_DEBUG_0, &val);
		val &= ~AR40XX_PHY_MANU_CTRL_EN;
		ar40xx_phy_dbg_write(priv, i, AR40XX_PHY_DEBUG_0, val);
		mdiobus_write(bus, i,
			      MII_ADVERTISE, ADVERTISE_ALL |
			      ADVERTISE_PAUSE_CAP |
			      ADVERTISE_PAUSE_ASYM);
		mdiobus_write(bus, i, MII_CTRL1000, ADVERTISE_1000FULL);
		mdiobus_write(bus, i, MII_BMCR, BMCR_RESET | BMCR_ANENABLE);
	}

	ar40xx_phy_poll_reset(priv);
}
246
/* Force all front-panel PHYs link-down: stop advertising any modes,
 * soft-reset, set the manual-control override in debug register 0,
 * and mask the transmit bits in debug register 2 so the PHYs stop
 * transmitting.  Reversed by ar40xx_phy_init().
 */
static void
ar40xx_port_phy_linkdown(struct ar40xx_priv *priv)
{
	struct mii_bus *bus;
	int i;
	u16 val;

	bus = priv->mii_bus;
	for (i = 0; i < AR40XX_NUM_PORTS - 1; i++) {
		mdiobus_write(bus, i, MII_CTRL1000, 0);
		mdiobus_write(bus, i, MII_ADVERTISE, 0);
		mdiobus_write(bus, i, MII_BMCR, BMCR_RESET | BMCR_ANENABLE);
		ar40xx_phy_dbg_read(priv, i, AR40XX_PHY_DEBUG_0, &val);
		val |= AR40XX_PHY_MANU_CTRL_EN;
		ar40xx_phy_dbg_write(priv, i, AR40XX_PHY_DEBUG_0, val);
		/* disable transmit */
		ar40xx_phy_dbg_read(priv, i, AR40XX_PHY_DEBUG_2, &val);
		val &= 0xf00f;
		ar40xx_phy_dbg_write(priv, i, AR40XX_PHY_DEBUG_2, val);
	}
}
268
/* Reprogram the hardware mirroring configuration from driver state.
 *
 * Always resets the mirror port selection and all per-port
 * ingress/egress mirror enables first, then re-enables mirroring only
 * if both source and monitor ports are valid and distinct.  Callers
 * hold priv->reg_mutex.
 */
static void
ar40xx_set_mirror_regs(struct ar40xx_priv *priv)
{
	int port;

	/* reset all mirror registers */
	ar40xx_rmw(priv, AR40XX_REG_FWD_CTRL0,
		   AR40XX_FWD_CTRL0_MIRROR_PORT,
		   (0xF << AR40XX_FWD_CTRL0_MIRROR_PORT_S));
	for (port = 0; port < AR40XX_NUM_PORTS; port++) {
		ar40xx_rmw(priv, AR40XX_REG_PORT_LOOKUP(port),
			   AR40XX_PORT_LOOKUP_ING_MIRROR_EN, 0);

		ar40xx_rmw(priv, AR40XX_REG_PORT_HOL_CTRL1(port),
			   AR40XX_PORT_HOL_CTRL1_EG_MIRROR_EN, 0);
	}

	/* now enable mirroring if necessary */
	if (priv->source_port >= AR40XX_NUM_PORTS ||
	    priv->monitor_port >= AR40XX_NUM_PORTS ||
	    priv->source_port == priv->monitor_port) {
		return;
	}

	ar40xx_rmw(priv, AR40XX_REG_FWD_CTRL0,
		   AR40XX_FWD_CTRL0_MIRROR_PORT,
		   (priv->monitor_port << AR40XX_FWD_CTRL0_MIRROR_PORT_S));

	if (priv->mirror_rx)
		ar40xx_rmw(priv, AR40XX_REG_PORT_LOOKUP(priv->source_port), 0,
			   AR40XX_PORT_LOOKUP_ING_MIRROR_EN);

	if (priv->mirror_tx)
		ar40xx_rmw(priv, AR40XX_REG_PORT_HOL_CTRL1(priv->source_port),
			   0, AR40XX_PORT_HOL_CTRL1_EG_MIRROR_EN);
}
305
306 static int
307 ar40xx_sw_get_ports(struct switch_dev *dev, struct switch_val *val)
308 {
309 struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
310 u8 ports = priv->vlan_table[val->port_vlan];
311 int i;
312
313 val->len = 0;
314 for (i = 0; i < dev->ports; i++) {
315 struct switch_port *p;
316
317 if (!(ports & BIT(i)))
318 continue;
319
320 p = &val->value.ports[val->len++];
321 p->id = i;
322 if ((priv->vlan_tagged & BIT(i)) ||
323 (priv->pvid[i] != val->port_vlan))
324 p->flags = BIT(SWITCH_PORT_FLAG_TAGGED);
325 else
326 p->flags = 0;
327 }
328 return 0;
329 }
330
331 static int
332 ar40xx_sw_set_ports(struct switch_dev *dev, struct switch_val *val)
333 {
334 struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
335 u8 *vt = &priv->vlan_table[val->port_vlan];
336 int i;
337
338 *vt = 0;
339 for (i = 0; i < val->len; i++) {
340 struct switch_port *p = &val->value.ports[i];
341
342 if (p->flags & BIT(SWITCH_PORT_FLAG_TAGGED)) {
343 if (val->port_vlan == priv->pvid[p->id])
344 priv->vlan_tagged |= BIT(p->id);
345 } else {
346 priv->vlan_tagged &= ~BIT(p->id);
347 priv->pvid[p->id] = val->port_vlan;
348 }
349
350 *vt |= BIT(p->id);
351 }
352 return 0;
353 }
354
355 static int
356 ar40xx_reg_wait(struct ar40xx_priv *priv, u32 reg, u32 mask, u32 val,
357 unsigned timeout)
358 {
359 int i;
360
361 for (i = 0; i < timeout; i++) {
362 u32 t;
363
364 t = ar40xx_read(priv, reg);
365 if ((t & mask) == val)
366 return 0;
367
368 usleep_range(1000, 2000);
369 }
370
371 return -ETIMEDOUT;
372 }
373
374 static int
375 ar40xx_mib_op(struct ar40xx_priv *priv, u32 op)
376 {
377 int ret;
378
379 lockdep_assert_held(&priv->mib_lock);
380
381 /* Capture the hardware statistics for all ports */
382 ar40xx_rmw(priv, AR40XX_REG_MIB_FUNC,
383 AR40XX_MIB_FUNC, (op << AR40XX_MIB_FUNC_S));
384
385 /* Wait for the capturing to complete. */
386 ret = ar40xx_reg_wait(priv, AR40XX_REG_MIB_FUNC,
387 AR40XX_MIB_BUSY, 0, 10);
388
389 return ret;
390 }
391
/* Accumulate (or, with @flush, zero) the software MIB counters of
 * @port.
 *
 * Reads each hardware counter from the port's stats window and adds
 * it into the 64-bit software accumulator; descriptors with size 2
 * take their high 32 bits from the following register.  Caller must
 * hold priv->mib_lock.
 */
static void
ar40xx_mib_fetch_port_stat(struct ar40xx_priv *priv, int port, bool flush)
{
	unsigned int base;
	u64 *mib_stats;
	int i;
	u32 num_mibs = ARRAY_SIZE(ar40xx_mibs);

	WARN_ON(port >= priv->dev.ports);

	lockdep_assert_held(&priv->mib_lock);

	/* start of this port's stats window */
	base = AR40XX_REG_PORT_STATS_START +
	       AR40XX_REG_PORT_STATS_LEN * port;

	mib_stats = &priv->mib_stats[port * num_mibs];
	if (flush) {
		u32 len;

		len = num_mibs * sizeof(*mib_stats);
		memset(mib_stats, 0, len);
		return;
	}
	for (i = 0; i < num_mibs; i++) {
		const struct ar40xx_mib_desc *mib;
		u64 t;

		mib = &ar40xx_mibs[i];
		t = ar40xx_read(priv, base + mib->offset);
		if (mib->size == 2) {
			u64 hi;

			/* high word lives in the next register */
			hi = ar40xx_read(priv, base + mib->offset + 4);
			t |= hi << 32;
		}

		mib_stats[i] += t;
	}
}
431
/* Latch the current hardware counters so they can be read out. */
static int
ar40xx_mib_capture(struct ar40xx_priv *priv)
{
	return ar40xx_mib_op(priv, AR40XX_MIB_FUNC_CAPTURE);
}
437
/* Clear all hardware MIB counters. */
static int
ar40xx_mib_flush(struct ar40xx_priv *priv)
{
	return ar40xx_mib_op(priv, AR40XX_MIB_FUNC_FLUSH);
}
443
444 static int
445 ar40xx_sw_set_reset_mibs(struct switch_dev *dev,
446 const struct switch_attr *attr,
447 struct switch_val *val)
448 {
449 struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
450 unsigned int len;
451 int ret;
452 u32 num_mibs = ARRAY_SIZE(ar40xx_mibs);
453
454 mutex_lock(&priv->mib_lock);
455
456 len = priv->dev.ports * num_mibs * sizeof(*priv->mib_stats);
457 memset(priv->mib_stats, 0, len);
458 ret = ar40xx_mib_flush(priv);
459
460 mutex_unlock(&priv->mib_lock);
461 return ret;
462 }
463
464 static int
465 ar40xx_sw_set_vlan(struct switch_dev *dev, const struct switch_attr *attr,
466 struct switch_val *val)
467 {
468 struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
469
470 priv->vlan = !!val->value.i;
471 return 0;
472 }
473
474 static int
475 ar40xx_sw_get_vlan(struct switch_dev *dev, const struct switch_attr *attr,
476 struct switch_val *val)
477 {
478 struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
479
480 val->value.i = priv->vlan;
481 return 0;
482 }
483
484 static int
485 ar40xx_sw_set_mirror_rx_enable(struct switch_dev *dev,
486 const struct switch_attr *attr,
487 struct switch_val *val)
488 {
489 struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
490
491 mutex_lock(&priv->reg_mutex);
492 priv->mirror_rx = !!val->value.i;
493 ar40xx_set_mirror_regs(priv);
494 mutex_unlock(&priv->reg_mutex);
495
496 return 0;
497 }
498
499 static int
500 ar40xx_sw_get_mirror_rx_enable(struct switch_dev *dev,
501 const struct switch_attr *attr,
502 struct switch_val *val)
503 {
504 struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
505
506 mutex_lock(&priv->reg_mutex);
507 val->value.i = priv->mirror_rx;
508 mutex_unlock(&priv->reg_mutex);
509 return 0;
510 }
511
512 static int
513 ar40xx_sw_set_mirror_tx_enable(struct switch_dev *dev,
514 const struct switch_attr *attr,
515 struct switch_val *val)
516 {
517 struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
518
519 mutex_lock(&priv->reg_mutex);
520 priv->mirror_tx = !!val->value.i;
521 ar40xx_set_mirror_regs(priv);
522 mutex_unlock(&priv->reg_mutex);
523
524 return 0;
525 }
526
527 static int
528 ar40xx_sw_get_mirror_tx_enable(struct switch_dev *dev,
529 const struct switch_attr *attr,
530 struct switch_val *val)
531 {
532 struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
533
534 mutex_lock(&priv->reg_mutex);
535 val->value.i = priv->mirror_tx;
536 mutex_unlock(&priv->reg_mutex);
537 return 0;
538 }
539
540 static int
541 ar40xx_sw_set_mirror_monitor_port(struct switch_dev *dev,
542 const struct switch_attr *attr,
543 struct switch_val *val)
544 {
545 struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
546
547 mutex_lock(&priv->reg_mutex);
548 priv->monitor_port = val->value.i;
549 ar40xx_set_mirror_regs(priv);
550 mutex_unlock(&priv->reg_mutex);
551
552 return 0;
553 }
554
555 static int
556 ar40xx_sw_get_mirror_monitor_port(struct switch_dev *dev,
557 const struct switch_attr *attr,
558 struct switch_val *val)
559 {
560 struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
561
562 mutex_lock(&priv->reg_mutex);
563 val->value.i = priv->monitor_port;
564 mutex_unlock(&priv->reg_mutex);
565 return 0;
566 }
567
568 static int
569 ar40xx_sw_set_mirror_source_port(struct switch_dev *dev,
570 const struct switch_attr *attr,
571 struct switch_val *val)
572 {
573 struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
574
575 mutex_lock(&priv->reg_mutex);
576 priv->source_port = val->value.i;
577 ar40xx_set_mirror_regs(priv);
578 mutex_unlock(&priv->reg_mutex);
579
580 return 0;
581 }
582
583 static int
584 ar40xx_sw_get_mirror_source_port(struct switch_dev *dev,
585 const struct switch_attr *attr,
586 struct switch_val *val)
587 {
588 struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
589
590 mutex_lock(&priv->reg_mutex);
591 val->value.i = priv->source_port;
592 mutex_unlock(&priv->reg_mutex);
593 return 0;
594 }
595
596 static int
597 ar40xx_sw_set_linkdown(struct switch_dev *dev,
598 const struct switch_attr *attr,
599 struct switch_val *val)
600 {
601 struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
602
603 if (val->value.i == 1)
604 ar40xx_port_phy_linkdown(priv);
605 else
606 ar40xx_phy_init(priv);
607
608 return 0;
609 }
610
611 static int
612 ar40xx_sw_set_port_reset_mib(struct switch_dev *dev,
613 const struct switch_attr *attr,
614 struct switch_val *val)
615 {
616 struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
617 int port;
618 int ret;
619
620 port = val->port_vlan;
621 if (port >= dev->ports)
622 return -EINVAL;
623
624 mutex_lock(&priv->mib_lock);
625 ret = ar40xx_mib_capture(priv);
626 if (ret)
627 goto unlock;
628
629 ar40xx_mib_fetch_port_stat(priv, port, true);
630
631 unlock:
632 mutex_unlock(&priv->mib_lock);
633 return ret;
634 }
635
636 static int
637 ar40xx_sw_get_port_mib(struct switch_dev *dev,
638 const struct switch_attr *attr,
639 struct switch_val *val)
640 {
641 struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
642 u64 *mib_stats;
643 int port;
644 int ret;
645 char *buf = priv->buf;
646 int i, len = 0;
647 u32 num_mibs = ARRAY_SIZE(ar40xx_mibs);
648
649 port = val->port_vlan;
650 if (port >= dev->ports)
651 return -EINVAL;
652
653 mutex_lock(&priv->mib_lock);
654 ret = ar40xx_mib_capture(priv);
655 if (ret)
656 goto unlock;
657
658 ar40xx_mib_fetch_port_stat(priv, port, false);
659
660 len += snprintf(buf + len, sizeof(priv->buf) - len,
661 "Port %d MIB counters\n",
662 port);
663
664 mib_stats = &priv->mib_stats[port * num_mibs];
665 for (i = 0; i < num_mibs; i++)
666 len += snprintf(buf + len, sizeof(priv->buf) - len,
667 "%-12s: %llu\n",
668 ar40xx_mibs[i].name,
669 mib_stats[i]);
670
671 val->value.s = buf;
672 val->len = len;
673
674 unlock:
675 mutex_unlock(&priv->mib_lock);
676 return ret;
677 }
678
679 static int
680 ar40xx_sw_set_vid(struct switch_dev *dev, const struct switch_attr *attr,
681 struct switch_val *val)
682 {
683 struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
684
685 priv->vlan_id[val->port_vlan] = val->value.i;
686 return 0;
687 }
688
689 static int
690 ar40xx_sw_get_vid(struct switch_dev *dev, const struct switch_attr *attr,
691 struct switch_val *val)
692 {
693 struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
694
695 val->value.i = priv->vlan_id[val->port_vlan];
696 return 0;
697 }
698
699 static int
700 ar40xx_sw_get_pvid(struct switch_dev *dev, int port, int *vlan)
701 {
702 struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
703 *vlan = priv->pvid[port];
704 return 0;
705 }
706
707 static int
708 ar40xx_sw_set_pvid(struct switch_dev *dev, int port, int vlan)
709 {
710 struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
711
712 /* make sure no invalid PVIDs get set */
713 if (vlan >= dev->vlans)
714 return -EINVAL;
715
716 priv->pvid[port] = vlan;
717 return 0;
718 }
719
720 static void
721 ar40xx_read_port_link(struct ar40xx_priv *priv, int port,
722 struct switch_port_link *link)
723 {
724 u32 status;
725 u32 speed;
726
727 memset(link, 0, sizeof(*link));
728
729 status = ar40xx_read(priv, AR40XX_REG_PORT_STATUS(port));
730
731 link->aneg = !!(status & AR40XX_PORT_AUTO_LINK_EN);
732 if (link->aneg || (port != AR40XX_PORT_CPU))
733 link->link = !!(status & AR40XX_PORT_STATUS_LINK_UP);
734 else
735 link->link = true;
736
737 if (!link->link)
738 return;
739
740 link->duplex = !!(status & AR40XX_PORT_DUPLEX);
741 link->tx_flow = !!(status & AR40XX_PORT_STATUS_TXFLOW);
742 link->rx_flow = !!(status & AR40XX_PORT_STATUS_RXFLOW);
743
744 speed = (status & AR40XX_PORT_SPEED) >>
745 AR40XX_PORT_STATUS_SPEED_S;
746
747 switch (speed) {
748 case AR40XX_PORT_SPEED_10M:
749 link->speed = SWITCH_PORT_SPEED_10;
750 break;
751 case AR40XX_PORT_SPEED_100M:
752 link->speed = SWITCH_PORT_SPEED_100;
753 break;
754 case AR40XX_PORT_SPEED_1000M:
755 link->speed = SWITCH_PORT_SPEED_1000;
756 break;
757 default:
758 link->speed = SWITCH_PORT_SPEED_UNKNOWN;
759 break;
760 }
761 }
762
/* swconfig: report a port's link state. */
static int
ar40xx_sw_get_port_link(struct switch_dev *dev, int port,
			struct switch_port_link *link)
{
	ar40xx_read_port_link(swdev_to_ar40xx(dev), port, link);

	return 0;
}
772
/* Global swconfig attributes: VLAN mode, MIB reset, mirroring
 * configuration, and forced PHY link-down.
 */
static const struct switch_attr ar40xx_sw_attr_globals[] = {
	{
		.type = SWITCH_TYPE_INT,
		.name = "enable_vlan",
		.description = "Enable VLAN mode",
		.set = ar40xx_sw_set_vlan,
		.get = ar40xx_sw_get_vlan,
		.max = 1
	},
	{
		.type = SWITCH_TYPE_NOVAL,
		.name = "reset_mibs",
		.description = "Reset all MIB counters",
		.set = ar40xx_sw_set_reset_mibs,
	},
	{
		.type = SWITCH_TYPE_INT,
		.name = "enable_mirror_rx",
		.description = "Enable mirroring of RX packets",
		.set = ar40xx_sw_set_mirror_rx_enable,
		.get = ar40xx_sw_get_mirror_rx_enable,
		.max = 1
	},
	{
		.type = SWITCH_TYPE_INT,
		.name = "enable_mirror_tx",
		.description = "Enable mirroring of TX packets",
		.set = ar40xx_sw_set_mirror_tx_enable,
		.get = ar40xx_sw_get_mirror_tx_enable,
		.max = 1
	},
	{
		.type = SWITCH_TYPE_INT,
		.name = "mirror_monitor_port",
		.description = "Mirror monitor port",
		.set = ar40xx_sw_set_mirror_monitor_port,
		.get = ar40xx_sw_get_mirror_monitor_port,
		.max = AR40XX_NUM_PORTS - 1
	},
	{
		.type = SWITCH_TYPE_INT,
		.name = "mirror_source_port",
		.description = "Mirror source port",
		.set = ar40xx_sw_set_mirror_source_port,
		.get = ar40xx_sw_get_mirror_source_port,
		.max = AR40XX_NUM_PORTS - 1
	},
	{
		.type = SWITCH_TYPE_INT,
		.name = "linkdown",
		.description = "Link down all the PHYs",
		.set = ar40xx_sw_set_linkdown,
		.max = 1
	},
};
828
/* Per-port swconfig attributes: per-port MIB reset and readout. */
static const struct switch_attr ar40xx_sw_attr_port[] = {
	{
		.type = SWITCH_TYPE_NOVAL,
		.name = "reset_mib",
		.description = "Reset single port MIB counters",
		.set = ar40xx_sw_set_port_reset_mib,
	},
	{
		.type = SWITCH_TYPE_STRING,
		.name = "mib",
		.description = "Get port's MIB counters",
		.set = NULL,
		.get = ar40xx_sw_get_port_mib,
	},
};
844
/* Per-VLAN swconfig attributes.
 * NOTE(review): unlike the globals/port tables above, this one is not
 * static - presumably referenced from another translation unit;
 * confirm before adding static.
 */
const struct switch_attr ar40xx_sw_attr_vlan[] = {
	{
		.type = SWITCH_TYPE_INT,
		.name = "vid",
		.description = "VLAN ID (0-4094)",
		.set = ar40xx_sw_set_vid,
		.get = ar40xx_sw_get_vid,
		.max = 4094,
	},
};
855
856 /* End of swconfig support */
857
858 static int
859 ar40xx_wait_bit(struct ar40xx_priv *priv, int reg, u32 mask, u32 val)
860 {
861 int timeout = 20;
862 u32 t;
863
864 while (1) {
865 t = ar40xx_read(priv, reg);
866 if ((t & mask) == val)
867 return 0;
868
869 if (timeout-- <= 0)
870 break;
871
872 usleep_range(10, 20);
873 }
874
875 pr_err("ar40xx: timeout for reg %08x: %08x & %08x != %08x\n",
876 (unsigned int)reg, t, mask, val);
877 return -ETIMEDOUT;
878 }
879
880 static int
881 ar40xx_atu_flush(struct ar40xx_priv *priv)
882 {
883 int ret;
884
885 ret = ar40xx_wait_bit(priv, AR40XX_REG_ATU_FUNC,
886 AR40XX_ATU_FUNC_BUSY, 0);
887 if (!ret)
888 ar40xx_write(priv, AR40XX_REG_ATU_FUNC,
889 AR40XX_ATU_FUNC_OP_FLUSH |
890 AR40XX_ATU_FUNC_BUSY);
891
892 return ret;
893 }
894
/* Hard-reset the ESS switch core via its reset line, then wait for
 * the internal tables to finish initializing.
 */
static void
ar40xx_ess_reset(struct ar40xx_priv *priv)
{
	reset_control_assert(priv->ess_rst);
	mdelay(10);
	reset_control_deassert(priv->ess_rst);
	/* Waiting for all inner tables init done.
	 * It cost 5~10ms.
	 */
	mdelay(10);

	pr_info("ESS reset ok!\n");
}
908
909 /* Start of psgmii self test */
910
/* Reset and recalibrate the Malibu PHY PSGMII link and the ESS core.
 *
 * Magic register sequence: constrain the PHY's PSGMII RX to 20 bits,
 * pulse the PHY-side PSGMII reset, wait for the PHY PLL calibration
 * to complete, freeze the RX CDR while the ESS core is reset, wait
 * for the ESS-side calibration, then release the CDR and the 20-bit
 * constraint.  The ordering is required by the hardware; do not
 * reorder the writes.  PHY address 5 is the Malibu PSGMII device.
 */
static void
ar40xx_malibu_psgmii_ess_reset(struct ar40xx_priv *priv)
{
	u32 n;
	struct mii_bus *bus = priv->mii_bus;
	/* reset phy psgmii */
	/* fix phy psgmii RX 20bit */
	mdiobus_write(bus, 5, 0x0, 0x005b);
	/* reset phy psgmii */
	mdiobus_write(bus, 5, 0x0, 0x001b);
	/* release reset phy psgmii */
	mdiobus_write(bus, 5, 0x0, 0x005b);

	for (n = 0; n < AR40XX_PSGMII_CALB_NUM; n++) {
		u16 status;

		status = ar40xx_phy_mmd_read(priv, 5, 1, 0x28);
		if (status & BIT(0))
			break;
		/* Polling interval to check PSGMII PLL in malibu is ready
		 * the worst time is 8.67ms
		 * for 25MHz reference clock
		 * [512+(128+2048)*49]*80ns+100us
		 */
		mdelay(2);
	}

	/*check malibu psgmii calibration done end..*/

	/*freeze phy psgmii RX CDR*/
	mdiobus_write(bus, 5, 0x1a, 0x2230);

	ar40xx_ess_reset(priv);

	/*check psgmii calibration done start*/
	for (n = 0; n < AR40XX_PSGMII_CALB_NUM; n++) {
		u32 status;

		status = ar40xx_psgmii_read(priv, 0xa0);
		if (status & BIT(0))
			break;
		/* Polling interval to check PSGMII PLL in ESS is ready */
		mdelay(2);
	}

	/* check dakota psgmii calibration done end..*/

	/* release phy psgmii RX CDR */
	mdiobus_write(bus, 5, 0x1a, 0x3230);
	/* release phy psgmii RX 20bit */
	mdiobus_write(bus, 5, 0x0, 0x005f);
}
963
/* Loopback-test the PSGMII path of one PHY.
 *
 * Resets the PHY, forces 1000M full duplex, waits for link, then uses
 * the PHY's MMD7 packet generator to send 0x1000 packets and checks
 * the TX counters.  On success the PHY's bit in priv->phy_t_status is
 * cleared; on failure it is set.  The PHY is powered down afterwards.
 */
static void
ar40xx_psgmii_single_phy_testing(struct ar40xx_priv *priv, int phy)
{
	int j;
	u32 tx_ok, tx_error;
	u32 rx_ok, rx_error;
	u32 tx_ok_high16;
	u32 rx_ok_high16;
	u32 tx_all_ok, rx_all_ok;
	struct mii_bus *bus = priv->mii_bus;

	/* reset, then force 1000M full duplex */
	mdiobus_write(bus, phy, 0x0, 0x9000);
	mdiobus_write(bus, phy, 0x0, 0x4140);

	for (j = 0; j < AR40XX_PSGMII_CALB_NUM; j++) {
		u16 status;

		status = mdiobus_read(bus, phy, 0x11);
		if (status & AR40XX_PHY_SPEC_STATUS_LINK)
			break;
		/* the polling interval to check if the PHY link up or not
		 * maxwait_timer: 750 ms +/-10 ms
		 * minwait_timer : 1 us +/- 0.1us
		 * time resides in minwait_timer ~ maxwait_timer
		 * see IEEE 802.3 section 40.4.5.2
		 */
		mdelay(8);
	}

	/* enable check */
	ar40xx_phy_mmd_write(priv, phy, 7, 0x8029, 0x0000);
	ar40xx_phy_mmd_write(priv, phy, 7, 0x8029, 0x0003);

	/* start traffic */
	ar40xx_phy_mmd_write(priv, phy, 7, 0x8020, 0xa000);
	/* wait for all traffic end
	 * 4096(pkt num)*1524(size)*8ns(125MHz)=49.9ms
	 */
	mdelay(50);

	/* check counter */
	tx_ok = ar40xx_phy_mmd_read(priv, phy, 7, 0x802e);
	tx_ok_high16 = ar40xx_phy_mmd_read(priv, phy, 7, 0x802d);
	tx_error = ar40xx_phy_mmd_read(priv, phy, 7, 0x802f);
	rx_ok = ar40xx_phy_mmd_read(priv, phy, 7, 0x802b);
	rx_ok_high16 = ar40xx_phy_mmd_read(priv, phy, 7, 0x802a);
	rx_error = ar40xx_phy_mmd_read(priv, phy, 7, 0x802c);
	tx_all_ok = tx_ok + (tx_ok_high16 << 16);
	rx_all_ok = rx_ok + (rx_ok_high16 << 16);
	/* NOTE(review): rx_all_ok/rx_error are read but the pass check
	 * below only looks at the TX counters - confirm upstream whether
	 * the RX side is intentionally ignored here.
	 */
	if (tx_all_ok == 0x1000 && tx_error == 0) {
		/* success */
		priv->phy_t_status &= (~BIT(phy));
	} else {
		pr_info("PHY %d single test PSGMII issue happen!\n", phy);
		priv->phy_t_status |= BIT(phy);
	}

	/* power the PHY back down */
	mdiobus_write(bus, phy, 0x0, 0x1840);
}
1023
/* Loopback-test the PSGMII path of all PHYs at once via the MDIO
 * broadcast address (0x1f).  Waits for every PHY to link up, runs the
 * MMD7 packet generator on all of them, then checks each PHY's TX
 * counters and records pass/fail in bits 8+ of priv->phy_t_status.
 */
static void
ar40xx_psgmii_all_phy_testing(struct ar40xx_priv *priv)
{
	int phy, j;
	struct mii_bus *bus = priv->mii_bus;

	/* broadcast: reset, then force 1000M full duplex */
	mdiobus_write(bus, 0x1f, 0x0, 0x9000);
	mdiobus_write(bus, 0x1f, 0x0, 0x4140);

	for (j = 0; j < AR40XX_PSGMII_CALB_NUM; j++) {
		for (phy = 0; phy < AR40XX_NUM_PORTS - 1; phy++) {
			u16 status;

			status = mdiobus_read(bus, phy, 0x11);
			if (!(status & BIT(10)))
				break;
		}

		/* all PHYs reported link up */
		if (phy >= (AR40XX_NUM_PORTS - 1))
			break;
		/* The polling interval to check if the PHY link up or not */
		mdelay(8);
	}
	/* enable check */
	ar40xx_phy_mmd_write(priv, 0x1f, 7, 0x8029, 0x0000);
	ar40xx_phy_mmd_write(priv, 0x1f, 7, 0x8029, 0x0003);

	/* start traffic */
	ar40xx_phy_mmd_write(priv, 0x1f, 7, 0x8020, 0xa000);
	/* wait for all traffic end
	 * 4096(pkt num)*1524(size)*8ns(125MHz)=49.9ms
	 */
	mdelay(50);

	for (phy = 0; phy < AR40XX_NUM_PORTS - 1; phy++) {
		u32 tx_ok, tx_error;
		u32 rx_ok, rx_error;
		u32 tx_ok_high16;
		u32 rx_ok_high16;
		u32 tx_all_ok, rx_all_ok;

		/* check counter */
		tx_ok = ar40xx_phy_mmd_read(priv, phy, 7, 0x802e);
		tx_ok_high16 = ar40xx_phy_mmd_read(priv, phy, 7, 0x802d);
		tx_error = ar40xx_phy_mmd_read(priv, phy, 7, 0x802f);
		rx_ok = ar40xx_phy_mmd_read(priv, phy, 7, 0x802b);
		rx_ok_high16 = ar40xx_phy_mmd_read(priv, phy, 7, 0x802a);
		rx_error = ar40xx_phy_mmd_read(priv, phy, 7, 0x802c);
		tx_all_ok = tx_ok + (tx_ok_high16<<16);
		rx_all_ok = rx_ok + (rx_ok_high16<<16);
		if (tx_all_ok == 0x1000 && tx_error == 0) {
			/* success */
			priv->phy_t_status &= ~BIT(phy + 8);
		} else {
			pr_info("PHY%d test see issue!\n", phy);
			priv->phy_t_status |= BIT(phy + 8);
		}
	}

	pr_debug("PHY all test 0x%x \r\n", priv->phy_t_status);
}
1085
/* Run the PSGMII calibration self-test.
 *
 * Repeatedly (up to AR40XX_PSGMII_CALB_NUM rounds): enable MAC
 * loopback on each port, run the single-PHY and all-PHY packet
 * generator tests, and if any PHY failed (priv->phy_t_status
 * non-zero), reset/recalibrate the PSGMII link and try again.
 * Finally the packet generator is disabled again.
 */
void
ar40xx_psgmii_self_test(struct ar40xx_priv *priv)
{
	u32 i, phy;
	struct mii_bus *bus = priv->mii_bus;

	ar40xx_malibu_psgmii_ess_reset(priv);

	/* switch to access MII reg for copper */
	mdiobus_write(bus, 4, 0x1f, 0x8500);
	for (phy = 0; phy < AR40XX_NUM_PORTS - 1; phy++) {
		/*enable phy mdio broadcast write*/
		ar40xx_phy_mmd_write(priv, phy, 7, 0x8028, 0x801f);
	}
	/* force no link by power down */
	mdiobus_write(bus, 0x1f, 0x0, 0x1840);
	/*packet number*/
	ar40xx_phy_mmd_write(priv, 0x1f, 7, 0x8021, 0x1000);
	ar40xx_phy_mmd_write(priv, 0x1f, 7, 0x8062, 0x05e0);

	/*fix mdi status */
	mdiobus_write(bus, 0x1f, 0x10, 0x6800);
	for (i = 0; i < AR40XX_PSGMII_CALB_NUM; i++) {
		priv->phy_t_status = 0;

		/* MAC loopback on every front-panel port */
		for (phy = 0; phy < AR40XX_NUM_PORTS - 1; phy++) {
			ar40xx_rmw(priv, AR40XX_REG_PORT_LOOKUP(phy + 1),
				   AR40XX_PORT_LOOKUP_LOOPBACK,
				   AR40XX_PORT_LOOKUP_LOOPBACK);
		}

		for (phy = 0; phy < AR40XX_NUM_PORTS - 1; phy++)
			ar40xx_psgmii_single_phy_testing(priv, phy);

		ar40xx_psgmii_all_phy_testing(priv);

		if (priv->phy_t_status)
			ar40xx_malibu_psgmii_ess_reset(priv);
		else
			break;
	}

	if (i >= AR40XX_PSGMII_CALB_NUM)
		pr_info("PSGMII cannot recover\n");
	else
		pr_debug("PSGMII recovered after %d times reset\n", i);

	/* configuration recover */
	/* packet number */
	ar40xx_phy_mmd_write(priv, 0x1f, 7, 0x8021, 0x0);
	/* disable check */
	ar40xx_phy_mmd_write(priv, 0x1f, 7, 0x8029, 0x0);
	/* disable traffic */
	ar40xx_phy_mmd_write(priv, 0x1f, 7, 0x8020, 0x0);
}
1141
/* Undo the self-test configuration: restore the PHYs, disable MAC
 * loopback and MDIO broadcast, and flush the FDB entries learned
 * from the loopback traffic.
 */
void
ar40xx_psgmii_self_test_clean(struct ar40xx_priv *priv)
{
	int phy;
	struct mii_bus *bus = priv->mii_bus;

	/* disable phy internal loopback */
	mdiobus_write(bus, 0x1f, 0x10, 0x6860);
	mdiobus_write(bus, 0x1f, 0x0, 0x9040);

	for (phy = 0; phy < AR40XX_NUM_PORTS - 1; phy++) {
		/* disable mac loop back */
		ar40xx_rmw(priv, AR40XX_REG_PORT_LOOKUP(phy + 1),
			   AR40XX_PORT_LOOKUP_LOOPBACK, 0);
		/* disable phy mdio broadcast write */
		ar40xx_phy_mmd_write(priv, phy, 7, 0x8028, 0x001f);
	}

	/* clear fdb entry */
	ar40xx_atu_flush(priv);
}
1163
1164 /* End of psgmii self test */
1165
1166 static void
1167 ar40xx_mac_mode_init(struct ar40xx_priv *priv, u32 mode)
1168 {
1169 if (mode == PORT_WRAPPER_PSGMII) {
1170 ar40xx_psgmii_write(priv, AR40XX_PSGMII_MODE_CONTROL, 0x2200);
1171 ar40xx_psgmii_write(priv, AR40XX_PSGMIIPHY_TX_CONTROL, 0x8380);
1172 }
1173 }
1174
/* Force the CPU port (port 0) link parameters: 1000M full duplex with
 * TX/RX flow control, then enable the MAC after a short settle delay.
 *
 * Returns 0 (always succeeds).
 */
static
int ar40xx_cpuport_setup(struct ar40xx_priv *priv)
{
	u32 t;

	/* First latch the forced link parameters with the MAC disabled */
	t = AR40XX_PORT_STATUS_TXFLOW |
	    AR40XX_PORT_STATUS_RXFLOW |
	    AR40XX_PORT_TXHALF_FLOW |
	    AR40XX_PORT_DUPLEX |
	    AR40XX_PORT_SPEED_1000M;
	ar40xx_write(priv, AR40XX_REG_PORT_STATUS(0), t);
	usleep_range(10, 20);

	/* ... then enable TX/RX on top of the same configuration */
	t |= AR40XX_PORT_TX_EN |
	     AR40XX_PORT_RX_EN;
	ar40xx_write(priv, AR40XX_REG_PORT_STATUS(0), t);

	return 0;
}
1194
/* Bring a single port to a known default state: auto link polling off,
 * header mode set (CPU port only), VLAN registers cleared to untouched
 * egress, and the lookup register set to learning + forwarding.
 */
static void
ar40xx_init_port(struct ar40xx_priv *priv, int port)
{
	u32 t;

	/* Disable automatic link polling; link state is managed by the
	 * qm work task instead.
	 */
	ar40xx_rmw(priv, AR40XX_REG_PORT_STATUS(port),
		   AR40XX_PORT_AUTO_LINK_EN, 0);

	/* CPU port is setting headers to limit output ports */
	if (port == 0)
		ar40xx_write(priv, AR40XX_REG_PORT_HEADER(port), 0x8);
	else
		ar40xx_write(priv, AR40XX_REG_PORT_HEADER(port), 0);

	ar40xx_write(priv, AR40XX_REG_PORT_VLAN0(port), 0);

	/* Leave egress frames untouched until VLANs are configured */
	t = AR40XX_PORT_VLAN1_OUT_MODE_UNTOUCH << AR40XX_PORT_VLAN1_OUT_MODE_S;
	ar40xx_write(priv, AR40XX_REG_PORT_VLAN1(port), t);

	t = AR40XX_PORT_LOOKUP_LEARN;
	t |= AR40XX_PORT_STATE_FORWARD << AR40XX_PORT_LOOKUP_STATE_S;
	ar40xx_write(priv, AR40XX_REG_PORT_LOOKUP(port), t);
}
1218
/* Program switch-global registers: forwarding control, jumbo frames,
 * MIB counters, EEE, CPU-port flow control thresholds and the service
 * tag ethertype.
 */
void
ar40xx_init_globals(struct ar40xx_priv *priv)
{
	u32 t;

	/* enable CPU port and disable mirror port */
	t = AR40XX_FWD_CTRL0_CPU_PORT_EN |
	    AR40XX_FWD_CTRL0_MIRROR_PORT;
	ar40xx_write(priv, AR40XX_REG_FWD_CTRL0, t);

	/* forward unknown unicast, multicast and broadcast frames to
	 * all ports (including CPU)
	 */
	t = (AR40XX_PORTS_ALL << AR40XX_FWD_CTRL1_UC_FLOOD_S) |
	    (AR40XX_PORTS_ALL << AR40XX_FWD_CTRL1_MC_FLOOD_S) |
	    (AR40XX_PORTS_ALL << AR40XX_FWD_CTRL1_BC_FLOOD_S);
	ar40xx_write(priv, AR40XX_REG_FWD_CTRL1, t);

	/* enable jumbo frames (9018 payload + header/tag overhead) */
	ar40xx_rmw(priv, AR40XX_REG_MAX_FRAME_SIZE,
		   AR40XX_MAX_FRAME_SIZE_MTU, 9018 + 8 + 2);

	/* Enable MIB counters */
	ar40xx_rmw(priv, AR40XX_REG_MODULE_EN, 0,
		   AR40XX_MODULE_EN_MIB);

	/* Disable AZ (Energy Efficient Ethernet) */
	ar40xx_write(priv, AR40XX_REG_EEE_CTRL, 0);

	/* set flow control threshold for cpu port */
	t = (AR40XX_PORT0_FC_THRESH_ON_DFLT << 16) |
	    AR40XX_PORT0_FC_THRESH_OFF_DFLT;
	ar40xx_write(priv, AR40XX_REG_PORT_FLOWCTRL_THRESH(0), t);

	/* set service tag to 802.1q */
	t = ETH_P_8021Q | AR40XX_ESS_SERVICE_TAG_STAG;
	ar40xx_write(priv, AR40XX_ESS_SERVICE_TAG, t);
}
1255
/* One-time Malibu (QCA8075) PHY tuning: PSGMII mode-control adjustment,
 * per-PHY DAC control, hibernation disable on the last PHY, and a
 * serdes TX amplitude reduction. Requires priv->mii_bus to be valid.
 */
static void
ar40xx_malibu_init(struct ar40xx_priv *priv)
{
	int i;
	struct mii_bus *bus;
	u16 val;

	bus = priv->mii_bus;

	/* war to enable AZ transmitting ability */
	ar40xx_phy_mmd_write(priv, AR40XX_PSGMII_ID, 1,
			     AR40XX_MALIBU_PSGMII_MODE_CTRL,
			     AR40XX_MALIBU_PHY_PSGMII_MODE_CTRL_ADJUST_VAL);
	for (i = 0; i < AR40XX_NUM_PORTS - 1; i++) {
		/* change malibu control_dac (read-modify-write of MMD7) */
		val = ar40xx_phy_mmd_read(priv, i, 7,
					  AR40XX_MALIBU_PHY_MMD7_DAC_CTRL);
		val &= ~AR40XX_MALIBU_DAC_CTRL_MASK;
		val |= AR40XX_MALIBU_DAC_CTRL_VALUE;
		ar40xx_phy_mmd_write(priv, i, 7,
				     AR40XX_MALIBU_PHY_MMD7_DAC_CTRL, val);
		if (i == AR40XX_MALIBU_PHY_LAST_ADDR) {
			/* clear bit 1 of the RLP control register on the
			 * last PHY to avoid it going into hibernation
			 */
			val = ar40xx_phy_mmd_read(priv, i, 3,
						  AR40XX_MALIBU_PHY_RLP_CTRL);
			val &= (~(1<<1));
			ar40xx_phy_mmd_write(priv, i, 3,
					     AR40XX_MALIBU_PHY_RLP_CTRL, val);
		}
	}

	/* adjust psgmii serdes tx amp */
	mdiobus_write(bus, AR40XX_PSGMII_ID, AR40XX_PSGMII_TX_DRIVER_1_CTRL,
		      AR40XX_MALIBU_PHY_PSGMII_REDUCE_SERDES_TX_AMP);
}
1291
1292 static int
1293 ar40xx_hw_init(struct ar40xx_priv *priv)
1294 {
1295 u32 i;
1296
1297 ar40xx_ess_reset(priv);
1298
1299 if (priv->mii_bus)
1300 ar40xx_malibu_init(priv);
1301 else
1302 return -1;
1303
1304 ar40xx_psgmii_self_test(priv);
1305 ar40xx_psgmii_self_test_clean(priv);
1306
1307 ar40xx_mac_mode_init(priv, priv->mac_mode);
1308
1309 for (i = 0; i < priv->dev.ports; i++)
1310 ar40xx_init_port(priv, i);
1311
1312 ar40xx_init_globals(priv);
1313
1314 return 0;
1315 }
1316
1317 /* Start of qm error WAR */
1318
1319 static
1320 int ar40xx_force_1g_full(struct ar40xx_priv *priv, u32 port_id)
1321 {
1322 u32 reg;
1323
1324 if (port_id < 0 || port_id > 6)
1325 return -1;
1326
1327 reg = AR40XX_REG_PORT_STATUS(port_id);
1328 return ar40xx_rmw(priv, reg, AR40XX_PORT_SPEED,
1329 (AR40XX_PORT_SPEED_1000M | AR40XX_PORT_DUPLEX));
1330 }
1331
1332 static
1333 int ar40xx_get_qm_status(struct ar40xx_priv *priv,
1334 u32 port_id, u32 *qm_buffer_err)
1335 {
1336 u32 reg;
1337 u32 qm_val;
1338
1339 if (port_id < 1 || port_id > 5) {
1340 *qm_buffer_err = 0;
1341 return -1;
1342 }
1343
1344 if (port_id < 4) {
1345 reg = AR40XX_REG_QM_PORT0_3_QNUM;
1346 ar40xx_write(priv, AR40XX_REG_QM_DEBUG_ADDR, reg);
1347 qm_val = ar40xx_read(priv, AR40XX_REG_QM_DEBUG_VALUE);
1348 /* every 8 bits for each port */
1349 *qm_buffer_err = (qm_val >> (port_id * 8)) & 0xFF;
1350 } else {
1351 reg = AR40XX_REG_QM_PORT4_6_QNUM;
1352 ar40xx_write(priv, AR40XX_REG_QM_DEBUG_ADDR, reg);
1353 qm_val = ar40xx_read(priv, AR40XX_REG_QM_DEBUG_VALUE);
1354 /* every 8 bits for each port */
1355 *qm_buffer_err = (qm_val >> ((port_id-4) * 8)) & 0xFF;
1356 }
1357
1358 return 0;
1359 }
1360
/* Periodic link/QM polling state machine (the "qm error WAR").
 *
 * For every front-panel port (1..AR40XX_NUM_PORTS-1, whose PHY sits at
 * MDIO address port-1) this reads the PHY specific-status register and
 * reacts to link transitions:
 *
 *  - Up -> Down: disable the MAC's auto link mode and check the queue
 *    manager. If the QM still holds frames, remember that so it can be
 *    drained on later polls; otherwise force 1G full duplex and clear
 *    the manual-control debug bit.
 *  - Down -> Up: debounce for one poll interval (port_link_up), then
 *    copy the negotiated speed/duplex into the MAC, re-enable auto
 *    link, and at 100M set the manual-control debug bit.
 *
 * Independently, any port whose QM was previously non-empty is
 * re-checked and forced back to 1G full once it drains.
 *
 * Called from the qm delayed work with qm_lock held.
 */
static void
ar40xx_sw_mac_polling_task(struct ar40xx_priv *priv)
{
	static int task_count;
	u32 i;
	u32 reg, value;
	u32 link, speed, duplex;
	u32 qm_buffer_err;
	u16 port_phy_status[AR40XX_NUM_PORTS];
	/* per-port counters persist across polls */
	static u32 qm_err_cnt[AR40XX_NUM_PORTS] = {0, 0, 0, 0, 0, 0};
	static u32 link_cnt[AR40XX_NUM_PORTS] = {0, 0, 0, 0, 0, 0};
	struct mii_bus *bus = NULL;

	if (!priv || !priv->mii_bus)
		return;

	bus = priv->mii_bus;

	++task_count;

	for (i = 1; i < AR40XX_NUM_PORTS; ++i) {
		/* port i is served by the PHY at MDIO address i-1 */
		port_phy_status[i] =
			mdiobus_read(bus, i-1, AR40XX_PHY_SPEC_STATUS);
		/* extract speed (bits 15:14), link (bit 10) and
		 * duplex (bit 13) from the same status word
		 */
		speed = link = duplex = port_phy_status[i];
		speed &= AR40XX_PHY_SPEC_STATUS_SPEED;
		speed >>= 14;
		link &= AR40XX_PHY_SPEC_STATUS_LINK;
		link >>= 10;
		duplex &= AR40XX_PHY_SPEC_STATUS_DUPLEX;
		duplex >>= 13;

		if (link != priv->ar40xx_port_old_link[i]) {
			++link_cnt[i];
			/* Up --> Down */
			if ((priv->ar40xx_port_old_link[i] ==
					AR40XX_PORT_LINK_UP) &&
			    (link == AR40XX_PORT_LINK_DOWN)) {
				/* LINK_EN disable (MAC force mode) */
				reg = AR40XX_REG_PORT_STATUS(i);
				ar40xx_rmw(priv, reg,
						AR40XX_PORT_AUTO_LINK_EN, 0);

				/* Check queue buffer */
				qm_err_cnt[i] = 0;
				ar40xx_get_qm_status(priv, i, &qm_buffer_err);
				if (qm_buffer_err) {
					priv->ar40xx_port_qm_buf[i] =
						AR40XX_QM_NOT_EMPTY;
				} else {
					u16 phy_val = 0;

					priv->ar40xx_port_qm_buf[i] =
						AR40XX_QM_EMPTY;
					ar40xx_force_1g_full(priv, i);
					/* Ref: QCA8337 datasheet. Clearing
					 * MANU_CTRL_EN prevents the phy
					 * from getting stuck in 100BT mode
					 * when bringing up the link.
					 */
					ar40xx_phy_dbg_read(priv, i-1,
							    AR40XX_PHY_DEBUG_0,
							    &phy_val);
					phy_val &= (~AR40XX_PHY_MANU_CTRL_EN);
					ar40xx_phy_dbg_write(priv, i-1,
							     AR40XX_PHY_DEBUG_0,
							     phy_val);
				}
				priv->ar40xx_port_old_link[i] = link;
			} else if ((priv->ar40xx_port_old_link[i] ==
						AR40XX_PORT_LINK_DOWN) &&
					(link == AR40XX_PORT_LINK_UP)) {
				/* Down --> Up */
				if (priv->port_link_up[i] < 1) {
					/* debounce: wait one more poll */
					++priv->port_link_up[i];
				} else {
					/* Change port status */
					reg = AR40XX_REG_PORT_STATUS(i);
					value = ar40xx_read(priv, reg);
					priv->port_link_up[i] = 0;

					/* copy PHY speed/duplex into MAC */
					value &= ~(AR40XX_PORT_DUPLEX |
						   AR40XX_PORT_SPEED);
					value |= speed | (duplex ? BIT(6) : 0);
					ar40xx_write(priv, reg, value);
					/* clock switch need such time
					 * to avoid glitch
					 */
					usleep_range(100, 200);

					value |= AR40XX_PORT_AUTO_LINK_EN;
					ar40xx_write(priv, reg, value);
					/* HW need such time to make sure link
					 * stable before enable MAC
					 */
					usleep_range(100, 200);

					if (speed == AR40XX_PORT_SPEED_100M) {
						u16 phy_val = 0;
						/* Enable @100M, if down to 10M
						 * clock will change smoothly
						 */
						ar40xx_phy_dbg_read(priv, i-1,
								    0,
								    &phy_val);
						phy_val |=
							AR40XX_PHY_MANU_CTRL_EN;
						ar40xx_phy_dbg_write(priv, i-1,
								     0,
								     phy_val);
					}
					priv->ar40xx_port_old_link[i] = link;
				}
			}
		}

		if (priv->ar40xx_port_qm_buf[i] == AR40XX_QM_NOT_EMPTY) {
			/* Check QM; once drained, re-force 1G full */
			ar40xx_get_qm_status(priv, i, &qm_buffer_err);
			if (qm_buffer_err) {
				++qm_err_cnt[i];
			} else {
				priv->ar40xx_port_qm_buf[i] =
					AR40XX_QM_EMPTY;
				qm_err_cnt[i] = 0;
				ar40xx_force_1g_full(priv, i);
			}
		}
	}
}
1490
/* Delayed-work entry point for the qm error WAR: run one polling pass
 * under qm_lock, then re-arm itself after AR40XX_QM_WORK_DELAY ms.
 */
static void
ar40xx_qm_err_check_work_task(struct work_struct *work)
{
	struct ar40xx_priv *priv = container_of(work, struct ar40xx_priv,
						qm_dwork.work);

	mutex_lock(&priv->qm_lock);

	ar40xx_sw_mac_polling_task(priv);

	mutex_unlock(&priv->qm_lock);

	/* self-rearming: cancelled in ar40xx_remove() */
	schedule_delayed_work(&priv->qm_dwork,
			      msecs_to_jiffies(AR40XX_QM_WORK_DELAY));
}
1506
/* Initialize the qm lock/work and schedule the first polling pass.
 * Returns 0 (always succeeds).
 */
static int
ar40xx_qm_err_check_work_start(struct ar40xx_priv *priv)
{
	mutex_init(&priv->qm_lock);

	INIT_DELAYED_WORK(&priv->qm_dwork, ar40xx_qm_err_check_work_task);

	schedule_delayed_work(&priv->qm_dwork,
			      msecs_to_jiffies(AR40XX_QM_WORK_DELAY));

	return 0;
}
1519
1520 /* End of qm error WAR */
1521
1522 static int
1523 ar40xx_vlan_init(struct ar40xx_priv *priv)
1524 {
1525 int port;
1526 unsigned long bmp;
1527
1528 /* By default Enable VLAN */
1529 priv->vlan = 1;
1530 priv->vlan_table[AR40XX_LAN_VLAN] = priv->cpu_bmp | priv->lan_bmp;
1531 priv->vlan_table[AR40XX_WAN_VLAN] = priv->cpu_bmp | priv->wan_bmp;
1532 priv->vlan_tagged = priv->cpu_bmp;
1533 bmp = priv->lan_bmp;
1534 for_each_set_bit(port, &bmp, AR40XX_NUM_PORTS)
1535 priv->pvid[port] = AR40XX_LAN_VLAN;
1536
1537 bmp = priv->wan_bmp;
1538 for_each_set_bit(port, &bmp, AR40XX_NUM_PORTS)
1539 priv->pvid[port] = AR40XX_WAN_VLAN;
1540
1541 return 0;
1542 }
1543
/* Delayed work that round-robins MIB collection: capture the counters,
 * fetch them for one port per invocation, then re-arm. On a capture
 * error the port cursor still advances so one bad port cannot stall
 * the rotation.
 */
static void
ar40xx_mib_work_func(struct work_struct *work)
{
	struct ar40xx_priv *priv;
	int err;

	priv = container_of(work, struct ar40xx_priv, mib_work.work);

	mutex_lock(&priv->mib_lock);

	err = ar40xx_mib_capture(priv);
	if (err)
		goto next_port;

	ar40xx_mib_fetch_port_stat(priv, priv->mib_next_port, false);

next_port:
	/* advance the cursor, wrapping after the last port */
	priv->mib_next_port++;
	if (priv->mib_next_port >= priv->dev.ports)
		priv->mib_next_port = 0;

	mutex_unlock(&priv->mib_lock);

	schedule_delayed_work(&priv->mib_work,
			      msecs_to_jiffies(AR40XX_MIB_WORK_DELAY));
}
1570
/* Apply the software VLAN configuration to one port's hardware
 * registers: default S/C VIDs, egress tagging mode, ingress filtering
 * mode and the port-lookup membership mask.
 *
 * @members: bitmask of ports this port may forward to.
 */
static void
ar40xx_setup_port(struct ar40xx_priv *priv, int port, u32 members)
{
	u32 t;
	u32 egress, ingress;
	u32 pvid = priv->vlan_id[priv->pvid[port]];

	if (priv->vlan) {
		/* tagged members emit tags; untagged members emit the
		 * frame unmodified
		 */
		if (priv->vlan_tagged & BIT(port))
			egress = AR40XX_PORT_VLAN1_OUT_MODE_TAG;
		else
			egress = AR40XX_PORT_VLAN1_OUT_MODE_UNMOD;

		ingress = AR40XX_IN_SECURE;
	} else {
		/* VLAN disabled: pass frames through untouched */
		egress = AR40XX_PORT_VLAN1_OUT_MODE_UNTOUCH;
		ingress = AR40XX_IN_PORT_ONLY;
	}

	/* same pvid as both default service and customer VID */
	t = pvid << AR40XX_PORT_VLAN0_DEF_SVID_S;
	t |= pvid << AR40XX_PORT_VLAN0_DEF_CVID_S;
	ar40xx_write(priv, AR40XX_REG_PORT_VLAN0(port), t);

	t = egress << AR40XX_PORT_VLAN1_OUT_MODE_S;

	/* set CPU port to core port */
	if (port == 0)
		t |= AR40XX_PORT_VLAN1_CORE_PORT;

	if (priv->vlan_tagged & BIT(port))
		t |= AR40XX_PORT_VLAN1_PORT_VLAN_PROP;
	else
		t |= AR40XX_PORT_VLAN1_PORT_TLS_MODE;

	ar40xx_write(priv, AR40XX_REG_PORT_VLAN1(port), t);

	t = members;
	t |= AR40XX_PORT_LOOKUP_LEARN;
	t |= ingress << AR40XX_PORT_LOOKUP_IN_MODE_S;
	t |= AR40XX_PORT_STATE_FORWARD << AR40XX_PORT_LOOKUP_STATE_S;
	ar40xx_write(priv, AR40XX_REG_PORT_LOOKUP(port), t);
}
1613
/* Issue one VLAN-table (VTU) operation. Waits for any previous
 * operation to finish, writes the data word for LOAD operations, then
 * kicks the operation by setting the busy bit. Silently gives up if
 * the busy bit never clears.
 */
static void
ar40xx_vtu_op(struct ar40xx_priv *priv, u32 op, u32 val)
{
	if (ar40xx_wait_bit(priv, AR40XX_REG_VTU_FUNC1,
			    AR40XX_VTU_FUNC1_BUSY, 0))
		return;

	/* only LOAD operations carry a data word in FUNC0 */
	if ((op & AR40XX_VTU_FUNC1_OP) == AR40XX_VTU_FUNC1_OP_LOAD)
		ar40xx_write(priv, AR40XX_REG_VTU_FUNC0, val);

	op |= AR40XX_VTU_FUNC1_BUSY;
	ar40xx_write(priv, AR40XX_REG_VTU_FUNC1, op);
}
1627
/* Load one VLAN entry into the VTU, computing the per-port egress mode:
 * non-members send nothing, with VLAN disabled frames are kept as-is,
 * tagged members (or ports whose PVID differs from this vid) emit
 * tagged frames, and everyone else emits untagged frames.
 */
static void
ar40xx_vtu_load_vlan(struct ar40xx_priv *priv, u32 vid, u32 port_mask)
{
	u32 op;
	u32 val;
	int i;

	op = AR40XX_VTU_FUNC1_OP_LOAD | (vid << AR40XX_VTU_FUNC1_VID_S);
	val = AR40XX_VTU_FUNC0_VALID | AR40XX_VTU_FUNC0_IVL;
	for (i = 0; i < AR40XX_NUM_PORTS; i++) {
		u32 mode;

		if ((port_mask & BIT(i)) == 0)
			mode = AR40XX_VTU_FUNC0_EG_MODE_NOT;
		else if (priv->vlan == 0)
			mode = AR40XX_VTU_FUNC0_EG_MODE_KEEP;
		else if ((priv->vlan_tagged & BIT(i)) ||
			 (priv->vlan_id[priv->pvid[i]] != vid))
			mode = AR40XX_VTU_FUNC0_EG_MODE_TAG;
		else
			mode = AR40XX_VTU_FUNC0_EG_MODE_UNTAG;

		val |= mode << AR40XX_VTU_FUNC0_EG_MODE_S(i);
	}
	ar40xx_vtu_op(priv, op, val);
}
1654
/* Drop every entry from the hardware VLAN table. */
static void
ar40xx_vtu_flush(struct ar40xx_priv *priv)
{
	ar40xx_vtu_op(priv, AR40XX_VTU_FUNC1_OP_FLUSH, 0);
}
1660
/* swconfig apply_config hook: push the software VLAN state to hardware.
 * Flushes the VTU, rebuilds each port's allowed-destination mask from
 * the VLAN membership tables (or, with VLAN disabled, wires every port
 * to the CPU port only), reloads the VTU entries, programs the ports
 * and refreshes the mirror configuration.
 *
 * Returns 0 (always succeeds). Serialized by reg_mutex.
 */
static int
ar40xx_sw_hw_apply(struct switch_dev *dev)
{
	struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
	u8 portmask[AR40XX_NUM_PORTS];
	int i, j;

	mutex_lock(&priv->reg_mutex);
	/* flush all vlan entries */
	ar40xx_vtu_flush(priv);

	memset(portmask, 0, sizeof(portmask));
	if (priv->vlan) {
		/* a port may reach every other member of each VLAN it
		 * belongs to
		 */
		for (j = 0; j < AR40XX_MAX_VLANS; j++) {
			u8 vp = priv->vlan_table[j];

			if (!vp)
				continue;

			for (i = 0; i < dev->ports; i++) {
				u8 mask = BIT(i);

				if (vp & mask)
					portmask[i] |= vp & ~mask;
			}

			ar40xx_vtu_load_vlan(priv, priv->vlan_id[j],
					     priv->vlan_table[j]);
		}
	} else {
		/* 8021q vlan disabled: star topology around the CPU port */
		for (i = 0; i < dev->ports; i++) {
			if (i == AR40XX_PORT_CPU)
				continue;

			portmask[i] = BIT(AR40XX_PORT_CPU);
			portmask[AR40XX_PORT_CPU] |= BIT(i);
		}
	}

	/* update the port destination mask registers and tag settings */
	for (i = 0; i < dev->ports; i++)
		ar40xx_setup_port(priv, i, portmask[i]);

	ar40xx_set_mirror_regs(priv);

	mutex_unlock(&priv->reg_mutex);
	return 0;
}
1710
/* swconfig reset_switch hook: wipe all software configuration back to
 * defaults and re-apply it to hardware.
 *
 * The memset deliberately zeroes only from the ->vlan member to the
 * end of struct ar40xx_priv; everything laid out before ->vlan
 * (mutexes, pointers, work structs) is preserved, so field order in
 * the struct definition matters here.
 */
static int
ar40xx_sw_reset_switch(struct switch_dev *dev)
{
	struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
	int i, rv;

	mutex_lock(&priv->reg_mutex);
	memset(&priv->vlan, 0, sizeof(struct ar40xx_priv) -
	       offsetof(struct ar40xx_priv, vlan));

	/* identity mapping: VLAN slot i uses VID i */
	for (i = 0; i < AR40XX_MAX_VLANS; i++)
		priv->vlan_id[i] = i;

	ar40xx_vlan_init(priv);

	priv->mirror_rx = false;
	priv->mirror_tx = false;
	priv->source_port = 0;
	priv->monitor_port = 0;

	mutex_unlock(&priv->reg_mutex);

	rv = ar40xx_sw_hw_apply(dev);
	return rv;
}
1736
/* Bring the whole switch up: hardware init, software reset+apply,
 * CPU port setup, then start the MIB and qm-error polling works.
 *
 * Returns 0 on success or the first failing step's error code.
 */
static int
ar40xx_start(struct ar40xx_priv *priv)
{
	int ret;

	ret = ar40xx_hw_init(priv);
	if (ret)
		return ret;

	ret = ar40xx_sw_reset_switch(&priv->dev);
	if (ret)
		return ret;

	/* at last, setup cpu port */
	ret = ar40xx_cpuport_setup(priv);
	if (ret)
		return ret;

	schedule_delayed_work(&priv->mib_work,
			      msecs_to_jiffies(AR40XX_MIB_WORK_DELAY));

	ar40xx_qm_err_check_work_start(priv);

	return 0;
}
1762
/* swconfig operations table; attribute arrays and the handlers below
 * are defined elsewhere in this file / ar40xx.h.
 */
static const struct switch_dev_ops ar40xx_sw_ops = {
	.attr_global = {
		.attr = ar40xx_sw_attr_globals,
		.n_attr = ARRAY_SIZE(ar40xx_sw_attr_globals),
	},
	.attr_port = {
		.attr = ar40xx_sw_attr_port,
		.n_attr = ARRAY_SIZE(ar40xx_sw_attr_port),
	},
	.attr_vlan = {
		.attr = ar40xx_sw_attr_vlan,
		.n_attr = ARRAY_SIZE(ar40xx_sw_attr_vlan),
	},
	.get_port_pvid = ar40xx_sw_get_pvid,
	.set_port_pvid = ar40xx_sw_set_pvid,
	.get_vlan_ports = ar40xx_sw_get_ports,
	.set_vlan_ports = ar40xx_sw_set_ports,
	.apply_config = ar40xx_sw_hw_apply,
	.reset_switch = ar40xx_sw_reset_switch,
	.get_port_link = ar40xx_sw_get_port_link,
};
1784
1785 /* Start of phy driver support */
1786
/* PHY IDs (PHYSID1:PHYSID2) accepted as AR40xx-internal PHYs */
static const u32 ar40xx_phy_ids[] = {
	0x004dd0b1,
	0x004dd0b2, /* AR40xx */
};
1791
1792 static bool
1793 ar40xx_phy_match(u32 phy_id)
1794 {
1795 int i;
1796
1797 for (i = 0; i < ARRAY_SIZE(ar40xx_phy_ids); i++)
1798 if (phy_id == ar40xx_phy_ids[i])
1799 return true;
1800
1801 return false;
1802 }
1803
1804 static bool
1805 is_ar40xx_phy(struct mii_bus *bus)
1806 {
1807 unsigned i;
1808
1809 for (i = 0; i < 4; i++) {
1810 u32 phy_id;
1811
1812 phy_id = mdiobus_read(bus, i, MII_PHYSID1) << 16;
1813 phy_id |= mdiobus_read(bus, i, MII_PHYSID2);
1814 if (!ar40xx_phy_match(phy_id))
1815 return false;
1816 }
1817
1818 return true;
1819 }
1820
/* phy_driver probe hook. Rejects buses that are not the AR40xx
 * internal bus, then records the bus in the (module-global) driver
 * state; the PHY at address 0 is additionally remembered as the
 * representative PHY device.
 */
static int
ar40xx_phy_probe(struct phy_device *phydev)
{
	if (!is_ar40xx_phy(phydev->mdio.bus))
		return -ENODEV;

	ar40xx_priv->mii_bus = phydev->mdio.bus;
	phydev->priv = ar40xx_priv;
	if (phydev->mdio.addr == 0)
		ar40xx_priv->phy = phydev;

	linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, phydev->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, phydev->advertising);
	return 0;
}
1836
/* phy_driver remove hook: detach the bus from the global driver state. */
static void
ar40xx_phy_remove(struct phy_device *phydev)
{
	ar40xx_priv->mii_bus = NULL;
	phydev->priv = NULL;
}
1843
/* phy_driver config_init hook: nothing to do, all initialization is
 * done by the switch driver itself.
 */
static int
ar40xx_phy_config_init(struct phy_device *phydev)
{
	return 0;
}
1849
1850 static int
1851 ar40xx_phy_read_status(struct phy_device *phydev)
1852 {
1853 if (phydev->mdio.addr != 0)
1854 return genphy_read_status(phydev);
1855
1856 return 0;
1857 }
1858
1859 static int
1860 ar40xx_phy_config_aneg(struct phy_device *phydev)
1861 {
1862 if (phydev->mdio.addr == 0)
1863 return 0;
1864
1865 return genphy_config_aneg(phydev);
1866 }
1867
/* PHY driver matching the QCA Malibu (0x004dxxxx) PHY ID range */
static struct phy_driver ar40xx_phy_driver = {
	.phy_id		= 0x004d0000,
	.name		= "QCA Malibu",
	.phy_id_mask	= 0xffff0000,
	.features	= PHY_GBIT_FEATURES,
	.probe		= ar40xx_phy_probe,
	.remove		= ar40xx_phy_remove,
	.config_init	= ar40xx_phy_config_init,
	.config_aneg	= ar40xx_phy_config_aneg,
	.read_status	= ar40xx_phy_read_status,
};
1879
/* Map a GPIO offset to the PHY address that owns it (4 GPIOs per PHY). */
static uint16_t ar40xx_gpio_get_phy(unsigned int offset)
{
	return (uint16_t)(offset >> 2);
}
1884
/* Map a GPIO offset to its MMD7 register: 0x8074..0x8077, cycling
 * every 4 offsets (one register per GPIO within a PHY).
 */
static uint16_t ar40xx_gpio_get_reg(unsigned int offset)
{
	return (uint16_t)(0x8074 + (offset & 3));
}
1889
/* gpio_chip .set hook: drive a PHY-LED pin via MMD7. Writes 0xA000
 * for a high level and 0x8000 for low — presumably force-output mode
 * bits of the LED control registers; confirm against the Malibu
 * datasheet.
 */
static void ar40xx_gpio_set(struct gpio_chip *gc, unsigned int offset,
			    int value)
{
	struct ar40xx_priv *priv = gpiochip_get_data(gc);

	ar40xx_phy_mmd_write(priv, ar40xx_gpio_get_phy(offset), 0x7,
			     ar40xx_gpio_get_reg(offset),
			     value ? 0xA000 : 0x8000);
}
1899
/* gpio_chip .get hook: read back the register written by
 * ar40xx_gpio_set() and report high only on an exact 0xA000 match.
 */
static int ar40xx_gpio_get(struct gpio_chip *gc, unsigned offset)
{
	struct ar40xx_priv *priv = gpiochip_get_data(gc);

	return ar40xx_phy_mmd_read(priv, ar40xx_gpio_get_phy(offset), 0x7,
				   ar40xx_gpio_get_reg(offset)) == 0xA000;
}
1907
/* gpio_chip .get_direction hook: 0 means output; these pins support
 * only the out direction.
 */
static int ar40xx_gpio_get_dir(struct gpio_chip *gc, unsigned offset)
{
	return 0; /* only out direction */
}
1912
/* gpio_chip .direction_output hook. */
static int ar40xx_gpio_dir_out(struct gpio_chip *gc, unsigned offset,
			       int value)
{
	/*
	 * the direction out value is used to set the initial value.
	 * support of this function is required by leds-gpio.c
	 */
	ar40xx_gpio_set(gc, offset, value);
	return 0;
}
1923
1924 static void ar40xx_register_gpio(struct device *pdev,
1925 struct ar40xx_priv *priv,
1926 struct device_node *switch_node)
1927 {
1928 struct gpio_chip *gc;
1929 int err;
1930
1931 gc = devm_kzalloc(pdev, sizeof(*gc), GFP_KERNEL);
1932 if (!gc)
1933 return;
1934
1935 gc->label = "ar40xx_gpio",
1936 gc->base = -1,
1937 gc->ngpio = 5 /* mmd 0 - 4 */ * 4 /* 0x8074 - 0x8077 */,
1938 gc->parent = pdev;
1939 gc->owner = THIS_MODULE;
1940
1941 gc->get_direction = ar40xx_gpio_get_dir;
1942 gc->direction_output = ar40xx_gpio_dir_out;
1943 gc->get = ar40xx_gpio_get;
1944 gc->set = ar40xx_gpio_set;
1945 gc->can_sleep = true;
1946 gc->label = priv->dev.name;
1947 gc->of_node = switch_node;
1948
1949 err = devm_gpiochip_add_data(pdev, gc, priv);
1950 if (err != 0)
1951 dev_err(pdev, "Failed to register gpio %d.\n", err);
1952 }
1953
1954 /* End of phy driver support */
1955
1956 /* Platform driver probe function */
1957
1958 static int ar40xx_probe(struct platform_device *pdev)
1959 {
1960 struct device_node *switch_node;
1961 struct device_node *psgmii_node;
1962 const __be32 *mac_mode;
1963 struct clk *ess_clk;
1964 struct switch_dev *swdev;
1965 struct ar40xx_priv *priv;
1966 u32 len;
1967 u32 num_mibs;
1968 struct resource psgmii_base = {0};
1969 struct resource switch_base = {0};
1970 int ret;
1971
1972 priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
1973 if (!priv)
1974 return -ENOMEM;
1975
1976 platform_set_drvdata(pdev, priv);
1977 ar40xx_priv = priv;
1978
1979 switch_node = of_node_get(pdev->dev.of_node);
1980 if (of_address_to_resource(switch_node, 0, &switch_base) != 0)
1981 return -EIO;
1982
1983 priv->hw_addr = devm_ioremap_resource(&pdev->dev, &switch_base);
1984 if (IS_ERR(priv->hw_addr)) {
1985 dev_err(&pdev->dev, "Failed to ioremap switch_base!\n");
1986 return PTR_ERR(priv->hw_addr);
1987 }
1988
1989 /*psgmii dts get*/
1990 psgmii_node = of_find_node_by_name(NULL, "ess-psgmii");
1991 if (!psgmii_node) {
1992 dev_err(&pdev->dev, "Failed to find ess-psgmii node!\n");
1993 return -EINVAL;
1994 }
1995
1996 if (of_address_to_resource(psgmii_node, 0, &psgmii_base) != 0)
1997 return -EIO;
1998
1999 priv->psgmii_hw_addr = devm_ioremap_resource(&pdev->dev, &psgmii_base);
2000 if (IS_ERR(priv->psgmii_hw_addr)) {
2001 dev_err(&pdev->dev, "psgmii ioremap fail!\n");
2002 return PTR_ERR(priv->psgmii_hw_addr);
2003 }
2004
2005 mac_mode = of_get_property(switch_node, "switch_mac_mode", &len);
2006 if (!mac_mode) {
2007 dev_err(&pdev->dev, "Failed to read switch_mac_mode\n");
2008 return -EINVAL;
2009 }
2010 priv->mac_mode = be32_to_cpup(mac_mode);
2011
2012 ess_clk = of_clk_get_by_name(switch_node, "ess_clk");
2013 if (ess_clk)
2014 clk_prepare_enable(ess_clk);
2015
2016 priv->ess_rst = devm_reset_control_get(&pdev->dev, "ess_rst");
2017 if (IS_ERR(priv->ess_rst)) {
2018 dev_err(&pdev->dev, "Failed to get ess_rst control!\n");
2019 return PTR_ERR(priv->ess_rst);
2020 }
2021
2022 if (of_property_read_u32(switch_node, "switch_cpu_bmp",
2023 &priv->cpu_bmp) ||
2024 of_property_read_u32(switch_node, "switch_lan_bmp",
2025 &priv->lan_bmp) ||
2026 of_property_read_u32(switch_node, "switch_wan_bmp",
2027 &priv->wan_bmp)) {
2028 dev_err(&pdev->dev, "Failed to read port properties\n");
2029 return -EIO;
2030 }
2031
2032 ret = phy_driver_register(&ar40xx_phy_driver, THIS_MODULE);
2033 if (ret) {
2034 dev_err(&pdev->dev, "Failed to register ar40xx phy driver!\n");
2035 return -EIO;
2036 }
2037
2038 mutex_init(&priv->reg_mutex);
2039 mutex_init(&priv->mib_lock);
2040 INIT_DELAYED_WORK(&priv->mib_work, ar40xx_mib_work_func);
2041
2042 /* register switch */
2043 swdev = &priv->dev;
2044
2045 if (priv->mii_bus == NULL) {
2046 dev_err(&pdev->dev, "Probe failed - Missing PHYs!\n");
2047 ret = -ENODEV;
2048 goto err_missing_phy;
2049 }
2050
2051 swdev->alias = dev_name(&priv->mii_bus->dev);
2052
2053 swdev->cpu_port = AR40XX_PORT_CPU;
2054 swdev->name = "QCA AR40xx";
2055 swdev->vlans = AR40XX_MAX_VLANS;
2056 swdev->ports = AR40XX_NUM_PORTS;
2057 swdev->ops = &ar40xx_sw_ops;
2058 ret = register_switch(swdev, NULL);
2059 if (ret)
2060 goto err_unregister_phy;
2061
2062 num_mibs = ARRAY_SIZE(ar40xx_mibs);
2063 len = priv->dev.ports * num_mibs *
2064 sizeof(*priv->mib_stats);
2065 priv->mib_stats = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
2066 if (!priv->mib_stats) {
2067 ret = -ENOMEM;
2068 goto err_unregister_switch;
2069 }
2070
2071 ar40xx_start(priv);
2072
2073 if (of_property_read_bool(switch_node, "gpio-controller"))
2074 ar40xx_register_gpio(&pdev->dev, ar40xx_priv, switch_node);
2075
2076 return 0;
2077
2078 err_unregister_switch:
2079 unregister_switch(&priv->dev);
2080 err_unregister_phy:
2081 phy_driver_unregister(&ar40xx_phy_driver);
2082 err_missing_phy:
2083 platform_set_drvdata(pdev, NULL);
2084 return ret;
2085 }
2086
/* Platform driver remove: stop both delayed works before tearing down
 * the switch and PHY driver (the works re-arm themselves, so they must
 * be cancelled synchronously first).
 */
static int ar40xx_remove(struct platform_device *pdev)
{
	struct ar40xx_priv *priv = platform_get_drvdata(pdev);

	cancel_delayed_work_sync(&priv->qm_dwork);
	cancel_delayed_work_sync(&priv->mib_work);

	unregister_switch(&priv->dev);

	phy_driver_unregister(&ar40xx_phy_driver);

	return 0;
}
2100
/* DT match table.
 * NOTE(review): there is no MODULE_DEVICE_TABLE(of, ar40xx_of_mtable),
 * so module autoloading by OF modalias will not work — presumably the
 * driver is built-in or loaded explicitly; confirm.
 */
static const struct of_device_id ar40xx_of_mtable[] = {
	{.compatible = "qcom,ess-switch" },
	{}
};
2105
/* Platform driver, registered by module_platform_driver() below.
 * NOTE(review): deliberately non-static — presumably referenced from
 * another translation unit; confirm before adding static.
 */
struct platform_driver ar40xx_drv = {
	.probe = ar40xx_probe,
	.remove = ar40xx_remove,
	.driver = {
		.name    = "ar40xx",
		.of_match_table = ar40xx_of_mtable,
	},
};
2114
2115 module_platform_driver(ar40xx_drv);
2116
2117 MODULE_DESCRIPTION("IPQ40XX ESS driver");
2118 MODULE_LICENSE("Dual BSD/GPL");