1 From 83fe1ecb8ac6e0544ae74bf5a63806dcac768201 Mon Sep 17 00:00:00 2001
2 From: Biwen Li <biwen.li@nxp.com>
3 Date: Wed, 17 Apr 2019 18:58:45 +0800
4 Subject: [PATCH] mdio-phy: support layerscape
6 This is an integrated patch of mdio-phy for layerscape
8 Signed-off-by: Bhaskar Upadhaya <Bhaskar.Upadhaya@nxp.com>
9 Signed-off-by: Biwen Li <biwen.li@nxp.com>
10 Signed-off-by: Camelia Groza <camelia.groza@nxp.com>
11 Signed-off-by: Constantin Tudor <constantin.tudor@nxp.com>
12 Signed-off-by: costi <constantin.tudor@freescale.com>
13 Signed-off-by: Florin Chiculita <florinlaurentiu.chiculita@nxp.com>
14 Signed-off-by: Florinel Iordache <florinel.iordache@nxp.com>
15 Signed-off-by: Ioana Ciornei <ioana.ciornei@nxp.com>
16 Signed-off-by: Ioana Radulescu <ruxandra.radulescu@nxp.com>
17 Signed-off-by: Madalin Bucur <madalin.bucur@freescale.com>
18 Signed-off-by: Pankaj Bansal <pankaj.bansal@nxp.com>
19 Signed-off-by: Shaohui Xie <Shaohui.Xie@freescale.com>
20 Signed-off-by: Valentin Catalin Neacsu <valentin-catalin.neacsu@nxp.com>
21 Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
23 drivers/net/phy/Kconfig | 33 +
24 drivers/net/phy/Makefile | 5 +
25 drivers/net/phy/aquantia.c | 328 +++-
26 drivers/net/phy/at803x.c | 21 +
27 drivers/net/phy/fsl_backplane.c | 1780 ++++++++++++++++++++
28 drivers/net/phy/fsl_backplane.h | 41 +
29 drivers/net/phy/fsl_backplane_serdes_10g.c | 281 +++
30 drivers/net/phy/fsl_backplane_serdes_28g.c | 336 ++++
31 drivers/net/phy/inphi.c | 594 +++++++
32 drivers/net/phy/mdio-mux-multiplexer.c | 122 ++
33 drivers/net/phy/swphy.c | 1 +
34 include/linux/phy.h | 3 +
35 12 files changed, 3526 insertions(+), 19 deletions(-)
36 create mode 100644 drivers/net/phy/fsl_backplane.c
37 create mode 100644 drivers/net/phy/fsl_backplane.h
38 create mode 100644 drivers/net/phy/fsl_backplane_serdes_10g.c
39 create mode 100644 drivers/net/phy/fsl_backplane_serdes_28g.c
40 create mode 100644 drivers/net/phy/inphi.c
41 create mode 100644 drivers/net/phy/mdio-mux-multiplexer.c
43 --- a/drivers/net/phy/Kconfig
44 +++ b/drivers/net/phy/Kconfig
45 @@ -87,9 +87,27 @@ config MDIO_BUS_MUX_MMIOREG
47 Currently, only 8/16/32 bits registers are supported.
49 +config MDIO_BUS_MUX_MULTIPLEXER
50 + tristate "MDIO bus multiplexer using kernel multiplexer subsystem"
55 + This module provides a driver for MDIO bus multiplexer
56 + that is controlled via the kernel multiplexer subsystem. The
57 + bus multiplexer connects one of several child MDIO busses to
58 + a parent bus. Child bus selection is under the control of
59 + the kernel multiplexer subsystem.
64 +config MDIO_FSL_BACKPLANE
65 + tristate "Support for backplane on Freescale XFI interface"
68 + This module provides a driver for Freescale XFI's backplane.
71 tristate "GPIO lib-based bitbanged MDIO buses"
72 depends on MDIO_BITBANG && GPIOLIB
73 @@ -303,6 +321,16 @@ config AT803X_PHY
75 Currently supports the AT8030 and AT8035 model
77 +config AT803X_PHY_SMART_EEE
78 + depends on AT803X_PHY
80 + tristate "SmartEEE feature for AT803X PHYs"
82 + Enables the Atheros SmartEEE feature (not IEEE 802.3az). When 2 PHYs
83 + which support this feature are connected back-to-back, they may
84 + negotiate a low-power sleep mode autonomously, without the Ethernet
85 + controller's knowledge. May cause packet loss.
88 tristate "Broadcom 63xx SOCs internal PHY"
90 @@ -385,6 +413,11 @@ config ICPLUS_PHY
92 Currently supports the IP175C and IP1001 PHYs.
95 + tristate "Inphi CDR 10G/25G Ethernet PHY"
97 + Currently supports the IN112525_S03 part @ 25G
100 tristate "Intel XWAY PHYs"
102 --- a/drivers/net/phy/Makefile
103 +++ b/drivers/net/phy/Makefile
104 @@ -44,7 +44,11 @@ obj-$(CONFIG_MDIO_BUS_MUX) += mdio-mux.o
105 obj-$(CONFIG_MDIO_BUS_MUX_BCM_IPROC) += mdio-mux-bcm-iproc.o
106 obj-$(CONFIG_MDIO_BUS_MUX_GPIO) += mdio-mux-gpio.o
107 obj-$(CONFIG_MDIO_BUS_MUX_MMIOREG) += mdio-mux-mmioreg.o
108 +obj-$(CONFIG_MDIO_BUS_MUX_MULTIPLEXER) += mdio-mux-multiplexer.o
109 obj-$(CONFIG_MDIO_CAVIUM) += mdio-cavium.o
110 +obj-$(CONFIG_MDIO_FSL_BACKPLANE) += fsl_backplane.o
111 +obj-$(CONFIG_MDIO_FSL_BACKPLANE) += fsl_backplane_serdes_10g.o
112 +obj-$(CONFIG_MDIO_FSL_BACKPLANE) += fsl_backplane_serdes_28g.o
113 obj-$(CONFIG_MDIO_GPIO) += mdio-gpio.o
114 obj-$(CONFIG_MDIO_HISI_FEMAC) += mdio-hisi-femac.o
115 obj-$(CONFIG_MDIO_I2C) += mdio-i2c.o
116 @@ -75,6 +79,7 @@ obj-$(CONFIG_DP83848_PHY) += dp83848.o
117 obj-$(CONFIG_DP83867_PHY) += dp83867.o
118 obj-$(CONFIG_FIXED_PHY) += fixed_phy.o
119 obj-$(CONFIG_ICPLUS_PHY) += icplus.o
120 +obj-$(CONFIG_INPHI_PHY) += inphi.o
121 obj-$(CONFIG_INTEL_XWAY_PHY) += intel-xway.o
122 obj-$(CONFIG_LSI_ET1011C_PHY) += et1011c.o
123 obj-$(CONFIG_LXT_PHY) += lxt.o
124 --- a/drivers/net/phy/aquantia.c
125 +++ b/drivers/net/phy/aquantia.c
127 * Author: Shaohui Xie <Shaohui.Xie@freescale.com>
129 * Copyright 2015 Freescale Semiconductor, Inc.
130 + * Copyright 2018 NXP
132 * This file is licensed under the terms of the GNU General Public License
133 * version 2. This program is licensed "as is" without any warranty of any
136 #define PHY_AQUANTIA_FEATURES (SUPPORTED_10000baseT_Full | \
137 SUPPORTED_1000baseT_Full | \
138 + SUPPORTED_2500baseX_Full | \
139 SUPPORTED_100baseT_Full | \
140 + SUPPORTED_Pause | \
141 + SUPPORTED_Asym_Pause | \
142 PHY_DEFAULT_FEATURES)
144 +#define MDIO_PMA_CTRL1_AQ_SPEED10 0
145 +#define MDIO_PMA_CTRL1_AQ_SPEED2500 0x2058
146 +#define MDIO_PMA_CTRL1_AQ_SPEED5000 0x205c
147 +#define MDIO_PMA_CTRL2_AQ_2500BT 0x30
148 +#define MDIO_PMA_CTRL2_AQ_5000BT 0x31
149 +#define MDIO_PMA_CTRL2_AQ_TYPE_MASK 0x3F
151 +#define MDIO_AN_VENDOR_PROV_CTRL 0xc400
152 +#define MDIO_AN_RECV_LP_STATUS 0xe820
154 +#define MDIO_AN_LPA_PAUSE 0x20
155 +#define MDIO_AN_LPA_ASYM_PAUSE 0x10
156 +#define MDIO_AN_ADV_PAUSE 0x20
157 +#define MDIO_AN_ADV_ASYM_PAUSE 0x10
159 +static int aquantia_write_reg(struct phy_device *phydev, int devad,
160 + u32 regnum, u16 val)
162 + u32 addr = MII_ADDR_C45 | (devad << 16) | (regnum & 0xffff);
164 + return mdiobus_write(phydev->mdio.bus, phydev->mdio.addr, addr, val);
167 +static int aquantia_read_reg(struct phy_device *phydev, int devad, u32 regnum)
169 + u32 addr = MII_ADDR_C45 | (devad << 16) | (regnum & 0xffff);
171 + return mdiobus_read(phydev->mdio.bus, phydev->mdio.addr, addr);
174 +static int aquantia_pma_setup_forced(struct phy_device *phydev)
176 + int ctrl1, ctrl2, ret;
178 + /* Half duplex is not supported */
179 + if (phydev->duplex != DUPLEX_FULL)
182 + ctrl1 = aquantia_read_reg(phydev, MDIO_MMD_PMAPMD, MDIO_CTRL1);
186 + ctrl2 = aquantia_read_reg(phydev, MDIO_MMD_PMAPMD, MDIO_CTRL2);
190 + ctrl1 &= ~MDIO_CTRL1_SPEEDSEL;
191 + ctrl2 &= ~(MDIO_PMA_CTRL2_AQ_TYPE_MASK);
193 + switch (phydev->speed) {
195 + ctrl2 |= MDIO_PMA_CTRL2_10BT;
198 + ctrl1 |= MDIO_PMA_CTRL1_SPEED100;
199 + ctrl2 |= MDIO_PMA_CTRL2_100BTX;
202 + ctrl1 |= MDIO_PMA_CTRL1_SPEED1000;
203 + /* Assume 1000base-T */
204 + ctrl2 |= MDIO_PMA_CTRL2_1000BT;
207 + ctrl1 |= MDIO_CTRL1_SPEED10G;
208 + /* Assume 10Gbase-T */
209 + ctrl2 |= MDIO_PMA_CTRL2_10GBT;
212 + ctrl1 |= MDIO_PMA_CTRL1_AQ_SPEED2500;
213 + ctrl2 |= MDIO_PMA_CTRL2_AQ_2500BT;
216 + ctrl1 |= MDIO_PMA_CTRL1_AQ_SPEED5000;
217 + ctrl2 |= MDIO_PMA_CTRL2_AQ_5000BT;
223 + ret = aquantia_write_reg(phydev, MDIO_MMD_PMAPMD, MDIO_CTRL1, ctrl1);
227 + return aquantia_write_reg(phydev, MDIO_MMD_PMAPMD, MDIO_CTRL2, ctrl2);
230 +static int aquantia_aneg(struct phy_device *phydev, bool control)
232 + int reg = aquantia_read_reg(phydev, MDIO_MMD_AN, MDIO_CTRL1);
238 + reg |= MDIO_AN_CTRL1_ENABLE | MDIO_AN_CTRL1_RESTART;
240 + reg &= ~(MDIO_AN_CTRL1_ENABLE | MDIO_AN_CTRL1_RESTART);
242 + return aquantia_write_reg(phydev, MDIO_MMD_AN, MDIO_CTRL1, reg);
245 +static int aquantia_config_advert(struct phy_device *phydev)
248 + int oldadv, adv, oldadv1, adv1;
249 + int err, changed = 0;
251 + /* Only allow advertising what this PHY supports */
252 + phydev->advertising &= phydev->supported;
253 + advertise = phydev->advertising;
255 + /* Setup standard advertisement */
256 + oldadv = aquantia_read_reg(phydev, MDIO_MMD_AN,
257 + MDIO_AN_10GBT_CTRL);
261 + /* Aquantia vendor specific advertisements */
262 + oldadv1 = aquantia_read_reg(phydev, MDIO_MMD_AN,
263 + MDIO_AN_VENDOR_PROV_CTRL);
270 + /* 100BaseT_full is supported by default */
272 + if (advertise & ADVERTISED_1000baseT_Full)
274 + if (advertise & ADVERTISED_10000baseT_Full)
276 + if (advertise & ADVERTISED_2500baseX_Full)
279 + if (adv != oldadv) {
280 + err = aquantia_write_reg(phydev, MDIO_MMD_AN,
281 + MDIO_AN_10GBT_CTRL, adv);
286 + if (adv1 != oldadv1) {
287 + err = aquantia_write_reg(phydev, MDIO_MMD_AN,
288 + MDIO_AN_VENDOR_PROV_CTRL, adv1);
294 + /* advertise flow control */
295 + oldadv = aquantia_read_reg(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
299 + adv = oldadv & ~(MDIO_AN_ADV_PAUSE | MDIO_AN_ADV_ASYM_PAUSE);
300 + if (advertise & ADVERTISED_Pause)
301 + adv |= MDIO_AN_ADV_PAUSE;
302 + if (advertise & ADVERTISED_Asym_Pause)
303 + adv |= MDIO_AN_ADV_ASYM_PAUSE;
305 + if (adv != oldadv) {
306 + err = aquantia_write_reg(phydev, MDIO_MMD_AN,
307 + MDIO_AN_ADVERTISE, adv);
316 static int aquantia_config_aneg(struct phy_device *phydev)
320 phydev->supported = PHY_AQUANTIA_FEATURES;
321 - phydev->advertising = phydev->supported;
322 + if (phydev->autoneg == AUTONEG_DISABLE) {
323 + aquantia_pma_setup_forced(phydev);
324 + return aquantia_aneg(phydev, false);
328 + ret = aquantia_config_advert(phydev);
330 + /* restart autoneg */
331 + return aquantia_aneg(phydev, true);
336 static int aquantia_aneg_done(struct phy_device *phydev)
337 @@ -51,25 +237,26 @@ static int aquantia_config_intr(struct p
340 if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
341 - err = phy_write_mmd(phydev, MDIO_MMD_AN, 0xd401, 1);
342 + err = aquantia_write_reg(phydev, MDIO_MMD_AN, 0xd401, 1);
346 - err = phy_write_mmd(phydev, MDIO_MMD_VEND1, 0xff00, 1);
347 + err = aquantia_write_reg(phydev, MDIO_MMD_VEND1, 0xff00, 1);
351 - err = phy_write_mmd(phydev, MDIO_MMD_VEND1, 0xff01, 0x1001);
352 + err = aquantia_write_reg(phydev, MDIO_MMD_VEND1,
355 - err = phy_write_mmd(phydev, MDIO_MMD_AN, 0xd401, 0);
356 + err = aquantia_write_reg(phydev, MDIO_MMD_AN, 0xd401, 0);
360 - err = phy_write_mmd(phydev, MDIO_MMD_VEND1, 0xff00, 0);
361 + err = aquantia_write_reg(phydev, MDIO_MMD_VEND1, 0xff00, 0);
365 - err = phy_write_mmd(phydev, MDIO_MMD_VEND1, 0xff01, 0);
366 + err = aquantia_write_reg(phydev, MDIO_MMD_VEND1, 0xff01, 0);
370 @@ -79,42 +266,145 @@ static int aquantia_ack_interrupt(struct
374 - reg = phy_read_mmd(phydev, MDIO_MMD_AN, 0xcc01);
375 + reg = aquantia_read_reg(phydev, MDIO_MMD_AN, 0xcc01);
376 return (reg < 0) ? reg : 0;
379 +static int aquantia_read_advert(struct phy_device *phydev)
383 + /* Setup standard advertisement */
384 + adv = aquantia_read_reg(phydev, MDIO_MMD_AN,
385 + MDIO_AN_10GBT_CTRL);
387 + /* Aquantia vendor specific advertisements */
388 + adv1 = aquantia_read_reg(phydev, MDIO_MMD_AN,
389 + MDIO_AN_VENDOR_PROV_CTRL);
391 + /* 100BaseT_full is supported by default */
392 + phydev->advertising |= ADVERTISED_100baseT_Full;
395 + phydev->advertising |= ADVERTISED_10000baseT_Full;
397 + phydev->advertising &= ~ADVERTISED_10000baseT_Full;
399 + phydev->advertising |= ADVERTISED_1000baseT_Full;
401 + phydev->advertising &= ~ADVERTISED_1000baseT_Full;
403 + phydev->advertising |= ADVERTISED_2500baseX_Full;
405 + phydev->advertising &= ~ADVERTISED_2500baseX_Full;
407 + /* flow control advertisement */
408 + adv = aquantia_read_reg(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
409 + if (adv & MDIO_AN_ADV_PAUSE)
410 + phydev->advertising |= ADVERTISED_Pause;
412 + phydev->advertising &= ~ADVERTISED_Pause;
413 + if (adv & MDIO_AN_ADV_ASYM_PAUSE)
414 + phydev->advertising |= ADVERTISED_Asym_Pause;
416 + phydev->advertising &= ~ADVERTISED_Asym_Pause;
421 +static int aquantia_read_lp_advert(struct phy_device *phydev)
425 + /* Read standard link partner advertisement */
426 + adv = aquantia_read_reg(phydev, MDIO_MMD_AN,
430 + phydev->lp_advertising |= ADVERTISED_Autoneg |
431 + ADVERTISED_100baseT_Full;
433 + phydev->lp_advertising &= ~(ADVERTISED_Autoneg |
434 + ADVERTISED_100baseT_Full);
436 + /* Read standard link partner advertisement */
437 + adv = aquantia_read_reg(phydev, MDIO_MMD_AN,
438 + MDIO_AN_10GBT_STAT);
440 + /* Aquantia link partner advertisements */
441 + adv1 = aquantia_read_reg(phydev, MDIO_MMD_AN,
442 + MDIO_AN_RECV_LP_STATUS);
445 + phydev->lp_advertising |= ADVERTISED_10000baseT_Full;
447 + phydev->lp_advertising &= ~ADVERTISED_10000baseT_Full;
449 + phydev->lp_advertising |= ADVERTISED_1000baseT_Full;
451 + phydev->lp_advertising &= ~ADVERTISED_1000baseT_Full;
453 + phydev->lp_advertising |= ADVERTISED_2500baseX_Full;
455 + phydev->lp_advertising &= ~ADVERTISED_2500baseX_Full;
460 static int aquantia_read_status(struct phy_device *phydev)
464 - reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1);
465 - reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1);
466 + /* Read the link status twice; the bit is latching low */
467 + reg = aquantia_read_reg(phydev, MDIO_MMD_AN, MDIO_STAT1);
468 + reg = aquantia_read_reg(phydev, MDIO_MMD_AN, MDIO_STAT1);
470 if (reg & MDIO_STAT1_LSTATUS)
475 - reg = phy_read_mmd(phydev, MDIO_MMD_AN, 0xc800);
477 - reg = phy_read_mmd(phydev, MDIO_MMD_AN, 0xc800);
478 + reg = aquantia_read_reg(phydev, MDIO_MMD_PMAPMD, MDIO_CTRL1);
480 + if ((reg & MDIO_CTRL1_SPEEDSELEXT) == MDIO_CTRL1_SPEEDSELEXT)
481 + reg &= MDIO_CTRL1_SPEEDSEL;
483 + reg &= MDIO_CTRL1_SPEEDSELEXT;
487 + case MDIO_PMA_CTRL1_AQ_SPEED5000:
488 + phydev->speed = SPEED_5000;
490 + case MDIO_PMA_CTRL1_AQ_SPEED2500:
491 phydev->speed = SPEED_2500;
494 - phydev->speed = SPEED_1000;
495 + case MDIO_PMA_CTRL1_AQ_SPEED10:
496 + phydev->speed = SPEED_10;
499 + case MDIO_PMA_CTRL1_SPEED100:
500 phydev->speed = SPEED_100;
504 + case MDIO_PMA_CTRL1_SPEED1000:
505 + phydev->speed = SPEED_1000;
507 + case MDIO_CTRL1_SPEED10G:
508 phydev->speed = SPEED_10000;
511 + phydev->speed = SPEED_UNKNOWN;
515 phydev->duplex = DUPLEX_FULL;
517 + reg = aquantia_read_reg(phydev, MDIO_MMD_AN, MDIO_AN_LPA);
518 + phydev->pause = reg & MDIO_AN_LPA_PAUSE ? 1 : 0;
519 + phydev->asym_pause = reg & MDIO_AN_LPA_ASYM_PAUSE ? 1 : 0;
521 + aquantia_read_advert(phydev);
522 + aquantia_read_lp_advert(phydev);
527 --- a/drivers/net/phy/at803x.c
528 +++ b/drivers/net/phy/at803x.c
530 #define AT803X_DEBUG_REG_5 0x05
531 #define AT803X_DEBUG_TX_CLK_DLY_EN BIT(8)
533 +#define AT803X_LPI_EN BIT(8)
535 #define ATH8030_PHY_ID 0x004dd076
536 #define ATH8031_PHY_ID 0x004dd074
537 #define ATH8032_PHY_ID 0x004dd023
538 @@ -290,6 +292,19 @@ static void at803x_disable_smarteee(stru
539 phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, 0);
542 +static void at803x_enable_smart_eee(struct phy_device *phydev, int on)
546 + /* 5.1.11 Smart_eee control3 */
547 + value = phy_read_mmd(phydev, MDIO_MMD_PCS, 0x805D);
549 + value |= AT803X_LPI_EN;
551 + value &= ~AT803X_LPI_EN;
552 + phy_write_mmd(phydev, MDIO_MMD_PCS, 0x805D, value);
555 static int at803x_config_init(struct phy_device *phydev)
557 struct at803x_platform_data *pdata;
558 @@ -320,6 +335,12 @@ static int at803x_config_init(struct phy
562 +#ifdef CONFIG_AT803X_PHY_SMART_EEE
563 + at803x_enable_smart_eee(phydev, 1);
565 + at803x_enable_smart_eee(phydev, 0);
568 if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID ||
569 phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) {
570 ret = at803x_enable_rx_delay(phydev);
572 +++ b/drivers/net/phy/fsl_backplane.c
574 +// SPDX-License-Identifier: GPL-2.0+
576 + * DPAA backplane driver.
577 + * Author: Shaohui Xie <Shaohui.Xie@freescale.com>
578 + * Florinel Iordache <florinel.iordache@nxp.com>
580 + * Copyright 2015 Freescale Semiconductor, Inc.
581 + * Copyright 2018 NXP
583 + * Licensed under the GPL-2 or later.
586 +#include <linux/kernel.h>
587 +#include <linux/module.h>
588 +#include <linux/mii.h>
589 +#include <linux/mdio.h>
590 +#include <linux/ethtool.h>
591 +#include <linux/phy.h>
592 +#include <linux/io.h>
593 +#include <linux/of.h>
594 +#include <linux/of_net.h>
595 +#include <linux/of_address.h>
596 +#include <linux/of_platform.h>
597 +#include <linux/timer.h>
598 +#include <linux/delay.h>
599 +#include <linux/workqueue.h>
600 +#include <linux/netdevice.h>
602 +#include "fsl_backplane.h"
605 +/* PCS Device Identifier */
606 +#define PCS_PHY_DEVICE_ID 0x0083e400
607 +#define PCS_PHY_DEVICE_ID_MASK 0xffffffff
609 +/* 10G Long cables setup: 1 m to 2 m cables */
610 +#define RATIO_PREQ_10G 0x3
611 +#define RATIO_PST1Q_10G 0xd
612 +#define RATIO_EQ_10G 0x20
614 +/* 10G Short cables setup: up to 30 cm cable */
615 +//#define RATIO_PREQ_10G 0x3
616 +//#define RATIO_PST1Q_10G 0xa
617 +//#define RATIO_EQ_10G 0x29
619 +/* 40G Long cables setup: 1 m to 2 m cables */
620 +#define RATIO_PREQ_40G 0x2
621 +#define RATIO_PST1Q_40G 0xd
622 +#define RATIO_EQ_40G 0x20
624 +/* 40G Short cables setup: up to 30 cm cable */
625 +//#define RATIO_PREQ_40G 0x1
626 +//#define RATIO_PST1Q_40G 0x3
627 +//#define RATIO_EQ_40G 0x29
629 +/* LX2 2x40G default RCW setup */
630 +//#define RATIO_PREQ_40G 0x0
631 +//#define RATIO_PST1Q_40G 0x3
632 +//#define RATIO_EQ_40G 0x30
634 +/* Max/Min coefficient values */
635 +#define PRE_COE_MAX 0x0
636 +#define PRE_COE_MIN 0x8
637 +#define POST_COE_MAX 0x0
638 +#define POST_COE_MIN 0x10
639 +#define ZERO_COE_MAX 0x30
640 +#define ZERO_COE_MIN 0x0
642 +/* KR PMD defines */
643 +#define PMD_RESET 0x1
644 +#define PMD_STATUS_SUP_STAT 0x4
645 +#define PMD_STATUS_FRAME_LOCK 0x2
646 +#define TRAIN_EN 0x3
647 +#define TRAIN_DISABLE 0x1
651 +#define XFI_PCS_SR1 0x20
652 +#define KR_RX_LINK_STAT_MASK 0x1000
654 +/* KX PCS mode register */
655 +#define KX_PCS_IF_MODE 0x8014
657 +/* KX PCS mode register init value */
658 +#define KX_IF_MODE_INIT 0x8
660 +/* KX/KR AN registers */
661 +#define AN_CTRL_INIT 0x1200
662 +#define KX_AN_AD1_INIT 0x25
663 +#define KR_AN_AD1_INIT_10G 0x85
664 +#define KR_AN_AD1_INIT_40G 0x105
665 +#define AN_LNK_UP_MASK 0x4
666 +#define KR_AN_MASK_10G 0x8
667 +#define KR_AN_MASK_40G 0x20
668 +#define TRAIN_FAIL 0x8
669 +#define KR_AN_40G_MDIO_OFFSET 4
672 +#define XGKR_TIMEOUT 1050
673 +#define XGKR_DENY_RT_INTERVAL 3000
674 +#define XGKR_AN_WAIT_ITERATIONS 5
676 +/* XGKR Increment/Decrement Requests */
679 +#define TIMEOUT_LONG 3
680 +#define TIMEOUT_M1 3
683 +#define RX_READY_MASK 0x8000
684 +#define PRESET_MASK 0x2000
685 +#define INIT_MASK 0x1000
686 +#define COP1_MASK 0x30
687 +#define COP1_SHIFT 4
688 +#define COZ_MASK 0xc
690 +#define COM1_MASK 0x3
691 +#define COM1_SHIFT 0
692 +#define REQUEST_MASK 0x3f
693 +#define LD_ALL_MASK (PRESET_MASK | INIT_MASK | \
694 + COP1_MASK | COZ_MASK | COM1_MASK)
696 +/* Lanes definitions */
697 +#define MASTER_LANE 0
698 +#define SINGLE_LANE 0
699 +#define MAX_PHY_LANES_NO 4
702 +#define VAL_INVALID 0xff
704 +/* New XGKR Training Algorithm */
705 +#define NEW_ALGORITHM_TRAIN_TX
707 +#ifdef NEW_ALGORITHM_TRAIN_TX
708 +#define FORCE_INC_COP1_NUMBER 0
709 +#define FORCE_INC_COM1_NUMBER 1
712 +/* Link_Training_Registers offsets */
713 +static int lt_MDIO_MMD = 0;
714 +static u32 lt_KR_PMD_CTRL = 0;
715 +static u32 lt_KR_PMD_STATUS = 0;
716 +static u32 lt_KR_LP_CU = 0;
717 +static u32 lt_KR_LP_STATUS = 0;
718 +static u32 lt_KR_LD_CU = 0;
719 +static u32 lt_KR_LD_STATUS = 0;
721 +/* KX/KR AN registers offsets */
722 +static u32 g_an_AD1 = 0;
723 +static u32 g_an_BP_STAT = 0;
725 +static const u32 preq_table[] = {0x0, 0x1, 0x3, 0x5,
726 + 0x7, 0x9, 0xb, 0xc, VAL_INVALID};
727 +static const u32 pst1q_table[] = {0x0, 0x1, 0x3, 0x5, 0x7,
728 + 0x9, 0xb, 0xd, 0xf, 0x10, VAL_INVALID};
730 +enum backplane_mode {
731 + PHY_BACKPLANE_1000BASE_KX,
732 + PHY_BACKPLANE_10GBASE_KR,
733 + PHY_BACKPLANE_40GBASE_KR,
734 + PHY_BACKPLANE_INVAL
762 +struct tx_condition {
763 + bool bin_m1_late_early;
764 + bool bin_long_late_early;
766 + bool bin_long_stop;
769 + int m1_min_max_cnt;
770 + int long_min_max_cnt;
771 +#ifdef NEW_ALGORITHM_TRAIN_TX
777 +struct xgkr_params {
778 + void *reg_base; /* lane memory map: registers base address */
779 + int idx; /* lane relative index inside a multi-lane PHY */
780 + struct phy_device *phydev;
781 + struct serdes_access *srds;
782 + struct tx_condition tx_c;
783 + struct delayed_work xgkr_wk;
784 + enum train_state state;
786 + unsigned long rt_time;
792 + u32 tuned_ratio_preq;
793 + u32 tuned_ratio_pst1q;
797 +struct xgkr_phy_data {
800 + struct mutex phy_lock;
802 + struct xgkr_params xgkr[MAX_PHY_LANES_NO];
805 +static void setup_an_lt_ls(void)
807 + /* KR PMD registers */
808 + lt_MDIO_MMD = MDIO_MMD_PMAPMD;
809 + lt_KR_PMD_CTRL = 0x96;
810 + lt_KR_PMD_STATUS = 0x97;
811 + lt_KR_LP_CU = 0x98;
812 + lt_KR_LP_STATUS = 0x99;
813 + lt_KR_LD_CU = 0x9a;
814 + lt_KR_LD_STATUS = 0x9b;
816 + /* KX/KR AN registers */
818 + g_an_BP_STAT = 0x30;
821 +static void setup_an_lt_lx(void)
823 + /* Auto-Negotiation and Link Training Core Registers page 1: 256 = 0x100 */
824 + lt_MDIO_MMD = MDIO_MMD_AN;
825 + lt_KR_PMD_CTRL = 0x100;
826 + lt_KR_PMD_STATUS = 0x101;
827 + lt_KR_LP_CU = 0x102;
828 + lt_KR_LP_STATUS = 0x103;
829 + lt_KR_LD_CU = 0x104;
830 + lt_KR_LD_STATUS = 0x105;
832 + /* KX/KR AN registers */
834 + g_an_BP_STAT = 0x0F;
837 +static u32 le_ioread32(u32 *reg)
839 + return ioread32(reg);
842 +static void le_iowrite32(u32 value, u32 *reg)
844 + iowrite32(value, reg);
847 +static u32 be_ioread32(u32 *reg)
849 + return ioread32be(reg);
852 +static void be_iowrite32(u32 value, u32 *reg)
854 + iowrite32be(value, reg);
858 + * xgkr_phy_write_mmd - Wrapper function for phy_write_mmd
859 + * for writing a register on an MMD on a given PHY.
861 + * Same rules as for phy_write_mmd();
863 +static int xgkr_phy_write_mmd(struct xgkr_params *xgkr, int devad, u32 regnum, u16 val)
865 + struct phy_device *phydev = xgkr->phydev;
866 + struct xgkr_phy_data *xgkr_inst = phydev->priv;
867 + int mdio_addr = phydev->mdio.addr;
870 + mutex_lock(&xgkr_inst->phy_lock);
872 + if (xgkr_inst->bp_mode == PHY_BACKPLANE_40GBASE_KR && devad == MDIO_MMD_AN) {
873 + //40G AN: prepare mdio address for writing phydev AN registers for 40G on respective lane
874 + phydev->mdio.addr = KR_AN_40G_MDIO_OFFSET + xgkr->idx;
877 + err = phy_write_mmd(phydev, devad, regnum, val);
879 + dev_err(&phydev->mdio.dev, "Writing PHY (%p) MMD = 0x%02x register = 0x%02x failed with error code: 0x%08x \n", phydev, devad, regnum, err);
881 + if (xgkr_inst->bp_mode == PHY_BACKPLANE_40GBASE_KR && devad == MDIO_MMD_AN) {
882 + //40G AN: restore mdio address
883 + phydev->mdio.addr = mdio_addr;
886 + mutex_unlock(&xgkr_inst->phy_lock);
892 + * xgkr_phy_read_mmd - Wrapper function for phy_read_mmd
893 + * for reading a register from an MMD on a given PHY.
895 + * Same rules as for phy_read_mmd();
897 +static int xgkr_phy_read_mmd(struct xgkr_params *xgkr, int devad, u32 regnum)
899 + struct phy_device *phydev = xgkr->phydev;
900 + struct xgkr_phy_data *xgkr_inst = phydev->priv;
901 + int mdio_addr = phydev->mdio.addr;
904 + mutex_lock(&xgkr_inst->phy_lock);
906 + if (xgkr_inst->bp_mode == PHY_BACKPLANE_40GBASE_KR && devad == MDIO_MMD_AN) {
907 + //40G AN: prepare mdio address for reading phydev AN registers for 40G on respective lane
908 + phydev->mdio.addr = KR_AN_40G_MDIO_OFFSET + xgkr->idx;
911 + ret = phy_read_mmd(phydev, devad, regnum);
913 + if (xgkr_inst->bp_mode == PHY_BACKPLANE_40GBASE_KR && devad == MDIO_MMD_AN) {
914 + //40G AN: restore mdio address
915 + phydev->mdio.addr = mdio_addr;
918 + mutex_unlock(&xgkr_inst->phy_lock);
923 +static void tx_condition_init(struct tx_condition *tx_c)
925 + tx_c->bin_m1_late_early = true;
926 + tx_c->bin_long_late_early = false;
927 + tx_c->bin_m1_stop = false;
928 + tx_c->bin_long_stop = false;
929 + tx_c->tx_complete = false;
930 + tx_c->sent_init = false;
931 + tx_c->m1_min_max_cnt = 0;
932 + tx_c->long_min_max_cnt = 0;
933 +#ifdef NEW_ALGORITHM_TRAIN_TX
934 + tx_c->pre_inc = FORCE_INC_COM1_NUMBER;
935 + tx_c->post_inc = FORCE_INC_COP1_NUMBER;
939 +void tune_tecr(struct xgkr_params *xgkr)
941 + struct phy_device *phydev = xgkr->phydev;
942 + struct xgkr_phy_data *xgkr_inst = phydev->priv;
943 + bool reset = false;
945 + if (xgkr_inst->bp_mode == PHY_BACKPLANE_40GBASE_KR) {
946 + /* Reset only the Master Lane */
947 + reset = (xgkr->idx == MASTER_LANE);
952 + xgkr->srds->tune_tecr(xgkr->reg_base, xgkr->ratio_preq, xgkr->ratio_pst1q, xgkr->adpt_eq, reset);
954 + xgkr->tuned_ratio_preq = xgkr->ratio_preq;
955 + xgkr->tuned_ratio_pst1q = xgkr->ratio_pst1q;
956 + xgkr->tuned_adpt_eq = xgkr->adpt_eq;
959 +static void start_lt(struct xgkr_params *xgkr)
961 + xgkr_phy_write_mmd(xgkr, lt_MDIO_MMD, lt_KR_PMD_CTRL, TRAIN_EN);
964 +static void stop_lt(struct xgkr_params *xgkr)
966 + xgkr_phy_write_mmd(xgkr, lt_MDIO_MMD, lt_KR_PMD_CTRL, TRAIN_DISABLE);
969 +static void reset_lt(struct xgkr_params *xgkr)
971 + xgkr_phy_write_mmd(xgkr, lt_MDIO_MMD, MDIO_CTRL1, PMD_RESET);
972 + xgkr_phy_write_mmd(xgkr, lt_MDIO_MMD, lt_KR_PMD_CTRL, TRAIN_DISABLE);
973 + xgkr_phy_write_mmd(xgkr, lt_MDIO_MMD, lt_KR_LD_CU, 0);
974 + xgkr_phy_write_mmd(xgkr, lt_MDIO_MMD, lt_KR_LD_STATUS, 0);
975 + xgkr_phy_write_mmd(xgkr, lt_MDIO_MMD, lt_KR_PMD_STATUS, 0);
976 + xgkr_phy_write_mmd(xgkr, lt_MDIO_MMD, lt_KR_LP_CU, 0);
977 + xgkr_phy_write_mmd(xgkr, lt_MDIO_MMD, lt_KR_LP_STATUS, 0);
981 +static void ld_coe_status(struct xgkr_params *xgkr)
983 + xgkr_phy_write_mmd(xgkr, lt_MDIO_MMD,
984 + lt_KR_LD_STATUS, xgkr->ld_status);
987 +static void ld_coe_update(struct xgkr_params *xgkr)
989 + dev_dbg(&xgkr->phydev->mdio.dev, "sending request: %x\n", xgkr->ld_update);
990 + xgkr_phy_write_mmd(xgkr, lt_MDIO_MMD,
991 + lt_KR_LD_CU, xgkr->ld_update);
994 +static void start_xgkr_state_machine(struct delayed_work *work)
996 + queue_delayed_work(system_power_efficient_wq, work,
997 + msecs_to_jiffies(XGKR_TIMEOUT));
1000 +static void start_xgkr_an(struct xgkr_params *xgkr)
1002 + struct phy_device *phydev = xgkr->phydev;
1003 + struct xgkr_phy_data *xgkr_inst = phydev->priv;
1007 + switch (xgkr_inst->bp_mode)
1009 + case PHY_BACKPLANE_1000BASE_KX:
1010 + dev_err(&phydev->mdio.dev, "Wrong call path for 1000Base-KX \n");
1013 + case PHY_BACKPLANE_10GBASE_KR:
1014 + err = xgkr_phy_write_mmd(xgkr, MDIO_MMD_AN, g_an_AD1, KR_AN_AD1_INIT_10G);
1016 + dev_err(&phydev->mdio.dev, "Setting AN register 0x%02x failed with error code: 0x%08x \n", g_an_AD1, err);
1018 + err = xgkr_phy_write_mmd(xgkr, MDIO_MMD_AN, MDIO_CTRL1, AN_CTRL_INIT);
1020 + dev_err(&phydev->mdio.dev, "Setting AN register 0x%02x failed with error code: 0x%08x \n", MDIO_CTRL1, err);
1023 + case PHY_BACKPLANE_40GBASE_KR:
1024 + if (xgkr->idx == MASTER_LANE) {
1025 + for (i = 0; i < xgkr_inst->phy_lanes; i++) {
1026 + err = xgkr_phy_write_mmd(&xgkr_inst->xgkr[i], MDIO_MMD_AN, g_an_AD1, KR_AN_AD1_INIT_40G);
1028 + dev_err(&phydev->mdio.dev, "Setting AN register 0x%02x on lane %d failed with error code: 0x%08x \n", g_an_AD1, xgkr_inst->xgkr[i].idx, err);
1031 + err = xgkr_phy_write_mmd(xgkr, MDIO_MMD_AN, MDIO_CTRL1, AN_CTRL_INIT);
1033 + dev_err(&phydev->mdio.dev, "Setting AN register 0x%02x on Master Lane failed with error code: 0x%08x \n", MDIO_CTRL1, err);
1039 +static void start_1gkx_an(struct phy_device *phydev)
1041 + phy_write_mmd(phydev, MDIO_MMD_PCS, KX_PCS_IF_MODE, KX_IF_MODE_INIT);
1042 + phy_write_mmd(phydev, MDIO_MMD_AN, g_an_AD1, KX_AN_AD1_INIT);
1043 + phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1);
1044 + phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1, AN_CTRL_INIT);
1047 +static void reset_tecr(struct xgkr_params *xgkr)
1049 + struct phy_device *phydev = xgkr->phydev;
1050 + struct xgkr_phy_data *xgkr_inst = phydev->priv;
1052 + switch (xgkr_inst->bp_mode)
1054 + case PHY_BACKPLANE_1000BASE_KX:
1055 + dev_err(&phydev->mdio.dev, "Wrong call path for 1000Base-KX \n");
1058 + case PHY_BACKPLANE_10GBASE_KR:
1059 + xgkr->ratio_preq = RATIO_PREQ_10G;
1060 + xgkr->ratio_pst1q = RATIO_PST1Q_10G;
1061 + xgkr->adpt_eq = RATIO_EQ_10G;
1064 + case PHY_BACKPLANE_40GBASE_KR:
1065 + xgkr->ratio_preq = RATIO_PREQ_40G;
1066 + xgkr->ratio_pst1q = RATIO_PST1Q_40G;
1067 + xgkr->adpt_eq = RATIO_EQ_40G;
1074 +static void init_xgkr(struct xgkr_params *xgkr, int reset)
1079 + tx_condition_init(&xgkr->tx_c);
1080 + xgkr->state = DETECTING_LP;
1082 + xgkr->ld_status &= RX_READY_MASK;
1083 + ld_coe_status(xgkr);
1084 + xgkr->ld_update = 0;
1085 + xgkr->ld_status &= ~RX_READY_MASK;
1086 + ld_coe_status(xgkr);
1090 +static void initialize(struct xgkr_params *xgkr)
1094 + xgkr->ld_status &= ~(COP1_MASK | COZ_MASK | COM1_MASK);
1095 + xgkr->ld_status |= COE_UPDATED << COP1_SHIFT |
1096 + COE_UPDATED << COZ_SHIFT |
1097 + COE_UPDATED << COM1_SHIFT;
1098 + ld_coe_status(xgkr);
1101 +static void train_remote_tx(struct xgkr_params *xgkr)
1103 + struct tx_condition *tx_c = &xgkr->tx_c;
1104 + bool bin_m1_early, bin_long_early;
1105 + u32 lp_status, old_ld_update;
1106 + u32 status_cop1, status_coz, status_com1;
1107 + u32 req_cop1, req_coz, req_com1, req_preset, req_init;
1109 +#ifdef NEW_ALGORITHM_TRAIN_TX
1110 + u32 median_gaink2;
1114 + if (tx_c->bin_long_stop && tx_c->bin_m1_stop) {
1115 + tx_c->tx_complete = true;
1116 + xgkr->ld_status |= RX_READY_MASK;
1117 + ld_coe_status(xgkr);
1119 + /* tell LP we are ready */
1120 + xgkr_phy_write_mmd(xgkr, lt_MDIO_MMD,
1121 + lt_KR_PMD_STATUS, RX_STAT);
1126 + /* We start by checking the current LP status. If we got any responses,
1127 + * we can clear up the appropriate update request so that the
1128 + * subsequent code may easily issue new update requests if needed.
1130 + lp_status = xgkr_phy_read_mmd(xgkr, lt_MDIO_MMD, lt_KR_LP_STATUS) &
1133 + status_cop1 = (lp_status & COP1_MASK) >> COP1_SHIFT;
1134 + status_coz = (lp_status & COZ_MASK) >> COZ_SHIFT;
1135 + status_com1 = (lp_status & COM1_MASK) >> COM1_SHIFT;
1137 + old_ld_update = xgkr->ld_update;
1138 + req_cop1 = (old_ld_update & COP1_MASK) >> COP1_SHIFT;
1139 + req_coz = (old_ld_update & COZ_MASK) >> COZ_SHIFT;
1140 + req_com1 = (old_ld_update & COM1_MASK) >> COM1_SHIFT;
1141 + req_preset = old_ld_update & PRESET_MASK;
1142 + req_init = old_ld_update & INIT_MASK;
1144 + /* IEEE802.3-2008, 72.6.10.2.3.1
1145 + * We may clear PRESET when all coefficients show UPDATED or MAX.
1148 + if ((status_cop1 == COE_UPDATED || status_cop1 == COE_MAX) &&
1149 + (status_coz == COE_UPDATED || status_coz == COE_MAX) &&
1150 + (status_com1 == COE_UPDATED || status_com1 == COE_MAX)) {
1151 + xgkr->ld_update &= ~PRESET_MASK;
1155 + /* IEEE802.3-2008, 72.6.10.2.3.2
1156 + * We may clear INITIALIZE when no coefficients show NOT UPDATED.
1159 + if (status_cop1 != COE_NOTUPDATED &&
1160 + status_coz != COE_NOTUPDATED &&
1161 + status_com1 != COE_NOTUPDATED) {
1162 + xgkr->ld_update &= ~INIT_MASK;
1166 + /* IEEE802.3-2008, 72.6.10.2.3.2
1167 + * we send initialize to the other side to ensure default settings
1168 + * for the LP. Naturally, we should do this only once.
1170 + if (!tx_c->sent_init) {
1171 + if (!lp_status && !(old_ld_update & (LD_ALL_MASK))) {
1172 + xgkr->ld_update = INIT_MASK;
1173 + tx_c->sent_init = true;
1177 + /* IEEE802.3-2008, 72.6.10.2.3.3
1178 + * We set coefficient requests to HOLD when we get the information
1179 + * about any updates. On clearing our prior response, we also update
1180 + * our internal status.
1182 + if (status_cop1 != COE_NOTUPDATED) {
1184 + xgkr->ld_update &= ~COP1_MASK;
1185 +#ifdef NEW_ALGORITHM_TRAIN_TX
1186 + if (tx_c->post_inc) {
1187 + if (req_cop1 == INCREMENT &&
1188 + status_cop1 == COE_MAX) {
1189 + tx_c->post_inc = 0;
1190 + tx_c->bin_long_stop = true;
1191 + tx_c->bin_m1_stop = true;
1193 + tx_c->post_inc -= 1;
1196 + ld_coe_update(xgkr);
1200 + if ((req_cop1 == DECREMENT && status_cop1 == COE_MIN) ||
1201 + (req_cop1 == INCREMENT && status_cop1 == COE_MAX)) {
1202 + dev_dbg(&xgkr->phydev->mdio.dev, "COP1 hit limit %s",
1203 + (status_cop1 == COE_MIN) ?
1204 + "DEC MIN" : "INC MAX");
1205 + tx_c->long_min_max_cnt++;
1206 + if (tx_c->long_min_max_cnt >= TIMEOUT_LONG) {
1207 + tx_c->bin_long_stop = true;
1208 + ld_coe_update(xgkr);
1215 + if (status_coz != COE_NOTUPDATED) {
1217 + xgkr->ld_update &= ~COZ_MASK;
1220 + if (status_com1 != COE_NOTUPDATED) {
1222 + xgkr->ld_update &= ~COM1_MASK;
1223 +#ifdef NEW_ALGORITHM_TRAIN_TX
1224 + if (tx_c->pre_inc) {
1225 + if (req_com1 == INCREMENT &&
1226 + status_com1 == COE_MAX)
1227 + tx_c->pre_inc = 0;
1229 + tx_c->pre_inc -= 1;
1231 + ld_coe_update(xgkr);
1235 + /* Stop If we have reached the limit for a parameter. */
1236 + if ((req_com1 == DECREMENT && status_com1 == COE_MIN) ||
1237 + (req_com1 == INCREMENT && status_com1 == COE_MAX)) {
1238 + dev_dbg(&xgkr->phydev->mdio.dev, "COM1 hit limit %s",
1239 + (status_com1 == COE_MIN) ?
1240 + "DEC MIN" : "INC MAX");
1241 + tx_c->m1_min_max_cnt++;
1242 + if (tx_c->m1_min_max_cnt >= TIMEOUT_M1) {
1243 + tx_c->bin_m1_stop = true;
1244 + ld_coe_update(xgkr);
1251 + if (old_ld_update != xgkr->ld_update) {
1252 + ld_coe_update(xgkr);
1253 + /* Redo these status checks and updates until we have no more
1254 + * changes, to speed up the overall process.
1259 + /* Do nothing if we have pending request. */
1260 + if ((req_coz || req_com1 || req_cop1))
1262 + else if (lp_status)
1263 + /* No pending request but LP status was not reverted to
1268 +#ifdef NEW_ALGORITHM_TRAIN_TX
1269 + if (!(xgkr->ld_update & (PRESET_MASK | INIT_MASK))) {
1270 + if (tx_c->pre_inc) {
1271 + xgkr->ld_update = INCREMENT << COM1_SHIFT;
1272 + ld_coe_update(xgkr);
1276 + if (status_cop1 != COE_MAX) {
1277 + median_gaink2 = xgkr->srds->get_median_gaink2(xgkr->reg_base);
1278 + if (median_gaink2 == 0xf) {
1279 + tx_c->post_inc = 1;
1281 + /* Gaink2 median lower than "F" */
1282 + tx_c->bin_m1_stop = true;
1283 + tx_c->bin_long_stop = true;
1288 + tx_c->bin_m1_stop = true;
1289 + tx_c->bin_long_stop = true;
1293 + if (tx_c->post_inc) {
1294 + xgkr->ld_update = INCREMENT << COP1_SHIFT;
1295 + ld_coe_update(xgkr);
1301 + /* snapshot and select bin */
1302 + bin_m1_early = xgkr->srds->is_bin_early(BIN_M1, xgkr->reg_base);
1303 + bin_long_early = xgkr->srds->is_bin_early(BIN_LONG, xgkr->reg_base);
1305 + if (!tx_c->bin_m1_stop && !tx_c->bin_m1_late_early && bin_m1_early) {
1306 + tx_c->bin_m1_stop = true;
1310 + if (!tx_c->bin_long_stop &&
1311 + tx_c->bin_long_late_early && !bin_long_early) {
1312 + tx_c->bin_long_stop = true;
1316 + /* IEEE802.3-2008, 72.6.10.2.3.3
1317 + * We only request coefficient updates when no PRESET/INITIALIZE is
1318 + * pending. We also only request coefficient updates when the
1319 + * corresponding status is NOT UPDATED and nothing is pending.
1321 + if (!(xgkr->ld_update & (PRESET_MASK | INIT_MASK))) {
1322 + if (!tx_c->bin_long_stop) {
1323 + /* BinM1 correction means changing COM1 */
1324 + if (!status_com1 && !(xgkr->ld_update & COM1_MASK)) {
1325 + /* Avoid BinM1Late by requesting an
1326 + * immediate decrement.
1328 + if (!bin_m1_early) {
1329 + /* request decrement c(-1) */
1330 + temp = DECREMENT << COM1_SHIFT;
1331 + xgkr->ld_update = temp;
1332 + ld_coe_update(xgkr);
1333 + tx_c->bin_m1_late_early = bin_m1_early;
1338 + /* BinLong correction means changing COP1 */
1339 + if (!status_cop1 && !(xgkr->ld_update & COP1_MASK)) {
1340 + /* Locate BinLong transition point (if any)
1341 + * while avoiding BinM1Late.
1343 + if (bin_long_early) {
1344 + /* request increment c(1) */
1345 + temp = INCREMENT << COP1_SHIFT;
1346 + xgkr->ld_update = temp;
1348 + /* request decrement c(1) */
1349 + temp = DECREMENT << COP1_SHIFT;
1350 + xgkr->ld_update = temp;
1353 + ld_coe_update(xgkr);
1354 + tx_c->bin_long_late_early = bin_long_early;
1356 + /* We try to finish BinLong before we do BinM1 */
1360 + if (!tx_c->bin_m1_stop) {
1361 + /* BinM1 correction means changing COM1 */
1362 + if (!status_com1 && !(xgkr->ld_update & COM1_MASK)) {
1363 + /* Locate BinM1 transition point (if any) */
1364 + if (bin_m1_early) {
1365 + /* request increment c(-1) */
1366 + temp = INCREMENT << COM1_SHIFT;
1367 + xgkr->ld_update = temp;
1369 + /* request decrement c(-1) */
1370 + temp = DECREMENT << COM1_SHIFT;
1371 + xgkr->ld_update = temp;
1374 + ld_coe_update(xgkr);
1375 + tx_c->bin_m1_late_early = bin_m1_early;
1381 +static int is_link_up(struct phy_device *phydev)
1383 + struct xgkr_phy_data *xgkr_inst = phydev->priv;
1386 + mutex_lock(&xgkr_inst->phy_lock);
1388 + val = phy_read_mmd(phydev, MDIO_MMD_PCS, XFI_PCS_SR1);
1390 + mutex_unlock(&xgkr_inst->phy_lock);
1392 + return (val & KR_RX_LINK_STAT_MASK) ? 1 : 0;
1395 +static int is_link_training_fail(struct xgkr_params *xgkr)
1397 + struct phy_device *phydev = xgkr->phydev;
1399 + int timeout = 100;
1401 + val = xgkr_phy_read_mmd(xgkr, lt_MDIO_MMD, lt_KR_PMD_STATUS);
1403 + if (!(val & TRAIN_FAIL) && (val & RX_STAT)) {
1404 + /* check LNK_STAT for sure */
1405 + while (timeout--) {
1406 + if (is_link_up(phydev))
1409 + usleep_range(100, 500);
1416 +static int check_rx(struct xgkr_params *xgkr)
1418 + return xgkr_phy_read_mmd(xgkr, lt_MDIO_MMD, lt_KR_LP_STATUS) &
1422 +/* Coefficient values have hardware restrictions */
1423 +static int is_ld_valid(struct xgkr_params *xgkr)
1425 + u32 ratio_pst1q = xgkr->ratio_pst1q;
1426 + u32 adpt_eq = xgkr->adpt_eq;
1427 + u32 ratio_preq = xgkr->ratio_preq;
1429 + if ((ratio_pst1q + adpt_eq + ratio_preq) > 48)
1432 + if (((ratio_pst1q + adpt_eq + ratio_preq) * 4) >=
1433 + ((adpt_eq - ratio_pst1q - ratio_preq) * 17))
1436 + if (ratio_preq > ratio_pst1q)
1439 + if (ratio_preq > 8)
1445 + if (ratio_pst1q > 16)
1451 +static int is_value_allowed(const u32 *val_table, u32 val)
1455 + for (i = 0;; i++) {
1456 + if (*(val_table + i) == VAL_INVALID)
1458 + if (*(val_table + i) == val)
1463 +static enum coe_update inc_dec(struct xgkr_params *xgkr, int field, int request)
1465 + u32 ld_limit[3], ld_coe[3], step[3];
1467 + ld_coe[0] = xgkr->ratio_pst1q;
1468 + ld_coe[1] = xgkr->adpt_eq;
1469 + ld_coe[2] = xgkr->ratio_preq;
1471 + /* Information specific to the SerDes for 10GBase-KR:
1472 + * Incrementing C(+1) means *decrementing* RATIO_PST1Q
1473 + * Incrementing C(0) means incrementing ADPT_EQ
1474 + * Incrementing C(-1) means *decrementing* RATIO_PREQ
1480 + switch (request) {
1482 + ld_limit[0] = POST_COE_MAX;
1483 + ld_limit[1] = ZERO_COE_MAX;
1484 + ld_limit[2] = PRE_COE_MAX;
1485 + if (ld_coe[field] != ld_limit[field])
1486 + ld_coe[field] += step[field];
1492 + ld_limit[0] = POST_COE_MIN;
1493 + ld_limit[1] = ZERO_COE_MIN;
1494 + ld_limit[2] = PRE_COE_MIN;
1495 + if (ld_coe[field] != ld_limit[field])
1496 + ld_coe[field] -= step[field];
1505 + if (is_ld_valid(xgkr)) {
1506 + /* accept new ld */
1507 + xgkr->ratio_pst1q = ld_coe[0];
1508 + xgkr->adpt_eq = ld_coe[1];
1509 + xgkr->ratio_preq = ld_coe[2];
1510 + /* only some values for preq and pst1q can be used.
1511 + * for preq: 0x0, 0x1, 0x3, 0x5, 0x7, 0x9, 0xb, 0xc.
1512 + * for pst1q: 0x0, 0x1, 0x3, 0x5, 0x7, 0x9, 0xb, 0xd, 0xf, 0x10.
1514 + if (!is_value_allowed((const u32 *)&preq_table, ld_coe[2])) {
1515 + dev_dbg(&xgkr->phydev->mdio.dev,
1516 + "preq skipped value: %d\n", ld_coe[2]);
1518 + return COE_NOTUPDATED;
1521 + if (!is_value_allowed((const u32 *)&pst1q_table, ld_coe[0])) {
1522 + dev_dbg(&xgkr->phydev->mdio.dev,
1523 + "pst1q skipped value: %d\n", ld_coe[0]);
1525 + return COE_NOTUPDATED;
1530 + if (request == DECREMENT)
1533 + if (request == INCREMENT)
1539 + return COE_UPDATED;
1542 +static void min_max_updated(struct xgkr_params *xgkr, int field, enum coe_update cs)
1547 + if (cs == COE_INV)
1553 + val = ld_cs << COP1_SHIFT;
1557 + val = ld_cs << COZ_SHIFT;
1561 + val = ld_cs << COM1_SHIFT;
1567 + xgkr->ld_status &= ~mask;
1568 + xgkr->ld_status |= val;
1571 +static void check_request(struct xgkr_params *xgkr, int request)
1573 + int cop1_req, coz_req, com_req;
1575 + enum coe_update cu;
1577 + cop1_req = (request & COP1_MASK) >> COP1_SHIFT;
1578 + coz_req = (request & COZ_MASK) >> COZ_SHIFT;
1579 + com_req = (request & COM1_MASK) >> COM1_SHIFT;
1581 + /* IEEE802.3-2008, 72.6.10.2.5
1582 + * Ensure we only act on INCREMENT/DECREMENT when we are in NOT UPDATED
1584 + old_status = xgkr->ld_status;
1586 + if (cop1_req && !(xgkr->ld_status & COP1_MASK)) {
1587 + cu = inc_dec(xgkr, COE_COP1, cop1_req);
1588 + min_max_updated(xgkr, COE_COP1, cu);
1591 + if (coz_req && !(xgkr->ld_status & COZ_MASK)) {
1592 + cu = inc_dec(xgkr, COE_COZ, coz_req);
1593 + min_max_updated(xgkr, COE_COZ, cu);
1596 + if (com_req && !(xgkr->ld_status & COM1_MASK)) {
1597 + cu = inc_dec(xgkr, COE_COM, com_req);
1598 + min_max_updated(xgkr, COE_COM, cu);
1601 + if (old_status != xgkr->ld_status)
1602 + ld_coe_status(xgkr);
1605 +static void preset(struct xgkr_params *xgkr)
1607 + /* These are all MAX values from the IEEE802.3 perspective. */
1608 + xgkr->ratio_pst1q = POST_COE_MAX;
1609 + xgkr->adpt_eq = ZERO_COE_MAX;
1610 + xgkr->ratio_preq = PRE_COE_MAX;
1613 + xgkr->ld_status &= ~(COP1_MASK | COZ_MASK | COM1_MASK);
1614 + xgkr->ld_status |= COE_MAX << COP1_SHIFT |
1615 + COE_MAX << COZ_SHIFT |
1616 + COE_MAX << COM1_SHIFT;
1617 + ld_coe_status(xgkr);
1620 +static void train_local_tx(struct xgkr_params *xgkr)
1622 + int request, old_ld_status;
1624 + /* get request from LP */
1625 + request = xgkr_phy_read_mmd(xgkr, lt_MDIO_MMD, lt_KR_LP_CU) &
1628 + old_ld_status = xgkr->ld_status;
1630 + /* IEEE802.3-2008, 72.6.10.2.5
1631 + * Ensure we always go to NOT UPDATED for status reporting in
1632 + * response to HOLD requests.
1633 + * IEEE802.3-2008, 72.6.10.2.3.1/2
1634 + * ... but only if PRESET/INITIALIZE are not active to ensure
1635 + * we keep status until they are released.
1637 + if (!(request & (PRESET_MASK | INIT_MASK))) {
1638 + if (!(request & COP1_MASK))
1639 + xgkr->ld_status &= ~COP1_MASK;
1641 + if (!(request & COZ_MASK))
1642 + xgkr->ld_status &= ~COZ_MASK;
1644 + if (!(request & COM1_MASK))
1645 + xgkr->ld_status &= ~COM1_MASK;
1647 + if (old_ld_status != xgkr->ld_status)
1648 + ld_coe_status(xgkr);
1651 + /* As soon as the LP shows ready, no need to do any more updates. */
1652 + if (check_rx(xgkr)) {
1653 + /* LP receiver is ready */
1654 + if (xgkr->ld_status & (COP1_MASK | COZ_MASK | COM1_MASK)) {
1655 + xgkr->ld_status &= ~(COP1_MASK | COZ_MASK | COM1_MASK);
1656 + ld_coe_status(xgkr);
1659 + /* IEEE802.3-2008, 72.6.10.2.3.1/2
1660 + * only act on PRESET/INITIALIZE if all status is NOT UPDATED.
1662 + if (request & (PRESET_MASK | INIT_MASK)) {
1663 + if (!(xgkr->ld_status &
1664 + (COP1_MASK | COZ_MASK | COM1_MASK))) {
1665 + if (request & PRESET_MASK)
1668 + if (request & INIT_MASK)
1673 + /* LP Coefficient are not in HOLD */
1674 + if (request & REQUEST_MASK)
1675 + check_request(xgkr, request & REQUEST_MASK);
1679 +static void xgkr_start_train(struct xgkr_params *xgkr)
1681 + struct phy_device *phydev = xgkr->phydev;
1682 + struct xgkr_phy_data *xgkr_inst = phydev->priv;
1683 + struct tx_condition *tx_c = &xgkr->tx_c;
1684 + int val = 0, i, j;
1686 + unsigned long dead_line;
1687 + int lp_rx_ready, tx_training_complete;
1688 + u32 lt_timeout = 500;
1690 + init_xgkr(xgkr, 0);
1694 + if (xgkr_inst->bp_mode == PHY_BACKPLANE_40GBASE_KR) {
1695 + lt_timeout = 2000;
1698 + for (i = 0; i < 2;) {
1700 + dead_line = jiffies + msecs_to_jiffies(lt_timeout);
1702 + while (time_before(jiffies, dead_line)) {
1704 + val = xgkr_phy_read_mmd(xgkr, lt_MDIO_MMD,
1705 + lt_KR_PMD_STATUS);
1707 + if (val & TRAIN_FAIL) {
1708 + /* LT failed already, reset lane to avoid
1709 + * it run into hanging, then start LT again.
1711 + if (xgkr_inst->bp_mode == PHY_BACKPLANE_40GBASE_KR) {
1712 + /* Reset only the Master Lane */
1713 + if (xgkr->idx == MASTER_LANE)
1714 + xgkr->srds->reset_lane(xgkr->reg_base);
1716 + xgkr->srds->reset_lane(xgkr->reg_base);
1720 + } else if ((val & PMD_STATUS_SUP_STAT) &&
1721 + (val & PMD_STATUS_FRAME_LOCK))
1723 + usleep_range(100, 500);
1726 + if (!((val & PMD_STATUS_FRAME_LOCK) &&
1727 + (val & PMD_STATUS_SUP_STAT))) {
1732 + /* init process */
1733 + lp_rx_ready = false;
1734 + tx_training_complete = false;
1735 + /* the LT should be finished in 500ms, failed or OK. */
1736 + dead_line = jiffies + msecs_to_jiffies(lt_timeout);
1738 + while (time_before(jiffies, dead_line)) {
1739 + /* check if the LT is already failed */
1741 + lt_state = xgkr_phy_read_mmd(xgkr, lt_MDIO_MMD,
1742 + lt_KR_PMD_STATUS);
1744 + if (lt_state & TRAIN_FAIL) {
1746 + if (xgkr_inst->bp_mode == PHY_BACKPLANE_40GBASE_KR) {
1747 + /* Reset only the Master Lane */
1748 + if (xgkr->idx == MASTER_LANE)
1749 + xgkr->srds->reset_lane(xgkr->reg_base);
1751 + xgkr->srds->reset_lane(xgkr->reg_base);
1757 + lp_rx_ready = check_rx(xgkr);
1758 + tx_training_complete = tx_c->tx_complete;
1760 + if (lp_rx_ready && tx_training_complete)
1764 + train_local_tx(xgkr);
1766 + if (!tx_training_complete)
1767 + train_remote_tx(xgkr);
1769 + usleep_range(100, 500);
1773 + /* check LT result */
1774 + if (is_link_training_fail(xgkr)) {
1775 + init_xgkr(xgkr, 0);
1779 + xgkr->state = TRAINED;
1781 + switch (xgkr_inst->bp_mode)
1783 + case PHY_BACKPLANE_10GBASE_KR:
1784 + if (phydev->attached_dev == NULL)
1785 + dev_info(&phydev->mdio.dev, "10GBase-KR link trained (Tx equalization: RATIO_PREQ = 0x%x, RATIO_PST1Q = 0x%x, ADPT_EQ = 0x%x)\n",
1786 + xgkr->tuned_ratio_preq, xgkr->tuned_ratio_pst1q, xgkr->tuned_adpt_eq);
1788 + dev_info(&phydev->mdio.dev, "%s %s: 10GBase-KR link trained (Tx equalization: RATIO_PREQ = 0x%x, RATIO_PST1Q = 0x%x, ADPT_EQ = 0x%x)\n",
1789 + dev_driver_string(phydev->attached_dev->dev.parent),
1790 + dev_name(phydev->attached_dev->dev.parent),
1791 + xgkr->tuned_ratio_preq, xgkr->tuned_ratio_pst1q, xgkr->tuned_adpt_eq);
1794 + case PHY_BACKPLANE_40GBASE_KR:
1795 + if (xgkr->idx == xgkr_inst->phy_lanes - 1) {
1796 + if (phydev->attached_dev == NULL)
1797 + dev_info(&phydev->mdio.dev, "40GBase-KR link trained at lanes Tx equalization:\n");
1799 + dev_info(&phydev->mdio.dev, "%s %s: 40GBase-KR link trained at lanes Tx equalization:\n",
1800 + dev_driver_string(phydev->attached_dev->dev.parent),
1801 + dev_name(phydev->attached_dev->dev.parent));
1803 + for (j = 0; j < xgkr_inst->phy_lanes; j++) {
1804 + if (phydev->attached_dev == NULL)
1805 + dev_info(&phydev->mdio.dev, "40GBase-KR Lane %d: RATIO_PREQ = 0x%x, RATIO_PST1Q = 0x%x, ADPT_EQ = 0x%x\n",
1806 + j, xgkr_inst->xgkr[j].tuned_ratio_preq, xgkr_inst->xgkr[j].tuned_ratio_pst1q, xgkr_inst->xgkr[j].tuned_adpt_eq);
1808 + dev_info(&phydev->mdio.dev, "%s %s: 40GBase-KR Lane %d: RATIO_PREQ = 0x%x, RATIO_PST1Q = 0x%x, ADPT_EQ = 0x%x\n",
1809 + dev_driver_string(phydev->attached_dev->dev.parent),
1810 + dev_name(phydev->attached_dev->dev.parent),
1811 + j, xgkr_inst->xgkr[j].tuned_ratio_preq, xgkr_inst->xgkr[j].tuned_ratio_pst1q, xgkr_inst->xgkr[j].tuned_adpt_eq);
1822 +static void xgkr_request_restart_an(struct xgkr_params *xgkr)
1824 + struct phy_device *phydev = xgkr->phydev;
1825 + struct xgkr_phy_data *xgkr_inst = phydev->priv;
1828 + if (time_before(jiffies, xgkr->rt_time))
1831 + switch (xgkr_inst->bp_mode)
1833 + case PHY_BACKPLANE_1000BASE_KX:
1834 + dev_err(&phydev->mdio.dev, "Wrong call path for 1000Base-KX \n");
1837 + case PHY_BACKPLANE_10GBASE_KR:
1838 + init_xgkr(xgkr, 0);
1840 + xgkr->state = DETECTING_LP;
1841 + start_xgkr_an(xgkr);
1842 + start_xgkr_state_machine(&xgkr->xgkr_wk);
1845 + case PHY_BACKPLANE_40GBASE_KR:
1846 + for (i = 0; i < xgkr_inst->phy_lanes; i++) {
1847 + init_xgkr(&xgkr_inst->xgkr[i], 0);
1848 + reset_lt(&xgkr_inst->xgkr[i]);
1849 + xgkr_inst->xgkr[i].state = DETECTING_LP;
1851 + //Start AN only for Master Lane
1852 + start_xgkr_an(&xgkr_inst->xgkr[MASTER_LANE]);
1853 + //start state machine
1854 + for (i = 0; i < xgkr_inst->phy_lanes; i++) {
1855 + start_xgkr_state_machine(&xgkr_inst->xgkr[i].xgkr_wk);
1860 + xgkr->rt_time = jiffies + msecs_to_jiffies(XGKR_DENY_RT_INTERVAL);
1863 +static void xgkr_state_machine(struct work_struct *work)
1865 + struct delayed_work *dwork = to_delayed_work(work);
1866 + struct xgkr_params *xgkr = container_of(dwork,
1867 + struct xgkr_params, xgkr_wk);
1868 + struct phy_device *phydev = xgkr->phydev;
1869 + struct xgkr_phy_data *xgkr_inst = phydev->priv;
1871 + bool start_train = false;
1872 + bool all_lanes_trained = false;
1875 + if (!xgkr_inst->aneg_done) {
1876 + start_xgkr_state_machine(&xgkr->xgkr_wk);
1880 + mutex_lock(&phydev->lock);
1882 + switch (xgkr->state) {
1883 + case DETECTING_LP:
1885 + switch (xgkr_inst->bp_mode)
1887 + case PHY_BACKPLANE_1000BASE_KX:
1888 + dev_err(&phydev->mdio.dev, "Wrong call path for 1000Base-KX \n");
1891 + case PHY_BACKPLANE_10GBASE_KR:
1892 + an_state = xgkr_phy_read_mmd(xgkr, MDIO_MMD_AN, g_an_BP_STAT);
1893 + if (an_state & KR_AN_MASK_10G) {
1894 + //AN acquired: Train the lane
1895 + xgkr->an_wait_count = 0;
1896 + start_train = true;
1898 + //AN lost or not yet acquired
1899 + if (!is_link_up(phydev)) {
1900 + //Link is down: restart training
1901 + xgkr->an_wait_count = 0;
1902 + xgkr_request_restart_an(xgkr);
1904 + //Link is up: wait few iterations for AN to be acquired
1905 + if (xgkr->an_wait_count >= XGKR_AN_WAIT_ITERATIONS) {
1906 + xgkr->an_wait_count = 0;
1907 + xgkr_request_restart_an(xgkr);
1909 + xgkr->an_wait_count++;
1915 + case PHY_BACKPLANE_40GBASE_KR:
1916 + //Check AN state only on Master Lane
1917 + an_state = xgkr_phy_read_mmd(&xgkr_inst->xgkr[MASTER_LANE], MDIO_MMD_AN, g_an_BP_STAT);
1918 + if (an_state & KR_AN_MASK_40G) {
1919 + //AN acquired: Train all lanes in order starting with Master Lane
1920 + xgkr->an_wait_count = 0;
1921 + if (xgkr->idx == MASTER_LANE) {
1922 + start_train = true;
1924 + else if (xgkr_inst->xgkr[xgkr->idx - 1].state == TRAINED) {
1925 + start_train = true;
1928 + //AN lost or not yet acquired
1929 + if (!is_link_up(phydev)) {
1930 + //Link is down: restart training
1931 + xgkr->an_wait_count = 0;
1932 + xgkr_request_restart_an(xgkr);
1934 + //Link is up: wait few iterations for AN to be acquired
1935 + if (xgkr->an_wait_count >= XGKR_AN_WAIT_ITERATIONS) {
1936 + xgkr->an_wait_count = 0;
1937 + xgkr_request_restart_an(xgkr);
1939 + xgkr->an_wait_count++;
1948 + if (!is_link_up(phydev)) {
1949 + switch (xgkr_inst->bp_mode)
1951 + case PHY_BACKPLANE_1000BASE_KX:
1952 + dev_err(&phydev->mdio.dev, "Wrong call path for 1000Base-KX \n");
1955 + case PHY_BACKPLANE_10GBASE_KR:
1956 + dev_info(&phydev->mdio.dev, "Detect hotplug, restart training\n");
1957 + xgkr_request_restart_an(xgkr);
1960 + case PHY_BACKPLANE_40GBASE_KR:
1961 + if (xgkr->idx == MASTER_LANE) {
1962 + //check if all lanes are trained only on Master Lane
1963 + all_lanes_trained = true;
1964 + for (i = 0; i < xgkr_inst->phy_lanes; i++) {
1965 + if (xgkr_inst->xgkr[i].state != TRAINED) {
1966 + all_lanes_trained = false;
1970 + if (all_lanes_trained) {
1971 + dev_info(&phydev->mdio.dev, "Detect hotplug, restart training\n");
1972 + xgkr_request_restart_an(xgkr);
1981 + if (start_train) {
1982 + xgkr_start_train(xgkr);
1985 + mutex_unlock(&phydev->lock);
1986 + start_xgkr_state_machine(&xgkr->xgkr_wk);
1989 +static int fsl_backplane_probe(struct phy_device *phydev)
1991 + struct xgkr_phy_data *xgkr_inst;
1992 + struct device_node *phy_node, *lane_node;
1993 + struct resource res_lane;
1994 + struct serdes_access *srds = NULL;
1998 + int ret, i, phy_lanes;
2000 + u32 lane_base_addr[MAX_PHY_LANES_NO], lane_memmap_size;
2002 + phy_node = phydev->mdio.dev.of_node;
2004 + dev_err(&phydev->mdio.dev, "No associated device tree node\n");
2008 + bp_mode = of_property_read_string(phy_node, "backplane-mode", &bm);
2013 + if (!strcasecmp(bm, "1000base-kx")) {
2014 + bp_mode = PHY_BACKPLANE_1000BASE_KX;
2015 + } else if (!strcasecmp(bm, "10gbase-kr")) {
2016 + bp_mode = PHY_BACKPLANE_10GBASE_KR;
2017 + } else if (!strcasecmp(bm, "40gbase-kr")) {
2018 + bp_mode = PHY_BACKPLANE_40GBASE_KR;
2021 + dev_err(&phydev->mdio.dev, "Unknown backplane-mode\n");
2025 + lane_node = of_parse_phandle(phy_node, "fsl,lane-handle", 0);
2027 + dev_err(&phydev->mdio.dev, "parse fsl,lane-handle failed\n");
2031 + ret = of_property_read_string(lane_node, "compatible", &st);
2033 + //assume SERDES-10G if compatible property is not specified
2034 + serdes_type = SERDES_10G;
2036 + else if (!strcasecmp(st, "fsl,serdes-10g")) {
2037 + serdes_type = SERDES_10G;
2038 + } else if (!strcasecmp(st, "fsl,serdes-28g")) {
2039 + serdes_type = SERDES_28G;
2041 + dev_err(&phydev->mdio.dev, "Unknown serdes-type\n");
2045 + ret = of_address_to_resource(lane_node, 0, &res_lane);
2047 + dev_err(&phydev->mdio.dev, "could not obtain memory map\n");
2051 + of_node_put(lane_node);
2052 + ret = of_property_read_u32_array(phy_node, "fsl,lane-reg",
2053 + (u32 *)lane_base_addr, phy_lanes);
2055 + dev_err(&phydev->mdio.dev, "could not get fsl,lane-reg\n");
2059 + switch (serdes_type)
2063 + srds = setup_serdes_access_10g();
2068 + srds = setup_serdes_access_28g();
2072 + dev_err(&phydev->mdio.dev, "Unsupported serdes-type\n");
2077 + dev_err(&phydev->mdio.dev, "Unsupported serdes-type\n");
2081 + srds->serdes_type = serdes_type;
2082 + srds->is_little_endian = of_property_read_bool(lane_node, "little-endian");
2084 + if (srds->is_little_endian) {
2085 + srds->ioread32 = le_ioread32;
2086 + srds->iowrite32 = le_iowrite32;
2088 + srds->ioread32 = be_ioread32;
2089 + srds->iowrite32 = be_iowrite32;
2092 + xgkr_inst = devm_kzalloc(&phydev->mdio.dev,
2093 + sizeof(*xgkr_inst), GFP_KERNEL);
2097 + xgkr_inst->phy_lanes = phy_lanes;
2098 + xgkr_inst->bp_mode = bp_mode;
2099 + mutex_init(&xgkr_inst->phy_lock);
2101 + lane_memmap_size = srds->get_lane_memmap_size();
2103 + for (i = 0; i < phy_lanes; i++) {
2104 + xgkr_inst->xgkr[i].idx = i;
2105 + xgkr_inst->xgkr[i].phydev = phydev;
2106 + xgkr_inst->xgkr[i].srds = srds;
2107 + xgkr_inst->xgkr[i].reg_base = devm_ioremap_nocache(&phydev->mdio.dev,
2108 + res_lane.start + lane_base_addr[i],
2109 + lane_memmap_size);
2110 + if (!xgkr_inst->xgkr[i].reg_base) {
2111 + dev_err(&phydev->mdio.dev, "ioremap_nocache failed\n");
2114 + xgkr_inst->xgkr[i].rt_time = jiffies + msecs_to_jiffies(XGKR_DENY_RT_INTERVAL);
2117 + phydev->priv = xgkr_inst;
2121 + case PHY_BACKPLANE_1000BASE_KX:
2122 + phydev->speed = SPEED_1000;
2123 + /* configure the lane for 1000BASE-KX */
2124 + srds->lane_set_1gkx(xgkr_inst->xgkr[SINGLE_LANE].reg_base);
2127 + case PHY_BACKPLANE_10GBASE_KR:
2128 + phydev->speed = SPEED_10000;
2129 + INIT_DELAYED_WORK(&xgkr_inst->xgkr[SINGLE_LANE].xgkr_wk, xgkr_state_machine);
2132 + case PHY_BACKPLANE_40GBASE_KR:
2133 + phydev->speed = SPEED_40000;
2134 + for (i = 0; i < phy_lanes; i++)
2135 + INIT_DELAYED_WORK(&xgkr_inst->xgkr[i].xgkr_wk, xgkr_state_machine);
2142 +static int fsl_backplane_aneg_done(struct phy_device *phydev)
2144 + struct xgkr_phy_data *xgkr_inst = phydev->priv;
2146 + if (!phydev->mdio.dev.of_node) {
2147 + dev_err(&phydev->mdio.dev, "No associated device tree node\n");
2151 + xgkr_inst->aneg_done = true;
2156 +static int fsl_backplane_config_aneg(struct phy_device *phydev)
2158 + struct xgkr_phy_data *xgkr_inst = phydev->priv;
2161 + if (!phydev->mdio.dev.of_node) {
2162 + dev_err(&phydev->mdio.dev, "No associated device tree node\n");
2166 + switch (phydev->speed)
2169 + phydev->supported |= SUPPORTED_1000baseKX_Full;
2170 + start_1gkx_an(phydev);
2174 + phydev->supported |= SUPPORTED_10000baseKR_Full;
2175 + reset_lt(&xgkr_inst->xgkr[SINGLE_LANE]);
2176 + start_xgkr_an(&xgkr_inst->xgkr[SINGLE_LANE]);
2177 + /* start state machine*/
2178 + start_xgkr_state_machine(&xgkr_inst->xgkr[SINGLE_LANE].xgkr_wk);
2182 + phydev->supported |= SUPPORTED_40000baseKR4_Full;
2183 + for (i = 0; i < xgkr_inst->phy_lanes; i++) {
2184 + reset_lt(&xgkr_inst->xgkr[i]);
2186 + //Start AN only for Master Lane
2187 + start_xgkr_an(&xgkr_inst->xgkr[MASTER_LANE]);
2188 + /* start state machine*/
2189 + for (i = 0; i < xgkr_inst->phy_lanes; i++) {
2190 + start_xgkr_state_machine(&xgkr_inst->xgkr[i].xgkr_wk);
2196 + phydev->advertising = phydev->supported;
2197 + phydev->duplex = 1;
2202 +static int fsl_backplane_suspend(struct phy_device *phydev)
2206 + if (!phydev->mdio.dev.of_node) {
2207 + dev_err(&phydev->mdio.dev, "No associated device tree node\n");
2211 + if (phydev->speed == SPEED_10000 || phydev->speed == SPEED_40000) {
2212 + struct xgkr_phy_data *xgkr_inst = phydev->priv;
2214 + for (i = 0; i < xgkr_inst->phy_lanes; i++)
2215 + cancel_delayed_work_sync(&xgkr_inst->xgkr[i].xgkr_wk);
2220 +static int fsl_backplane_resume(struct phy_device *phydev)
2222 + struct xgkr_phy_data *xgkr_inst = phydev->priv;
2225 + if (!phydev->mdio.dev.of_node) {
2226 + dev_err(&phydev->mdio.dev, "No associated device tree node\n");
2230 + if (phydev->speed == SPEED_10000 || phydev->speed == SPEED_40000) {
2231 + for (i = 0; i < xgkr_inst->phy_lanes; i++) {
2232 + init_xgkr(&xgkr_inst->xgkr[i], 1);
2233 + start_xgkr_state_machine(&xgkr_inst->xgkr[i].xgkr_wk);
2239 +static int fsl_backplane_read_status(struct phy_device *phydev)
2241 + if (!phydev->mdio.dev.of_node) {
2242 + dev_err(&phydev->mdio.dev, "No associated device tree node\n");
2246 + if (is_link_up(phydev))
2254 +static int fsl_backplane_match_phy_device(struct phy_device *phydev)
2256 + struct device_node *phy_node, *lane_node;
2258 + int serdes_type, i, ret;
2259 + const int num_ids = ARRAY_SIZE(phydev->c45_ids.device_ids);
2261 + if (!phydev->mdio.dev.of_node) {
2266 + // Required for LX2 devices
2267 + // where PHY ID cannot be verified in PCS
2268 + // because PCS Device Identifier Upper and Lower registers are hidden
2269 + // and always return 0 when they are read:
2270 + // 2 02 Device_ID0 RO Bits 15:0 0
2271 + // val = phy_read_mmd(phydev, MDIO_MMD_PCS, 0x2);
2272 + // 3 03 Device_ID1 RO Bits 31:16 0
2273 + // val = phy_read_mmd(phydev, MDIO_MMD_PCS, 0x3);
2275 + // To be removed: After the issue will be fixed on LX2 devices
2277 + if (!phydev->is_c45)
2280 + phy_node = phydev->mdio.dev.of_node;
2282 + lane_node = of_parse_phandle(phy_node, "fsl,lane-handle", 0);
2284 + dev_err(&phydev->mdio.dev, "parse fsl,lane-handle failed\n");
2288 + ret = of_property_read_string(lane_node, "compatible", &st);
2290 + //assume SERDES-10G if compatible property is not specified
2291 + serdes_type = SERDES_10G;
2293 + else if (!strcasecmp(st, "fsl,serdes-10g")) {
2294 + serdes_type = SERDES_10G;
2295 + } else if (!strcasecmp(st, "fsl,serdes-28g")) {
2296 + serdes_type = SERDES_28G;
2298 + dev_err(&phydev->mdio.dev, "Unknown serdes-type\n");
2302 + if (serdes_type == SERDES_10G) {
2303 + //On LS devices we must find the c45 device with correct PHY ID
2304 + //Implementation similar with the one existent in phy_device: @function: phy_bus_match
2305 + for (i = 1; i < num_ids; i++) {
2306 + if (!(phydev->c45_ids.devices_in_package & (1 << i)))
2309 + if ((PCS_PHY_DEVICE_ID & PCS_PHY_DEVICE_ID_MASK) ==
2310 + (phydev->c45_ids.device_ids[i] & PCS_PHY_DEVICE_ID_MASK))
2318 + //On LX devices we cannot verify PHY ID
2319 + //so we are happy only with preliminary verifications already made: mdio.dev.of_node and is_c45
2320 + //because we already filtered other undesired devices: non clause 45
2325 +static struct phy_driver fsl_backplane_driver[] = {
2327 + .phy_id = PCS_PHY_DEVICE_ID,
2328 + .name = "Freescale Backplane",
2329 + .phy_id_mask = PCS_PHY_DEVICE_ID_MASK,
2330 + .features = SUPPORTED_Backplane | SUPPORTED_Autoneg |
2332 + .probe = fsl_backplane_probe,
2333 + .aneg_done = fsl_backplane_aneg_done,
2334 + .config_aneg = fsl_backplane_config_aneg,
2335 + .read_status = fsl_backplane_read_status,
2336 + .suspend = fsl_backplane_suspend,
2337 + .resume = fsl_backplane_resume,
2338 + .match_phy_device = fsl_backplane_match_phy_device,
2342 +module_phy_driver(fsl_backplane_driver);
2344 +static struct mdio_device_id __maybe_unused freescale_tbl[] = {
2345 + { PCS_PHY_DEVICE_ID, PCS_PHY_DEVICE_ID_MASK },
2349 +MODULE_DEVICE_TABLE(mdio, freescale_tbl);
2351 +MODULE_DESCRIPTION("Freescale Backplane driver");
2352 +MODULE_AUTHOR("Shaohui Xie <Shaohui.Xie@freescale.com>");
2353 +MODULE_LICENSE("GPL v2");
2355 +++ b/drivers/net/phy/fsl_backplane.h
2357 +/* SPDX-License-Identifier: GPL-2.0+ */
2359 + * DPAA backplane driver.
2360 + * Author: Florinel Iordache <florinel.iordache@nxp.com>
2362 + * Copyright 2018 NXP
2364 + * Licensed under the GPL-2 or later.
2367 +#ifndef FSL_BACKPLANE_H
2368 +#define FSL_BACKPLANE_H
2375 +#define BIN_SNAPSHOT_NUM 5
2376 +#define BIN_M1_THRESHOLD 3
2377 +#define BIN_LONG_THRESHOLD 2
2379 +struct serdes_access {
2382 + bool is_little_endian;
2383 + u32 (*ioread32)(u32 *reg);
2384 + void (*iowrite32)(u32 value, u32 *reg);
2385 + u32 (*get_lane_memmap_size)(void);
2386 + void (*tune_tecr)(void *reg, u32 ratio_preq, u32 ratio_pst1q, u32 adpt_eq, bool reset);
2387 + void (*reset_lane)(void *reg);
2388 + void (*lane_set_1gkx)(void *reg);
2389 + int (*get_median_gaink2)(u32 *reg);
2390 + bool (*is_bin_early)(int bin_sel, void *reg);
2393 +struct serdes_access* setup_serdes_access_10g(void);
2394 +struct serdes_access* setup_serdes_access_28g(void);
2397 +#endif //FSL_BACKPLANE_H
2399 +++ b/drivers/net/phy/fsl_backplane_serdes_10g.c
2401 +// SPDX-License-Identifier: GPL-2.0+
2403 + * DPAA backplane driver for SerDes 10G.
2404 + * Author: Florinel Iordache <florinel.iordache@nxp.com>
2406 + * Copyright 2018 NXP
2408 + * Licensed under the GPL-2 or later.
2411 +#include <linux/io.h>
2412 +#include <linux/delay.h>
2414 +#include "fsl_backplane.h"
2416 +#define BIN_M1_SEL 6
2417 +#define BIN_Long_SEL 7
2418 +#define CDR_SEL_MASK 0x00070000
2420 +#define PRE_COE_SHIFT 22
2421 +#define POST_COE_SHIFT 16
2422 +#define ZERO_COE_SHIFT 8
2424 +#define TECR0_INIT 0x24200000
2426 +#define GCR0_RESET_MASK 0x00600000
2428 +#define GCR1_SNP_START_MASK 0x00000040
2429 +#define GCR1_CTL_SNP_START_MASK 0x00002000
2431 +#define RECR1_CTL_SNP_DONE_MASK 0x00000002
2432 +#define RECR1_SNP_DONE_MASK 0x00000004
2433 +#define TCSR1_SNP_DATA_MASK 0x0000ffc0
2434 +#define TCSR1_SNP_DATA_SHIFT 6
2435 +#define TCSR1_EQ_SNPBIN_SIGN_MASK 0x100
2437 +#define RECR1_GAINK2_MASK 0x0f000000
2438 +#define RECR1_GAINK2_SHIFT 24
2440 +/* Required only for 1000BASE KX */
2441 +#define GCR1_REIDL_TH_MASK 0x00700000
2442 +#define GCR1_REIDL_EX_SEL_MASK 0x000c0000
2443 +#define GCR1_REIDL_ET_MAS_MASK 0x00004000
2444 +#define TECR0_AMP_RED_MASK 0x0000003f
2446 +struct per_lane_ctrl_status {
2447 + u32 gcr0; /* 0x.000 - General Control Register 0 */
2448 + u32 gcr1; /* 0x.004 - General Control Register 1 */
2449 + u32 gcr2; /* 0x.008 - General Control Register 2 */
2450 + u32 resv1; /* 0x.00C - Reserved */
2451 + u32 recr0; /* 0x.010 - Receive Equalization Control Register 0 */
2452 + u32 recr1; /* 0x.014 - Receive Equalization Control Register 1 */
2453 + u32 tecr0; /* 0x.018 - Transmit Equalization Control Register 0 */
2454 + u32 resv2; /* 0x.01C - Reserved */
2455 + u32 tlcr0; /* 0x.020 - TTL Control Register 0 */
2456 + u32 tlcr1; /* 0x.024 - TTL Control Register 1 */
2457 + u32 tlcr2; /* 0x.028 - TTL Control Register 2 */
2458 + u32 tlcr3; /* 0x.02C - TTL Control Register 3 */
2459 + u32 tcsr0; /* 0x.030 - Test Control/Status Register 0 */
2460 + u32 tcsr1; /* 0x.034 - Test Control/Status Register 1 */
2461 + u32 tcsr2; /* 0x.038 - Test Control/Status Register 2 */
2462 + u32 tcsr3; /* 0x.03C - Test Control/Status Register 3 */
2465 +static struct serdes_access srds;
2467 +static u32 get_lane_memmap_size(void)
2472 +static void reset_lane(void *reg)
2474 + struct per_lane_ctrl_status *reg_base = reg;
2476 + /* reset the lane */
2477 + srds.iowrite32(srds.ioread32(®_base->gcr0) & ~GCR0_RESET_MASK,
2481 + /* unreset the lane */
2482 + srds.iowrite32(srds.ioread32(®_base->gcr0) | GCR0_RESET_MASK,
2487 +static void tune_tecr(void *reg, u32 ratio_preq, u32 ratio_pst1q, u32 adpt_eq, bool reset)
2489 + struct per_lane_ctrl_status *reg_base = reg;
2492 + val = TECR0_INIT |
2493 + adpt_eq << ZERO_COE_SHIFT |
2494 + ratio_preq << PRE_COE_SHIFT |
2495 + ratio_pst1q << POST_COE_SHIFT;
2498 + /* reset the lane */
2499 + srds.iowrite32(srds.ioread32(®_base->gcr0) & ~GCR0_RESET_MASK,
2504 + srds.iowrite32(val, ®_base->tecr0);
2508 + /* unreset the lane */
2509 + srds.iowrite32(srds.ioread32(®_base->gcr0) | GCR0_RESET_MASK,
2515 +static void lane_set_1gkx(void *reg)
2517 + struct per_lane_ctrl_status *reg_base = reg;
2520 + /* reset the lane */
2521 + srds.iowrite32(srds.ioread32(®_base->gcr0) & ~GCR0_RESET_MASK,
2525 + /* set gcr1 for 1GKX */
2526 + val = srds.ioread32(®_base->gcr1);
2527 + val &= ~(GCR1_REIDL_TH_MASK | GCR1_REIDL_EX_SEL_MASK |
2528 + GCR1_REIDL_ET_MAS_MASK);
2529 + srds.iowrite32(val, ®_base->gcr1);
2532 + /* set tecr0 for 1GKX */
2533 + val = srds.ioread32(®_base->tecr0);
2534 + val &= ~TECR0_AMP_RED_MASK;
2535 + srds.iowrite32(val, ®_base->tecr0);
2538 + /* unreset the lane */
2539 + srds.iowrite32(srds.ioread32(®_base->gcr0) | GCR0_RESET_MASK,
2544 +static int get_median_gaink2(u32 *reg)
2546 + int gaink2_snap_shot[BIN_SNAPSHOT_NUM];
2548 + struct per_lane_ctrl_status *reg_base;
2550 + int i, j, tmp, pos;
2552 + reg_base = (struct per_lane_ctrl_status *)reg;
2554 + for (i = 0; i < BIN_SNAPSHOT_NUM; i++) {
2555 +		/* wait until RECR1_CTL_SNP_DONE_MASK has cleared */
2557 + while (srds.ioread32(®_base->recr1) &
2558 + RECR1_CTL_SNP_DONE_MASK) {
2565 + /* start snap shot */
2566 + srds.iowrite32((srds.ioread32(®_base->gcr1) |
2567 + GCR1_CTL_SNP_START_MASK),
2570 + /* wait for SNP done */
2572 + while (!(srds.ioread32(®_base->recr1) &
2573 + RECR1_CTL_SNP_DONE_MASK)) {
2580 + /* read and save the snap shot */
2581 + rx_eq_snp = srds.ioread32(®_base->recr1);
2582 + gaink2_snap_shot[i] = (rx_eq_snp & RECR1_GAINK2_MASK) >>
2583 + RECR1_GAINK2_SHIFT;
2585 +		/* terminate the snapshot by clearing GCR1[REQ_CTL_SNP] */
2586 + srds.iowrite32((srds.ioread32(®_base->gcr1) &
2587 + ~GCR1_CTL_SNP_START_MASK),
2591 +	/* get the median of the 5 snapshots */
2592 + for (i = 0; i < BIN_SNAPSHOT_NUM - 1; i++) {
2593 + tmp = gaink2_snap_shot[i];
2595 + for (j = i + 1; j < BIN_SNAPSHOT_NUM; j++) {
2596 + if (gaink2_snap_shot[j] < tmp) {
2597 + tmp = gaink2_snap_shot[j];
2602 + gaink2_snap_shot[pos] = gaink2_snap_shot[i];
2603 + gaink2_snap_shot[i] = tmp;
2606 + return gaink2_snap_shot[2];
2609 +static bool is_bin_early(int bin_sel, void *reg)
2611 + bool early = false;
2612 + int bin_snap_shot[BIN_SNAPSHOT_NUM];
2613 + int i, negative_count = 0;
2614 + struct per_lane_ctrl_status *reg_base = reg;
2617 + for (i = 0; i < BIN_SNAPSHOT_NUM; i++) {
2618 +		/* wait until RECR1_SNP_DONE_MASK has cleared */
2620 + while ((srds.ioread32(®_base->recr1) & RECR1_SNP_DONE_MASK)) {
2627 + /* set TCSR1[CDR_SEL] to BinM1/BinLong */
2628 + if (bin_sel == BIN_M1) {
2629 + srds.iowrite32((srds.ioread32(®_base->tcsr1) &
2630 + ~CDR_SEL_MASK) | BIN_M1_SEL,
2631 + ®_base->tcsr1);
2633 + srds.iowrite32((srds.ioread32(®_base->tcsr1) &
2634 + ~CDR_SEL_MASK) | BIN_Long_SEL,
2635 + ®_base->tcsr1);
2638 + /* start snap shot */
2639 + srds.iowrite32(srds.ioread32(®_base->gcr1) | GCR1_SNP_START_MASK,
2642 + /* wait for SNP done */
2644 + while (!(srds.ioread32(®_base->recr1) & RECR1_SNP_DONE_MASK)) {
2651 + /* read and save the snap shot */
2652 + bin_snap_shot[i] = (srds.ioread32(®_base->tcsr1) &
2653 + TCSR1_SNP_DATA_MASK) >> TCSR1_SNP_DATA_SHIFT;
2654 + if (bin_snap_shot[i] & TCSR1_EQ_SNPBIN_SIGN_MASK)
2657 +		/* terminate the snapshot by clearing GCR1[SNP_START] */
2658 + srds.iowrite32(srds.ioread32(®_base->gcr1) & ~GCR1_SNP_START_MASK,
2662 + if (((bin_sel == BIN_M1) && (negative_count > BIN_M1_THRESHOLD)) ||
2663 + ((bin_sel == BIN_LONG && (negative_count > BIN_LONG_THRESHOLD)))) {
2670 +struct serdes_access* setup_serdes_access_10g(void)
2672 + srds.get_lane_memmap_size = get_lane_memmap_size;
2673 + srds.tune_tecr = tune_tecr;
2674 + srds.reset_lane = reset_lane;
2675 + srds.lane_set_1gkx = lane_set_1gkx;
2676 + srds.get_median_gaink2 = get_median_gaink2;
2677 + srds.is_bin_early = is_bin_early;
2683 +++ b/drivers/net/phy/fsl_backplane_serdes_28g.c
2685 +// SPDX-License-Identifier: GPL-2.0+
2687 + * DPAA backplane driver for SerDes 28G.
2688 + * Author: Florinel Iordache <florinel.iordache@nxp.com>
2690 + * Copyright 2018 NXP
2692 + * Licensed under the GPL-2 or later.
2695 +#include <linux/io.h>
2696 +#include <linux/delay.h>
2697 +#include <linux/sched.h>
2699 +#include "fsl_backplane.h"
2701 +#define BIN_M1_SEL 0x0000c000
2702 +#define BIN_Long_SEL 0x0000d000
2703 +#define CDR_SEL_MASK 0x0000f000
2705 +#define PRE_COE_SHIFT 16
2706 +#define POST_COE_SHIFT 8
2707 +#define ZERO_COE_SHIFT 24
2709 +#define TECR0_INIT 0x20808000
2711 +#define RESET_REQ_MASK 0x80000000
2713 +#define RECR3_SNP_START_MASK 0x80000000
2714 +#define RECR3_SNP_DONE_MASK 0x40000000
2716 +#define RECR4_SNP_DATA_MASK 0x000003ff
2717 +#define RECR4_SNP_DATA_SHIFT 0
2718 +#define RECR4_EQ_SNPBIN_SIGN_MASK 0x200
2720 +#define RECR3_GAINK2_MASK 0x1f000000
2721 +#define RECR3_GAINK2_SHIFT 24
2723 +/* Required only for 1000BASE KX */
2724 +#define GCR1_REIDL_TH_MASK 0x00700000
2725 +#define GCR1_REIDL_EX_SEL_MASK 0x000c0000
2726 +#define GCR1_REIDL_ET_MAS_MASK 0x04000000
2727 +#define TECR0_AMP_RED_MASK 0x0000003f
2729 +struct per_lane_ctrl_status {
2730 + u32 gcr0; /* 0x.000 - General Control Register 0 */
2731 + u32 resv1; /* 0x.004 - Reserved */
2732 + u32 resv2; /* 0x.008 - Reserved */
2733 + u32 resv3; /* 0x.00C - Reserved */
2734 + u32 resv4; /* 0x.010 - Reserved */
2735 + u32 resv5; /* 0x.014 - Reserved */
2736 + u32 resv6; /* 0x.018 - Reserved */
2737 + u32 resv7; /* 0x.01C - Reserved */
2738 + u32 trstctl; /* 0x.020 - TX Reset Control Register */
2739 + u32 tgcr0; /* 0x.024 - TX General Control Register 0 */
2740 + u32 tgcr1; /* 0x.028 - TX General Control Register 1 */
2741 + u32 tgcr2; /* 0x.02C - TX General Control Register 2 */
2742 + u32 tecr0; /* 0x.030 - Transmit Equalization Control Register 0 */
2743 + u32 tecr1; /* 0x.034 - Transmit Equalization Control Register 1 */
2744 + u32 resv8; /* 0x.038 - Reserved */
2745 + u32 resv9; /* 0x.03C - Reserved */
2746 + u32 rrstctl; /* 0x.040 - RX Reset Control Register */
2747 + u32 rgcr0; /* 0x.044 - RX General Control Register 0 */
2748 + u32 rxgcr1; /* 0x.048 - RX General Control Register 1 */
2749 + u32 resv10; /* 0x.04C - Reserved */
2750 + u32 recr0; /* 0x.050 - RX Equalization Register 0 */
2751 + u32 recr1; /* 0x.054 - RX Equalization Register 1 */
2752 + u32 recr2; /* 0x.058 - RX Equalization Register 2 */
2753 + u32 recr3; /* 0x.05C - RX Equalization Register 3 */
2754 + u32 recr4; /* 0x.060 - RX Equalization Register 4 */
2755 + u32 resv11; /* 0x.064 - Reserved */
2756 + u32 rccr0; /* 0x.068 - RX Calibration Register 0 */
2757 + u32 rccr1; /* 0x.06C - RX Calibration Register 1 */
2758 + u32 rcpcr0; /* 0x.070 - RX Clock Path Register 0 */
2759 + u32 rsccr0; /* 0x.074 - RX Sampler Calibration Control Register 0 */
2760 + u32 rsccr1; /* 0x.078 - RX Sampler Calibration Control Register 1 */
2761 + u32 resv12; /* 0x.07C - Reserved */
2762 + u32 ttlcr0; /* 0x.080 - Transition Tracking Loop Register 0 */
2763 + u32 ttlcr1; /* 0x.084 - Transition Tracking Loop Register 1 */
2764 + u32 ttlcr2; /* 0x.088 - Transition Tracking Loop Register 2 */
2765 + u32 ttlcr3; /* 0x.08C - Transition Tracking Loop Register 3 */
2766 + u32 resv13; /* 0x.090 - Reserved */
2767 + u32 resv14; /* 0x.094 - Reserved */
2768 + u32 resv15; /* 0x.098 - Reserved */
2769 + u32 resv16; /* 0x.09C - Reserved */
2770 + u32 tcsr0; /* 0x.0A0 - Test Control/Status Register 0 */
2771 + u32 tcsr1; /* 0x.0A4 - Test Control/Status Register 1 */
2772 + u32 tcsr2; /* 0x.0A8 - Test Control/Status Register 2 */
2773 + u32 tcsr3; /* 0x.0AC - Test Control/Status Register 3 */
2774 + u32 tcsr4; /* 0x.0B0 - Test Control/Status Register 4 */
2775 + u32 resv17; /* 0x.0B4 - Reserved */
2776 + u32 resv18; /* 0x.0B8 - Reserved */
2777 + u32 resv19; /* 0x.0BC - Reserved */
2778 + u32 rxcb0; /* 0x.0C0 - RX Control Block Register 0 */
2779 + u32 rxcb1; /* 0x.0C4 - RX Control Block Register 1 */
2780 + u32 resv20; /* 0x.0C8 - Reserved */
2781 + u32 resv21; /* 0x.0CC - Reserved */
2782 + u32 rxss0; /* 0x.0D0 - RX Speed Switch Register 0 */
2783 + u32 rxss1; /* 0x.0D4 - RX Speed Switch Register 1 */
2784 + u32 rxss2; /* 0x.0D8 - RX Speed Switch Register 2 */
2785 + u32 resv22; /* 0x.0DC - Reserved */
2786 + u32 txcb0; /* 0x.0E0 - TX Control Block Register 0 */
2787 + u32 txcb1; /* 0x.0E4 - TX Control Block Register 1 */
2788 + u32 resv23; /* 0x.0E8 - Reserved */
2789 + u32 resv24; /* 0x.0EC - Reserved */
2790 + u32 txss0; /* 0x.0F0 - TX Speed Switch Register 0 */
2791 + u32 txss1; /* 0x.0F4 - TX Speed Switch Register 1 */
2792 + u32 txss2; /* 0x.0F8 - TX Speed Switch Register 2 */
2793 + u32 resv25; /* 0x.0FC - Reserved */
2796 +static struct serdes_access srds;
2798 +static u32 get_lane_memmap_size(void)
2803 +static void reset_lane(void *reg)
2805 + struct per_lane_ctrl_status *reg_base = reg;
2807 + unsigned long timeout;
2809 + /* reset Tx lane: send reset request */
2810 + srds.iowrite32(srds.ioread32(®_base->trstctl) | RESET_REQ_MASK,
2811 + ®_base->trstctl);
2814 + while (timeout--) {
2815 + val = srds.ioread32(®_base->trstctl);
2816 + if (!(val & RESET_REQ_MASK))
2818 + usleep_range(5, 20);
2821 + /* reset Rx lane: send reset request */
2822 + srds.iowrite32(srds.ioread32(®_base->rrstctl) | RESET_REQ_MASK,
2823 + ®_base->rrstctl);
2826 + while (timeout--) {
2827 + val = srds.ioread32(®_base->rrstctl);
2828 + if (!(val & RESET_REQ_MASK))
2830 + usleep_range(5, 20);
2833 + /* wait for a while after reset */
2834 + timeout = jiffies + 10;
2835 + while (time_before(jiffies, timeout)) {
2837 + usleep_range(5, 20);
2841 +static void tune_tecr(void *reg, u32 ratio_preq, u32 ratio_pst1q, u32 adpt_eq, bool reset)
2843 + struct per_lane_ctrl_status *reg_base = reg;
2851 + val = TECR0_INIT |
2852 + ratio_preq << PRE_COE_SHIFT |
2853 + ratio_pst1q << POST_COE_SHIFT;
2854 + srds.iowrite32(val, ®_base->tecr0);
2856 + val = adpt_eq << ZERO_COE_SHIFT;
2857 + srds.iowrite32(val, ®_base->tecr1);
2862 +static void lane_set_1gkx(void *reg)
2864 + struct per_lane_ctrl_status *reg_base = reg;
2870 + /* set gcr1 for 1GKX */
2871 + val = srds.ioread32(®_base->rxgcr1);
2872 + val &= ~(GCR1_REIDL_TH_MASK | GCR1_REIDL_EX_SEL_MASK |
2873 + GCR1_REIDL_ET_MAS_MASK);
2874 + srds.iowrite32(val, ®_base->rxgcr1);
2877 + /* set tecr0 for 1GKX */
2878 + val = srds.ioread32(®_base->tecr0);
2879 + val &= ~TECR0_AMP_RED_MASK;
2880 + srds.iowrite32(val, ®_base->tecr0);
2884 +static int get_median_gaink2(u32 *reg)
2886 + int gaink2_snap_shot[BIN_SNAPSHOT_NUM];
2888 + struct per_lane_ctrl_status *reg_base;
2890 + int i, j, tmp, pos;
2892 + reg_base = (struct per_lane_ctrl_status *)reg;
2894 + for (i = 0; i < BIN_SNAPSHOT_NUM; i++) {
2895 +		/* wait until RECR3_SNP_DONE_MASK has cleared */
2897 + while (srds.ioread32(®_base->recr3) &
2898 + RECR3_SNP_DONE_MASK) {
2905 + /* start snap shot */
2906 + srds.iowrite32((srds.ioread32(®_base->recr3) |
2907 + RECR3_SNP_START_MASK),
2908 + ®_base->recr3);
2910 + /* wait for SNP done */
2912 + while (!(srds.ioread32(®_base->recr3) &
2913 + RECR3_SNP_DONE_MASK)) {
2920 + /* read and save the snap shot */
2921 + rx_eq_snp = srds.ioread32(®_base->recr3);
2922 + gaink2_snap_shot[i] = (rx_eq_snp & RECR3_GAINK2_MASK) >>
2923 + RECR3_GAINK2_SHIFT;
2925 +		/* terminate the snapshot by clearing RECR3[SNP_START] */
2926 + srds.iowrite32((srds.ioread32(®_base->recr3) &
2927 + ~RECR3_SNP_START_MASK),
2928 + ®_base->recr3);
2931 +	/* get the median of the 5 snapshots */
2932 + for (i = 0; i < BIN_SNAPSHOT_NUM - 1; i++) {
2933 + tmp = gaink2_snap_shot[i];
2935 + for (j = i + 1; j < BIN_SNAPSHOT_NUM; j++) {
2936 + if (gaink2_snap_shot[j] < tmp) {
2937 + tmp = gaink2_snap_shot[j];
2942 + gaink2_snap_shot[pos] = gaink2_snap_shot[i];
2943 + gaink2_snap_shot[i] = tmp;
2946 + return gaink2_snap_shot[2];
2949 +static bool is_bin_early(int bin_sel, void *reg)
2951 + bool early = false;
2952 + int bin_snap_shot[BIN_SNAPSHOT_NUM];
2953 + int i, negative_count = 0;
2954 + struct per_lane_ctrl_status *reg_base = reg;
2957 + for (i = 0; i < BIN_SNAPSHOT_NUM; i++) {
2958 +		/* wait until RECR3_SNP_DONE_MASK has cleared */
2960 + while ((srds.ioread32(®_base->recr3) & RECR3_SNP_DONE_MASK)) {
2967 +		/* set RECR4[CDR_SEL] to BinM1/BinLong */
2968 + if (bin_sel == BIN_M1) {
2969 + srds.iowrite32((srds.ioread32(®_base->recr4) &
2970 + ~CDR_SEL_MASK) | BIN_M1_SEL,
2971 + ®_base->recr4);
2973 + srds.iowrite32((srds.ioread32(®_base->recr4) &
2974 + ~CDR_SEL_MASK) | BIN_Long_SEL,
2975 + ®_base->recr4);
2978 + /* start snap shot */
2979 + srds.iowrite32(srds.ioread32(®_base->recr3) | RECR3_SNP_START_MASK,
2980 + ®_base->recr3);
2982 + /* wait for SNP done */
2984 + while (!(srds.ioread32(®_base->recr3) & RECR3_SNP_DONE_MASK)) {
2991 + /* read and save the snap shot */
2992 + bin_snap_shot[i] = (srds.ioread32(®_base->recr4) &
2993 + RECR4_SNP_DATA_MASK) >> RECR4_SNP_DATA_SHIFT;
2994 + if (bin_snap_shot[i] & RECR4_EQ_SNPBIN_SIGN_MASK)
2997 +		/* terminate the snapshot by clearing RECR3[SNP_START] */
2998 + srds.iowrite32(srds.ioread32(®_base->recr3) & ~RECR3_SNP_START_MASK,
2999 + ®_base->recr3);
3002 + if (((bin_sel == BIN_M1) && (negative_count > BIN_M1_THRESHOLD)) ||
3003 + ((bin_sel == BIN_LONG && (negative_count > BIN_LONG_THRESHOLD)))) {
3010 +struct serdes_access* setup_serdes_access_28g(void)
3012 + srds.get_lane_memmap_size = get_lane_memmap_size;
3013 + srds.tune_tecr = tune_tecr;
3014 + srds.reset_lane = reset_lane;
3015 + srds.lane_set_1gkx = lane_set_1gkx;
3016 + srds.get_median_gaink2 = get_median_gaink2;
3017 + srds.is_bin_early = is_bin_early;
3022 +++ b/drivers/net/phy/inphi.c
3025 + * Copyright 2018 NXP
3026 + * Copyright 2018 INPHI
3028 + * Redistribution and use in source and binary forms, with or without
3029 + * modification, are permitted provided that the following conditions are met:
3031 + * 1. Redistributions of source code must retain the above copyright notice,
3032 + * this list of conditions and the following disclaimer.
3033 + * 2. Redistributions in binary form must reproduce the above copyright notice,
3034 + * this list of conditions and the following disclaimer in the documentation
3035 + * and/or other materials provided with the distribution.
3037 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
3038 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
3039 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
3040 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
3041 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
3042 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
3043 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
3044 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
3045 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
3046 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
3047 + * POSSIBILITY OF SUCH DAMAGE.
3049 + * Inphi is a registered trademark of Inphi Corporation
3053 +#include <linux/module.h>
3054 +#include <linux/phy.h>
3055 +#include <linux/mdio.h>
3056 +#include <linux/interrupt.h>
3057 +#include <linux/platform_device.h>
3058 +#include <linux/of_irq.h>
3059 +#include <linux/workqueue.h>
3060 +#include <linux/i2c.h>
3061 +#include <linux/timer.h>
3062 +#include <linux/delay.h>
3063 +#include <linux/kernel.h>
3064 +#include <linux/init.h>
3065 +#include <linux/fs.h>
3066 +#include <linux/cdev.h>
3067 +#include <linux/device.h>
3068 +#include <linux/slab.h>
3069 +#include <linux/uaccess.h>
3071 +#define PHY_ID_IN112525 0x02107440
3073 +#define INPHI_S03_DEVICE_ID_MSB 0x2
3074 +#define INPHI_S03_DEVICE_ID_LSB 0x3
3076 +#define ALL_LANES 4
3077 +#define INPHI_POLL_DELAY 2500
3079 +#define PHYCTRL_REG1 0x0012
3080 +#define PHYCTRL_REG2 0x0014
3081 +#define PHYCTRL_REG3 0x0120
3082 +#define PHYCTRL_REG4 0x0121
3083 +#define PHYCTRL_REG5 0x0180
3084 +#define PHYCTRL_REG6 0x0580
3085 +#define PHYCTRL_REG7 0x05C4
3086 +#define PHYCTRL_REG8 0x01C8
3087 +#define PHYCTRL_REG9 0x0521
3089 +#define PHYSTAT_REG1 0x0021
3090 +#define PHYSTAT_REG2 0x0022
3091 +#define PHYSTAT_REG3 0x0123
3093 +#define PHYMISC_REG1 0x0025
3094 +#define PHYMISC_REG2 0x002c
3095 +#define PHYMISC_REG3 0x00b3
3096 +#define PHYMISC_REG4 0x0181
3097 +#define PHYMISC_REG5 0x019D
3098 +#define PHYMISC_REG6 0x0198
3099 +#define PHYMISC_REG7 0x0199
3100 +#define PHYMISC_REG8 0x0581
3101 +#define PHYMISC_REG9 0x0598
3102 +#define PHYMISC_REG10 0x059c
3103 +#define PHYMISC_REG20 0x01B0
3104 +#define PHYMISC_REG21 0x01BC
3105 +#define PHYMISC_REG22 0x01C0
3107 +#define RX_VCO_CODE_OFFSET 5
3108 +#define VCO_CODE 390
3110 +int vco_codes[ALL_LANES] = {
3117 +static void mykmod_work_handler(struct work_struct *w);
3119 +static struct workqueue_struct *wq;
3120 +static DECLARE_DELAYED_WORK(mykmod_work, mykmod_work_handler);
3121 +static unsigned long onesec;
3122 +struct phy_device *inphi_phydev;
3124 +static int mdio_wr(u32 regnum, u16 val)
3126 + regnum = MII_ADDR_C45 | (MDIO_MMD_VEND1 << 16) | (regnum & 0xffff);
3128 + return mdiobus_write(inphi_phydev->mdio.bus, inphi_phydev->mdio.addr,
3132 +static int mdio_rd(u32 regnum)
3134 + regnum = MII_ADDR_C45 | (MDIO_MMD_VEND1 << 16) | (regnum & 0xffff);
3136 + return mdiobus_read(inphi_phydev->mdio.bus, inphi_phydev->mdio.addr,
3141 +int bit_test(int value, int bit_field)
3144 + int bit_mask = (1 << bit_field);
3146 + result = ((value & bit_mask) == bit_mask);
3150 +int tx_pll_lock_test(int lane)
3152 + int i, val, locked = 1;
3154 + if (lane == ALL_LANES) {
3155 + for (i = 0; i < ALL_LANES; i++) {
3156 + val = mdio_rd(i * 0x100 + PHYSTAT_REG3);
3157 + locked = locked & bit_test(val, 15);
3160 + val = mdio_rd(lane * 0x100 + PHYSTAT_REG3);
3161 + locked = locked & bit_test(val, 15);
3167 +void rx_reset_assert(int lane)
3171 + if (lane == ALL_LANES) {
3172 + val = mdio_rd(PHYMISC_REG2);
3174 + mdio_wr(PHYMISC_REG2, val + mask);
3176 + val = mdio_rd(lane * 0x100 + PHYCTRL_REG8);
3178 + mdio_wr(lane * 0x100 + PHYCTRL_REG8, val + mask);
3182 +void rx_reset_de_assert(int lane)
3186 + if (lane == ALL_LANES) {
3187 + val = mdio_rd(PHYMISC_REG2);
3188 + mask = 0xffff - (1 << 15);
3189 + mdio_wr(PHYMISC_REG2, val & mask);
3191 + val = mdio_rd(lane * 0x100 + PHYCTRL_REG8);
3192 + mask = 0xffff - (1 << 6);
3193 + mdio_wr(lane * 0x100 + PHYCTRL_REG8, val & mask);
3197 +void rx_powerdown_assert(int lane)
3201 + val = mdio_rd(lane * 0x100 + PHYCTRL_REG8);
3203 + mdio_wr(lane * 0x100 + PHYCTRL_REG8, val + mask);
3206 +void rx_powerdown_de_assert(int lane)
3210 + val = mdio_rd(lane * 0x100 + PHYCTRL_REG8);
3211 + mask = 0xffff - (1 << 5);
3212 + mdio_wr(lane * 0x100 + PHYCTRL_REG8, val & mask);
3215 +void tx_pll_assert(int lane)
3219 + if (lane == ALL_LANES) {
3220 + val = mdio_rd(PHYMISC_REG2);
3221 + recal = (1 << 12);
3222 + mdio_wr(PHYMISC_REG2, val | recal);
3224 + val = mdio_rd(lane * 0x100 + PHYCTRL_REG4);
3225 + recal = (1 << 15);
3226 + mdio_wr(lane * 0x100 + PHYCTRL_REG4, val | recal);
3230 +void tx_pll_de_assert(int lane)
3234 + if (lane == ALL_LANES) {
3235 + val = mdio_rd(PHYMISC_REG2);
3237 + mdio_wr(PHYMISC_REG2, val & recal);
3239 + val = mdio_rd(lane * 0x100 + PHYCTRL_REG4);
3241 + mdio_wr(lane * 0x100 + PHYCTRL_REG4, val & recal);
3245 +void tx_core_assert(int lane)
3247 + int recal, val, val2, core_reset;
3250 + val = mdio_rd(PHYMISC_REG2);
3252 + mdio_wr(PHYMISC_REG2, val | recal);
3254 + val2 = mdio_rd(PHYMISC_REG3);
3255 + core_reset = (1 << (lane + 8));
3256 + mdio_wr(PHYMISC_REG3, val2 | core_reset);
3260 +void lol_disable(int lane)
3264 + val = mdio_rd(PHYMISC_REG3);
3265 + mask = 1 << (lane + 4);
3266 + mdio_wr(PHYMISC_REG3, val | mask);
3269 +void tx_core_de_assert(int lane)
3271 + int val, recal, val2, core_reset;
3273 + if (lane == ALL_LANES) {
3274 + val = mdio_rd(PHYMISC_REG2);
3275 + recal = 0xffff - (1 << 10);
3276 + mdio_wr(PHYMISC_REG2, val & recal);
3278 + val2 = mdio_rd(PHYMISC_REG3);
3279 + core_reset = 0xffff - (1 << (lane + 8));
3280 + mdio_wr(PHYMISC_REG3, val2 & core_reset);
3284 +void tx_restart(int lane)
3286 + tx_core_assert(lane);
3287 + tx_pll_assert(lane);
3288 + tx_pll_de_assert(lane);
3289 + usleep_range(1500, 1600);
3290 + tx_core_de_assert(lane);
3293 +void disable_lane(int lane)
3295 + rx_reset_assert(lane);
3296 + rx_powerdown_assert(lane);
3297 + tx_core_assert(lane);
3298 + lol_disable(lane);
3301 +void toggle_reset(int lane)
3303 + int reg, val, orig;
3305 + if (lane == ALL_LANES) {
3306 + mdio_wr(PHYMISC_REG2, 0x8000);
3308 + mdio_wr(PHYMISC_REG2, 0x0000);
3310 + reg = lane * 0x100 + PHYCTRL_REG8;
3312 + orig = mdio_rd(reg);
3313 + mdio_wr(reg, orig + val);
3315 + mdio_wr(reg, orig);
3319 +int az_complete_test(int lane)
3321 + int success = 1, value;
3323 + if (lane == 0 || lane == ALL_LANES) {
3324 + value = mdio_rd(PHYCTRL_REG5);
3325 + success = success & bit_test(value, 2);
3327 + if (lane == 1 || lane == ALL_LANES) {
3328 + value = mdio_rd(PHYCTRL_REG5 + 0x100);
3329 + success = success & bit_test(value, 2);
3331 + if (lane == 2 || lane == ALL_LANES) {
3332 + value = mdio_rd(PHYCTRL_REG5 + 0x200);
3333 + success = success & bit_test(value, 2);
3335 + if (lane == 3 || lane == ALL_LANES) {
3336 + value = mdio_rd(PHYCTRL_REG5 + 0x300);
3337 + success = success & bit_test(value, 2);
3343 +void save_az_offsets(int lane)
3347 +#define AZ_OFFSET_LANE_UPDATE(reg, lane) \
3348 + mdio_wr((reg) + (lane) * 0x100, \
3349 + (mdio_rd((reg) + (lane) * 0x100) >> 8))
3351 + if (lane == ALL_LANES) {
3352 + for (i = 0; i < ALL_LANES; i++) {
3353 + AZ_OFFSET_LANE_UPDATE(PHYMISC_REG20, i);
3354 + AZ_OFFSET_LANE_UPDATE(PHYMISC_REG20 + 1, i);
3355 + AZ_OFFSET_LANE_UPDATE(PHYMISC_REG20 + 2, i);
3356 + AZ_OFFSET_LANE_UPDATE(PHYMISC_REG20 + 3, i);
3357 + AZ_OFFSET_LANE_UPDATE(PHYMISC_REG21, i);
3358 + AZ_OFFSET_LANE_UPDATE(PHYMISC_REG21 + 1, i);
3359 + AZ_OFFSET_LANE_UPDATE(PHYMISC_REG21 + 2, i);
3360 + AZ_OFFSET_LANE_UPDATE(PHYMISC_REG21 + 3, i);
3361 + AZ_OFFSET_LANE_UPDATE(PHYMISC_REG22, i);
3364 + AZ_OFFSET_LANE_UPDATE(PHYMISC_REG20, lane);
3365 + AZ_OFFSET_LANE_UPDATE(PHYMISC_REG20 + 1, lane);
3366 + AZ_OFFSET_LANE_UPDATE(PHYMISC_REG20 + 2, lane);
3367 + AZ_OFFSET_LANE_UPDATE(PHYMISC_REG20 + 3, lane);
3368 + AZ_OFFSET_LANE_UPDATE(PHYMISC_REG21, lane);
3369 + AZ_OFFSET_LANE_UPDATE(PHYMISC_REG21 + 1, lane);
3370 + AZ_OFFSET_LANE_UPDATE(PHYMISC_REG21 + 2, lane);
3371 + AZ_OFFSET_LANE_UPDATE(PHYMISC_REG21 + 3, lane);
3372 + AZ_OFFSET_LANE_UPDATE(PHYMISC_REG22, lane);
3375 + mdio_wr(PHYCTRL_REG7, 0x0001);
3378 +void save_vco_codes(int lane)
3382 + if (lane == ALL_LANES) {
3383 + for (i = 0; i < ALL_LANES; i++) {
3384 + vco_codes[i] = mdio_rd(PHYMISC_REG5 + i * 0x100);
3385 + mdio_wr(PHYMISC_REG5 + i * 0x100,
3386 + vco_codes[i] + RX_VCO_CODE_OFFSET);
3389 + vco_codes[lane] = mdio_rd(PHYMISC_REG5 + lane * 0x100);
3390 + mdio_wr(PHYMISC_REG5 + lane * 0x100,
3391 + vco_codes[lane] + RX_VCO_CODE_OFFSET);
3395 +int inphi_lane_recovery(int lane)
3397 + int i, value, az_pass;
3404 + rx_reset_assert(lane);
3408 + mdio_wr(PHYMISC_REG2, 0x9C00);
3411 + value = mdio_rd(PHYMISC_REG2);
3413 + } while (!bit_test(value, 4));
3416 + dev_err(&inphi_phydev->mdio.dev,
3417 + "Incorrect usage of APIs in %s driver\n",
3418 + inphi_phydev->drv->name);
3422 + if (lane == ALL_LANES) {
3423 + for (i = 0; i < ALL_LANES; i++)
3424 + mdio_wr(PHYMISC_REG7 + i * 0x100, VCO_CODE);
3426 + mdio_wr(PHYMISC_REG7 + lane * 0x100, VCO_CODE);
3429 + if (lane == ALL_LANES)
3430 + for (i = 0; i < ALL_LANES; i++)
3431 + mdio_wr(PHYCTRL_REG5 + i * 0x100, 0x0418);
3433 + mdio_wr(PHYCTRL_REG5 + lane * 0x100, 0x0418);
3435 + mdio_wr(PHYCTRL_REG7, 0x0000);
3437 + rx_reset_de_assert(lane);
3439 + if (lane == ALL_LANES) {
3440 + for (i = 0; i < ALL_LANES; i++) {
3441 + mdio_wr(PHYCTRL_REG5 + i * 0x100, 0x0410);
3442 + mdio_wr(PHYCTRL_REG5 + i * 0x100, 0x0412);
3445 + mdio_wr(PHYCTRL_REG5 + lane * 0x100, 0x0410);
3446 + mdio_wr(PHYCTRL_REG5 + lane * 0x100, 0x0412);
3449 + for (i = 0; i < 64; i++) {
3451 + az_pass = az_complete_test(lane);
3453 + save_az_offsets(lane);
3459 + pr_info("in112525: AZ calibration fail @ lane=%d\n", lane);
3463 + if (lane == ALL_LANES) {
3464 + mdio_wr(PHYMISC_REG8, 0x0002);
3465 + mdio_wr(PHYMISC_REG9, 0x2028);
3466 + mdio_wr(PHYCTRL_REG6, 0x0010);
3467 + usleep_range(1000, 1200);
3468 + mdio_wr(PHYCTRL_REG6, 0x0110);
3470 + mdio_wr(PHYMISC_REG9, 0x3020);
3472 + mdio_wr(PHYMISC_REG4 + lane * 0x100, 0x0002);
3473 + mdio_wr(PHYMISC_REG6 + lane * 0x100, 0x2028);
3474 + mdio_wr(PHYCTRL_REG5 + lane * 0x100, 0x0010);
3475 + usleep_range(1000, 1200);
3476 + mdio_wr(PHYCTRL_REG5 + lane * 0x100, 0x0110);
3478 + mdio_wr(PHYMISC_REG6 + lane * 0x100, 0x3020);
3481 + if (lane == ALL_LANES) {
3482 + mdio_wr(PHYMISC_REG2, 0x1C00);
3483 + mdio_wr(PHYMISC_REG2, 0x0C00);
3489 + if (lane == ALL_LANES) {
3490 + if (bit_test(mdio_rd(PHYMISC_REG2), 6) == 0)
3493 + if (tx_pll_lock_test(lane) == 0)
3497 + save_vco_codes(lane);
3499 + if (lane == ALL_LANES) {
3500 + mdio_wr(PHYMISC_REG2, 0x0400);
3501 + mdio_wr(PHYMISC_REG2, 0x0000);
3502 + value = mdio_rd(PHYCTRL_REG1);
3503 + value = value & 0xffbf;
3504 + mdio_wr(PHYCTRL_REG2, value);
3506 + tx_core_de_assert(lane);
3509 + if (lane == ALL_LANES) {
3510 + mdio_wr(PHYMISC_REG1, 0x8000);
3511 + mdio_wr(PHYMISC_REG1, 0x0000);
3513 + mdio_rd(PHYMISC_REG1);
3514 + mdio_rd(PHYMISC_REG1);
3515 + usleep_range(1000, 1200);
3516 + mdio_rd(PHYSTAT_REG1);
3517 + mdio_rd(PHYSTAT_REG2);
3522 +static void mykmod_work_handler(struct work_struct *w)
3524 + int all_lanes_lock, lane0_lock, lane1_lock, lane2_lock, lane3_lock;
3526 + lane0_lock = bit_test(mdio_rd(0x123), 15);
3527 + lane1_lock = bit_test(mdio_rd(0x223), 15);
3528 + lane2_lock = bit_test(mdio_rd(0x323), 15);
3529 + lane3_lock = bit_test(mdio_rd(0x423), 15);
3531 + /* check if the chip had any successful lane lock from the previous
3532 + * stage (e.g. u-boot)
3534 + all_lanes_lock = lane0_lock | lane1_lock | lane2_lock | lane3_lock;
3536 + if (!all_lanes_lock) {
3538 + inphi_lane_recovery(ALL_LANES);
3541 + inphi_lane_recovery(0);
3543 + inphi_lane_recovery(1);
3545 + inphi_lane_recovery(2);
3547 + inphi_lane_recovery(3);
3550 + queue_delayed_work(wq, &mykmod_work, onesec);
3553 +int inphi_probe(struct phy_device *phydev)
3555 + int phy_id = 0, id_lsb = 0, id_msb = 0;
3557 + /* setup the inphi_phydev ptr for mdio_rd/mdio_wr APIs */
3558 + inphi_phydev = phydev;
3560 + /* Read device id from phy registers */
3561 + id_lsb = mdio_rd(INPHI_S03_DEVICE_ID_MSB);
3565 + phy_id = id_lsb << 16;
3567 + id_msb = mdio_rd(INPHI_S03_DEVICE_ID_LSB);
3573 + /* Make sure the device tree binding matched the driver with the
3576 + if (phy_id != phydev->drv->phy_id) {
3577 + dev_err(&phydev->mdio.dev,
3578 + "Error matching phy with %s driver\n",
3579 + phydev->drv->name);
3583 + /* update the local phydev pointer, used inside all APIs */
3584 + inphi_phydev = phydev;
3585 + onesec = msecs_to_jiffies(INPHI_POLL_DELAY);
3587 + wq = create_singlethread_workqueue("inphi_kmod");
3589 + queue_delayed_work(wq, &mykmod_work, onesec);
3591 + dev_err(&phydev->mdio.dev,
3592 + "Error creating kernel workqueue for %s driver\n",
3593 + phydev->drv->name);
3600 +static struct phy_driver inphi_driver[] = {
3602 + .phy_id = PHY_ID_IN112525,
3603 + .phy_id_mask = 0x0ff0fff0,
3604 + .name = "Inphi 112525_S03",
3605 + .features = PHY_GBIT_FEATURES,
3606 + .probe = &inphi_probe,
3610 +module_phy_driver(inphi_driver);
3612 +static struct mdio_device_id __maybe_unused inphi_tbl[] = {
3613 + { PHY_ID_IN112525, 0x0ff0fff0},
3617 +MODULE_DEVICE_TABLE(mdio, inphi_tbl);
3619 +++ b/drivers/net/phy/mdio-mux-multiplexer.c
3621 +// SPDX-License-Identifier: GPL-2.0+
3622 +/* MDIO bus multiplexer using kernel multiplexer subsystem
3624 + * Copyright 2019 NXP
3627 +#include <linux/platform_device.h>
3628 +#include <linux/mdio-mux.h>
3629 +#include <linux/module.h>
3630 +#include <linux/mux/consumer.h>
3632 +struct mdio_mux_multiplexer_state {
3633 + struct mux_control *muxc;
3639 + * mdio_mux_multiplexer_switch_fn - This function is called by the mdio-mux
3640 + * layer when it thinks the mdio bus
3641 + * multiplexer needs to switch.
3642 + * @current_child: current value of the mux register.
3643 + * @desired_child: value of the 'reg' property of the target child MDIO node.
3644 + * @data: Private data used by this switch_fn passed to mdio_mux_init function
3645 + * via mdio_mux_init(.., .., .., .., data, ..).
3647 + * The first time this function is called, current_child == -1.
3648 + * If current_child == desired_child, then the mux is already set to the
3651 +static int mdio_mux_multiplexer_switch_fn(int current_child, int desired_child,
3654 + struct platform_device *pdev;
3655 + struct mdio_mux_multiplexer_state *s;
3658 + pdev = (struct platform_device *)data;
3659 + s = platform_get_drvdata(pdev);
3661 + if (!(current_child ^ desired_child))
3664 + if (s->do_deselect)
3665 + ret = mux_control_deselect(s->muxc);
3667 + dev_err(&pdev->dev, "mux_control_deselect failed in %s: %d\n",
3672 + ret = mux_control_select(s->muxc, desired_child);
3674 + dev_dbg(&pdev->dev, "%s %d -> %d\n", __func__, current_child,
3676 + s->do_deselect = true;
3678 + s->do_deselect = false;
3684 +static int mdio_mux_multiplexer_probe(struct platform_device *pdev)
3686 + struct device *dev = &pdev->dev;
3687 + struct mdio_mux_multiplexer_state *s;
3690 + s = devm_kzalloc(&pdev->dev, sizeof(*s), GFP_KERNEL);
3694 + s->muxc = devm_mux_control_get(dev, NULL);
3695 + if (IS_ERR(s->muxc)) {
3696 + ret = PTR_ERR(s->muxc);
3697 + if (ret != -EPROBE_DEFER)
3698 + dev_err(&pdev->dev, "Failed to get mux: %d\n", ret);
3702 + platform_set_drvdata(pdev, s);
3704 + ret = mdio_mux_init(&pdev->dev, pdev->dev.of_node,
3705 + mdio_mux_multiplexer_switch_fn, &s->mux_handle,
3711 +static int mdio_mux_multiplexer_remove(struct platform_device *pdev)
3713 + struct mdio_mux_multiplexer_state *s = platform_get_drvdata(pdev);
3715 + mdio_mux_uninit(s->mux_handle);
3717 + if (s->do_deselect)
3718 + mux_control_deselect(s->muxc);
3723 +static const struct of_device_id mdio_mux_multiplexer_match[] = {
3724 + { .compatible = "mdio-mux-multiplexer", },
3727 +MODULE_DEVICE_TABLE(of, mdio_mux_multiplexer_match);
3729 +static struct platform_driver mdio_mux_multiplexer_driver = {
3731 + .name = "mdio-mux-multiplexer",
3732 + .of_match_table = mdio_mux_multiplexer_match,
3734 + .probe = mdio_mux_multiplexer_probe,
3735 + .remove = mdio_mux_multiplexer_remove,
3738 +module_platform_driver(mdio_mux_multiplexer_driver);
3740 +MODULE_DESCRIPTION("MDIO bus multiplexer using kernel multiplexer subsystem");
3741 +MODULE_AUTHOR("Pankaj Bansal <pankaj.bansal@nxp.com>");
3742 +MODULE_LICENSE("GPL");
3743 --- a/drivers/net/phy/swphy.c
3744 +++ b/drivers/net/phy/swphy.c
3745 @@ -77,6 +77,7 @@ static const struct swmii_regs duplex[]
3746 static int swphy_decode_speed(int speed)
3751 return SWMII_SPEED_1000;
3753 --- a/include/linux/phy.h
3754 +++ b/include/linux/phy.h
3755 @@ -87,6 +87,7 @@ typedef enum {
3756 PHY_INTERFACE_MODE_XAUI,
3757 /* 10GBASE-KR, XFI, SFI - single lane 10G Serdes */
3758 PHY_INTERFACE_MODE_10GKR,
3759 + PHY_INTERFACE_MODE_2500SGMII,
3760 PHY_INTERFACE_MODE_MAX,
3763 @@ -159,6 +160,8 @@ static inline const char *phy_modes(phy_
3765 case PHY_INTERFACE_MODE_10GKR:
3766 return "10gbase-kr";
3767 + case PHY_INTERFACE_MODE_2500SGMII:
3768 + return "sgmii-2500";