layerscape: drop pause frame support for aquantia phy
openwrt/staging/dedeckeh.git: target/linux/layerscape/patches-4.14/709-mdio-phy-support-layerscape.patch
1 From c24cbb648c5bde8312dbd5498a4b8c12b2692205 Mon Sep 17 00:00:00 2001
2 From: Biwen Li <biwen.li@nxp.com>
3 Date: Wed, 17 Apr 2019 18:58:45 +0800
4 Subject: [PATCH] mdio-phy: support layerscape
5
6 This is an integrated patch of the mdio-phy changes for layerscape
7
8 Signed-off-by: Bhaskar Upadhaya <Bhaskar.Upadhaya@nxp.com>
9 Signed-off-by: Biwen Li <biwen.li@nxp.com>
10 Signed-off-by: Camelia Groza <camelia.groza@nxp.com>
11 Signed-off-by: Constantin Tudor <constantin.tudor@nxp.com>
12 Signed-off-by: costi <constantin.tudor@freescale.com>
13 Signed-off-by: Florin Chiculita <florinlaurentiu.chiculita@nxp.com>
14 Signed-off-by: Florinel Iordache <florinel.iordache@nxp.com>
15 Signed-off-by: Ioana Ciornei <ioana.ciornei@nxp.com>
16 Signed-off-by: Ioana Radulescu <ruxandra.radulescu@nxp.com>
17 Signed-off-by: Madalin Bucur <madalin.bucur@freescale.com>
18 Signed-off-by: Pankaj Bansal <pankaj.bansal@nxp.com>
19 Signed-off-by: Shaohui Xie <Shaohui.Xie@freescale.com>
20 Signed-off-by: Valentin Catalin Neacsu <valentin-catalin.neacsu@nxp.com>
21 Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
22 ---
23 drivers/net/phy/Kconfig | 33 +
24 drivers/net/phy/Makefile | 5 +
25 drivers/net/phy/aquantia.c | 286 ++++-
26 drivers/net/phy/at803x.c | 21 +
27 drivers/net/phy/fsl_backplane.c | 1780 ++++++++++++++++++++++++++++
28 drivers/net/phy/fsl_backplane.h | 41 +
29 drivers/net/phy/fsl_backplane_serdes_10g.c | 281 +++++
30 drivers/net/phy/fsl_backplane_serdes_28g.c | 336 ++++++
31 drivers/net/phy/inphi.c | 594 ++++++++++
32 drivers/net/phy/mdio-mux-multiplexer.c | 122 ++
33 drivers/net/phy/swphy.c | 1 +
34 include/linux/phy.h | 3 +
35 12 files changed, 3484 insertions(+), 19 deletions(-)
36 create mode 100644 drivers/net/phy/fsl_backplane.c
37 create mode 100644 drivers/net/phy/fsl_backplane.h
38 create mode 100644 drivers/net/phy/fsl_backplane_serdes_10g.c
39 create mode 100644 drivers/net/phy/fsl_backplane_serdes_28g.c
40 create mode 100644 drivers/net/phy/inphi.c
41 create mode 100644 drivers/net/phy/mdio-mux-multiplexer.c
42
43 --- a/drivers/net/phy/Kconfig
44 +++ b/drivers/net/phy/Kconfig
45 @@ -87,9 +87,27 @@ config MDIO_BUS_MUX_MMIOREG
46
47 Currently, only 8/16/32 bits registers are supported.
48
49 +config MDIO_BUS_MUX_MULTIPLEXER
50 + tristate "MDIO bus multiplexer using kernel multiplexer subsystem"
51 + depends on OF
52 + select MULTIPLEXER
53 + select MDIO_BUS_MUX
54 + help
55 +	  This module provides a driver for an MDIO bus multiplexer
56 + that is controlled via the kernel multiplexer subsystem. The
57 + bus multiplexer connects one of several child MDIO busses to
58 + a parent bus. Child bus selection is under the control of
59 + the kernel multiplexer subsystem.
60 +
61 config MDIO_CAVIUM
62 tristate
63
64 +config MDIO_FSL_BACKPLANE
65 + tristate "Support for backplane on Freescale XFI interface"
66 + depends on OF_MDIO
67 + help
68 +	  This module provides a driver for the backplane on the Freescale
69 +	  XFI interface.
69 +
70 config MDIO_GPIO
71 tristate "GPIO lib-based bitbanged MDIO buses"
72 depends on MDIO_BITBANG && GPIOLIB
73 @@ -303,6 +321,16 @@ config AT803X_PHY
74 ---help---
75 Currently supports the AT8030 and AT8035 model
76
77 +config AT803X_PHY_SMART_EEE
78 + depends on AT803X_PHY
79 + default n
80 + tristate "SmartEEE feature for AT803X PHYs"
81 + ---help---
82 + Enables the Atheros SmartEEE feature (not IEEE 802.3az). When 2 PHYs
83 + which support this feature are connected back-to-back, they may
84 + negotiate a low-power sleep mode autonomously, without the Ethernet
85 + controller's knowledge. May cause packet loss.
86 +
87 config BCM63XX_PHY
88 tristate "Broadcom 63xx SOCs internal PHY"
89 depends on BCM63XX
90 @@ -385,6 +413,11 @@ config ICPLUS_PHY
91 ---help---
92 Currently supports the IP175C and IP1001 PHYs.
93
94 +config INPHI_PHY
95 + tristate "Inphi CDR 10G/25G Ethernet PHY"
96 + ---help---
97 + Currently supports the IN112525_S03 part @ 25G
98 +
99 config INTEL_XWAY_PHY
100 tristate "Intel XWAY PHYs"
101 ---help---
102 --- a/drivers/net/phy/Makefile
103 +++ b/drivers/net/phy/Makefile
104 @@ -44,7 +44,11 @@ obj-$(CONFIG_MDIO_BUS_MUX) += mdio-mux.o
105 obj-$(CONFIG_MDIO_BUS_MUX_BCM_IPROC) += mdio-mux-bcm-iproc.o
106 obj-$(CONFIG_MDIO_BUS_MUX_GPIO) += mdio-mux-gpio.o
107 obj-$(CONFIG_MDIO_BUS_MUX_MMIOREG) += mdio-mux-mmioreg.o
108 +obj-$(CONFIG_MDIO_BUS_MUX_MULTIPLEXER) += mdio-mux-multiplexer.o
109 obj-$(CONFIG_MDIO_CAVIUM) += mdio-cavium.o
110 +obj-$(CONFIG_MDIO_FSL_BACKPLANE) += fsl_backplane.o
111 +obj-$(CONFIG_MDIO_FSL_BACKPLANE) += fsl_backplane_serdes_10g.o
112 +obj-$(CONFIG_MDIO_FSL_BACKPLANE) += fsl_backplane_serdes_28g.o
113 obj-$(CONFIG_MDIO_GPIO) += mdio-gpio.o
114 obj-$(CONFIG_MDIO_HISI_FEMAC) += mdio-hisi-femac.o
115 obj-$(CONFIG_MDIO_I2C) += mdio-i2c.o
116 @@ -75,6 +79,7 @@ obj-$(CONFIG_DP83848_PHY) += dp83848.o
117 obj-$(CONFIG_DP83867_PHY) += dp83867.o
118 obj-$(CONFIG_FIXED_PHY) += fixed_phy.o
119 obj-$(CONFIG_ICPLUS_PHY) += icplus.o
120 +obj-$(CONFIG_INPHI_PHY) += inphi.o
121 obj-$(CONFIG_INTEL_XWAY_PHY) += intel-xway.o
122 obj-$(CONFIG_LSI_ET1011C_PHY) += et1011c.o
123 obj-$(CONFIG_LXT_PHY) += lxt.o
124 --- a/drivers/net/phy/aquantia.c
125 +++ b/drivers/net/phy/aquantia.c
126 @@ -4,6 +4,7 @@
127 * Author: Shaohui Xie <Shaohui.Xie@freescale.com>
128 *
129 * Copyright 2015 Freescale Semiconductor, Inc.
130 + * Copyright 2018 NXP
131 *
132 * This file is licensed under the terms of the GNU General Public License
133 * version 2. This program is licensed "as is" without any warranty of any
134 @@ -27,15 +28,174 @@
135
136 #define PHY_AQUANTIA_FEATURES (SUPPORTED_10000baseT_Full | \
137 SUPPORTED_1000baseT_Full | \
138 + SUPPORTED_2500baseX_Full | \
139 SUPPORTED_100baseT_Full | \
140 PHY_DEFAULT_FEATURES)
141
142 +#define MDIO_PMA_CTRL1_AQ_SPEED10 0
143 +#define MDIO_PMA_CTRL1_AQ_SPEED2500 0x2058
144 +#define MDIO_PMA_CTRL1_AQ_SPEED5000 0x205c
145 +#define MDIO_PMA_CTRL2_AQ_2500BT 0x30
146 +#define MDIO_PMA_CTRL2_AQ_5000BT 0x31
147 +#define MDIO_PMA_CTRL2_AQ_TYPE_MASK 0x3F
148 +
149 +#define MDIO_AN_VENDOR_PROV_CTRL 0xc400
150 +#define MDIO_AN_RECV_LP_STATUS 0xe820
151 +
152 +static int aquantia_write_reg(struct phy_device *phydev, int devad,
153 + u32 regnum, u16 val)
154 +{
155 + u32 addr = MII_ADDR_C45 | (devad << 16) | (regnum & 0xffff);
156 +
157 + return mdiobus_write(phydev->mdio.bus, phydev->mdio.addr, addr, val);
158 +}
159 +
160 +static int aquantia_read_reg(struct phy_device *phydev, int devad, u32 regnum)
161 +{
162 + u32 addr = MII_ADDR_C45 | (devad << 16) | (regnum & 0xffff);
163 +
164 + return mdiobus_read(phydev->mdio.bus, phydev->mdio.addr, addr);
165 +}
166 +
167 +static int aquantia_pma_setup_forced(struct phy_device *phydev)
168 +{
169 + int ctrl1, ctrl2, ret;
170 +
171 + /* Half duplex is not supported */
172 + if (phydev->duplex != DUPLEX_FULL)
173 + return -EINVAL;
174 +
175 + ctrl1 = aquantia_read_reg(phydev, MDIO_MMD_PMAPMD, MDIO_CTRL1);
176 + if (ctrl1 < 0)
177 + return ctrl1;
178 +
179 + ctrl2 = aquantia_read_reg(phydev, MDIO_MMD_PMAPMD, MDIO_CTRL2);
180 + if (ctrl2 < 0)
181 + return ctrl2;
182 +
183 + ctrl1 &= ~MDIO_CTRL1_SPEEDSEL;
184 + ctrl2 &= ~(MDIO_PMA_CTRL2_AQ_TYPE_MASK);
185 +
186 + switch (phydev->speed) {
187 + case SPEED_10:
188 + ctrl2 |= MDIO_PMA_CTRL2_10BT;
189 + break;
190 + case SPEED_100:
191 + ctrl1 |= MDIO_PMA_CTRL1_SPEED100;
192 + ctrl2 |= MDIO_PMA_CTRL2_100BTX;
193 + break;
194 + case SPEED_1000:
195 + ctrl1 |= MDIO_PMA_CTRL1_SPEED1000;
196 + /* Assume 1000base-T */
197 + ctrl2 |= MDIO_PMA_CTRL2_1000BT;
198 + break;
199 + case SPEED_10000:
200 + ctrl1 |= MDIO_CTRL1_SPEED10G;
201 + /* Assume 10Gbase-T */
202 + ctrl2 |= MDIO_PMA_CTRL2_10GBT;
203 + break;
204 + case SPEED_2500:
205 + ctrl1 |= MDIO_PMA_CTRL1_AQ_SPEED2500;
206 + ctrl2 |= MDIO_PMA_CTRL2_AQ_2500BT;
207 + break;
208 + case SPEED_5000:
209 + ctrl1 |= MDIO_PMA_CTRL1_AQ_SPEED5000;
210 + ctrl2 |= MDIO_PMA_CTRL2_AQ_5000BT;
211 + break;
212 + default:
213 + return -EINVAL;
214 + }
215 +
216 + ret = aquantia_write_reg(phydev, MDIO_MMD_PMAPMD, MDIO_CTRL1, ctrl1);
217 + if (ret < 0)
218 + return ret;
219 +
220 + return aquantia_write_reg(phydev, MDIO_MMD_PMAPMD, MDIO_CTRL2, ctrl2);
221 +}
222 +
223 +static int aquantia_aneg(struct phy_device *phydev, bool control)
224 +{
225 + int reg = aquantia_read_reg(phydev, MDIO_MMD_AN, MDIO_CTRL1);
226 +
227 + if (reg < 0)
228 + return reg;
229 +
230 + if (control)
231 + reg |= MDIO_AN_CTRL1_ENABLE | MDIO_AN_CTRL1_RESTART;
232 + else
233 + reg &= ~(MDIO_AN_CTRL1_ENABLE | MDIO_AN_CTRL1_RESTART);
234 +
235 + return aquantia_write_reg(phydev, MDIO_MMD_AN, MDIO_CTRL1, reg);
236 +}
237 +
238 +static int aquantia_config_advert(struct phy_device *phydev)
239 +{
240 + u32 advertise;
241 + int oldadv, adv, oldadv1, adv1;
242 + int err, changed = 0;
243 +
244 + /* Only allow advertising what this PHY supports */
245 + phydev->advertising &= phydev->supported;
246 + advertise = phydev->advertising;
247 +
248 + /* Setup standard advertisement */
249 + oldadv = aquantia_read_reg(phydev, MDIO_MMD_AN,
250 + MDIO_AN_10GBT_CTRL);
251 + if (oldadv < 0)
252 + return oldadv;
253 +
254 +	/* Aquantia vendor specific advertisements */
255 + oldadv1 = aquantia_read_reg(phydev, MDIO_MMD_AN,
256 + MDIO_AN_VENDOR_PROV_CTRL);
257 + if (oldadv1 < 0)
258 + return oldadv1;
259 +
260 + adv = 0;
261 + adv1 = 0;
262 +
263 +	/* 100baseT_Full is supported by default */
264 +
265 + if (advertise & ADVERTISED_1000baseT_Full)
266 + adv1 |= 0x8000;
267 + if (advertise & ADVERTISED_10000baseT_Full)
268 + adv |= 0x1000;
269 + if (advertise & ADVERTISED_2500baseX_Full)
270 + adv1 |= 0x400;
271 +
272 + if (adv != oldadv) {
273 + err = aquantia_write_reg(phydev, MDIO_MMD_AN,
274 + MDIO_AN_10GBT_CTRL, adv);
275 + if (err < 0)
276 + return err;
277 + changed = 1;
278 + }
279 + if (adv1 != oldadv1) {
280 + err = aquantia_write_reg(phydev, MDIO_MMD_AN,
281 + MDIO_AN_VENDOR_PROV_CTRL, adv1);
282 + if (err < 0)
283 + return err;
284 + changed = 1;
285 + }
286 +
287 + return changed;
288 +}
289 +
290 static int aquantia_config_aneg(struct phy_device *phydev)
291 {
292 + int ret = 0;
293 +
294 phydev->supported = PHY_AQUANTIA_FEATURES;
295 - phydev->advertising = phydev->supported;
296 + if (phydev->autoneg == AUTONEG_DISABLE) {
297 + aquantia_pma_setup_forced(phydev);
298 + return aquantia_aneg(phydev, false);
299 + }
300
301 - return 0;
302 + ret = aquantia_config_advert(phydev);
303 + if (ret > 0)
304 + /* restart autoneg */
305 + return aquantia_aneg(phydev, true);
306 +
307 + return ret;
308 }
309
310 static int aquantia_aneg_done(struct phy_device *phydev)
311 @@ -51,25 +211,26 @@ static int aquantia_config_intr(struct p
312 int err;
313
314 if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
315 - err = phy_write_mmd(phydev, MDIO_MMD_AN, 0xd401, 1);
316 + err = aquantia_write_reg(phydev, MDIO_MMD_AN, 0xd401, 1);
317 if (err < 0)
318 return err;
319
320 - err = phy_write_mmd(phydev, MDIO_MMD_VEND1, 0xff00, 1);
321 + err = aquantia_write_reg(phydev, MDIO_MMD_VEND1, 0xff00, 1);
322 if (err < 0)
323 return err;
324
325 - err = phy_write_mmd(phydev, MDIO_MMD_VEND1, 0xff01, 0x1001);
326 + err = aquantia_write_reg(phydev, MDIO_MMD_VEND1,
327 + 0xff01, 0x1001);
328 } else {
329 - err = phy_write_mmd(phydev, MDIO_MMD_AN, 0xd401, 0);
330 + err = aquantia_write_reg(phydev, MDIO_MMD_AN, 0xd401, 0);
331 if (err < 0)
332 return err;
333
334 - err = phy_write_mmd(phydev, MDIO_MMD_VEND1, 0xff00, 0);
335 + err = aquantia_write_reg(phydev, MDIO_MMD_VEND1, 0xff00, 0);
336 if (err < 0)
337 return err;
338
339 - err = phy_write_mmd(phydev, MDIO_MMD_VEND1, 0xff01, 0);
340 + err = aquantia_write_reg(phydev, MDIO_MMD_VEND1, 0xff01, 0);
341 }
342
343 return err;
344 @@ -79,42 +240,129 @@ static int aquantia_ack_interrupt(struct
345 {
346 int reg;
347
348 - reg = phy_read_mmd(phydev, MDIO_MMD_AN, 0xcc01);
349 + reg = aquantia_read_reg(phydev, MDIO_MMD_AN, 0xcc01);
350 return (reg < 0) ? reg : 0;
351 }
352
353 +static int aquantia_read_advert(struct phy_device *phydev)
354 +{
355 + int adv, adv1;
356 +
357 + /* Setup standard advertisement */
358 + adv = aquantia_read_reg(phydev, MDIO_MMD_AN,
359 + MDIO_AN_10GBT_CTRL);
360 +
361 +	/* Aquantia vendor specific advertisements */
362 + adv1 = aquantia_read_reg(phydev, MDIO_MMD_AN,
363 + MDIO_AN_VENDOR_PROV_CTRL);
364 +
365 +	/* 100baseT_Full is supported by default */
366 + phydev->advertising |= ADVERTISED_100baseT_Full;
367 +
368 + if (adv & 0x1000)
369 + phydev->advertising |= ADVERTISED_10000baseT_Full;
370 + else
371 + phydev->advertising &= ~ADVERTISED_10000baseT_Full;
372 + if (adv1 & 0x8000)
373 + phydev->advertising |= ADVERTISED_1000baseT_Full;
374 + else
375 + phydev->advertising &= ~ADVERTISED_1000baseT_Full;
376 + if (adv1 & 0x400)
377 + phydev->advertising |= ADVERTISED_2500baseX_Full;
378 + else
379 + phydev->advertising &= ~ADVERTISED_2500baseX_Full;
380 + return 0;
381 +}
382 +
383 +static int aquantia_read_lp_advert(struct phy_device *phydev)
384 +{
385 + int adv, adv1;
386 +
387 +	/* Check the link partner's autoneg ability (AN status register) */
388 + adv = aquantia_read_reg(phydev, MDIO_MMD_AN,
389 + MDIO_STAT1);
390 +
391 + if (adv & 0x1)
392 + phydev->lp_advertising |= ADVERTISED_Autoneg |
393 + ADVERTISED_100baseT_Full;
394 + else
395 + phydev->lp_advertising &= ~(ADVERTISED_Autoneg |
396 + ADVERTISED_100baseT_Full);
397 +
398 + /* Read standard link partner advertisement */
399 + adv = aquantia_read_reg(phydev, MDIO_MMD_AN,
400 + MDIO_AN_10GBT_STAT);
401 +
402 +	/* Aquantia link partner advertisements */
403 + adv1 = aquantia_read_reg(phydev, MDIO_MMD_AN,
404 + MDIO_AN_RECV_LP_STATUS);
405 +
406 + if (adv & 0x800)
407 + phydev->lp_advertising |= ADVERTISED_10000baseT_Full;
408 + else
409 + phydev->lp_advertising &= ~ADVERTISED_10000baseT_Full;
410 + if (adv1 & 0x8000)
411 + phydev->lp_advertising |= ADVERTISED_1000baseT_Full;
412 + else
413 + phydev->lp_advertising &= ~ADVERTISED_1000baseT_Full;
414 + if (adv1 & 0x400)
415 + phydev->lp_advertising |= ADVERTISED_2500baseX_Full;
416 + else
417 + phydev->lp_advertising &= ~ADVERTISED_2500baseX_Full;
418 +
419 + return 0;
420 +}
421 +
422 static int aquantia_read_status(struct phy_device *phydev)
423 {
424 int reg;
425
426 - reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1);
427 - reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1);
428 +	/* Read the link status twice; the bit is latched low */
429 + reg = aquantia_read_reg(phydev, MDIO_MMD_AN, MDIO_STAT1);
430 + reg = aquantia_read_reg(phydev, MDIO_MMD_AN, MDIO_STAT1);
431 +
432 if (reg & MDIO_STAT1_LSTATUS)
433 phydev->link = 1;
434 else
435 phydev->link = 0;
436
437 - reg = phy_read_mmd(phydev, MDIO_MMD_AN, 0xc800);
438 mdelay(10);
439 - reg = phy_read_mmd(phydev, MDIO_MMD_AN, 0xc800);
440 + reg = aquantia_read_reg(phydev, MDIO_MMD_PMAPMD, MDIO_CTRL1);
441 +
442 + if ((reg & MDIO_CTRL1_SPEEDSELEXT) == MDIO_CTRL1_SPEEDSELEXT)
443 + reg &= MDIO_CTRL1_SPEEDSEL;
444 + else
445 + reg &= MDIO_CTRL1_SPEEDSELEXT;
446
447 switch (reg) {
448 - case 0x9:
449 + case MDIO_PMA_CTRL1_AQ_SPEED5000:
450 + phydev->speed = SPEED_5000;
451 + break;
452 + case MDIO_PMA_CTRL1_AQ_SPEED2500:
453 phydev->speed = SPEED_2500;
454 break;
455 - case 0x5:
456 - phydev->speed = SPEED_1000;
457 + case MDIO_PMA_CTRL1_AQ_SPEED10:
458 + phydev->speed = SPEED_10;
459 break;
460 - case 0x3:
461 + case MDIO_PMA_CTRL1_SPEED100:
462 phydev->speed = SPEED_100;
463 break;
464 - case 0x7:
465 - default:
466 + case MDIO_PMA_CTRL1_SPEED1000:
467 + phydev->speed = SPEED_1000;
468 + break;
469 + case MDIO_CTRL1_SPEED10G:
470 phydev->speed = SPEED_10000;
471 break;
472 + default:
473 + phydev->speed = SPEED_UNKNOWN;
474 + break;
475 }
476 +
477 phydev->duplex = DUPLEX_FULL;
478
479 + aquantia_read_advert(phydev);
480 + aquantia_read_lp_advert(phydev);
481 +
482 return 0;
483 }
484
485 --- a/drivers/net/phy/at803x.c
486 +++ b/drivers/net/phy/at803x.c
487 @@ -68,6 +68,8 @@
488 #define AT803X_DEBUG_REG_5 0x05
489 #define AT803X_DEBUG_TX_CLK_DLY_EN BIT(8)
490
491 +#define AT803X_LPI_EN BIT(8)
492 +
493 #define ATH8030_PHY_ID 0x004dd076
494 #define ATH8031_PHY_ID 0x004dd074
495 #define ATH8032_PHY_ID 0x004dd023
496 @@ -290,6 +292,19 @@ static void at803x_disable_smarteee(stru
497 phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, 0);
498 }
499
500 +static void at803x_enable_smart_eee(struct phy_device *phydev, int on)
501 +{
502 + int value;
503 +
504 + /* 5.1.11 Smart_eee control3 */
505 + value = phy_read_mmd(phydev, MDIO_MMD_PCS, 0x805D);
506 + if (on)
507 + value |= AT803X_LPI_EN;
508 + else
509 + value &= ~AT803X_LPI_EN;
510 + phy_write_mmd(phydev, MDIO_MMD_PCS, 0x805D, value);
511 +}
512 +
513 static int at803x_config_init(struct phy_device *phydev)
514 {
515 struct at803x_platform_data *pdata;
516 @@ -320,6 +335,12 @@ static int at803x_config_init(struct phy
517 if (ret < 0)
518 return ret;
519
520 +#ifdef CONFIG_AT803X_PHY_SMART_EEE
521 + at803x_enable_smart_eee(phydev, 1);
522 +#else
523 + at803x_enable_smart_eee(phydev, 0);
524 +#endif
525 +
526 if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID ||
527 phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) {
528 ret = at803x_enable_rx_delay(phydev);
529 --- /dev/null
530 +++ b/drivers/net/phy/fsl_backplane.c
531 @@ -0,0 +1,1780 @@
532 +// SPDX-License-Identifier: GPL-2.0+
533 +/*
534 + * DPAA backplane driver.
535 + * Author: Shaohui Xie <Shaohui.Xie@freescale.com>
536 + * Florinel Iordache <florinel.iordache@nxp.com>
537 + *
538 + * Copyright 2015 Freescale Semiconductor, Inc.
539 + * Copyright 2018 NXP
540 + *
541 + * Licensed under the GPL-2 or later.
542 + */
543 +
544 +#include <linux/kernel.h>
545 +#include <linux/module.h>
546 +#include <linux/mii.h>
547 +#include <linux/mdio.h>
548 +#include <linux/ethtool.h>
549 +#include <linux/phy.h>
550 +#include <linux/io.h>
551 +#include <linux/of.h>
552 +#include <linux/of_net.h>
553 +#include <linux/of_address.h>
554 +#include <linux/of_platform.h>
555 +#include <linux/timer.h>
556 +#include <linux/delay.h>
557 +#include <linux/workqueue.h>
558 +#include <linux/netdevice.h>
559 +
560 +#include "fsl_backplane.h"
561 +
562 +
563 +/* PCS Device Identifier */
564 +#define PCS_PHY_DEVICE_ID 0x0083e400
565 +#define PCS_PHY_DEVICE_ID_MASK 0xffffffff
566 +
567 +/* 10G Long cables setup: 1 m to 2 m cables */
568 +#define RATIO_PREQ_10G 0x3
569 +#define RATIO_PST1Q_10G 0xd
570 +#define RATIO_EQ_10G 0x20
571 +
572 +/* 10G Short cables setup: up to 30 cm cable */
573 +//#define RATIO_PREQ_10G 0x3
574 +//#define RATIO_PST1Q_10G 0xa
575 +//#define RATIO_EQ_10G 0x29
576 +
577 +/* 40G Long cables setup: 1 m to 2 m cables */
578 +#define RATIO_PREQ_40G 0x2
579 +#define RATIO_PST1Q_40G 0xd
580 +#define RATIO_EQ_40G 0x20
581 +
582 +/* 40G Short cables setup: up to 30 cm cable */
583 +//#define RATIO_PREQ_40G 0x1
584 +//#define RATIO_PST1Q_40G 0x3
585 +//#define RATIO_EQ_40G 0x29
586 +
587 +/* LX2 2x40G default RCW setup */
588 +//#define RATIO_PREQ_40G 0x0
589 +//#define RATIO_PST1Q_40G 0x3
590 +//#define RATIO_EQ_40G 0x30
591 +
592 +/* Max/Min coefficient values */
593 +#define PRE_COE_MAX 0x0
594 +#define PRE_COE_MIN 0x8
595 +#define POST_COE_MAX 0x0
596 +#define POST_COE_MIN 0x10
597 +#define ZERO_COE_MAX 0x30
598 +#define ZERO_COE_MIN 0x0
599 +
600 +/* KR PMD defines */
601 +#define PMD_RESET 0x1
602 +#define PMD_STATUS_SUP_STAT 0x4
603 +#define PMD_STATUS_FRAME_LOCK 0x2
604 +#define TRAIN_EN 0x3
605 +#define TRAIN_DISABLE 0x1
606 +#define RX_STAT 0x1
607 +
608 +/* PCS Link up */
609 +#define XFI_PCS_SR1 0x20
610 +#define KR_RX_LINK_STAT_MASK 0x1000
611 +
612 +/* KX PCS mode register */
613 +#define KX_PCS_IF_MODE 0x8014
614 +
615 +/* KX PCS mode register init value */
616 +#define KX_IF_MODE_INIT 0x8
617 +
618 +/* KX/KR AN registers */
619 +#define AN_CTRL_INIT 0x1200
620 +#define KX_AN_AD1_INIT 0x25
621 +#define KR_AN_AD1_INIT_10G 0x85
622 +#define KR_AN_AD1_INIT_40G 0x105
623 +#define AN_LNK_UP_MASK 0x4
624 +#define KR_AN_MASK_10G 0x8
625 +#define KR_AN_MASK_40G 0x20
626 +#define TRAIN_FAIL 0x8
627 +#define KR_AN_40G_MDIO_OFFSET 4
628 +
629 +/* XGKR Timeouts */
630 +#define XGKR_TIMEOUT 1050
631 +#define XGKR_DENY_RT_INTERVAL 3000
632 +#define XGKR_AN_WAIT_ITERATIONS 5
633 +
634 +/* XGKR Increment/Decrement Requests */
635 +#define INCREMENT 1
636 +#define DECREMENT 2
637 +#define TIMEOUT_LONG 3
638 +#define TIMEOUT_M1 3
639 +
640 +/* XGKR Masks */
641 +#define RX_READY_MASK 0x8000
642 +#define PRESET_MASK 0x2000
643 +#define INIT_MASK 0x1000
644 +#define COP1_MASK 0x30
645 +#define COP1_SHIFT 4
646 +#define COZ_MASK 0xc
647 +#define COZ_SHIFT 2
648 +#define COM1_MASK 0x3
649 +#define COM1_SHIFT 0
650 +#define REQUEST_MASK 0x3f
651 +#define LD_ALL_MASK (PRESET_MASK | INIT_MASK | \
652 + COP1_MASK | COZ_MASK | COM1_MASK)
653 +
654 +/* Lanes definitions */
655 +#define MASTER_LANE 0
656 +#define SINGLE_LANE 0
657 +#define MAX_PHY_LANES_NO 4
658 +
659 +/* Invalid value */
660 +#define VAL_INVALID 0xff
661 +
662 +/* New XGKR Training Algorithm */
663 +#define NEW_ALGORITHM_TRAIN_TX
664 +
665 +#ifdef NEW_ALGORITHM_TRAIN_TX
666 +#define FORCE_INC_COP1_NUMBER 0
667 +#define FORCE_INC_COM1_NUMBER 1
668 +#endif
669 +
670 +/* Link_Training_Registers offsets */
671 +static int lt_MDIO_MMD = 0;
672 +static u32 lt_KR_PMD_CTRL = 0;
673 +static u32 lt_KR_PMD_STATUS = 0;
674 +static u32 lt_KR_LP_CU = 0;
675 +static u32 lt_KR_LP_STATUS = 0;
676 +static u32 lt_KR_LD_CU = 0;
677 +static u32 lt_KR_LD_STATUS = 0;
678 +
679 +/* KX/KR AN registers offsets */
680 +static u32 g_an_AD1 = 0;
681 +static u32 g_an_BP_STAT = 0;
682 +
683 +static const u32 preq_table[] = {0x0, 0x1, 0x3, 0x5,
684 + 0x7, 0x9, 0xb, 0xc, VAL_INVALID};
685 +static const u32 pst1q_table[] = {0x0, 0x1, 0x3, 0x5, 0x7,
686 + 0x9, 0xb, 0xd, 0xf, 0x10, VAL_INVALID};
687 +
688 +enum backplane_mode {
689 + PHY_BACKPLANE_1000BASE_KX,
690 + PHY_BACKPLANE_10GBASE_KR,
691 + PHY_BACKPLANE_40GBASE_KR,
692 + PHY_BACKPLANE_INVAL
693 +};
694 +
695 +enum serdes_type {
696 + SERDES_10G,
697 + SERDES_28G,
698 + SERDES_INVAL
699 +};
700 +
701 +enum coe_field {
702 + COE_COP1,
703 + COE_COZ,
704 + COE_COM
705 +};
706 +
707 +enum coe_update {
708 + COE_NOTUPDATED,
709 + COE_UPDATED,
710 + COE_MIN,
711 + COE_MAX,
712 + COE_INV
713 +};
714 +
715 +enum train_state {
716 + DETECTING_LP,
717 + TRAINED,
718 +};
719 +
720 +struct tx_condition {
721 + bool bin_m1_late_early;
722 + bool bin_long_late_early;
723 + bool bin_m1_stop;
724 + bool bin_long_stop;
725 + bool tx_complete;
726 + bool sent_init;
727 + int m1_min_max_cnt;
728 + int long_min_max_cnt;
729 +#ifdef NEW_ALGORITHM_TRAIN_TX
730 + int pre_inc;
731 + int post_inc;
732 +#endif
733 +};
734 +
735 +struct xgkr_params {
736 + void *reg_base; /* lane memory map: registers base address */
737 + int idx; /* lane relative index inside a multi-lane PHY */
738 + struct phy_device *phydev;
739 + struct serdes_access *srds;
740 + struct tx_condition tx_c;
741 + struct delayed_work xgkr_wk;
742 + enum train_state state;
743 + int an_wait_count;
744 + unsigned long rt_time;
745 + u32 ld_update;
746 + u32 ld_status;
747 + u32 ratio_preq;
748 + u32 ratio_pst1q;
749 + u32 adpt_eq;
750 + u32 tuned_ratio_preq;
751 + u32 tuned_ratio_pst1q;
752 + u32 tuned_adpt_eq;
753 +};
754 +
755 +struct xgkr_phy_data {
756 + int bp_mode;
757 + u32 phy_lanes;
758 + struct mutex phy_lock;
759 + bool aneg_done;
760 + struct xgkr_params xgkr[MAX_PHY_LANES_NO];
761 +};
762 +
763 +static void setup_an_lt_ls(void)
764 +{
765 + /* KR PMD registers */
766 + lt_MDIO_MMD = MDIO_MMD_PMAPMD;
767 + lt_KR_PMD_CTRL = 0x96;
768 + lt_KR_PMD_STATUS = 0x97;
769 + lt_KR_LP_CU = 0x98;
770 + lt_KR_LP_STATUS = 0x99;
771 + lt_KR_LD_CU = 0x9a;
772 + lt_KR_LD_STATUS = 0x9b;
773 +
774 + /* KX/KR AN registers */
775 + g_an_AD1 = 0x11;
776 + g_an_BP_STAT = 0x30;
777 +}
778 +
779 +static void setup_an_lt_lx(void)
780 +{
781 + /* Auto-Negotiation and Link Training Core Registers page 1: 256 = 0x100 */
782 + lt_MDIO_MMD = MDIO_MMD_AN;
783 + lt_KR_PMD_CTRL = 0x100;
784 + lt_KR_PMD_STATUS = 0x101;
785 + lt_KR_LP_CU = 0x102;
786 + lt_KR_LP_STATUS = 0x103;
787 + lt_KR_LD_CU = 0x104;
788 + lt_KR_LD_STATUS = 0x105;
789 +
790 + /* KX/KR AN registers */
791 + g_an_AD1 = 0x03;
792 + g_an_BP_STAT = 0x0F;
793 +}
794 +
795 +static u32 le_ioread32(u32 *reg)
796 +{
797 + return ioread32(reg);
798 +}
799 +
800 +static void le_iowrite32(u32 value, u32 *reg)
801 +{
802 + iowrite32(value, reg);
803 +}
804 +
805 +static u32 be_ioread32(u32 *reg)
806 +{
807 + return ioread32be(reg);
808 +}
809 +
810 +static void be_iowrite32(u32 value, u32 *reg)
811 +{
812 + iowrite32be(value, reg);
813 +}
814 +
815 +/**
816 + * xgkr_phy_write_mmd - Wrapper function for phy_write_mmd
817 + * for writing a register on an MMD on a given PHY.
818 + *
819 + * Same rules as for phy_write_mmd();
820 + */
821 +static int xgkr_phy_write_mmd(struct xgkr_params *xgkr, int devad, u32 regnum, u16 val)
822 +{
823 + struct phy_device *phydev = xgkr->phydev;
824 + struct xgkr_phy_data *xgkr_inst = phydev->priv;
825 + int mdio_addr = phydev->mdio.addr;
826 + int err;
827 +
828 + mutex_lock(&xgkr_inst->phy_lock);
829 +
830 + if (xgkr_inst->bp_mode == PHY_BACKPLANE_40GBASE_KR && devad == MDIO_MMD_AN) {
831 + //40G AN: prepare mdio address for writing phydev AN registers for 40G on respective lane
832 + phydev->mdio.addr = KR_AN_40G_MDIO_OFFSET + xgkr->idx;
833 + }
834 +
835 + err = phy_write_mmd(phydev, devad, regnum, val);
836 + if (err)
837 + dev_err(&phydev->mdio.dev, "Writing PHY (%p) MMD = 0x%02x register = 0x%02x failed with error code: 0x%08x\n", phydev, devad, regnum, err);
838 +
839 + if (xgkr_inst->bp_mode == PHY_BACKPLANE_40GBASE_KR && devad == MDIO_MMD_AN) {
840 + //40G AN: restore mdio address
841 + phydev->mdio.addr = mdio_addr;
842 + }
843 +
844 + mutex_unlock(&xgkr_inst->phy_lock);
845 +
846 + return err;
847 +}
848 +
849 +/**
850 + * xgkr_phy_read_mmd - Wrapper function for phy_read_mmd
851 + * for reading a register from an MMD on a given PHY.
852 + *
853 + * Same rules as for phy_read_mmd();
854 + */
855 +static int xgkr_phy_read_mmd(struct xgkr_params *xgkr, int devad, u32 regnum)
856 +{
857 + struct phy_device *phydev = xgkr->phydev;
858 + struct xgkr_phy_data *xgkr_inst = phydev->priv;
859 + int mdio_addr = phydev->mdio.addr;
860 + int ret;
861 +
862 + mutex_lock(&xgkr_inst->phy_lock);
863 +
864 + if (xgkr_inst->bp_mode == PHY_BACKPLANE_40GBASE_KR && devad == MDIO_MMD_AN) {
865 + //40G AN: prepare mdio address for reading phydev AN registers for 40G on respective lane
866 + phydev->mdio.addr = KR_AN_40G_MDIO_OFFSET + xgkr->idx;
867 + }
868 +
869 + ret = phy_read_mmd(phydev, devad, regnum);
870 +
871 + if (xgkr_inst->bp_mode == PHY_BACKPLANE_40GBASE_KR && devad == MDIO_MMD_AN) {
872 + //40G AN: restore mdio address
873 + phydev->mdio.addr = mdio_addr;
874 + }
875 +
876 + mutex_unlock(&xgkr_inst->phy_lock);
877 +
878 + return ret;
879 +}
880 +
881 +static void tx_condition_init(struct tx_condition *tx_c)
882 +{
883 + tx_c->bin_m1_late_early = true;
884 + tx_c->bin_long_late_early = false;
885 + tx_c->bin_m1_stop = false;
886 + tx_c->bin_long_stop = false;
887 + tx_c->tx_complete = false;
888 + tx_c->sent_init = false;
889 + tx_c->m1_min_max_cnt = 0;
890 + tx_c->long_min_max_cnt = 0;
891 +#ifdef NEW_ALGORITHM_TRAIN_TX
892 + tx_c->pre_inc = FORCE_INC_COM1_NUMBER;
893 + tx_c->post_inc = FORCE_INC_COP1_NUMBER;
894 +#endif
895 +}
896 +
897 +void tune_tecr(struct xgkr_params *xgkr)
898 +{
899 + struct phy_device *phydev = xgkr->phydev;
900 + struct xgkr_phy_data *xgkr_inst = phydev->priv;
901 + bool reset = false;
902 +
903 + if (xgkr_inst->bp_mode == PHY_BACKPLANE_40GBASE_KR) {
904 + /* Reset only the Master Lane */
905 + reset = (xgkr->idx == MASTER_LANE);
906 + } else {
907 + reset = true;
908 + }
909 +
910 + xgkr->srds->tune_tecr(xgkr->reg_base, xgkr->ratio_preq, xgkr->ratio_pst1q, xgkr->adpt_eq, reset);
911 +
912 + xgkr->tuned_ratio_preq = xgkr->ratio_preq;
913 + xgkr->tuned_ratio_pst1q = xgkr->ratio_pst1q;
914 + xgkr->tuned_adpt_eq = xgkr->adpt_eq;
915 +}
916 +
917 +static void start_lt(struct xgkr_params *xgkr)
918 +{
919 + xgkr_phy_write_mmd(xgkr, lt_MDIO_MMD, lt_KR_PMD_CTRL, TRAIN_EN);
920 +}
921 +
922 +static void stop_lt(struct xgkr_params *xgkr)
923 +{
924 + xgkr_phy_write_mmd(xgkr, lt_MDIO_MMD, lt_KR_PMD_CTRL, TRAIN_DISABLE);
925 +}
926 +
927 +static void reset_lt(struct xgkr_params *xgkr)
928 +{
929 + xgkr_phy_write_mmd(xgkr, lt_MDIO_MMD, MDIO_CTRL1, PMD_RESET);
930 + xgkr_phy_write_mmd(xgkr, lt_MDIO_MMD, lt_KR_PMD_CTRL, TRAIN_DISABLE);
931 + xgkr_phy_write_mmd(xgkr, lt_MDIO_MMD, lt_KR_LD_CU, 0);
932 + xgkr_phy_write_mmd(xgkr, lt_MDIO_MMD, lt_KR_LD_STATUS, 0);
933 + xgkr_phy_write_mmd(xgkr, lt_MDIO_MMD, lt_KR_PMD_STATUS, 0);
934 + xgkr_phy_write_mmd(xgkr, lt_MDIO_MMD, lt_KR_LP_CU, 0);
935 + xgkr_phy_write_mmd(xgkr, lt_MDIO_MMD, lt_KR_LP_STATUS, 0);
936 +
937 +}
938 +
939 +static void ld_coe_status(struct xgkr_params *xgkr)
940 +{
941 + xgkr_phy_write_mmd(xgkr, lt_MDIO_MMD,
942 + lt_KR_LD_STATUS, xgkr->ld_status);
943 +}
944 +
945 +static void ld_coe_update(struct xgkr_params *xgkr)
946 +{
947 + dev_dbg(&xgkr->phydev->mdio.dev, "sending request: %x\n", xgkr->ld_update);
948 + xgkr_phy_write_mmd(xgkr, lt_MDIO_MMD,
949 + lt_KR_LD_CU, xgkr->ld_update);
950 +}
951 +
952 +static void start_xgkr_state_machine(struct delayed_work *work)
953 +{
954 + queue_delayed_work(system_power_efficient_wq, work,
955 + msecs_to_jiffies(XGKR_TIMEOUT));
956 +}
957 +
958 +static void start_xgkr_an(struct xgkr_params *xgkr)
959 +{
960 + struct phy_device *phydev = xgkr->phydev;
961 + struct xgkr_phy_data *xgkr_inst = phydev->priv;
962 + int i;
963 + int err;
964 +
965 + switch (xgkr_inst->bp_mode)
966 + {
967 + case PHY_BACKPLANE_1000BASE_KX:
968 + dev_err(&phydev->mdio.dev, "Wrong call path for 1000Base-KX\n");
969 + break;
970 +
971 + case PHY_BACKPLANE_10GBASE_KR:
972 + err = xgkr_phy_write_mmd(xgkr, MDIO_MMD_AN, g_an_AD1, KR_AN_AD1_INIT_10G);
973 + if (err)
974 + dev_err(&phydev->mdio.dev, "Setting AN register 0x%02x failed with error code: 0x%08x\n", g_an_AD1, err);
975 + udelay(1);
976 + err = xgkr_phy_write_mmd(xgkr, MDIO_MMD_AN, MDIO_CTRL1, AN_CTRL_INIT);
977 + if (err)
978 + dev_err(&phydev->mdio.dev, "Setting AN register 0x%02x failed with error code: 0x%08x\n", MDIO_CTRL1, err);
979 + break;
980 +
981 + case PHY_BACKPLANE_40GBASE_KR:
982 + if (xgkr->idx == MASTER_LANE) {
983 + for (i = 0; i < xgkr_inst->phy_lanes; i++) {
984 + err = xgkr_phy_write_mmd(&xgkr_inst->xgkr[i], MDIO_MMD_AN, g_an_AD1, KR_AN_AD1_INIT_40G);
985 + if (err)
986 + dev_err(&phydev->mdio.dev, "Setting AN register 0x%02x on lane %d failed with error code: 0x%08x\n", g_an_AD1, xgkr_inst->xgkr[i].idx, err);
987 + }
988 + udelay(1);
989 + err = xgkr_phy_write_mmd(xgkr, MDIO_MMD_AN, MDIO_CTRL1, AN_CTRL_INIT);
990 + if (err)
991 + dev_err(&phydev->mdio.dev, "Setting AN register 0x%02x on Master Lane failed with error code: 0x%08x\n", MDIO_CTRL1, err);
992 + }
993 + break;
994 + }
995 +}
996 +
997 +static void start_1gkx_an(struct phy_device *phydev)
998 +{
999 + phy_write_mmd(phydev, MDIO_MMD_PCS, KX_PCS_IF_MODE, KX_IF_MODE_INIT);
1000 + phy_write_mmd(phydev, MDIO_MMD_AN, g_an_AD1, KX_AN_AD1_INIT);
1001 + phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1);
1002 + phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1, AN_CTRL_INIT);
1003 +}
1004 +
1005 +static void reset_tecr(struct xgkr_params *xgkr)
1006 +{
1007 + struct phy_device *phydev = xgkr->phydev;
1008 + struct xgkr_phy_data *xgkr_inst = phydev->priv;
1009 +
1010 + switch (xgkr_inst->bp_mode)
1011 + {
1012 + case PHY_BACKPLANE_1000BASE_KX:
1013 + dev_err(&phydev->mdio.dev, "Wrong call path for 1000Base-KX\n");
1014 + break;
1015 +
1016 + case PHY_BACKPLANE_10GBASE_KR:
1017 + xgkr->ratio_preq = RATIO_PREQ_10G;
1018 + xgkr->ratio_pst1q = RATIO_PST1Q_10G;
1019 + xgkr->adpt_eq = RATIO_EQ_10G;
1020 + break;
1021 +
1022 + case PHY_BACKPLANE_40GBASE_KR:
1023 + xgkr->ratio_preq = RATIO_PREQ_40G;
1024 + xgkr->ratio_pst1q = RATIO_PST1Q_40G;
1025 + xgkr->adpt_eq = RATIO_EQ_40G;
1026 + break;
1027 + }
1028 +
1029 + tune_tecr(xgkr);
1030 +}
1031 +
1032 +static void init_xgkr(struct xgkr_params *xgkr, int reset)
1033 +{
1034 + if (reset)
1035 + reset_tecr(xgkr);
1036 +
1037 + tx_condition_init(&xgkr->tx_c);
1038 + xgkr->state = DETECTING_LP;
1039 +
1040 + xgkr->ld_status &= RX_READY_MASK;
1041 + ld_coe_status(xgkr);
1042 + xgkr->ld_update = 0;
1043 + xgkr->ld_status &= ~RX_READY_MASK;
1044 + ld_coe_status(xgkr);
1045 +
1046 +}
1047 +
1048 +static void initialize(struct xgkr_params *xgkr)
1049 +{
1050 + reset_tecr(xgkr);
1051 +
1052 + xgkr->ld_status &= ~(COP1_MASK | COZ_MASK | COM1_MASK);
1053 + xgkr->ld_status |= COE_UPDATED << COP1_SHIFT |
1054 + COE_UPDATED << COZ_SHIFT |
1055 + COE_UPDATED << COM1_SHIFT;
1056 + ld_coe_status(xgkr);
1057 +}
1058 +
1059 +static void train_remote_tx(struct xgkr_params *xgkr)
1060 +{
1061 + struct tx_condition *tx_c = &xgkr->tx_c;
1062 + bool bin_m1_early, bin_long_early;
1063 + u32 lp_status, old_ld_update;
1064 + u32 status_cop1, status_coz, status_com1;
1065 + u32 req_cop1, req_coz, req_com1, req_preset, req_init;
1066 + u32 temp;
1067 +#ifdef NEW_ALGORITHM_TRAIN_TX
1068 + u32 median_gaink2;
1069 +#endif
1070 +
1071 +recheck:
1072 + if (tx_c->bin_long_stop && tx_c->bin_m1_stop) {
1073 + tx_c->tx_complete = true;
1074 + xgkr->ld_status |= RX_READY_MASK;
1075 + ld_coe_status(xgkr);
1076 +
1077 + /* tell LP we are ready */
1078 + xgkr_phy_write_mmd(xgkr, lt_MDIO_MMD,
1079 + lt_KR_PMD_STATUS, RX_STAT);
1080 +
1081 + return;
1082 + }
1083 +
1084 + /* We start by checking the current LP status. If we got any responses,
1085 +	 * we can clear the appropriate update request so that the
1086 + * subsequent code may easily issue new update requests if needed.
1087 + */
1088 + lp_status = xgkr_phy_read_mmd(xgkr, lt_MDIO_MMD, lt_KR_LP_STATUS) &
1089 + REQUEST_MASK;
1090 +
1091 + status_cop1 = (lp_status & COP1_MASK) >> COP1_SHIFT;
1092 + status_coz = (lp_status & COZ_MASK) >> COZ_SHIFT;
1093 + status_com1 = (lp_status & COM1_MASK) >> COM1_SHIFT;
1094 +
1095 + old_ld_update = xgkr->ld_update;
1096 + req_cop1 = (old_ld_update & COP1_MASK) >> COP1_SHIFT;
1097 + req_coz = (old_ld_update & COZ_MASK) >> COZ_SHIFT;
1098 + req_com1 = (old_ld_update & COM1_MASK) >> COM1_SHIFT;
1099 + req_preset = old_ld_update & PRESET_MASK;
1100 + req_init = old_ld_update & INIT_MASK;
1101 +
1102 + /* IEEE802.3-2008, 72.6.10.2.3.1
1103 + * We may clear PRESET when all coefficients show UPDATED or MAX.
1104 + */
1105 + if (req_preset) {
1106 + if ((status_cop1 == COE_UPDATED || status_cop1 == COE_MAX) &&
1107 + (status_coz == COE_UPDATED || status_coz == COE_MAX) &&
1108 + (status_com1 == COE_UPDATED || status_com1 == COE_MAX)) {
1109 + xgkr->ld_update &= ~PRESET_MASK;
1110 + }
1111 + }
1112 +
1113 + /* IEEE802.3-2008, 72.6.10.2.3.2
1114 + * We may clear INITIALIZE when no coefficients show NOT UPDATED.
1115 + */
1116 + if (req_init) {
1117 + if (status_cop1 != COE_NOTUPDATED &&
1118 + status_coz != COE_NOTUPDATED &&
1119 + status_com1 != COE_NOTUPDATED) {
1120 + xgkr->ld_update &= ~INIT_MASK;
1121 + }
1122 + }
1123 +
1124 + /* IEEE802.3-2008, 72.6.10.2.3.2
1125 + * we send initialize to the other side to ensure default settings
1126 + * for the LP. Naturally, we should do this only once.
1127 + */
1128 + if (!tx_c->sent_init) {
1129 + if (!lp_status && !(old_ld_update & (LD_ALL_MASK))) {
1130 + xgkr->ld_update = INIT_MASK;
1131 + tx_c->sent_init = true;
1132 + }
1133 + }
1134 +
1135 + /* IEEE802.3-2008, 72.6.10.2.3.3
1136 + * We set coefficient requests to HOLD when we get the information
1137 +	 * about any updates. On clearing our prior response, we also update
1138 + * our internal status.
1139 + */
1140 + if (status_cop1 != COE_NOTUPDATED) {
1141 + if (req_cop1) {
1142 + xgkr->ld_update &= ~COP1_MASK;
1143 +#ifdef NEW_ALGORITHM_TRAIN_TX
1144 + if (tx_c->post_inc) {
1145 + if (req_cop1 == INCREMENT &&
1146 + status_cop1 == COE_MAX) {
1147 + tx_c->post_inc = 0;
1148 + tx_c->bin_long_stop = true;
1149 + tx_c->bin_m1_stop = true;
1150 + } else {
1151 + tx_c->post_inc -= 1;
1152 + }
1153 +
1154 + ld_coe_update(xgkr);
1155 + goto recheck;
1156 + }
1157 +#endif
1158 + if ((req_cop1 == DECREMENT && status_cop1 == COE_MIN) ||
1159 + (req_cop1 == INCREMENT && status_cop1 == COE_MAX)) {
1160 + dev_dbg(&xgkr->phydev->mdio.dev, "COP1 hit limit %s",
1161 + (status_cop1 == COE_MIN) ?
1162 + "DEC MIN" : "INC MAX");
1163 + tx_c->long_min_max_cnt++;
1164 + if (tx_c->long_min_max_cnt >= TIMEOUT_LONG) {
1165 + tx_c->bin_long_stop = true;
1166 + ld_coe_update(xgkr);
1167 + goto recheck;
1168 + }
1169 + }
1170 + }
1171 + }
1172 +
1173 + if (status_coz != COE_NOTUPDATED) {
1174 + if (req_coz)
1175 + xgkr->ld_update &= ~COZ_MASK;
1176 + }
1177 +
1178 + if (status_com1 != COE_NOTUPDATED) {
1179 + if (req_com1) {
1180 + xgkr->ld_update &= ~COM1_MASK;
1181 +#ifdef NEW_ALGORITHM_TRAIN_TX
1182 + if (tx_c->pre_inc) {
1183 + if (req_com1 == INCREMENT &&
1184 + status_com1 == COE_MAX)
1185 + tx_c->pre_inc = 0;
1186 + else
1187 + tx_c->pre_inc -= 1;
1188 +
1189 + ld_coe_update(xgkr);
1190 + goto recheck;
1191 + }
1192 +#endif
1193 +			/* Stop if we have reached the limit for a parameter. */
1194 + if ((req_com1 == DECREMENT && status_com1 == COE_MIN) ||
1195 + (req_com1 == INCREMENT && status_com1 == COE_MAX)) {
1196 + dev_dbg(&xgkr->phydev->mdio.dev, "COM1 hit limit %s",
1197 + (status_com1 == COE_MIN) ?
1198 + "DEC MIN" : "INC MAX");
1199 + tx_c->m1_min_max_cnt++;
1200 + if (tx_c->m1_min_max_cnt >= TIMEOUT_M1) {
1201 + tx_c->bin_m1_stop = true;
1202 + ld_coe_update(xgkr);
1203 + goto recheck;
1204 + }
1205 + }
1206 + }
1207 + }
1208 +
1209 + if (old_ld_update != xgkr->ld_update) {
1210 + ld_coe_update(xgkr);
1211 + /* Redo these status checks and updates until we have no more
1212 + * changes, to speed up the overall process.
1213 + */
1214 + goto recheck;
1215 + }
1216 +
1217 + /* Do nothing if we have pending request. */
1218 + if ((req_coz || req_com1 || req_cop1))
1219 + return;
1220 + else if (lp_status)
1221 + /* No pending request but LP status was not reverted to
1222 + * not updated.
1223 + */
1224 + return;
1225 +
1226 +#ifdef NEW_ALGORITHM_TRAIN_TX
1227 + if (!(xgkr->ld_update & (PRESET_MASK | INIT_MASK))) {
1228 + if (tx_c->pre_inc) {
1229 + xgkr->ld_update = INCREMENT << COM1_SHIFT;
1230 + ld_coe_update(xgkr);
1231 + return;
1232 + }
1233 +
1234 + if (status_cop1 != COE_MAX) {
1235 + median_gaink2 = xgkr->srds->get_median_gaink2(xgkr->reg_base);
1236 + if (median_gaink2 == 0xf) {
1237 + tx_c->post_inc = 1;
1238 + } else {
1239 + /* Gaink2 median lower than "F" */
1240 + tx_c->bin_m1_stop = true;
1241 + tx_c->bin_long_stop = true;
1242 + goto recheck;
1243 + }
1244 + } else {
1245 + /* C1 MAX */
1246 + tx_c->bin_m1_stop = true;
1247 + tx_c->bin_long_stop = true;
1248 + goto recheck;
1249 + }
1250 +
1251 + if (tx_c->post_inc) {
1252 + xgkr->ld_update = INCREMENT << COP1_SHIFT;
1253 + ld_coe_update(xgkr);
1254 + return;
1255 + }
1256 + }
1257 +#endif
1258 +
1259 + /* snapshot and select bin */
1260 + bin_m1_early = xgkr->srds->is_bin_early(BIN_M1, xgkr->reg_base);
1261 + bin_long_early = xgkr->srds->is_bin_early(BIN_LONG, xgkr->reg_base);
1262 +
1263 + if (!tx_c->bin_m1_stop && !tx_c->bin_m1_late_early && bin_m1_early) {
1264 + tx_c->bin_m1_stop = true;
1265 + goto recheck;
1266 + }
1267 +
1268 + if (!tx_c->bin_long_stop &&
1269 + tx_c->bin_long_late_early && !bin_long_early) {
1270 + tx_c->bin_long_stop = true;
1271 + goto recheck;
1272 + }
1273 +
1274 + /* IEEE802.3-2008, 72.6.10.2.3.3
1275 + * We only request coefficient updates when no PRESET/INITIALIZE is
1276 + * pending. We also only request coefficient updates when the
1277 + * corresponding status is NOT UPDATED and nothing is pending.
1278 + */
1279 + if (!(xgkr->ld_update & (PRESET_MASK | INIT_MASK))) {
1280 + if (!tx_c->bin_long_stop) {
1281 + /* BinM1 correction means changing COM1 */
1282 + if (!status_com1 && !(xgkr->ld_update & COM1_MASK)) {
1283 + /* Avoid BinM1Late by requesting an
1284 + * immediate decrement.
1285 + */
1286 + if (!bin_m1_early) {
1287 + /* request decrement c(-1) */
1288 + temp = DECREMENT << COM1_SHIFT;
1289 + xgkr->ld_update = temp;
1290 + ld_coe_update(xgkr);
1291 + tx_c->bin_m1_late_early = bin_m1_early;
1292 + return;
1293 + }
1294 + }
1295 +
1296 + /* BinLong correction means changing COP1 */
1297 + if (!status_cop1 && !(xgkr->ld_update & COP1_MASK)) {
1298 + /* Locate BinLong transition point (if any)
1299 + * while avoiding BinM1Late.
1300 + */
1301 + if (bin_long_early) {
1302 + /* request increment c(1) */
1303 + temp = INCREMENT << COP1_SHIFT;
1304 + xgkr->ld_update = temp;
1305 + } else {
1306 + /* request decrement c(1) */
1307 + temp = DECREMENT << COP1_SHIFT;
1308 + xgkr->ld_update = temp;
1309 + }
1310 +
1311 + ld_coe_update(xgkr);
1312 + tx_c->bin_long_late_early = bin_long_early;
1313 + }
1314 + /* We try to finish BinLong before we do BinM1 */
1315 + return;
1316 + }
1317 +
1318 + if (!tx_c->bin_m1_stop) {
1319 + /* BinM1 correction means changing COM1 */
1320 + if (!status_com1 && !(xgkr->ld_update & COM1_MASK)) {
1321 + /* Locate BinM1 transition point (if any) */
1322 + if (bin_m1_early) {
1323 + /* request increment c(-1) */
1324 + temp = INCREMENT << COM1_SHIFT;
1325 + xgkr->ld_update = temp;
1326 + } else {
1327 + /* request decrement c(-1) */
1328 + temp = DECREMENT << COM1_SHIFT;
1329 + xgkr->ld_update = temp;
1330 + }
1331 +
1332 + ld_coe_update(xgkr);
1333 + tx_c->bin_m1_late_early = bin_m1_early;
1334 + }
1335 + }
1336 + }
1337 +}
1338 +
1339 +static int is_link_up(struct phy_device *phydev)
1340 +{
1341 + struct xgkr_phy_data *xgkr_inst = phydev->priv;
1342 + int val = 0;
1343 +
1344 + mutex_lock(&xgkr_inst->phy_lock);
1345 +
1346 + val = phy_read_mmd(phydev, MDIO_MMD_PCS, XFI_PCS_SR1);
1347 +
1348 + mutex_unlock(&xgkr_inst->phy_lock);
1349 +
1350 + return (val & KR_RX_LINK_STAT_MASK) ? 1 : 0;
1351 +}
1352 +
1353 +static int is_link_training_fail(struct xgkr_params *xgkr)
1354 +{
1355 + struct phy_device *phydev = xgkr->phydev;
1356 + int val;
1357 + int timeout = 100;
1358 +
1359 + val = xgkr_phy_read_mmd(xgkr, lt_MDIO_MMD, lt_KR_PMD_STATUS);
1360 +
1361 + if (!(val & TRAIN_FAIL) && (val & RX_STAT)) {
1362 + /* check LNK_STAT for sure */
1363 + while (timeout--) {
1364 + if (is_link_up(phydev))
1365 + return 0;
1366 +
1367 + usleep_range(100, 500);
1368 + }
1369 + }
1370 +
1371 + return 1;
1372 +}
1373 +
1374 +static int check_rx(struct xgkr_params *xgkr)
1375 +{
1376 + return xgkr_phy_read_mmd(xgkr, lt_MDIO_MMD, lt_KR_LP_STATUS) &
1377 + RX_READY_MASK;
1378 +}
1379 +
1380 +/* Coefficient values have hardware restrictions */
1381 +static int is_ld_valid(struct xgkr_params *xgkr)
1382 +{
1383 + u32 ratio_pst1q = xgkr->ratio_pst1q;
1384 + u32 adpt_eq = xgkr->adpt_eq;
1385 + u32 ratio_preq = xgkr->ratio_preq;
1386 +
1387 + if ((ratio_pst1q + adpt_eq + ratio_preq) > 48)
1388 + return 0;
1389 +
1390 + if (((ratio_pst1q + adpt_eq + ratio_preq) * 4) >=
1391 + ((adpt_eq - ratio_pst1q - ratio_preq) * 17))
1392 + return 0;
1393 +
1394 + if (ratio_preq > ratio_pst1q)
1395 + return 0;
1396 +
1397 + if (ratio_preq > 8)
1398 + return 0;
1399 +
1400 + if (adpt_eq < 26)
1401 + return 0;
1402 +
1403 + if (ratio_pst1q > 16)
1404 + return 0;
1405 +
1406 + return 1;
1407 +}
1408 +
1409 +static int is_value_allowed(const u32 *val_table, u32 val)
1410 +{
1411 + int i;
1412 +
1413 + for (i = 0;; i++) {
1414 + if (*(val_table + i) == VAL_INVALID)
1415 + return 0;
1416 + if (*(val_table + i) == val)
1417 + return 1;
1418 + }
1419 +}
1420 +
1421 +static enum coe_update inc_dec(struct xgkr_params *xgkr, int field, int request)
1422 +{
1423 + u32 ld_limit[3], ld_coe[3], step[3];
1424 +
1425 + ld_coe[0] = xgkr->ratio_pst1q;
1426 + ld_coe[1] = xgkr->adpt_eq;
1427 + ld_coe[2] = xgkr->ratio_preq;
1428 +
1429 + /* Information specific to the SerDes for 10GBase-KR:
1430 + * Incrementing C(+1) means *decrementing* RATIO_PST1Q
1431 + * Incrementing C(0) means incrementing ADPT_EQ
1432 + * Incrementing C(-1) means *decrementing* RATIO_PREQ
1433 + */
1434 + step[0] = -1;
1435 + step[1] = 1;
1436 + step[2] = -1;
1437 +
1438 + switch (request) {
1439 + case INCREMENT:
1440 + ld_limit[0] = POST_COE_MAX;
1441 + ld_limit[1] = ZERO_COE_MAX;
1442 + ld_limit[2] = PRE_COE_MAX;
1443 + if (ld_coe[field] != ld_limit[field])
1444 + ld_coe[field] += step[field];
1445 + else
1446 + /* MAX */
1447 + return COE_MAX;
1448 + break;
1449 + case DECREMENT:
1450 + ld_limit[0] = POST_COE_MIN;
1451 + ld_limit[1] = ZERO_COE_MIN;
1452 + ld_limit[2] = PRE_COE_MIN;
1453 + if (ld_coe[field] != ld_limit[field])
1454 + ld_coe[field] -= step[field];
1455 + else
1456 + /* MIN */
1457 + return COE_MIN;
1458 + break;
1459 + default:
1460 + break;
1461 + }
1462 +
1463 + if (is_ld_valid(xgkr)) {
1464 + /* accept new ld */
1465 + xgkr->ratio_pst1q = ld_coe[0];
1466 + xgkr->adpt_eq = ld_coe[1];
1467 + xgkr->ratio_preq = ld_coe[2];
1468 + /* only some values for preq and pst1q can be used.
1469 + * for preq: 0x0, 0x1, 0x3, 0x5, 0x7, 0x9, 0xb, 0xc.
1470 + * for pst1q: 0x0, 0x1, 0x3, 0x5, 0x7, 0x9, 0xb, 0xd, 0xf, 0x10.
1471 + */
1472 + if (!is_value_allowed((const u32 *)&preq_table, ld_coe[2])) {
1473 + dev_dbg(&xgkr->phydev->mdio.dev,
1474 + "preq skipped value: %d\n", ld_coe[2]);
1475 + /* NOT UPDATED */
1476 + return COE_NOTUPDATED;
1477 + }
1478 +
1479 + if (!is_value_allowed((const u32 *)&pst1q_table, ld_coe[0])) {
1480 + dev_dbg(&xgkr->phydev->mdio.dev,
1481 + "pst1q skipped value: %d\n", ld_coe[0]);
1482 + /* NOT UPDATED */
1483 + return COE_NOTUPDATED;
1484 + }
1485 +
1486 + tune_tecr(xgkr);
1487 + } else {
1488 + if (request == DECREMENT)
1489 + /* MIN */
1490 + return COE_MIN;
1491 + if (request == INCREMENT)
1492 + /* MAX */
1493 + return COE_MAX;
1494 + }
1495 +
1496 + /* UPDATED */
1497 + return COE_UPDATED;
1498 +}
1499 +
1500 +static void min_max_updated(struct xgkr_params *xgkr, int field, enum coe_update cs)
1501 +{
1502 + u32 mask, val;
1503 + u32 ld_cs = cs;
1504 +
1505 + if (cs == COE_INV)
1506 + return;
1507 +
1508 + switch (field) {
1509 + case COE_COP1:
1510 + mask = COP1_MASK;
1511 + val = ld_cs << COP1_SHIFT;
1512 + break;
1513 + case COE_COZ:
1514 + mask = COZ_MASK;
1515 + val = ld_cs << COZ_SHIFT;
1516 + break;
1517 + case COE_COM:
1518 + mask = COM1_MASK;
1519 + val = ld_cs << COM1_SHIFT;
1520 + break;
1521 + default:
1522 + return;
1523 + }
1524 +
1525 + xgkr->ld_status &= ~mask;
1526 + xgkr->ld_status |= val;
1527 +}
1528 +
1529 +static void check_request(struct xgkr_params *xgkr, int request)
1530 +{
1531 + int cop1_req, coz_req, com_req;
1532 + int old_status;
1533 + enum coe_update cu;
1534 +
1535 + cop1_req = (request & COP1_MASK) >> COP1_SHIFT;
1536 + coz_req = (request & COZ_MASK) >> COZ_SHIFT;
1537 + com_req = (request & COM1_MASK) >> COM1_SHIFT;
1538 +
1539 + /* IEEE802.3-2008, 72.6.10.2.5
1540 + * Ensure we only act on INCREMENT/DECREMENT when we are in NOT UPDATED
1541 + */
1542 + old_status = xgkr->ld_status;
1543 +
1544 + if (cop1_req && !(xgkr->ld_status & COP1_MASK)) {
1545 + cu = inc_dec(xgkr, COE_COP1, cop1_req);
1546 + min_max_updated(xgkr, COE_COP1, cu);
1547 + }
1548 +
1549 + if (coz_req && !(xgkr->ld_status & COZ_MASK)) {
1550 + cu = inc_dec(xgkr, COE_COZ, coz_req);
1551 + min_max_updated(xgkr, COE_COZ, cu);
1552 + }
1553 +
1554 + if (com_req && !(xgkr->ld_status & COM1_MASK)) {
1555 + cu = inc_dec(xgkr, COE_COM, com_req);
1556 + min_max_updated(xgkr, COE_COM, cu);
1557 + }
1558 +
1559 + if (old_status != xgkr->ld_status)
1560 + ld_coe_status(xgkr);
1561 +}
1562 +
1563 +static void preset(struct xgkr_params *xgkr)
1564 +{
1565 + /* These are all MAX values from the IEEE802.3 perspective. */
1566 + xgkr->ratio_pst1q = POST_COE_MAX;
1567 + xgkr->adpt_eq = ZERO_COE_MAX;
1568 + xgkr->ratio_preq = PRE_COE_MAX;
1569 +
1570 + tune_tecr(xgkr);
1571 + xgkr->ld_status &= ~(COP1_MASK | COZ_MASK | COM1_MASK);
1572 + xgkr->ld_status |= COE_MAX << COP1_SHIFT |
1573 + COE_MAX << COZ_SHIFT |
1574 + COE_MAX << COM1_SHIFT;
1575 + ld_coe_status(xgkr);
1576 +}
1577 +
1578 +static void train_local_tx(struct xgkr_params *xgkr)
1579 +{
1580 + int request, old_ld_status;
1581 +
1582 + /* get request from LP */
1583 + request = xgkr_phy_read_mmd(xgkr, lt_MDIO_MMD, lt_KR_LP_CU) &
1584 + (LD_ALL_MASK);
1585 +
1586 + old_ld_status = xgkr->ld_status;
1587 +
1588 + /* IEEE802.3-2008, 72.6.10.2.5
1589 +	 * Ensure we always go to NOT UPDATED for status reporting in
1590 + * response to HOLD requests.
1591 + * IEEE802.3-2008, 72.6.10.2.3.1/2
1592 + * ... but only if PRESET/INITIALIZE are not active to ensure
1593 + * we keep status until they are released.
1594 + */
1595 + if (!(request & (PRESET_MASK | INIT_MASK))) {
1596 + if (!(request & COP1_MASK))
1597 + xgkr->ld_status &= ~COP1_MASK;
1598 +
1599 + if (!(request & COZ_MASK))
1600 + xgkr->ld_status &= ~COZ_MASK;
1601 +
1602 + if (!(request & COM1_MASK))
1603 + xgkr->ld_status &= ~COM1_MASK;
1604 +
1605 + if (old_ld_status != xgkr->ld_status)
1606 + ld_coe_status(xgkr);
1607 + }
1608 +
1609 + /* As soon as the LP shows ready, no need to do any more updates. */
1610 + if (check_rx(xgkr)) {
1611 + /* LP receiver is ready */
1612 + if (xgkr->ld_status & (COP1_MASK | COZ_MASK | COM1_MASK)) {
1613 + xgkr->ld_status &= ~(COP1_MASK | COZ_MASK | COM1_MASK);
1614 + ld_coe_status(xgkr);
1615 + }
1616 + } else {
1617 + /* IEEE802.3-2008, 72.6.10.2.3.1/2
1618 + * only act on PRESET/INITIALIZE if all status is NOT UPDATED.
1619 + */
1620 + if (request & (PRESET_MASK | INIT_MASK)) {
1621 + if (!(xgkr->ld_status &
1622 + (COP1_MASK | COZ_MASK | COM1_MASK))) {
1623 + if (request & PRESET_MASK)
1624 + preset(xgkr);
1625 +
1626 + if (request & INIT_MASK)
1627 + initialize(xgkr);
1628 + }
1629 + }
1630 +
1631 + /* LP Coefficient are not in HOLD */
1632 + if (request & REQUEST_MASK)
1633 + check_request(xgkr, request & REQUEST_MASK);
1634 + }
1635 +}
1636 +
1637 +static void xgkr_start_train(struct xgkr_params *xgkr)
1638 +{
1639 + struct phy_device *phydev = xgkr->phydev;
1640 + struct xgkr_phy_data *xgkr_inst = phydev->priv;
1641 + struct tx_condition *tx_c = &xgkr->tx_c;
1642 + int val = 0, i, j;
1643 + int lt_state;
1644 + unsigned long dead_line;
1645 + int lp_rx_ready, tx_training_complete;
1646 + u32 lt_timeout = 500;
1647 +
1648 + init_xgkr(xgkr, 0);
1649 +
1650 + start_lt(xgkr);
1651 +
1652 + if (xgkr_inst->bp_mode == PHY_BACKPLANE_40GBASE_KR) {
1653 + lt_timeout = 2000;
1654 + }
1655 +
1656 + for (i = 0; i < 2;) {
1657 +
1658 + dead_line = jiffies + msecs_to_jiffies(lt_timeout);
1659 +
1660 + while (time_before(jiffies, dead_line)) {
1661 +
1662 + val = xgkr_phy_read_mmd(xgkr, lt_MDIO_MMD,
1663 + lt_KR_PMD_STATUS);
1664 +
1665 + if (val & TRAIN_FAIL) {
1666 +			if (val & TRAIN_FAIL) {
1667 +				/* LT already failed; reset the lane to keep it
1668 +				 * from hanging, then start LT again.
1668 + */
1669 + if (xgkr_inst->bp_mode == PHY_BACKPLANE_40GBASE_KR) {
1670 + /* Reset only the Master Lane */
1671 + if (xgkr->idx == MASTER_LANE)
1672 + xgkr->srds->reset_lane(xgkr->reg_base);
1673 + } else {
1674 + xgkr->srds->reset_lane(xgkr->reg_base);
1675 + }
1676 +
1677 + start_lt(xgkr);
1678 + } else if ((val & PMD_STATUS_SUP_STAT) &&
1679 + (val & PMD_STATUS_FRAME_LOCK))
1680 + break;
1681 + usleep_range(100, 500);
1682 + }
1683 +
1684 + if (!((val & PMD_STATUS_FRAME_LOCK) &&
1685 + (val & PMD_STATUS_SUP_STAT))) {
1686 + i++;
1687 + continue;
1688 + }
1689 +
1690 + /* init process */
1691 + lp_rx_ready = false;
1692 + tx_training_complete = false;
1693 +		/* the LT should finish within lt_timeout, failed or OK. */
1694 + dead_line = jiffies + msecs_to_jiffies(lt_timeout);
1695 +
1696 + while (time_before(jiffies, dead_line)) {
1697 + /* check if the LT is already failed */
1698 +
1699 + lt_state = xgkr_phy_read_mmd(xgkr, lt_MDIO_MMD,
1700 + lt_KR_PMD_STATUS);
1701 +
1702 + if (lt_state & TRAIN_FAIL) {
1703 +
1704 + if (xgkr_inst->bp_mode == PHY_BACKPLANE_40GBASE_KR) {
1705 + /* Reset only the Master Lane */
1706 + if (xgkr->idx == MASTER_LANE)
1707 + xgkr->srds->reset_lane(xgkr->reg_base);
1708 + } else {
1709 + xgkr->srds->reset_lane(xgkr->reg_base);
1710 + }
1711 +
1712 + break;
1713 + }
1714 +
1715 + lp_rx_ready = check_rx(xgkr);
1716 + tx_training_complete = tx_c->tx_complete;
1717 +
1718 + if (lp_rx_ready && tx_training_complete)
1719 + break;
1720 +
1721 + if (!lp_rx_ready)
1722 + train_local_tx(xgkr);
1723 +
1724 + if (!tx_training_complete)
1725 + train_remote_tx(xgkr);
1726 +
1727 + usleep_range(100, 500);
1728 + }
1729 +
1730 + i++;
1731 + /* check LT result */
1732 + if (is_link_training_fail(xgkr)) {
1733 + init_xgkr(xgkr, 0);
1734 + continue;
1735 + } else {
1736 + stop_lt(xgkr);
1737 + xgkr->state = TRAINED;
1738 +
1739 + switch (xgkr_inst->bp_mode)
1740 + {
1741 + case PHY_BACKPLANE_10GBASE_KR:
1742 + if (phydev->attached_dev == NULL)
1743 + dev_info(&phydev->mdio.dev, "10GBase-KR link trained (Tx equalization: RATIO_PREQ = 0x%x, RATIO_PST1Q = 0x%x, ADPT_EQ = 0x%x)\n",
1744 + xgkr->tuned_ratio_preq, xgkr->tuned_ratio_pst1q, xgkr->tuned_adpt_eq);
1745 + else
1746 + dev_info(&phydev->mdio.dev, "%s %s: 10GBase-KR link trained (Tx equalization: RATIO_PREQ = 0x%x, RATIO_PST1Q = 0x%x, ADPT_EQ = 0x%x)\n",
1747 + dev_driver_string(phydev->attached_dev->dev.parent),
1748 + dev_name(phydev->attached_dev->dev.parent),
1749 + xgkr->tuned_ratio_preq, xgkr->tuned_ratio_pst1q, xgkr->tuned_adpt_eq);
1750 + break;
1751 +
1752 + case PHY_BACKPLANE_40GBASE_KR:
1753 + if (xgkr->idx == xgkr_inst->phy_lanes - 1) {
1754 + if (phydev->attached_dev == NULL)
1755 + dev_info(&phydev->mdio.dev, "40GBase-KR link trained; per-lane Tx equalization:\n");
1756 + else
1757 + dev_info(&phydev->mdio.dev, "%s %s: 40GBase-KR link trained; per-lane Tx equalization:\n",
1758 + dev_driver_string(phydev->attached_dev->dev.parent),
1759 + dev_name(phydev->attached_dev->dev.parent));
1760 +
1761 + for (j = 0; j < xgkr_inst->phy_lanes; j++) {
1762 + if (phydev->attached_dev == NULL)
1763 + dev_info(&phydev->mdio.dev, "40GBase-KR Lane %d: RATIO_PREQ = 0x%x, RATIO_PST1Q = 0x%x, ADPT_EQ = 0x%x\n",
1764 + j, xgkr_inst->xgkr[j].tuned_ratio_preq, xgkr_inst->xgkr[j].tuned_ratio_pst1q, xgkr_inst->xgkr[j].tuned_adpt_eq);
1765 + else
1766 + dev_info(&phydev->mdio.dev, "%s %s: 40GBase-KR Lane %d: RATIO_PREQ = 0x%x, RATIO_PST1Q = 0x%x, ADPT_EQ = 0x%x\n",
1767 + dev_driver_string(phydev->attached_dev->dev.parent),
1768 + dev_name(phydev->attached_dev->dev.parent),
1769 + j, xgkr_inst->xgkr[j].tuned_ratio_preq, xgkr_inst->xgkr[j].tuned_ratio_pst1q, xgkr_inst->xgkr[j].tuned_adpt_eq);
1770 + }
1771 + }
1772 + break;
1773 + }
1774 +
1775 + break;
1776 + }
1777 + }
1778 +}
1779 +
1780 +static void xgkr_request_restart_an(struct xgkr_params *xgkr)
1781 +{
1782 + struct phy_device *phydev = xgkr->phydev;
1783 + struct xgkr_phy_data *xgkr_inst = phydev->priv;
1784 + int i;
1785 +
1786 + if (time_before(jiffies, xgkr->rt_time))
1787 + return;
1788 +
1789 + switch (xgkr_inst->bp_mode)
1790 + {
1791 + case PHY_BACKPLANE_1000BASE_KX:
1792 + dev_err(&phydev->mdio.dev, "Wrong call path for 1000Base-KX\n");
1793 + break;
1794 +
1795 + case PHY_BACKPLANE_10GBASE_KR:
1796 + init_xgkr(xgkr, 0);
1797 + reset_lt(xgkr);
1798 + xgkr->state = DETECTING_LP;
1799 + start_xgkr_an(xgkr);
1800 + start_xgkr_state_machine(&xgkr->xgkr_wk);
1801 + break;
1802 +
1803 + case PHY_BACKPLANE_40GBASE_KR:
1804 + for (i = 0; i < xgkr_inst->phy_lanes; i++) {
1805 + init_xgkr(&xgkr_inst->xgkr[i], 0);
1806 + reset_lt(&xgkr_inst->xgkr[i]);
1807 + xgkr_inst->xgkr[i].state = DETECTING_LP;
1808 + }
1809 + //Start AN only for Master Lane
1810 + start_xgkr_an(&xgkr_inst->xgkr[MASTER_LANE]);
1811 + //start state machine
1812 + for (i = 0; i < xgkr_inst->phy_lanes; i++) {
1813 + start_xgkr_state_machine(&xgkr_inst->xgkr[i].xgkr_wk);
1814 + }
1815 + break;
1816 + }
1817 +
1818 + xgkr->rt_time = jiffies + msecs_to_jiffies(XGKR_DENY_RT_INTERVAL);
1819 +}
1820 +
1821 +static void xgkr_state_machine(struct work_struct *work)
1822 +{
1823 + struct delayed_work *dwork = to_delayed_work(work);
1824 + struct xgkr_params *xgkr = container_of(dwork,
1825 + struct xgkr_params, xgkr_wk);
1826 + struct phy_device *phydev = xgkr->phydev;
1827 + struct xgkr_phy_data *xgkr_inst = phydev->priv;
1828 + int an_state;
1829 + bool start_train = false;
1830 + bool all_lanes_trained = false;
1831 + int i;
1832 +
1833 + if (!xgkr_inst->aneg_done) {
1834 + start_xgkr_state_machine(&xgkr->xgkr_wk);
1835 + return;
1836 + }
1837 +
1838 + mutex_lock(&phydev->lock);
1839 +
1840 + switch (xgkr->state) {
1841 + case DETECTING_LP:
1842 +
1843 + switch (xgkr_inst->bp_mode)
1844 + {
1845 + case PHY_BACKPLANE_1000BASE_KX:
1846 + dev_err(&phydev->mdio.dev, "Wrong call path for 1000Base-KX\n");
1847 + break;
1848 +
1849 + case PHY_BACKPLANE_10GBASE_KR:
1850 + an_state = xgkr_phy_read_mmd(xgkr, MDIO_MMD_AN, g_an_BP_STAT);
1851 + if (an_state & KR_AN_MASK_10G) {
1852 + //AN acquired: Train the lane
1853 + xgkr->an_wait_count = 0;
1854 + start_train = true;
1855 + } else {
1856 + //AN lost or not yet acquired
1857 + if (!is_link_up(phydev)) {
1858 + //Link is down: restart training
1859 + xgkr->an_wait_count = 0;
1860 + xgkr_request_restart_an(xgkr);
1861 + } else {
1862 +				//Link is up: wait a few iterations for AN to be acquired
1863 + if (xgkr->an_wait_count >= XGKR_AN_WAIT_ITERATIONS) {
1864 + xgkr->an_wait_count = 0;
1865 + xgkr_request_restart_an(xgkr);
1866 + } else {
1867 + xgkr->an_wait_count++;
1868 + }
1869 + }
1870 + }
1871 + break;
1872 +
1873 + case PHY_BACKPLANE_40GBASE_KR:
1874 + //Check AN state only on Master Lane
1875 + an_state = xgkr_phy_read_mmd(&xgkr_inst->xgkr[MASTER_LANE], MDIO_MMD_AN, g_an_BP_STAT);
1876 + if (an_state & KR_AN_MASK_40G) {
1877 + //AN acquired: Train all lanes in order starting with Master Lane
1878 + xgkr->an_wait_count = 0;
1879 + if (xgkr->idx == MASTER_LANE) {
1880 + start_train = true;
1881 +				} else if (xgkr_inst->xgkr[xgkr->idx - 1].state == TRAINED) {
1883 + start_train = true;
1884 + }
1885 + } else {
1886 + //AN lost or not yet acquired
1887 + if (!is_link_up(phydev)) {
1888 + //Link is down: restart training
1889 + xgkr->an_wait_count = 0;
1890 + xgkr_request_restart_an(xgkr);
1891 + } else {
1892 +				//Link is up: wait a few iterations for AN to be acquired
1893 + if (xgkr->an_wait_count >= XGKR_AN_WAIT_ITERATIONS) {
1894 + xgkr->an_wait_count = 0;
1895 + xgkr_request_restart_an(xgkr);
1896 + } else {
1897 + xgkr->an_wait_count++;
1898 + }
1899 + }
1900 + }
1901 + break;
1902 + }
1903 + break;
1904 +
1905 + case TRAINED:
1906 + if (!is_link_up(phydev)) {
1907 +			switch (xgkr_inst->bp_mode) {
1909 + case PHY_BACKPLANE_1000BASE_KX:
1910 +				dev_err(&phydev->mdio.dev, "Wrong call path for 1000Base-KX\n");
1911 + break;
1912 +
1913 + case PHY_BACKPLANE_10GBASE_KR:
1914 +				dev_info(&phydev->mdio.dev, "Detected hotplug, restarting training\n");
1915 + xgkr_request_restart_an(xgkr);
1916 + break;
1917 +
1918 + case PHY_BACKPLANE_40GBASE_KR:
1919 + if (xgkr->idx == MASTER_LANE) {
1920 + //check if all lanes are trained only on Master Lane
1921 + all_lanes_trained = true;
1922 + for (i = 0; i < xgkr_inst->phy_lanes; i++) {
1923 + if (xgkr_inst->xgkr[i].state != TRAINED) {
1924 + all_lanes_trained = false;
1925 + break;
1926 + }
1927 + }
1928 + if (all_lanes_trained) {
1929 +					dev_info(&phydev->mdio.dev, "Detected hotplug, restarting training\n");
1930 + xgkr_request_restart_an(xgkr);
1931 + }
1932 + }
1933 + break;
1934 + }
1935 + }
1936 + break;
1937 + }
1938 +
1939 + if (start_train) {
1940 + xgkr_start_train(xgkr);
1941 + }
1942 +
1943 + mutex_unlock(&phydev->lock);
1944 + start_xgkr_state_machine(&xgkr->xgkr_wk);
1945 +}
1946 +
1947 +static int fsl_backplane_probe(struct phy_device *phydev)
1948 +{
1949 + struct xgkr_phy_data *xgkr_inst;
1950 + struct device_node *phy_node, *lane_node;
1951 + struct resource res_lane;
1952 + struct serdes_access *srds = NULL;
1953 + int serdes_type;
1954 + const char *st;
1955 + const char *bm;
1956 + int ret, i, phy_lanes;
1957 + int bp_mode;
1958 +	u32 lane_base_addr[MAX_PHY_LANES_NO], lane_memmap_size;
+	bool little_endian;
1959 +
1960 + phy_node = phydev->mdio.dev.of_node;
1961 + if (!phy_node) {
1962 + dev_err(&phydev->mdio.dev, "No associated device tree node\n");
1963 + return -EINVAL;
1964 + }
1965 +
1966 +	ret = of_property_read_string(phy_node, "backplane-mode", &bm);
1967 +	if (ret < 0)
1968 +		return -EINVAL;
1969 +
1970 + phy_lanes = 1;
1971 + if (!strcasecmp(bm, "1000base-kx")) {
1972 + bp_mode = PHY_BACKPLANE_1000BASE_KX;
1973 + } else if (!strcasecmp(bm, "10gbase-kr")) {
1974 + bp_mode = PHY_BACKPLANE_10GBASE_KR;
1975 + } else if (!strcasecmp(bm, "40gbase-kr")) {
1976 + bp_mode = PHY_BACKPLANE_40GBASE_KR;
1977 + phy_lanes = 4;
1978 + } else {
1979 + dev_err(&phydev->mdio.dev, "Unknown backplane-mode\n");
1980 + return -EINVAL;
1981 + }
1982 +
1983 + lane_node = of_parse_phandle(phy_node, "fsl,lane-handle", 0);
1984 + if (!lane_node) {
1985 + dev_err(&phydev->mdio.dev, "parse fsl,lane-handle failed\n");
1986 + return -EINVAL;
1987 + }
1988 +
1989 + ret = of_property_read_string(lane_node, "compatible", &st);
1990 + if (ret < 0) {
1991 + //assume SERDES-10G if compatible property is not specified
1992 + serdes_type = SERDES_10G;
1993 +	} else if (!strcasecmp(st, "fsl,serdes-10g")) {
1995 + serdes_type = SERDES_10G;
1996 + } else if (!strcasecmp(st, "fsl,serdes-28g")) {
1997 + serdes_type = SERDES_28G;
1998 + } else {
1999 + dev_err(&phydev->mdio.dev, "Unknown serdes-type\n");
2000 + return -EINVAL;
2001 + }
2002 +
2003 + ret = of_address_to_resource(lane_node, 0, &res_lane);
2004 + if (ret) {
2005 + dev_err(&phydev->mdio.dev, "could not obtain memory map\n");
2006 + return ret;
2007 + }
2008 +
2009 +	little_endian = of_property_read_bool(lane_node, "little-endian");
+	of_node_put(lane_node);
2010 + ret = of_property_read_u32_array(phy_node, "fsl,lane-reg",
2011 + (u32 *)lane_base_addr, phy_lanes);
2012 + if (ret) {
2013 + dev_err(&phydev->mdio.dev, "could not get fsl,lane-reg\n");
2014 + return -EINVAL;
2015 + }
2016 +
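+	/*
+	 * The 10G SerDes blocks (LS parts) and the 28G SerDes blocks (LX
+	 * parts) expose the AN/LT and per-lane registers at different
+	 * locations, so each serdes type installs its own register map and
+	 * access ops.
+	 */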
2017 +	switch (serdes_type) {
2019 + case SERDES_10G:
2020 + setup_an_lt_ls();
2021 + srds = setup_serdes_access_10g();
2022 + break;
2023 +
2024 + case SERDES_28G:
2025 + setup_an_lt_lx();
2026 + srds = setup_serdes_access_28g();
2027 + break;
2028 +
2029 + default:
2030 + dev_err(&phydev->mdio.dev, "Unsupported serdes-type\n");
2031 + return -EINVAL;
2032 + }
2033 +
2034 + if (!srds) {
2035 + dev_err(&phydev->mdio.dev, "Unsupported serdes-type\n");
2036 + return -EINVAL;
2037 + }
2038 +
2039 + srds->serdes_type = serdes_type;
2040 +	srds->is_little_endian = little_endian;
2041 +
2042 + if (srds->is_little_endian) {
2043 + srds->ioread32 = le_ioread32;
2044 + srds->iowrite32 = le_iowrite32;
2045 + } else {
2046 + srds->ioread32 = be_ioread32;
2047 + srds->iowrite32 = be_iowrite32;
2048 + }
2049 +
2050 + xgkr_inst = devm_kzalloc(&phydev->mdio.dev,
2051 + sizeof(*xgkr_inst), GFP_KERNEL);
2052 + if (!xgkr_inst)
2053 + return -ENOMEM;
2054 +
2055 + xgkr_inst->phy_lanes = phy_lanes;
2056 + xgkr_inst->bp_mode = bp_mode;
2057 + mutex_init(&xgkr_inst->phy_lock);
2058 +
2059 + lane_memmap_size = srds->get_lane_memmap_size();
2060 +
2061 + for (i = 0; i < phy_lanes; i++) {
2062 + xgkr_inst->xgkr[i].idx = i;
2063 + xgkr_inst->xgkr[i].phydev = phydev;
2064 + xgkr_inst->xgkr[i].srds = srds;
2065 + xgkr_inst->xgkr[i].reg_base = devm_ioremap_nocache(&phydev->mdio.dev,
2066 + res_lane.start + lane_base_addr[i],
2067 + lane_memmap_size);
2068 + if (!xgkr_inst->xgkr[i].reg_base) {
2069 + dev_err(&phydev->mdio.dev, "ioremap_nocache failed\n");
2070 + return -ENOMEM;
2071 + }
2072 + xgkr_inst->xgkr[i].rt_time = jiffies + msecs_to_jiffies(XGKR_DENY_RT_INTERVAL);
2073 + }
2074 +
2075 + phydev->priv = xgkr_inst;
2076 +
2077 +	switch (bp_mode) {
2079 + case PHY_BACKPLANE_1000BASE_KX:
2080 + phydev->speed = SPEED_1000;
2081 + /* configure the lane for 1000BASE-KX */
2082 + srds->lane_set_1gkx(xgkr_inst->xgkr[SINGLE_LANE].reg_base);
2083 + break;
2084 +
2085 + case PHY_BACKPLANE_10GBASE_KR:
2086 + phydev->speed = SPEED_10000;
2087 + INIT_DELAYED_WORK(&xgkr_inst->xgkr[SINGLE_LANE].xgkr_wk, xgkr_state_machine);
2088 + break;
2089 +
2090 + case PHY_BACKPLANE_40GBASE_KR:
2091 + phydev->speed = SPEED_40000;
2092 + for (i = 0; i < phy_lanes; i++)
2093 + INIT_DELAYED_WORK(&xgkr_inst->xgkr[i].xgkr_wk, xgkr_state_machine);
2094 + break;
2095 + }
2096 +
2097 + return 0;
2098 +}
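+
+/*
+ * The probe above expects a devicetree layout along these lines
+ * (illustrative only; unit addresses and lane offsets are board-specific):
+ *
+ *	serdes1: serdes@1ea0000 {
+ *		compatible = "fsl,serdes-10g";
+ *		reg = <0x0 0x1ea0000 0x0 0x2000>;
+ *	};
+ *
+ *	ethernet-phy@0 {
+ *		compatible = "ethernet-phy-ieee802.3-c45";
+ *		reg = <0x0>;
+ *		backplane-mode = "10gbase-kr";
+ *		fsl,lane-handle = <&serdes1>;
+ *		fsl,lane-reg = <0x9c0>;
+ *	};
+ */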
2099 +
2100 +static int fsl_backplane_aneg_done(struct phy_device *phydev)
2101 +{
2102 + struct xgkr_phy_data *xgkr_inst = phydev->priv;
2103 +
2104 + if (!phydev->mdio.dev.of_node) {
2105 + dev_err(&phydev->mdio.dev, "No associated device tree node\n");
2106 + return -EINVAL;
2107 + }
2108 +
2109 + xgkr_inst->aneg_done = true;
2110 +
2111 + return 1;
2112 +}
2113 +
2114 +static int fsl_backplane_config_aneg(struct phy_device *phydev)
2115 +{
2116 + struct xgkr_phy_data *xgkr_inst = phydev->priv;
2117 + int i;
2118 +
2119 + if (!phydev->mdio.dev.of_node) {
2120 + dev_err(&phydev->mdio.dev, "No associated device tree node\n");
2121 + return -EINVAL;
2122 + }
2123 +
2124 +	switch (phydev->speed) {
2126 + case SPEED_1000:
2127 + phydev->supported |= SUPPORTED_1000baseKX_Full;
2128 + start_1gkx_an(phydev);
2129 + break;
2130 +
2131 + case SPEED_10000:
2132 + phydev->supported |= SUPPORTED_10000baseKR_Full;
2133 + reset_lt(&xgkr_inst->xgkr[SINGLE_LANE]);
2134 + start_xgkr_an(&xgkr_inst->xgkr[SINGLE_LANE]);
2135 +		/* start state machine */
2136 + start_xgkr_state_machine(&xgkr_inst->xgkr[SINGLE_LANE].xgkr_wk);
2137 + break;
2138 +
2139 + case SPEED_40000:
2140 + phydev->supported |= SUPPORTED_40000baseKR4_Full;
2141 + for (i = 0; i < xgkr_inst->phy_lanes; i++) {
2142 + reset_lt(&xgkr_inst->xgkr[i]);
2143 + }
2144 + //Start AN only for Master Lane
2145 + start_xgkr_an(&xgkr_inst->xgkr[MASTER_LANE]);
2146 +		/* start state machine */
2147 + for (i = 0; i < xgkr_inst->phy_lanes; i++) {
2148 + start_xgkr_state_machine(&xgkr_inst->xgkr[i].xgkr_wk);
2149 + }
2150 +
2151 + break;
2152 + }
2153 +
2154 + phydev->advertising = phydev->supported;
2155 +	phydev->duplex = DUPLEX_FULL;
2156 +
2157 + return 0;
2158 +}
2159 +
2160 +static int fsl_backplane_suspend(struct phy_device *phydev)
2161 +{
2162 + int i;
2163 +
2164 + if (!phydev->mdio.dev.of_node) {
2165 + dev_err(&phydev->mdio.dev, "No associated device tree node\n");
2166 + return -EINVAL;
2167 + }
2168 +
2169 + if (phydev->speed == SPEED_10000 || phydev->speed == SPEED_40000) {
2170 + struct xgkr_phy_data *xgkr_inst = phydev->priv;
2171 +
2172 + for (i = 0; i < xgkr_inst->phy_lanes; i++)
2173 + cancel_delayed_work_sync(&xgkr_inst->xgkr[i].xgkr_wk);
2174 + }
2175 + return 0;
2176 +}
2177 +
2178 +static int fsl_backplane_resume(struct phy_device *phydev)
2179 +{
2180 + struct xgkr_phy_data *xgkr_inst = phydev->priv;
2181 + int i;
2182 +
2183 + if (!phydev->mdio.dev.of_node) {
2184 + dev_err(&phydev->mdio.dev, "No associated device tree node\n");
2185 + return -EINVAL;
2186 + }
2187 +
2188 + if (phydev->speed == SPEED_10000 || phydev->speed == SPEED_40000) {
2189 + for (i = 0; i < xgkr_inst->phy_lanes; i++) {
2190 + init_xgkr(&xgkr_inst->xgkr[i], 1);
2191 + start_xgkr_state_machine(&xgkr_inst->xgkr[i].xgkr_wk);
2192 + }
2193 + }
2194 + return 0;
2195 +}
2196 +
2197 +static int fsl_backplane_read_status(struct phy_device *phydev)
2198 +{
2199 + if (!phydev->mdio.dev.of_node) {
2200 + dev_err(&phydev->mdio.dev, "No associated device tree node\n");
2201 + return -EINVAL;
2202 + }
2203 +
2204 + if (is_link_up(phydev))
2205 + phydev->link = 1;
2206 + else
2207 + phydev->link = 0;
2208 +
2209 + return 0;
2210 +}
2211 +
2212 +static int fsl_backplane_match_phy_device(struct phy_device *phydev)
2213 +{
2214 + struct device_node *phy_node, *lane_node;
2215 + const char *st;
2216 + int serdes_type, i, ret;
2217 + const int num_ids = ARRAY_SIZE(phydev->c45_ids.device_ids);
2218 +
2219 + if (!phydev->mdio.dev.of_node) {
2220 + return 0;
2221 + }
2222 +
2223 + // WORKAROUND:
2224 + // Required for LX2 devices
2225 + // where PHY ID cannot be verified in PCS
2226 + // because PCS Device Identifier Upper and Lower registers are hidden
2227 + // and always return 0 when they are read:
2228 + // 2 02 Device_ID0 RO Bits 15:0 0
2229 + // val = phy_read_mmd(phydev, MDIO_MMD_PCS, 0x2);
2230 + // 3 03 Device_ID1 RO Bits 31:16 0
2231 + // val = phy_read_mmd(phydev, MDIO_MMD_PCS, 0x3);
2232 + //
2233 +	// To be removed after the issue is fixed on LX2 devices
2234 +
2235 + if (!phydev->is_c45)
2236 + return 0;
2237 +
2238 + phy_node = phydev->mdio.dev.of_node;
2239 +
2240 + lane_node = of_parse_phandle(phy_node, "fsl,lane-handle", 0);
2241 + if (!lane_node) {
2242 + dev_err(&phydev->mdio.dev, "parse fsl,lane-handle failed\n");
2243 + return 0;
2244 + }
2245 +
2246 + ret = of_property_read_string(lane_node, "compatible", &st);
2247 + if (ret < 0) {
2248 + //assume SERDES-10G if compatible property is not specified
2249 + serdes_type = SERDES_10G;
2250 +	} else if (!strcasecmp(st, "fsl,serdes-10g")) {
2252 + serdes_type = SERDES_10G;
2253 + } else if (!strcasecmp(st, "fsl,serdes-28g")) {
2254 + serdes_type = SERDES_28G;
2255 + } else {
2256 + dev_err(&phydev->mdio.dev, "Unknown serdes-type\n");
2257 + return 0;
2258 + }
2259 +
2260 + if (serdes_type == SERDES_10G) {
2261 +		//On LS devices we must find the c45 device with the correct PHY ID
2262 +		//Implementation similar to the one in phy_device: @function: phy_bus_match
2263 + for (i = 1; i < num_ids; i++) {
2264 + if (!(phydev->c45_ids.devices_in_package & (1 << i)))
2265 + continue;
2266 +
2267 +			if ((PCS_PHY_DEVICE_ID & PCS_PHY_DEVICE_ID_MASK) ==
2268 +			    (phydev->c45_ids.device_ids[i] & PCS_PHY_DEVICE_ID_MASK)) {
2270 + return 1;
2271 + }
2272 + }
2273 + return 0;
2274 + }
2275 +
2276 +	//On LX devices we cannot verify the PHY ID, so we rely on the
2277 +	//preliminary checks already made (mdio.dev.of_node and is_c45),
2278 +	//which filtered out the undesired, non clause 45 devices
2279 +
2280 + return 1;
2281 +}
2282 +
2283 +static struct phy_driver fsl_backplane_driver[] = {
2284 + {
2285 + .phy_id = PCS_PHY_DEVICE_ID,
2286 + .name = "Freescale Backplane",
2287 + .phy_id_mask = PCS_PHY_DEVICE_ID_MASK,
2288 + .features = SUPPORTED_Backplane | SUPPORTED_Autoneg |
2289 + SUPPORTED_MII,
2290 + .probe = fsl_backplane_probe,
2291 + .aneg_done = fsl_backplane_aneg_done,
2292 + .config_aneg = fsl_backplane_config_aneg,
2293 + .read_status = fsl_backplane_read_status,
2294 + .suspend = fsl_backplane_suspend,
2295 + .resume = fsl_backplane_resume,
2296 + .match_phy_device = fsl_backplane_match_phy_device,
2297 + },
2298 +};
2299 +
2300 +module_phy_driver(fsl_backplane_driver);
2301 +
2302 +static struct mdio_device_id __maybe_unused freescale_tbl[] = {
2303 + { PCS_PHY_DEVICE_ID, PCS_PHY_DEVICE_ID_MASK },
2304 + { }
2305 +};
2306 +
2307 +MODULE_DEVICE_TABLE(mdio, freescale_tbl);
2308 +
2309 +MODULE_DESCRIPTION("Freescale Backplane driver");
2310 +MODULE_AUTHOR("Shaohui Xie <Shaohui.Xie@freescale.com>");
2311 +MODULE_LICENSE("GPL v2");
2312 --- /dev/null
2313 +++ b/drivers/net/phy/fsl_backplane.h
2314 @@ -0,0 +1,41 @@
2315 +/* SPDX-License-Identifier: GPL-2.0+ */
2316 +/*
2317 + * DPAA backplane driver.
2318 + * Author: Florinel Iordache <florinel.iordache@nxp.com>
2319 + *
2320 + * Copyright 2018 NXP
2321 + *
2322 + * Licensed under the GPL-2 or later.
2323 + */
2324 +
2325 +#ifndef FSL_BACKPLANE_H
2326 +#define FSL_BACKPLANE_H
2327 +
2328 +/* C(-1) */
2329 +#define BIN_M1 0
2330 +/* C(1) */
2331 +#define BIN_LONG 1
2332 +
2333 +#define BIN_SNAPSHOT_NUM 5
2334 +#define BIN_M1_THRESHOLD 3
2335 +#define BIN_LONG_THRESHOLD 2
2336 +
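+/*
+ * Ops table abstracting the SerDes register layout and endianness; filled
+ * in by fsl_backplane_serdes_10g.c or fsl_backplane_serdes_28g.c so the
+ * training logic in fsl_backplane.c stays layout-agnostic.
+ */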
2337 +struct serdes_access {
2338 +
2339 + int serdes_type;
2340 + bool is_little_endian;
2341 + u32 (*ioread32)(u32 *reg);
2342 + void (*iowrite32)(u32 value, u32 *reg);
2343 + u32 (*get_lane_memmap_size)(void);
2344 + void (*tune_tecr)(void *reg, u32 ratio_preq, u32 ratio_pst1q, u32 adpt_eq, bool reset);
2345 + void (*reset_lane)(void *reg);
2346 + void (*lane_set_1gkx)(void *reg);
2347 + int (*get_median_gaink2)(u32 *reg);
2348 + bool (*is_bin_early)(int bin_sel, void *reg);
2349 +};
2350 +
2351 +struct serdes_access *setup_serdes_access_10g(void);
2352 +struct serdes_access *setup_serdes_access_28g(void);
2353 +
2354 +#endif /* FSL_BACKPLANE_H */
2356 --- /dev/null
2357 +++ b/drivers/net/phy/fsl_backplane_serdes_10g.c
2358 @@ -0,0 +1,281 @@
2359 +// SPDX-License-Identifier: GPL-2.0+
2360 +/*
2361 + * DPAA backplane driver for SerDes 10G.
2362 + * Author: Florinel Iordache <florinel.iordache@nxp.com>
2363 + *
2364 + * Copyright 2018 NXP
2365 + *
2366 + * Licensed under the GPL-2 or later.
2367 + */
2368 +
2369 +#include <linux/io.h>
2370 +#include <linux/delay.h>
2371 +
2372 +#include "fsl_backplane.h"
2373 +
2374 +#define BIN_M1_SEL 6
2375 +#define BIN_Long_SEL 7
2376 +#define CDR_SEL_MASK 0x00070000
2377 +
2378 +#define PRE_COE_SHIFT 22
2379 +#define POST_COE_SHIFT 16
2380 +#define ZERO_COE_SHIFT 8
2381 +
2382 +#define TECR0_INIT 0x24200000
2383 +
2384 +#define GCR0_RESET_MASK 0x00600000
2385 +
2386 +#define GCR1_SNP_START_MASK 0x00000040
2387 +#define GCR1_CTL_SNP_START_MASK 0x00002000
2388 +
2389 +#define RECR1_CTL_SNP_DONE_MASK 0x00000002
2390 +#define RECR1_SNP_DONE_MASK 0x00000004
2391 +#define TCSR1_SNP_DATA_MASK 0x0000ffc0
2392 +#define TCSR1_SNP_DATA_SHIFT 6
2393 +#define TCSR1_EQ_SNPBIN_SIGN_MASK 0x100
2394 +
2395 +#define RECR1_GAINK2_MASK 0x0f000000
2396 +#define RECR1_GAINK2_SHIFT 24
2397 +
2398 +/* Required only for 1000BASE-KX */
2399 +#define GCR1_REIDL_TH_MASK 0x00700000
2400 +#define GCR1_REIDL_EX_SEL_MASK 0x000c0000
2401 +#define GCR1_REIDL_ET_MAS_MASK 0x00004000
2402 +#define TECR0_AMP_RED_MASK 0x0000003f
2403 +
2404 +struct per_lane_ctrl_status {
2405 + u32 gcr0; /* 0x.000 - General Control Register 0 */
2406 + u32 gcr1; /* 0x.004 - General Control Register 1 */
2407 + u32 gcr2; /* 0x.008 - General Control Register 2 */
2408 + u32 resv1; /* 0x.00C - Reserved */
2409 + u32 recr0; /* 0x.010 - Receive Equalization Control Register 0 */
2410 + u32 recr1; /* 0x.014 - Receive Equalization Control Register 1 */
2411 + u32 tecr0; /* 0x.018 - Transmit Equalization Control Register 0 */
2412 + u32 resv2; /* 0x.01C - Reserved */
2413 + u32 tlcr0; /* 0x.020 - TTL Control Register 0 */
2414 + u32 tlcr1; /* 0x.024 - TTL Control Register 1 */
2415 + u32 tlcr2; /* 0x.028 - TTL Control Register 2 */
2416 + u32 tlcr3; /* 0x.02C - TTL Control Register 3 */
2417 + u32 tcsr0; /* 0x.030 - Test Control/Status Register 0 */
2418 + u32 tcsr1; /* 0x.034 - Test Control/Status Register 1 */
2419 + u32 tcsr2; /* 0x.038 - Test Control/Status Register 2 */
2420 + u32 tcsr3; /* 0x.03C - Test Control/Status Register 3 */
2421 +};
2422 +
2423 +static struct serdes_access srds;
2424 +
2425 +static u32 get_lane_memmap_size(void)
2426 +{
2427 + return 0x40;
2428 +}
2429 +
2430 +static void reset_lane(void *reg)
2431 +{
2432 + struct per_lane_ctrl_status *reg_base = reg;
2433 +
2434 + /* reset the lane */
2435 + srds.iowrite32(srds.ioread32(&reg_base->gcr0) & ~GCR0_RESET_MASK,
2436 + &reg_base->gcr0);
2437 + udelay(1);
2438 +
2439 + /* unreset the lane */
2440 + srds.iowrite32(srds.ioread32(&reg_base->gcr0) | GCR0_RESET_MASK,
2441 + &reg_base->gcr0);
2442 + udelay(1);
2443 +}
2444 +
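+/*
+ * Program the Tx equalization coefficients on top of the TECR0_INIT
+ * baseline: ratio_preq is the pre-cursor, ratio_pst1q the first
+ * post-cursor and adpt_eq the amplitude (zero) setting. The lane is
+ * optionally held in reset around the write.
+ */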
2445 +static void tune_tecr(void *reg, u32 ratio_preq, u32 ratio_pst1q, u32 adpt_eq, bool reset)
2446 +{
2447 + struct per_lane_ctrl_status *reg_base = reg;
2448 + u32 val;
2449 +
2450 + val = TECR0_INIT |
2451 + adpt_eq << ZERO_COE_SHIFT |
2452 + ratio_preq << PRE_COE_SHIFT |
2453 + ratio_pst1q << POST_COE_SHIFT;
2454 +
2455 + if (reset) {
2456 + /* reset the lane */
2457 + srds.iowrite32(srds.ioread32(&reg_base->gcr0) & ~GCR0_RESET_MASK,
2458 + &reg_base->gcr0);
2459 + udelay(1);
2460 + }
2461 +
2462 + srds.iowrite32(val, &reg_base->tecr0);
2463 + udelay(1);
2464 +
2465 + if (reset) {
2466 + /* unreset the lane */
2467 + srds.iowrite32(srds.ioread32(&reg_base->gcr0) | GCR0_RESET_MASK,
2468 + &reg_base->gcr0);
2469 + udelay(1);
2470 + }
2471 +}
2472 +
2473 +static void lane_set_1gkx(void *reg)
2474 +{
2475 + struct per_lane_ctrl_status *reg_base = reg;
2476 + u32 val;
2477 +
2478 + /* reset the lane */
2479 + srds.iowrite32(srds.ioread32(&reg_base->gcr0) & ~GCR0_RESET_MASK,
2480 + &reg_base->gcr0);
2481 + udelay(1);
2482 +
2483 + /* set gcr1 for 1GKX */
2484 + val = srds.ioread32(&reg_base->gcr1);
2485 + val &= ~(GCR1_REIDL_TH_MASK | GCR1_REIDL_EX_SEL_MASK |
2486 + GCR1_REIDL_ET_MAS_MASK);
2487 + srds.iowrite32(val, &reg_base->gcr1);
2488 + udelay(1);
2489 +
2490 + /* set tecr0 for 1GKX */
2491 + val = srds.ioread32(&reg_base->tecr0);
2492 + val &= ~TECR0_AMP_RED_MASK;
2493 + srds.iowrite32(val, &reg_base->tecr0);
2494 + udelay(1);
2495 +
2496 + /* unreset the lane */
2497 + srds.iowrite32(srds.ioread32(&reg_base->gcr0) | GCR0_RESET_MASK,
2498 + &reg_base->gcr0);
2499 + udelay(1);
2500 +}
2501 +
2502 +static int get_median_gaink2(u32 *reg)
2503 +{
2504 + int gaink2_snap_shot[BIN_SNAPSHOT_NUM];
2505 + u32 rx_eq_snp;
2506 + struct per_lane_ctrl_status *reg_base;
2507 + int timeout;
2508 + int i, j, tmp, pos;
2509 +
2510 + reg_base = (struct per_lane_ctrl_status *)reg;
2511 +
2512 + for (i = 0; i < BIN_SNAPSHOT_NUM; i++) {
2513 +		/* wait until RECR1_CTL_SNP_DONE_MASK has cleared */
2514 + timeout = 100;
2515 + while (srds.ioread32(&reg_base->recr1) &
2516 + RECR1_CTL_SNP_DONE_MASK) {
2517 + udelay(1);
2518 + timeout--;
2519 + if (timeout == 0)
2520 + break;
2521 + }
2522 +
2523 + /* start snap shot */
2524 + srds.iowrite32((srds.ioread32(&reg_base->gcr1) |
2525 + GCR1_CTL_SNP_START_MASK),
2526 + &reg_base->gcr1);
2527 +
2528 + /* wait for SNP done */
2529 + timeout = 100;
2530 + while (!(srds.ioread32(&reg_base->recr1) &
2531 + RECR1_CTL_SNP_DONE_MASK)) {
2532 + udelay(1);
2533 + timeout--;
2534 + if (timeout == 0)
2535 + break;
2536 + }
2537 +
2538 + /* read and save the snap shot */
2539 + rx_eq_snp = srds.ioread32(&reg_base->recr1);
2540 + gaink2_snap_shot[i] = (rx_eq_snp & RECR1_GAINK2_MASK) >>
2541 + RECR1_GAINK2_SHIFT;
2542 +
2543 +		/* terminate the snapshot by clearing GCR1[CTL_SNP_START] */
2544 + srds.iowrite32((srds.ioread32(&reg_base->gcr1) &
2545 + ~GCR1_CTL_SNP_START_MASK),
2546 + &reg_base->gcr1);
2547 + }
2548 +
2549 +	/* get the median of the 5 snapshots */
2550 + for (i = 0; i < BIN_SNAPSHOT_NUM - 1; i++) {
2551 + tmp = gaink2_snap_shot[i];
2552 + pos = i;
2553 + for (j = i + 1; j < BIN_SNAPSHOT_NUM; j++) {
2554 + if (gaink2_snap_shot[j] < tmp) {
2555 + tmp = gaink2_snap_shot[j];
2556 + pos = j;
2557 + }
2558 + }
2559 +
2560 + gaink2_snap_shot[pos] = gaink2_snap_shot[i];
2561 + gaink2_snap_shot[i] = tmp;
2562 + }
2563 +
2564 + return gaink2_snap_shot[2];
2565 +}
2566 +
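+/*
+ * Take BIN_SNAPSHOT_NUM snapshots of the selected eye bin (BinM1 or
+ * BinLong) and report the bin as "early" when more than the per-bin
+ * threshold of them come back negative (sign bit set); the KR training
+ * logic uses this verdict when deciding coefficient moves.
+ */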
2567 +static bool is_bin_early(int bin_sel, void *reg)
2568 +{
2569 + bool early = false;
2570 + int bin_snap_shot[BIN_SNAPSHOT_NUM];
2571 + int i, negative_count = 0;
2572 + struct per_lane_ctrl_status *reg_base = reg;
2573 + int timeout;
2574 +
2575 + for (i = 0; i < BIN_SNAPSHOT_NUM; i++) {
2576 +		/* wait until RECR1_SNP_DONE_MASK has cleared */
2577 + timeout = 100;
2578 + while ((srds.ioread32(&reg_base->recr1) & RECR1_SNP_DONE_MASK)) {
2579 + udelay(1);
2580 + timeout--;
2581 + if (timeout == 0)
2582 + break;
2583 + }
2584 +
2585 + /* set TCSR1[CDR_SEL] to BinM1/BinLong */
2586 + if (bin_sel == BIN_M1) {
2587 + srds.iowrite32((srds.ioread32(&reg_base->tcsr1) &
2588 + ~CDR_SEL_MASK) | BIN_M1_SEL,
2589 + &reg_base->tcsr1);
2590 + } else {
2591 + srds.iowrite32((srds.ioread32(&reg_base->tcsr1) &
2592 + ~CDR_SEL_MASK) | BIN_Long_SEL,
2593 + &reg_base->tcsr1);
2594 + }
2595 +
2596 + /* start snap shot */
2597 + srds.iowrite32(srds.ioread32(&reg_base->gcr1) | GCR1_SNP_START_MASK,
2598 + &reg_base->gcr1);
2599 +
2600 + /* wait for SNP done */
2601 + timeout = 100;
2602 + while (!(srds.ioread32(&reg_base->recr1) & RECR1_SNP_DONE_MASK)) {
2603 + udelay(1);
2604 + timeout--;
2605 + if (timeout == 0)
2606 + break;
2607 + }
2608 +
2609 + /* read and save the snap shot */
2610 + bin_snap_shot[i] = (srds.ioread32(&reg_base->tcsr1) &
2611 + TCSR1_SNP_DATA_MASK) >> TCSR1_SNP_DATA_SHIFT;
2612 + if (bin_snap_shot[i] & TCSR1_EQ_SNPBIN_SIGN_MASK)
2613 + negative_count++;
2614 +
2615 +		/* terminate the snapshot by clearing GCR1[SNP_START] */
2616 + srds.iowrite32(srds.ioread32(&reg_base->gcr1) & ~GCR1_SNP_START_MASK,
2617 + &reg_base->gcr1);
2618 + }
2619 +
2620 + if (((bin_sel == BIN_M1) && (negative_count > BIN_M1_THRESHOLD)) ||
2621 +	    ((bin_sel == BIN_LONG) && (negative_count > BIN_LONG_THRESHOLD))) {
2622 + early = true;
2623 + }
2624 +
2625 + return early;
2626 +}
2627 +
2628 +struct serdes_access *setup_serdes_access_10g(void)
2629 +{
2630 + srds.get_lane_memmap_size = get_lane_memmap_size;
2631 + srds.tune_tecr = tune_tecr;
2632 + srds.reset_lane = reset_lane;
2633 + srds.lane_set_1gkx = lane_set_1gkx;
2634 + srds.get_median_gaink2 = get_median_gaink2;
2635 + srds.is_bin_early = is_bin_early;
2636 +
2637 + return &srds;
2638 +}
2640 --- /dev/null
2641 +++ b/drivers/net/phy/fsl_backplane_serdes_28g.c
2642 @@ -0,0 +1,336 @@
2643 +// SPDX-License-Identifier: GPL-2.0+
2644 +/*
2645 + * DPAA backplane driver for SerDes 28G.
2646 + * Author: Florinel Iordache <florinel.iordache@nxp.com>
2647 + *
2648 + * Copyright 2018 NXP
2649 + *
2650 + * Licensed under the GPL-2 or later.
2651 + */
2652 +
2653 +#include <linux/io.h>
2654 +#include <linux/delay.h>
2655 +#include <linux/sched.h>
2656 +
2657 +#include "fsl_backplane.h"
2658 +
2659 +#define BIN_M1_SEL 0x0000c000
2660 +#define BIN_Long_SEL 0x0000d000
2661 +#define CDR_SEL_MASK 0x0000f000
2662 +
2663 +#define PRE_COE_SHIFT 16
2664 +#define POST_COE_SHIFT 8
2665 +#define ZERO_COE_SHIFT 24
2666 +
2667 +#define TECR0_INIT 0x20808000
2668 +
2669 +#define RESET_REQ_MASK 0x80000000
2670 +
2671 +#define RECR3_SNP_START_MASK 0x80000000
2672 +#define RECR3_SNP_DONE_MASK 0x40000000
2673 +
2674 +#define RECR4_SNP_DATA_MASK 0x000003ff
2675 +#define RECR4_SNP_DATA_SHIFT 0
2676 +#define RECR4_EQ_SNPBIN_SIGN_MASK 0x200
2677 +
2678 +#define RECR3_GAINK2_MASK 0x1f000000
2679 +#define RECR3_GAINK2_SHIFT 24
2680 +
2681 +/* Required only for 1000BASE-KX */
2682 +#define GCR1_REIDL_TH_MASK 0x00700000
2683 +#define GCR1_REIDL_EX_SEL_MASK 0x000c0000
2684 +#define GCR1_REIDL_ET_MAS_MASK 0x04000000
2685 +#define TECR0_AMP_RED_MASK 0x0000003f
2686 +
2687 +struct per_lane_ctrl_status {
2688 + u32 gcr0; /* 0x.000 - General Control Register 0 */
2689 + u32 resv1; /* 0x.004 - Reserved */
2690 + u32 resv2; /* 0x.008 - Reserved */
2691 + u32 resv3; /* 0x.00C - Reserved */
2692 + u32 resv4; /* 0x.010 - Reserved */
2693 + u32 resv5; /* 0x.014 - Reserved */
2694 + u32 resv6; /* 0x.018 - Reserved */
2695 + u32 resv7; /* 0x.01C - Reserved */
2696 + u32 trstctl; /* 0x.020 - TX Reset Control Register */
2697 + u32 tgcr0; /* 0x.024 - TX General Control Register 0 */
2698 + u32 tgcr1; /* 0x.028 - TX General Control Register 1 */
2699 + u32 tgcr2; /* 0x.02C - TX General Control Register 2 */
2700 + u32 tecr0; /* 0x.030 - Transmit Equalization Control Register 0 */
2701 + u32 tecr1; /* 0x.034 - Transmit Equalization Control Register 1 */
2702 + u32 resv8; /* 0x.038 - Reserved */
2703 + u32 resv9; /* 0x.03C - Reserved */
2704 + u32 rrstctl; /* 0x.040 - RX Reset Control Register */
2705 + u32 rgcr0; /* 0x.044 - RX General Control Register 0 */
2706 + u32 rxgcr1; /* 0x.048 - RX General Control Register 1 */
2707 + u32 resv10; /* 0x.04C - Reserved */
2708 + u32 recr0; /* 0x.050 - RX Equalization Register 0 */
2709 + u32 recr1; /* 0x.054 - RX Equalization Register 1 */
2710 + u32 recr2; /* 0x.058 - RX Equalization Register 2 */
2711 + u32 recr3; /* 0x.05C - RX Equalization Register 3 */
2712 + u32 recr4; /* 0x.060 - RX Equalization Register 4 */
2713 + u32 resv11; /* 0x.064 - Reserved */
2714 + u32 rccr0; /* 0x.068 - RX Calibration Register 0 */
2715 + u32 rccr1; /* 0x.06C - RX Calibration Register 1 */
2716 + u32 rcpcr0; /* 0x.070 - RX Clock Path Register 0 */
2717 + u32 rsccr0; /* 0x.074 - RX Sampler Calibration Control Register 0 */
2718 + u32 rsccr1; /* 0x.078 - RX Sampler Calibration Control Register 1 */
2719 + u32 resv12; /* 0x.07C - Reserved */
2720 + u32 ttlcr0; /* 0x.080 - Transition Tracking Loop Register 0 */
2721 + u32 ttlcr1; /* 0x.084 - Transition Tracking Loop Register 1 */
2722 + u32 ttlcr2; /* 0x.088 - Transition Tracking Loop Register 2 */
2723 + u32 ttlcr3; /* 0x.08C - Transition Tracking Loop Register 3 */
2724 + u32 resv13; /* 0x.090 - Reserved */
2725 + u32 resv14; /* 0x.094 - Reserved */
2726 + u32 resv15; /* 0x.098 - Reserved */
2727 + u32 resv16; /* 0x.09C - Reserved */
2728 + u32 tcsr0; /* 0x.0A0 - Test Control/Status Register 0 */
2729 + u32 tcsr1; /* 0x.0A4 - Test Control/Status Register 1 */
2730 + u32 tcsr2; /* 0x.0A8 - Test Control/Status Register 2 */
2731 + u32 tcsr3; /* 0x.0AC - Test Control/Status Register 3 */
2732 + u32 tcsr4; /* 0x.0B0 - Test Control/Status Register 4 */
2733 + u32 resv17; /* 0x.0B4 - Reserved */
2734 + u32 resv18; /* 0x.0B8 - Reserved */
2735 + u32 resv19; /* 0x.0BC - Reserved */
2736 + u32 rxcb0; /* 0x.0C0 - RX Control Block Register 0 */
2737 + u32 rxcb1; /* 0x.0C4 - RX Control Block Register 1 */
2738 + u32 resv20; /* 0x.0C8 - Reserved */
2739 + u32 resv21; /* 0x.0CC - Reserved */
2740 + u32 rxss0; /* 0x.0D0 - RX Speed Switch Register 0 */
2741 + u32 rxss1; /* 0x.0D4 - RX Speed Switch Register 1 */
2742 + u32 rxss2; /* 0x.0D8 - RX Speed Switch Register 2 */
2743 + u32 resv22; /* 0x.0DC - Reserved */
2744 + u32 txcb0; /* 0x.0E0 - TX Control Block Register 0 */
2745 + u32 txcb1; /* 0x.0E4 - TX Control Block Register 1 */
2746 + u32 resv23; /* 0x.0E8 - Reserved */
2747 + u32 resv24; /* 0x.0EC - Reserved */
2748 + u32 txss0; /* 0x.0F0 - TX Speed Switch Register 0 */
2749 + u32 txss1; /* 0x.0F4 - TX Speed Switch Register 1 */
2750 + u32 txss2; /* 0x.0F8 - TX Speed Switch Register 2 */
2751 + u32 resv25; /* 0x.0FC - Reserved */
2752 +};
2753 +
2754 +static struct serdes_access srds;
2755 +
2756 +static u32 get_lane_memmap_size(void)
2757 +{
2758 + return 0x100;
2759 +}
2760 +
2761 +static void reset_lane(void *reg)
2762 +{
2763 + struct per_lane_ctrl_status *reg_base = reg;
2764 + u32 val;
2765 + unsigned long timeout;
2766 +
2767 + /* reset Tx lane: send reset request */
2768 + srds.iowrite32(srds.ioread32(&reg_base->trstctl) | RESET_REQ_MASK,
2769 + &reg_base->trstctl);
2770 + udelay(1);
2771 + timeout = 10;
2772 + while (timeout--) {
2773 + val = srds.ioread32(&reg_base->trstctl);
2774 + if (!(val & RESET_REQ_MASK))
2775 + break;
2776 + usleep_range(5, 20);
2777 + }
2778 +
2779 + /* reset Rx lane: send reset request */
2780 + srds.iowrite32(srds.ioread32(&reg_base->rrstctl) | RESET_REQ_MASK,
2781 + &reg_base->rrstctl);
2782 + udelay(1);
2783 + timeout = 10;
2784 + while (timeout--) {
2785 + val = srds.ioread32(&reg_base->rrstctl);
2786 + if (!(val & RESET_REQ_MASK))
2787 + break;
2788 + usleep_range(5, 20);
2789 + }
2790 +
2791 + /* wait for a while after reset */
2792 + timeout = jiffies + 10;
2793 + while (time_before(jiffies, timeout)) {
2794 + schedule();
2795 + usleep_range(5, 20);
2796 + }
2797 +}
2798 +
2799 +static void tune_tecr(void *reg, u32 ratio_preq, u32 ratio_pst1q, u32 adpt_eq, bool reset)
2800 +{
2801 + struct per_lane_ctrl_status *reg_base = reg;
2802 + u32 val;
2803 +
2804 + if (reset) {
2805 + /* reset lanes */
2806 + reset_lane(reg);
2807 + }
2808 +
2809 + val = TECR0_INIT |
2810 + ratio_preq << PRE_COE_SHIFT |
2811 + ratio_pst1q << POST_COE_SHIFT;
2812 + srds.iowrite32(val, &reg_base->tecr0);
2813 +
2814 + val = adpt_eq << ZERO_COE_SHIFT;
2815 + srds.iowrite32(val, &reg_base->tecr1);
2816 +
2817 + udelay(1);
2818 +}
2819 +
2820 +static void lane_set_1gkx(void *reg)
2821 +{
2822 + struct per_lane_ctrl_status *reg_base = reg;
2823 + u32 val;
2824 +
2825 + /* reset lanes */
2826 + reset_lane(reg);
2827 +
2828 + /* set gcr1 for 1GKX */
2829 + val = srds.ioread32(&reg_base->rxgcr1);
2830 + val &= ~(GCR1_REIDL_TH_MASK | GCR1_REIDL_EX_SEL_MASK |
2831 + GCR1_REIDL_ET_MAS_MASK);
2832 + srds.iowrite32(val, &reg_base->rxgcr1);
2833 + udelay(1);
2834 +
2835 + /* set tecr0 for 1GKX */
2836 + val = srds.ioread32(&reg_base->tecr0);
2837 + val &= ~TECR0_AMP_RED_MASK;
2838 + srds.iowrite32(val, &reg_base->tecr0);
2839 + udelay(1);
2840 +}
2841 +
2842 +static int get_median_gaink2(u32 *reg)
2843 +{
2844 + int gaink2_snap_shot[BIN_SNAPSHOT_NUM];
2845 + u32 rx_eq_snp;
2846 + struct per_lane_ctrl_status *reg_base;
2847 + int timeout;
2848 + int i, j, tmp, pos;
2849 +
2850 + reg_base = (struct per_lane_ctrl_status *)reg;
2851 +
2852 + for (i = 0; i < BIN_SNAPSHOT_NUM; i++) {
2853 +		/* wait until RECR3_SNP_DONE_MASK has cleared */
2854 + timeout = 100;
2855 + while (srds.ioread32(&reg_base->recr3) &
2856 + RECR3_SNP_DONE_MASK) {
2857 + udelay(1);
2858 + timeout--;
2859 + if (timeout == 0)
2860 + break;
2861 + }
2862 +
2863 + /* start snap shot */
2864 + srds.iowrite32((srds.ioread32(&reg_base->recr3) |
2865 + RECR3_SNP_START_MASK),
2866 + &reg_base->recr3);
2867 +
2868 + /* wait for SNP done */
2869 + timeout = 100;
2870 + while (!(srds.ioread32(&reg_base->recr3) &
2871 + RECR3_SNP_DONE_MASK)) {
2872 + udelay(1);
2873 + timeout--;
2874 + if (timeout == 0)
2875 + break;
2876 + }
2877 +
2878 + /* read and save the snap shot */
2879 + rx_eq_snp = srds.ioread32(&reg_base->recr3);
2880 + gaink2_snap_shot[i] = (rx_eq_snp & RECR3_GAINK2_MASK) >>
2881 + RECR3_GAINK2_SHIFT;
2882 +
2883 +		/* terminate the snapshot by clearing RECR3[SNP_START] */
2884 + srds.iowrite32((srds.ioread32(&reg_base->recr3) &
2885 + ~RECR3_SNP_START_MASK),
2886 + &reg_base->recr3);
2887 + }
2888 +
2889 +	/* get the median of the 5 snapshots */
2890 + for (i = 0; i < BIN_SNAPSHOT_NUM - 1; i++) {
2891 + tmp = gaink2_snap_shot[i];
2892 + pos = i;
2893 + for (j = i + 1; j < BIN_SNAPSHOT_NUM; j++) {
2894 + if (gaink2_snap_shot[j] < tmp) {
2895 + tmp = gaink2_snap_shot[j];
2896 + pos = j;
2897 + }
2898 + }
2899 +
2900 + gaink2_snap_shot[pos] = gaink2_snap_shot[i];
2901 + gaink2_snap_shot[i] = tmp;
2902 + }
2903 +
2904 + return gaink2_snap_shot[2];
2905 +}
2906 +
2907 +static bool is_bin_early(int bin_sel, void *reg)
2908 +{
2909 + bool early = false;
2910 + int bin_snap_shot[BIN_SNAPSHOT_NUM];
2911 + int i, negative_count = 0;
2912 + struct per_lane_ctrl_status *reg_base = reg;
2913 + int timeout;
2914 +
2915 + for (i = 0; i < BIN_SNAPSHOT_NUM; i++) {
2916 +		/* wait until RECR3_SNP_DONE_MASK has cleared */
2917 + timeout = 100;
2918 + while ((srds.ioread32(&reg_base->recr3) & RECR3_SNP_DONE_MASK)) {
2919 + udelay(1);
2920 + timeout--;
2921 + if (timeout == 0)
2922 + break;
2923 + }
2924 +
2925 +		/* set RECR4[CDR_SEL] to BinM1/BinLong */
2926 + if (bin_sel == BIN_M1) {
2927 + srds.iowrite32((srds.ioread32(&reg_base->recr4) &
2928 + ~CDR_SEL_MASK) | BIN_M1_SEL,
2929 + &reg_base->recr4);
2930 + } else {
2931 + srds.iowrite32((srds.ioread32(&reg_base->recr4) &
2932 + ~CDR_SEL_MASK) | BIN_Long_SEL,
2933 + &reg_base->recr4);
2934 + }
2935 +
2936 + /* start snap shot */
2937 + srds.iowrite32(srds.ioread32(&reg_base->recr3) | RECR3_SNP_START_MASK,
2938 + &reg_base->recr3);
2939 +
2940 + /* wait for SNP done */
2941 + timeout = 100;
2942 + while (!(srds.ioread32(&reg_base->recr3) & RECR3_SNP_DONE_MASK)) {
2943 + udelay(1);
2944 + timeout--;
2945 + if (timeout == 0)
2946 + break;
2947 + }
2948 +
2949 + /* read and save the snap shot */
2950 + bin_snap_shot[i] = (srds.ioread32(&reg_base->recr4) &
2951 + RECR4_SNP_DATA_MASK) >> RECR4_SNP_DATA_SHIFT;
2952 + if (bin_snap_shot[i] & RECR4_EQ_SNPBIN_SIGN_MASK)
2953 + negative_count++;
2954 +
2955 +		/* terminate the snapshot by clearing RECR3[SNP_START] */
2956 + srds.iowrite32(srds.ioread32(&reg_base->recr3) & ~RECR3_SNP_START_MASK,
2957 + &reg_base->recr3);
2958 + }
2959 +
2960 + if (((bin_sel == BIN_M1) && (negative_count > BIN_M1_THRESHOLD)) ||
2961 +	    ((bin_sel == BIN_LONG) && (negative_count > BIN_LONG_THRESHOLD))) {
2962 + early = true;
2963 + }
2964 +
2965 + return early;
2966 +}
2967 +
2968 +struct serdes_access *setup_serdes_access_28g(void)
2969 +{
2970 + srds.get_lane_memmap_size = get_lane_memmap_size;
2971 + srds.tune_tecr = tune_tecr;
2972 + srds.reset_lane = reset_lane;
2973 + srds.lane_set_1gkx = lane_set_1gkx;
2974 + srds.get_median_gaink2 = get_median_gaink2;
2975 + srds.is_bin_early = is_bin_early;
2976 +
2977 + return &srds;
2978 +}
2979 --- /dev/null
2980 +++ b/drivers/net/phy/inphi.c
2981 @@ -0,0 +1,594 @@
2982 +/*
2983 + * Copyright 2018 NXP
2984 + * Copyright 2018 INPHI
2985 + *
2986 + * Redistribution and use in source and binary forms, with or without
2987 + * modification, are permitted provided that the following conditions are met:
2988 + *
2989 + * 1. Redistributions of source code must retain the above copyright notice,
2990 + * this list of conditions and the following disclaimer.
2991 + * 2. Redistributions in binary form must reproduce the above copyright notice,
2992 + * this list of conditions and the following disclaimer in the documentation
2993 + * and/or other materials provided with the distribution.
2994 + *
2995 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
2996 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
2997 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
2998 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
2999 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
3000 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
3001 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
3002 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
3003 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
3004 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
3005 + * POSSIBILITY OF SUCH DAMAGE.
3006 + *
3007 + * Inphi is a registered trademark of Inphi Corporation
3008 + *
3009 + */
3010 +
3011 +#include <linux/module.h>
3012 +#include <linux/phy.h>
3013 +#include <linux/mdio.h>
3014 +#include <linux/interrupt.h>
3015 +#include <linux/platform_device.h>
3016 +#include <linux/of_irq.h>
3017 +#include <linux/workqueue.h>
3018 +#include <linux/i2c.h>
3019 +#include <linux/timer.h>
3020 +#include <linux/delay.h>
3021 +#include <linux/kernel.h>
3022 +#include <linux/init.h>
3023 +#include <linux/fs.h>
3024 +#include <linux/cdev.h>
3025 +#include <linux/device.h>
3026 +#include <linux/slab.h>
3027 +#include <linux/uaccess.h>
3028 +
3029 +#define PHY_ID_IN112525 0x02107440
3030 +
3031 +#define INPHI_S03_DEVICE_ID_MSB 0x2
3032 +#define INPHI_S03_DEVICE_ID_LSB 0x3
3033 +
3034 +#define ALL_LANES 4
3035 +#define INPHI_POLL_DELAY 2500
3036 +
3037 +#define PHYCTRL_REG1 0x0012
3038 +#define PHYCTRL_REG2 0x0014
3039 +#define PHYCTRL_REG3 0x0120
3040 +#define PHYCTRL_REG4 0x0121
3041 +#define PHYCTRL_REG5 0x0180
3042 +#define PHYCTRL_REG6 0x0580
3043 +#define PHYCTRL_REG7 0x05C4
3044 +#define PHYCTRL_REG8 0x01C8
3045 +#define PHYCTRL_REG9 0x0521
3046 +
3047 +#define PHYSTAT_REG1 0x0021
3048 +#define PHYSTAT_REG2 0x0022
3049 +#define PHYSTAT_REG3 0x0123
3050 +
3051 +#define PHYMISC_REG1 0x0025
3052 +#define PHYMISC_REG2 0x002c
3053 +#define PHYMISC_REG3 0x00b3
3054 +#define PHYMISC_REG4 0x0181
3055 +#define PHYMISC_REG5 0x019D
3056 +#define PHYMISC_REG6 0x0198
3057 +#define PHYMISC_REG7 0x0199
3058 +#define PHYMISC_REG8 0x0581
3059 +#define PHYMISC_REG9 0x0598
3060 +#define PHYMISC_REG10 0x059c
3061 +#define PHYMISC_REG20 0x01B0
3062 +#define PHYMISC_REG21 0x01BC
3063 +#define PHYMISC_REG22 0x01C0
3064 +
3065 +#define RX_VCO_CODE_OFFSET 5
3066 +#define VCO_CODE 390
3067 +
3068 +static int vco_codes[ALL_LANES] = {
3069 + VCO_CODE,
3070 + VCO_CODE,
3071 + VCO_CODE,
3072 + VCO_CODE
3073 +};
3074 +
3075 +static void inphi_work_handler(struct work_struct *w);
3076 +
3077 +static struct workqueue_struct *wq;
3078 +static DECLARE_DELAYED_WORK(inphi_work, inphi_work_handler);
3079 +static unsigned long poll_interval;
3080 +static struct phy_device *inphi_phydev;
3081 +
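+/*
+ * Helpers for clause 45 access through the regular mdiobus API: the MMD
+ * (vendor 1) and the 16-bit register number are packed into one regnum
+ * together with the MII_ADDR_C45 flag. Both rely on the file-scope
+ * inphi_phydev set in inphi_probe().
+ */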
3082 +static int mdio_wr(u32 regnum, u16 val)
3083 +{
3084 + regnum = MII_ADDR_C45 | (MDIO_MMD_VEND1 << 16) | (regnum & 0xffff);
3085 +
3086 + return mdiobus_write(inphi_phydev->mdio.bus, inphi_phydev->mdio.addr,
3087 + regnum, val);
3088 +}
3089 +
3090 +static int mdio_rd(u32 regnum)
3091 +{
3092 + regnum = MII_ADDR_C45 | (MDIO_MMD_VEND1 << 16) | (regnum & 0xffff);
3093 +
3094 + return mdiobus_read(inphi_phydev->mdio.bus, inphi_phydev->mdio.addr,
3095 + regnum);
3096 +}
3097 +
3099 +int bit_test(int value, int bit_field)
3100 +{
3101 + int result;
3102 + int bit_mask = (1 << bit_field);
3103 +
3104 + result = ((value & bit_mask) == bit_mask);
3105 + return result;
3106 +}
3107 +
3108 +int tx_pll_lock_test(int lane)
3109 +{
3110 + int i, val, locked = 1;
3111 +
3112 + if (lane == ALL_LANES) {
3113 + for (i = 0; i < ALL_LANES; i++) {
3114 + val = mdio_rd(i * 0x100 + PHYSTAT_REG3);
3115 + locked = locked & bit_test(val, 15);
3116 + }
3117 + } else {
3118 + val = mdio_rd(lane * 0x100 + PHYSTAT_REG3);
3119 + locked = locked & bit_test(val, 15);
3120 + }
3121 +
3122 + return locked;
3123 +}
3124 +
3125 +void rx_reset_assert(int lane)
3126 +{
3127 + int mask, val;
3128 +
3129 + if (lane == ALL_LANES) {
3130 + val = mdio_rd(PHYMISC_REG2);
3131 + mask = (1 << 15);
3132 + mdio_wr(PHYMISC_REG2, val + mask);
3133 + } else {
3134 + val = mdio_rd(lane * 0x100 + PHYCTRL_REG8);
3135 + mask = (1 << 6);
3136 + mdio_wr(lane * 0x100 + PHYCTRL_REG8, val + mask);
3137 + }
3138 +}
3139 +
3140 +void rx_reset_de_assert(int lane)
3141 +{
3142 + int mask, val;
3143 +
3144 + if (lane == ALL_LANES) {
3145 + val = mdio_rd(PHYMISC_REG2);
3146 + mask = 0xffff - (1 << 15);
3147 + mdio_wr(PHYMISC_REG2, val & mask);
3148 + } else {
3149 + val = mdio_rd(lane * 0x100 + PHYCTRL_REG8);
3150 + mask = 0xffff - (1 << 6);
3151 + mdio_wr(lane * 0x100 + PHYCTRL_REG8, val & mask);
3152 + }
3153 +}
3154 +
3155 +void rx_powerdown_assert(int lane)
3156 +{
3157 + int mask, val;
3158 +
3159 + val = mdio_rd(lane * 0x100 + PHYCTRL_REG8);
3160 + mask = (1 << 5);
3161 + mdio_wr(lane * 0x100 + PHYCTRL_REG8, val + mask);
3162 +}
3163 +
3164 +void rx_powerdown_de_assert(int lane)
3165 +{
3166 + int mask, val;
3167 +
3168 + val = mdio_rd(lane * 0x100 + PHYCTRL_REG8);
3169 + mask = 0xffff - (1 << 5);
3170 + mdio_wr(lane * 0x100 + PHYCTRL_REG8, val & mask);
3171 +}
3172 +
3173 +void tx_pll_assert(int lane)
3174 +{
3175 + int val, recal;
3176 +
3177 + if (lane == ALL_LANES) {
3178 + val = mdio_rd(PHYMISC_REG2);
3179 + recal = (1 << 12);
3180 + mdio_wr(PHYMISC_REG2, val | recal);
3181 + } else {
3182 + val = mdio_rd(lane * 0x100 + PHYCTRL_REG4);
3183 + recal = (1 << 15);
3184 + mdio_wr(lane * 0x100 + PHYCTRL_REG4, val | recal);
3185 + }
3186 +}
3187 +
3188 +void tx_pll_de_assert(int lane)
3189 +{
3190 + int recal, val;
3191 +
3192 + if (lane == ALL_LANES) {
3193 + val = mdio_rd(PHYMISC_REG2);
3194 + recal = 0xefff;
3195 + mdio_wr(PHYMISC_REG2, val & recal);
3196 + } else {
3197 + val = mdio_rd(lane * 0x100 + PHYCTRL_REG4);
3198 + recal = 0x7fff;
3199 + mdio_wr(lane * 0x100 + PHYCTRL_REG4, val & recal);
3200 + }
3201 +}
3202 +
3203 +void tx_core_assert(int lane)
3204 +{
3205 + int recal, val, val2, core_reset;
3206 +
3207 +	if (lane == ALL_LANES) {
3208 + val = mdio_rd(PHYMISC_REG2);
3209 + recal = 1 << 10;
3210 + mdio_wr(PHYMISC_REG2, val | recal);
3211 + } else {
3212 + val2 = mdio_rd(PHYMISC_REG3);
3213 + core_reset = (1 << (lane + 8));
3214 + mdio_wr(PHYMISC_REG3, val2 | core_reset);
3215 + }
3216 +}
3217 +
3218 +void lol_disable(int lane)
3219 +{
3220 + int val, mask;
3221 +
3222 + val = mdio_rd(PHYMISC_REG3);
3223 + mask = 1 << (lane + 4);
3224 + mdio_wr(PHYMISC_REG3, val | mask);
3225 +}
3226 +
3227 +void tx_core_de_assert(int lane)
3228 +{
3229 + int val, recal, val2, core_reset;
3230 +
3231 + if (lane == ALL_LANES) {
3232 + val = mdio_rd(PHYMISC_REG2);
3233 + recal = 0xffff - (1 << 10);
3234 + mdio_wr(PHYMISC_REG2, val & recal);
3235 + } else {
3236 + val2 = mdio_rd(PHYMISC_REG3);
3237 + core_reset = 0xffff - (1 << (lane + 8));
3238 + mdio_wr(PHYMISC_REG3, val2 & core_reset);
3239 + }
3240 +}
3241 +
3242 +void tx_restart(int lane)
3243 +{
3244 + tx_core_assert(lane);
3245 + tx_pll_assert(lane);
3246 + tx_pll_de_assert(lane);
3247 + usleep_range(1500, 1600);
3248 + tx_core_de_assert(lane);
3249 +}
3250 +
3251 +void disable_lane(int lane)
3252 +{
3253 + rx_reset_assert(lane);
3254 + rx_powerdown_assert(lane);
3255 + tx_core_assert(lane);
3256 + lol_disable(lane);
3257 +}
3258 +
3259 +void toggle_reset(int lane)
3260 +{
3261 + int reg, val, orig;
3262 +
3263 + if (lane == ALL_LANES) {
3264 + mdio_wr(PHYMISC_REG2, 0x8000);
3265 + udelay(100);
3266 + mdio_wr(PHYMISC_REG2, 0x0000);
3267 + } else {
3268 + reg = lane * 0x100 + PHYCTRL_REG8;
3269 + val = (1 << 6);
3270 + orig = mdio_rd(reg);
3271 + mdio_wr(reg, orig + val);
3272 + udelay(100);
3273 + mdio_wr(reg, orig);
3274 + }
3275 +}
3276 +
3277 +int az_complete_test(int lane)
3278 +{
3279 + int success = 1, value;
3280 +
3281 + if (lane == 0 || lane == ALL_LANES) {
3282 + value = mdio_rd(PHYCTRL_REG5);
3283 + success = success & bit_test(value, 2);
3284 + }
3285 + if (lane == 1 || lane == ALL_LANES) {
3286 + value = mdio_rd(PHYCTRL_REG5 + 0x100);
3287 + success = success & bit_test(value, 2);
3288 + }
3289 + if (lane == 2 || lane == ALL_LANES) {
3290 + value = mdio_rd(PHYCTRL_REG5 + 0x200);
3291 + success = success & bit_test(value, 2);
3292 + }
3293 + if (lane == 3 || lane == ALL_LANES) {
3294 + value = mdio_rd(PHYCTRL_REG5 + 0x300);
3295 + success = success & bit_test(value, 2);
3296 + }
3297 +
3298 + return success;
3299 +}
3300 +
3301 +void save_az_offsets(int lane)
3302 +{
3303 + int i;
3304 +
3305 +#define AZ_OFFSET_LANE_UPDATE(reg, lane) \
3306 + mdio_wr((reg) + (lane) * 0x100, \
3307 + (mdio_rd((reg) + (lane) * 0x100) >> 8))
3308 +
3309 + if (lane == ALL_LANES) {
3310 + for (i = 0; i < ALL_LANES; i++) {
3311 + AZ_OFFSET_LANE_UPDATE(PHYMISC_REG20, i);
3312 + AZ_OFFSET_LANE_UPDATE(PHYMISC_REG20 + 1, i);
3313 + AZ_OFFSET_LANE_UPDATE(PHYMISC_REG20 + 2, i);
3314 + AZ_OFFSET_LANE_UPDATE(PHYMISC_REG20 + 3, i);
3315 + AZ_OFFSET_LANE_UPDATE(PHYMISC_REG21, i);
3316 + AZ_OFFSET_LANE_UPDATE(PHYMISC_REG21 + 1, i);
3317 + AZ_OFFSET_LANE_UPDATE(PHYMISC_REG21 + 2, i);
3318 + AZ_OFFSET_LANE_UPDATE(PHYMISC_REG21 + 3, i);
3319 + AZ_OFFSET_LANE_UPDATE(PHYMISC_REG22, i);
3320 + }
3321 + } else {
3322 + AZ_OFFSET_LANE_UPDATE(PHYMISC_REG20, lane);
3323 + AZ_OFFSET_LANE_UPDATE(PHYMISC_REG20 + 1, lane);
3324 + AZ_OFFSET_LANE_UPDATE(PHYMISC_REG20 + 2, lane);
3325 + AZ_OFFSET_LANE_UPDATE(PHYMISC_REG20 + 3, lane);
3326 + AZ_OFFSET_LANE_UPDATE(PHYMISC_REG21, lane);
3327 + AZ_OFFSET_LANE_UPDATE(PHYMISC_REG21 + 1, lane);
3328 + AZ_OFFSET_LANE_UPDATE(PHYMISC_REG21 + 2, lane);
3329 + AZ_OFFSET_LANE_UPDATE(PHYMISC_REG21 + 3, lane);
3330 + AZ_OFFSET_LANE_UPDATE(PHYMISC_REG22, lane);
3331 + }
3332 +
3333 + mdio_wr(PHYCTRL_REG7, 0x0001);
3334 +}
3335 +
3336 +void save_vco_codes(int lane)
3337 +{
3338 + int i;
3339 +
3340 + if (lane == ALL_LANES) {
3341 + for (i = 0; i < ALL_LANES; i++) {
3342 + vco_codes[i] = mdio_rd(PHYMISC_REG5 + i * 0x100);
3343 + mdio_wr(PHYMISC_REG5 + i * 0x100,
3344 + vco_codes[i] + RX_VCO_CODE_OFFSET);
3345 + }
3346 + } else {
3347 + vco_codes[lane] = mdio_rd(PHYMISC_REG5 + lane * 0x100);
3348 + mdio_wr(PHYMISC_REG5 + lane * 0x100,
3349 + vco_codes[lane] + RX_VCO_CODE_OFFSET);
3350 + }
3351 +}
3352 +
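+/*
+ * Lane recovery sequence: reset the Rx path for one lane (or the whole
+ * chip for ALL_LANES), seed the VCO code, rerun auto-zero calibration
+ * (polled for up to ~6.4 s, offsets saved on success), then restart the
+ * Tx path and confirm PLL lock before saving the new VCO codes.
+ */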
3353 +int inphi_lane_recovery(int lane)
3354 +{
3355 + int i, value, az_pass;
3356 +
3357 + switch (lane) {
3358 + case 0:
3359 + case 1:
3360 + case 2:
3361 + case 3:
3362 + rx_reset_assert(lane);
3363 + mdelay(20);
3364 + break;
3365 + case ALL_LANES:
3366 + mdio_wr(PHYMISC_REG2, 0x9C00);
3367 + mdelay(20);
3368 + do {
3369 + value = mdio_rd(PHYMISC_REG2);
3370 + udelay(10);
3371 + } while (!bit_test(value, 4));
3372 + break;
3373 + default:
3374 + dev_err(&inphi_phydev->mdio.dev,
3375 + "Incorrect usage of APIs in %s driver\n",
3376 + inphi_phydev->drv->name);
3377 + break;
3378 + }
3379 +
3380 + if (lane == ALL_LANES) {
3381 + for (i = 0; i < ALL_LANES; i++)
3382 + mdio_wr(PHYMISC_REG7 + i * 0x100, VCO_CODE);
3383 + } else {
3384 + mdio_wr(PHYMISC_REG7 + lane * 0x100, VCO_CODE);
3385 + }
3386 +
3387 +	if (lane == ALL_LANES) {
3388 +		for (i = 0; i < ALL_LANES; i++)
3389 +			mdio_wr(PHYCTRL_REG5 + i * 0x100, 0x0418);
3390 +	} else {
3391 +		mdio_wr(PHYCTRL_REG5 + lane * 0x100, 0x0418);
+	}
3392 +
3393 + mdio_wr(PHYCTRL_REG7, 0x0000);
3394 +
3395 + rx_reset_de_assert(lane);
3396 +
3397 + if (lane == ALL_LANES) {
3398 + for (i = 0; i < ALL_LANES; i++) {
3399 + mdio_wr(PHYCTRL_REG5 + i * 0x100, 0x0410);
3400 + mdio_wr(PHYCTRL_REG5 + i * 0x100, 0x0412);
3401 + }
3402 + } else {
3403 + mdio_wr(PHYCTRL_REG5 + lane * 0x100, 0x0410);
3404 + mdio_wr(PHYCTRL_REG5 + lane * 0x100, 0x0412);
3405 + }
3406 +
3407 + for (i = 0; i < 64; i++) {
3408 + mdelay(100);
3409 + az_pass = az_complete_test(lane);
3410 + if (az_pass) {
3411 + save_az_offsets(lane);
3412 + break;
3413 + }
3414 + }
3415 +
3416 + if (!az_pass) {
3417 + pr_info("in112525: AZ calibration fail @ lane=%d\n", lane);
3418 + return -1;
3419 + }
3420 +
3421 + if (lane == ALL_LANES) {
3422 + mdio_wr(PHYMISC_REG8, 0x0002);
3423 + mdio_wr(PHYMISC_REG9, 0x2028);
3424 + mdio_wr(PHYCTRL_REG6, 0x0010);
3425 + usleep_range(1000, 1200);
3426 + mdio_wr(PHYCTRL_REG6, 0x0110);
3427 + mdelay(30);
3428 + mdio_wr(PHYMISC_REG9, 0x3020);
3429 + } else {
3430 + mdio_wr(PHYMISC_REG4 + lane * 0x100, 0x0002);
3431 + mdio_wr(PHYMISC_REG6 + lane * 0x100, 0x2028);
3432 + mdio_wr(PHYCTRL_REG5 + lane * 0x100, 0x0010);
3433 + usleep_range(1000, 1200);
3434 + mdio_wr(PHYCTRL_REG5 + lane * 0x100, 0x0110);
3435 + mdelay(30);
3436 + mdio_wr(PHYMISC_REG6 + lane * 0x100, 0x3020);
3437 + }
3438 +
3439 + if (lane == ALL_LANES) {
3440 + mdio_wr(PHYMISC_REG2, 0x1C00);
3441 + mdio_wr(PHYMISC_REG2, 0x0C00);
3442 + } else {
3443 + tx_restart(lane);
3444 + mdelay(11);
3445 + }
3446 +
3447 + if (lane == ALL_LANES) {
3448 + if (bit_test(mdio_rd(PHYMISC_REG2), 6) == 0)
3449 + return -1;
3450 + } else {
3451 + if (tx_pll_lock_test(lane) == 0)
3452 + return -1;
3453 + }
3454 +
3455 + save_vco_codes(lane);
3456 +
3457 + if (lane == ALL_LANES) {
3458 + mdio_wr(PHYMISC_REG2, 0x0400);
3459 + mdio_wr(PHYMISC_REG2, 0x0000);
3460 + value = mdio_rd(PHYCTRL_REG1);
3461 + value = value & 0xffbf;
3462 + mdio_wr(PHYCTRL_REG2, value);
3463 + } else {
3464 + tx_core_de_assert(lane);
3465 + }
3466 +
3467 + if (lane == ALL_LANES) {
3468 + mdio_wr(PHYMISC_REG1, 0x8000);
3469 + mdio_wr(PHYMISC_REG1, 0x0000);
3470 + }
3471 + mdio_rd(PHYMISC_REG1);
3472 + mdio_rd(PHYMISC_REG1);
3473 + usleep_range(1000, 1200);
3474 + mdio_rd(PHYSTAT_REG1);
3475 + mdio_rd(PHYSTAT_REG2);
3476 +
3477 + return 0;
3478 +}
3479 +
3480 +static void inphi_work_handler(struct work_struct *w)
3481 +{
3482 + int all_lanes_lock, lane0_lock, lane1_lock, lane2_lock, lane3_lock;
3483 +
3484 +	lane0_lock = bit_test(mdio_rd(PHYSTAT_REG3), 15);
3485 +	lane1_lock = bit_test(mdio_rd(PHYSTAT_REG3 + 0x100), 15);
3486 +	lane2_lock = bit_test(mdio_rd(PHYSTAT_REG3 + 0x200), 15);
3487 +	lane3_lock = bit_test(mdio_rd(PHYSTAT_REG3 + 0x300), 15);
3488 +
3489 + /* check if the chip had any successful lane lock from the previous
3490 + * stage (e.g. u-boot)
3491 + */
3492 + all_lanes_lock = lane0_lock | lane1_lock | lane2_lock | lane3_lock;
3493 +
3494 + if (!all_lanes_lock) {
3495 + /* start fresh */
3496 + inphi_lane_recovery(ALL_LANES);
3497 + } else {
3498 + if (!lane0_lock)
3499 + inphi_lane_recovery(0);
3500 + if (!lane1_lock)
3501 + inphi_lane_recovery(1);
3502 + if (!lane2_lock)
3503 + inphi_lane_recovery(2);
3504 + if (!lane3_lock)
3505 + inphi_lane_recovery(3);
3506 + }
3507 +
3508 +	queue_delayed_work(wq, &inphi_work, poll_interval);
3509 +}
3510 +
3511 +int inphi_probe(struct phy_device *phydev)
3512 +{
3513 + int phy_id = 0, id_lsb = 0, id_msb = 0;
3514 +
3515 + /* setup the inphi_phydev ptr for mdio_rd/mdio_wr APIs */
3516 + inphi_phydev = phydev;
3517 +
3518 + /* Read device id from phy registers */
3519 +	id_msb = mdio_rd(INPHI_S03_DEVICE_ID_MSB);
3520 +	if (id_msb < 0)
3521 +		return -ENXIO;
3522 +
3523 +	phy_id = id_msb << 16;
3524 +
3525 +	id_lsb = mdio_rd(INPHI_S03_DEVICE_ID_LSB);
3526 +	if (id_lsb < 0)
3527 +		return -ENXIO;
3528 +
3529 +	phy_id |= id_lsb;
3530 +
3531 + /* Make sure the device tree binding matched the driver with the
3532 + * right device.
3533 + */
3534 + if (phy_id != phydev->drv->phy_id) {
3535 + dev_err(&phydev->mdio.dev,
3536 + "Error matching phy with %s driver\n",
3537 + phydev->drv->name);
3538 + return -ENODEV;
3539 + }
3540 +
3541 +	poll_interval = msecs_to_jiffies(INPHI_POLL_DELAY);
3544 +
3545 + wq = create_singlethread_workqueue("inphi_kmod");
3546 + if (wq) {
3547 +		queue_delayed_work(wq, &inphi_work, poll_interval);
3548 + } else {
3549 + dev_err(&phydev->mdio.dev,
3550 + "Error creating kernel workqueue for %s driver\n",
3551 + phydev->drv->name);
3552 + return -ENOMEM;
3553 + }
3554 +
3555 + return 0;
3556 +}
3557 +
3558 +static struct phy_driver inphi_driver[] = {
3559 +{
3560 + .phy_id = PHY_ID_IN112525,
3561 + .phy_id_mask = 0x0ff0fff0,
3562 + .name = "Inphi 112525_S03",
3563 + .features = PHY_GBIT_FEATURES,
3564 +	.probe		= inphi_probe,
3565 +},
3566 +};
3567 +
3568 +module_phy_driver(inphi_driver);
3569 +
3570 +static struct mdio_device_id __maybe_unused inphi_tbl[] = {
3571 + { PHY_ID_IN112525, 0x0ff0fff0},
3572 + {},
3573 +};
3574 +
3575 +MODULE_DEVICE_TABLE(mdio, inphi_tbl);
3576 --- /dev/null
3577 +++ b/drivers/net/phy/mdio-mux-multiplexer.c
3578 @@ -0,0 +1,122 @@
3579 +// SPDX-License-Identifier: GPL-2.0+
3580 +/* MDIO bus multiplexer using kernel multiplexer subsystem
3581 + *
3582 + * Copyright 2019 NXP
3583 + */
3584 +
3585 +#include <linux/platform_device.h>
3586 +#include <linux/mdio-mux.h>
3587 +#include <linux/module.h>
3588 +#include <linux/mux/consumer.h>
3589 +
3590 +struct mdio_mux_multiplexer_state {
3591 + struct mux_control *muxc;
3592 + bool do_deselect;
3593 + void *mux_handle;
3594 +};
3595 +
3596 +/**
3597 + * mdio_mux_multiplexer_switch_fn - This function is called by the mdio-mux
3598 + * layer when it thinks the mdio bus
3599 + * multiplexer needs to switch.
3600 + * @current_child: current value of the mux register.
3601 + * @data: Private data passed to this switch_fn via mdio_mux_init(); here it
3602 + *        is the platform device.
3603 + * via mdio_mux_init(.., .., .., .., data, ..).
3604 + *
3605 + * The first time this function is called, current_child == -1.
3606 + * If current_child == desired_child, then the mux is already set to the
3607 + * correct bus.
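+ *
+ * A typical sequence (illustrative): the first call arrives with
+ * current_child == -1 and selects the mux control for @desired_child; each
+ * later switch deselects the previously selected child (do_deselect)
+ * before selecting the new one.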
3608 + */
3609 +static int mdio_mux_multiplexer_switch_fn(int current_child, int desired_child,
3610 + void *data)
3611 +{
3612 + struct platform_device *pdev;
3613 + struct mdio_mux_multiplexer_state *s;
3614 + int ret = 0;
3615 +
3616 + pdev = (struct platform_device *)data;
3617 + s = platform_get_drvdata(pdev);
3618 +
3619 +	if (current_child == desired_child)
3620 + return 0;
3621 +
3622 + if (s->do_deselect)
3623 + ret = mux_control_deselect(s->muxc);
3624 + if (ret) {
3625 + dev_err(&pdev->dev, "mux_control_deselect failed in %s: %d\n",
3626 + __func__, ret);
3627 + return ret;
3628 + }
3629 +
3630 + ret = mux_control_select(s->muxc, desired_child);
3631 + if (!ret) {
3632 + dev_dbg(&pdev->dev, "%s %d -> %d\n", __func__, current_child,
3633 + desired_child);
3634 + s->do_deselect = true;
3635 + } else {
3636 + s->do_deselect = false;
3637 + }
3638 +
3639 + return ret;
3640 +}
3641 +
3642 +static int mdio_mux_multiplexer_probe(struct platform_device *pdev)
3643 +{
3644 + struct device *dev = &pdev->dev;
3645 + struct mdio_mux_multiplexer_state *s;
3646 + int ret = 0;
3647 +
3648 + s = devm_kzalloc(&pdev->dev, sizeof(*s), GFP_KERNEL);
3649 + if (!s)
3650 + return -ENOMEM;
3651 +
3652 + s->muxc = devm_mux_control_get(dev, NULL);
3653 + if (IS_ERR(s->muxc)) {
3654 + ret = PTR_ERR(s->muxc);
3655 + if (ret != -EPROBE_DEFER)
3656 + dev_err(&pdev->dev, "Failed to get mux: %d\n", ret);
3657 + return ret;
3658 + }
3659 +
3660 + platform_set_drvdata(pdev, s);
3661 +
3662 + ret = mdio_mux_init(&pdev->dev, pdev->dev.of_node,
3663 + mdio_mux_multiplexer_switch_fn, &s->mux_handle,
3664 + pdev, NULL);
3665 +
3666 + return ret;
3667 +}
3668 +
3669 +static int mdio_mux_multiplexer_remove(struct platform_device *pdev)
3670 +{
3671 + struct mdio_mux_multiplexer_state *s = platform_get_drvdata(pdev);
3672 +
3673 + mdio_mux_uninit(s->mux_handle);
3674 +
3675 + if (s->do_deselect)
3676 + mux_control_deselect(s->muxc);
3677 +
3678 + return 0;
3679 +}
3680 +
3681 +static const struct of_device_id mdio_mux_multiplexer_match[] = {
3682 + { .compatible = "mdio-mux-multiplexer", },
3683 + {},
3684 +};
3685 +MODULE_DEVICE_TABLE(of, mdio_mux_multiplexer_match);
3686 +
3687 +static struct platform_driver mdio_mux_multiplexer_driver = {
3688 + .driver = {
3689 + .name = "mdio-mux-multiplexer",
3690 + .of_match_table = mdio_mux_multiplexer_match,
3691 + },
3692 + .probe = mdio_mux_multiplexer_probe,
3693 + .remove = mdio_mux_multiplexer_remove,
3694 +};
3695 +
3696 +module_platform_driver(mdio_mux_multiplexer_driver);
3697 +
3698 +MODULE_DESCRIPTION("MDIO bus multiplexer using kernel multiplexer subsystem");
3699 +MODULE_AUTHOR("Pankaj Bansal <pankaj.bansal@nxp.com>");
3700 +MODULE_LICENSE("GPL");
3701 --- a/drivers/net/phy/swphy.c
3702 +++ b/drivers/net/phy/swphy.c
3703 @@ -77,6 +77,7 @@ static const struct swmii_regs duplex[]
3704 static int swphy_decode_speed(int speed)
3705 {
3706 switch (speed) {
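+	/* swphy models at most a 1G MII PHY: report 10G fixed links as 1000 */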
3707 + case 10000:
3708 case 1000:
3709 return SWMII_SPEED_1000;
3710 case 100:
3711 --- a/include/linux/phy.h
3712 +++ b/include/linux/phy.h
3713 @@ -87,6 +87,7 @@ typedef enum {
3714 PHY_INTERFACE_MODE_XAUI,
3715 /* 10GBASE-KR, XFI, SFI - single lane 10G Serdes */
3716 PHY_INTERFACE_MODE_10GKR,
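+	/* SGMII interface run at 2.5 Gbps */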
3717 + PHY_INTERFACE_MODE_2500SGMII,
3718 PHY_INTERFACE_MODE_MAX,
3719 } phy_interface_t;
3720
3721 @@ -159,6 +160,8 @@ static inline const char *phy_modes(phy_
3722 return "xaui";
3723 case PHY_INTERFACE_MODE_10GKR:
3724 return "10gbase-kr";
3725 + case PHY_INTERFACE_MODE_2500SGMII:
3726 + return "sgmii-2500";
3727 default:
3728 return "unknown";
3729 }