layerscape: refresh patches
[openwrt/openwrt.git] target/linux/layerscape/patches-4.9/703-phy-support-layerscape.patch
1 From be07319b9897738a4ab1501880b7dd9be26eba66 Mon Sep 17 00:00:00 2001
2 From: Yangbo Lu <yangbo.lu@nxp.com>
3 Date: Mon, 25 Sep 2017 11:54:28 +0800
4 Subject: [PATCH] phy: support layerscape
5
6 This is an integrated patch for layerscape mdio-phy support.
7
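For reference, the fsl_backplane driver added below binds through device
tree properties read in fsl_backplane_probe(): "backplane-mode"
("1000base-kx" or "10gbase-kr"), "fsl,lane-handle" (phandle to the SerDes
register node) and "fsl,lane-reg" (per-lane register offset and size).
A hypothetical PHY node could look like this; the node labels and the
lane offset are only illustrative, the property names come from the
driver:

	&mdio0 {
		ethernet-phy@0 {
			reg = <0x0>;
			backplane-mode = "10gbase-kr";
			/* &serdes1 and 0x9c0 are illustrative; 0x40 is the
			 * size of the per-lane control/status block.
			 */
			fsl,lane-handle = <&serdes1>;
			fsl,lane-reg = <0x9c0 0x40>;
		};
	};
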
8 Signed-off-by: Bogdan Purcareata <bogdan.purcareata@nxp.com>
9 Signed-off-by: Zhang Ying-22455 <ying.zhang22455@nxp.com>
10 Signed-off-by: costi <constantin.tudor@freescale.com>
11 Signed-off-by: Madalin Bucur <madalin.bucur@freescale.com>
12 Signed-off-by: Shaohui Xie <Shaohui.Xie@nxp.com>
13 Signed-off-by: Florian Fainelli <f.fainelli@gmail.com>
14 Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
15 ---
16 drivers/net/phy/Kconfig | 11 +
17 drivers/net/phy/Makefile | 2 +
18 drivers/net/phy/aquantia.c | 28 +
19 drivers/net/phy/cortina.c | 118 ++++
20 drivers/net/phy/fsl_backplane.c | 1358 +++++++++++++++++++++++++++++++++++++++
21 drivers/net/phy/phy.c | 23 +-
22 drivers/net/phy/phy_device.c | 6 +-
23 drivers/net/phy/swphy.c | 1 +
24 include/linux/phy.h | 4 +
25 9 files changed, 1544 insertions(+), 7 deletions(-)
26 create mode 100644 drivers/net/phy/cortina.c
27 create mode 100644 drivers/net/phy/fsl_backplane.c
28
29 --- a/drivers/net/phy/Kconfig
30 +++ b/drivers/net/phy/Kconfig
31 @@ -89,6 +89,12 @@ config MDIO_BUS_MUX_MMIOREG
32 config MDIO_CAVIUM
33 tristate
34
35 +config MDIO_FSL_BACKPLANE
36 + tristate "Support for backplane on Freescale XFI interface"
37 + depends on OF_MDIO
38 + help
39 +	  This module provides a driver for the backplane on the Freescale XFI interface.
40 +
41 config MDIO_GPIO
42 tristate "GPIO lib-based bitbanged MDIO buses"
43 depends on MDIO_BITBANG && GPIOLIB
44 @@ -298,6 +304,11 @@ config CICADA_PHY
45 ---help---
46 Currently supports the cis8204
47
48 +config CORTINA_PHY
49 + tristate "Cortina EDC CDR 10G Ethernet PHY"
50 + ---help---
51 +	  Currently supports the CS4340 PHY.
52 +
53 config DAVICOM_PHY
54 tristate "Davicom PHYs"
55 ---help---
56 --- a/drivers/net/phy/Makefile
57 +++ b/drivers/net/phy/Makefile
58 @@ -30,6 +30,7 @@ obj-$(CONFIG_MDIO_BUS_MUX_BCM_IPROC) +=
59 obj-$(CONFIG_MDIO_BUS_MUX_GPIO) += mdio-mux-gpio.o
60 obj-$(CONFIG_MDIO_BUS_MUX_MMIOREG) += mdio-mux-mmioreg.o
61 obj-$(CONFIG_MDIO_CAVIUM) += mdio-cavium.o
62 +obj-$(CONFIG_MDIO_FSL_BACKPLANE) += fsl_backplane.o
63 obj-$(CONFIG_MDIO_GPIO) += mdio-gpio.o
64 obj-$(CONFIG_MDIO_HISI_FEMAC) += mdio-hisi-femac.o
65 obj-$(CONFIG_MDIO_MOXART) += mdio-moxart.o
66 @@ -48,6 +49,7 @@ obj-$(CONFIG_BCM_CYGNUS_PHY) += bcm-cygn
67 obj-$(CONFIG_BCM_NET_PHYLIB) += bcm-phy-lib.o
68 obj-$(CONFIG_BROADCOM_PHY) += broadcom.o
69 obj-$(CONFIG_CICADA_PHY) += cicada.o
70 +obj-$(CONFIG_CORTINA_PHY) += cortina.o
71 obj-$(CONFIG_DAVICOM_PHY) += davicom.o
72 obj-$(CONFIG_DP83640_PHY) += dp83640.o
73 obj-$(CONFIG_DP83848_PHY) += dp83848.o
74 --- a/drivers/net/phy/aquantia.c
75 +++ b/drivers/net/phy/aquantia.c
76 @@ -21,6 +21,8 @@
77 #define PHY_ID_AQ1202 0x03a1b445
78 #define PHY_ID_AQ2104 0x03a1b460
79 #define PHY_ID_AQR105 0x03a1b4a2
80 +#define PHY_ID_AQR106 0x03a1b4d0
81 +#define PHY_ID_AQR107 0x03a1b4e0
82 #define PHY_ID_AQR405 0x03a1b4b0
83
84 #define PHY_AQUANTIA_FEATURES (SUPPORTED_10000baseT_Full | \
85 @@ -154,6 +156,30 @@ static struct phy_driver aquantia_driver
86 .read_status = aquantia_read_status,
87 },
88 {
89 + .phy_id = PHY_ID_AQR106,
90 + .phy_id_mask = 0xfffffff0,
91 + .name = "Aquantia AQR106",
92 + .features = PHY_AQUANTIA_FEATURES,
93 + .flags = PHY_HAS_INTERRUPT,
94 + .aneg_done = aquantia_aneg_done,
95 + .config_aneg = aquantia_config_aneg,
96 + .config_intr = aquantia_config_intr,
97 + .ack_interrupt = aquantia_ack_interrupt,
98 + .read_status = aquantia_read_status,
99 +},
100 +{
101 + .phy_id = PHY_ID_AQR107,
102 + .phy_id_mask = 0xfffffff0,
103 + .name = "Aquantia AQR107",
104 + .features = PHY_AQUANTIA_FEATURES,
105 + .flags = PHY_HAS_INTERRUPT,
106 + .aneg_done = aquantia_aneg_done,
107 + .config_aneg = aquantia_config_aneg,
108 + .config_intr = aquantia_config_intr,
109 + .ack_interrupt = aquantia_ack_interrupt,
110 + .read_status = aquantia_read_status,
111 +},
112 +{
113 .phy_id = PHY_ID_AQR405,
114 .phy_id_mask = 0xfffffff0,
115 .name = "Aquantia AQR405",
116 @@ -173,6 +199,8 @@ static struct mdio_device_id __maybe_unu
117 { PHY_ID_AQ1202, 0xfffffff0 },
118 { PHY_ID_AQ2104, 0xfffffff0 },
119 { PHY_ID_AQR105, 0xfffffff0 },
120 + { PHY_ID_AQR106, 0xfffffff0 },
121 + { PHY_ID_AQR107, 0xfffffff0 },
122 { PHY_ID_AQR405, 0xfffffff0 },
123 { }
124 };
125 --- /dev/null
126 +++ b/drivers/net/phy/cortina.c
127 @@ -0,0 +1,118 @@
128 +/*
129 + * Copyright 2017 NXP
130 + *
131 + * This program is free software; you can redistribute it and/or modify
132 + * it under the terms of the GNU General Public License as published by
133 + * the Free Software Foundation; either version 2 of the License, or
134 + * (at your option) any later version.
135 + *
136 + * This program is distributed in the hope that it will be useful,
137 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
138 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
139 + * GNU General Public License for more details.
140 + *
141 + * CORTINA is a registered trademark of Cortina Systems, Inc.
142 + *
143 + */
144 +#include <linux/module.h>
145 +#include <linux/phy.h>
146 +
147 +#define PHY_ID_CS4340 0x13e51002
148 +
149 +#define VILLA_GLOBAL_CHIP_ID_LSB 0x0
150 +#define VILLA_GLOBAL_CHIP_ID_MSB 0x1
151 +
152 +#define VILLA_GLOBAL_GPIO_1_INTS 0x017
153 +
154 +static int cortina_read_reg(struct phy_device *phydev, u16 regnum)
155 +{
156 + return mdiobus_read(phydev->mdio.bus, phydev->mdio.addr,
157 + MII_ADDR_C45 | regnum);
158 +}
159 +
160 +static int cortina_config_aneg(struct phy_device *phydev)
161 +{
162 + phydev->supported = SUPPORTED_10000baseT_Full;
163 + phydev->advertising = SUPPORTED_10000baseT_Full;
164 +
165 + return 0;
166 +}
167 +
168 +static int cortina_read_status(struct phy_device *phydev)
169 +{
170 + int gpio_int_status, ret = 0;
171 +
172 + gpio_int_status = cortina_read_reg(phydev, VILLA_GLOBAL_GPIO_1_INTS);
173 + if (gpio_int_status < 0) {
174 + ret = gpio_int_status;
175 + goto err;
176 + }
177 +
178 + if (gpio_int_status & 0x8) {
179 + /* link is up when the EDC converged status is set */
180 + phydev->speed = SPEED_10000;
181 + phydev->duplex = DUPLEX_FULL;
182 + phydev->link = 1;
183 + } else {
184 + phydev->link = 0;
185 + }
186 +
187 +err:
188 + return ret;
189 +}
190 +
191 +static int cortina_soft_reset(struct phy_device *phydev)
192 +{
193 + return 0;
194 +}
195 +
196 +static int cortina_probe(struct phy_device *phydev)
197 +{
198 + u32 phy_id = 0;
199 + int id_lsb = 0, id_msb = 0;
200 +
201 + /* Read device id from phy registers. */
202 + id_lsb = cortina_read_reg(phydev, VILLA_GLOBAL_CHIP_ID_LSB);
203 + if (id_lsb < 0)
204 + return -ENXIO;
205 +
206 + phy_id = id_lsb << 16;
207 +
208 + id_msb = cortina_read_reg(phydev, VILLA_GLOBAL_CHIP_ID_MSB);
209 + if (id_msb < 0)
210 + return -ENXIO;
211 +
212 + phy_id |= id_msb;
213 +
214 + /* Make sure the device tree binding matched the driver with the
215 + * right device.
216 + */
217 + if (phy_id != phydev->drv->phy_id) {
218 + phydev_err(phydev, "Error matching phy with %s driver\n",
219 + phydev->drv->name);
220 + return -ENODEV;
221 + }
222 +
223 + return 0;
224 +}
225 +
226 +static struct phy_driver cortina_driver[] = {
227 +{
228 + .phy_id = PHY_ID_CS4340,
229 + .phy_id_mask = 0xffffffff,
230 + .name = "Cortina CS4340",
231 + .config_aneg = cortina_config_aneg,
232 + .read_status = cortina_read_status,
233 + .soft_reset = cortina_soft_reset,
234 + .probe = cortina_probe,
235 +},
236 +};
237 +
238 +module_phy_driver(cortina_driver);
239 +
240 +static struct mdio_device_id __maybe_unused cortina_tbl[] = {
241 + { PHY_ID_CS4340, 0xffffffff},
242 + {},
243 +};
244 +
245 +MODULE_DEVICE_TABLE(mdio, cortina_tbl);
246 --- /dev/null
247 +++ b/drivers/net/phy/fsl_backplane.c
248 @@ -0,0 +1,1358 @@
249 +/* Freescale backplane driver.
250 + * Author: Shaohui Xie <Shaohui.Xie@freescale.com>
251 + *
252 + * Copyright 2015 Freescale Semiconductor, Inc.
253 + *
254 + * Licensed under the GPL-2 or later.
255 + */
256 +
257 +#include <linux/kernel.h>
258 +#include <linux/module.h>
259 +#include <linux/mii.h>
260 +#include <linux/mdio.h>
261 +#include <linux/ethtool.h>
262 +#include <linux/phy.h>
263 +#include <linux/io.h>
264 +#include <linux/of.h>
265 +#include <linux/of_net.h>
266 +#include <linux/of_address.h>
267 +#include <linux/of_platform.h>
268 +#include <linux/timer.h>
269 +#include <linux/delay.h>
270 +#include <linux/workqueue.h>
271 +
272 +/* XFI PCS Device Identifier */
273 +#define FSL_PCS_PHY_ID 0x0083e400
274 +
275 +/* Freescale KR PMD registers */
276 +#define FSL_KR_PMD_CTRL 0x96
277 +#define FSL_KR_PMD_STATUS 0x97
278 +#define FSL_KR_LP_CU 0x98
279 +#define FSL_KR_LP_STATUS 0x99
280 +#define FSL_KR_LD_CU 0x9a
281 +#define FSL_KR_LD_STATUS 0x9b
282 +
283 +/* Freescale KR PMD defines */
284 +#define PMD_RESET 0x1
285 +#define PMD_STATUS_SUP_STAT 0x4
286 +#define PMD_STATUS_FRAME_LOCK 0x2
287 +#define TRAIN_EN 0x3
288 +#define TRAIN_DISABLE 0x1
289 +#define RX_STAT 0x1
290 +
291 +#define FSL_KR_RX_LINK_STAT_MASK 0x1000
292 +#define FSL_XFI_PCS_10GR_SR1 0x20
293 +
294 +/* Freescale KX PCS mode register */
295 +#define FSL_PCS_IF_MODE 0x8014
296 +
297 +/* Freescale KX PCS mode register init value */
298 +#define IF_MODE_INIT 0x8
299 +
300 +/* Freescale KX/KR AN registers */
301 +#define FSL_AN_AD1 0x11
302 +#define FSL_AN_BP_STAT 0x30
303 +
304 +/* Freescale KX/KR AN registers defines */
305 +#define AN_CTRL_INIT 0x1200
306 +#define KX_AN_AD1_INIT 0x25
307 +#define KR_AN_AD1_INIT 0x85
308 +#define AN_LNK_UP_MASK 0x4
309 +#define KR_AN_MASK 0x8
310 +#define TRAIN_FAIL 0x8
311 +
312 +/* C(-1) */
313 +#define BIN_M1 0
314 +/* C(1) */
315 +#define BIN_LONG 1
316 +#define BIN_M1_SEL 6
317 +#define BIN_Long_SEL 7
318 +#define CDR_SEL_MASK 0x00070000
319 +#define BIN_SNAPSHOT_NUM 5
320 +#define BIN_M1_THRESHOLD 3
321 +#define BIN_LONG_THRESHOLD 2
322 +
323 +#define PRE_COE_SHIFT 22
324 +#define POST_COE_SHIFT 16
325 +#define ZERO_COE_SHIFT 8
326 +
327 +#define PRE_COE_MAX 0x0
328 +#define PRE_COE_MIN 0x8
329 +#define POST_COE_MAX 0x0
330 +#define POST_COE_MIN 0x10
331 +#define ZERO_COE_MAX 0x30
332 +#define ZERO_COE_MIN 0x0
333 +
334 +#define TECR0_INIT 0x24200000
335 +#define RATIO_PREQ 0x3
336 +#define RATIO_PST1Q 0xd
337 +#define RATIO_EQ 0x20
338 +
339 +#define GCR0_RESET_MASK 0x600000
340 +#define GCR1_SNP_START_MASK 0x00000040
341 +#define GCR1_CTL_SNP_START_MASK 0x00002000
342 +#define GCR1_REIDL_TH_MASK 0x00700000
343 +#define GCR1_REIDL_EX_SEL_MASK 0x000c0000
344 +#define GCR1_REIDL_ET_MAS_MASK 0x00004000
345 +#define TECR0_AMP_RED_MASK 0x0000003f
346 +
347 +#define RECR1_CTL_SNP_DONE_MASK 0x00000002
348 +#define RECR1_SNP_DONE_MASK 0x00000004
349 +#define TCSR1_SNP_DATA_MASK 0x0000ffc0
350 +#define TCSR1_SNP_DATA_SHIFT 6
351 +#define TCSR1_EQ_SNPBIN_SIGN_MASK 0x100
352 +
353 +#define RECR1_GAINK2_MASK 0x0f000000
354 +#define RECR1_GAINK2_SHIFT 24
355 +#define RECR1_GAINK3_MASK 0x000f0000
356 +#define RECR1_GAINK3_SHIFT 16
357 +#define RECR1_OFFSET_MASK 0x00003f80
358 +#define RECR1_OFFSET_SHIFT 7
359 +#define RECR1_BLW_MASK 0x00000f80
360 +#define RECR1_BLW_SHIFT 7
361 +#define EYE_CTRL_SHIFT 12
362 +#define BASE_WAND_SHIFT 10
363 +
364 +#define XGKR_TIMEOUT 1050
365 +
366 +#define INCREMENT 1
367 +#define DECREMENT 2
368 +#define TIMEOUT_LONG 3
369 +#define TIMEOUT_M1 3
370 +
371 +#define RX_READY_MASK 0x8000
372 +#define PRESET_MASK 0x2000
373 +#define INIT_MASK 0x1000
374 +#define COP1_MASK 0x30
375 +#define COP1_SHIFT 4
376 +#define COZ_MASK 0xc
377 +#define COZ_SHIFT 2
378 +#define COM1_MASK 0x3
379 +#define COM1_SHIFT 0
380 +#define REQUEST_MASK 0x3f
381 +#define LD_ALL_MASK (PRESET_MASK | INIT_MASK | \
382 + COP1_MASK | COZ_MASK | COM1_MASK)
383 +
384 +#define NEW_ALGORITHM_TRAIN_TX
385 +#ifdef NEW_ALGORITHM_TRAIN_TX
386 +#define FORCE_INC_COP1_NUMBER 0
387 +#define FORCE_INC_COM1_NUMBER 1
388 +#endif
389 +
390 +#define VAL_INVALID 0xff
391 +
392 +static const u32 preq_table[] = {0x0, 0x1, 0x3, 0x5,
393 + 0x7, 0x9, 0xb, 0xc, VAL_INVALID};
394 +static const u32 pst1q_table[] = {0x0, 0x1, 0x3, 0x5, 0x7,
395 + 0x9, 0xb, 0xd, 0xf, 0x10, VAL_INVALID};
396 +
397 +enum backplane_mode {
398 + PHY_BACKPLANE_1000BASE_KX,
399 + PHY_BACKPLANE_10GBASE_KR,
400 + PHY_BACKPLANE_INVAL
401 +};
402 +
403 +enum coe_filed {
404 + COE_COP1,
405 + COE_COZ,
406 + COE_COM
407 +};
408 +
409 +enum coe_update {
410 + COE_NOTUPDATED,
411 + COE_UPDATED,
412 + COE_MIN,
413 + COE_MAX,
414 + COE_INV
415 +};
416 +
417 +enum train_state {
418 + DETECTING_LP,
419 + TRAINED,
420 +};
421 +
422 +struct per_lane_ctrl_status {
423 + __be32 gcr0; /* 0x.000 - General Control Register 0 */
424 + __be32 gcr1; /* 0x.004 - General Control Register 1 */
425 + __be32 gcr2; /* 0x.008 - General Control Register 2 */
426 + __be32 resv1; /* 0x.00C - Reserved */
427 + __be32 recr0; /* 0x.010 - Receive Equalization Control Register 0 */
428 + __be32 recr1; /* 0x.014 - Receive Equalization Control Register 1 */
429 + __be32 tecr0; /* 0x.018 - Transmit Equalization Control Register 0 */
430 + __be32 resv2; /* 0x.01C - Reserved */
431 + __be32 tlcr0; /* 0x.020 - TTL Control Register 0 */
432 + __be32 tlcr1; /* 0x.024 - TTL Control Register 1 */
433 + __be32 tlcr2; /* 0x.028 - TTL Control Register 2 */
434 + __be32 tlcr3; /* 0x.02C - TTL Control Register 3 */
435 + __be32 tcsr0; /* 0x.030 - Test Control/Status Register 0 */
436 + __be32 tcsr1; /* 0x.034 - Test Control/Status Register 1 */
437 + __be32 tcsr2; /* 0x.038 - Test Control/Status Register 2 */
438 + __be32 tcsr3; /* 0x.03C - Test Control/Status Register 3 */
439 +};
440 +
441 +struct tx_condition {
442 + bool bin_m1_late_early;
443 + bool bin_long_late_early;
444 + bool bin_m1_stop;
445 + bool bin_long_stop;
446 + bool tx_complete;
447 + bool sent_init;
448 + int m1_min_max_cnt;
449 + int long_min_max_cnt;
450 +#ifdef NEW_ALGORITHM_TRAIN_TX
451 + int pre_inc;
452 + int post_inc;
453 +#endif
454 +};
455 +
456 +struct fsl_xgkr_inst {
457 + void *reg_base;
458 + struct phy_device *phydev;
459 + struct tx_condition tx_c;
460 + struct delayed_work xgkr_wk;
461 + enum train_state state;
462 + u32 ld_update;
463 + u32 ld_status;
464 + u32 ratio_preq;
465 + u32 ratio_pst1q;
466 + u32 adpt_eq;
467 +};
468 +
469 +static void tx_condition_init(struct tx_condition *tx_c)
470 +{
471 + tx_c->bin_m1_late_early = true;
472 + tx_c->bin_long_late_early = false;
473 + tx_c->bin_m1_stop = false;
474 + tx_c->bin_long_stop = false;
475 + tx_c->tx_complete = false;
476 + tx_c->sent_init = false;
477 + tx_c->m1_min_max_cnt = 0;
478 + tx_c->long_min_max_cnt = 0;
479 +#ifdef NEW_ALGORITHM_TRAIN_TX
480 + tx_c->pre_inc = FORCE_INC_COM1_NUMBER;
481 + tx_c->post_inc = FORCE_INC_COP1_NUMBER;
482 +#endif
483 +}
484 +
485 +void tune_tecr0(struct fsl_xgkr_inst *inst)
486 +{
487 + struct per_lane_ctrl_status *reg_base = inst->reg_base;
488 + u32 val;
489 +
490 + val = TECR0_INIT |
491 + inst->adpt_eq << ZERO_COE_SHIFT |
492 + inst->ratio_preq << PRE_COE_SHIFT |
493 + inst->ratio_pst1q << POST_COE_SHIFT;
494 +
495 + /* reset the lane */
496 + iowrite32(ioread32(&reg_base->gcr0) & ~GCR0_RESET_MASK,
497 + &reg_base->gcr0);
498 + udelay(1);
499 + iowrite32(val, &reg_base->tecr0);
500 + udelay(1);
501 + /* unreset the lane */
502 + iowrite32(ioread32(&reg_base->gcr0) | GCR0_RESET_MASK,
503 + &reg_base->gcr0);
504 + udelay(1);
505 +}
506 +
507 +static void start_lt(struct phy_device *phydev)
508 +{
509 + phy_write_mmd(phydev, MDIO_MMD_PMAPMD, FSL_KR_PMD_CTRL, TRAIN_EN);
510 +}
511 +
512 +static void stop_lt(struct phy_device *phydev)
513 +{
514 + phy_write_mmd(phydev, MDIO_MMD_PMAPMD, FSL_KR_PMD_CTRL, TRAIN_DISABLE);
515 +}
516 +
517 +static void reset_gcr0(struct fsl_xgkr_inst *inst)
518 +{
519 + struct per_lane_ctrl_status *reg_base = inst->reg_base;
520 +
521 + iowrite32(ioread32(&reg_base->gcr0) & ~GCR0_RESET_MASK,
522 + &reg_base->gcr0);
523 + udelay(1);
524 + iowrite32(ioread32(&reg_base->gcr0) | GCR0_RESET_MASK,
525 + &reg_base->gcr0);
526 + udelay(1);
527 +}
528 +
529 +void lane_set_1gkx(void *reg)
530 +{
531 + struct per_lane_ctrl_status *reg_base = reg;
532 + u32 val;
533 +
534 + /* reset the lane */
535 + iowrite32(ioread32(&reg_base->gcr0) & ~GCR0_RESET_MASK,
536 + &reg_base->gcr0);
537 + udelay(1);
538 +
539 + /* set gcr1 for 1GKX */
540 + val = ioread32(&reg_base->gcr1);
541 + val &= ~(GCR1_REIDL_TH_MASK | GCR1_REIDL_EX_SEL_MASK |
542 + GCR1_REIDL_ET_MAS_MASK);
543 + iowrite32(val, &reg_base->gcr1);
544 + udelay(1);
545 +
546 + /* set tecr0 for 1GKX */
547 + val = ioread32(&reg_base->tecr0);
548 + val &= ~TECR0_AMP_RED_MASK;
549 + iowrite32(val, &reg_base->tecr0);
550 + udelay(1);
551 +
552 + /* unreset the lane */
553 + iowrite32(ioread32(&reg_base->gcr0) | GCR0_RESET_MASK,
554 + &reg_base->gcr0);
555 + udelay(1);
556 +}
557 +
558 +static void reset_lt(struct phy_device *phydev)
559 +{
560 + phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_CTRL1, PMD_RESET);
561 + phy_write_mmd(phydev, MDIO_MMD_PMAPMD, FSL_KR_PMD_CTRL, TRAIN_DISABLE);
562 + phy_write_mmd(phydev, MDIO_MMD_PMAPMD, FSL_KR_LD_CU, 0);
563 + phy_write_mmd(phydev, MDIO_MMD_PMAPMD, FSL_KR_LD_STATUS, 0);
564 + phy_write_mmd(phydev, MDIO_MMD_PMAPMD, FSL_KR_PMD_STATUS, 0);
565 + phy_write_mmd(phydev, MDIO_MMD_PMAPMD, FSL_KR_LP_CU, 0);
566 + phy_write_mmd(phydev, MDIO_MMD_PMAPMD, FSL_KR_LP_STATUS, 0);
567 +}
568 +
569 +static void start_xgkr_state_machine(struct delayed_work *work)
570 +{
571 + queue_delayed_work(system_power_efficient_wq, work,
572 + msecs_to_jiffies(XGKR_TIMEOUT));
573 +}
574 +
575 +static void start_xgkr_an(struct phy_device *phydev)
576 +{
577 + struct fsl_xgkr_inst *inst;
578 +
579 + reset_lt(phydev);
580 + phy_write_mmd(phydev, MDIO_MMD_AN, FSL_AN_AD1, KR_AN_AD1_INIT);
581 + phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1, AN_CTRL_INIT);
582 +
583 + inst = phydev->priv;
584 +
585 + /* start state machine */
586 + start_xgkr_state_machine(&inst->xgkr_wk);
587 +}
588 +
589 +static void start_1gkx_an(struct phy_device *phydev)
590 +{
591 + phy_write_mmd(phydev, MDIO_MMD_PCS, FSL_PCS_IF_MODE, IF_MODE_INIT);
592 + phy_write_mmd(phydev, MDIO_MMD_AN, FSL_AN_AD1, KX_AN_AD1_INIT);
593 + phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1);
594 + phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1, AN_CTRL_INIT);
595 +}
596 +
597 +static void ld_coe_status(struct fsl_xgkr_inst *inst)
598 +{
599 + phy_write_mmd(inst->phydev, MDIO_MMD_PMAPMD,
600 + FSL_KR_LD_STATUS, inst->ld_status);
601 +}
602 +
603 +static void ld_coe_update(struct fsl_xgkr_inst *inst)
604 +{
605 + dev_dbg(&inst->phydev->mdio.dev, "sending request: %x\n", inst->ld_update);
606 + phy_write_mmd(inst->phydev, MDIO_MMD_PMAPMD,
607 + FSL_KR_LD_CU, inst->ld_update);
608 +}
609 +
610 +static void init_inst(struct fsl_xgkr_inst *inst, int reset)
611 +{
612 + if (reset) {
613 + inst->ratio_preq = RATIO_PREQ;
614 + inst->ratio_pst1q = RATIO_PST1Q;
615 + inst->adpt_eq = RATIO_EQ;
616 + tune_tecr0(inst);
617 + }
618 +
619 + tx_condition_init(&inst->tx_c);
620 + inst->state = DETECTING_LP;
621 + inst->ld_status &= RX_READY_MASK;
622 + ld_coe_status(inst);
623 + inst->ld_update = 0;
624 + inst->ld_status &= ~RX_READY_MASK;
625 + ld_coe_status(inst);
626 +}
627 +
628 +#ifdef NEW_ALGORITHM_TRAIN_TX
629 +static int get_median_gaink2(u32 *reg)
630 +{
631 + int gaink2_snap_shot[BIN_SNAPSHOT_NUM];
632 + u32 rx_eq_snp;
633 + struct per_lane_ctrl_status *reg_base;
634 + int timeout;
635 + int i, j, tmp, pos;
636 +
637 + reg_base = (struct per_lane_ctrl_status *)reg;
638 +
639 + for (i = 0; i < BIN_SNAPSHOT_NUM; i++) {
640 + /* wait until RECR1_CTL_SNP_DONE_MASK has cleared */
641 + timeout = 100;
642 + while (ioread32(&reg_base->recr1) &
643 + RECR1_CTL_SNP_DONE_MASK) {
644 + udelay(1);
645 + timeout--;
646 + if (timeout == 0)
647 + break;
648 + }
649 +
650 + /* start snap shot */
651 + iowrite32((ioread32(&reg_base->gcr1) |
652 + GCR1_CTL_SNP_START_MASK),
653 + &reg_base->gcr1);
654 +
655 + /* wait for SNP done */
656 + timeout = 100;
657 + while (!(ioread32(&reg_base->recr1) &
658 + RECR1_CTL_SNP_DONE_MASK)) {
659 + udelay(1);
660 + timeout--;
661 + if (timeout == 0)
662 + break;
663 + }
664 +
665 + /* read and save the snap shot */
666 + rx_eq_snp = ioread32(&reg_base->recr1);
667 + gaink2_snap_shot[i] = (rx_eq_snp & RECR1_GAINK2_MASK) >>
668 + RECR1_GAINK2_SHIFT;
669 +
670 + /* terminate the snap shot by setting GCR1[REQ_CTL_SNP] */
671 + iowrite32((ioread32(&reg_base->gcr1) &
672 + ~GCR1_CTL_SNP_START_MASK),
673 + &reg_base->gcr1);
674 + }
675 +
676 + /* get the median of the 5 snap shots */
677 + for (i = 0; i < BIN_SNAPSHOT_NUM - 1; i++) {
678 + tmp = gaink2_snap_shot[i];
679 + pos = i;
680 + for (j = i + 1; j < BIN_SNAPSHOT_NUM; j++) {
681 + if (gaink2_snap_shot[j] < tmp) {
682 + tmp = gaink2_snap_shot[j];
683 + pos = j;
684 + }
685 + }
686 +
687 + gaink2_snap_shot[pos] = gaink2_snap_shot[i];
688 + gaink2_snap_shot[i] = tmp;
689 + }
690 +
691 + return gaink2_snap_shot[2];
692 +}
693 +#endif
694 +
695 +static bool is_bin_early(int bin_sel, void *reg)
696 +{
697 + bool early = false;
698 + int bin_snap_shot[BIN_SNAPSHOT_NUM];
699 + int i, negative_count = 0;
700 + struct per_lane_ctrl_status *reg_base = reg;
701 + int timeout;
702 +
703 + for (i = 0; i < BIN_SNAPSHOT_NUM; i++) {
704 + /* wait until RECR1_SNP_DONE_MASK has cleared */
705 + timeout = 100;
706 + while ((ioread32(&reg_base->recr1) & RECR1_SNP_DONE_MASK)) {
707 + udelay(1);
708 + timeout--;
709 + if (timeout == 0)
710 + break;
711 + }
712 +
713 + /* set TCSR1[CDR_SEL] to BinM1/BinLong */
714 + if (bin_sel == BIN_M1) {
715 + iowrite32((ioread32(&reg_base->tcsr1) &
716 + ~CDR_SEL_MASK) | BIN_M1_SEL,
717 + &reg_base->tcsr1);
718 + } else {
719 + iowrite32((ioread32(&reg_base->tcsr1) &
720 + ~CDR_SEL_MASK) | BIN_Long_SEL,
721 + &reg_base->tcsr1);
722 + }
723 +
724 + /* start snap shot */
725 + iowrite32(ioread32(&reg_base->gcr1) | GCR1_SNP_START_MASK,
726 + &reg_base->gcr1);
727 +
728 + /* wait for SNP done */
729 + timeout = 100;
730 + while (!(ioread32(&reg_base->recr1) & RECR1_SNP_DONE_MASK)) {
731 + udelay(1);
732 + timeout--;
733 + if (timeout == 0)
734 + break;
735 + }
736 +
737 + /* read and save the snap shot */
738 + bin_snap_shot[i] = (ioread32(&reg_base->tcsr1) &
739 + TCSR1_SNP_DATA_MASK) >> TCSR1_SNP_DATA_SHIFT;
740 + if (bin_snap_shot[i] & TCSR1_EQ_SNPBIN_SIGN_MASK)
741 + negative_count++;
742 +
743 + /* terminate the snap shot by setting GCR1[REQ_CTL_SNP] */
744 + iowrite32(ioread32(&reg_base->gcr1) & ~GCR1_SNP_START_MASK,
745 + &reg_base->gcr1);
746 + }
747 +
748 + if (((bin_sel == BIN_M1) && (negative_count > BIN_M1_THRESHOLD)) ||
749 + ((bin_sel == BIN_LONG && (negative_count > BIN_LONG_THRESHOLD)))) {
750 + early = true;
751 + }
752 +
753 + return early;
754 +}
755 +
756 +static void train_tx(struct fsl_xgkr_inst *inst)
757 +{
758 + struct phy_device *phydev = inst->phydev;
759 + struct tx_condition *tx_c = &inst->tx_c;
760 + bool bin_m1_early, bin_long_early;
761 + u32 lp_status, old_ld_update;
762 + u32 status_cop1, status_coz, status_com1;
763 + u32 req_cop1, req_coz, req_com1, req_preset, req_init;
764 + u32 temp;
765 +#ifdef NEW_ALGORITHM_TRAIN_TX
766 + u32 median_gaink2;
767 +#endif
768 +
769 +recheck:
770 + if (tx_c->bin_long_stop && tx_c->bin_m1_stop) {
771 + tx_c->tx_complete = true;
772 + inst->ld_status |= RX_READY_MASK;
773 + ld_coe_status(inst);
774 + /* tell LP we are ready */
775 + phy_write_mmd(phydev, MDIO_MMD_PMAPMD,
776 + FSL_KR_PMD_STATUS, RX_STAT);
777 + return;
778 + }
779 +
780 + /* We start by checking the current LP status. If we got any responses,
781 + * we can clear up the appropriate update request so that the
782 + * subsequent code may easily issue new update requests if needed.
783 + */
784 + lp_status = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, FSL_KR_LP_STATUS) &
785 + REQUEST_MASK;
786 + status_cop1 = (lp_status & COP1_MASK) >> COP1_SHIFT;
787 + status_coz = (lp_status & COZ_MASK) >> COZ_SHIFT;
788 + status_com1 = (lp_status & COM1_MASK) >> COM1_SHIFT;
789 +
790 + old_ld_update = inst->ld_update;
791 + req_cop1 = (old_ld_update & COP1_MASK) >> COP1_SHIFT;
792 + req_coz = (old_ld_update & COZ_MASK) >> COZ_SHIFT;
793 + req_com1 = (old_ld_update & COM1_MASK) >> COM1_SHIFT;
794 + req_preset = old_ld_update & PRESET_MASK;
795 + req_init = old_ld_update & INIT_MASK;
796 +
797 + /* IEEE802.3-2008, 72.6.10.2.3.1
798 + * We may clear PRESET when all coefficients show UPDATED or MAX.
799 + */
800 + if (req_preset) {
801 + if ((status_cop1 == COE_UPDATED || status_cop1 == COE_MAX) &&
802 + (status_coz == COE_UPDATED || status_coz == COE_MAX) &&
803 + (status_com1 == COE_UPDATED || status_com1 == COE_MAX)) {
804 + inst->ld_update &= ~PRESET_MASK;
805 + }
806 + }
807 +
808 + /* IEEE802.3-2008, 72.6.10.2.3.2
809 + * We may clear INITIALIZE when no coefficients show NOT UPDATED.
810 + */
811 + if (req_init) {
812 + if (status_cop1 != COE_NOTUPDATED &&
813 + status_coz != COE_NOTUPDATED &&
814 + status_com1 != COE_NOTUPDATED) {
815 + inst->ld_update &= ~INIT_MASK;
816 + }
817 + }
818 +
819 + /* IEEE802.3-2008, 72.6.10.2.3.2
820 + * we send initialize to the other side to ensure default settings
821 + * for the LP. Naturally, we should do this only once.
822 + */
823 + if (!tx_c->sent_init) {
824 + if (!lp_status && !(old_ld_update & (LD_ALL_MASK))) {
825 + inst->ld_update = INIT_MASK;
826 + tx_c->sent_init = true;
827 + }
828 + }
829 +
830 + /* IEEE802.3-2008, 72.6.10.2.3.3
831 + * We set coefficient requests to HOLD when we get the information
832 + * about any updates. On clearing our prior response, we also update
833 + * our internal status.
834 + */
835 + if (status_cop1 != COE_NOTUPDATED) {
836 + if (req_cop1) {
837 + inst->ld_update &= ~COP1_MASK;
838 +#ifdef NEW_ALGORITHM_TRAIN_TX
839 + if (tx_c->post_inc) {
840 + if (req_cop1 == INCREMENT &&
841 + status_cop1 == COE_MAX) {
842 + tx_c->post_inc = 0;
843 + tx_c->bin_long_stop = true;
844 + tx_c->bin_m1_stop = true;
845 + } else {
846 + tx_c->post_inc -= 1;
847 + }
848 +
849 + ld_coe_update(inst);
850 + goto recheck;
851 + }
852 +#endif
853 + if ((req_cop1 == DECREMENT && status_cop1 == COE_MIN) ||
854 + (req_cop1 == INCREMENT && status_cop1 == COE_MAX)) {
855 + dev_dbg(&inst->phydev->mdio.dev, "COP1 hit limit %s",
856 + (status_cop1 == COE_MIN) ?
857 + "DEC MIN" : "INC MAX");
858 + tx_c->long_min_max_cnt++;
859 + if (tx_c->long_min_max_cnt >= TIMEOUT_LONG) {
860 + tx_c->bin_long_stop = true;
861 + ld_coe_update(inst);
862 + goto recheck;
863 + }
864 + }
865 + }
866 + }
867 +
868 + if (status_coz != COE_NOTUPDATED) {
869 + if (req_coz)
870 + inst->ld_update &= ~COZ_MASK;
871 + }
872 +
873 + if (status_com1 != COE_NOTUPDATED) {
874 + if (req_com1) {
875 + inst->ld_update &= ~COM1_MASK;
876 +#ifdef NEW_ALGORITHM_TRAIN_TX
877 + if (tx_c->pre_inc) {
878 + if (req_com1 == INCREMENT &&
879 + status_com1 == COE_MAX)
880 + tx_c->pre_inc = 0;
881 + else
882 + tx_c->pre_inc -= 1;
883 +
884 + ld_coe_update(inst);
885 + goto recheck;
886 + }
887 +#endif
888 + /* Stop if we have reached the limit for a parameter. */
889 + if ((req_com1 == DECREMENT && status_com1 == COE_MIN) ||
890 + (req_com1 == INCREMENT && status_com1 == COE_MAX)) {
891 + dev_dbg(&inst->phydev->mdio.dev, "COM1 hit limit %s",
892 + (status_com1 == COE_MIN) ?
893 + "DEC MIN" : "INC MAX");
894 + tx_c->m1_min_max_cnt++;
895 + if (tx_c->m1_min_max_cnt >= TIMEOUT_M1) {
896 + tx_c->bin_m1_stop = true;
897 + ld_coe_update(inst);
898 + goto recheck;
899 + }
900 + }
901 + }
902 + }
903 +
904 + if (old_ld_update != inst->ld_update) {
905 + ld_coe_update(inst);
906 + /* Redo these status checks and updates until we have no more
907 + * changes, to speed up the overall process.
908 + */
909 + goto recheck;
910 + }
911 +
912 + /* Do nothing if we have a pending request. */
913 + if ((req_coz || req_com1 || req_cop1))
914 + return;
915 + else if (lp_status)
916 + /* No pending request, but the LP status has not reverted to
917 + * NOT UPDATED.
918 + */
919 + return;
920 +
921 +#ifdef NEW_ALGORITHM_TRAIN_TX
922 + if (!(inst->ld_update & (PRESET_MASK | INIT_MASK))) {
923 + if (tx_c->pre_inc) {
924 + inst->ld_update = INCREMENT << COM1_SHIFT;
925 + ld_coe_update(inst);
926 + return;
927 + }
928 +
929 + if (status_cop1 != COE_MAX) {
930 + median_gaink2 = get_median_gaink2(inst->reg_base);
931 + if (median_gaink2 == 0xf) {
932 + tx_c->post_inc = 1;
933 + } else {
934 + /* Gaink2 median lower than "F" */
935 + tx_c->bin_m1_stop = true;
936 + tx_c->bin_long_stop = true;
937 + goto recheck;
938 + }
939 + } else {
940 + /* C1 MAX */
941 + tx_c->bin_m1_stop = true;
942 + tx_c->bin_long_stop = true;
943 + goto recheck;
944 + }
945 +
946 + if (tx_c->post_inc) {
947 + inst->ld_update = INCREMENT << COP1_SHIFT;
948 + ld_coe_update(inst);
949 + return;
950 + }
951 + }
952 +#endif
953 +
954 + /* snapshot and select bin */
955 + bin_m1_early = is_bin_early(BIN_M1, inst->reg_base);
956 + bin_long_early = is_bin_early(BIN_LONG, inst->reg_base);
957 +
958 + if (!tx_c->bin_m1_stop && !tx_c->bin_m1_late_early && bin_m1_early) {
959 + tx_c->bin_m1_stop = true;
960 + goto recheck;
961 + }
962 +
963 + if (!tx_c->bin_long_stop &&
964 + tx_c->bin_long_late_early && !bin_long_early) {
965 + tx_c->bin_long_stop = true;
966 + goto recheck;
967 + }
968 +
969 + /* IEEE802.3-2008, 72.6.10.2.3.3
970 + * We only request coefficient updates when no PRESET/INITIALIZE is
971 + * pending. We also only request coefficient updates when the
972 + * corresponding status is NOT UPDATED and nothing is pending.
973 + */
974 + if (!(inst->ld_update & (PRESET_MASK | INIT_MASK))) {
975 + if (!tx_c->bin_long_stop) {
976 + /* BinM1 correction means changing COM1 */
977 + if (!status_com1 && !(inst->ld_update & COM1_MASK)) {
978 + /* Avoid BinM1Late by requesting an
979 + * immediate decrement.
980 + */
981 + if (!bin_m1_early) {
982 + /* request decrement c(-1) */
983 + temp = DECREMENT << COM1_SHIFT;
984 + inst->ld_update = temp;
985 + ld_coe_update(inst);
986 + tx_c->bin_m1_late_early = bin_m1_early;
987 + return;
988 + }
989 + }
990 +
991 + /* BinLong correction means changing COP1 */
992 + if (!status_cop1 && !(inst->ld_update & COP1_MASK)) {
993 + /* Locate BinLong transition point (if any)
994 + * while avoiding BinM1Late.
995 + */
996 + if (bin_long_early) {
997 + /* request increment c(1) */
998 + temp = INCREMENT << COP1_SHIFT;
999 + inst->ld_update = temp;
1000 + } else {
1001 + /* request decrement c(1) */
1002 + temp = DECREMENT << COP1_SHIFT;
1003 + inst->ld_update = temp;
1004 + }
1005 +
1006 + ld_coe_update(inst);
1007 + tx_c->bin_long_late_early = bin_long_early;
1008 + }
1009 + /* We try to finish BinLong before we do BinM1 */
1010 + return;
1011 + }
1012 +
1013 + if (!tx_c->bin_m1_stop) {
1014 + /* BinM1 correction means changing COM1 */
1015 + if (!status_com1 && !(inst->ld_update & COM1_MASK)) {
1016 + /* Locate BinM1 transition point (if any) */
1017 + if (bin_m1_early) {
1018 + /* request increment c(-1) */
1019 + temp = INCREMENT << COM1_SHIFT;
1020 + inst->ld_update = temp;
1021 + } else {
1022 + /* request decrement c(-1) */
1023 + temp = DECREMENT << COM1_SHIFT;
1024 + inst->ld_update = temp;
1025 + }
1026 +
1027 + ld_coe_update(inst);
1028 + tx_c->bin_m1_late_early = bin_m1_early;
1029 + }
1030 + }
1031 + }
1032 +}
1033 +
1034 +static int is_link_up(struct phy_device *phydev)
1035 +{
1036 + int val;
1037 +
1038 + phy_read_mmd(phydev, MDIO_MMD_PCS, FSL_XFI_PCS_10GR_SR1);
1039 + val = phy_read_mmd(phydev, MDIO_MMD_PCS, FSL_XFI_PCS_10GR_SR1);
1040 +
1041 + return (val & FSL_KR_RX_LINK_STAT_MASK) ? 1 : 0;
1042 +}
1043 +
1044 +static int is_link_training_fail(struct phy_device *phydev)
1045 +{
1046 + int val;
1047 + int timeout = 100;
1048 +
1049 + val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, FSL_KR_PMD_STATUS);
1050 + if (!(val & TRAIN_FAIL) && (val & RX_STAT)) {
1051 + /* check LNK_STAT for sure */
1052 + while (timeout--) {
1053 + if (is_link_up(phydev))
1054 + return 0;
1055 +
1056 + usleep_range(100, 500);
1057 + }
1058 + }
1059 +
1060 + return 1;
1061 +}
1062 +
1063 +static int check_rx(struct phy_device *phydev)
1064 +{
1065 + return phy_read_mmd(phydev, MDIO_MMD_PMAPMD, FSL_KR_LP_STATUS) &
1066 + RX_READY_MASK;
1067 +}
1068 +
1069 +/* Coefficient values have hardware restrictions */
1070 +static int is_ld_valid(struct fsl_xgkr_inst *inst)
1071 +{
1072 + u32 ratio_pst1q = inst->ratio_pst1q;
1073 + u32 adpt_eq = inst->adpt_eq;
1074 + u32 ratio_preq = inst->ratio_preq;
1075 +
1076 + if ((ratio_pst1q + adpt_eq + ratio_preq) > 48)
1077 + return 0;
1078 +
1079 + if (((ratio_pst1q + adpt_eq + ratio_preq) * 4) >=
1080 + ((adpt_eq - ratio_pst1q - ratio_preq) * 17))
1081 + return 0;
1082 +
1083 + if (ratio_preq > ratio_pst1q)
1084 + return 0;
1085 +
1086 + if (ratio_preq > 8)
1087 + return 0;
1088 +
1089 + if (adpt_eq < 26)
1090 + return 0;
1091 +
1092 + if (ratio_pst1q > 16)
1093 + return 0;
1094 +
1095 + return 1;
1096 +}
1097 +
1098 +static int is_value_allowed(const u32 *val_table, u32 val)
1099 +{
1100 + int i;
1101 +
1102 + for (i = 0;; i++) {
1103 + if (*(val_table + i) == VAL_INVALID)
1104 + return 0;
1105 + if (*(val_table + i) == val)
1106 + return 1;
1107 + }
1108 +}
1109 +
1110 +static int inc_dec(struct fsl_xgkr_inst *inst, int field, int request)
1111 +{
1112 + u32 ld_limit[3], ld_coe[3], step[3];
1113 +
1114 + ld_coe[0] = inst->ratio_pst1q;
1115 + ld_coe[1] = inst->adpt_eq;
1116 + ld_coe[2] = inst->ratio_preq;
1117 +
1118 + /* Information specific to the Freescale SerDes for 10GBase-KR:
1119 + * Incrementing C(+1) means *decrementing* RATIO_PST1Q
1120 + * Incrementing C(0) means incrementing ADPT_EQ
1121 + * Incrementing C(-1) means *decrementing* RATIO_PREQ
1122 + */
1123 + step[0] = -1;
1124 + step[1] = 1;
1125 + step[2] = -1;
1126 +
1127 + switch (request) {
1128 + case INCREMENT:
1129 + ld_limit[0] = POST_COE_MAX;
1130 + ld_limit[1] = ZERO_COE_MAX;
1131 + ld_limit[2] = PRE_COE_MAX;
1132 + if (ld_coe[field] != ld_limit[field])
1133 + ld_coe[field] += step[field];
1134 + else
1135 + /* MAX */
1136 + return 2;
1137 + break;
1138 + case DECREMENT:
1139 + ld_limit[0] = POST_COE_MIN;
1140 + ld_limit[1] = ZERO_COE_MIN;
1141 + ld_limit[2] = PRE_COE_MIN;
1142 + if (ld_coe[field] != ld_limit[field])
1143 + ld_coe[field] -= step[field];
1144 + else
1145 + /* MIN */
1146 + return 1;
1147 + break;
1148 + default:
1149 + break;
1150 + }
1151 +
1152 + if (is_ld_valid(inst)) {
1153 + /* accept new ld */
1154 + inst->ratio_pst1q = ld_coe[0];
1155 + inst->adpt_eq = ld_coe[1];
1156 + inst->ratio_preq = ld_coe[2];
1157 + /* only some values for preq and pst1q can be used.
1158 + * for preq: 0x0, 0x1, 0x3, 0x5, 0x7, 0x9, 0xb, 0xc.
1159 + * for pst1q: 0x0, 0x1, 0x3, 0x5, 0x7, 0x9, 0xb, 0xd, 0xf, 0x10.
1160 + */
1161 + if (!is_value_allowed((const u32 *)&preq_table, ld_coe[2])) {
1162 + dev_dbg(&inst->phydev->mdio.dev,
1163 + "preq skipped value: %d\n", ld_coe[2]);
1164 + return 0;
1165 + }
1166 +
1167 + if (!is_value_allowed((const u32 *)&pst1q_table, ld_coe[0])) {
1168 + dev_dbg(&inst->phydev->mdio.dev,
1169 + "pst1q skipped value: %d\n", ld_coe[0]);
1170 + return 0;
1171 + }
1172 +
1173 + tune_tecr0(inst);
1174 + } else {
1175 + if (request == DECREMENT)
1176 + /* MIN */
1177 + return 1;
1178 + if (request == INCREMENT)
1179 + /* MAX */
1180 + return 2;
1181 + }
1182 +
1183 + return 0;
1184 +}
1185 +
1186 +static void min_max_updated(struct fsl_xgkr_inst *inst, int field, int new_ld)
1187 +{
1188 + u32 ld_coe[] = {COE_UPDATED, COE_MIN, COE_MAX};
1189 + u32 mask, val;
1190 +
1191 + switch (field) {
1192 + case COE_COP1:
1193 + mask = COP1_MASK;
1194 + val = ld_coe[new_ld] << COP1_SHIFT;
1195 + break;
1196 + case COE_COZ:
1197 + mask = COZ_MASK;
1198 + val = ld_coe[new_ld] << COZ_SHIFT;
1199 + break;
1200 + case COE_COM:
1201 + mask = COM1_MASK;
1202 + val = ld_coe[new_ld] << COM1_SHIFT;
1203 + break;
1204 + default:
1205 + return;
1206 + }
1207 +
1208 + inst->ld_status &= ~mask;
1209 + inst->ld_status |= val;
1210 +}
1211 +
1212 +static void check_request(struct fsl_xgkr_inst *inst, int request)
1213 +{
1214 + int cop1_req, coz_req, com_req;
1215 + int old_status, new_ld_sta;
1216 +
1217 + cop1_req = (request & COP1_MASK) >> COP1_SHIFT;
1218 + coz_req = (request & COZ_MASK) >> COZ_SHIFT;
1219 + com_req = (request & COM1_MASK) >> COM1_SHIFT;
1220 +
1221 + /* IEEE802.3-2008, 72.6.10.2.5
1222 + * Ensure we only act on INCREMENT/DECREMENT when we are in NOT UPDATED
1223 + */
1224 + old_status = inst->ld_status;
1225 +
1226 + if (cop1_req && !(inst->ld_status & COP1_MASK)) {
1227 + new_ld_sta = inc_dec(inst, COE_COP1, cop1_req);
1228 + min_max_updated(inst, COE_COP1, new_ld_sta);
1229 + }
1230 +
1231 + if (coz_req && !(inst->ld_status & COZ_MASK)) {
1232 + new_ld_sta = inc_dec(inst, COE_COZ, coz_req);
1233 + min_max_updated(inst, COE_COZ, new_ld_sta);
1234 + }
1235 +
1236 + if (com_req && !(inst->ld_status & COM1_MASK)) {
1237 + new_ld_sta = inc_dec(inst, COE_COM, com_req);
1238 + min_max_updated(inst, COE_COM, new_ld_sta);
1239 + }
1240 +
1241 + if (old_status != inst->ld_status)
1242 + ld_coe_status(inst);
1243 +}
1244 +
1245 +static void preset(struct fsl_xgkr_inst *inst)
1246 +{
1247 + /* These are all MAX values from the IEEE802.3 perspective. */
1248 + inst->ratio_pst1q = POST_COE_MAX;
1249 + inst->adpt_eq = ZERO_COE_MAX;
1250 + inst->ratio_preq = PRE_COE_MAX;
1251 +
1252 + tune_tecr0(inst);
1253 + inst->ld_status &= ~(COP1_MASK | COZ_MASK | COM1_MASK);
1254 + inst->ld_status |= COE_MAX << COP1_SHIFT |
1255 + COE_MAX << COZ_SHIFT |
1256 + COE_MAX << COM1_SHIFT;
1257 + ld_coe_status(inst);
1258 +}
1259 +
1260 +static void initialize(struct fsl_xgkr_inst *inst)
1261 +{
1262 + inst->ratio_preq = RATIO_PREQ;
1263 + inst->ratio_pst1q = RATIO_PST1Q;
1264 + inst->adpt_eq = RATIO_EQ;
1265 +
1266 + tune_tecr0(inst);
1267 + inst->ld_status &= ~(COP1_MASK | COZ_MASK | COM1_MASK);
1268 + inst->ld_status |= COE_UPDATED << COP1_SHIFT |
1269 + COE_UPDATED << COZ_SHIFT |
1270 + COE_UPDATED << COM1_SHIFT;
1271 + ld_coe_status(inst);
1272 +}
1273 +
1274 +static void train_rx(struct fsl_xgkr_inst *inst)
1275 +{
1276 + struct phy_device *phydev = inst->phydev;
1277 + int request, old_ld_status;
1278 +
1279 + /* get request from LP */
1280 + request = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, FSL_KR_LP_CU) &
1281 + (LD_ALL_MASK);
1282 + old_ld_status = inst->ld_status;
1283 +
1284 + /* IEEE802.3-2008, 72.6.10.2.5
1285 + * Ensure we always go to NOT UPDATED for status reporting in
1286 + * response to HOLD requests.
1287 + * IEEE802.3-2008, 72.6.10.2.3.1/2
1288 + * ... but only if PRESET/INITIALIZE are not active to ensure
1289 + * we keep status until they are released.
1290 + */
1291 + if (!(request & (PRESET_MASK | INIT_MASK))) {
1292 + if (!(request & COP1_MASK))
1293 + inst->ld_status &= ~COP1_MASK;
1294 +
1295 + if (!(request & COZ_MASK))
1296 + inst->ld_status &= ~COZ_MASK;
1297 +
1298 + if (!(request & COM1_MASK))
1299 + inst->ld_status &= ~COM1_MASK;
1300 +
1301 + if (old_ld_status != inst->ld_status)
1302 + ld_coe_status(inst);
1303 + }
1304 +
1305 + /* As soon as the LP shows ready, no need to do any more updates. */
1306 + if (check_rx(phydev)) {
1307 + /* LP receiver is ready */
1308 + if (inst->ld_status & (COP1_MASK | COZ_MASK | COM1_MASK)) {
1309 + inst->ld_status &= ~(COP1_MASK | COZ_MASK | COM1_MASK);
1310 + ld_coe_status(inst);
1311 + }
1312 + } else {
1313 + /* IEEE802.3-2008, 72.6.10.2.3.1/2
1314 + * only act on PRESET/INITIALIZE if all status is NOT UPDATED.
1315 + */
1316 + if (request & (PRESET_MASK | INIT_MASK)) {
1317 + if (!(inst->ld_status &
1318 + (COP1_MASK | COZ_MASK | COM1_MASK))) {
1319 + if (request & PRESET_MASK)
1320 + preset(inst);
1321 +
1322 + if (request & INIT_MASK)
1323 + initialize(inst);
1324 + }
1325 + }
1326 +
1327 + /* LP coefficients are not in HOLD */
1328 + if (request & REQUEST_MASK)
1329 + check_request(inst, request & REQUEST_MASK);
1330 + }
1331 +}
1332 +
1333 +static void xgkr_start_train(struct phy_device *phydev)
1334 +{
1335 + struct fsl_xgkr_inst *inst = phydev->priv;
1336 + struct tx_condition *tx_c = &inst->tx_c;
1337 + int val = 0, i;
1338 + int lt_state;
1339 + unsigned long dead_line;
1340 + int rx_ok, tx_ok;
1341 +
1342 + init_inst(inst, 0);
1343 + start_lt(phydev);
1344 +
1345 + for (i = 0; i < 2;) {
1346 + dead_line = jiffies + msecs_to_jiffies(500);
1347 + while (time_before(jiffies, dead_line)) {
1348 + val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD,
1349 + FSL_KR_PMD_STATUS);
1350 + if (val & TRAIN_FAIL) {
1351 + /* LT already failed; reset the lane to avoid
1352 + * it hanging, then start LT again.
1353 + */
1354 + reset_gcr0(inst);
1355 + start_lt(phydev);
1356 + } else if ((val & PMD_STATUS_SUP_STAT) &&
1357 + (val & PMD_STATUS_FRAME_LOCK))
1358 + break;
1359 + usleep_range(100, 500);
1360 + }
1361 +
1362 + if (!((val & PMD_STATUS_FRAME_LOCK) &&
1363 + (val & PMD_STATUS_SUP_STAT))) {
1364 + i++;
1365 + continue;
1366 + }
1367 +
1368 + /* init process */
1369 + rx_ok = false;
1370 + tx_ok = false;
1371 + /* LT should finish within 500ms, whether it fails or succeeds. */
1372 + dead_line = jiffies + msecs_to_jiffies(500);
1373 +
1374 + while (time_before(jiffies, dead_line)) {
1375 + /* check if the LT is already failed */
1376 + lt_state = phy_read_mmd(phydev, MDIO_MMD_PMAPMD,
1377 + FSL_KR_PMD_STATUS);
1378 + if (lt_state & TRAIN_FAIL) {
1379 + reset_gcr0(inst);
1380 + break;
1381 + }
1382 +
1383 + rx_ok = check_rx(phydev);
1384 + tx_ok = tx_c->tx_complete;
1385 +
1386 + if (rx_ok && tx_ok)
1387 + break;
1388 +
1389 + if (!rx_ok)
1390 + train_rx(inst);
1391 +
1392 + if (!tx_ok)
1393 + train_tx(inst);
1394 +
1395 + usleep_range(100, 500);
1396 + }
1397 +
1398 + i++;
1399 + /* check LT result */
1400 + if (is_link_training_fail(phydev)) {
1401 + init_inst(inst, 0);
1402 + continue;
1403 + } else {
1404 + stop_lt(phydev);
1405 + inst->state = TRAINED;
1406 + break;
1407 + }
1408 + }
1409 +}
1410 +
1411 +static void xgkr_state_machine(struct work_struct *work)
1412 +{
1413 + struct delayed_work *dwork = to_delayed_work(work);
1414 + struct fsl_xgkr_inst *inst = container_of(dwork,
1415 + struct fsl_xgkr_inst,
1416 + xgkr_wk);
1417 + struct phy_device *phydev = inst->phydev;
1418 + int an_state;
1419 + bool needs_train = false;
1420 +
1421 + mutex_lock(&phydev->lock);
1422 +
1423 + switch (inst->state) {
1424 + case DETECTING_LP:
1425 + phy_read_mmd(phydev, MDIO_MMD_AN, FSL_AN_BP_STAT);
1426 + an_state = phy_read_mmd(phydev, MDIO_MMD_AN, FSL_AN_BP_STAT);
1427 + if ((an_state & KR_AN_MASK))
1428 + needs_train = true;
1429 + break;
1430 + case TRAINED:
1431 + if (!is_link_up(phydev)) {
1432 + dev_info(&phydev->mdio.dev,
1433 + "Detect hotplug, restart training\n");
1434 + init_inst(inst, 1);
1435 + start_xgkr_an(phydev);
1436 + inst->state = DETECTING_LP;
1437 + }
1438 + break;
1439 + }
1440 +
1441 + if (needs_train)
1442 + xgkr_start_train(phydev);
1443 +
1444 + mutex_unlock(&phydev->lock);
1445 + queue_delayed_work(system_power_efficient_wq, &inst->xgkr_wk,
1446 + msecs_to_jiffies(XGKR_TIMEOUT));
1447 +}
1448 +
1449 +static int fsl_backplane_probe(struct phy_device *phydev)
1450 +{
1451 + struct fsl_xgkr_inst *xgkr_inst;
1452 + struct device_node *phy_node, *lane_node;
1453 + struct resource res_lane;
1454 + const char *bm;
1455 + int ret;
1456 + int bp_mode;
1457 + u32 lane[2];
1458 +
1459 + phy_node = phydev->mdio.dev.of_node;
1460 + bp_mode = of_property_read_string(phy_node, "backplane-mode", &bm);
1461 + if (bp_mode < 0)
1462 + return 0;
1463 +
1464 + if (!strcasecmp(bm, "1000base-kx")) {
1465 + bp_mode = PHY_BACKPLANE_1000BASE_KX;
1466 + } else if (!strcasecmp(bm, "10gbase-kr")) {
1467 + bp_mode = PHY_BACKPLANE_10GBASE_KR;
1468 + } else {
1469 + dev_err(&phydev->mdio.dev, "Unknown backplane-mode\n");
1470 + return -EINVAL;
1471 + }
1472 +
1473 + lane_node = of_parse_phandle(phy_node, "fsl,lane-handle", 0);
1474 + if (!lane_node) {
1475 + dev_err(&phydev->mdio.dev, "parse fsl,lane-handle failed\n");
1476 + return -EINVAL;
1477 + }
1478 +
1479 + ret = of_address_to_resource(lane_node, 0, &res_lane);
1480 + if (ret) {
1481 + dev_err(&phydev->mdio.dev, "could not obtain memory map\n");
1482 + return ret;
1483 + }
1484 +
1485 + of_node_put(lane_node);
1486 + ret = of_property_read_u32_array(phy_node, "fsl,lane-reg",
1487 + (u32 *)&lane, 2);
1488 + if (ret) {
1489 + dev_err(&phydev->mdio.dev, "could not get fsl,lane-reg\n");
1490 + return -EINVAL;
1491 + }
1492 +
1493 + phydev->priv = devm_ioremap_nocache(&phydev->mdio.dev,
1494 + res_lane.start + lane[0],
1495 + lane[1]);
1496 + if (!phydev->priv) {
1497 + dev_err(&phydev->mdio.dev, "ioremap_nocache failed\n");
1498 + return -ENOMEM;
1499 + }
1500 +
1501 + if (bp_mode == PHY_BACKPLANE_1000BASE_KX) {
1502 + phydev->speed = SPEED_1000;
1503 + /* configure the lane for 1000BASE-KX */
1504 + lane_set_1gkx(phydev->priv);
1505 + return 0;
1506 + }
1507 +
1508 + xgkr_inst = devm_kzalloc(&phydev->mdio.dev,
1509 + sizeof(*xgkr_inst), GFP_KERNEL);
1510 + if (!xgkr_inst)
1511 + return -ENOMEM;
1512 +
1513 + xgkr_inst->reg_base = phydev->priv;
1514 + xgkr_inst->phydev = phydev;
1515 + phydev->priv = xgkr_inst;
1516 +
1517 + if (bp_mode == PHY_BACKPLANE_10GBASE_KR) {
1518 + phydev->speed = SPEED_10000;
1519 + INIT_DELAYED_WORK(&xgkr_inst->xgkr_wk, xgkr_state_machine);
1520 + }
1521 +
1522 + return 0;
1523 +}
1524 +
1525 +static int fsl_backplane_aneg_done(struct phy_device *phydev)
1526 +{
1527 + return 1;
1528 +}
1529 +
1530 +static int fsl_backplane_config_aneg(struct phy_device *phydev)
1531 +{
1532 + if (phydev->speed == SPEED_10000) {
1533 + phydev->supported |= SUPPORTED_10000baseKR_Full;
1534 + start_xgkr_an(phydev);
1535 + } else if (phydev->speed == SPEED_1000) {
1536 + phydev->supported |= SUPPORTED_1000baseKX_Full;
1537 + start_1gkx_an(phydev);
1538 + }
1539 +
1540 + phydev->advertising = phydev->supported;
1541 + phydev->duplex = 1;
1542 +
1543 + return 0;
1544 +}
1545 +
1546 +static int fsl_backplane_suspend(struct phy_device *phydev)
1547 +{
1548 + if (phydev->speed == SPEED_10000) {
1549 + struct fsl_xgkr_inst *xgkr_inst = phydev->priv;
1550 +
1551 + cancel_delayed_work_sync(&xgkr_inst->xgkr_wk);
1552 + }
1553 + return 0;
1554 +}
1555 +
1556 +static int fsl_backplane_resume(struct phy_device *phydev)
1557 +{
1558 + if (phydev->speed == SPEED_10000) {
1559 + struct fsl_xgkr_inst *xgkr_inst = phydev->priv;
1560 +
1561 + init_inst(xgkr_inst, 1);
1562 + queue_delayed_work(system_power_efficient_wq,
1563 + &xgkr_inst->xgkr_wk,
1564 + msecs_to_jiffies(XGKR_TIMEOUT));
1565 + }
1566 + return 0;
1567 +}
1568 +
1569 +static int fsl_backplane_read_status(struct phy_device *phydev)
1570 +{
1571 + if (is_link_up(phydev))
1572 + phydev->link = 1;
1573 + else
1574 + phydev->link = 0;
1575 +
1576 + return 0;
1577 +}
1578 +
1579 +static struct phy_driver fsl_backplane_driver[] = {
1580 + {
1581 + .phy_id = FSL_PCS_PHY_ID,
1582 + .name = "Freescale Backplane",
1583 + .phy_id_mask = 0xffffffff,
1584 + .features = SUPPORTED_Backplane | SUPPORTED_Autoneg |
1585 + SUPPORTED_MII,
1586 + .probe = fsl_backplane_probe,
1587 + .aneg_done = fsl_backplane_aneg_done,
1588 + .config_aneg = fsl_backplane_config_aneg,
1589 + .read_status = fsl_backplane_read_status,
1590 + .suspend = fsl_backplane_suspend,
1591 + .resume = fsl_backplane_resume,
1592 + },
1593 +};
1594 +
1595 +module_phy_driver(fsl_backplane_driver);
1596 +
1597 +static struct mdio_device_id __maybe_unused freescale_tbl[] = {
1598 + { FSL_PCS_PHY_ID, 0xffffffff },
1599 + { }
1600 +};
1601 +
1602 +MODULE_DEVICE_TABLE(mdio, freescale_tbl);
1603 +
1604 +MODULE_DESCRIPTION("Freescale Backplane driver");
1605 +MODULE_AUTHOR("Shaohui Xie <Shaohui.Xie@freescale.com>");
1606 +MODULE_LICENSE("GPL v2");
1607 --- a/drivers/net/phy/phy.c
1608 +++ b/drivers/net/phy/phy.c
1609 @@ -585,7 +585,7 @@ int phy_mii_ioctl(struct phy_device *phy
1610 return 0;
1611
1612 case SIOCSHWTSTAMP:
1613 - if (phydev->drv->hwtstamp)
1614 + if (phydev->drv && phydev->drv->hwtstamp)
1615 return phydev->drv->hwtstamp(phydev, ifr);
1616 /* fall through */
1617
1618 @@ -610,6 +610,9 @@ static int phy_start_aneg_priv(struct ph
1619 bool trigger = 0;
1620 int err;
1621
1622 + if (!phydev->drv)
1623 + return -EIO;
1624 +
1625 mutex_lock(&phydev->lock);
1626
1627 if (AUTONEG_DISABLE == phydev->autoneg)
1628 @@ -1009,7 +1012,7 @@ void phy_state_machine(struct work_struc
1629
1630 old_state = phydev->state;
1631
1632 - if (phydev->drv->link_change_notify)
1633 + if (phydev->drv && phydev->drv->link_change_notify)
1634 phydev->drv->link_change_notify(phydev);
1635
1636 switch (phydev->state) {
1637 @@ -1311,6 +1314,9 @@ EXPORT_SYMBOL(phy_write_mmd_indirect);
1638 */
1639 int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
1640 {
1641 + if (!phydev->drv)
1642 + return -EIO;
1643 +
1644 /* According to 802.3az,the EEE is supported only in full duplex-mode.
1645 * Also EEE feature is active when core is operating with MII, GMII
1646 * or RGMII (all kinds). Internal PHYs are also allowed to proceed and
1647 @@ -1388,6 +1394,9 @@ EXPORT_SYMBOL(phy_init_eee);
1648 */
1649 int phy_get_eee_err(struct phy_device *phydev)
1650 {
1651 + if (!phydev->drv)
1652 + return -EIO;
1653 +
1654 return phy_read_mmd_indirect(phydev, MDIO_PCS_EEE_WK_ERR, MDIO_MMD_PCS);
1655 }
1656 EXPORT_SYMBOL(phy_get_eee_err);
1657 @@ -1404,6 +1413,9 @@ int phy_ethtool_get_eee(struct phy_devic
1658 {
1659 int val;
1660
1661 + if (!phydev->drv)
1662 + return -EIO;
1663 +
1664 /* Get Supported EEE */
1665 val = phy_read_mmd_indirect(phydev, MDIO_PCS_EEE_ABLE, MDIO_MMD_PCS);
1666 if (val < 0)
1667 @@ -1437,6 +1449,9 @@ int phy_ethtool_set_eee(struct phy_devic
1668 {
1669 int val = ethtool_adv_to_mmd_eee_adv_t(data->advertised);
1670
1671 + if (!phydev->drv)
1672 + return -EIO;
1673 +
1674 /* Mask prohibited EEE modes */
1675 val &= ~phydev->eee_broken_modes;
1676
1677 @@ -1448,7 +1463,7 @@ EXPORT_SYMBOL(phy_ethtool_set_eee);
1678
1679 int phy_ethtool_set_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol)
1680 {
1681 - if (phydev->drv->set_wol)
1682 + if (phydev->drv && phydev->drv->set_wol)
1683 return phydev->drv->set_wol(phydev, wol);
1684
1685 return -EOPNOTSUPP;
1686 @@ -1457,7 +1472,7 @@ EXPORT_SYMBOL(phy_ethtool_set_wol);
1687
1688 void phy_ethtool_get_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol)
1689 {
1690 - if (phydev->drv->get_wol)
1691 + if (phydev->drv && phydev->drv->get_wol)
1692 phydev->drv->get_wol(phydev, wol);
1693 }
1694 EXPORT_SYMBOL(phy_ethtool_get_wol);
1695 --- a/drivers/net/phy/phy_device.c
1696 +++ b/drivers/net/phy/phy_device.c
1697 @@ -1046,7 +1046,7 @@ int phy_suspend(struct phy_device *phyde
1698 if (wol.wolopts)
1699 return -EBUSY;
1700
1701 - if (phydrv->suspend)
1702 + if (phydev->drv && phydrv->suspend)
1703 ret = phydrv->suspend(phydev);
1704
1705 if (ret)
1706 @@ -1063,7 +1063,7 @@ int phy_resume(struct phy_device *phydev
1707 struct phy_driver *phydrv = to_phy_driver(phydev->mdio.dev.driver);
1708 int ret = 0;
1709
1710 - if (phydrv->resume)
1711 + if (phydev->drv && phydrv->resume)
1712 ret = phydrv->resume(phydev);
1713
1714 if (ret)
1715 @@ -1726,7 +1726,7 @@ static int phy_remove(struct device *dev
1716 phydev->state = PHY_DOWN;
1717 mutex_unlock(&phydev->lock);
1718
1719 - if (phydev->drv->remove)
1720 + if (phydev->drv && phydev->drv->remove)
1721 phydev->drv->remove(phydev);
1722 phydev->drv = NULL;
1723
1724 --- a/drivers/net/phy/swphy.c
1725 +++ b/drivers/net/phy/swphy.c
1726 @@ -77,6 +77,7 @@ static const struct swmii_regs duplex[]
1727 static int swphy_decode_speed(int speed)
1728 {
1729 switch (speed) {
1730 + case 10000:
1731 case 1000:
1732 return SWMII_SPEED_1000;
1733 case 100:
1734 --- a/include/linux/phy.h
1735 +++ b/include/linux/phy.h
1736 @@ -81,6 +81,7 @@ typedef enum {
1737 PHY_INTERFACE_MODE_MOCA,
1738 PHY_INTERFACE_MODE_QSGMII,
1739 PHY_INTERFACE_MODE_TRGMII,
1740 + PHY_INTERFACE_MODE_SGMII_2500,
1741 PHY_INTERFACE_MODE_MAX,
1742 } phy_interface_t;
1743
1744 @@ -784,6 +785,9 @@ int phy_stop_interrupts(struct phy_devic
1745
1746 static inline int phy_read_status(struct phy_device *phydev)
1747 {
1748 + if (!phydev->drv)
1749 + return -EIO;
1750 +
1751 return phydev->drv->read_status(phydev);
1752 }
1753