target/linux/layerscape/patches-4.9/703-phy-support-layerscape.patch
1 From 8949ebc0c5b982eab7ca493dad7b86c30befa6ec Mon Sep 17 00:00:00 2001
2 From: Yangbo Lu <yangbo.lu@nxp.com>
3 Date: Wed, 17 Jan 2018 15:01:30 +0800
4 Subject: [PATCH 09/30] phy: support layerscape
5
6 This is an integrated patch for Layerscape MDIO/PHY support.
7
8 Signed-off-by: Bogdan Purcareata <bogdan.purcareata@nxp.com>
9 Signed-off-by: Zhang Ying-22455 <ying.zhang22455@nxp.com>
10 Signed-off-by: costi <constantin.tudor@freescale.com>
11 Signed-off-by: Madalin Bucur <madalin.bucur@freescale.com>
12 Signed-off-by: Shaohui Xie <Shaohui.Xie@nxp.com>
13 Signed-off-by: Florian Fainelli <f.fainelli@gmail.com>
14 Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
15 ---
16 drivers/net/phy/Kconfig | 11 +
17 drivers/net/phy/Makefile | 2 +
18 drivers/net/phy/aquantia.c | 28 +
19 drivers/net/phy/cortina.c | 118 ++++
20 drivers/net/phy/fsl_backplane.c | 1358 +++++++++++++++++++++++++++++++++++++++
21 drivers/net/phy/marvell.c | 2 +-
22 drivers/net/phy/phy.c | 23 +-
23 drivers/net/phy/phy_device.c | 6 +-
24 drivers/net/phy/swphy.c | 1 +
25 include/linux/phy.h | 6 +
26 10 files changed, 1547 insertions(+), 8 deletions(-)
27 create mode 100644 drivers/net/phy/cortina.c
28 create mode 100644 drivers/net/phy/fsl_backplane.c
29
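Note (illustration only, not part of the patch itself): besides the new Aquantia IDs, the Cortina CS4340 driver and the Freescale backplane KR/KX driver, the series hardens the PHY core against a detached driver. phy_remove() leaves phydev->drv set to NULL, so the phy.c, phy_device.c and phy.h hunks below check phydev->drv before every callback dereference. A minimal sketch of that guard pattern follows; example_read_status is a hypothetical caller, not code from this patch.

#include <linux/phy.h>

/* Guard pattern used throughout the hunks below: a PHY whose driver has
 * been unbound has phydev->drv == NULL, so each driver callback is checked
 * before the call and -EIO is returned when no driver is bound.
 */
static int example_read_status(struct phy_device *phydev)
{
	if (!phydev->drv || !phydev->drv->read_status)
		return -EIO;

	return phydev->drv->read_status(phydev);
}

The phy_read_status() inline in the include/linux/phy.h hunk applies the same idea with just the !phydev->drv test.
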
30 --- a/drivers/net/phy/Kconfig
31 +++ b/drivers/net/phy/Kconfig
32 @@ -89,6 +89,12 @@ config MDIO_BUS_MUX_MMIOREG
33 config MDIO_CAVIUM
34 tristate
35
36 +config MDIO_FSL_BACKPLANE
37 + tristate "Support for backplane on Freescale XFI interface"
38 + depends on OF_MDIO
39 + help
40 + This module provides a driver for backplane links on the Freescale XFI interface.
41 +
42 config MDIO_GPIO
43 tristate "GPIO lib-based bitbanged MDIO buses"
44 depends on MDIO_BITBANG && GPIOLIB
45 @@ -298,6 +304,11 @@ config CICADA_PHY
46 ---help---
47 Currently supports the cis8204
48
49 +config CORTINA_PHY
50 + tristate "Cortina EDC CDR 10G Ethernet PHY"
51 + ---help---
52 + Currently supports the CS4340 phy.
53 +
54 config DAVICOM_PHY
55 tristate "Davicom PHYs"
56 ---help---
57 --- a/drivers/net/phy/Makefile
58 +++ b/drivers/net/phy/Makefile
59 @@ -30,6 +30,7 @@ obj-$(CONFIG_MDIO_BUS_MUX_BCM_IPROC) +=
60 obj-$(CONFIG_MDIO_BUS_MUX_GPIO) += mdio-mux-gpio.o
61 obj-$(CONFIG_MDIO_BUS_MUX_MMIOREG) += mdio-mux-mmioreg.o
62 obj-$(CONFIG_MDIO_CAVIUM) += mdio-cavium.o
63 +obj-$(CONFIG_MDIO_FSL_BACKPLANE) += fsl_backplane.o
64 obj-$(CONFIG_MDIO_GPIO) += mdio-gpio.o
65 obj-$(CONFIG_MDIO_HISI_FEMAC) += mdio-hisi-femac.o
66 obj-$(CONFIG_MDIO_MOXART) += mdio-moxart.o
67 @@ -48,6 +49,7 @@ obj-$(CONFIG_BCM_CYGNUS_PHY) += bcm-cygn
68 obj-$(CONFIG_BCM_NET_PHYLIB) += bcm-phy-lib.o
69 obj-$(CONFIG_BROADCOM_PHY) += broadcom.o
70 obj-$(CONFIG_CICADA_PHY) += cicada.o
71 +obj-$(CONFIG_CORTINA_PHY) += cortina.o
72 obj-$(CONFIG_DAVICOM_PHY) += davicom.o
73 obj-$(CONFIG_DP83640_PHY) += dp83640.o
74 obj-$(CONFIG_DP83848_PHY) += dp83848.o
75 --- a/drivers/net/phy/aquantia.c
76 +++ b/drivers/net/phy/aquantia.c
77 @@ -21,6 +21,8 @@
78 #define PHY_ID_AQ1202 0x03a1b445
79 #define PHY_ID_AQ2104 0x03a1b460
80 #define PHY_ID_AQR105 0x03a1b4a2
81 +#define PHY_ID_AQR106 0x03a1b4d0
82 +#define PHY_ID_AQR107 0x03a1b4e0
83 #define PHY_ID_AQR405 0x03a1b4b0
84
85 #define PHY_AQUANTIA_FEATURES (SUPPORTED_10000baseT_Full | \
86 @@ -154,6 +156,30 @@ static struct phy_driver aquantia_driver
87 .read_status = aquantia_read_status,
88 },
89 {
90 + .phy_id = PHY_ID_AQR106,
91 + .phy_id_mask = 0xfffffff0,
92 + .name = "Aquantia AQR106",
93 + .features = PHY_AQUANTIA_FEATURES,
94 + .flags = PHY_HAS_INTERRUPT,
95 + .aneg_done = aquantia_aneg_done,
96 + .config_aneg = aquantia_config_aneg,
97 + .config_intr = aquantia_config_intr,
98 + .ack_interrupt = aquantia_ack_interrupt,
99 + .read_status = aquantia_read_status,
100 +},
101 +{
102 + .phy_id = PHY_ID_AQR107,
103 + .phy_id_mask = 0xfffffff0,
104 + .name = "Aquantia AQR107",
105 + .features = PHY_AQUANTIA_FEATURES,
106 + .flags = PHY_HAS_INTERRUPT,
107 + .aneg_done = aquantia_aneg_done,
108 + .config_aneg = aquantia_config_aneg,
109 + .config_intr = aquantia_config_intr,
110 + .ack_interrupt = aquantia_ack_interrupt,
111 + .read_status = aquantia_read_status,
112 +},
113 +{
114 .phy_id = PHY_ID_AQR405,
115 .phy_id_mask = 0xfffffff0,
116 .name = "Aquantia AQR405",
117 @@ -173,6 +199,8 @@ static struct mdio_device_id __maybe_unu
118 { PHY_ID_AQ1202, 0xfffffff0 },
119 { PHY_ID_AQ2104, 0xfffffff0 },
120 { PHY_ID_AQR105, 0xfffffff0 },
121 + { PHY_ID_AQR106, 0xfffffff0 },
122 + { PHY_ID_AQR107, 0xfffffff0 },
123 { PHY_ID_AQR405, 0xfffffff0 },
124 { }
125 };
126 --- /dev/null
127 +++ b/drivers/net/phy/cortina.c
128 @@ -0,0 +1,118 @@
129 +/*
130 + * Copyright 2017 NXP
131 + *
132 + * This program is free software; you can redistribute it and/or modify
133 + * it under the terms of the GNU General Public License as published by
134 + * the Free Software Foundation; either version 2 of the License, or
135 + * (at your option) any later version.
136 + *
137 + * This program is distributed in the hope that it will be useful,
138 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
139 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
140 + * GNU General Public License for more details.
141 + *
142 + * CORTINA is a registered trademark of Cortina Systems, Inc.
143 + *
144 + */
145 +#include <linux/module.h>
146 +#include <linux/phy.h>
147 +
148 +#define PHY_ID_CS4340 0x13e51002
149 +
150 +#define VILLA_GLOBAL_CHIP_ID_LSB 0x0
151 +#define VILLA_GLOBAL_CHIP_ID_MSB 0x1
152 +
153 +#define VILLA_GLOBAL_GPIO_1_INTS 0x017
154 +
155 +static int cortina_read_reg(struct phy_device *phydev, u16 regnum)
156 +{
157 + return mdiobus_read(phydev->mdio.bus, phydev->mdio.addr,
158 + MII_ADDR_C45 | regnum);
159 +}
160 +
161 +static int cortina_config_aneg(struct phy_device *phydev)
162 +{
163 + phydev->supported = SUPPORTED_10000baseT_Full;
164 + phydev->advertising = SUPPORTED_10000baseT_Full;
165 +
166 + return 0;
167 +}
168 +
169 +static int cortina_read_status(struct phy_device *phydev)
170 +{
171 + int gpio_int_status, ret = 0;
172 +
173 + gpio_int_status = cortina_read_reg(phydev, VILLA_GLOBAL_GPIO_1_INTS);
174 + if (gpio_int_status < 0) {
175 + ret = gpio_int_status;
176 + goto err;
177 + }
178 +
179 + if (gpio_int_status & 0x8) {
180 + /* link is up when edc_convergedS is set */
181 + phydev->speed = SPEED_10000;
182 + phydev->duplex = DUPLEX_FULL;
183 + phydev->link = 1;
184 + } else {
185 + phydev->link = 0;
186 + }
187 +
188 +err:
189 + return ret;
190 +}
191 +
192 +static int cortina_soft_reset(struct phy_device *phydev)
193 +{
194 + return 0;
195 +}
196 +
197 +static int cortina_probe(struct phy_device *phydev)
198 +{
199 + u32 phy_id = 0;
200 + int id_lsb = 0, id_msb = 0;
201 +
202 + /* Read device id from phy registers. */
203 + id_lsb = cortina_read_reg(phydev, VILLA_GLOBAL_CHIP_ID_LSB);
204 + if (id_lsb < 0)
205 + return -ENXIO;
206 +
207 + phy_id = id_lsb << 16;
208 +
209 + id_msb = cortina_read_reg(phydev, VILLA_GLOBAL_CHIP_ID_MSB);
210 + if (id_msb < 0)
211 + return -ENXIO;
212 +
213 + phy_id |= id_msb;
214 +
215 + /* Make sure the device tree binding matched the driver with the
216 + * right device.
217 + */
218 + if (phy_id != phydev->drv->phy_id) {
219 + phydev_err(phydev, "Error matching phy with %s driver\n",
220 + phydev->drv->name);
221 + return -ENODEV;
222 + }
223 +
224 + return 0;
225 +}
226 +
227 +static struct phy_driver cortina_driver[] = {
228 +{
229 + .phy_id = PHY_ID_CS4340,
230 + .phy_id_mask = 0xffffffff,
231 + .name = "Cortina CS4340",
232 + .config_aneg = cortina_config_aneg,
233 + .read_status = cortina_read_status,
234 + .soft_reset = cortina_soft_reset,
235 + .probe = cortina_probe,
236 +},
237 +};
238 +
239 +module_phy_driver(cortina_driver);
240 +
241 +static struct mdio_device_id __maybe_unused cortina_tbl[] = {
242 + { PHY_ID_CS4340, 0xffffffff},
243 + {},
244 +};
245 +
246 +MODULE_DEVICE_TABLE(mdio, cortina_tbl);
247 --- /dev/null
248 +++ b/drivers/net/phy/fsl_backplane.c
249 @@ -0,0 +1,1358 @@
250 +/* Freescale backplane driver.
251 + * Author: Shaohui Xie <Shaohui.Xie@freescale.com>
252 + *
253 + * Copyright 2015 Freescale Semiconductor, Inc.
254 + *
255 + * Licensed under the GPL-2 or later.
256 + */
257 +
258 +#include <linux/kernel.h>
259 +#include <linux/module.h>
260 +#include <linux/mii.h>
261 +#include <linux/mdio.h>
262 +#include <linux/ethtool.h>
263 +#include <linux/phy.h>
264 +#include <linux/io.h>
265 +#include <linux/of.h>
266 +#include <linux/of_net.h>
267 +#include <linux/of_address.h>
268 +#include <linux/of_platform.h>
269 +#include <linux/timer.h>
270 +#include <linux/delay.h>
271 +#include <linux/workqueue.h>
272 +
273 +/* XFI PCS Device Identifier */
274 +#define FSL_PCS_PHY_ID 0x0083e400
275 +
276 +/* Freescale KR PMD registers */
277 +#define FSL_KR_PMD_CTRL 0x96
278 +#define FSL_KR_PMD_STATUS 0x97
279 +#define FSL_KR_LP_CU 0x98
280 +#define FSL_KR_LP_STATUS 0x99
281 +#define FSL_KR_LD_CU 0x9a
282 +#define FSL_KR_LD_STATUS 0x9b
283 +
284 +/* Freescale KR PMD defines */
285 +#define PMD_RESET 0x1
286 +#define PMD_STATUS_SUP_STAT 0x4
287 +#define PMD_STATUS_FRAME_LOCK 0x2
288 +#define TRAIN_EN 0x3
289 +#define TRAIN_DISABLE 0x1
290 +#define RX_STAT 0x1
291 +
292 +#define FSL_KR_RX_LINK_STAT_MASK 0x1000
293 +#define FSL_XFI_PCS_10GR_SR1 0x20
294 +
295 +/* Freescale KX PCS mode register */
296 +#define FSL_PCS_IF_MODE 0x8014
297 +
298 +/* Freescale KX PCS mode register init value */
299 +#define IF_MODE_INIT 0x8
300 +
301 +/* Freescale KX/KR AN registers */
302 +#define FSL_AN_AD1 0x11
303 +#define FSL_AN_BP_STAT 0x30
304 +
305 +/* Freescale KX/KR AN registers defines */
306 +#define AN_CTRL_INIT 0x1200
307 +#define KX_AN_AD1_INIT 0x25
308 +#define KR_AN_AD1_INIT 0x85
309 +#define AN_LNK_UP_MASK 0x4
310 +#define KR_AN_MASK 0x8
311 +#define TRAIN_FAIL 0x8
312 +
313 +/* C(-1) */
314 +#define BIN_M1 0
315 +/* C(1) */
316 +#define BIN_LONG 1
317 +#define BIN_M1_SEL 6
318 +#define BIN_Long_SEL 7
319 +#define CDR_SEL_MASK 0x00070000
320 +#define BIN_SNAPSHOT_NUM 5
321 +#define BIN_M1_THRESHOLD 3
322 +#define BIN_LONG_THRESHOLD 2
323 +
324 +#define PRE_COE_SHIFT 22
325 +#define POST_COE_SHIFT 16
326 +#define ZERO_COE_SHIFT 8
327 +
328 +#define PRE_COE_MAX 0x0
329 +#define PRE_COE_MIN 0x8
330 +#define POST_COE_MAX 0x0
331 +#define POST_COE_MIN 0x10
332 +#define ZERO_COE_MAX 0x30
333 +#define ZERO_COE_MIN 0x0
334 +
335 +#define TECR0_INIT 0x24200000
336 +#define RATIO_PREQ 0x3
337 +#define RATIO_PST1Q 0xd
338 +#define RATIO_EQ 0x20
339 +
340 +#define GCR0_RESET_MASK 0x600000
341 +#define GCR1_SNP_START_MASK 0x00000040
342 +#define GCR1_CTL_SNP_START_MASK 0x00002000
343 +#define GCR1_REIDL_TH_MASK 0x00700000
344 +#define GCR1_REIDL_EX_SEL_MASK 0x000c0000
345 +#define GCR1_REIDL_ET_MAS_MASK 0x00004000
346 +#define TECR0_AMP_RED_MASK 0x0000003f
347 +
348 +#define RECR1_CTL_SNP_DONE_MASK 0x00000002
349 +#define RECR1_SNP_DONE_MASK 0x00000004
350 +#define TCSR1_SNP_DATA_MASK 0x0000ffc0
351 +#define TCSR1_SNP_DATA_SHIFT 6
352 +#define TCSR1_EQ_SNPBIN_SIGN_MASK 0x100
353 +
354 +#define RECR1_GAINK2_MASK 0x0f000000
355 +#define RECR1_GAINK2_SHIFT 24
356 +#define RECR1_GAINK3_MASK 0x000f0000
357 +#define RECR1_GAINK3_SHIFT 16
358 +#define RECR1_OFFSET_MASK 0x00003f80
359 +#define RECR1_OFFSET_SHIFT 7
360 +#define RECR1_BLW_MASK 0x00000f80
361 +#define RECR1_BLW_SHIFT 7
362 +#define EYE_CTRL_SHIFT 12
363 +#define BASE_WAND_SHIFT 10
364 +
365 +#define XGKR_TIMEOUT 1050
366 +
367 +#define INCREMENT 1
368 +#define DECREMENT 2
369 +#define TIMEOUT_LONG 3
370 +#define TIMEOUT_M1 3
371 +
372 +#define RX_READY_MASK 0x8000
373 +#define PRESET_MASK 0x2000
374 +#define INIT_MASK 0x1000
375 +#define COP1_MASK 0x30
376 +#define COP1_SHIFT 4
377 +#define COZ_MASK 0xc
378 +#define COZ_SHIFT 2
379 +#define COM1_MASK 0x3
380 +#define COM1_SHIFT 0
381 +#define REQUEST_MASK 0x3f
382 +#define LD_ALL_MASK (PRESET_MASK | INIT_MASK | \
383 + COP1_MASK | COZ_MASK | COM1_MASK)
384 +
385 +#define NEW_ALGORITHM_TRAIN_TX
386 +#ifdef NEW_ALGORITHM_TRAIN_TX
387 +#define FORCE_INC_COP1_NUMBER 0
388 +#define FORCE_INC_COM1_NUMBER 1
389 +#endif
390 +
391 +#define VAL_INVALID 0xff
392 +
393 +static const u32 preq_table[] = {0x0, 0x1, 0x3, 0x5,
394 + 0x7, 0x9, 0xb, 0xc, VAL_INVALID};
395 +static const u32 pst1q_table[] = {0x0, 0x1, 0x3, 0x5, 0x7,
396 + 0x9, 0xb, 0xd, 0xf, 0x10, VAL_INVALID};
397 +
398 +enum backplane_mode {
399 + PHY_BACKPLANE_1000BASE_KX,
400 + PHY_BACKPLANE_10GBASE_KR,
401 + PHY_BACKPLANE_INVAL
402 +};
403 +
404 +enum coe_filed {
405 + COE_COP1,
406 + COE_COZ,
407 + COE_COM
408 +};
409 +
410 +enum coe_update {
411 + COE_NOTUPDATED,
412 + COE_UPDATED,
413 + COE_MIN,
414 + COE_MAX,
415 + COE_INV
416 +};
417 +
418 +enum train_state {
419 + DETECTING_LP,
420 + TRAINED,
421 +};
422 +
423 +struct per_lane_ctrl_status {
424 + __be32 gcr0; /* 0x.000 - General Control Register 0 */
425 + __be32 gcr1; /* 0x.004 - General Control Register 1 */
426 + __be32 gcr2; /* 0x.008 - General Control Register 2 */
427 + __be32 resv1; /* 0x.00C - Reserved */
428 + __be32 recr0; /* 0x.010 - Receive Equalization Control Register 0 */
429 + __be32 recr1; /* 0x.014 - Receive Equalization Control Register 1 */
430 + __be32 tecr0; /* 0x.018 - Transmit Equalization Control Register 0 */
431 + __be32 resv2; /* 0x.01C - Reserved */
432 + __be32 tlcr0; /* 0x.020 - TTL Control Register 0 */
433 + __be32 tlcr1; /* 0x.024 - TTL Control Register 1 */
434 + __be32 tlcr2; /* 0x.028 - TTL Control Register 2 */
435 + __be32 tlcr3; /* 0x.02C - TTL Control Register 3 */
436 + __be32 tcsr0; /* 0x.030 - Test Control/Status Register 0 */
437 + __be32 tcsr1; /* 0x.034 - Test Control/Status Register 1 */
438 + __be32 tcsr2; /* 0x.038 - Test Control/Status Register 2 */
439 + __be32 tcsr3; /* 0x.03C - Test Control/Status Register 3 */
440 +};
441 +
442 +struct tx_condition {
443 + bool bin_m1_late_early;
444 + bool bin_long_late_early;
445 + bool bin_m1_stop;
446 + bool bin_long_stop;
447 + bool tx_complete;
448 + bool sent_init;
449 + int m1_min_max_cnt;
450 + int long_min_max_cnt;
451 +#ifdef NEW_ALGORITHM_TRAIN_TX
452 + int pre_inc;
453 + int post_inc;
454 +#endif
455 +};
456 +
457 +struct fsl_xgkr_inst {
458 + void *reg_base;
459 + struct phy_device *phydev;
460 + struct tx_condition tx_c;
461 + struct delayed_work xgkr_wk;
462 + enum train_state state;
463 + u32 ld_update;
464 + u32 ld_status;
465 + u32 ratio_preq;
466 + u32 ratio_pst1q;
467 + u32 adpt_eq;
468 +};
469 +
470 +static void tx_condition_init(struct tx_condition *tx_c)
471 +{
472 + tx_c->bin_m1_late_early = true;
473 + tx_c->bin_long_late_early = false;
474 + tx_c->bin_m1_stop = false;
475 + tx_c->bin_long_stop = false;
476 + tx_c->tx_complete = false;
477 + tx_c->sent_init = false;
478 + tx_c->m1_min_max_cnt = 0;
479 + tx_c->long_min_max_cnt = 0;
480 +#ifdef NEW_ALGORITHM_TRAIN_TX
481 + tx_c->pre_inc = FORCE_INC_COM1_NUMBER;
482 + tx_c->post_inc = FORCE_INC_COP1_NUMBER;
483 +#endif
484 +}
485 +
486 +void tune_tecr0(struct fsl_xgkr_inst *inst)
487 +{
488 + struct per_lane_ctrl_status *reg_base = inst->reg_base;
489 + u32 val;
490 +
491 + val = TECR0_INIT |
492 + inst->adpt_eq << ZERO_COE_SHIFT |
493 + inst->ratio_preq << PRE_COE_SHIFT |
494 + inst->ratio_pst1q << POST_COE_SHIFT;
495 +
496 + /* reset the lane */
497 + iowrite32(ioread32(&reg_base->gcr0) & ~GCR0_RESET_MASK,
498 + &reg_base->gcr0);
499 + udelay(1);
500 + iowrite32(val, &reg_base->tecr0);
501 + udelay(1);
502 + /* unreset the lane */
503 + iowrite32(ioread32(&reg_base->gcr0) | GCR0_RESET_MASK,
504 + &reg_base->gcr0);
505 + udelay(1);
506 +}
507 +
508 +static void start_lt(struct phy_device *phydev)
509 +{
510 + phy_write_mmd(phydev, MDIO_MMD_PMAPMD, FSL_KR_PMD_CTRL, TRAIN_EN);
511 +}
512 +
513 +static void stop_lt(struct phy_device *phydev)
514 +{
515 + phy_write_mmd(phydev, MDIO_MMD_PMAPMD, FSL_KR_PMD_CTRL, TRAIN_DISABLE);
516 +}
517 +
518 +static void reset_gcr0(struct fsl_xgkr_inst *inst)
519 +{
520 + struct per_lane_ctrl_status *reg_base = inst->reg_base;
521 +
522 + iowrite32(ioread32(&reg_base->gcr0) & ~GCR0_RESET_MASK,
523 + &reg_base->gcr0);
524 + udelay(1);
525 + iowrite32(ioread32(&reg_base->gcr0) | GCR0_RESET_MASK,
526 + &reg_base->gcr0);
527 + udelay(1);
528 +}
529 +
530 +void lane_set_1gkx(void *reg)
531 +{
532 + struct per_lane_ctrl_status *reg_base = reg;
533 + u32 val;
534 +
535 + /* reset the lane */
536 + iowrite32(ioread32(&reg_base->gcr0) & ~GCR0_RESET_MASK,
537 + &reg_base->gcr0);
538 + udelay(1);
539 +
540 + /* set gcr1 for 1GKX */
541 + val = ioread32(&reg_base->gcr1);
542 + val &= ~(GCR1_REIDL_TH_MASK | GCR1_REIDL_EX_SEL_MASK |
543 + GCR1_REIDL_ET_MAS_MASK);
544 + iowrite32(val, &reg_base->gcr1);
545 + udelay(1);
546 +
547 + /* set tecr0 for 1GKX */
548 + val = ioread32(&reg_base->tecr0);
549 + val &= ~TECR0_AMP_RED_MASK;
550 + iowrite32(val, &reg_base->tecr0);
551 + udelay(1);
552 +
553 + /* unreset the lane */
554 + iowrite32(ioread32(&reg_base->gcr0) | GCR0_RESET_MASK,
555 + &reg_base->gcr0);
556 + udelay(1);
557 +}
558 +
559 +static void reset_lt(struct phy_device *phydev)
560 +{
561 + phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_CTRL1, PMD_RESET);
562 + phy_write_mmd(phydev, MDIO_MMD_PMAPMD, FSL_KR_PMD_CTRL, TRAIN_DISABLE);
563 + phy_write_mmd(phydev, MDIO_MMD_PMAPMD, FSL_KR_LD_CU, 0);
564 + phy_write_mmd(phydev, MDIO_MMD_PMAPMD, FSL_KR_LD_STATUS, 0);
565 + phy_write_mmd(phydev, MDIO_MMD_PMAPMD, FSL_KR_PMD_STATUS, 0);
566 + phy_write_mmd(phydev, MDIO_MMD_PMAPMD, FSL_KR_LP_CU, 0);
567 + phy_write_mmd(phydev, MDIO_MMD_PMAPMD, FSL_KR_LP_STATUS, 0);
568 +}
569 +
570 +static void start_xgkr_state_machine(struct delayed_work *work)
571 +{
572 + queue_delayed_work(system_power_efficient_wq, work,
573 + msecs_to_jiffies(XGKR_TIMEOUT));
574 +}
575 +
576 +static void start_xgkr_an(struct phy_device *phydev)
577 +{
578 + struct fsl_xgkr_inst *inst;
579 +
580 + reset_lt(phydev);
581 + phy_write_mmd(phydev, MDIO_MMD_AN, FSL_AN_AD1, KR_AN_AD1_INIT);
582 + phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1, AN_CTRL_INIT);
583 +
584 + inst = phydev->priv;
585 +
586 + /* start the state machine */
587 + start_xgkr_state_machine(&inst->xgkr_wk);
588 +}
589 +
590 +static void start_1gkx_an(struct phy_device *phydev)
591 +{
592 + phy_write_mmd(phydev, MDIO_MMD_PCS, FSL_PCS_IF_MODE, IF_MODE_INIT);
593 + phy_write_mmd(phydev, MDIO_MMD_AN, FSL_AN_AD1, KX_AN_AD1_INIT);
594 + phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1);
595 + phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1, AN_CTRL_INIT);
596 +}
597 +
598 +static void ld_coe_status(struct fsl_xgkr_inst *inst)
599 +{
600 + phy_write_mmd(inst->phydev, MDIO_MMD_PMAPMD,
601 + FSL_KR_LD_STATUS, inst->ld_status);
602 +}
603 +
604 +static void ld_coe_update(struct fsl_xgkr_inst *inst)
605 +{
606 + dev_dbg(&inst->phydev->mdio.dev, "sending request: %x\n", inst->ld_update);
607 + phy_write_mmd(inst->phydev, MDIO_MMD_PMAPMD,
608 + FSL_KR_LD_CU, inst->ld_update);
609 +}
610 +
611 +static void init_inst(struct fsl_xgkr_inst *inst, int reset)
612 +{
613 + if (reset) {
614 + inst->ratio_preq = RATIO_PREQ;
615 + inst->ratio_pst1q = RATIO_PST1Q;
616 + inst->adpt_eq = RATIO_EQ;
617 + tune_tecr0(inst);
618 + }
619 +
620 + tx_condition_init(&inst->tx_c);
621 + inst->state = DETECTING_LP;
622 + inst->ld_status &= RX_READY_MASK;
623 + ld_coe_status(inst);
624 + inst->ld_update = 0;
625 + inst->ld_status &= ~RX_READY_MASK;
626 + ld_coe_status(inst);
627 +}
628 +
629 +#ifdef NEW_ALGORITHM_TRAIN_TX
630 +static int get_median_gaink2(u32 *reg)
631 +{
632 + int gaink2_snap_shot[BIN_SNAPSHOT_NUM];
633 + u32 rx_eq_snp;
634 + struct per_lane_ctrl_status *reg_base;
635 + int timeout;
636 + int i, j, tmp, pos;
637 +
638 + reg_base = (struct per_lane_ctrl_status *)reg;
639 +
640 + for (i = 0; i < BIN_SNAPSHOT_NUM; i++) {
641 + /* wait until RECR1_CTL_SNP_DONE_MASK has cleared */
642 + timeout = 100;
643 + while (ioread32(&reg_base->recr1) &
644 + RECR1_CTL_SNP_DONE_MASK) {
645 + udelay(1);
646 + timeout--;
647 + if (timeout == 0)
648 + break;
649 + }
650 +
651 + /* start snap shot */
652 + iowrite32((ioread32(&reg_base->gcr1) |
653 + GCR1_CTL_SNP_START_MASK),
654 + &reg_base->gcr1);
655 +
656 + /* wait for SNP done */
657 + timeout = 100;
658 + while (!(ioread32(&reg_base->recr1) &
659 + RECR1_CTL_SNP_DONE_MASK)) {
660 + udelay(1);
661 + timeout--;
662 + if (timeout == 0)
663 + break;
664 + }
665 +
666 + /* read and save the snap shot */
667 + rx_eq_snp = ioread32(&reg_base->recr1);
668 + gaink2_snap_shot[i] = (rx_eq_snp & RECR1_GAINK2_MASK) >>
669 + RECR1_GAINK2_SHIFT;
670 +
671 + /* terminate the snap shot by clearing GCR1[REQ_CTL_SNP] */
672 + iowrite32((ioread32(&reg_base->gcr1) &
673 + ~GCR1_CTL_SNP_START_MASK),
674 + &reg_base->gcr1);
675 + }
676 +
677 + /* get the median of the 5 snap shots */
678 + for (i = 0; i < BIN_SNAPSHOT_NUM - 1; i++) {
679 + tmp = gaink2_snap_shot[i];
680 + pos = i;
681 + for (j = i + 1; j < BIN_SNAPSHOT_NUM; j++) {
682 + if (gaink2_snap_shot[j] < tmp) {
683 + tmp = gaink2_snap_shot[j];
684 + pos = j;
685 + }
686 + }
687 +
688 + gaink2_snap_shot[pos] = gaink2_snap_shot[i];
689 + gaink2_snap_shot[i] = tmp;
690 + }
691 +
692 + return gaink2_snap_shot[2];
693 +}
694 +#endif
695 +
696 +static bool is_bin_early(int bin_sel, void *reg)
697 +{
698 + bool early = false;
699 + int bin_snap_shot[BIN_SNAPSHOT_NUM];
700 + int i, negative_count = 0;
701 + struct per_lane_ctrl_status *reg_base = reg;
702 + int timeout;
703 +
704 + for (i = 0; i < BIN_SNAPSHOT_NUM; i++) {
705 + /* wait until RECR1_SNP_DONE_MASK has cleared */
706 + timeout = 100;
707 + while ((ioread32(&reg_base->recr1) & RECR1_SNP_DONE_MASK)) {
708 + udelay(1);
709 + timeout--;
710 + if (timeout == 0)
711 + break;
712 + }
713 +
714 + /* set TCSR1[CDR_SEL] to BinM1/BinLong */
715 + if (bin_sel == BIN_M1) {
716 + iowrite32((ioread32(&reg_base->tcsr1) &
717 + ~CDR_SEL_MASK) | BIN_M1_SEL,
718 + &reg_base->tcsr1);
719 + } else {
720 + iowrite32((ioread32(&reg_base->tcsr1) &
721 + ~CDR_SEL_MASK) | BIN_Long_SEL,
722 + &reg_base->tcsr1);
723 + }
724 +
725 + /* start snap shot */
726 + iowrite32(ioread32(&reg_base->gcr1) | GCR1_SNP_START_MASK,
727 + &reg_base->gcr1);
728 +
729 + /* wait for SNP done */
730 + timeout = 100;
731 + while (!(ioread32(&reg_base->recr1) & RECR1_SNP_DONE_MASK)) {
732 + udelay(1);
733 + timeout--;
734 + if (timeout == 0)
735 + break;
736 + }
737 +
738 + /* read and save the snap shot */
739 + bin_snap_shot[i] = (ioread32(&reg_base->tcsr1) &
740 + TCSR1_SNP_DATA_MASK) >> TCSR1_SNP_DATA_SHIFT;
741 + if (bin_snap_shot[i] & TCSR1_EQ_SNPBIN_SIGN_MASK)
742 + negative_count++;
743 +
744 + /* terminate the snap shot by clearing GCR1[REQ_CTL_SNP] */
745 + iowrite32(ioread32(&reg_base->gcr1) & ~GCR1_SNP_START_MASK,
746 + &reg_base->gcr1);
747 + }
748 +
749 + if (((bin_sel == BIN_M1) && (negative_count > BIN_M1_THRESHOLD)) ||
750 + ((bin_sel == BIN_LONG && (negative_count > BIN_LONG_THRESHOLD)))) {
751 + early = true;
752 + }
753 +
754 + return early;
755 +}
756 +
757 +static void train_tx(struct fsl_xgkr_inst *inst)
758 +{
759 + struct phy_device *phydev = inst->phydev;
760 + struct tx_condition *tx_c = &inst->tx_c;
761 + bool bin_m1_early, bin_long_early;
762 + u32 lp_status, old_ld_update;
763 + u32 status_cop1, status_coz, status_com1;
764 + u32 req_cop1, req_coz, req_com1, req_preset, req_init;
765 + u32 temp;
766 +#ifdef NEW_ALGORITHM_TRAIN_TX
767 + u32 median_gaink2;
768 +#endif
769 +
770 +recheck:
771 + if (tx_c->bin_long_stop && tx_c->bin_m1_stop) {
772 + tx_c->tx_complete = true;
773 + inst->ld_status |= RX_READY_MASK;
774 + ld_coe_status(inst);
775 + /* tell LP we are ready */
776 + phy_write_mmd(phydev, MDIO_MMD_PMAPMD,
777 + FSL_KR_PMD_STATUS, RX_STAT);
778 + return;
779 + }
780 +
781 + /* We start by checking the current LP status. If we got any responses,
782 + * we can clear up the appropriate update request so that the
783 + * subsequent code may easily issue new update requests if needed.
784 + */
785 + lp_status = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, FSL_KR_LP_STATUS) &
786 + REQUEST_MASK;
787 + status_cop1 = (lp_status & COP1_MASK) >> COP1_SHIFT;
788 + status_coz = (lp_status & COZ_MASK) >> COZ_SHIFT;
789 + status_com1 = (lp_status & COM1_MASK) >> COM1_SHIFT;
790 +
791 + old_ld_update = inst->ld_update;
792 + req_cop1 = (old_ld_update & COP1_MASK) >> COP1_SHIFT;
793 + req_coz = (old_ld_update & COZ_MASK) >> COZ_SHIFT;
794 + req_com1 = (old_ld_update & COM1_MASK) >> COM1_SHIFT;
795 + req_preset = old_ld_update & PRESET_MASK;
796 + req_init = old_ld_update & INIT_MASK;
797 +
798 + /* IEEE802.3-2008, 72.6.10.2.3.1
799 + * We may clear PRESET when all coefficients show UPDATED or MAX.
800 + */
801 + if (req_preset) {
802 + if ((status_cop1 == COE_UPDATED || status_cop1 == COE_MAX) &&
803 + (status_coz == COE_UPDATED || status_coz == COE_MAX) &&
804 + (status_com1 == COE_UPDATED || status_com1 == COE_MAX)) {
805 + inst->ld_update &= ~PRESET_MASK;
806 + }
807 + }
808 +
809 + /* IEEE802.3-2008, 72.6.10.2.3.2
810 + * We may clear INITIALIZE when no coefficients show NOT UPDATED.
811 + */
812 + if (req_init) {
813 + if (status_cop1 != COE_NOTUPDATED &&
814 + status_coz != COE_NOTUPDATED &&
815 + status_com1 != COE_NOTUPDATED) {
816 + inst->ld_update &= ~INIT_MASK;
817 + }
818 + }
819 +
820 + /* IEEE802.3-2008, 72.6.10.2.3.2
821 + * We send INITIALIZE to the other side to ensure default settings
822 + * for the LP. Naturally, we should do this only once.
823 + */
824 + if (!tx_c->sent_init) {
825 + if (!lp_status && !(old_ld_update & (LD_ALL_MASK))) {
826 + inst->ld_update = INIT_MASK;
827 + tx_c->sent_init = true;
828 + }
829 + }
830 +
831 + /* IEEE802.3-2008, 72.6.10.2.3.3
832 + * We set coefficient requests to HOLD when we get the information
833 + * about any updates. On clearing our prior response, we also update
834 + * our internal status.
835 + */
836 + if (status_cop1 != COE_NOTUPDATED) {
837 + if (req_cop1) {
838 + inst->ld_update &= ~COP1_MASK;
839 +#ifdef NEW_ALGORITHM_TRAIN_TX
840 + if (tx_c->post_inc) {
841 + if (req_cop1 == INCREMENT &&
842 + status_cop1 == COE_MAX) {
843 + tx_c->post_inc = 0;
844 + tx_c->bin_long_stop = true;
845 + tx_c->bin_m1_stop = true;
846 + } else {
847 + tx_c->post_inc -= 1;
848 + }
849 +
850 + ld_coe_update(inst);
851 + goto recheck;
852 + }
853 +#endif
854 + if ((req_cop1 == DECREMENT && status_cop1 == COE_MIN) ||
855 + (req_cop1 == INCREMENT && status_cop1 == COE_MAX)) {
856 + dev_dbg(&inst->phydev->mdio.dev, "COP1 hit limit %s",
857 + (status_cop1 == COE_MIN) ?
858 + "DEC MIN" : "INC MAX");
859 + tx_c->long_min_max_cnt++;
860 + if (tx_c->long_min_max_cnt >= TIMEOUT_LONG) {
861 + tx_c->bin_long_stop = true;
862 + ld_coe_update(inst);
863 + goto recheck;
864 + }
865 + }
866 + }
867 + }
868 +
869 + if (status_coz != COE_NOTUPDATED) {
870 + if (req_coz)
871 + inst->ld_update &= ~COZ_MASK;
872 + }
873 +
874 + if (status_com1 != COE_NOTUPDATED) {
875 + if (req_com1) {
876 + inst->ld_update &= ~COM1_MASK;
877 +#ifdef NEW_ALGORITHM_TRAIN_TX
878 + if (tx_c->pre_inc) {
879 + if (req_com1 == INCREMENT &&
880 + status_com1 == COE_MAX)
881 + tx_c->pre_inc = 0;
882 + else
883 + tx_c->pre_inc -= 1;
884 +
885 + ld_coe_update(inst);
886 + goto recheck;
887 + }
888 +#endif
889 + /* Stop if we have reached the limit for a parameter. */
890 + if ((req_com1 == DECREMENT && status_com1 == COE_MIN) ||
891 + (req_com1 == INCREMENT && status_com1 == COE_MAX)) {
892 + dev_dbg(&inst->phydev->mdio.dev, "COM1 hit limit %s",
893 + (status_com1 == COE_MIN) ?
894 + "DEC MIN" : "INC MAX");
895 + tx_c->m1_min_max_cnt++;
896 + if (tx_c->m1_min_max_cnt >= TIMEOUT_M1) {
897 + tx_c->bin_m1_stop = true;
898 + ld_coe_update(inst);
899 + goto recheck;
900 + }
901 + }
902 + }
903 + }
904 +
905 + if (old_ld_update != inst->ld_update) {
906 + ld_coe_update(inst);
907 + /* Redo these status checks and updates until we have no more
908 + * changes, to speed up the overall process.
909 + */
910 + goto recheck;
911 + }
912 +
913 + /* Do nothing if we have a pending request. */
914 + if ((req_coz || req_com1 || req_cop1))
915 + return;
916 + else if (lp_status)
917 + /* No pending request but LP status was not reverted to
918 + * not updated.
919 + */
920 + return;
921 +
922 +#ifdef NEW_ALGORITHM_TRAIN_TX
923 + if (!(inst->ld_update & (PRESET_MASK | INIT_MASK))) {
924 + if (tx_c->pre_inc) {
925 + inst->ld_update = INCREMENT << COM1_SHIFT;
926 + ld_coe_update(inst);
927 + return;
928 + }
929 +
930 + if (status_cop1 != COE_MAX) {
931 + median_gaink2 = get_median_gaink2(inst->reg_base);
932 + if (median_gaink2 == 0xf) {
933 + tx_c->post_inc = 1;
934 + } else {
935 + /* Gaink2 median lower than "F" */
936 + tx_c->bin_m1_stop = true;
937 + tx_c->bin_long_stop = true;
938 + goto recheck;
939 + }
940 + } else {
941 + /* C1 MAX */
942 + tx_c->bin_m1_stop = true;
943 + tx_c->bin_long_stop = true;
944 + goto recheck;
945 + }
946 +
947 + if (tx_c->post_inc) {
948 + inst->ld_update = INCREMENT << COP1_SHIFT;
949 + ld_coe_update(inst);
950 + return;
951 + }
952 + }
953 +#endif
954 +
955 + /* snapshot and select bin */
956 + bin_m1_early = is_bin_early(BIN_M1, inst->reg_base);
957 + bin_long_early = is_bin_early(BIN_LONG, inst->reg_base);
958 +
959 + if (!tx_c->bin_m1_stop && !tx_c->bin_m1_late_early && bin_m1_early) {
960 + tx_c->bin_m1_stop = true;
961 + goto recheck;
962 + }
963 +
964 + if (!tx_c->bin_long_stop &&
965 + tx_c->bin_long_late_early && !bin_long_early) {
966 + tx_c->bin_long_stop = true;
967 + goto recheck;
968 + }
969 +
970 + /* IEEE802.3-2008, 72.6.10.2.3.3
971 + * We only request coefficient updates when no PRESET/INITIALIZE is
972 + * pending. We also only request coefficient updates when the
973 + * corresponding status is NOT UPDATED and nothing is pending.
974 + */
975 + if (!(inst->ld_update & (PRESET_MASK | INIT_MASK))) {
976 + if (!tx_c->bin_long_stop) {
977 + /* BinM1 correction means changing COM1 */
978 + if (!status_com1 && !(inst->ld_update & COM1_MASK)) {
979 + /* Avoid BinM1Late by requesting an
980 + * immediate decrement.
981 + */
982 + if (!bin_m1_early) {
983 + /* request decrement c(-1) */
984 + temp = DECREMENT << COM1_SHIFT;
985 + inst->ld_update = temp;
986 + ld_coe_update(inst);
987 + tx_c->bin_m1_late_early = bin_m1_early;
988 + return;
989 + }
990 + }
991 +
992 + /* BinLong correction means changing COP1 */
993 + if (!status_cop1 && !(inst->ld_update & COP1_MASK)) {
994 + /* Locate BinLong transition point (if any)
995 + * while avoiding BinM1Late.
996 + */
997 + if (bin_long_early) {
998 + /* request increment c(1) */
999 + temp = INCREMENT << COP1_SHIFT;
1000 + inst->ld_update = temp;
1001 + } else {
1002 + /* request decrement c(1) */
1003 + temp = DECREMENT << COP1_SHIFT;
1004 + inst->ld_update = temp;
1005 + }
1006 +
1007 + ld_coe_update(inst);
1008 + tx_c->bin_long_late_early = bin_long_early;
1009 + }
1010 + /* We try to finish BinLong before we do BinM1 */
1011 + return;
1012 + }
1013 +
1014 + if (!tx_c->bin_m1_stop) {
1015 + /* BinM1 correction means changing COM1 */
1016 + if (!status_com1 && !(inst->ld_update & COM1_MASK)) {
1017 + /* Locate BinM1 transition point (if any) */
1018 + if (bin_m1_early) {
1019 + /* request increment c(-1) */
1020 + temp = INCREMENT << COM1_SHIFT;
1021 + inst->ld_update = temp;
1022 + } else {
1023 + /* request decrement c(-1) */
1024 + temp = DECREMENT << COM1_SHIFT;
1025 + inst->ld_update = temp;
1026 + }
1027 +
1028 + ld_coe_update(inst);
1029 + tx_c->bin_m1_late_early = bin_m1_early;
1030 + }
1031 + }
1032 + }
1033 +}
1034 +
1035 +static int is_link_up(struct phy_device *phydev)
1036 +{
1037 + int val;
1038 +
1039 + phy_read_mmd(phydev, MDIO_MMD_PCS, FSL_XFI_PCS_10GR_SR1);
1040 + val = phy_read_mmd(phydev, MDIO_MMD_PCS, FSL_XFI_PCS_10GR_SR1);
1041 +
1042 + return (val & FSL_KR_RX_LINK_STAT_MASK) ? 1 : 0;
1043 +}
1044 +
1045 +static int is_link_training_fail(struct phy_device *phydev)
1046 +{
1047 + int val;
1048 + int timeout = 100;
1049 +
1050 + val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, FSL_KR_PMD_STATUS);
1051 + if (!(val & TRAIN_FAIL) && (val & RX_STAT)) {
1052 + /* check LNK_STAT for sure */
1053 + while (timeout--) {
1054 + if (is_link_up(phydev))
1055 + return 0;
1056 +
1057 + usleep_range(100, 500);
1058 + }
1059 + }
1060 +
1061 + return 1;
1062 +}
1063 +
1064 +static int check_rx(struct phy_device *phydev)
1065 +{
1066 + return phy_read_mmd(phydev, MDIO_MMD_PMAPMD, FSL_KR_LP_STATUS) &
1067 + RX_READY_MASK;
1068 +}
1069 +
1070 +/* Coefficient values have hardware restrictions */
1071 +static int is_ld_valid(struct fsl_xgkr_inst *inst)
1072 +{
1073 + u32 ratio_pst1q = inst->ratio_pst1q;
1074 + u32 adpt_eq = inst->adpt_eq;
1075 + u32 ratio_preq = inst->ratio_preq;
1076 +
1077 + if ((ratio_pst1q + adpt_eq + ratio_preq) > 48)
1078 + return 0;
1079 +
1080 + if (((ratio_pst1q + adpt_eq + ratio_preq) * 4) >=
1081 + ((adpt_eq - ratio_pst1q - ratio_preq) * 17))
1082 + return 0;
1083 +
1084 + if (ratio_preq > ratio_pst1q)
1085 + return 0;
1086 +
1087 + if (ratio_preq > 8)
1088 + return 0;
1089 +
1090 + if (adpt_eq < 26)
1091 + return 0;
1092 +
1093 + if (ratio_pst1q > 16)
1094 + return 0;
1095 +
1096 + return 1;
1097 +}
1098 +
1099 +static int is_value_allowed(const u32 *val_table, u32 val)
1100 +{
1101 + int i;
1102 +
1103 + for (i = 0;; i++) {
1104 + if (*(val_table + i) == VAL_INVALID)
1105 + return 0;
1106 + if (*(val_table + i) == val)
1107 + return 1;
1108 + }
1109 +}
1110 +
1111 +static int inc_dec(struct fsl_xgkr_inst *inst, int field, int request)
1112 +{
1113 + u32 ld_limit[3], ld_coe[3], step[3];
1114 +
1115 + ld_coe[0] = inst->ratio_pst1q;
1116 + ld_coe[1] = inst->adpt_eq;
1117 + ld_coe[2] = inst->ratio_preq;
1118 +
1119 + /* Information specific to the Freescale SerDes for 10GBase-KR:
1120 + * Incrementing C(+1) means *decrementing* RATIO_PST1Q
1121 + * Incrementing C(0) means incrementing ADPT_EQ
1122 + * Incrementing C(-1) means *decrementing* RATIO_PREQ
1123 + */
1124 + step[0] = -1;
1125 + step[1] = 1;
1126 + step[2] = -1;
1127 +
1128 + switch (request) {
1129 + case INCREMENT:
1130 + ld_limit[0] = POST_COE_MAX;
1131 + ld_limit[1] = ZERO_COE_MAX;
1132 + ld_limit[2] = PRE_COE_MAX;
1133 + if (ld_coe[field] != ld_limit[field])
1134 + ld_coe[field] += step[field];
1135 + else
1136 + /* MAX */
1137 + return 2;
1138 + break;
1139 + case DECREMENT:
1140 + ld_limit[0] = POST_COE_MIN;
1141 + ld_limit[1] = ZERO_COE_MIN;
1142 + ld_limit[2] = PRE_COE_MIN;
1143 + if (ld_coe[field] != ld_limit[field])
1144 + ld_coe[field] -= step[field];
1145 + else
1146 + /* MIN */
1147 + return 1;
1148 + break;
1149 + default:
1150 + break;
1151 + }
1152 +
1153 + if (is_ld_valid(inst)) {
1154 + /* accept new ld */
1155 + inst->ratio_pst1q = ld_coe[0];
1156 + inst->adpt_eq = ld_coe[1];
1157 + inst->ratio_preq = ld_coe[2];
1158 + /* only some values for preq and pst1q can be used.
1159 + * for preq: 0x0, 0x1, 0x3, 0x5, 0x7, 0x9, 0xb, 0xc.
1160 + * for pst1q: 0x0, 0x1, 0x3, 0x5, 0x7, 0x9, 0xb, 0xd, 0xf, 0x10.
1161 + */
1162 + if (!is_value_allowed((const u32 *)&preq_table, ld_coe[2])) {
1163 + dev_dbg(&inst->phydev->mdio.dev,
1164 + "preq skipped value: %d\n", ld_coe[2]);
1165 + return 0;
1166 + }
1167 +
1168 + if (!is_value_allowed((const u32 *)&pst1q_table, ld_coe[0])) {
1169 + dev_dbg(&inst->phydev->mdio.dev,
1170 + "pst1q skipped value: %d\n", ld_coe[0]);
1171 + return 0;
1172 + }
1173 +
1174 + tune_tecr0(inst);
1175 + } else {
1176 + if (request == DECREMENT)
1177 + /* MIN */
1178 + return 1;
1179 + if (request == INCREMENT)
1180 + /* MAX */
1181 + return 2;
1182 + }
1183 +
1184 + return 0;
1185 +}
1186 +
1187 +static void min_max_updated(struct fsl_xgkr_inst *inst, int field, int new_ld)
1188 +{
1189 + u32 ld_coe[] = {COE_UPDATED, COE_MIN, COE_MAX};
1190 + u32 mask, val;
1191 +
1192 + switch (field) {
1193 + case COE_COP1:
1194 + mask = COP1_MASK;
1195 + val = ld_coe[new_ld] << COP1_SHIFT;
1196 + break;
1197 + case COE_COZ:
1198 + mask = COZ_MASK;
1199 + val = ld_coe[new_ld] << COZ_SHIFT;
1200 + break;
1201 + case COE_COM:
1202 + mask = COM1_MASK;
1203 + val = ld_coe[new_ld] << COM1_SHIFT;
1204 + break;
1205 + default:
1206 + return;
1207 + }
1208 +
1209 + inst->ld_status &= ~mask;
1210 + inst->ld_status |= val;
1211 +}
1212 +
1213 +static void check_request(struct fsl_xgkr_inst *inst, int request)
1214 +{
1215 + int cop1_req, coz_req, com_req;
1216 + int old_status, new_ld_sta;
1217 +
1218 + cop1_req = (request & COP1_MASK) >> COP1_SHIFT;
1219 + coz_req = (request & COZ_MASK) >> COZ_SHIFT;
1220 + com_req = (request & COM1_MASK) >> COM1_SHIFT;
1221 +
1222 + /* IEEE802.3-2008, 72.6.10.2.5
1223 + * Ensure we only act on INCREMENT/DECREMENT when we are in NOT UPDATED
1224 + */
1225 + old_status = inst->ld_status;
1226 +
1227 + if (cop1_req && !(inst->ld_status & COP1_MASK)) {
1228 + new_ld_sta = inc_dec(inst, COE_COP1, cop1_req);
1229 + min_max_updated(inst, COE_COP1, new_ld_sta);
1230 + }
1231 +
1232 + if (coz_req && !(inst->ld_status & COZ_MASK)) {
1233 + new_ld_sta = inc_dec(inst, COE_COZ, coz_req);
1234 + min_max_updated(inst, COE_COZ, new_ld_sta);
1235 + }
1236 +
1237 + if (com_req && !(inst->ld_status & COM1_MASK)) {
1238 + new_ld_sta = inc_dec(inst, COE_COM, com_req);
1239 + min_max_updated(inst, COE_COM, new_ld_sta);
1240 + }
1241 +
1242 + if (old_status != inst->ld_status)
1243 + ld_coe_status(inst);
1244 +}
1245 +
1246 +static void preset(struct fsl_xgkr_inst *inst)
1247 +{
1248 + /* These are all MAX values from the IEEE802.3 perspective. */
1249 + inst->ratio_pst1q = POST_COE_MAX;
1250 + inst->adpt_eq = ZERO_COE_MAX;
1251 + inst->ratio_preq = PRE_COE_MAX;
1252 +
1253 + tune_tecr0(inst);
1254 + inst->ld_status &= ~(COP1_MASK | COZ_MASK | COM1_MASK);
1255 + inst->ld_status |= COE_MAX << COP1_SHIFT |
1256 + COE_MAX << COZ_SHIFT |
1257 + COE_MAX << COM1_SHIFT;
1258 + ld_coe_status(inst);
1259 +}
1260 +
1261 +static void initialize(struct fsl_xgkr_inst *inst)
1262 +{
1263 + inst->ratio_preq = RATIO_PREQ;
1264 + inst->ratio_pst1q = RATIO_PST1Q;
1265 + inst->adpt_eq = RATIO_EQ;
1266 +
1267 + tune_tecr0(inst);
1268 + inst->ld_status &= ~(COP1_MASK | COZ_MASK | COM1_MASK);
1269 + inst->ld_status |= COE_UPDATED << COP1_SHIFT |
1270 + COE_UPDATED << COZ_SHIFT |
1271 + COE_UPDATED << COM1_SHIFT;
1272 + ld_coe_status(inst);
1273 +}
1274 +
1275 +static void train_rx(struct fsl_xgkr_inst *inst)
1276 +{
1277 + struct phy_device *phydev = inst->phydev;
1278 + int request, old_ld_status;
1279 +
1280 + /* get request from LP */
1281 + request = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, FSL_KR_LP_CU) &
1282 + (LD_ALL_MASK);
1283 + old_ld_status = inst->ld_status;
1284 +
1285 + /* IEEE802.3-2008, 72.6.10.2.5
1286 + * Ensure we always go to NOT UPDATED for status reporting in
1287 + * response to HOLD requests.
1288 + * IEEE802.3-2008, 72.6.10.2.3.1/2
1289 + * ... but only if PRESET/INITIALIZE are not active to ensure
1290 + * we keep status until they are released.
1291 + */
1292 + if (!(request & (PRESET_MASK | INIT_MASK))) {
1293 + if (!(request & COP1_MASK))
1294 + inst->ld_status &= ~COP1_MASK;
1295 +
1296 + if (!(request & COZ_MASK))
1297 + inst->ld_status &= ~COZ_MASK;
1298 +
1299 + if (!(request & COM1_MASK))
1300 + inst->ld_status &= ~COM1_MASK;
1301 +
1302 + if (old_ld_status != inst->ld_status)
1303 + ld_coe_status(inst);
1304 + }
1305 +
1306 + /* As soon as the LP shows ready, no need to do any more updates. */
1307 + if (check_rx(phydev)) {
1308 + /* LP receiver is ready */
1309 + if (inst->ld_status & (COP1_MASK | COZ_MASK | COM1_MASK)) {
1310 + inst->ld_status &= ~(COP1_MASK | COZ_MASK | COM1_MASK);
1311 + ld_coe_status(inst);
1312 + }
1313 + } else {
1314 + /* IEEE802.3-2008, 72.6.10.2.3.1/2
1315 + * only act on PRESET/INITIALIZE if all status is NOT UPDATED.
1316 + */
1317 + if (request & (PRESET_MASK | INIT_MASK)) {
1318 + if (!(inst->ld_status &
1319 + (COP1_MASK | COZ_MASK | COM1_MASK))) {
1320 + if (request & PRESET_MASK)
1321 + preset(inst);
1322 +
1323 + if (request & INIT_MASK)
1324 + initialize(inst);
1325 + }
1326 + }
1327 +
1328 + /* LP coefficients are not in HOLD */
1329 + if (request & REQUEST_MASK)
1330 + check_request(inst, request & REQUEST_MASK);
1331 + }
1332 +}
1333 +
1334 +static void xgkr_start_train(struct phy_device *phydev)
1335 +{
1336 + struct fsl_xgkr_inst *inst = phydev->priv;
1337 + struct tx_condition *tx_c = &inst->tx_c;
1338 + int val = 0, i;
1339 + int lt_state;
1340 + unsigned long dead_line;
1341 + int rx_ok, tx_ok;
1342 +
1343 + init_inst(inst, 0);
1344 + start_lt(phydev);
1345 +
1346 + for (i = 0; i < 2;) {
1347 + dead_line = jiffies + msecs_to_jiffies(500);
1348 + while (time_before(jiffies, dead_line)) {
1349 + val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD,
1350 + FSL_KR_PMD_STATUS);
1351 + if (val & TRAIN_FAIL) {
1352 + /* LT already failed; reset the lane to avoid
1353 + * it hanging, then start LT again.
1354 + */
1355 + reset_gcr0(inst);
1356 + start_lt(phydev);
1357 + } else if ((val & PMD_STATUS_SUP_STAT) &&
1358 + (val & PMD_STATUS_FRAME_LOCK))
1359 + break;
1360 + usleep_range(100, 500);
1361 + }
1362 +
1363 + if (!((val & PMD_STATUS_FRAME_LOCK) &&
1364 + (val & PMD_STATUS_SUP_STAT))) {
1365 + i++;
1366 + continue;
1367 + }
1368 +
1369 + /* init process */
1370 + rx_ok = false;
1371 + tx_ok = false;
1372 + /* LT should finish within 500ms, either failed or OK. */
1373 + dead_line = jiffies + msecs_to_jiffies(500);
1374 +
1375 + while (time_before(jiffies, dead_line)) {
1376 + /* check if the LT is already failed */
1377 + lt_state = phy_read_mmd(phydev, MDIO_MMD_PMAPMD,
1378 + FSL_KR_PMD_STATUS);
1379 + if (lt_state & TRAIN_FAIL) {
1380 + reset_gcr0(inst);
1381 + break;
1382 + }
1383 +
1384 + rx_ok = check_rx(phydev);
1385 + tx_ok = tx_c->tx_complete;
1386 +
1387 + if (rx_ok && tx_ok)
1388 + break;
1389 +
1390 + if (!rx_ok)
1391 + train_rx(inst);
1392 +
1393 + if (!tx_ok)
1394 + train_tx(inst);
1395 +
1396 + usleep_range(100, 500);
1397 + }
1398 +
1399 + i++;
1400 + /* check LT result */
1401 + if (is_link_training_fail(phydev)) {
1402 + init_inst(inst, 0);
1403 + continue;
1404 + } else {
1405 + stop_lt(phydev);
1406 + inst->state = TRAINED;
1407 + break;
1408 + }
1409 + }
1410 +}
1411 +
1412 +static void xgkr_state_machine(struct work_struct *work)
1413 +{
1414 + struct delayed_work *dwork = to_delayed_work(work);
1415 + struct fsl_xgkr_inst *inst = container_of(dwork,
1416 + struct fsl_xgkr_inst,
1417 + xgkr_wk);
1418 + struct phy_device *phydev = inst->phydev;
1419 + int an_state;
1420 + bool needs_train = false;
1421 +
1422 + mutex_lock(&phydev->lock);
1423 +
1424 + switch (inst->state) {
1425 + case DETECTING_LP:
1426 + phy_read_mmd(phydev, MDIO_MMD_AN, FSL_AN_BP_STAT);
1427 + an_state = phy_read_mmd(phydev, MDIO_MMD_AN, FSL_AN_BP_STAT);
1428 + if ((an_state & KR_AN_MASK))
1429 + needs_train = true;
1430 + break;
1431 + case TRAINED:
1432 + if (!is_link_up(phydev)) {
1433 + dev_info(&phydev->mdio.dev,
1434 + "Detect hotplug, restart training\n");
1435 + init_inst(inst, 1);
1436 + start_xgkr_an(phydev);
1437 + inst->state = DETECTING_LP;
1438 + }
1439 + break;
1440 + }
1441 +
1442 + if (needs_train)
1443 + xgkr_start_train(phydev);
1444 +
1445 + mutex_unlock(&phydev->lock);
1446 + queue_delayed_work(system_power_efficient_wq, &inst->xgkr_wk,
1447 + msecs_to_jiffies(XGKR_TIMEOUT));
1448 +}
1449 +
1450 +static int fsl_backplane_probe(struct phy_device *phydev)
1451 +{
1452 + struct fsl_xgkr_inst *xgkr_inst;
1453 + struct device_node *phy_node, *lane_node;
1454 + struct resource res_lane;
1455 + const char *bm;
1456 + int ret;
1457 + int bp_mode;
1458 + u32 lane[2];
1459 +
1460 + phy_node = phydev->mdio.dev.of_node;
1461 + bp_mode = of_property_read_string(phy_node, "backplane-mode", &bm);
1462 + if (bp_mode < 0)
1463 + return 0;
1464 +
1465 + if (!strcasecmp(bm, "1000base-kx")) {
1466 + bp_mode = PHY_BACKPLANE_1000BASE_KX;
1467 + } else if (!strcasecmp(bm, "10gbase-kr")) {
1468 + bp_mode = PHY_BACKPLANE_10GBASE_KR;
1469 + } else {
1470 + dev_err(&phydev->mdio.dev, "Unknown backplane-mode\n");
1471 + return -EINVAL;
1472 + }
1473 +
1474 + lane_node = of_parse_phandle(phy_node, "fsl,lane-handle", 0);
1475 + if (!lane_node) {
1476 + dev_err(&phydev->mdio.dev, "parse fsl,lane-handle failed\n");
1477 + return -EINVAL;
1478 + }
1479 +
1480 + ret = of_address_to_resource(lane_node, 0, &res_lane);
1481 + if (ret) {
1482 + dev_err(&phydev->mdio.dev, "could not obtain memory map\n");
1483 + return ret;
1484 + }
1485 +
1486 + of_node_put(lane_node);
1487 + ret = of_property_read_u32_array(phy_node, "fsl,lane-reg",
1488 + (u32 *)&lane, 2);
1489 + if (ret) {
1490 + dev_err(&phydev->mdio.dev, "could not get fsl,lane-reg\n");
1491 + return -EINVAL;
1492 + }
1493 +
1494 + phydev->priv = devm_ioremap_nocache(&phydev->mdio.dev,
1495 + res_lane.start + lane[0],
1496 + lane[1]);
1497 + if (!phydev->priv) {
1498 + dev_err(&phydev->mdio.dev, "ioremap_nocache failed\n");
1499 + return -ENOMEM;
1500 + }
1501 +
1502 + if (bp_mode == PHY_BACKPLANE_1000BASE_KX) {
1503 + phydev->speed = SPEED_1000;
1504 + /* configure the lane for 1000BASE-KX */
1505 + lane_set_1gkx(phydev->priv);
1506 + return 0;
1507 + }
1508 +
1509 + xgkr_inst = devm_kzalloc(&phydev->mdio.dev,
1510 + sizeof(*xgkr_inst), GFP_KERNEL);
1511 + if (!xgkr_inst)
1512 + return -ENOMEM;
1513 +
1514 + xgkr_inst->reg_base = phydev->priv;
1515 + xgkr_inst->phydev = phydev;
1516 + phydev->priv = xgkr_inst;
1517 +
1518 + if (bp_mode == PHY_BACKPLANE_10GBASE_KR) {
1519 + phydev->speed = SPEED_10000;
1520 + INIT_DELAYED_WORK(&xgkr_inst->xgkr_wk, xgkr_state_machine);
1521 + }
1522 +
1523 + return 0;
1524 +}
1525 +
1526 +static int fsl_backplane_aneg_done(struct phy_device *phydev)
1527 +{
1528 + return 1;
1529 +}
1530 +
1531 +static int fsl_backplane_config_aneg(struct phy_device *phydev)
1532 +{
1533 + if (phydev->speed == SPEED_10000) {
1534 + phydev->supported |= SUPPORTED_10000baseKR_Full;
1535 + start_xgkr_an(phydev);
1536 + } else if (phydev->speed == SPEED_1000) {
1537 + phydev->supported |= SUPPORTED_1000baseKX_Full;
1538 + start_1gkx_an(phydev);
1539 + }
1540 +
1541 + phydev->advertising = phydev->supported;
1542 + phydev->duplex = 1;
1543 +
1544 + return 0;
1545 +}
1546 +
1547 +static int fsl_backplane_suspend(struct phy_device *phydev)
1548 +{
1549 + if (phydev->speed == SPEED_10000) {
1550 + struct fsl_xgkr_inst *xgkr_inst = phydev->priv;
1551 +
1552 + cancel_delayed_work_sync(&xgkr_inst->xgkr_wk);
1553 + }
1554 + return 0;
1555 +}
1556 +
1557 +static int fsl_backplane_resume(struct phy_device *phydev)
1558 +{
1559 + if (phydev->speed == SPEED_10000) {
1560 + struct fsl_xgkr_inst *xgkr_inst = phydev->priv;
1561 +
1562 + init_inst(xgkr_inst, 1);
1563 + queue_delayed_work(system_power_efficient_wq,
1564 + &xgkr_inst->xgkr_wk,
1565 + msecs_to_jiffies(XGKR_TIMEOUT));
1566 + }
1567 + return 0;
1568 +}
1569 +
1570 +static int fsl_backplane_read_status(struct phy_device *phydev)
1571 +{
1572 + if (is_link_up(phydev))
1573 + phydev->link = 1;
1574 + else
1575 + phydev->link = 0;
1576 +
1577 + return 0;
1578 +}
1579 +
1580 +static struct phy_driver fsl_backplane_driver[] = {
1581 + {
1582 + .phy_id = FSL_PCS_PHY_ID,
1583 + .name = "Freescale Backplane",
1584 + .phy_id_mask = 0xffffffff,
1585 + .features = SUPPORTED_Backplane | SUPPORTED_Autoneg |
1586 + SUPPORTED_MII,
1587 + .probe = fsl_backplane_probe,
1588 + .aneg_done = fsl_backplane_aneg_done,
1589 + .config_aneg = fsl_backplane_config_aneg,
1590 + .read_status = fsl_backplane_read_status,
1591 + .suspend = fsl_backplane_suspend,
1592 + .resume = fsl_backplane_resume,
1593 + },
1594 +};
1595 +
1596 +module_phy_driver(fsl_backplane_driver);
1597 +
1598 +static struct mdio_device_id __maybe_unused freescale_tbl[] = {
1599 + { FSL_PCS_PHY_ID, 0xffffffff },
1600 + { }
1601 +};
1602 +
1603 +MODULE_DEVICE_TABLE(mdio, freescale_tbl);
1604 +
1605 +MODULE_DESCRIPTION("Freescale Backplane driver");
1606 +MODULE_AUTHOR("Shaohui Xie <Shaohui.Xie@freescale.com>");
1607 +MODULE_LICENSE("GPL v2");
1608 --- a/drivers/net/phy/phy.c
1609 +++ b/drivers/net/phy/phy.c
1610 @@ -591,7 +591,7 @@ int phy_mii_ioctl(struct phy_device *phy
1611 return 0;
1612
1613 case SIOCSHWTSTAMP:
1614 - if (phydev->drv->hwtstamp)
1615 + if (phydev->drv && phydev->drv->hwtstamp)
1616 return phydev->drv->hwtstamp(phydev, ifr);
1617 /* fall through */
1618
1619 @@ -616,6 +616,9 @@ static int phy_start_aneg_priv(struct ph
1620 bool trigger = 0;
1621 int err;
1622
1623 + if (!phydev->drv)
1624 + return -EIO;
1625 +
1626 mutex_lock(&phydev->lock);
1627
1628 if (AUTONEG_DISABLE == phydev->autoneg)
1629 @@ -1015,7 +1018,7 @@ void phy_state_machine(struct work_struc
1630
1631 old_state = phydev->state;
1632
1633 - if (phydev->drv->link_change_notify)
1634 + if (phydev->drv && phydev->drv->link_change_notify)
1635 phydev->drv->link_change_notify(phydev);
1636
1637 switch (phydev->state) {
1638 @@ -1317,6 +1320,9 @@ EXPORT_SYMBOL(phy_write_mmd_indirect);
1639 */
1640 int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
1641 {
1642 + if (!phydev->drv)
1643 + return -EIO;
1644 +
1645 /* According to 802.3az,the EEE is supported only in full duplex-mode.
1646 * Also EEE feature is active when core is operating with MII, GMII
1647 * or RGMII (all kinds). Internal PHYs are also allowed to proceed and
1648 @@ -1394,6 +1400,9 @@ EXPORT_SYMBOL(phy_init_eee);
1649 */
1650 int phy_get_eee_err(struct phy_device *phydev)
1651 {
1652 + if (!phydev->drv)
1653 + return -EIO;
1654 +
1655 return phy_read_mmd_indirect(phydev, MDIO_PCS_EEE_WK_ERR, MDIO_MMD_PCS);
1656 }
1657 EXPORT_SYMBOL(phy_get_eee_err);
1658 @@ -1410,6 +1419,9 @@ int phy_ethtool_get_eee(struct phy_devic
1659 {
1660 int val;
1661
1662 + if (!phydev->drv)
1663 + return -EIO;
1664 +
1665 /* Get Supported EEE */
1666 val = phy_read_mmd_indirect(phydev, MDIO_PCS_EEE_ABLE, MDIO_MMD_PCS);
1667 if (val < 0)
1668 @@ -1443,6 +1455,9 @@ int phy_ethtool_set_eee(struct phy_devic
1669 {
1670 int val = ethtool_adv_to_mmd_eee_adv_t(data->advertised);
1671
1672 + if (!phydev->drv)
1673 + return -EIO;
1674 +
1675 /* Mask prohibited EEE modes */
1676 val &= ~phydev->eee_broken_modes;
1677
1678 @@ -1454,7 +1469,7 @@ EXPORT_SYMBOL(phy_ethtool_set_eee);
1679
1680 int phy_ethtool_set_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol)
1681 {
1682 - if (phydev->drv->set_wol)
1683 + if (phydev->drv && phydev->drv->set_wol)
1684 return phydev->drv->set_wol(phydev, wol);
1685
1686 return -EOPNOTSUPP;
1687 @@ -1463,7 +1478,7 @@ EXPORT_SYMBOL(phy_ethtool_set_wol);
1688
1689 void phy_ethtool_get_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol)
1690 {
1691 - if (phydev->drv->get_wol)
1692 + if (phydev->drv && phydev->drv->get_wol)
1693 phydev->drv->get_wol(phydev, wol);
1694 }
1695 EXPORT_SYMBOL(phy_ethtool_get_wol);
1696 --- a/drivers/net/phy/phy_device.c
1697 +++ b/drivers/net/phy/phy_device.c
1698 @@ -1046,7 +1046,7 @@ int phy_suspend(struct phy_device *phyde
1699 if (wol.wolopts)
1700 return -EBUSY;
1701
1702 - if (phydrv->suspend)
1703 + if (phydev->drv && phydrv->suspend)
1704 ret = phydrv->suspend(phydev);
1705
1706 if (ret)
1707 @@ -1063,7 +1063,7 @@ int phy_resume(struct phy_device *phydev
1708 struct phy_driver *phydrv = to_phy_driver(phydev->mdio.dev.driver);
1709 int ret = 0;
1710
1711 - if (phydrv->resume)
1712 + if (phydev->drv && phydrv->resume)
1713 ret = phydrv->resume(phydev);
1714
1715 if (ret)
1716 @@ -1720,7 +1720,7 @@ static int phy_remove(struct device *dev
1717 phydev->state = PHY_DOWN;
1718 mutex_unlock(&phydev->lock);
1719
1720 - if (phydev->drv->remove)
1721 + if (phydev->drv && phydev->drv->remove)
1722 phydev->drv->remove(phydev);
1723 phydev->drv = NULL;
1724
1725 --- a/drivers/net/phy/swphy.c
1726 +++ b/drivers/net/phy/swphy.c
1727 @@ -77,6 +77,7 @@ static const struct swmii_regs duplex[]
1728 static int swphy_decode_speed(int speed)
1729 {
1730 switch (speed) {
1731 + case 10000:
1732 case 1000:
1733 return SWMII_SPEED_1000;
1734 case 100:
1735 --- a/include/linux/phy.h
1736 +++ b/include/linux/phy.h
1737 @@ -81,6 +81,7 @@ typedef enum {
1738 PHY_INTERFACE_MODE_MOCA,
1739 PHY_INTERFACE_MODE_QSGMII,
1740 PHY_INTERFACE_MODE_TRGMII,
1741 + PHY_INTERFACE_MODE_2500SGMII,
1742 PHY_INTERFACE_MODE_MAX,
1743 } phy_interface_t;
1744
1745 @@ -126,6 +127,8 @@ static inline const char *phy_modes(phy_
1746 return "qsgmii";
1747 case PHY_INTERFACE_MODE_TRGMII:
1748 return "trgmii";
1749 + case PHY_INTERFACE_MODE_2500SGMII:
1750 + return "sgmii-2500";
1751 default:
1752 return "unknown";
1753 }
1754 @@ -791,6 +794,9 @@ int phy_stop_interrupts(struct phy_devic
1755
1756 static inline int phy_read_status(struct phy_device *phydev)
1757 {
1758 + if (!phydev->drv)
1759 + return -EIO;
1760 +
1761 return phydev->drv->read_status(phydev);
1762 }
1763