layerscape: upgrade kernel to 4.14
[openwrt/staging/wigyori.git] / target / linux / layerscape / patches-4.14 / 709-mdio-phy-support-layerscape.patch
1 From 8eb578a8c1eb55715a40f02790e43aba4a528c38 Mon Sep 17 00:00:00 2001
2 From: Biwen Li <biwen.li@nxp.com>
3 Date: Tue, 30 Oct 2018 18:26:51 +0800
4 Subject: [PATCH 15/40] mdio-phy: support layerscape
5 This is an integrated patch of mdio-phy for layerscape
6
7 Signed-off-by: Bhaskar Upadhaya <Bhaskar.Upadhaya@nxp.com>
8 Signed-off-by: Camelia Groza <camelia.groza@nxp.com>
9 Signed-off-by: Constantin Tudor <constantin.tudor@nxp.com>
10 Signed-off-by: costi <constantin.tudor@freescale.com>
11 Signed-off-by: Madalin Bucur <madalin.bucur@freescale.com>
12 Signed-off-by: Shaohui Xie <Shaohui.Xie@freescale.com>
13 Signed-off-by: Biwen Li <biwen.li@nxp.com>
14 ---
15 drivers/net/phy/Kconfig | 6 +
16 drivers/net/phy/Makefile | 1 +
17 drivers/net/phy/fsl_backplane.c | 1358 +++++++++++++++++++++++++++++++
18 drivers/net/phy/swphy.c | 1 +
19 include/linux/phy.h | 3 +
20 5 files changed, 1369 insertions(+)
21 create mode 100644 drivers/net/phy/fsl_backplane.c
22
23 --- a/drivers/net/phy/Kconfig
24 +++ b/drivers/net/phy/Kconfig
25 @@ -90,6 +90,12 @@ config MDIO_BUS_MUX_MMIOREG
26 config MDIO_CAVIUM
27 tristate
28
29 +config MDIO_FSL_BACKPLANE
30 + tristate "Support for backplane on Freescale XFI interface"
31 + depends on OF_MDIO
32 + help
33 + This module provides a driver for Freescale XFI's backplane.
34 +
35 config MDIO_GPIO
36 tristate "GPIO lib-based bitbanged MDIO buses"
37 depends on MDIO_BITBANG && GPIOLIB
38 --- a/drivers/net/phy/Makefile
39 +++ b/drivers/net/phy/Makefile
40 @@ -45,6 +45,7 @@ obj-$(CONFIG_MDIO_BUS_MUX_BCM_IPROC) +=
41 obj-$(CONFIG_MDIO_BUS_MUX_GPIO) += mdio-mux-gpio.o
42 obj-$(CONFIG_MDIO_BUS_MUX_MMIOREG) += mdio-mux-mmioreg.o
43 obj-$(CONFIG_MDIO_CAVIUM) += mdio-cavium.o
44 +obj-$(CONFIG_MDIO_FSL_BACKPLANE) += fsl_backplane.o
45 obj-$(CONFIG_MDIO_GPIO) += mdio-gpio.o
46 obj-$(CONFIG_MDIO_HISI_FEMAC) += mdio-hisi-femac.o
47 obj-$(CONFIG_MDIO_I2C) += mdio-i2c.o
48 --- /dev/null
49 +++ b/drivers/net/phy/fsl_backplane.c
50 @@ -0,0 +1,1358 @@
51 +/* Freescale backplane driver.
52 + * Author: Shaohui Xie <Shaohui.Xie@freescale.com>
53 + *
54 + * Copyright 2015 Freescale Semiconductor, Inc.
55 + *
56 + * Licensed under the GPL-2 or later.
57 + */
58 +
59 +#include <linux/kernel.h>
60 +#include <linux/module.h>
61 +#include <linux/mii.h>
62 +#include <linux/mdio.h>
63 +#include <linux/ethtool.h>
64 +#include <linux/phy.h>
65 +#include <linux/io.h>
66 +#include <linux/of.h>
67 +#include <linux/of_net.h>
68 +#include <linux/of_address.h>
69 +#include <linux/of_platform.h>
70 +#include <linux/timer.h>
71 +#include <linux/delay.h>
72 +#include <linux/workqueue.h>
73 +
74 +/* XFI PCS Device Identifier */
75 +#define FSL_PCS_PHY_ID 0x0083e400
76 +
77 +/* Freescale KR PMD registers */
78 +#define FSL_KR_PMD_CTRL 0x96
79 +#define FSL_KR_PMD_STATUS 0x97
80 +#define FSL_KR_LP_CU 0x98
81 +#define FSL_KR_LP_STATUS 0x99
82 +#define FSL_KR_LD_CU 0x9a
83 +#define FSL_KR_LD_STATUS 0x9b
84 +
85 +/* Freescale KR PMD defines */
86 +#define PMD_RESET 0x1
87 +#define PMD_STATUS_SUP_STAT 0x4
88 +#define PMD_STATUS_FRAME_LOCK 0x2
89 +#define TRAIN_EN 0x3
90 +#define TRAIN_DISABLE 0x1
91 +#define RX_STAT 0x1
92 +
93 +#define FSL_KR_RX_LINK_STAT_MASK 0x1000
94 +#define FSL_XFI_PCS_10GR_SR1 0x20
95 +
96 +/* Freescale KX PCS mode register */
97 +#define FSL_PCS_IF_MODE 0x8014
98 +
99 +/* Freescale KX PCS mode register init value */
100 +#define IF_MODE_INIT 0x8
101 +
102 +/* Freescale KX/KR AN registers */
103 +#define FSL_AN_AD1 0x11
104 +#define FSL_AN_BP_STAT 0x30
105 +
106 +/* Freescale KX/KR AN registers defines */
107 +#define AN_CTRL_INIT 0x1200
108 +#define KX_AN_AD1_INIT 0x25
109 +#define KR_AN_AD1_INIT 0x85
110 +#define AN_LNK_UP_MASK 0x4
111 +#define KR_AN_MASK 0x8
112 +#define TRAIN_FAIL 0x8
113 +
114 +/* C(-1) */
115 +#define BIN_M1 0
116 +/* C(1) */
117 +#define BIN_LONG 1
118 +#define BIN_M1_SEL 6
119 +#define BIN_Long_SEL 7
120 +#define CDR_SEL_MASK 0x00070000
121 +#define BIN_SNAPSHOT_NUM 5
122 +#define BIN_M1_THRESHOLD 3
123 +#define BIN_LONG_THRESHOLD 2
124 +
125 +#define PRE_COE_SHIFT 22
126 +#define POST_COE_SHIFT 16
127 +#define ZERO_COE_SHIFT 8
128 +
129 +#define PRE_COE_MAX 0x0
130 +#define PRE_COE_MIN 0x8
131 +#define POST_COE_MAX 0x0
132 +#define POST_COE_MIN 0x10
133 +#define ZERO_COE_MAX 0x30
134 +#define ZERO_COE_MIN 0x0
135 +
136 +#define TECR0_INIT 0x24200000
137 +#define RATIO_PREQ 0x3
138 +#define RATIO_PST1Q 0xd
139 +#define RATIO_EQ 0x20
140 +
141 +#define GCR0_RESET_MASK 0x600000
142 +#define GCR1_SNP_START_MASK 0x00000040
143 +#define GCR1_CTL_SNP_START_MASK 0x00002000
144 +#define GCR1_REIDL_TH_MASK 0x00700000
145 +#define GCR1_REIDL_EX_SEL_MASK 0x000c0000
146 +#define GCR1_REIDL_ET_MAS_MASK 0x00004000
147 +#define TECR0_AMP_RED_MASK 0x0000003f
148 +
149 +#define RECR1_CTL_SNP_DONE_MASK 0x00000002
150 +#define RECR1_SNP_DONE_MASK 0x00000004
151 +#define TCSR1_SNP_DATA_MASK 0x0000ffc0
152 +#define TCSR1_SNP_DATA_SHIFT 6
153 +#define TCSR1_EQ_SNPBIN_SIGN_MASK 0x100
154 +
155 +#define RECR1_GAINK2_MASK 0x0f000000
156 +#define RECR1_GAINK2_SHIFT 24
157 +#define RECR1_GAINK3_MASK 0x000f0000
158 +#define RECR1_GAINK3_SHIFT 16
159 +#define RECR1_OFFSET_MASK 0x00003f80
160 +#define RECR1_OFFSET_SHIFT 7
161 +#define RECR1_BLW_MASK 0x00000f80
162 +#define RECR1_BLW_SHIFT 7
163 +#define EYE_CTRL_SHIFT 12
164 +#define BASE_WAND_SHIFT 10
165 +
166 +#define XGKR_TIMEOUT 1050
167 +
168 +#define INCREMENT 1
169 +#define DECREMENT 2
170 +#define TIMEOUT_LONG 3
171 +#define TIMEOUT_M1 3
172 +
173 +#define RX_READY_MASK 0x8000
174 +#define PRESET_MASK 0x2000
175 +#define INIT_MASK 0x1000
176 +#define COP1_MASK 0x30
177 +#define COP1_SHIFT 4
178 +#define COZ_MASK 0xc
179 +#define COZ_SHIFT 2
180 +#define COM1_MASK 0x3
181 +#define COM1_SHIFT 0
182 +#define REQUEST_MASK 0x3f
183 +#define LD_ALL_MASK (PRESET_MASK | INIT_MASK | \
184 + COP1_MASK | COZ_MASK | COM1_MASK)
185 +
186 +#define NEW_ALGORITHM_TRAIN_TX
187 +#ifdef NEW_ALGORITHM_TRAIN_TX
188 +#define FORCE_INC_COP1_NUMBER 0
189 +#define FORCE_INC_COM1_NUMBER 1
190 +#endif
191 +
192 +#define VAL_INVALID 0xff
193 +
194 +static const u32 preq_table[] = {0x0, 0x1, 0x3, 0x5,
195 + 0x7, 0x9, 0xb, 0xc, VAL_INVALID};
196 +static const u32 pst1q_table[] = {0x0, 0x1, 0x3, 0x5, 0x7,
197 + 0x9, 0xb, 0xd, 0xf, 0x10, VAL_INVALID};
198 +
199 +enum backplane_mode {
200 + PHY_BACKPLANE_1000BASE_KX,
201 + PHY_BACKPLANE_10GBASE_KR,
202 + PHY_BACKPLANE_INVAL
203 +};
204 +
205 +enum coe_filed {
206 + COE_COP1,
207 + COE_COZ,
208 + COE_COM
209 +};
210 +
211 +enum coe_update {
212 + COE_NOTUPDATED,
213 + COE_UPDATED,
214 + COE_MIN,
215 + COE_MAX,
216 + COE_INV
217 +};
218 +
219 +enum train_state {
220 + DETECTING_LP,
221 + TRAINED,
222 +};
223 +
224 +struct per_lane_ctrl_status {
225 + __be32 gcr0; /* 0x.000 - General Control Register 0 */
226 + __be32 gcr1; /* 0x.004 - General Control Register 1 */
227 + __be32 gcr2; /* 0x.008 - General Control Register 2 */
228 + __be32 resv1; /* 0x.00C - Reserved */
229 + __be32 recr0; /* 0x.010 - Receive Equalization Control Register 0 */
230 + __be32 recr1; /* 0x.014 - Receive Equalization Control Register 1 */
231 + __be32 tecr0; /* 0x.018 - Transmit Equalization Control Register 0 */
232 + __be32 resv2; /* 0x.01C - Reserved */
233 + __be32 tlcr0; /* 0x.020 - TTL Control Register 0 */
234 + __be32 tlcr1; /* 0x.024 - TTL Control Register 1 */
235 + __be32 tlcr2; /* 0x.028 - TTL Control Register 2 */
236 + __be32 tlcr3; /* 0x.02C - TTL Control Register 3 */
237 + __be32 tcsr0; /* 0x.030 - Test Control/Status Register 0 */
238 + __be32 tcsr1; /* 0x.034 - Test Control/Status Register 1 */
239 + __be32 tcsr2; /* 0x.038 - Test Control/Status Register 2 */
240 + __be32 tcsr3; /* 0x.03C - Test Control/Status Register 3 */
241 +};
242 +
243 +struct tx_condition {
244 + bool bin_m1_late_early;
245 + bool bin_long_late_early;
246 + bool bin_m1_stop;
247 + bool bin_long_stop;
248 + bool tx_complete;
249 + bool sent_init;
250 + int m1_min_max_cnt;
251 + int long_min_max_cnt;
252 +#ifdef NEW_ALGORITHM_TRAIN_TX
253 + int pre_inc;
254 + int post_inc;
255 +#endif
256 +};
257 +
258 +struct fsl_xgkr_inst {
259 + void *reg_base;
260 + struct phy_device *phydev;
261 + struct tx_condition tx_c;
262 + struct delayed_work xgkr_wk;
263 + enum train_state state;
264 + u32 ld_update;
265 + u32 ld_status;
266 + u32 ratio_preq;
267 + u32 ratio_pst1q;
268 + u32 adpt_eq;
269 +};
270 +
271 +static void tx_condition_init(struct tx_condition *tx_c)
272 +{
273 + tx_c->bin_m1_late_early = true;
274 + tx_c->bin_long_late_early = false;
275 + tx_c->bin_m1_stop = false;
276 + tx_c->bin_long_stop = false;
277 + tx_c->tx_complete = false;
278 + tx_c->sent_init = false;
279 + tx_c->m1_min_max_cnt = 0;
280 + tx_c->long_min_max_cnt = 0;
281 +#ifdef NEW_ALGORITHM_TRAIN_TX
282 + tx_c->pre_inc = FORCE_INC_COM1_NUMBER;
283 + tx_c->post_inc = FORCE_INC_COP1_NUMBER;
284 +#endif
285 +}
286 +
287 +void tune_tecr0(struct fsl_xgkr_inst *inst)
288 +{
289 + struct per_lane_ctrl_status *reg_base = inst->reg_base;
290 + u32 val;
291 +
292 + val = TECR0_INIT |
293 + inst->adpt_eq << ZERO_COE_SHIFT |
294 + inst->ratio_preq << PRE_COE_SHIFT |
295 + inst->ratio_pst1q << POST_COE_SHIFT;
296 +
297 + /* reset the lane */
298 + iowrite32(ioread32(&reg_base->gcr0) & ~GCR0_RESET_MASK,
299 + &reg_base->gcr0);
300 + udelay(1);
301 + iowrite32(val, &reg_base->tecr0);
302 + udelay(1);
303 + /* unreset the lane */
304 + iowrite32(ioread32(&reg_base->gcr0) | GCR0_RESET_MASK,
305 + &reg_base->gcr0);
306 + udelay(1);
307 +}
308 +
309 +static void start_lt(struct phy_device *phydev)
310 +{
311 + phy_write_mmd(phydev, MDIO_MMD_PMAPMD, FSL_KR_PMD_CTRL, TRAIN_EN);
312 +}
313 +
314 +static void stop_lt(struct phy_device *phydev)
315 +{
316 + phy_write_mmd(phydev, MDIO_MMD_PMAPMD, FSL_KR_PMD_CTRL, TRAIN_DISABLE);
317 +}
318 +
319 +static void reset_gcr0(struct fsl_xgkr_inst *inst)
320 +{
321 + struct per_lane_ctrl_status *reg_base = inst->reg_base;
322 +
323 + iowrite32(ioread32(&reg_base->gcr0) & ~GCR0_RESET_MASK,
324 + &reg_base->gcr0);
325 + udelay(1);
326 + iowrite32(ioread32(&reg_base->gcr0) | GCR0_RESET_MASK,
327 + &reg_base->gcr0);
328 + udelay(1);
329 +}
330 +
331 +void lane_set_1gkx(void *reg)
332 +{
333 + struct per_lane_ctrl_status *reg_base = reg;
334 + u32 val;
335 +
336 + /* reset the lane */
337 + iowrite32(ioread32(&reg_base->gcr0) & ~GCR0_RESET_MASK,
338 + &reg_base->gcr0);
339 + udelay(1);
340 +
341 + /* set gcr1 for 1GKX */
342 + val = ioread32(&reg_base->gcr1);
343 + val &= ~(GCR1_REIDL_TH_MASK | GCR1_REIDL_EX_SEL_MASK |
344 + GCR1_REIDL_ET_MAS_MASK);
345 + iowrite32(val, &reg_base->gcr1);
346 + udelay(1);
347 +
348 + /* set tecr0 for 1GKX */
349 + val = ioread32(&reg_base->tecr0);
350 + val &= ~TECR0_AMP_RED_MASK;
351 + iowrite32(val, &reg_base->tecr0);
352 + udelay(1);
353 +
354 + /* unreset the lane */
355 + iowrite32(ioread32(&reg_base->gcr0) | GCR0_RESET_MASK,
356 + &reg_base->gcr0);
357 + udelay(1);
358 +}
359 +
360 +static void reset_lt(struct phy_device *phydev)
361 +{
362 + phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_CTRL1, PMD_RESET);
363 + phy_write_mmd(phydev, MDIO_MMD_PMAPMD, FSL_KR_PMD_CTRL, TRAIN_DISABLE);
364 + phy_write_mmd(phydev, MDIO_MMD_PMAPMD, FSL_KR_LD_CU, 0);
365 + phy_write_mmd(phydev, MDIO_MMD_PMAPMD, FSL_KR_LD_STATUS, 0);
366 + phy_write_mmd(phydev, MDIO_MMD_PMAPMD, FSL_KR_PMD_STATUS, 0);
367 + phy_write_mmd(phydev, MDIO_MMD_PMAPMD, FSL_KR_LP_CU, 0);
368 + phy_write_mmd(phydev, MDIO_MMD_PMAPMD, FSL_KR_LP_STATUS, 0);
369 +}
370 +
371 +static void start_xgkr_state_machine(struct delayed_work *work)
372 +{
373 + queue_delayed_work(system_power_efficient_wq, work,
374 + msecs_to_jiffies(XGKR_TIMEOUT));
375 +}
376 +
377 +static void start_xgkr_an(struct phy_device *phydev)
378 +{
379 + struct fsl_xgkr_inst *inst;
380 +
381 + reset_lt(phydev);
382 + phy_write_mmd(phydev, MDIO_MMD_AN, FSL_AN_AD1, KR_AN_AD1_INIT);
383 + phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1, AN_CTRL_INIT);
384 +
385 + inst = phydev->priv;
386 +
387 + /* start state machine*/
388 + start_xgkr_state_machine(&inst->xgkr_wk);
389 +}
390 +
391 +static void start_1gkx_an(struct phy_device *phydev)
392 +{
393 + phy_write_mmd(phydev, MDIO_MMD_PCS, FSL_PCS_IF_MODE, IF_MODE_INIT);
394 + phy_write_mmd(phydev, MDIO_MMD_AN, FSL_AN_AD1, KX_AN_AD1_INIT);
395 + phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1);
396 + phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1, AN_CTRL_INIT);
397 +}
398 +
399 +static void ld_coe_status(struct fsl_xgkr_inst *inst)
400 +{
401 + phy_write_mmd(inst->phydev, MDIO_MMD_PMAPMD,
402 + FSL_KR_LD_STATUS, inst->ld_status);
403 +}
404 +
405 +static void ld_coe_update(struct fsl_xgkr_inst *inst)
406 +{
407 + dev_dbg(&inst->phydev->mdio.dev, "sending request: %x\n", inst->ld_update);
408 + phy_write_mmd(inst->phydev, MDIO_MMD_PMAPMD,
409 + FSL_KR_LD_CU, inst->ld_update);
410 +}
411 +
412 +static void init_inst(struct fsl_xgkr_inst *inst, int reset)
413 +{
414 + if (reset) {
415 + inst->ratio_preq = RATIO_PREQ;
416 + inst->ratio_pst1q = RATIO_PST1Q;
417 + inst->adpt_eq = RATIO_EQ;
418 + tune_tecr0(inst);
419 + }
420 +
421 + tx_condition_init(&inst->tx_c);
422 + inst->state = DETECTING_LP;
423 + inst->ld_status &= RX_READY_MASK;
424 + ld_coe_status(inst);
425 + inst->ld_update = 0;
426 + inst->ld_status &= ~RX_READY_MASK;
427 + ld_coe_status(inst);
428 +}
429 +
430 +#ifdef NEW_ALGORITHM_TRAIN_TX
431 +static int get_median_gaink2(u32 *reg)
432 +{
433 + int gaink2_snap_shot[BIN_SNAPSHOT_NUM];
434 + u32 rx_eq_snp;
435 + struct per_lane_ctrl_status *reg_base;
436 + int timeout;
437 + int i, j, tmp, pos;
438 +
439 + reg_base = (struct per_lane_ctrl_status *)reg;
440 +
441 + for (i = 0; i < BIN_SNAPSHOT_NUM; i++) {
442 + /* wait RECR1_CTL_SNP_DONE_MASK has cleared */
443 + timeout = 100;
444 + while (ioread32(&reg_base->recr1) &
445 + RECR1_CTL_SNP_DONE_MASK) {
446 + udelay(1);
447 + timeout--;
448 + if (timeout == 0)
449 + break;
450 + }
451 +
452 + /* start snap shot */
453 + iowrite32((ioread32(&reg_base->gcr1) |
454 + GCR1_CTL_SNP_START_MASK),
455 + &reg_base->gcr1);
456 +
457 + /* wait for SNP done */
458 + timeout = 100;
459 + while (!(ioread32(&reg_base->recr1) &
460 + RECR1_CTL_SNP_DONE_MASK)) {
461 + udelay(1);
462 + timeout--;
463 + if (timeout == 0)
464 + break;
465 + }
466 +
467 + /* read and save the snap shot */
468 + rx_eq_snp = ioread32(&reg_base->recr1);
469 + gaink2_snap_shot[i] = (rx_eq_snp & RECR1_GAINK2_MASK) >>
470 + RECR1_GAINK2_SHIFT;
471 +
472 + /* terminate the snap shot by setting GCR1[REQ_CTL_SNP] */
473 + iowrite32((ioread32(&reg_base->gcr1) &
474 + ~GCR1_CTL_SNP_START_MASK),
475 + &reg_base->gcr1);
476 + }
477 +
478 + /* get median of the 5 snap shot */
479 + for (i = 0; i < BIN_SNAPSHOT_NUM - 1; i++) {
480 + tmp = gaink2_snap_shot[i];
481 + pos = i;
482 + for (j = i + 1; j < BIN_SNAPSHOT_NUM; j++) {
483 + if (gaink2_snap_shot[j] < tmp) {
484 + tmp = gaink2_snap_shot[j];
485 + pos = j;
486 + }
487 + }
488 +
489 + gaink2_snap_shot[pos] = gaink2_snap_shot[i];
490 + gaink2_snap_shot[i] = tmp;
491 + }
492 +
493 + return gaink2_snap_shot[2];
494 +}
495 +#endif
496 +
497 +static bool is_bin_early(int bin_sel, void *reg)
498 +{
499 + bool early = false;
500 + int bin_snap_shot[BIN_SNAPSHOT_NUM];
501 + int i, negative_count = 0;
502 + struct per_lane_ctrl_status *reg_base = reg;
503 + int timeout;
504 +
505 + for (i = 0; i < BIN_SNAPSHOT_NUM; i++) {
506 + /* wait RECR1_SNP_DONE_MASK has cleared */
507 + timeout = 100;
508 + while ((ioread32(&reg_base->recr1) & RECR1_SNP_DONE_MASK)) {
509 + udelay(1);
510 + timeout--;
511 + if (timeout == 0)
512 + break;
513 + }
514 +
515 + /* set TCSR1[CDR_SEL] to BinM1/BinLong */
516 + if (bin_sel == BIN_M1) {
517 + iowrite32((ioread32(&reg_base->tcsr1) &
518 + ~CDR_SEL_MASK) | BIN_M1_SEL,
519 + &reg_base->tcsr1);
520 + } else {
521 + iowrite32((ioread32(&reg_base->tcsr1) &
522 + ~CDR_SEL_MASK) | BIN_Long_SEL,
523 + &reg_base->tcsr1);
524 + }
525 +
526 + /* start snap shot */
527 + iowrite32(ioread32(&reg_base->gcr1) | GCR1_SNP_START_MASK,
528 + &reg_base->gcr1);
529 +
530 + /* wait for SNP done */
531 + timeout = 100;
532 + while (!(ioread32(&reg_base->recr1) & RECR1_SNP_DONE_MASK)) {
533 + udelay(1);
534 + timeout--;
535 + if (timeout == 0)
536 + break;
537 + }
538 +
539 + /* read and save the snap shot */
540 + bin_snap_shot[i] = (ioread32(&reg_base->tcsr1) &
541 + TCSR1_SNP_DATA_MASK) >> TCSR1_SNP_DATA_SHIFT;
542 + if (bin_snap_shot[i] & TCSR1_EQ_SNPBIN_SIGN_MASK)
543 + negative_count++;
544 +
545 + /* terminate the snap shot by setting GCR1[REQ_CTL_SNP] */
546 + iowrite32(ioread32(&reg_base->gcr1) & ~GCR1_SNP_START_MASK,
547 + &reg_base->gcr1);
548 + }
549 +
550 + if (((bin_sel == BIN_M1) && (negative_count > BIN_M1_THRESHOLD)) ||
551 + ((bin_sel == BIN_LONG && (negative_count > BIN_LONG_THRESHOLD)))) {
552 + early = true;
553 + }
554 +
555 + return early;
556 +}
557 +
558 +static void train_tx(struct fsl_xgkr_inst *inst)
559 +{
560 + struct phy_device *phydev = inst->phydev;
561 + struct tx_condition *tx_c = &inst->tx_c;
562 + bool bin_m1_early, bin_long_early;
563 + u32 lp_status, old_ld_update;
564 + u32 status_cop1, status_coz, status_com1;
565 + u32 req_cop1, req_coz, req_com1, req_preset, req_init;
566 + u32 temp;
567 +#ifdef NEW_ALGORITHM_TRAIN_TX
568 + u32 median_gaink2;
569 +#endif
570 +
571 +recheck:
572 + if (tx_c->bin_long_stop && tx_c->bin_m1_stop) {
573 + tx_c->tx_complete = true;
574 + inst->ld_status |= RX_READY_MASK;
575 + ld_coe_status(inst);
576 + /* tell LP we are ready */
577 + phy_write_mmd(phydev, MDIO_MMD_PMAPMD,
578 + FSL_KR_PMD_STATUS, RX_STAT);
579 + return;
580 + }
581 +
582 + /* We start by checking the current LP status. If we got any responses,
583 + * we can clear up the appropriate update request so that the
584 + * subsequent code may easily issue new update requests if needed.
585 + */
586 + lp_status = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, FSL_KR_LP_STATUS) &
587 + REQUEST_MASK;
588 + status_cop1 = (lp_status & COP1_MASK) >> COP1_SHIFT;
589 + status_coz = (lp_status & COZ_MASK) >> COZ_SHIFT;
590 + status_com1 = (lp_status & COM1_MASK) >> COM1_SHIFT;
591 +
592 + old_ld_update = inst->ld_update;
593 + req_cop1 = (old_ld_update & COP1_MASK) >> COP1_SHIFT;
594 + req_coz = (old_ld_update & COZ_MASK) >> COZ_SHIFT;
595 + req_com1 = (old_ld_update & COM1_MASK) >> COM1_SHIFT;
596 + req_preset = old_ld_update & PRESET_MASK;
597 + req_init = old_ld_update & INIT_MASK;
598 +
599 + /* IEEE802.3-2008, 72.6.10.2.3.1
600 + * We may clear PRESET when all coefficients show UPDATED or MAX.
601 + */
602 + if (req_preset) {
603 + if ((status_cop1 == COE_UPDATED || status_cop1 == COE_MAX) &&
604 + (status_coz == COE_UPDATED || status_coz == COE_MAX) &&
605 + (status_com1 == COE_UPDATED || status_com1 == COE_MAX)) {
606 + inst->ld_update &= ~PRESET_MASK;
607 + }
608 + }
609 +
610 + /* IEEE802.3-2008, 72.6.10.2.3.2
611 + * We may clear INITIALIZE when no coefficients show NOT UPDATED.
612 + */
613 + if (req_init) {
614 + if (status_cop1 != COE_NOTUPDATED &&
615 + status_coz != COE_NOTUPDATED &&
616 + status_com1 != COE_NOTUPDATED) {
617 + inst->ld_update &= ~INIT_MASK;
618 + }
619 + }
620 +
621 + /* IEEE802.3-2008, 72.6.10.2.3.2
622 + * we send initialize to the other side to ensure default settings
623 + * for the LP. Naturally, we should do this only once.
624 + */
625 + if (!tx_c->sent_init) {
626 + if (!lp_status && !(old_ld_update & (LD_ALL_MASK))) {
627 + inst->ld_update = INIT_MASK;
628 + tx_c->sent_init = true;
629 + }
630 + }
631 +
632 + /* IEEE802.3-2008, 72.6.10.2.3.3
633 + * We set coefficient requests to HOLD when we get the information
634 + * about any updates. On clearing our prior response, we also update
635 + * our internal status.
636 + */
637 + if (status_cop1 != COE_NOTUPDATED) {
638 + if (req_cop1) {
639 + inst->ld_update &= ~COP1_MASK;
640 +#ifdef NEW_ALGORITHM_TRAIN_TX
641 + if (tx_c->post_inc) {
642 + if (req_cop1 == INCREMENT &&
643 + status_cop1 == COE_MAX) {
644 + tx_c->post_inc = 0;
645 + tx_c->bin_long_stop = true;
646 + tx_c->bin_m1_stop = true;
647 + } else {
648 + tx_c->post_inc -= 1;
649 + }
650 +
651 + ld_coe_update(inst);
652 + goto recheck;
653 + }
654 +#endif
655 + if ((req_cop1 == DECREMENT && status_cop1 == COE_MIN) ||
656 + (req_cop1 == INCREMENT && status_cop1 == COE_MAX)) {
657 + dev_dbg(&inst->phydev->mdio.dev, "COP1 hit limit %s",
658 + (status_cop1 == COE_MIN) ?
659 + "DEC MIN" : "INC MAX");
660 + tx_c->long_min_max_cnt++;
661 + if (tx_c->long_min_max_cnt >= TIMEOUT_LONG) {
662 + tx_c->bin_long_stop = true;
663 + ld_coe_update(inst);
664 + goto recheck;
665 + }
666 + }
667 + }
668 + }
669 +
670 + if (status_coz != COE_NOTUPDATED) {
671 + if (req_coz)
672 + inst->ld_update &= ~COZ_MASK;
673 + }
674 +
675 + if (status_com1 != COE_NOTUPDATED) {
676 + if (req_com1) {
677 + inst->ld_update &= ~COM1_MASK;
678 +#ifdef NEW_ALGORITHM_TRAIN_TX
679 + if (tx_c->pre_inc) {
680 + if (req_com1 == INCREMENT &&
681 + status_com1 == COE_MAX)
682 + tx_c->pre_inc = 0;
683 + else
684 + tx_c->pre_inc -= 1;
685 +
686 + ld_coe_update(inst);
687 + goto recheck;
688 + }
689 +#endif
690 +			/* Stop if we have reached the limit for a parameter. */
691 + if ((req_com1 == DECREMENT && status_com1 == COE_MIN) ||
692 + (req_com1 == INCREMENT && status_com1 == COE_MAX)) {
693 + dev_dbg(&inst->phydev->mdio.dev, "COM1 hit limit %s",
694 + (status_com1 == COE_MIN) ?
695 + "DEC MIN" : "INC MAX");
696 + tx_c->m1_min_max_cnt++;
697 + if (tx_c->m1_min_max_cnt >= TIMEOUT_M1) {
698 + tx_c->bin_m1_stop = true;
699 + ld_coe_update(inst);
700 + goto recheck;
701 + }
702 + }
703 + }
704 + }
705 +
706 + if (old_ld_update != inst->ld_update) {
707 + ld_coe_update(inst);
708 + /* Redo these status checks and updates until we have no more
709 + * changes, to speed up the overall process.
710 + */
711 + goto recheck;
712 + }
713 +
714 + /* Do nothing if we have pending request. */
715 + if ((req_coz || req_com1 || req_cop1))
716 + return;
717 + else if (lp_status)
718 + /* No pending request but LP status was not reverted to
719 + * not updated.
720 + */
721 + return;
722 +
723 +#ifdef NEW_ALGORITHM_TRAIN_TX
724 + if (!(inst->ld_update & (PRESET_MASK | INIT_MASK))) {
725 + if (tx_c->pre_inc) {
726 + inst->ld_update = INCREMENT << COM1_SHIFT;
727 + ld_coe_update(inst);
728 + return;
729 + }
730 +
731 + if (status_cop1 != COE_MAX) {
732 + median_gaink2 = get_median_gaink2(inst->reg_base);
733 + if (median_gaink2 == 0xf) {
734 + tx_c->post_inc = 1;
735 + } else {
736 + /* Gaink2 median lower than "F" */
737 + tx_c->bin_m1_stop = true;
738 + tx_c->bin_long_stop = true;
739 + goto recheck;
740 + }
741 + } else {
742 + /* C1 MAX */
743 + tx_c->bin_m1_stop = true;
744 + tx_c->bin_long_stop = true;
745 + goto recheck;
746 + }
747 +
748 + if (tx_c->post_inc) {
749 + inst->ld_update = INCREMENT << COP1_SHIFT;
750 + ld_coe_update(inst);
751 + return;
752 + }
753 + }
754 +#endif
755 +
756 + /* snapshot and select bin */
757 + bin_m1_early = is_bin_early(BIN_M1, inst->reg_base);
758 + bin_long_early = is_bin_early(BIN_LONG, inst->reg_base);
759 +
760 + if (!tx_c->bin_m1_stop && !tx_c->bin_m1_late_early && bin_m1_early) {
761 + tx_c->bin_m1_stop = true;
762 + goto recheck;
763 + }
764 +
765 + if (!tx_c->bin_long_stop &&
766 + tx_c->bin_long_late_early && !bin_long_early) {
767 + tx_c->bin_long_stop = true;
768 + goto recheck;
769 + }
770 +
771 + /* IEEE802.3-2008, 72.6.10.2.3.3
772 + * We only request coefficient updates when no PRESET/INITIALIZE is
773 + * pending. We also only request coefficient updates when the
774 + * corresponding status is NOT UPDATED and nothing is pending.
775 + */
776 + if (!(inst->ld_update & (PRESET_MASK | INIT_MASK))) {
777 + if (!tx_c->bin_long_stop) {
778 + /* BinM1 correction means changing COM1 */
779 + if (!status_com1 && !(inst->ld_update & COM1_MASK)) {
780 + /* Avoid BinM1Late by requesting an
781 + * immediate decrement.
782 + */
783 + if (!bin_m1_early) {
784 + /* request decrement c(-1) */
785 + temp = DECREMENT << COM1_SHIFT;
786 + inst->ld_update = temp;
787 + ld_coe_update(inst);
788 + tx_c->bin_m1_late_early = bin_m1_early;
789 + return;
790 + }
791 + }
792 +
793 + /* BinLong correction means changing COP1 */
794 + if (!status_cop1 && !(inst->ld_update & COP1_MASK)) {
795 + /* Locate BinLong transition point (if any)
796 + * while avoiding BinM1Late.
797 + */
798 + if (bin_long_early) {
799 + /* request increment c(1) */
800 + temp = INCREMENT << COP1_SHIFT;
801 + inst->ld_update = temp;
802 + } else {
803 + /* request decrement c(1) */
804 + temp = DECREMENT << COP1_SHIFT;
805 + inst->ld_update = temp;
806 + }
807 +
808 + ld_coe_update(inst);
809 + tx_c->bin_long_late_early = bin_long_early;
810 + }
811 + /* We try to finish BinLong before we do BinM1 */
812 + return;
813 + }
814 +
815 + if (!tx_c->bin_m1_stop) {
816 + /* BinM1 correction means changing COM1 */
817 + if (!status_com1 && !(inst->ld_update & COM1_MASK)) {
818 + /* Locate BinM1 transition point (if any) */
819 + if (bin_m1_early) {
820 + /* request increment c(-1) */
821 + temp = INCREMENT << COM1_SHIFT;
822 + inst->ld_update = temp;
823 + } else {
824 + /* request decrement c(-1) */
825 + temp = DECREMENT << COM1_SHIFT;
826 + inst->ld_update = temp;
827 + }
828 +
829 + ld_coe_update(inst);
830 + tx_c->bin_m1_late_early = bin_m1_early;
831 + }
832 + }
833 + }
834 +}
835 +
836 +static int is_link_up(struct phy_device *phydev)
837 +{
838 + int val;
839 +
840 + phy_read_mmd(phydev, MDIO_MMD_PCS, FSL_XFI_PCS_10GR_SR1);
841 + val = phy_read_mmd(phydev, MDIO_MMD_PCS, FSL_XFI_PCS_10GR_SR1);
842 +
843 + return (val & FSL_KR_RX_LINK_STAT_MASK) ? 1 : 0;
844 +}
845 +
846 +static int is_link_training_fail(struct phy_device *phydev)
847 +{
848 + int val;
849 + int timeout = 100;
850 +
851 + val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, FSL_KR_PMD_STATUS);
852 + if (!(val & TRAIN_FAIL) && (val & RX_STAT)) {
853 + /* check LNK_STAT for sure */
854 + while (timeout--) {
855 + if (is_link_up(phydev))
856 + return 0;
857 +
858 + usleep_range(100, 500);
859 + }
860 + }
861 +
862 + return 1;
863 +}
864 +
865 +static int check_rx(struct phy_device *phydev)
866 +{
867 + return phy_read_mmd(phydev, MDIO_MMD_PMAPMD, FSL_KR_LP_STATUS) &
868 + RX_READY_MASK;
869 +}
870 +
871 +/* Coefficient values have hardware restrictions */
872 +static int is_ld_valid(struct fsl_xgkr_inst *inst)
873 +{
874 + u32 ratio_pst1q = inst->ratio_pst1q;
875 + u32 adpt_eq = inst->adpt_eq;
876 + u32 ratio_preq = inst->ratio_preq;
877 +
878 + if ((ratio_pst1q + adpt_eq + ratio_preq) > 48)
879 + return 0;
880 +
881 + if (((ratio_pst1q + adpt_eq + ratio_preq) * 4) >=
882 + ((adpt_eq - ratio_pst1q - ratio_preq) * 17))
883 + return 0;
884 +
885 + if (ratio_preq > ratio_pst1q)
886 + return 0;
887 +
888 + if (ratio_preq > 8)
889 + return 0;
890 +
891 + if (adpt_eq < 26)
892 + return 0;
893 +
894 + if (ratio_pst1q > 16)
895 + return 0;
896 +
897 + return 1;
898 +}
899 +
900 +static int is_value_allowed(const u32 *val_table, u32 val)
901 +{
902 + int i;
903 +
904 + for (i = 0;; i++) {
905 + if (*(val_table + i) == VAL_INVALID)
906 + return 0;
907 + if (*(val_table + i) == val)
908 + return 1;
909 + }
910 +}
911 +
912 +static int inc_dec(struct fsl_xgkr_inst *inst, int field, int request)
913 +{
914 + u32 ld_limit[3], ld_coe[3], step[3];
915 +
916 + ld_coe[0] = inst->ratio_pst1q;
917 + ld_coe[1] = inst->adpt_eq;
918 + ld_coe[2] = inst->ratio_preq;
919 +
920 + /* Information specific to the Freescale SerDes for 10GBase-KR:
921 + * Incrementing C(+1) means *decrementing* RATIO_PST1Q
922 + * Incrementing C(0) means incrementing ADPT_EQ
923 + * Incrementing C(-1) means *decrementing* RATIO_PREQ
924 + */
925 + step[0] = -1;
926 + step[1] = 1;
927 + step[2] = -1;
928 +
929 + switch (request) {
930 + case INCREMENT:
931 + ld_limit[0] = POST_COE_MAX;
932 + ld_limit[1] = ZERO_COE_MAX;
933 + ld_limit[2] = PRE_COE_MAX;
934 + if (ld_coe[field] != ld_limit[field])
935 + ld_coe[field] += step[field];
936 + else
937 + /* MAX */
938 + return 2;
939 + break;
940 + case DECREMENT:
941 + ld_limit[0] = POST_COE_MIN;
942 + ld_limit[1] = ZERO_COE_MIN;
943 + ld_limit[2] = PRE_COE_MIN;
944 + if (ld_coe[field] != ld_limit[field])
945 + ld_coe[field] -= step[field];
946 + else
947 + /* MIN */
948 + return 1;
949 + break;
950 + default:
951 + break;
952 + }
953 +
954 + if (is_ld_valid(inst)) {
955 + /* accept new ld */
956 + inst->ratio_pst1q = ld_coe[0];
957 + inst->adpt_eq = ld_coe[1];
958 + inst->ratio_preq = ld_coe[2];
959 + /* only some values for preq and pst1q can be used.
960 + * for preq: 0x0, 0x1, 0x3, 0x5, 0x7, 0x9, 0xb, 0xc.
961 + * for pst1q: 0x0, 0x1, 0x3, 0x5, 0x7, 0x9, 0xb, 0xd, 0xf, 0x10.
962 + */
963 + if (!is_value_allowed((const u32 *)&preq_table, ld_coe[2])) {
964 + dev_dbg(&inst->phydev->mdio.dev,
965 + "preq skipped value: %d\n", ld_coe[2]);
966 + return 0;
967 + }
968 +
969 + if (!is_value_allowed((const u32 *)&pst1q_table, ld_coe[0])) {
970 + dev_dbg(&inst->phydev->mdio.dev,
971 + "pst1q skipped value: %d\n", ld_coe[0]);
972 + return 0;
973 + }
974 +
975 + tune_tecr0(inst);
976 + } else {
977 + if (request == DECREMENT)
978 + /* MIN */
979 + return 1;
980 + if (request == INCREMENT)
981 + /* MAX */
982 + return 2;
983 + }
984 +
985 + return 0;
986 +}
987 +
988 +static void min_max_updated(struct fsl_xgkr_inst *inst, int field, int new_ld)
989 +{
990 + u32 ld_coe[] = {COE_UPDATED, COE_MIN, COE_MAX};
991 + u32 mask, val;
992 +
993 + switch (field) {
994 + case COE_COP1:
995 + mask = COP1_MASK;
996 + val = ld_coe[new_ld] << COP1_SHIFT;
997 + break;
998 + case COE_COZ:
999 + mask = COZ_MASK;
1000 + val = ld_coe[new_ld] << COZ_SHIFT;
1001 + break;
1002 + case COE_COM:
1003 + mask = COM1_MASK;
1004 + val = ld_coe[new_ld] << COM1_SHIFT;
1005 + break;
1006 + default:
1007 + return;
1008 + }
1009 +
1010 + inst->ld_status &= ~mask;
1011 + inst->ld_status |= val;
1012 +}
1013 +
1014 +static void check_request(struct fsl_xgkr_inst *inst, int request)
1015 +{
1016 + int cop1_req, coz_req, com_req;
1017 + int old_status, new_ld_sta;
1018 +
1019 + cop1_req = (request & COP1_MASK) >> COP1_SHIFT;
1020 + coz_req = (request & COZ_MASK) >> COZ_SHIFT;
1021 + com_req = (request & COM1_MASK) >> COM1_SHIFT;
1022 +
1023 + /* IEEE802.3-2008, 72.6.10.2.5
1024 + * Ensure we only act on INCREMENT/DECREMENT when we are in NOT UPDATED
1025 + */
1026 + old_status = inst->ld_status;
1027 +
1028 + if (cop1_req && !(inst->ld_status & COP1_MASK)) {
1029 + new_ld_sta = inc_dec(inst, COE_COP1, cop1_req);
1030 + min_max_updated(inst, COE_COP1, new_ld_sta);
1031 + }
1032 +
1033 + if (coz_req && !(inst->ld_status & COZ_MASK)) {
1034 + new_ld_sta = inc_dec(inst, COE_COZ, coz_req);
1035 + min_max_updated(inst, COE_COZ, new_ld_sta);
1036 + }
1037 +
1038 + if (com_req && !(inst->ld_status & COM1_MASK)) {
1039 + new_ld_sta = inc_dec(inst, COE_COM, com_req);
1040 + min_max_updated(inst, COE_COM, new_ld_sta);
1041 + }
1042 +
1043 + if (old_status != inst->ld_status)
1044 + ld_coe_status(inst);
1045 +}
1046 +
1047 +static void preset(struct fsl_xgkr_inst *inst)
1048 +{
1049 + /* These are all MAX values from the IEEE802.3 perspective. */
1050 + inst->ratio_pst1q = POST_COE_MAX;
1051 + inst->adpt_eq = ZERO_COE_MAX;
1052 + inst->ratio_preq = PRE_COE_MAX;
1053 +
1054 + tune_tecr0(inst);
1055 + inst->ld_status &= ~(COP1_MASK | COZ_MASK | COM1_MASK);
1056 + inst->ld_status |= COE_MAX << COP1_SHIFT |
1057 + COE_MAX << COZ_SHIFT |
1058 + COE_MAX << COM1_SHIFT;
1059 + ld_coe_status(inst);
1060 +}
1061 +
1062 +static void initialize(struct fsl_xgkr_inst *inst)
1063 +{
1064 + inst->ratio_preq = RATIO_PREQ;
1065 + inst->ratio_pst1q = RATIO_PST1Q;
1066 + inst->adpt_eq = RATIO_EQ;
1067 +
1068 + tune_tecr0(inst);
1069 + inst->ld_status &= ~(COP1_MASK | COZ_MASK | COM1_MASK);
1070 + inst->ld_status |= COE_UPDATED << COP1_SHIFT |
1071 + COE_UPDATED << COZ_SHIFT |
1072 + COE_UPDATED << COM1_SHIFT;
1073 + ld_coe_status(inst);
1074 +}
1075 +
1076 +static void train_rx(struct fsl_xgkr_inst *inst)
1077 +{
1078 + struct phy_device *phydev = inst->phydev;
1079 + int request, old_ld_status;
1080 +
1081 + /* get request from LP */
1082 + request = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, FSL_KR_LP_CU) &
1083 + (LD_ALL_MASK);
1084 + old_ld_status = inst->ld_status;
1085 +
1086 + /* IEEE802.3-2008, 72.6.10.2.5
1087 + * Ensure we always go to NOT UPDATED for status reporting in
1088 + * response to HOLD requests.
1089 + * IEEE802.3-2008, 72.6.10.2.3.1/2
1090 + * ... but only if PRESET/INITIALIZE are not active to ensure
1091 + * we keep status until they are released.
1092 + */
1093 + if (!(request & (PRESET_MASK | INIT_MASK))) {
1094 + if (!(request & COP1_MASK))
1095 + inst->ld_status &= ~COP1_MASK;
1096 +
1097 + if (!(request & COZ_MASK))
1098 + inst->ld_status &= ~COZ_MASK;
1099 +
1100 + if (!(request & COM1_MASK))
1101 + inst->ld_status &= ~COM1_MASK;
1102 +
1103 + if (old_ld_status != inst->ld_status)
1104 + ld_coe_status(inst);
1105 + }
1106 +
1107 + /* As soon as the LP shows ready, no need to do any more updates. */
1108 + if (check_rx(phydev)) {
1109 + /* LP receiver is ready */
1110 + if (inst->ld_status & (COP1_MASK | COZ_MASK | COM1_MASK)) {
1111 + inst->ld_status &= ~(COP1_MASK | COZ_MASK | COM1_MASK);
1112 + ld_coe_status(inst);
1113 + }
1114 + } else {
1115 + /* IEEE802.3-2008, 72.6.10.2.3.1/2
1116 + * only act on PRESET/INITIALIZE if all status is NOT UPDATED.
1117 + */
1118 + if (request & (PRESET_MASK | INIT_MASK)) {
1119 + if (!(inst->ld_status &
1120 + (COP1_MASK | COZ_MASK | COM1_MASK))) {
1121 + if (request & PRESET_MASK)
1122 + preset(inst);
1123 +
1124 + if (request & INIT_MASK)
1125 + initialize(inst);
1126 + }
1127 + }
1128 +
1129 + /* LP Coefficients are not in HOLD */
1130 + if (request & REQUEST_MASK)
1131 + check_request(inst, request & REQUEST_MASK);
1132 + }
1133 +}
1134 +
1135 +static void xgkr_start_train(struct phy_device *phydev)
1136 +{
1137 + struct fsl_xgkr_inst *inst = phydev->priv;
1138 + struct tx_condition *tx_c = &inst->tx_c;
1139 + int val = 0, i;
1140 + int lt_state;
1141 + unsigned long dead_line;
1142 + int rx_ok, tx_ok;
1143 +
1144 + init_inst(inst, 0);
1145 + start_lt(phydev);
1146 +
1147 + for (i = 0; i < 2;) {
1148 + dead_line = jiffies + msecs_to_jiffies(500);
1149 + while (time_before(jiffies, dead_line)) {
1150 + val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD,
1151 + FSL_KR_PMD_STATUS);
1152 + if (val & TRAIN_FAIL) {
1153 + /* LT failed already, reset lane to avoid
1154 + * it run into hanging, then start LT again.
1155 + */
1156 + reset_gcr0(inst);
1157 + start_lt(phydev);
1158 + } else if ((val & PMD_STATUS_SUP_STAT) &&
1159 + (val & PMD_STATUS_FRAME_LOCK))
1160 + break;
1161 + usleep_range(100, 500);
1162 + }
1163 +
1164 + if (!((val & PMD_STATUS_FRAME_LOCK) &&
1165 + (val & PMD_STATUS_SUP_STAT))) {
1166 + i++;
1167 + continue;
1168 + }
1169 +
1170 + /* init process */
1171 + rx_ok = false;
1172 + tx_ok = false;
1173 + /* the LT should be finished in 500ms, failed or OK. */
1174 + dead_line = jiffies + msecs_to_jiffies(500);
1175 +
1176 + while (time_before(jiffies, dead_line)) {
1177 + /* check if the LT is already failed */
1178 + lt_state = phy_read_mmd(phydev, MDIO_MMD_PMAPMD,
1179 + FSL_KR_PMD_STATUS);
1180 + if (lt_state & TRAIN_FAIL) {
1181 + reset_gcr0(inst);
1182 + break;
1183 + }
1184 +
1185 + rx_ok = check_rx(phydev);
1186 + tx_ok = tx_c->tx_complete;
1187 +
1188 + if (rx_ok && tx_ok)
1189 + break;
1190 +
1191 + if (!rx_ok)
1192 + train_rx(inst);
1193 +
1194 + if (!tx_ok)
1195 + train_tx(inst);
1196 +
1197 + usleep_range(100, 500);
1198 + }
1199 +
1200 + i++;
1201 + /* check LT result */
1202 + if (is_link_training_fail(phydev)) {
1203 + init_inst(inst, 0);
1204 + continue;
1205 + } else {
1206 + stop_lt(phydev);
1207 + inst->state = TRAINED;
1208 + break;
1209 + }
1210 + }
1211 +}
1212 +
1213 +static void xgkr_state_machine(struct work_struct *work)
1214 +{
1215 + struct delayed_work *dwork = to_delayed_work(work);
1216 + struct fsl_xgkr_inst *inst = container_of(dwork,
1217 + struct fsl_xgkr_inst,
1218 + xgkr_wk);
1219 + struct phy_device *phydev = inst->phydev;
1220 + int an_state;
1221 + bool needs_train = false;
1222 +
1223 + mutex_lock(&phydev->lock);
1224 +
1225 + switch (inst->state) {
1226 + case DETECTING_LP:
1227 + phy_read_mmd(phydev, MDIO_MMD_AN, FSL_AN_BP_STAT);
1228 + an_state = phy_read_mmd(phydev, MDIO_MMD_AN, FSL_AN_BP_STAT);
1229 + if ((an_state & KR_AN_MASK))
1230 + needs_train = true;
1231 + break;
1232 + case TRAINED:
1233 + if (!is_link_up(phydev)) {
1234 + dev_info(&phydev->mdio.dev,
1235 + "Detect hotplug, restart training\n");
1236 + init_inst(inst, 1);
1237 + start_xgkr_an(phydev);
1238 + inst->state = DETECTING_LP;
1239 + }
1240 + break;
1241 + }
1242 +
1243 + if (needs_train)
1244 + xgkr_start_train(phydev);
1245 +
1246 + mutex_unlock(&phydev->lock);
1247 + queue_delayed_work(system_power_efficient_wq, &inst->xgkr_wk,
1248 + msecs_to_jiffies(XGKR_TIMEOUT));
1249 +}
1250 +
1251 +static int fsl_backplane_probe(struct phy_device *phydev)
1252 +{
1253 + struct fsl_xgkr_inst *xgkr_inst;
1254 + struct device_node *phy_node, *lane_node;
1255 + struct resource res_lane;
1256 + const char *bm;
1257 + int ret;
1258 + int bp_mode;
1259 + u32 lane[2];
1260 +
1261 + phy_node = phydev->mdio.dev.of_node;
1262 + bp_mode = of_property_read_string(phy_node, "backplane-mode", &bm);
1263 + if (bp_mode < 0)
1264 + return 0;
1265 +
1266 + if (!strcasecmp(bm, "1000base-kx")) {
1267 + bp_mode = PHY_BACKPLANE_1000BASE_KX;
1268 + } else if (!strcasecmp(bm, "10gbase-kr")) {
1269 + bp_mode = PHY_BACKPLANE_10GBASE_KR;
1270 + } else {
1271 + dev_err(&phydev->mdio.dev, "Unknown backplane-mode\n");
1272 + return -EINVAL;
1273 + }
1274 +
1275 + lane_node = of_parse_phandle(phy_node, "fsl,lane-handle", 0);
1276 + if (!lane_node) {
1277 + dev_err(&phydev->mdio.dev, "parse fsl,lane-handle failed\n");
1278 + return -EINVAL;
1279 + }
1280 +
1281 + ret = of_address_to_resource(lane_node, 0, &res_lane);
1282 + if (ret) {
1283 + dev_err(&phydev->mdio.dev, "could not obtain memory map\n");
1284 + return ret;
1285 + }
1286 +
1287 + of_node_put(lane_node);
1288 + ret = of_property_read_u32_array(phy_node, "fsl,lane-reg",
1289 + (u32 *)&lane, 2);
1290 + if (ret) {
1291 + dev_err(&phydev->mdio.dev, "could not get fsl,lane-reg\n");
1292 + return -EINVAL;
1293 + }
1294 +
1295 + phydev->priv = devm_ioremap_nocache(&phydev->mdio.dev,
1296 + res_lane.start + lane[0],
1297 + lane[1]);
1298 + if (!phydev->priv) {
1299 + dev_err(&phydev->mdio.dev, "ioremap_nocache failed\n");
1300 + return -ENOMEM;
1301 + }
1302 +
1303 + if (bp_mode == PHY_BACKPLANE_1000BASE_KX) {
1304 + phydev->speed = SPEED_1000;
1305 + /* configure the lane for 1000BASE-KX */
1306 + lane_set_1gkx(phydev->priv);
1307 + return 0;
1308 + }
1309 +
1310 + xgkr_inst = devm_kzalloc(&phydev->mdio.dev,
1311 + sizeof(*xgkr_inst), GFP_KERNEL);
1312 + if (!xgkr_inst)
1313 + return -ENOMEM;
1314 +
1315 + xgkr_inst->reg_base = phydev->priv;
1316 + xgkr_inst->phydev = phydev;
1317 + phydev->priv = xgkr_inst;
1318 +
1319 + if (bp_mode == PHY_BACKPLANE_10GBASE_KR) {
1320 + phydev->speed = SPEED_10000;
1321 + INIT_DELAYED_WORK(&xgkr_inst->xgkr_wk, xgkr_state_machine);
1322 + }
1323 +
1324 + return 0;
1325 +}
1326 +
1327 +static int fsl_backplane_aneg_done(struct phy_device *phydev)
1328 +{
1329 + return 1;
1330 +}
1331 +
1332 +static int fsl_backplane_config_aneg(struct phy_device *phydev)
1333 +{
1334 + if (phydev->speed == SPEED_10000) {
1335 + phydev->supported |= SUPPORTED_10000baseKR_Full;
1336 + start_xgkr_an(phydev);
1337 + } else if (phydev->speed == SPEED_1000) {
1338 + phydev->supported |= SUPPORTED_1000baseKX_Full;
1339 + start_1gkx_an(phydev);
1340 + }
1341 +
1342 + phydev->advertising = phydev->supported;
1343 + phydev->duplex = 1;
1344 +
1345 + return 0;
1346 +}
1347 +
1348 +static int fsl_backplane_suspend(struct phy_device *phydev)
1349 +{
1350 + if (phydev->speed == SPEED_10000) {
1351 + struct fsl_xgkr_inst *xgkr_inst = phydev->priv;
1352 +
1353 + cancel_delayed_work_sync(&xgkr_inst->xgkr_wk);
1354 + }
1355 + return 0;
1356 +}
1357 +
1358 +static int fsl_backplane_resume(struct phy_device *phydev)
1359 +{
1360 + if (phydev->speed == SPEED_10000) {
1361 + struct fsl_xgkr_inst *xgkr_inst = phydev->priv;
1362 +
1363 + init_inst(xgkr_inst, 1);
1364 + queue_delayed_work(system_power_efficient_wq,
1365 + &xgkr_inst->xgkr_wk,
1366 + msecs_to_jiffies(XGKR_TIMEOUT));
1367 + }
1368 + return 0;
1369 +}
1370 +
1371 +static int fsl_backplane_read_status(struct phy_device *phydev)
1372 +{
1373 + if (is_link_up(phydev))
1374 + phydev->link = 1;
1375 + else
1376 + phydev->link = 0;
1377 +
1378 + return 0;
1379 +}
1380 +
1381 +static struct phy_driver fsl_backplane_driver[] = {
1382 + {
1383 + .phy_id = FSL_PCS_PHY_ID,
1384 + .name = "Freescale Backplane",
1385 + .phy_id_mask = 0xffffffff,
1386 + .features = SUPPORTED_Backplane | SUPPORTED_Autoneg |
1387 + SUPPORTED_MII,
1388 + .probe = fsl_backplane_probe,
1389 + .aneg_done = fsl_backplane_aneg_done,
1390 + .config_aneg = fsl_backplane_config_aneg,
1391 + .read_status = fsl_backplane_read_status,
1392 + .suspend = fsl_backplane_suspend,
1393 + .resume = fsl_backplane_resume,
1394 + },
1395 +};
1396 +
1397 +module_phy_driver(fsl_backplane_driver);
1398 +
1399 +static struct mdio_device_id __maybe_unused freescale_tbl[] = {
1400 + { FSL_PCS_PHY_ID, 0xffffffff },
1401 + { }
1402 +};
1403 +
1404 +MODULE_DEVICE_TABLE(mdio, freescale_tbl);
1405 +
1406 +MODULE_DESCRIPTION("Freescale Backplane driver");
1407 +MODULE_AUTHOR("Shaohui Xie <Shaohui.Xie@freescale.com>");
1408 +MODULE_LICENSE("GPL v2");
1409 --- a/drivers/net/phy/swphy.c
1410 +++ b/drivers/net/phy/swphy.c
1411 @@ -77,6 +77,7 @@ static const struct swmii_regs duplex[]
1412 static int swphy_decode_speed(int speed)
1413 {
1414 switch (speed) {
1415 + case 10000:
1416 case 1000:
1417 return SWMII_SPEED_1000;
1418 case 100:
1419 --- a/include/linux/phy.h
1420 +++ b/include/linux/phy.h
1421 @@ -87,6 +87,7 @@ typedef enum {
1422 PHY_INTERFACE_MODE_XAUI,
1423 /* 10GBASE-KR, XFI, SFI - single lane 10G Serdes */
1424 PHY_INTERFACE_MODE_10GKR,
1425 + PHY_INTERFACE_MODE_2500SGMII,
1426 PHY_INTERFACE_MODE_MAX,
1427 } phy_interface_t;
1428
1429 @@ -159,6 +160,8 @@ static inline const char *phy_modes(phy_
1430 return "xaui";
1431 case PHY_INTERFACE_MODE_10GKR:
1432 return "10gbase-kr";
1433 + case PHY_INTERFACE_MODE_2500SGMII:
1434 + return "sgmii-2500";
1435 default:
1436 return "unknown";
1437 }