target/linux/mediatek/patches-4.19/0001-eth-sync-from-mtk-lede.patch
1 Index: linux-4.19.57/drivers/net/ethernet/mediatek/Kconfig
2 ===================================================================
3 --- linux-4.19.57.orig/drivers/net/ethernet/mediatek/Kconfig
4 +++ linux-4.19.57/drivers/net/ethernet/mediatek/Kconfig
5 @@ -1,6 +1,6 @@
6 config NET_VENDOR_MEDIATEK
7 bool "MediaTek ethernet driver"
8 - depends on ARCH_MEDIATEK
9 + depends on ARCH_MEDIATEK || RALINK
10 ---help---
11 If you have a Mediatek SoC with ethernet, say Y.
12
13 Index: linux-4.19.57/drivers/net/ethernet/mediatek/Makefile
14 ===================================================================
15 --- linux-4.19.57.orig/drivers/net/ethernet/mediatek/Makefile
16 +++ linux-4.19.57/drivers/net/ethernet/mediatek/Makefile
17 @@ -2,4 +2,5 @@
18 # Makefile for the Mediatek SoCs built-in ethernet macs
19 #
20
21 -obj-$(CONFIG_NET_MEDIATEK_SOC) += mtk_eth_soc.o
22 +obj-$(CONFIG_NET_MEDIATEK_SOC) += mtk_eth_soc.o mtk_sgmii.o \
23 + mtk_eth_path.o
24 Index: linux-4.19.57/drivers/net/ethernet/mediatek/mtk_eth_path.c
25 ===================================================================
26 --- /dev/null
27 +++ linux-4.19.57/drivers/net/ethernet/mediatek/mtk_eth_path.c
28 @@ -0,0 +1,333 @@
29 +/*
30 + * Copyright (C) 2018 MediaTek Inc.
31 + *
32 + * This program is free software; you can redistribute it and/or modify
33 + * it under the terms of the GNU General Public License as published by
34 + * the Free Software Foundation; version 2 of the License
35 + *
36 + * This program is distributed in the hope that it will be useful,
37 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
38 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
39 + * GNU General Public License for more details.
40 + *
41 + * Copyright (C) 2018 Sean Wang <sean.wang@mediatek.com>
42 + */
43 +
44 +#include <linux/phy.h>
45 +#include <linux/regmap.h>
46 +
47 +#include "mtk_eth_soc.h"
48 +
49 +struct mtk_eth_muxc {
50 + int (*set_path)(struct mtk_eth *eth, int path);
51 +};
52 +
53 +static const char * const mtk_eth_mux_name[] = {
54 + "mux_gdm1_to_gmac1_esw", "mux_gmac2_gmac0_to_gephy",
55 + "mux_u3_gmac2_to_qphy", "mux_gmac1_gmac2_to_sgmii_rgmii",
56 + "mux_gmac12_to_gephy_sgmii",
57 +};
58 +
59 +static const char * const mtk_eth_path_name[] = {
60 + "gmac1_rgmii", "gmac1_trgmii", "gmac1_sgmii", "gmac2_rgmii",
61 + "gmac2_sgmii", "gmac2_gephy", "gdm1_esw",
62 +};
63 +
64 +static int set_mux_gdm1_to_gmac1_esw(struct mtk_eth *eth, int path)
65 +{
66 + u32 val, mask, set;
67 + bool updated = true;
68 +
69 + switch (path) {
70 + case MTK_ETH_PATH_GMAC1_SGMII:
71 + mask = ~(u32)MTK_MUX_TO_ESW;
72 + set = 0;
73 + break;
74 + case MTK_ETH_PATH_GDM1_ESW:
75 + mask = ~(u32)MTK_MUX_TO_ESW;
76 + set = MTK_MUX_TO_ESW;
77 + break;
78 + default:
79 + updated = false;
80 + break;
81 +	}
82 +
83 + if (updated) {
84 + val = mtk_r32(eth, MTK_MAC_MISC);
85 + val = (val & mask) | set;
86 + mtk_w32(eth, val, MTK_MAC_MISC);
87 + }
88 +
89 + dev_info(eth->dev, "path %s in %s updated = %d\n",
90 + mtk_eth_path_name[path], __func__, updated);
91 +
92 + return 0;
93 +}
94 +
95 +static int set_mux_gmac2_gmac0_to_gephy(struct mtk_eth *eth, int path)
96 +{
97 + unsigned int val = 0;
98 + bool updated = true;
99 +
100 + switch (path) {
101 + case MTK_ETH_PATH_GMAC2_GEPHY:
102 + val = ~(u32)GEPHY_MAC_SEL;
103 + break;
104 + default:
105 + updated = false;
106 + break;
107 + }
108 +
109 + if (updated)
110 + regmap_update_bits(eth->infra, INFRA_MISC2, GEPHY_MAC_SEL, val);
111 +
112 + dev_info(eth->dev, "path %s in %s updated = %d\n",
113 + mtk_eth_path_name[path], __func__, updated);
114 +
115 + return 0;
116 +}
117 +
118 +static int set_mux_u3_gmac2_to_qphy(struct mtk_eth *eth, int path)
119 +{
120 + unsigned int val = 0;
121 + bool updated = true;
122 +
123 + switch (path) {
124 + case MTK_ETH_PATH_GMAC2_SGMII:
125 + val = CO_QPHY_SEL;
126 + break;
127 + default:
128 + updated = false;
129 + break;
130 + }
131 +
132 + if (updated)
133 + regmap_update_bits(eth->infra, INFRA_MISC2, CO_QPHY_SEL, val);
134 +
135 + dev_info(eth->dev, "path %s in %s updated = %d\n",
136 + mtk_eth_path_name[path], __func__, updated);
137 +
138 + return 0;
139 +}
140 +
141 +static int set_mux_gmac1_gmac2_to_sgmii_rgmii(struct mtk_eth *eth, int path)
142 +{
143 + unsigned int val = 0;
144 + bool updated = true;
145 +
146 + switch (path) {
147 + case MTK_ETH_PATH_GMAC1_SGMII:
148 + val = SYSCFG0_SGMII_GMAC1;
149 + break;
150 + case MTK_ETH_PATH_GMAC2_SGMII:
151 + val = SYSCFG0_SGMII_GMAC2;
152 + break;
153 + case MTK_ETH_PATH_GMAC1_RGMII:
154 + case MTK_ETH_PATH_GMAC2_RGMII:
155 + regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
156 + val &= SYSCFG0_SGMII_MASK;
157 +
158 +		if ((path == MTK_ETH_PATH_GMAC1_RGMII && val == SYSCFG0_SGMII_GMAC1) ||
159 +		    (path == MTK_ETH_PATH_GMAC2_RGMII && val == SYSCFG0_SGMII_GMAC2))
160 + val = 0;
161 + else
162 + updated = false;
163 + break;
164 + default:
165 + updated = false;
166 + break;
167 +	}
168 +
169 + if (updated)
170 + regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
171 + SYSCFG0_SGMII_MASK, val);
172 +
173 + dev_info(eth->dev, "path %s in %s updated = %d\n",
174 + mtk_eth_path_name[path], __func__, updated);
175 +
176 + return 0;
177 +}
178 +
179 +static int set_mux_gmac12_to_gephy_sgmii(struct mtk_eth *eth, int path)
180 +{
181 + unsigned int val = 0;
182 + bool updated = true;
183 +
184 + regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
185 +
186 + switch (path) {
187 + case MTK_ETH_PATH_GMAC1_SGMII:
188 + val |= SYSCFG0_SGMII_GMAC1_V2;
189 + break;
190 + case MTK_ETH_PATH_GMAC2_GEPHY:
191 + val &= ~(u32)SYSCFG0_SGMII_GMAC2_V2;
192 + break;
193 + case MTK_ETH_PATH_GMAC2_SGMII:
194 + val |= SYSCFG0_SGMII_GMAC2_V2;
195 + break;
196 + default:
197 + updated = false;
198 +	}
199 +
200 + if (updated)
201 + regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
202 + SYSCFG0_SGMII_MASK, val);
203 +
204 + if (!updated)
205 +		dev_info(eth->dev, "path %s needs no update in %s\n",
206 + mtk_eth_path_name[path], __func__);
207 +
208 + dev_info(eth->dev, "path %s in %s updated = %d\n",
209 + mtk_eth_path_name[path], __func__, updated);
210 +
211 + return 0;
212 +}
213 +
214 +static const struct mtk_eth_muxc mtk_eth_muxc[] = {
215 + { .set_path = set_mux_gdm1_to_gmac1_esw, },
216 + { .set_path = set_mux_gmac2_gmac0_to_gephy, },
217 + { .set_path = set_mux_u3_gmac2_to_qphy, },
218 + { .set_path = set_mux_gmac1_gmac2_to_sgmii_rgmii, },
219 + { .set_path = set_mux_gmac12_to_gephy_sgmii, }
220 +};
221 +
222 +static int mtk_eth_mux_setup(struct mtk_eth *eth, int path)
223 +{
224 + int i, err = 0;
225 +
226 + if (!MTK_HAS_CAPS(eth->soc->caps, MTK_PATH_BIT(path))) {
227 +		dev_info(eth->dev, "path %s isn't supported on the SoC\n",
228 + mtk_eth_path_name[path]);
229 + return -EINVAL;
230 + }
231 +
232 + if (!MTK_HAS_CAPS(eth->soc->caps, MTK_MUX))
233 + return 0;
234 +
235 + /* Setup MUX in path fabric */
236 + for (i = 0; i < MTK_ETH_MUX_MAX; i++) {
237 + if (MTK_HAS_CAPS(eth->soc->caps, MTK_MUX_BIT(i))) {
238 + err = mtk_eth_muxc[i].set_path(eth, path);
239 + if (err)
240 + goto out;
241 + } else {
242 + dev_info(eth->dev, "mux %s isn't present on the SoC\n",
243 + mtk_eth_mux_name[i]);
244 + }
245 + }
246 +
247 +out:
248 + return err;
249 +}
250 +
251 +static int mtk_gmac_sgmii_path_setup(struct mtk_eth *eth, int mac_id)
252 +{
253 + unsigned int val = 0;
254 + int sid, err, path;
255 +
256 + path = (mac_id == 0) ? MTK_ETH_PATH_GMAC1_SGMII :
257 + MTK_ETH_PATH_GMAC2_SGMII;
258 +
259 + /* Setup proper MUXes along the path */
260 + err = mtk_eth_mux_setup(eth, path);
261 + if (err)
262 + return err;
263 +
264 +	/* The path from GMAC to SGMII will be enabled once the SGMIISYS
265 +	 * setup is done.
266 +	 */
267 + regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
268 +
269 + regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
270 + SYSCFG0_SGMII_MASK, ~(u32)SYSCFG0_SGMII_MASK);
271 +
272 +	/* Decide how GMAC and SGMIISYS are mapped */
273 + sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ? 0 : mac_id;
274 +
275 + /* Setup SGMIISYS with the determined property */
276 + if (MTK_HAS_FLAGS(eth->sgmii->flags[sid], MTK_SGMII_PHYSPEED_AN))
277 + err = mtk_sgmii_setup_mode_an(eth->sgmii, sid);
278 + else
279 + err = mtk_sgmii_setup_mode_force(eth->sgmii, sid);
280 +
281 + if (err)
282 + return err;
283 +
284 + regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
285 + SYSCFG0_SGMII_MASK, val);
286 +
287 + return 0;
288 +}
289 +
290 +static int mtk_gmac_gephy_path_setup(struct mtk_eth *eth, int mac_id)
291 +{
292 + int err, path = 0;
293 +
294 + if (mac_id == 1)
295 + path = MTK_ETH_PATH_GMAC2_GEPHY;
296 +
297 + if (!path)
298 + return -EINVAL;
299 +
300 + /* Setup proper MUXes along the path */
301 + err = mtk_eth_mux_setup(eth, path);
302 + if (err)
303 + return err;
304 +
305 + return 0;
306 +}
307 +
308 +static int mtk_gmac_rgmii_path_setup(struct mtk_eth *eth, int mac_id)
309 +{
310 + int err, path;
311 +
312 + path = (mac_id == 0) ? MTK_ETH_PATH_GMAC1_RGMII :
313 + MTK_ETH_PATH_GMAC2_RGMII;
314 +
315 + /* Setup proper MUXes along the path */
316 + err = mtk_eth_mux_setup(eth, path);
317 + if (err)
318 + return err;
319 +
320 + return 0;
321 +}
322 +
323 +int mtk_setup_hw_path(struct mtk_eth *eth, int mac_id, int phymode)
324 +{
325 + int err;
326 +
327 + switch (phymode) {
328 + case PHY_INTERFACE_MODE_TRGMII:
329 + case PHY_INTERFACE_MODE_RGMII_TXID:
330 + case PHY_INTERFACE_MODE_RGMII_RXID:
331 + case PHY_INTERFACE_MODE_RGMII_ID:
332 + case PHY_INTERFACE_MODE_RGMII:
333 + case PHY_INTERFACE_MODE_MII:
334 + case PHY_INTERFACE_MODE_REVMII:
335 + case PHY_INTERFACE_MODE_RMII:
336 + if (MTK_HAS_CAPS(eth->soc->caps, MTK_RGMII)) {
337 + err = mtk_gmac_rgmii_path_setup(eth, mac_id);
338 + if (err)
339 + return err;
340 + }
341 + break;
342 + case PHY_INTERFACE_MODE_SGMII:
343 + if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
344 + err = mtk_gmac_sgmii_path_setup(eth, mac_id);
345 + if (err)
346 + return err;
347 + }
348 + break;
349 + case PHY_INTERFACE_MODE_GMII:
350 + if (MTK_HAS_CAPS(eth->soc->caps, MTK_GEPHY)) {
351 + err = mtk_gmac_gephy_path_setup(eth, mac_id);
352 + if (err)
353 + return err;
354 + }
355 + break;
356 + default:
357 + break;
358 + }
359 +
360 + return 0;
361 +}
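Note on the capability layout used by mtk_eth_mux_setup() above: each enum mtk_eth_path value maps to its own capability bit starting at bit 10 (MTK_PATH_BIT) and each enum mtk_eth_mux value to a bit starting at bit 20 (MTK_MUX_BIT), while the low bits carry function groups such as MTK_SGMII; MTK_HAS_CAPS() then tests any combination with one mask-compare. A minimal standalone sketch of that scheme, not part of the patch — the constants are copied from the mtk_eth_soc.h hunk further down, and the main() harness is illustrative only:

#include <stdint.h>
#include <stdio.h>

#define BIT(n)			(1u << (n))
#define MTK_RGMII		BIT(0)
#define MTK_SGMII		BIT(2)
#define MTK_PATH_BIT(x)		BIT((x) + 10)
#define MTK_HAS_CAPS(caps, _x)	(((caps) & (_x)) == (_x))

enum mtk_eth_path {
	MTK_ETH_PATH_GMAC1_RGMII,
	MTK_ETH_PATH_GMAC1_TRGMII,
	MTK_ETH_PATH_GMAC1_SGMII,
};

/* A path capability is the path's own bit OR'ed with its function group,
 * so one test covers both "the SoC has SGMII at all" and "GMAC1 may use it".
 */
#define MTK_GMAC1_RGMII	(MTK_PATH_BIT(MTK_ETH_PATH_GMAC1_RGMII) | MTK_RGMII)
#define MTK_GMAC1_SGMII	(MTK_PATH_BIT(MTK_ETH_PATH_GMAC1_SGMII) | MTK_SGMII)

int main(void)
{
	uint32_t caps = MTK_GMAC1_RGMII | MTK_GMAC1_SGMII;	/* hypothetical SoC */

	/* Same check mtk_eth_mux_setup() performs before touching any mux */
	printf("gmac1_sgmii supported: %d\n",
	       MTK_HAS_CAPS(caps, MTK_PATH_BIT(MTK_ETH_PATH_GMAC1_SGMII)));
	printf("gmac1_trgmii supported: %d\n",
	       MTK_HAS_CAPS(caps, MTK_PATH_BIT(MTK_ETH_PATH_GMAC1_TRGMII)));
	return 0;
}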
362 Index: linux-4.19.57/drivers/net/ethernet/mediatek/mtk_eth_soc.c
363 ===================================================================
364 --- linux-4.19.57.orig/drivers/net/ethernet/mediatek/mtk_eth_soc.c
365 +++ linux-4.19.57/drivers/net/ethernet/mediatek/mtk_eth_soc.c
366 @@ -23,6 +23,7 @@
367 #include <linux/reset.h>
368 #include <linux/tcp.h>
369 #include <linux/interrupt.h>
370 +#include <linux/mdio.h>
371 #include <linux/pinctrl/devinfo.h>
372
373 #include "mtk_eth_soc.h"
374 @@ -54,8 +55,10 @@ static const struct mtk_ethtool_stats {
375 };
376
377 static const char * const mtk_clks_source_name[] = {
378 - "ethif", "esw", "gp0", "gp1", "gp2", "trgpll", "sgmii_tx250m",
379 - "sgmii_rx250m", "sgmii_cdr_ref", "sgmii_cdr_fb", "sgmii_ck", "eth2pll"
380 + "ethif", "sgmiitop", "esw", "gp0", "gp1", "gp2", "fe", "trgpll",
381 + "sgmii_tx250m", "sgmii_rx250m", "sgmii_cdr_ref", "sgmii_cdr_fb",
382 + "sgmii2_tx250m", "sgmii2_rx250m", "sgmii2_cdr_ref", "sgmii2_cdr_fb",
383 + "sgmii_ck", "eth2pll",
384 };
385
386 void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
387 @@ -84,8 +87,8 @@ static int mtk_mdio_busy_wait(struct mtk
388 return -1;
389 }
390
391 -static u32 _mtk_mdio_write(struct mtk_eth *eth, u32 phy_addr,
392 - u32 phy_register, u32 write_data)
393 +u32 _mtk_mdio_write(struct mtk_eth *eth, u32 phy_addr,
394 + u32 phy_register, u32 write_data)
395 {
396 if (mtk_mdio_busy_wait(eth))
397 return -1;
398 @@ -103,7 +106,7 @@ static u32 _mtk_mdio_write(struct mtk_et
399 return 0;
400 }
401
402 -static u32 _mtk_mdio_read(struct mtk_eth *eth, int phy_addr, int phy_reg)
403 +u32 _mtk_mdio_read(struct mtk_eth *eth, int phy_addr, int phy_reg)
404 {
405 u32 d;
406
407 @@ -123,6 +126,34 @@ static u32 _mtk_mdio_read(struct mtk_eth
408 return d;
409 }
410
411 +u32 mtk_cl45_ind_read(struct mtk_eth *eth, u32 port, u32 devad, u32 reg, u32 *data)
412 +{
413 + mutex_lock(&eth->mii_bus->mdio_lock);
414 +
415 + _mtk_mdio_write(eth, port, MII_MMD_ACC_CTL_REG, devad);
416 + _mtk_mdio_write(eth, port, MII_MMD_ADDR_DATA_REG, reg);
417 + _mtk_mdio_write(eth, port, MII_MMD_ACC_CTL_REG, MMD_OP_MODE_DATA | devad);
418 + *data = _mtk_mdio_read(eth, port, MII_MMD_ADDR_DATA_REG);
419 +
420 + mutex_unlock(&eth->mii_bus->mdio_lock);
421 +
422 + return 0;
423 +}
424 +
425 +u32 mtk_cl45_ind_write(struct mtk_eth *eth, u32 port, u32 devad, u32 reg, u32 data)
426 +{
427 + mutex_lock(&eth->mii_bus->mdio_lock);
428 +
429 + _mtk_mdio_write(eth, port, MII_MMD_ACC_CTL_REG, devad);
430 + _mtk_mdio_write(eth, port, MII_MMD_ADDR_DATA_REG, reg);
431 + _mtk_mdio_write(eth, port, MII_MMD_ACC_CTL_REG, MMD_OP_MODE_DATA | devad);
432 + _mtk_mdio_write(eth, port, MII_MMD_ADDR_DATA_REG, data);
433 +
434 + mutex_unlock(&eth->mii_bus->mdio_lock);
435 +
436 + return 0;
437 +}
438 +
439 static int mtk_mdio_write(struct mii_bus *bus, int phy_addr,
440 int phy_reg, u16 val)
441 {
442 @@ -165,51 +196,12 @@ static void mtk_gmac0_rgmii_adjust(struc
443 mtk_w32(eth, val, TRGMII_TCK_CTRL);
444 }
445
446 -static void mtk_gmac_sgmii_hw_setup(struct mtk_eth *eth, int mac_id)
447 -{
448 - u32 val;
449 -
450 - /* Setup the link timer and QPHY power up inside SGMIISYS */
451 - regmap_write(eth->sgmiisys, SGMSYS_PCS_LINK_TIMER,
452 - SGMII_LINK_TIMER_DEFAULT);
453 -
454 - regmap_read(eth->sgmiisys, SGMSYS_SGMII_MODE, &val);
455 - val |= SGMII_REMOTE_FAULT_DIS;
456 - regmap_write(eth->sgmiisys, SGMSYS_SGMII_MODE, val);
457 -
458 - regmap_read(eth->sgmiisys, SGMSYS_PCS_CONTROL_1, &val);
459 - val |= SGMII_AN_RESTART;
460 - regmap_write(eth->sgmiisys, SGMSYS_PCS_CONTROL_1, val);
461 -
462 - regmap_read(eth->sgmiisys, SGMSYS_QPHY_PWR_STATE_CTRL, &val);
463 - val &= ~SGMII_PHYA_PWD;
464 - regmap_write(eth->sgmiisys, SGMSYS_QPHY_PWR_STATE_CTRL, val);
465 -
466 - /* Determine MUX for which GMAC uses the SGMII interface */
467 - if (MTK_HAS_CAPS(eth->soc->caps, MTK_DUAL_GMAC_SHARED_SGMII)) {
468 - regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
469 - val &= ~SYSCFG0_SGMII_MASK;
470 - val |= !mac_id ? SYSCFG0_SGMII_GMAC1 : SYSCFG0_SGMII_GMAC2;
471 - regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
472 -
473 - dev_info(eth->dev, "setup shared sgmii for gmac=%d\n",
474 - mac_id);
475 - }
476 -
477 - /* Setup the GMAC1 going through SGMII path when SoC also support
478 - * ESW on GMAC1
479 - */
480 - if (MTK_HAS_CAPS(eth->soc->caps, MTK_GMAC1_ESW | MTK_GMAC1_SGMII) &&
481 - !mac_id) {
482 - mtk_w32(eth, 0, MTK_MAC_MISC);
483 - dev_info(eth->dev, "setup gmac1 going through sgmii");
484 - }
485 -}
486 -
487 static void mtk_phy_link_adjust(struct net_device *dev)
488 {
489 struct mtk_mac *mac = netdev_priv(dev);
490 + struct mtk_eth *eth = mac->hw;
491 u16 lcl_adv = 0, rmt_adv = 0;
492 + u32 lcl_eee = 0, rmt_eee = 0;
493 u8 flowctrl;
494 u32 mcr = MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG |
495 MAC_MCR_FORCE_MODE | MAC_MCR_TX_EN |
496 @@ -229,7 +221,7 @@ static void mtk_phy_link_adjust(struct n
497 };
498
499 if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII) &&
500 - !mac->id && !mac->trgmii)
501 + !mac->id && !mac->trgmii)
502 mtk_gmac0_rgmii_adjust(mac->hw, dev->phydev->speed);
503
504 if (dev->phydev->link)
505 @@ -259,7 +251,16 @@ static void mtk_phy_link_adjust(struct n
506 flowctrl & FLOW_CTRL_RX ? "enabled" : "disabled",
507 flowctrl & FLOW_CTRL_TX ? "enabled" : "disabled");
508 }
509 +	/* EEE capability */
510 + mtk_cl45_ind_read(eth, 0, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &lcl_eee);
511 + mtk_cl45_ind_read(eth, 0, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &rmt_eee);
512 +
513 + if ((lcl_eee & rmt_eee & MDIO_EEE_1000T) == MDIO_EEE_1000T)
514 + mcr |= MAC_MCR_MDIO_EEE_1000T;
515 + if ((lcl_eee & rmt_eee & MDIO_EEE_100TX) == MDIO_EEE_100TX)
516 + mcr |= MAC_MCR_MDIO_EEE_100TX;
517
518 +	/* Setup MCR */
519 mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
520
521 if (dev->phydev->link)
522 @@ -290,10 +291,10 @@ static int mtk_phy_connect_node(struct m
523 return -ENODEV;
524 }
525
526 - dev_info(eth->dev,
527 - "connected mac %d to PHY at %s [uid=%08x, driver=%s]\n",
528 - mac->id, phydev_name(phydev), phydev->phy_id,
529 - phydev->drv->name);
530 + dev_info(eth->dev,
531 + "connected mac %d to PHY at %s [uid=%08x, driver=%s]\n",
532 + mac->id, phydev_name(phydev), phydev->phy_id,
533 + phydev->drv->name);
534
535 return 0;
536 }
537 @@ -304,6 +305,7 @@ static int mtk_phy_connect(struct net_de
538 struct mtk_eth *eth;
539 struct device_node *np;
540 u32 val;
541 + int err;
542
543 eth = mac->hw;
544 np = of_parse_phandle(mac->of_node, "phy-handle", 0);
545 @@ -313,6 +315,10 @@ static int mtk_phy_connect(struct net_de
546 if (!np)
547 return -ENODEV;
548
549 + err = mtk_setup_hw_path(eth, mac->id, of_get_phy_mode(np));
550 + if (err)
551 + goto err_phy;
552 +
553 mac->ge_mode = 0;
554 switch (of_get_phy_mode(np)) {
555 case PHY_INTERFACE_MODE_TRGMII:
556 @@ -323,10 +329,9 @@ static int mtk_phy_connect(struct net_de
557 case PHY_INTERFACE_MODE_RGMII:
558 break;
559 case PHY_INTERFACE_MODE_SGMII:
560 - if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII))
561 - mtk_gmac_sgmii_hw_setup(eth, mac->id);
562 break;
563 case PHY_INTERFACE_MODE_MII:
564 + case PHY_INTERFACE_MODE_GMII:
565 mac->ge_mode = 1;
566 break;
567 case PHY_INTERFACE_MODE_REVMII:
568 @@ -355,7 +360,7 @@ static int mtk_phy_connect(struct net_de
569 dev->phydev->speed = 0;
570 dev->phydev->duplex = 0;
571
572 - if (of_phy_is_fixed_link(mac->of_node))
573 + if (!strncmp(dev->phydev->drv->name, "Generic", 7))
574 dev->phydev->supported |=
575 SUPPORTED_Pause | SUPPORTED_Asym_Pause;
576
577 @@ -535,37 +540,37 @@ static void mtk_stats_update(struct mtk_
578 }
579
580 static void mtk_get_stats64(struct net_device *dev,
581 - struct rtnl_link_stats64 *storage)
582 + struct rtnl_link_stats64 *storage)
583 {
584 - struct mtk_mac *mac = netdev_priv(dev);
585 - struct mtk_hw_stats *hw_stats = mac->hw_stats;
586 - unsigned int start;
587 -
588 - if (netif_running(dev) && netif_device_present(dev)) {
589 - if (spin_trylock_bh(&hw_stats->stats_lock)) {
590 - mtk_stats_update_mac(mac);
591 - spin_unlock_bh(&hw_stats->stats_lock);
592 - }
593 - }
594 -
595 - do {
596 - start = u64_stats_fetch_begin_irq(&hw_stats->syncp);
597 - storage->rx_packets = hw_stats->rx_packets;
598 - storage->tx_packets = hw_stats->tx_packets;
599 - storage->rx_bytes = hw_stats->rx_bytes;
600 - storage->tx_bytes = hw_stats->tx_bytes;
601 - storage->collisions = hw_stats->tx_collisions;
602 - storage->rx_length_errors = hw_stats->rx_short_errors +
603 - hw_stats->rx_long_errors;
604 - storage->rx_over_errors = hw_stats->rx_overflow;
605 - storage->rx_crc_errors = hw_stats->rx_fcs_errors;
606 - storage->rx_errors = hw_stats->rx_checksum_errors;
607 - storage->tx_aborted_errors = hw_stats->tx_skip;
608 - } while (u64_stats_fetch_retry_irq(&hw_stats->syncp, start));
609 -
610 - storage->tx_errors = dev->stats.tx_errors;
611 - storage->rx_dropped = dev->stats.rx_dropped;
612 - storage->tx_dropped = dev->stats.tx_dropped;
613 + struct mtk_mac *mac = netdev_priv(dev);
614 + struct mtk_hw_stats *hw_stats = mac->hw_stats;
615 + unsigned int start;
616 +
617 + if (netif_running(dev) && netif_device_present(dev)) {
618 + if (spin_trylock_bh(&hw_stats->stats_lock)) {
619 + mtk_stats_update_mac(mac);
620 + spin_unlock_bh(&hw_stats->stats_lock);
621 + }
622 + }
623 +
624 + do {
625 + start = u64_stats_fetch_begin_irq(&hw_stats->syncp);
626 + storage->rx_packets = hw_stats->rx_packets;
627 + storage->tx_packets = hw_stats->tx_packets;
628 + storage->rx_bytes = hw_stats->rx_bytes;
629 + storage->tx_bytes = hw_stats->tx_bytes;
630 + storage->collisions = hw_stats->tx_collisions;
631 + storage->rx_length_errors = hw_stats->rx_short_errors +
632 + hw_stats->rx_long_errors;
633 + storage->rx_over_errors = hw_stats->rx_overflow;
634 + storage->rx_crc_errors = hw_stats->rx_fcs_errors;
635 + storage->rx_errors = hw_stats->rx_checksum_errors;
636 + storage->tx_aborted_errors = hw_stats->tx_skip;
637 + } while (u64_stats_fetch_retry_irq(&hw_stats->syncp, start));
638 +
639 + storage->tx_errors = dev->stats.tx_errors;
640 + storage->rx_dropped = dev->stats.rx_dropped;
641 + storage->tx_dropped = dev->stats.tx_dropped;
642 }
643
644 static inline int mtk_max_frag_size(int mtu)
645 @@ -605,10 +610,10 @@ static int mtk_init_fq_dma(struct mtk_et
646 dma_addr_t dma_addr;
647 int i;
648
649 - eth->scratch_ring = dma_zalloc_coherent(eth->dev,
650 - cnt * sizeof(struct mtk_tx_dma),
651 - &eth->phy_scratch_ring,
652 - GFP_ATOMIC);
653 + eth->scratch_ring = dma_alloc_coherent(eth->dev,
654 + cnt * sizeof(struct mtk_tx_dma),
655 + &eth->phy_scratch_ring,
656 + GFP_ATOMIC | __GFP_ZERO);
657 if (unlikely(!eth->scratch_ring))
658 return -ENOMEM;
659
660 @@ -623,6 +628,7 @@ static int mtk_init_fq_dma(struct mtk_et
661 if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
662 return -ENOMEM;
663
664 + memset(eth->scratch_ring, 0x0, sizeof(struct mtk_tx_dma) * cnt);
665 phy_ring_tail = eth->phy_scratch_ring +
666 (sizeof(struct mtk_tx_dma) * (cnt - 1));
667
668 @@ -673,7 +679,7 @@ static void mtk_tx_unmap(struct mtk_eth
669 }
670 tx_buf->flags = 0;
671 if (tx_buf->skb &&
672 - (tx_buf->skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC))
673 + (tx_buf->skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC))
674 dev_kfree_skb_any(tx_buf->skb);
675 tx_buf->skb = NULL;
676 }
677 @@ -689,6 +695,7 @@ static int mtk_tx_map(struct sk_buff *sk
678 unsigned int nr_frags;
679 int i, n_desc = 1;
680 u32 txd4 = 0, fport;
681 + u32 qid = 0;
682
683 itxd = ring->next_free;
684 if (itxd == ring->last_free)
685 @@ -708,9 +715,10 @@ static int mtk_tx_map(struct sk_buff *sk
686 if (skb->ip_summed == CHECKSUM_PARTIAL)
687 txd4 |= TX_DMA_CHKSUM;
688
689 - /* VLAN header offload */
690 - if (skb_vlan_tag_present(skb))
691 - txd4 |= TX_DMA_INS_VLAN | skb_vlan_tag_get(skb);
692 +#if defined(CONFIG_NET_MEDIATEK_HW_QOS)
693 + qid = skb->mark & (MTK_QDMA_TX_MASK);
694 + qid += (!mac->id) ? (MTK_QDMA_TX_MASK + 1) : 0;
695 +#endif
696
697 mapped_addr = dma_map_single(eth->dev, skb->data,
698 skb_headlen(skb), DMA_TO_DEVICE);
699 @@ -727,6 +735,7 @@ static int mtk_tx_map(struct sk_buff *sk
700 /* TX SG offload */
701 txd = itxd;
702 nr_frags = skb_shinfo(skb)->nr_frags;
703 +
704 for (i = 0; i < nr_frags; i++) {
705 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
706 unsigned int offset = 0;
707 @@ -753,10 +762,10 @@ static int mtk_tx_map(struct sk_buff *sk
708 last_frag = true;
709
710 WRITE_ONCE(txd->txd1, mapped_addr);
711 - WRITE_ONCE(txd->txd3, (TX_DMA_SWC |
712 + WRITE_ONCE(txd->txd3, (TX_DMA_SWC | QID_LOW_BITS(qid) |
713 TX_DMA_PLEN0(frag_map_size) |
714 last_frag * TX_DMA_LS0));
715 - WRITE_ONCE(txd->txd4, fport);
716 + WRITE_ONCE(txd->txd4, fport | QID_HIGH_BITS(qid));
717
718 tx_buf = mtk_desc_to_tx_buf(ring, txd);
719 memset(tx_buf, 0, sizeof(*tx_buf));
720 @@ -775,9 +784,9 @@ static int mtk_tx_map(struct sk_buff *sk
721 /* store skb to cleanup */
722 itx_buf->skb = skb;
723
724 - WRITE_ONCE(itxd->txd4, txd4);
725 WRITE_ONCE(itxd->txd3, (TX_DMA_SWC | TX_DMA_PLEN0(skb_headlen(skb)) |
726 - (!nr_frags * TX_DMA_LS0)));
727 + (!nr_frags * TX_DMA_LS0)) | QID_LOW_BITS(qid));
728 + WRITE_ONCE(itxd->txd4, txd4 | QID_HIGH_BITS(qid));
729
730 netdev_sent_queue(dev, skb->len);
731 skb_tx_timestamp(skb);
732 @@ -922,7 +931,7 @@ drop:
733 return NETDEV_TX_OK;
734 }
735
736 -static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth)
737 +struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth)
738 {
739 int i;
740 struct mtk_rx_ring *ring;
741 @@ -991,10 +1000,24 @@ static int mtk_poll_rx(struct napi_struc
742 break;
743
744 /* find out which mac the packet come from. values start at 1 */
745 +#if defined(CONFIG_NET_DSA)
746 + mac = (trxd.rxd4 >> 22) & 0x1;
747 + mac = (mac + 1) % 2;
748 +#else
749 mac = (trxd.rxd4 >> RX_DMA_FPORT_SHIFT) &
750 - RX_DMA_FPORT_MASK;
751 - mac--;
752 -
753 + RX_DMA_FPORT_MASK;
754 +		/* From QDMA(5). This is an external interface case of HWNAT.
755 +		 * When the incoming frame comes from an external interface
756 +		 * rather than GMAC1/GMAC2, the HWNAT driver sends the original
757 +		 * frame to the PPE via the PPD (ping-pong device) for HWNAT RX
758 +		 * frame learning. After learning, the PPE transmits the
759 +		 * original frame back to the PPD again to run the SW NAT path.
760 +		 */
761 + if (mac == 5)
762 + mac = 0;
763 + else
764 + mac--;
765 +#endif
766 if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT ||
767 !eth->netdev[mac]))
768 goto release_desc;
769 @@ -1044,6 +1067,7 @@ static int mtk_poll_rx(struct napi_struc
770 RX_DMA_VID(trxd.rxd3))
771 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
772 RX_DMA_VID(trxd.rxd3));
773 +
774 skb_record_rx_queue(skb, 0);
775 napi_gro_receive(napi, skb);
776
777 @@ -1128,7 +1152,7 @@ static int mtk_poll_tx(struct mtk_eth *e
778 }
779
780 if (mtk_queue_stopped(eth) &&
781 - (atomic_read(&ring->free_count) > ring->thresh))
782 + (atomic_read(&ring->free_count) > ring->thresh))
783 mtk_wake_queue(eth);
784
785 return total;
786 @@ -1220,11 +1244,14 @@ static int mtk_tx_alloc(struct mtk_eth *
787 if (!ring->buf)
788 goto no_tx_mem;
789
790 - ring->dma = dma_zalloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
791 - &ring->phys, GFP_ATOMIC);
792 + ring->dma = dma_alloc_coherent(eth->dev,
793 + MTK_DMA_SIZE * sz,
794 + &ring->phys,
795 + GFP_ATOMIC | __GFP_ZERO);
796 if (!ring->dma)
797 goto no_tx_mem;
798
799 + memset(ring->dma, 0, MTK_DMA_SIZE * sz);
800 for (i = 0; i < MTK_DMA_SIZE; i++) {
801 int next = (i + 1) % MTK_DMA_SIZE;
802 u32 next_ptr = ring->phys + next * sz;
803 @@ -1317,9 +1344,10 @@ static int mtk_rx_alloc(struct mtk_eth *
804 return -ENOMEM;
805 }
806
807 - ring->dma = dma_zalloc_coherent(eth->dev,
808 - rx_dma_size * sizeof(*ring->dma),
809 - &ring->phys, GFP_ATOMIC);
810 + ring->dma = dma_alloc_coherent(eth->dev,
811 + rx_dma_size * sizeof(*ring->dma),
812 + &ring->phys,
813 + GFP_ATOMIC | __GFP_ZERO);
814 if (!ring->dma)
815 return -ENOMEM;
816
817 @@ -1516,8 +1544,8 @@ static int mtk_hwlro_add_ipaddr(struct n
818 int hwlro_idx;
819
820 if ((fsp->flow_type != TCP_V4_FLOW) ||
821 - (!fsp->h_u.tcp_ip4_spec.ip4dst) ||
822 - (fsp->location > 1))
823 + (!fsp->h_u.tcp_ip4_spec.ip4dst) ||
824 + (fsp->location > 1))
825 return -EINVAL;
826
827 mac->hwlro_ip[fsp->location] = htonl(fsp->h_u.tcp_ip4_spec.ip4dst);
828 @@ -1744,6 +1772,34 @@ static void mtk_tx_timeout(struct net_de
829 schedule_work(&eth->pending_work);
830 }
831
832 +static irqreturn_t mtk_handle_irq_tx_rx(int irq, void *_eth)
833 +{
834 + struct mtk_eth *eth = _eth;
835 + u32 tx_status, rx_status;
836 +
837 + tx_status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
838 +
839 + if (tx_status & MTK_TX_DONE_INT) {
840 + if (likely(napi_schedule_prep(&eth->tx_napi))) {
841 + mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
842 + __napi_schedule(&eth->tx_napi);
843 + }
844 + mtk_w32(eth, tx_status, MTK_QMTK_INT_STATUS);
845 + }
846 +
847 + rx_status = mtk_r32(eth, MTK_PDMA_INT_STATUS);
848 +
849 + if (rx_status & MTK_RX_DONE_INT) {
850 + if (likely(napi_schedule_prep(&eth->rx_napi))) {
851 + mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
852 + __napi_schedule(&eth->rx_napi);
853 + }
854 + mtk_w32(eth, rx_status, MTK_PDMA_INT_STATUS);
855 + }
856 +
857 + return IRQ_HANDLED;
858 +}
859 +
860 static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
861 {
862 struct mtk_eth *eth = _eth;
863 @@ -1784,8 +1840,8 @@ static void mtk_poll_controller(struct n
864
865 static int mtk_start_dma(struct mtk_eth *eth)
866 {
867 - u32 rx_2b_offset = (NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0;
868 int err;
869 +	u32 rx_2b_offset = (NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0;
870
871 err = mtk_dma_init(eth);
872 if (err) {
873 @@ -1801,7 +1857,7 @@ static int mtk_start_dma(struct mtk_eth
874 MTK_QDMA_GLO_CFG);
875
876 mtk_w32(eth,
877 - MTK_RX_DMA_EN | rx_2b_offset |
878 +		MTK_RX_DMA_EN | rx_2b_offset |
879 MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
880 MTK_PDMA_GLO_CFG);
881
882 @@ -1814,7 +1870,7 @@ static int mtk_open(struct net_device *d
883 struct mtk_eth *eth = mac->hw;
884
885 /* we run 2 netdevs on the same dma ring so we only bring it up once */
886 - if (!refcount_read(&eth->dma_refcnt)) {
887 + if (!atomic_read(&eth->dma_refcnt)) {
888 int err = mtk_start_dma(eth);
889
890 if (err)
891 @@ -1824,10 +1880,8 @@ static int mtk_open(struct net_device *d
892 napi_enable(&eth->rx_napi);
893 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
894 mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
895 - refcount_set(&eth->dma_refcnt, 1);
896 }
897 - else
898 - refcount_inc(&eth->dma_refcnt);
899 + atomic_inc(&eth->dma_refcnt);
900
901 phy_start(dev->phydev);
902 netif_start_queue(dev);
903 @@ -1867,7 +1921,7 @@ static int mtk_stop(struct net_device *d
904 phy_stop(dev->phydev);
905
906 /* only shutdown DMA if this is the last user */
907 - if (!refcount_dec_and_test(&eth->dma_refcnt))
908 + if (!atomic_dec_and_test(&eth->dma_refcnt))
909 return 0;
910
911 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
912 @@ -1973,14 +2027,16 @@ static int mtk_hw_init(struct mtk_eth *e
913 val = mtk_r32(eth, MTK_CDMQ_IG_CTRL);
914 mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL);
915
916 - /* Enable RX VLan Offloading */
917 - mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
918 + /* Disable RX VLan Offloading */
919 + mtk_w32(eth, 0, MTK_CDMP_EG_CTRL);
920 +
921 +#if defined(CONFIG_NET_DSA)
922 + mtk_w32(eth, 0x81000001, MTK_CDMP_IG_CTRL);
923 +#endif
924
925 - /* enable interrupt delay for RX */
926 - mtk_w32(eth, MTK_PDMA_DELAY_RX_DELAY, MTK_PDMA_DELAY_INT);
927 + mtk_w32(eth, 0x8f0f8f0f, MTK_PDMA_DELAY_INT);
928 + mtk_w32(eth, 0x8f0f8f0f, MTK_QDMA_DELAY_INT);
929
930 - /* disable delay and normal interrupt */
931 - mtk_w32(eth, 0, MTK_QDMA_DELAY_INT);
932 mtk_tx_irq_disable(eth, ~0);
933 mtk_rx_irq_disable(eth, ~0);
934 mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
935 @@ -2172,27 +2228,27 @@ static int mtk_cleanup(struct mtk_eth *e
936 }
937
938 static int mtk_get_link_ksettings(struct net_device *ndev,
939 - struct ethtool_link_ksettings *cmd)
940 + struct ethtool_link_ksettings *cmd)
941 {
942 - struct mtk_mac *mac = netdev_priv(ndev);
943 + struct mtk_mac *mac = netdev_priv(ndev);
944
945 - if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
946 - return -EBUSY;
947 + if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
948 + return -EBUSY;
949
950 - phy_ethtool_ksettings_get(ndev->phydev, cmd);
951 + phy_ethtool_ksettings_get(ndev->phydev, cmd);
952
953 - return 0;
954 + return 0;
955 }
956
957 static int mtk_set_link_ksettings(struct net_device *ndev,
958 - const struct ethtool_link_ksettings *cmd)
959 + const struct ethtool_link_ksettings *cmd)
960 {
961 - struct mtk_mac *mac = netdev_priv(ndev);
962 + struct mtk_mac *mac = netdev_priv(ndev);
963
964 - if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
965 - return -EBUSY;
966 + if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
967 + return -EBUSY;
968
969 - return phy_ethtool_ksettings_set(ndev->phydev, cmd);
970 + return phy_ethtool_ksettings_set(ndev->phydev, cmd);
971 }
972
973 static void mtk_get_drvinfo(struct net_device *dev,
974 @@ -2355,8 +2411,8 @@ static int mtk_set_rxnfc(struct net_devi
975 }
976
977 static const struct ethtool_ops mtk_ethtool_ops = {
978 - .get_link_ksettings = mtk_get_link_ksettings,
979 - .set_link_ksettings = mtk_set_link_ksettings,
980 + .get_link_ksettings = mtk_get_link_ksettings,
981 + .set_link_ksettings = mtk_set_link_ksettings,
982 .get_drvinfo = mtk_get_drvinfo,
983 .get_msglevel = mtk_get_msglevel,
984 .set_msglevel = mtk_set_msglevel,
985 @@ -2366,7 +2422,7 @@ static const struct ethtool_ops mtk_etht
986 .get_sset_count = mtk_get_sset_count,
987 .get_ethtool_stats = mtk_get_ethtool_stats,
988 .get_rxnfc = mtk_get_rxnfc,
989 - .set_rxnfc = mtk_set_rxnfc,
990 + .set_rxnfc = mtk_set_rxnfc,
991 };
992
993 static const struct net_device_ops mtk_netdev_ops = {
994 @@ -2463,6 +2519,7 @@ static int mtk_probe(struct platform_dev
995 {
996 struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
997 struct device_node *mac_np;
998 + const struct of_device_id *match;
999 struct mtk_eth *eth;
1000 int err;
1001 int i;
1002 @@ -2471,7 +2528,8 @@ static int mtk_probe(struct platform_dev
1003 if (!eth)
1004 return -ENOMEM;
1005
1006 - eth->soc = of_device_get_match_data(&pdev->dev);
1007 + match = of_match_device(of_mtk_match, &pdev->dev);
1008 + eth->soc = (struct mtk_soc_data *)match->data;
1009
1010 eth->dev = &pdev->dev;
1011 eth->base = devm_ioremap_resource(&pdev->dev, res);
1012 @@ -2489,26 +2547,37 @@ static int mtk_probe(struct platform_dev
1013 return PTR_ERR(eth->ethsys);
1014 }
1015
1016 - if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
1017 - eth->sgmiisys =
1018 - syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
1019 - "mediatek,sgmiisys");
1020 - if (IS_ERR(eth->sgmiisys)) {
1021 - dev_err(&pdev->dev, "no sgmiisys regmap found\n");
1022 - return PTR_ERR(eth->sgmiisys);
1023 + if (MTK_HAS_CAPS(eth->soc->caps, MTK_INFRA)) {
1024 + eth->infra = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
1025 + "mediatek,infracfg");
1026 + if (IS_ERR(eth->infra)) {
1027 +			dev_info(&pdev->dev, "no infracfg regmap found\n");
1028 + return PTR_ERR(eth->infra);
1029 }
1030 }
1031
1032 + if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
1033 + eth->sgmii = devm_kzalloc(eth->dev, sizeof(*eth->sgmii),
1034 + GFP_KERNEL);
1035 + if (!eth->sgmii)
1036 + return -ENOMEM;
1037 +
1038 + err = mtk_sgmii_init(eth->sgmii, pdev->dev.of_node,
1039 + eth->soc->ana_rgc3);
1040 + if (err)
1041 + return err;
1042 + }
1043 +
1044 if (eth->soc->required_pctl) {
1045 eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
1046 "mediatek,pctl");
1047 if (IS_ERR(eth->pctl)) {
1048 - dev_err(&pdev->dev, "no pctl regmap found\n");
1049 + dev_info(&pdev->dev, "no pctl regmap found\n");
1050 return PTR_ERR(eth->pctl);
1051 }
1052 }
1053
1054 - for (i = 0; i < 3; i++) {
1055 + for (i = 0; i < eth->soc->irq_num; i++) {
1056 eth->irq[i] = platform_get_irq(pdev, i);
1057 if (eth->irq[i] < 0) {
1058 dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
1059 @@ -2552,15 +2621,22 @@ static int mtk_probe(struct platform_dev
1060 goto err_deinit_hw;
1061 }
1062
1063 - err = devm_request_irq(eth->dev, eth->irq[1], mtk_handle_irq_tx, 0,
1064 - dev_name(eth->dev), eth);
1065 - if (err)
1066 - goto err_free_dev;
1067 + if (eth->soc->irq_num > 1) {
1068 + err = devm_request_irq(eth->dev, eth->irq[1], mtk_handle_irq_tx, 0,
1069 + dev_name(eth->dev), eth);
1070 + if (err)
1071 + goto err_free_dev;
1072
1073 - err = devm_request_irq(eth->dev, eth->irq[2], mtk_handle_irq_rx, 0,
1074 - dev_name(eth->dev), eth);
1075 - if (err)
1076 - goto err_free_dev;
1077 + err = devm_request_irq(eth->dev, eth->irq[2], mtk_handle_irq_rx, 0,
1078 + dev_name(eth->dev), eth);
1079 + if (err)
1080 + goto err_free_dev;
1081 + } else {
1082 + err = devm_request_irq(eth->dev, eth->irq[0], mtk_handle_irq_tx_rx, 0,
1083 + dev_name(eth->dev), eth);
1084 + if (err)
1085 + goto err_free_dev;
1086 + }
1087
1088 err = mtk_mdio_init(eth);
1089 if (err)
1090 @@ -2626,27 +2702,48 @@ static int mtk_remove(struct platform_de
1091 }
1092
1093 static const struct mtk_soc_data mt2701_data = {
1094 - .caps = MTK_GMAC1_TRGMII | MTK_HWLRO,
1095 + .caps = MT7623_CAPS | MTK_HWLRO,
1096 .required_clks = MT7623_CLKS_BITMAP,
1097 .required_pctl = true,
1098 + .irq_num = 3,
1099 };
1100
1101 static const struct mtk_soc_data mt7622_data = {
1102 - .caps = MTK_DUAL_GMAC_SHARED_SGMII | MTK_GMAC1_ESW | MTK_HWLRO,
1103 + .ana_rgc3 = 0x2028,
1104 + .caps = MT7622_CAPS | MTK_HWLRO,
1105 .required_clks = MT7622_CLKS_BITMAP,
1106 .required_pctl = false,
1107 + .irq_num = 3,
1108 };
1109
1110 static const struct mtk_soc_data mt7623_data = {
1111 - .caps = MTK_GMAC1_TRGMII | MTK_HWLRO,
1112 + .caps = MT7623_CAPS | MTK_HWLRO,
1113 .required_clks = MT7623_CLKS_BITMAP,
1114 .required_pctl = true,
1115 + .irq_num = 3,
1116 +};
1117 +
1118 +static const struct mtk_soc_data leopard_data = {
1119 + .ana_rgc3 = 0x128,
1120 + .caps = LEOPARD_CAPS | MTK_HWLRO,
1121 + .required_clks = LEOPARD_CLKS_BITMAP,
1122 + .required_pctl = false,
1123 + .irq_num = 3,
1124 +};
1125 +
1126 +static const struct mtk_soc_data mt7621_data = {
1127 + .caps = MT7621_CAPS,
1128 + .required_clks = MT7621_CLKS_BITMAP,
1129 + .required_pctl = false,
1130 + .irq_num = 1,
1131 };
1132
1133 const struct of_device_id of_mtk_match[] = {
1134 { .compatible = "mediatek,mt2701-eth", .data = &mt2701_data},
1135 { .compatible = "mediatek,mt7622-eth", .data = &mt7622_data},
1136 { .compatible = "mediatek,mt7623-eth", .data = &mt7623_data},
1137 + { .compatible = "mediatek,mt7629-eth", .data = &leopard_data},
1138 + { .compatible = "mediatek,mt7621-eth", .data = &mt7621_data},
1139 {},
1140 };
1141 MODULE_DEVICE_TABLE(of, of_mtk_match);
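The mtk_cl45_ind_read()/mtk_cl45_ind_write() helpers added above use the standard Clause-22 indirect access to Clause-45 (MMD) registers: the MMD access-control register (0x0d) first takes the device address in address mode, the address/data register (0x0e) latches the target register number, then 0x0d is rewritten with the data opcode (bit 14) so the payload can move through 0x0e. A rough standalone sketch of the read sequence over assumed generic bus callbacks — the struct and function names below are illustrative, not from the driver:

#include <stdint.h>

#define MII_MMD_ACC_CTL_REG	0x0d
#define MII_MMD_ADDR_DATA_REG	0x0e
#define MMD_OP_MODE_DATA	(1u << 14)

/* Assumed generic MDIO bus accessors; any Clause-22 capable bus would do. */
struct mdio_bus {
	void (*write)(struct mdio_bus *bus, int phy, int reg, uint16_t val);
	uint16_t (*read)(struct mdio_bus *bus, int phy, int reg);
};

/* Indirect Clause-45 read, mirroring mtk_cl45_ind_read(): select the MMD
 * in address mode, latch the register number, switch the access-control
 * register to data mode, then pull the payload out of the address/data
 * register.
 */
uint16_t cl45_ind_read(struct mdio_bus *bus, int phy, int devad, int reg)
{
	bus->write(bus, phy, MII_MMD_ACC_CTL_REG, devad);
	bus->write(bus, phy, MII_MMD_ADDR_DATA_REG, reg);
	bus->write(bus, phy, MII_MMD_ACC_CTL_REG, MMD_OP_MODE_DATA | devad);
	return bus->read(bus, phy, MII_MMD_ADDR_DATA_REG);
}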
1142 Index: linux-4.19.57/drivers/net/ethernet/mediatek/mtk_eth_soc.h
1143 ===================================================================
1144 --- linux-4.19.57.orig/drivers/net/ethernet/mediatek/mtk_eth_soc.h
1145 +++ linux-4.19.57/drivers/net/ethernet/mediatek/mtk_eth_soc.h
1146 @@ -15,13 +15,17 @@
1147 #ifndef MTK_ETH_H
1148 #define MTK_ETH_H
1149
1150 +#include <linux/dma-mapping.h>
1151 +#include <linux/netdevice.h>
1152 +#include <linux/of_net.h>
1153 +#include <linux/u64_stats_sync.h>
1154 #include <linux/refcount.h>
1155
1156 #define MTK_QDMA_PAGE_SIZE 2048
1157 #define MTK_MAX_RX_LENGTH 1536
1158 #define MTK_TX_DMA_BUF_LEN 0x3fff
1159 -#define MTK_DMA_SIZE 256
1160 -#define MTK_NAPI_WEIGHT 64
1161 +#define MTK_DMA_SIZE 2048
1162 +#define MTK_NAPI_WEIGHT 256
1163 #define MTK_MAC_COUNT 2
1164 #define MTK_RX_ETH_HLEN (VLAN_ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)
1165 #define MTK_RX_HLEN (NET_SKB_PAD + MTK_RX_ETH_HLEN + NET_IP_ALIGN)
1166 @@ -36,8 +40,6 @@
1167 NETIF_MSG_TX_ERR)
1168 #define MTK_HW_FEATURES (NETIF_F_IP_CSUM | \
1169 NETIF_F_RXCSUM | \
1170 - NETIF_F_HW_VLAN_CTAG_TX | \
1171 - NETIF_F_HW_VLAN_CTAG_RX | \
1172 NETIF_F_SG | NETIF_F_TSO | \
1173 NETIF_F_TSO6 | \
1174 NETIF_F_IPV6_CSUM)
1175 @@ -76,6 +78,9 @@
1176 #define MTK_CDMQ_IG_CTRL 0x1400
1177 #define MTK_CDMQ_STAG_EN BIT(0)
1178
1179 +/* CDMP Ingress Control Register */
1180 +#define MTK_CDMP_IG_CTRL 0x400
1181 +
1182 /* CDMP Exgress Control Register */
1183 #define MTK_CDMP_EG_CTRL 0x404
1184
1185 @@ -225,8 +230,9 @@
1186 #define MTK_TX_DONE_INT1 BIT(1)
1187 #define MTK_TX_DONE_INT0 BIT(0)
1188 #define MTK_RX_DONE_INT MTK_RX_DONE_DLY
1189 -#define MTK_TX_DONE_INT (MTK_TX_DONE_INT0 | MTK_TX_DONE_INT1 | \
1190 - MTK_TX_DONE_INT2 | MTK_TX_DONE_INT3)
1191 +#define MTK_TX_DONE_DLY BIT(28)
1192 +#define MTK_TX_DONE_INT MTK_TX_DONE_DLY
1193 +
1194
1195 /* QDMA Interrupt grouping registers */
1196 #define MTK_QDMA_INT_GRP1 0x1a20
1197 @@ -267,6 +273,12 @@
1198 #define MTK_GDM1_TX_GBCNT 0x2400
1199 #define MTK_STAT_OFFSET 0x40
1200
1201 +/* QDMA TX NUM */
1202 +#define MTK_QDMA_TX_NUM 16
1203 +#define MTK_QDMA_TX_MASK ((MTK_QDMA_TX_NUM / 2) - 1)
1204 +#define QID_LOW_BITS(x) ((x) & 0xf)
1205 +#define QID_HIGH_BITS(x)	((((x) >> 4) & 0x3) << 20)
1206 +
1207 /* QDMA descriptor txd4 */
1208 #define TX_DMA_CHKSUM (0x7 << 29)
1209 #define TX_DMA_TSO BIT(28)
1210 @@ -316,6 +328,8 @@
1211 #define MAC_MCR_RX_EN BIT(13)
1212 #define MAC_MCR_BACKOFF_EN BIT(9)
1213 #define MAC_MCR_BACKPR_EN BIT(8)
1214 +#define MAC_MCR_MDIO_EEE_1000T BIT(7)
1215 +#define MAC_MCR_MDIO_EEE_100TX BIT(6)
1216 #define MAC_MCR_FORCE_RX_FC BIT(5)
1217 #define MAC_MCR_FORCE_TX_FC BIT(4)
1218 #define MAC_MCR_SPEED_1000 BIT(3)
1219 @@ -368,9 +382,11 @@
1220 #define ETHSYS_SYSCFG0 0x14
1221 #define SYSCFG0_GE_MASK 0x3
1222 #define SYSCFG0_GE_MODE(x, y) (x << (12 + (y * 2)))
1223 -#define SYSCFG0_SGMII_MASK (3 << 8)
1224 -#define SYSCFG0_SGMII_GMAC1 ((2 << 8) & GENMASK(9, 8))
1225 -#define SYSCFG0_SGMII_GMAC2 ((3 << 8) & GENMASK(9, 8))
1226 +#define SYSCFG0_SGMII_MASK GENMASK(9, 8)
1227 +#define SYSCFG0_SGMII_GMAC1 ((2 << 8) & SYSCFG0_SGMII_MASK)
1228 +#define SYSCFG0_SGMII_GMAC2 ((3 << 8) & SYSCFG0_SGMII_MASK)
1229 +#define SYSCFG0_SGMII_GMAC1_V2 BIT(9)
1230 +#define SYSCFG0_SGMII_GMAC2_V2 BIT(8)
1231
1232 /* ethernet subsystem clock register */
1233 #define ETHSYS_CLKCFG0 0x2c
1234 @@ -398,6 +414,16 @@
1235 #define SGMSYS_QPHY_PWR_STATE_CTRL 0xe8
1236 #define SGMII_PHYA_PWD BIT(4)
1237
1238 +/* Infrasys subsystem config registers */
1239 +#define INFRA_MISC2 0x70c
1240 +#define CO_QPHY_SEL BIT(0)
1241 +#define GEPHY_MAC_SEL BIT(1)
1242 +
1243 +/*MDIO control*/
1244 +#define MII_MMD_ACC_CTL_REG 0x0d
1245 +#define MII_MMD_ADDR_DATA_REG 0x0e
1246 +#define MMD_OP_MODE_DATA BIT(14)
1247 +
1248 struct mtk_rx_dma {
1249 unsigned int rxd1;
1250 unsigned int rxd2;
1251 @@ -462,15 +488,21 @@ enum mtk_tx_flags {
1252 */
1253 enum mtk_clks_map {
1254 MTK_CLK_ETHIF,
1255 + MTK_CLK_SGMIITOP,
1256 MTK_CLK_ESW,
1257 MTK_CLK_GP0,
1258 MTK_CLK_GP1,
1259 MTK_CLK_GP2,
1260 + MTK_CLK_FE,
1261 MTK_CLK_TRGPLL,
1262 MTK_CLK_SGMII_TX_250M,
1263 MTK_CLK_SGMII_RX_250M,
1264 MTK_CLK_SGMII_CDR_REF,
1265 MTK_CLK_SGMII_CDR_FB,
1266 + MTK_CLK_SGMII2_TX_250M,
1267 + MTK_CLK_SGMII2_RX_250M,
1268 + MTK_CLK_SGMII2_CDR_REF,
1269 + MTK_CLK_SGMII2_CDR_FB,
1270 MTK_CLK_SGMII_CK,
1271 MTK_CLK_ETH2PLL,
1272 MTK_CLK_MAX
1273 @@ -488,6 +520,22 @@ enum mtk_clks_map {
1274 BIT(MTK_CLK_SGMII_CDR_FB) | \
1275 BIT(MTK_CLK_SGMII_CK) | \
1276 BIT(MTK_CLK_ETH2PLL))
1277 +#define LEOPARD_CLKS_BITMAP (BIT(MTK_CLK_ETHIF) | BIT(MTK_CLK_ESW) | \
1278 + BIT(MTK_CLK_GP0) | BIT(MTK_CLK_GP1) | \
1279 + BIT(MTK_CLK_GP2) | BIT(MTK_CLK_FE) | \
1280 + BIT(MTK_CLK_SGMII_TX_250M) | \
1281 + BIT(MTK_CLK_SGMII_RX_250M) | \
1282 + BIT(MTK_CLK_SGMII_CDR_REF) | \
1283 + BIT(MTK_CLK_SGMII_CDR_FB) | \
1284 + BIT(MTK_CLK_SGMII2_TX_250M) | \
1285 + BIT(MTK_CLK_SGMII2_RX_250M) | \
1286 + BIT(MTK_CLK_SGMII2_CDR_REF) | \
1287 + BIT(MTK_CLK_SGMII2_CDR_FB) | \
1288 + BIT(MTK_CLK_SGMII_CK) | \
1289 + BIT(MTK_CLK_ETH2PLL) | BIT(MTK_CLK_SGMIITOP))
1290 +
1291 +#define MT7621_CLKS_BITMAP 0
1292 +
1293 enum mtk_dev_state {
1294 MTK_HW_INIT,
1295 MTK_RESETTING
1296 @@ -557,35 +605,149 @@ struct mtk_rx_ring {
1297 u32 crx_idx_reg;
1298 };
1299
1300 -#define MTK_TRGMII BIT(0)
1301 -#define MTK_GMAC1_TRGMII (BIT(1) | MTK_TRGMII)
1302 -#define MTK_ESW BIT(4)
1303 -#define MTK_GMAC1_ESW (BIT(5) | MTK_ESW)
1304 -#define MTK_SGMII BIT(8)
1305 -#define MTK_GMAC1_SGMII (BIT(9) | MTK_SGMII)
1306 -#define MTK_GMAC2_SGMII (BIT(10) | MTK_SGMII)
1307 -#define MTK_DUAL_GMAC_SHARED_SGMII (BIT(11) | MTK_GMAC1_SGMII | \
1308 - MTK_GMAC2_SGMII)
1309 +enum mtk_eth_mux {
1310 + MTK_ETH_MUX_GDM1_TO_GMAC1_ESW,
1311 + MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY,
1312 + MTK_ETH_MUX_U3_GMAC2_TO_QPHY,
1313 + MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII,
1314 + MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII,
1315 + MTK_ETH_MUX_MAX,
1316 +};
1317 +
1318 +enum mtk_eth_path {
1319 + MTK_ETH_PATH_GMAC1_RGMII,
1320 + MTK_ETH_PATH_GMAC1_TRGMII,
1321 + MTK_ETH_PATH_GMAC1_SGMII,
1322 + MTK_ETH_PATH_GMAC2_RGMII,
1323 + MTK_ETH_PATH_GMAC2_SGMII,
1324 + MTK_ETH_PATH_GMAC2_GEPHY,
1325 + MTK_ETH_PATH_GDM1_ESW,
1326 + MTK_ETH_PATH_MAX,
1327 +};
1328 +
1329 +/* Capability for function group */
1330 +#define MTK_RGMII BIT(0)
1331 +#define MTK_TRGMII BIT(1)
1332 +#define MTK_SGMII BIT(2)
1333 +#define MTK_ESW BIT(3)
1334 +#define MTK_GEPHY BIT(4)
1335 +#define MTK_MUX BIT(5)
1336 +#define MTK_INFRA BIT(6)
1337 +#define MTK_SHARED_SGMII BIT(7)
1338 +
1339 +/* Capability for features on SoCs */
1340 +#define MTK_PATH_BIT(x) BIT((x) + 10)
1341 +
1342 +#define MTK_GMAC1_RGMII \
1343 + (MTK_PATH_BIT(MTK_ETH_PATH_GMAC1_RGMII) | MTK_RGMII)
1344 +
1345 +#define MTK_GMAC1_TRGMII \
1346 + (MTK_PATH_BIT(MTK_ETH_PATH_GMAC1_TRGMII) | MTK_TRGMII)
1347 +
1348 +#define MTK_GMAC1_SGMII \
1349 + (MTK_PATH_BIT(MTK_ETH_PATH_GMAC1_SGMII) | MTK_SGMII)
1350 +
1351 +#define MTK_GMAC2_RGMII \
1352 + (MTK_PATH_BIT(MTK_ETH_PATH_GMAC2_RGMII) | MTK_RGMII)
1353 +
1354 +#define MTK_GMAC2_SGMII \
1355 + (MTK_PATH_BIT(MTK_ETH_PATH_GMAC2_SGMII) | MTK_SGMII)
1356 +
1357 +#define MTK_GMAC2_GEPHY \
1358 + (MTK_PATH_BIT(MTK_ETH_PATH_GMAC2_GEPHY) | MTK_GEPHY)
1359 +
1360 +#define MTK_GDM1_ESW \
1361 + (MTK_PATH_BIT(MTK_ETH_PATH_GDM1_ESW) | MTK_ESW)
1362 +
1363 +#define MTK_MUX_BIT(x) BIT((x) + 20)
1364 +
1365 +/* Capability for MUXes present on SoCs */
1366 +/* 0: GDM1 -> GMAC1, 1: GDM1 -> ESW */
1367 +#define MTK_MUX_GDM1_TO_GMAC1_ESW \
1368 + (MTK_MUX_BIT(MTK_ETH_MUX_GDM1_TO_GMAC1_ESW) | MTK_MUX)
1369 +
1370 +/* 0: GMAC2 -> GEPHY, 1: GMAC0 -> GePHY */
1371 +#define MTK_MUX_GMAC2_GMAC0_TO_GEPHY \
1372 + (MTK_MUX_BIT(MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY) | MTK_MUX | MTK_INFRA)
1373 +
1374 +/* 0: U3 -> QPHY, 1: GMAC2 -> QPHY */
1375 +#define MTK_MUX_U3_GMAC2_TO_QPHY \
1376 + (MTK_MUX_BIT(MTK_ETH_MUX_U3_GMAC2_TO_QPHY) | MTK_MUX | MTK_INFRA)
1377 +
1378 +/* 2: GMAC1 -> SGMII, 3: GMAC2 -> SGMII */
1379 +#define MTK_MUX_GMAC1_GMAC2_TO_SGMII_RGMII \
1380 + (MTK_MUX_BIT(MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII) | MTK_MUX | \
1381 + MTK_SHARED_SGMII)
1382 +
1383 +/* 0: GMACx -> GEPHY, 1: GMACx -> SGMII where x is 1 or 2 */
1384 +#define MTK_MUX_GMAC12_TO_GEPHY_SGMII \
1385 + (MTK_MUX_BIT(MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII) | MTK_MUX)
1386 +
1387 #define MTK_HWLRO BIT(12)
1388 +
1389 #define MTK_HAS_CAPS(caps, _x) (((caps) & (_x)) == (_x))
1390
1391 +#define MT7622_CAPS (MTK_GMAC1_RGMII | MTK_GMAC1_SGMII | MTK_GMAC2_RGMII | \
1392 + MTK_GMAC2_SGMII | MTK_GDM1_ESW | \
1393 + MTK_MUX_GDM1_TO_GMAC1_ESW | \
1394 + MTK_MUX_GMAC1_GMAC2_TO_SGMII_RGMII)
1395 +
1396 +#define MT7623_CAPS (MTK_GMAC1_RGMII | MTK_GMAC1_TRGMII | MTK_GMAC2_RGMII)
1397 +
1398 +#define LEOPARD_CAPS (MTK_GMAC1_SGMII | MTK_GMAC2_SGMII | MTK_GMAC2_GEPHY | \
1399 + MTK_GDM1_ESW | MTK_MUX_GDM1_TO_GMAC1_ESW | \
1400 + MTK_MUX_GMAC2_GMAC0_TO_GEPHY | \
1401 + MTK_MUX_U3_GMAC2_TO_QPHY | \
1402 + MTK_MUX_GMAC12_TO_GEPHY_SGMII)
1403 +
1404 +#define MT7621_CAPS (MTK_GMAC1_RGMII | MTK_GMAC1_TRGMII | MTK_GMAC2_RGMII)
1405 +
1406 /* struct mtk_eth_data - This is the structure holding all differences
1407 * among various plaforms
1408 + * @ana_rgc3: The offset for register ANA_RGC3 related to
1409 + * sgmiisys syscon
1410 * @caps Flags shown the extra capability for the SoC
1411 * @required_clks Flags shown the bitmap for required clocks on
1412 * the target SoC
1413 * @required_pctl A bool value to show whether the SoC requires
1414 * the extra setup for those pins used by GMAC.
1415 + * @irq_num		total number of eth IRQs supported by the target SoC
1416 */
1417 struct mtk_soc_data {
1418 + u32 ana_rgc3;
1419 u32 caps;
1420 u32 required_clks;
1421 bool required_pctl;
1422 + u32 irq_num;
1423 };
1424
1425 /* currently no SoC has more than 2 macs */
1426 #define MTK_MAX_DEVS 2
1427
1428 +struct mtk_eth_debug {
1429 + struct dentry *root;
1430 +};
1431 +
1432 +#define MTK_SGMII_PHYSPEED_AN BIT(31)
1433 +#define MTK_SGMII_PHYSPEED_MASK	GENMASK(2, 0)
1434 +#define MTK_SGMII_PHYSPEED_1000 BIT(0)
1435 +#define MTK_SGMII_PHYSPEED_2500 BIT(1)
1436 +#define MTK_HAS_FLAGS(flags, _x) (((flags) & (_x)) == (_x))
1437 +
1438 +/* struct mtk_sgmii - This is the structure holding sgmii regmap and its
1439 + * characteristics
1440 + * @regmap: The register map pointing at the range used to setup
1441 + * SGMII modes
1442 + * @flags: The enum refers to which mode the sgmii wants to run on
1443 + * @ana_rgc3: The offset refers to register ANA_RGC3 related to regmap
1444 + */
1445 +
1446 +struct mtk_sgmii {
1447 + struct regmap *regmap[MTK_MAX_DEVS];
1448 + u32 flags[MTK_MAX_DEVS];
1449 + u32 ana_rgc3;
1450 +};
1451 +
1452 /* struct mtk_eth - This is the main datasructure for holding the state
1453 * of the driver
1454 * @dev: The device pointer
1455 @@ -601,14 +763,15 @@ struct mtk_soc_data {
1456 * @msg_enable: Ethtool msg level
1457 * @ethsys: The register map pointing at the range used to setup
1458 * MII modes
1459 - * @sgmiisys: The register map pointing at the range used to setup
1460 - * SGMII modes
1461 + * @infra: The register map pointing at the range used to setup
1462 + * SGMII and GePHY path
1463 * @pctl: The register map pointing at the range used to setup
1464 * GMAC port drive/slew values
1465 * @dma_refcnt: track how many netdevs are using the DMA engine
1466 * @tx_ring: Pointer to the memory holding info about the TX ring
1467 * @rx_ring: Pointer to the memory holding info about the RX ring
1468 - * @rx_ring_qdma: Pointer to the memory holding info about the QDMA RX ring
1469 + * @rx_ring_qdma: Pointer to the memory holding info about the QDMA RX
1470 + * ring
1471 * @tx_napi: The TX NAPI struct
1472 * @rx_napi: The RX NAPI struct
1473 * @scratch_ring: Newer SoCs need memory for a second HW managed TX ring
1474 @@ -619,13 +782,16 @@ struct mtk_soc_data {
1475 * @pending_work: The workqueue used to reset the dma ring
1476 * @state: Initialization and runtime state of the device
1477 * @soc: Holding specific data among vaious SoCs
1478 + * @debug: Holding specific data for mtk_eth_dbg usage.
1479 */
1480
1481 struct mtk_eth {
1482 struct device *dev;
1483 void __iomem *base;
1484 spinlock_t page_lock;
1485 +	/* spin_lock for enable/disable tx irq critical section */
1486 spinlock_t tx_irq_lock;
1487 +	/* spin_lock for enable/disable rx irq critical section */
1488 spinlock_t rx_irq_lock;
1489 struct net_device dummy_dev;
1490 struct net_device *netdev[MTK_MAX_DEVS];
1491 @@ -634,10 +800,11 @@ struct mtk_eth {
1492 u32 msg_enable;
1493 unsigned long sysclk;
1494 struct regmap *ethsys;
1495 - struct regmap *sgmiisys;
1496 + struct regmap *infra;
1497 + struct mtk_sgmii *sgmii;
1498 struct regmap *pctl;
1499 bool hwlro;
1500 - refcount_t dma_refcnt;
1501 + atomic_t dma_refcnt;
1502 struct mtk_tx_ring tx_ring;
1503 struct mtk_rx_ring rx_ring[MTK_MAX_RX_RING_NUM];
1504 struct mtk_rx_ring rx_ring_qdma;
1505 @@ -653,6 +820,7 @@ struct mtk_eth {
1506 unsigned long state;
1507
1508 const struct mtk_soc_data *soc;
1509 + struct mtk_eth_debug debug;
1510 };
1511
1512 /* struct mtk_mac - the structure that holds the info about the MACs of the
1513 @@ -664,6 +832,7 @@ struct mtk_eth {
1514 * @hw_stats: Packet statistics counter
1515 * @trgmii Indicate if the MAC uses TRGMII connected to internal
1516 switch
1517 + * @phy_dev: The attached PHY if available
1518 */
1519 struct mtk_mac {
1520 int id;
1521 @@ -674,6 +843,7 @@ struct mtk_mac {
1522 __be32 hwlro_ip[MTK_MAX_LRO_IP_CNT];
1523 int hwlro_ip_cnt;
1524 bool trgmii;
1525 + struct phy_device *phy_dev;
1526 };
1527
1528 /* the struct describing the SoC. these are declared in the soc_xyz.c files */
1529 @@ -685,4 +855,10 @@ void mtk_stats_update_mac(struct mtk_mac
1530 void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg);
1531 u32 mtk_r32(struct mtk_eth *eth, unsigned reg);
1532
1533 +int mtk_sgmii_init(struct mtk_sgmii *ss, struct device_node *np,
1534 + u32 ana_rgc3);
1535 +int mtk_sgmii_setup_mode_an(struct mtk_sgmii *ss, int id);
1536 +int mtk_sgmii_setup_mode_force(struct mtk_sgmii *ss, int id);
1537 +int mtk_setup_hw_path(struct mtk_eth *eth, int mac_id, int phymode);
1538 +
1539 #endif /* MTK_ETH_H */
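On MT7622-class SoCs the shared SGMII lane is steered by ETHSYS_SYSCFG0 bits 9:8 (2 selects GMAC1, 3 selects GMAC2), which is why set_mux_gmac1_gmac2_to_sgmii_rgmii() reads the field back before letting an RGMII request clear it. A small standalone sketch of decoding that field with the masks defined in this header — the local GENMASK() helper and the sample register value are assumptions for illustration:

#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l)		(((~0u) << (l)) & (~0u >> (31 - (h))))
#define SYSCFG0_SGMII_MASK	GENMASK(9, 8)
#define SYSCFG0_SGMII_GMAC1	((2u << 8) & SYSCFG0_SGMII_MASK)
#define SYSCFG0_SGMII_GMAC2	((3u << 8) & SYSCFG0_SGMII_MASK)

/* Decode which GMAC currently owns the shared SGMII lane from a SYSCFG0
 * snapshot; returns -1 when the field is clear (no SGMII user). The value
 * passed in below is a made-up sample, not read from hardware.
 */
static int sgmii_owner(uint32_t syscfg0)
{
	switch (syscfg0 & SYSCFG0_SGMII_MASK) {
	case SYSCFG0_SGMII_GMAC1:
		return 0;
	case SYSCFG0_SGMII_GMAC2:
		return 1;
	default:
		return -1;
	}
}

int main(void)
{
	printf("owner: gmac%d\n", sgmii_owner(SYSCFG0_SGMII_GMAC2) + 1);
	return 0;
}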
1540 Index: linux-4.19.57/drivers/net/ethernet/mediatek/mtk_sgmii.c
1541 ===================================================================
1542 --- /dev/null
1543 +++ linux-4.19.57/drivers/net/ethernet/mediatek/mtk_sgmii.c
1544 @@ -0,0 +1,114 @@
1545 +/*
1546 + * Copyright (C) 2018 MediaTek Inc.
1547 + *
1548 + * This program is free software; you can redistribute it and/or modify
1549 + * it under the terms of the GNU General Public License as published by
1550 + * the Free Software Foundation; version 2 of the License
1551 + *
1552 + * This program is distributed in the hope that it will be useful,
1553 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
1554 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1555 + * GNU General Public License for more details.
1556 + *
1557 + * Copyright (C) 2018 Sean Wang <sean.wang@mediatek.com>
1558 + */
1559 +
1560 +#include <linux/mfd/syscon.h>
1561 +#include <linux/of.h>
1562 +#include <linux/regmap.h>
1563 +
1564 +#include "mtk_eth_soc.h"
1565 +
1566 +int mtk_sgmii_init(struct mtk_sgmii *ss, struct device_node *r, u32 ana_rgc3)
1567 +{
1568 + struct device_node *np;
1569 + const char *str;
1570 + int i, err;
1571 +
1572 + ss->ana_rgc3 = ana_rgc3;
1573 +
1574 + for (i = 0; i < MTK_MAX_DEVS; i++) {
1575 + np = of_parse_phandle(r, "mediatek,sgmiisys", i);
1576 + if (!np)
1577 + break;
1578 +
1579 + ss->regmap[i] = syscon_node_to_regmap(np);
1580 + if (IS_ERR(ss->regmap[i]))
1581 + return PTR_ERR(ss->regmap[i]);
1582 +
1583 + err = of_property_read_string(np, "mediatek,physpeed", &str);
1584 + if (err)
1585 + return err;
1586 +
1590 + if (!strcmp(str, "2500"))
1591 + ss->flags[i] |= MTK_SGMII_PHYSPEED_2500;
1592 + else if (!strcmp(str, "1000"))
1593 + ss->flags[i] |= MTK_SGMII_PHYSPEED_1000;
1594 + else if (!strcmp(str, "auto"))
1595 + ss->flags[i] |= MTK_SGMII_PHYSPEED_AN;
1596 + else
1597 + return -EINVAL;
1598 + }
1599 +
1600 + return 0;
1601 +}
1602 +
1603 +int mtk_sgmii_setup_mode_an(struct mtk_sgmii *ss, int id)
1604 +{
1605 + unsigned int val;
1606 +
1607 + if (!ss->regmap[id])
1608 + return -EINVAL;
1609 +
1610 + /* Setup the link timer and QPHY power up inside SGMIISYS */
1611 + regmap_write(ss->regmap[id], SGMSYS_PCS_LINK_TIMER,
1612 + SGMII_LINK_TIMER_DEFAULT);
1613 +
1614 + regmap_read(ss->regmap[id], SGMSYS_SGMII_MODE, &val);
1615 + val |= SGMII_REMOTE_FAULT_DIS;
1616 + regmap_write(ss->regmap[id], SGMSYS_SGMII_MODE, val);
1617 +
1618 + regmap_read(ss->regmap[id], SGMSYS_PCS_CONTROL_1, &val);
1619 + val |= SGMII_AN_RESTART;
1620 + regmap_write(ss->regmap[id], SGMSYS_PCS_CONTROL_1, val);
1621 +
1622 + regmap_read(ss->regmap[id], SGMSYS_QPHY_PWR_STATE_CTRL, &val);
1623 + val &= ~SGMII_PHYA_PWD;
1624 + regmap_write(ss->regmap[id], SGMSYS_QPHY_PWR_STATE_CTRL, val);
1625 +
1626 + return 0;
1627 +}
1628 +
1629 +int mtk_sgmii_setup_mode_force(struct mtk_sgmii *ss, int id)
1630 +{
1631 + unsigned int val;
1632 + int mode;
1633 +
1634 + if (!ss->regmap[id])
1635 + return -EINVAL;
1636 +
1637 + regmap_read(ss->regmap[id], ss->ana_rgc3, &val);
1638 +	val &= ~GENMASK(3, 2);
1639 + mode = ss->flags[id] & MTK_SGMII_PHYSPEED_MASK;
1640 + val |= (mode == MTK_SGMII_PHYSPEED_1000) ? 0 : BIT(2);
1641 + regmap_write(ss->regmap[id], ss->ana_rgc3, val);
1642 +
1643 + /* disable SGMII AN */
1644 + regmap_read(ss->regmap[id], SGMSYS_PCS_CONTROL_1, &val);
1645 + val &= ~BIT(12);
1646 + regmap_write(ss->regmap[id], SGMSYS_PCS_CONTROL_1, val);
1647 +
1648 + /* SGMII force mode setting */
1649 + val = 0x31120019;
1650 + regmap_write(ss->regmap[id], SGMSYS_SGMII_MODE, val);
1651 +
1652 + /* Release PHYA power down state */
1653 + regmap_read(ss->regmap[id], SGMSYS_QPHY_PWR_STATE_CTRL, &val);
1654 + val &= ~SGMII_PHYA_PWD;
1655 + regmap_write(ss->regmap[id], SGMSYS_QPHY_PWR_STATE_CTRL, val);
1656 +
1657 + return 0;
1658 +}
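mtk_sgmii_init() above folds the devicetree "mediatek,physpeed" string into per-instance flag words, and mtk_gmac_sgmii_path_setup() in mtk_eth_path.c later picks auto-negotiation or forced mode from them. A compressed standalone sketch of that decision — the flag values are copied from mtk_eth_soc.h, while the two setup stubs and main() are illustrative only:

#include <stdint.h>
#include <stdio.h>

#define BIT(n)			(1u << (n))
#define MTK_SGMII_PHYSPEED_AN	BIT(31)
#define MTK_SGMII_PHYSPEED_1000	BIT(0)
#define MTK_SGMII_PHYSPEED_2500	BIT(1)
#define MTK_HAS_FLAGS(flags, _x) (((flags) & (_x)) == (_x))

/* Stand-ins for mtk_sgmii_setup_mode_an()/_force(); they only report which
 * branch would run.
 */
static int setup_mode_an(int id)    { printf("sgmii%d: 1000Base-X AN\n", id); return 0; }
static int setup_mode_force(int id) { printf("sgmii%d: forced speed\n", id); return 0; }

static int sgmii_setup(uint32_t flags, int id)
{
	/* Same test mtk_gmac_sgmii_path_setup() applies to eth->sgmii->flags[sid] */
	if (MTK_HAS_FLAGS(flags, MTK_SGMII_PHYSPEED_AN))
		return setup_mode_an(id);
	return setup_mode_force(id);
}

int main(void)
{
	sgmii_setup(MTK_SGMII_PHYSPEED_AN, 0);		/* "mediatek,physpeed" = "auto" */
	sgmii_setup(MTK_SGMII_PHYSPEED_2500, 1);	/* "mediatek,physpeed" = "2500" */
	return 0;
}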