kernel: bump 4.19 to 4.19.69
[openwrt/staging/mkresin.git] / target / linux / mediatek / patches-4.19 / 0001-eth-sync-from-mtk-lede.patch
1 --- a/drivers/net/ethernet/mediatek/Kconfig
2 +++ b/drivers/net/ethernet/mediatek/Kconfig
3 @@ -1,6 +1,6 @@
4 config NET_VENDOR_MEDIATEK
5 bool "MediaTek ethernet driver"
6 - depends on ARCH_MEDIATEK
7 + depends on ARCH_MEDIATEK || RALINK
8 ---help---
9 If you have a Mediatek SoC with ethernet, say Y.
10
11 --- a/drivers/net/ethernet/mediatek/Makefile
12 +++ b/drivers/net/ethernet/mediatek/Makefile
13 @@ -2,4 +2,5 @@
14 # Makefile for the Mediatek SoCs built-in ethernet macs
15 #
16
17 -obj-$(CONFIG_NET_MEDIATEK_SOC) += mtk_eth_soc.o
18 +obj-$(CONFIG_NET_MEDIATEK_SOC) += mtk_eth_soc.o mtk_sgmii.o \
19 + mtk_eth_path.o
20 --- /dev/null
21 +++ b/drivers/net/ethernet/mediatek/mtk_eth_path.c
22 @@ -0,0 +1,333 @@
23 +/*
24 + * Copyright (C) 2018 MediaTek Inc.
25 + *
26 + * This program is free software; you can redistribute it and/or modify
27 + * it under the terms of the GNU General Public License as published by
28 + * the Free Software Foundation; version 2 of the License
29 + *
30 + * This program is distributed in the hope that it will be useful,
31 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
32 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
33 + * GNU General Public License for more details.
34 + *
35 + * Copyright (C) 2018 Sean Wang <sean.wang@mediatek.com>
36 + */
37 +
38 +#include <linux/phy.h>
39 +#include <linux/regmap.h>
40 +
41 +#include "mtk_eth_soc.h"
42 +
43 +struct mtk_eth_muxc {
44 + int (*set_path)(struct mtk_eth *eth, int path);
45 +};
46 +
47 +static const char * const mtk_eth_mux_name[] = {
48 + "mux_gdm1_to_gmac1_esw", "mux_gmac2_gmac0_to_gephy",
49 + "mux_u3_gmac2_to_qphy", "mux_gmac1_gmac2_to_sgmii_rgmii",
50 + "mux_gmac12_to_gephy_sgmii",
51 +};
52 +
53 +static const char * const mtk_eth_path_name[] = {
54 + "gmac1_rgmii", "gmac1_trgmii", "gmac1_sgmii", "gmac2_rgmii",
55 + "gmac2_sgmii", "gmac2_gephy", "gdm1_esw",
56 +};
57 +
58 +static int set_mux_gdm1_to_gmac1_esw(struct mtk_eth *eth, int path)
59 +{
60 + u32 val, mask, set;
61 + bool updated = true;
62 +
63 + switch (path) {
64 + case MTK_ETH_PATH_GMAC1_SGMII:
65 + mask = ~(u32)MTK_MUX_TO_ESW;
66 + set = 0;
67 + break;
68 + case MTK_ETH_PATH_GDM1_ESW:
69 + mask = ~(u32)MTK_MUX_TO_ESW;
70 + set = MTK_MUX_TO_ESW;
71 + break;
72 + default:
73 + updated = false;
74 + break;
75 +	}
76 +
77 + if (updated) {
78 + val = mtk_r32(eth, MTK_MAC_MISC);
79 + val = (val & mask) | set;
80 + mtk_w32(eth, val, MTK_MAC_MISC);
81 + }
82 +
83 + dev_info(eth->dev, "path %s in %s updated = %d\n",
84 + mtk_eth_path_name[path], __func__, updated);
85 +
86 + return 0;
87 +}
88 +
89 +static int set_mux_gmac2_gmac0_to_gephy(struct mtk_eth *eth, int path)
90 +{
91 + unsigned int val = 0;
92 + bool updated = true;
93 +
94 + switch (path) {
95 + case MTK_ETH_PATH_GMAC2_GEPHY:
96 + val = ~(u32)GEPHY_MAC_SEL;
97 + break;
98 + default:
99 + updated = false;
100 + break;
101 + }
102 +
103 + if (updated)
104 + regmap_update_bits(eth->infra, INFRA_MISC2, GEPHY_MAC_SEL, val);
105 +
106 + dev_info(eth->dev, "path %s in %s updated = %d\n",
107 + mtk_eth_path_name[path], __func__, updated);
108 +
109 + return 0;
110 +}
111 +
112 +static int set_mux_u3_gmac2_to_qphy(struct mtk_eth *eth, int path)
113 +{
114 + unsigned int val = 0;
115 + bool updated = true;
116 +
117 + switch (path) {
118 + case MTK_ETH_PATH_GMAC2_SGMII:
119 + val = CO_QPHY_SEL;
120 + break;
121 + default:
122 + updated = false;
123 + break;
124 + }
125 +
126 + if (updated)
127 + regmap_update_bits(eth->infra, INFRA_MISC2, CO_QPHY_SEL, val);
128 +
129 + dev_info(eth->dev, "path %s in %s updated = %d\n",
130 + mtk_eth_path_name[path], __func__, updated);
131 +
132 + return 0;
133 +}
134 +
135 +static int set_mux_gmac1_gmac2_to_sgmii_rgmii(struct mtk_eth *eth, int path)
136 +{
137 + unsigned int val = 0;
138 + bool updated = true;
139 +
140 + switch (path) {
141 + case MTK_ETH_PATH_GMAC1_SGMII:
142 + val = SYSCFG0_SGMII_GMAC1;
143 + break;
144 + case MTK_ETH_PATH_GMAC2_SGMII:
145 + val = SYSCFG0_SGMII_GMAC2;
146 + break;
147 + case MTK_ETH_PATH_GMAC1_RGMII:
148 + case MTK_ETH_PATH_GMAC2_RGMII:
149 + regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
150 + val &= SYSCFG0_SGMII_MASK;
151 +
152 +		if ((path == MTK_ETH_PATH_GMAC1_RGMII && val == SYSCFG0_SGMII_GMAC1) ||
153 +		    (path == MTK_ETH_PATH_GMAC2_RGMII && val == SYSCFG0_SGMII_GMAC2))
154 + val = 0;
155 + else
156 + updated = false;
157 + break;
158 + default:
159 + updated = false;
160 + break;
161 +	}
162 +
163 + if (updated)
164 + regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
165 + SYSCFG0_SGMII_MASK, val);
166 +
167 + dev_info(eth->dev, "path %s in %s updated = %d\n",
168 + mtk_eth_path_name[path], __func__, updated);
169 +
170 + return 0;
171 +}
172 +
173 +static int set_mux_gmac12_to_gephy_sgmii(struct mtk_eth *eth, int path)
174 +{
175 + unsigned int val = 0;
176 + bool updated = true;
177 +
178 + regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
179 +
180 + switch (path) {
181 + case MTK_ETH_PATH_GMAC1_SGMII:
182 + val |= SYSCFG0_SGMII_GMAC1_V2;
183 + break;
184 + case MTK_ETH_PATH_GMAC2_GEPHY:
185 + val &= ~(u32)SYSCFG0_SGMII_GMAC2_V2;
186 + break;
187 + case MTK_ETH_PATH_GMAC2_SGMII:
188 + val |= SYSCFG0_SGMII_GMAC2_V2;
189 + break;
190 + default:
191 + updated = false;
192 +	}
193 +
194 + if (updated)
195 + regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
196 + SYSCFG0_SGMII_MASK, val);
197 +
198 + if (!updated)
199 +		dev_info(eth->dev, "path %s does not need updating in %s\n",
200 + mtk_eth_path_name[path], __func__);
201 +
202 + dev_info(eth->dev, "path %s in %s updated = %d\n",
203 + mtk_eth_path_name[path], __func__, updated);
204 +
205 + return 0;
206 +}
207 +
208 +static const struct mtk_eth_muxc mtk_eth_muxc[] = {
209 + { .set_path = set_mux_gdm1_to_gmac1_esw, },
210 + { .set_path = set_mux_gmac2_gmac0_to_gephy, },
211 + { .set_path = set_mux_u3_gmac2_to_qphy, },
212 + { .set_path = set_mux_gmac1_gmac2_to_sgmii_rgmii, },
213 + { .set_path = set_mux_gmac12_to_gephy_sgmii, }
214 +};
215 +
216 +static int mtk_eth_mux_setup(struct mtk_eth *eth, int path)
217 +{
218 + int i, err = 0;
219 +
220 + if (!MTK_HAS_CAPS(eth->soc->caps, MTK_PATH_BIT(path))) {
221 + dev_info(eth->dev, "path %s isn't support on the SoC\n",
222 + mtk_eth_path_name[path]);
223 + return -EINVAL;
224 + }
225 +
226 + if (!MTK_HAS_CAPS(eth->soc->caps, MTK_MUX))
227 + return 0;
228 +
229 + /* Setup MUX in path fabric */
230 + for (i = 0; i < MTK_ETH_MUX_MAX; i++) {
231 + if (MTK_HAS_CAPS(eth->soc->caps, MTK_MUX_BIT(i))) {
232 + err = mtk_eth_muxc[i].set_path(eth, path);
233 + if (err)
234 + goto out;
235 + } else {
236 + dev_info(eth->dev, "mux %s isn't present on the SoC\n",
237 + mtk_eth_mux_name[i]);
238 + }
239 + }
240 +
241 +out:
242 + return err;
243 +}
244 +
245 +static int mtk_gmac_sgmii_path_setup(struct mtk_eth *eth, int mac_id)
246 +{
247 + unsigned int val = 0;
248 + int sid, err, path;
249 +
250 + path = (mac_id == 0) ? MTK_ETH_PATH_GMAC1_SGMII :
251 + MTK_ETH_PATH_GMAC2_SGMII;
252 +
253 + /* Setup proper MUXes along the path */
254 + err = mtk_eth_mux_setup(eth, path);
255 + if (err)
256 + return err;
257 +
258 + /* The path GMAC to SGMII will be enabled once the SGMIISYS is being
259 + * setup done.
260 + */
261 + regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
262 +
263 + regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
264 + SYSCFG0_SGMII_MASK, ~(u32)SYSCFG0_SGMII_MASK);
265 +
266 + /* Decide how GMAC and SGMIISYS be mapped */
267 + sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ? 0 : mac_id;
268 +
269 + /* Setup SGMIISYS with the determined property */
270 + if (MTK_HAS_FLAGS(eth->sgmii->flags[sid], MTK_SGMII_PHYSPEED_AN))
271 + err = mtk_sgmii_setup_mode_an(eth->sgmii, sid);
272 + else
273 + err = mtk_sgmii_setup_mode_force(eth->sgmii, sid);
274 +
275 + if (err)
276 + return err;
277 +
278 + regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
279 + SYSCFG0_SGMII_MASK, val);
280 +
281 + return 0;
282 +}
283 +
284 +static int mtk_gmac_gephy_path_setup(struct mtk_eth *eth, int mac_id)
285 +{
286 + int err, path = 0;
287 +
288 + if (mac_id == 1)
289 + path = MTK_ETH_PATH_GMAC2_GEPHY;
290 +
291 + if (!path)
292 + return -EINVAL;
293 +
294 + /* Setup proper MUXes along the path */
295 + err = mtk_eth_mux_setup(eth, path);
296 + if (err)
297 + return err;
298 +
299 + return 0;
300 +}
301 +
302 +static int mtk_gmac_rgmii_path_setup(struct mtk_eth *eth, int mac_id)
303 +{
304 + int err, path;
305 +
306 + path = (mac_id == 0) ? MTK_ETH_PATH_GMAC1_RGMII :
307 + MTK_ETH_PATH_GMAC2_RGMII;
308 +
309 + /* Setup proper MUXes along the path */
310 + err = mtk_eth_mux_setup(eth, path);
311 + if (err)
312 + return err;
313 +
314 + return 0;
315 +}
316 +
317 +int mtk_setup_hw_path(struct mtk_eth *eth, int mac_id, int phymode)
318 +{
319 + int err;
320 +
321 + switch (phymode) {
322 + case PHY_INTERFACE_MODE_TRGMII:
323 + case PHY_INTERFACE_MODE_RGMII_TXID:
324 + case PHY_INTERFACE_MODE_RGMII_RXID:
325 + case PHY_INTERFACE_MODE_RGMII_ID:
326 + case PHY_INTERFACE_MODE_RGMII:
327 + case PHY_INTERFACE_MODE_MII:
328 + case PHY_INTERFACE_MODE_REVMII:
329 + case PHY_INTERFACE_MODE_RMII:
330 + if (MTK_HAS_CAPS(eth->soc->caps, MTK_RGMII)) {
331 + err = mtk_gmac_rgmii_path_setup(eth, mac_id);
332 + if (err)
333 + return err;
334 + }
335 + break;
336 + case PHY_INTERFACE_MODE_SGMII:
337 + if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
338 + err = mtk_gmac_sgmii_path_setup(eth, mac_id);
339 + if (err)
340 + return err;
341 + }
342 + break;
343 + case PHY_INTERFACE_MODE_GMII:
344 + if (MTK_HAS_CAPS(eth->soc->caps, MTK_GEPHY)) {
345 + err = mtk_gmac_gephy_path_setup(eth, mac_id);
346 + if (err)
347 + return err;
348 + }
349 + break;
350 + default:
351 + break;
352 + }
353 +
354 + return 0;
355 +}
356 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
357 +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
358 @@ -23,6 +23,7 @@
359 #include <linux/reset.h>
360 #include <linux/tcp.h>
361 #include <linux/interrupt.h>
362 +#include <linux/mdio.h>
363 #include <linux/pinctrl/devinfo.h>
364
365 #include "mtk_eth_soc.h"
366 @@ -54,8 +55,10 @@ static const struct mtk_ethtool_stats {
367 };
368
369 static const char * const mtk_clks_source_name[] = {
370 - "ethif", "esw", "gp0", "gp1", "gp2", "trgpll", "sgmii_tx250m",
371 - "sgmii_rx250m", "sgmii_cdr_ref", "sgmii_cdr_fb", "sgmii_ck", "eth2pll"
372 + "ethif", "sgmiitop", "esw", "gp0", "gp1", "gp2", "fe", "trgpll",
373 + "sgmii_tx250m", "sgmii_rx250m", "sgmii_cdr_ref", "sgmii_cdr_fb",
374 + "sgmii2_tx250m", "sgmii2_rx250m", "sgmii2_cdr_ref", "sgmii2_cdr_fb",
375 + "sgmii_ck", "eth2pll",
376 };
377
378 void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
379 @@ -84,8 +87,8 @@ static int mtk_mdio_busy_wait(struct mtk
380 return -1;
381 }
382
383 -static u32 _mtk_mdio_write(struct mtk_eth *eth, u32 phy_addr,
384 - u32 phy_register, u32 write_data)
385 +u32 _mtk_mdio_write(struct mtk_eth *eth, u32 phy_addr,
386 + u32 phy_register, u32 write_data)
387 {
388 if (mtk_mdio_busy_wait(eth))
389 return -1;
390 @@ -103,7 +106,7 @@ static u32 _mtk_mdio_write(struct mtk_et
391 return 0;
392 }
393
394 -static u32 _mtk_mdio_read(struct mtk_eth *eth, int phy_addr, int phy_reg)
395 +u32 _mtk_mdio_read(struct mtk_eth *eth, int phy_addr, int phy_reg)
396 {
397 u32 d;
398
399 @@ -123,6 +126,34 @@ static u32 _mtk_mdio_read(struct mtk_eth
400 return d;
401 }
402
403 +u32 mtk_cl45_ind_read(struct mtk_eth *eth, u32 port, u32 devad, u32 reg, u32 *data)
404 +{
405 + mutex_lock(&eth->mii_bus->mdio_lock);
406 +
407 + _mtk_mdio_write(eth, port, MII_MMD_ACC_CTL_REG, devad);
408 + _mtk_mdio_write(eth, port, MII_MMD_ADDR_DATA_REG, reg);
409 + _mtk_mdio_write(eth, port, MII_MMD_ACC_CTL_REG, MMD_OP_MODE_DATA | devad);
410 + *data = _mtk_mdio_read(eth, port, MII_MMD_ADDR_DATA_REG);
411 +
412 + mutex_unlock(&eth->mii_bus->mdio_lock);
413 +
414 + return 0;
415 +}
416 +
417 +u32 mtk_cl45_ind_write(struct mtk_eth *eth, u32 port, u32 devad, u32 reg, u32 data)
418 +{
419 + mutex_lock(&eth->mii_bus->mdio_lock);
420 +
421 + _mtk_mdio_write(eth, port, MII_MMD_ACC_CTL_REG, devad);
422 + _mtk_mdio_write(eth, port, MII_MMD_ADDR_DATA_REG, reg);
423 + _mtk_mdio_write(eth, port, MII_MMD_ACC_CTL_REG, MMD_OP_MODE_DATA | devad);
424 + _mtk_mdio_write(eth, port, MII_MMD_ADDR_DATA_REG, data);
425 +
426 + mutex_unlock(&eth->mii_bus->mdio_lock);
427 +
428 + return 0;
429 +}
430 +
431 static int mtk_mdio_write(struct mii_bus *bus, int phy_addr,
432 int phy_reg, u16 val)
433 {
434 @@ -165,51 +196,12 @@ static void mtk_gmac0_rgmii_adjust(struc
435 mtk_w32(eth, val, TRGMII_TCK_CTRL);
436 }
437
438 -static void mtk_gmac_sgmii_hw_setup(struct mtk_eth *eth, int mac_id)
439 -{
440 - u32 val;
441 -
442 - /* Setup the link timer and QPHY power up inside SGMIISYS */
443 - regmap_write(eth->sgmiisys, SGMSYS_PCS_LINK_TIMER,
444 - SGMII_LINK_TIMER_DEFAULT);
445 -
446 - regmap_read(eth->sgmiisys, SGMSYS_SGMII_MODE, &val);
447 - val |= SGMII_REMOTE_FAULT_DIS;
448 - regmap_write(eth->sgmiisys, SGMSYS_SGMII_MODE, val);
449 -
450 - regmap_read(eth->sgmiisys, SGMSYS_PCS_CONTROL_1, &val);
451 - val |= SGMII_AN_RESTART;
452 - regmap_write(eth->sgmiisys, SGMSYS_PCS_CONTROL_1, val);
453 -
454 - regmap_read(eth->sgmiisys, SGMSYS_QPHY_PWR_STATE_CTRL, &val);
455 - val &= ~SGMII_PHYA_PWD;
456 - regmap_write(eth->sgmiisys, SGMSYS_QPHY_PWR_STATE_CTRL, val);
457 -
458 - /* Determine MUX for which GMAC uses the SGMII interface */
459 - if (MTK_HAS_CAPS(eth->soc->caps, MTK_DUAL_GMAC_SHARED_SGMII)) {
460 - regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
461 - val &= ~SYSCFG0_SGMII_MASK;
462 - val |= !mac_id ? SYSCFG0_SGMII_GMAC1 : SYSCFG0_SGMII_GMAC2;
463 - regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
464 -
465 - dev_info(eth->dev, "setup shared sgmii for gmac=%d\n",
466 - mac_id);
467 - }
468 -
469 - /* Setup the GMAC1 going through SGMII path when SoC also support
470 - * ESW on GMAC1
471 - */
472 - if (MTK_HAS_CAPS(eth->soc->caps, MTK_GMAC1_ESW | MTK_GMAC1_SGMII) &&
473 - !mac_id) {
474 - mtk_w32(eth, 0, MTK_MAC_MISC);
475 - dev_info(eth->dev, "setup gmac1 going through sgmii");
476 - }
477 -}
478 -
479 static void mtk_phy_link_adjust(struct net_device *dev)
480 {
481 struct mtk_mac *mac = netdev_priv(dev);
482 + struct mtk_eth *eth = mac->hw;
483 u16 lcl_adv = 0, rmt_adv = 0;
484 + u32 lcl_eee = 0, rmt_eee = 0;
485 u8 flowctrl;
486 u32 mcr = MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG |
487 MAC_MCR_FORCE_MODE | MAC_MCR_TX_EN |
488 @@ -229,7 +221,7 @@ static void mtk_phy_link_adjust(struct n
489 };
490
491 if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII) &&
492 - !mac->id && !mac->trgmii)
493 + !mac->id && !mac->trgmii)
494 mtk_gmac0_rgmii_adjust(mac->hw, dev->phydev->speed);
495
496 if (dev->phydev->link)
497 @@ -259,7 +251,16 @@ static void mtk_phy_link_adjust(struct n
498 flowctrl & FLOW_CTRL_RX ? "enabled" : "disabled",
499 flowctrl & FLOW_CTRL_TX ? "enabled" : "disabled");
500 }
501 + /*EEE capability*/
502 + mtk_cl45_ind_read(eth, 0, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &lcl_eee);
503 + mtk_cl45_ind_read(eth, 0, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &rmt_eee);
504 +
505 + if ((lcl_eee & rmt_eee & MDIO_EEE_1000T) == MDIO_EEE_1000T)
506 + mcr |= MAC_MCR_MDIO_EEE_1000T;
507 + if ((lcl_eee & rmt_eee & MDIO_EEE_100TX) == MDIO_EEE_100TX)
508 + mcr |= MAC_MCR_MDIO_EEE_100TX;
509
510 + /*Setup MCR*/
511 mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
512
513 if (dev->phydev->link)
514 @@ -290,10 +291,10 @@ static int mtk_phy_connect_node(struct m
515 return -ENODEV;
516 }
517
518 - dev_info(eth->dev,
519 - "connected mac %d to PHY at %s [uid=%08x, driver=%s]\n",
520 - mac->id, phydev_name(phydev), phydev->phy_id,
521 - phydev->drv->name);
522 + dev_info(eth->dev,
523 + "connected mac %d to PHY at %s [uid=%08x, driver=%s]\n",
524 + mac->id, phydev_name(phydev), phydev->phy_id,
525 + phydev->drv->name);
526
527 return 0;
528 }
529 @@ -304,6 +305,7 @@ static int mtk_phy_connect(struct net_de
530 struct mtk_eth *eth;
531 struct device_node *np;
532 u32 val;
533 + int err;
534
535 eth = mac->hw;
536 np = of_parse_phandle(mac->of_node, "phy-handle", 0);
537 @@ -313,6 +315,10 @@ static int mtk_phy_connect(struct net_de
538 if (!np)
539 return -ENODEV;
540
541 + err = mtk_setup_hw_path(eth, mac->id, of_get_phy_mode(np));
542 + if (err)
543 + goto err_phy;
544 +
545 mac->ge_mode = 0;
546 switch (of_get_phy_mode(np)) {
547 case PHY_INTERFACE_MODE_TRGMII:
548 @@ -323,10 +329,9 @@ static int mtk_phy_connect(struct net_de
549 case PHY_INTERFACE_MODE_RGMII:
550 break;
551 case PHY_INTERFACE_MODE_SGMII:
552 - if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII))
553 - mtk_gmac_sgmii_hw_setup(eth, mac->id);
554 break;
555 case PHY_INTERFACE_MODE_MII:
556 + case PHY_INTERFACE_MODE_GMII:
557 mac->ge_mode = 1;
558 break;
559 case PHY_INTERFACE_MODE_REVMII:
560 @@ -355,7 +360,7 @@ static int mtk_phy_connect(struct net_de
561 dev->phydev->speed = 0;
562 dev->phydev->duplex = 0;
563
564 - if (of_phy_is_fixed_link(mac->of_node))
565 + if (!strncmp(dev->phydev->drv->name, "Generic", 7))
566 dev->phydev->supported |=
567 SUPPORTED_Pause | SUPPORTED_Asym_Pause;
568
569 @@ -535,37 +540,37 @@ static void mtk_stats_update(struct mtk_
570 }
571
572 static void mtk_get_stats64(struct net_device *dev,
573 - struct rtnl_link_stats64 *storage)
574 + struct rtnl_link_stats64 *storage)
575 {
576 - struct mtk_mac *mac = netdev_priv(dev);
577 - struct mtk_hw_stats *hw_stats = mac->hw_stats;
578 - unsigned int start;
579 -
580 - if (netif_running(dev) && netif_device_present(dev)) {
581 - if (spin_trylock_bh(&hw_stats->stats_lock)) {
582 - mtk_stats_update_mac(mac);
583 - spin_unlock_bh(&hw_stats->stats_lock);
584 - }
585 - }
586 -
587 - do {
588 - start = u64_stats_fetch_begin_irq(&hw_stats->syncp);
589 - storage->rx_packets = hw_stats->rx_packets;
590 - storage->tx_packets = hw_stats->tx_packets;
591 - storage->rx_bytes = hw_stats->rx_bytes;
592 - storage->tx_bytes = hw_stats->tx_bytes;
593 - storage->collisions = hw_stats->tx_collisions;
594 - storage->rx_length_errors = hw_stats->rx_short_errors +
595 - hw_stats->rx_long_errors;
596 - storage->rx_over_errors = hw_stats->rx_overflow;
597 - storage->rx_crc_errors = hw_stats->rx_fcs_errors;
598 - storage->rx_errors = hw_stats->rx_checksum_errors;
599 - storage->tx_aborted_errors = hw_stats->tx_skip;
600 - } while (u64_stats_fetch_retry_irq(&hw_stats->syncp, start));
601 -
602 - storage->tx_errors = dev->stats.tx_errors;
603 - storage->rx_dropped = dev->stats.rx_dropped;
604 - storage->tx_dropped = dev->stats.tx_dropped;
605 + struct mtk_mac *mac = netdev_priv(dev);
606 + struct mtk_hw_stats *hw_stats = mac->hw_stats;
607 + unsigned int start;
608 +
609 + if (netif_running(dev) && netif_device_present(dev)) {
610 + if (spin_trylock_bh(&hw_stats->stats_lock)) {
611 + mtk_stats_update_mac(mac);
612 + spin_unlock_bh(&hw_stats->stats_lock);
613 + }
614 + }
615 +
616 + do {
617 + start = u64_stats_fetch_begin_irq(&hw_stats->syncp);
618 + storage->rx_packets = hw_stats->rx_packets;
619 + storage->tx_packets = hw_stats->tx_packets;
620 + storage->rx_bytes = hw_stats->rx_bytes;
621 + storage->tx_bytes = hw_stats->tx_bytes;
622 + storage->collisions = hw_stats->tx_collisions;
623 + storage->rx_length_errors = hw_stats->rx_short_errors +
624 + hw_stats->rx_long_errors;
625 + storage->rx_over_errors = hw_stats->rx_overflow;
626 + storage->rx_crc_errors = hw_stats->rx_fcs_errors;
627 + storage->rx_errors = hw_stats->rx_checksum_errors;
628 + storage->tx_aborted_errors = hw_stats->tx_skip;
629 + } while (u64_stats_fetch_retry_irq(&hw_stats->syncp, start));
630 +
631 + storage->tx_errors = dev->stats.tx_errors;
632 + storage->rx_dropped = dev->stats.rx_dropped;
633 + storage->tx_dropped = dev->stats.tx_dropped;
634 }
635
636 static inline int mtk_max_frag_size(int mtu)
637 @@ -605,10 +610,10 @@ static int mtk_init_fq_dma(struct mtk_et
638 dma_addr_t dma_addr;
639 int i;
640
641 - eth->scratch_ring = dma_zalloc_coherent(eth->dev,
642 - cnt * sizeof(struct mtk_tx_dma),
643 - &eth->phy_scratch_ring,
644 - GFP_ATOMIC);
645 + eth->scratch_ring = dma_alloc_coherent(eth->dev,
646 + cnt * sizeof(struct mtk_tx_dma),
647 + &eth->phy_scratch_ring,
648 + GFP_ATOMIC | __GFP_ZERO);
649 if (unlikely(!eth->scratch_ring))
650 return -ENOMEM;
651
652 @@ -623,6 +628,7 @@ static int mtk_init_fq_dma(struct mtk_et
653 if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
654 return -ENOMEM;
655
656 + memset(eth->scratch_ring, 0x0, sizeof(struct mtk_tx_dma) * cnt);
657 phy_ring_tail = eth->phy_scratch_ring +
658 (sizeof(struct mtk_tx_dma) * (cnt - 1));
659
660 @@ -673,7 +679,7 @@ static void mtk_tx_unmap(struct mtk_eth
661 }
662 tx_buf->flags = 0;
663 if (tx_buf->skb &&
664 - (tx_buf->skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC))
665 + (tx_buf->skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC))
666 dev_kfree_skb_any(tx_buf->skb);
667 tx_buf->skb = NULL;
668 }
669 @@ -689,6 +695,7 @@ static int mtk_tx_map(struct sk_buff *sk
670 unsigned int nr_frags;
671 int i, n_desc = 1;
672 u32 txd4 = 0, fport;
673 + u32 qid = 0;
674
675 itxd = ring->next_free;
676 if (itxd == ring->last_free)
677 @@ -708,9 +715,10 @@ static int mtk_tx_map(struct sk_buff *sk
678 if (skb->ip_summed == CHECKSUM_PARTIAL)
679 txd4 |= TX_DMA_CHKSUM;
680
681 - /* VLAN header offload */
682 - if (skb_vlan_tag_present(skb))
683 - txd4 |= TX_DMA_INS_VLAN | skb_vlan_tag_get(skb);
684 +#if defined(CONFIG_NET_MEDIATEK_HW_QOS)
685 + qid = skb->mark & (MTK_QDMA_TX_MASK);
686 + qid += (!mac->id) ? (MTK_QDMA_TX_MASK + 1) : 0;
687 +#endif
688
689 mapped_addr = dma_map_single(eth->dev, skb->data,
690 skb_headlen(skb), DMA_TO_DEVICE);
691 @@ -727,6 +735,7 @@ static int mtk_tx_map(struct sk_buff *sk
692 /* TX SG offload */
693 txd = itxd;
694 nr_frags = skb_shinfo(skb)->nr_frags;
695 +
696 for (i = 0; i < nr_frags; i++) {
697 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
698 unsigned int offset = 0;
699 @@ -753,10 +762,10 @@ static int mtk_tx_map(struct sk_buff *sk
700 last_frag = true;
701
702 WRITE_ONCE(txd->txd1, mapped_addr);
703 - WRITE_ONCE(txd->txd3, (TX_DMA_SWC |
704 + WRITE_ONCE(txd->txd3, (TX_DMA_SWC | QID_LOW_BITS(qid) |
705 TX_DMA_PLEN0(frag_map_size) |
706 last_frag * TX_DMA_LS0));
707 - WRITE_ONCE(txd->txd4, fport);
708 + WRITE_ONCE(txd->txd4, fport | QID_HIGH_BITS(qid));
709
710 tx_buf = mtk_desc_to_tx_buf(ring, txd);
711 memset(tx_buf, 0, sizeof(*tx_buf));
712 @@ -775,9 +784,9 @@ static int mtk_tx_map(struct sk_buff *sk
713 /* store skb to cleanup */
714 itx_buf->skb = skb;
715
716 - WRITE_ONCE(itxd->txd4, txd4);
717 WRITE_ONCE(itxd->txd3, (TX_DMA_SWC | TX_DMA_PLEN0(skb_headlen(skb)) |
718 - (!nr_frags * TX_DMA_LS0)));
719 + (!nr_frags * TX_DMA_LS0)) | QID_LOW_BITS(qid));
720 + WRITE_ONCE(itxd->txd4, txd4 | QID_HIGH_BITS(qid));
721
722 netdev_sent_queue(dev, skb->len);
723 skb_tx_timestamp(skb);
724 @@ -922,7 +931,7 @@ drop:
725 return NETDEV_TX_OK;
726 }
727
728 -static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth)
729 +struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth)
730 {
731 int i;
732 struct mtk_rx_ring *ring;
733 @@ -991,10 +1000,24 @@ static int mtk_poll_rx(struct napi_struc
734 break;
735
736 /* find out which mac the packet come from. values start at 1 */
737 +#if defined(CONFIG_NET_DSA)
738 + mac = (trxd.rxd4 >> 22) & 0x1;
739 + mac = (mac + 1) % 2;
740 +#else
741 mac = (trxd.rxd4 >> RX_DMA_FPORT_SHIFT) &
742 - RX_DMA_FPORT_MASK;
743 - mac--;
744 -
745 + RX_DMA_FPORT_MASK;
746 + /* From QDMA(5). This is a external interface case of HWNAT.
747 + * When the incoming frame comes from an external interface
748 + * rather than GMAC1/GMAC2, HWNAT driver sends the original
749 + * frame to PPE via PPD(ping pong device) for HWNAT RX
750 + * frame learning. After learning, PPE transmit the
751 + * original frame back to PPD again to run SW NAT path.
752 + */
753 + if (mac == 5)
754 + mac = 0;
755 + else
756 + mac--;
757 +#endif
758 if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT ||
759 !eth->netdev[mac]))
760 goto release_desc;
761 @@ -1044,6 +1067,7 @@ static int mtk_poll_rx(struct napi_struc
762 RX_DMA_VID(trxd.rxd3))
763 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
764 RX_DMA_VID(trxd.rxd3));
765 +
766 skb_record_rx_queue(skb, 0);
767 napi_gro_receive(napi, skb);
768
769 @@ -1128,7 +1152,7 @@ static int mtk_poll_tx(struct mtk_eth *e
770 }
771
772 if (mtk_queue_stopped(eth) &&
773 - (atomic_read(&ring->free_count) > ring->thresh))
774 + (atomic_read(&ring->free_count) > ring->thresh))
775 mtk_wake_queue(eth);
776
777 return total;
778 @@ -1220,11 +1244,14 @@ static int mtk_tx_alloc(struct mtk_eth *
779 if (!ring->buf)
780 goto no_tx_mem;
781
782 - ring->dma = dma_zalloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
783 - &ring->phys, GFP_ATOMIC);
784 + ring->dma = dma_alloc_coherent(eth->dev,
785 + MTK_DMA_SIZE * sz,
786 + &ring->phys,
787 + GFP_ATOMIC | __GFP_ZERO);
788 if (!ring->dma)
789 goto no_tx_mem;
790
791 + memset(ring->dma, 0, MTK_DMA_SIZE * sz);
792 for (i = 0; i < MTK_DMA_SIZE; i++) {
793 int next = (i + 1) % MTK_DMA_SIZE;
794 u32 next_ptr = ring->phys + next * sz;
795 @@ -1317,9 +1344,10 @@ static int mtk_rx_alloc(struct mtk_eth *
796 return -ENOMEM;
797 }
798
799 - ring->dma = dma_zalloc_coherent(eth->dev,
800 - rx_dma_size * sizeof(*ring->dma),
801 - &ring->phys, GFP_ATOMIC);
802 + ring->dma = dma_alloc_coherent(eth->dev,
803 + rx_dma_size * sizeof(*ring->dma),
804 + &ring->phys,
805 + GFP_ATOMIC | __GFP_ZERO);
806 if (!ring->dma)
807 return -ENOMEM;
808
809 @@ -1516,8 +1544,8 @@ static int mtk_hwlro_add_ipaddr(struct n
810 int hwlro_idx;
811
812 if ((fsp->flow_type != TCP_V4_FLOW) ||
813 - (!fsp->h_u.tcp_ip4_spec.ip4dst) ||
814 - (fsp->location > 1))
815 + (!fsp->h_u.tcp_ip4_spec.ip4dst) ||
816 + (fsp->location > 1))
817 return -EINVAL;
818
819 mac->hwlro_ip[fsp->location] = htonl(fsp->h_u.tcp_ip4_spec.ip4dst);
820 @@ -1744,6 +1772,34 @@ static void mtk_tx_timeout(struct net_de
821 schedule_work(&eth->pending_work);
822 }
823
824 +static irqreturn_t mtk_handle_irq_tx_rx(int irq, void *_eth)
825 +{
826 + struct mtk_eth *eth = _eth;
827 + u32 tx_status, rx_status;
828 +
829 + tx_status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
830 +
831 + if (tx_status & MTK_TX_DONE_INT) {
832 + if (likely(napi_schedule_prep(&eth->tx_napi))) {
833 + mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
834 + __napi_schedule(&eth->tx_napi);
835 + }
836 + mtk_w32(eth, tx_status, MTK_QMTK_INT_STATUS);
837 + }
838 +
839 + rx_status = mtk_r32(eth, MTK_PDMA_INT_STATUS);
840 +
841 + if (rx_status & MTK_RX_DONE_INT) {
842 + if (likely(napi_schedule_prep(&eth->rx_napi))) {
843 + mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
844 + __napi_schedule(&eth->rx_napi);
845 + }
846 + mtk_w32(eth, rx_status, MTK_PDMA_INT_STATUS);
847 + }
848 +
849 + return IRQ_HANDLED;
850 +}
851 +
852 static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
853 {
854 struct mtk_eth *eth = _eth;
855 @@ -1784,8 +1840,8 @@ static void mtk_poll_controller(struct n
856
857 static int mtk_start_dma(struct mtk_eth *eth)
858 {
859 - u32 rx_2b_offset = (NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0;
860 int err;
861 +	u32 rx_2b_offset = (NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0;
862  
863  	err = mtk_dma_init(eth);
864  	if (err) {
865 @@ -1801,7 +1857,7 @@ static int mtk_start_dma(struct mtk_eth
866  		MTK_QDMA_GLO_CFG);
867  
868  	mtk_w32(eth,
869 -		MTK_RX_DMA_EN | rx_2b_offset |
870 +		MTK_RX_DMA_EN | rx_2b_offset |
871 MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
872 MTK_PDMA_GLO_CFG);
873
874 @@ -1814,7 +1870,7 @@ static int mtk_open(struct net_device *d
875 struct mtk_eth *eth = mac->hw;
876
877 /* we run 2 netdevs on the same dma ring so we only bring it up once */
878 - if (!refcount_read(&eth->dma_refcnt)) {
879 + if (!atomic_read(&eth->dma_refcnt)) {
880 int err = mtk_start_dma(eth);
881
882 if (err)
883 @@ -1824,10 +1880,8 @@ static int mtk_open(struct net_device *d
884 napi_enable(&eth->rx_napi);
885 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
886 mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
887 - refcount_set(&eth->dma_refcnt, 1);
888 }
889 - else
890 - refcount_inc(&eth->dma_refcnt);
891 + atomic_inc(&eth->dma_refcnt);
892
893 phy_start(dev->phydev);
894 netif_start_queue(dev);
895 @@ -1867,7 +1921,7 @@ static int mtk_stop(struct net_device *d
896 phy_stop(dev->phydev);
897
898 /* only shutdown DMA if this is the last user */
899 - if (!refcount_dec_and_test(&eth->dma_refcnt))
900 + if (!atomic_dec_and_test(&eth->dma_refcnt))
901 return 0;
902
903 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
904 @@ -1973,14 +2027,16 @@ static int mtk_hw_init(struct mtk_eth *e
905 val = mtk_r32(eth, MTK_CDMQ_IG_CTRL);
906 mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL);
907
908 - /* Enable RX VLan Offloading */
909 - mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
910 + /* Disable RX VLan Offloading */
911 + mtk_w32(eth, 0, MTK_CDMP_EG_CTRL);
912 +
913 +#if defined(CONFIG_NET_DSA)
914 + mtk_w32(eth, 0x81000001, MTK_CDMP_IG_CTRL);
915 +#endif
916
917 - /* enable interrupt delay for RX */
918 - mtk_w32(eth, MTK_PDMA_DELAY_RX_DELAY, MTK_PDMA_DELAY_INT);
919 + mtk_w32(eth, 0x8f0f8f0f, MTK_PDMA_DELAY_INT);
920 + mtk_w32(eth, 0x8f0f8f0f, MTK_QDMA_DELAY_INT);
921
922 - /* disable delay and normal interrupt */
923 - mtk_w32(eth, 0, MTK_QDMA_DELAY_INT);
924 mtk_tx_irq_disable(eth, ~0);
925 mtk_rx_irq_disable(eth, ~0);
926 mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
927 @@ -2172,27 +2228,27 @@ static int mtk_cleanup(struct mtk_eth *e
928 }
929
930 static int mtk_get_link_ksettings(struct net_device *ndev,
931 - struct ethtool_link_ksettings *cmd)
932 + struct ethtool_link_ksettings *cmd)
933 {
934 - struct mtk_mac *mac = netdev_priv(ndev);
935 + struct mtk_mac *mac = netdev_priv(ndev);
936
937 - if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
938 - return -EBUSY;
939 + if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
940 + return -EBUSY;
941
942 - phy_ethtool_ksettings_get(ndev->phydev, cmd);
943 + phy_ethtool_ksettings_get(ndev->phydev, cmd);
944
945 - return 0;
946 + return 0;
947 }
948
949 static int mtk_set_link_ksettings(struct net_device *ndev,
950 - const struct ethtool_link_ksettings *cmd)
951 + const struct ethtool_link_ksettings *cmd)
952 {
953 - struct mtk_mac *mac = netdev_priv(ndev);
954 + struct mtk_mac *mac = netdev_priv(ndev);
955
956 - if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
957 - return -EBUSY;
958 + if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
959 + return -EBUSY;
960
961 - return phy_ethtool_ksettings_set(ndev->phydev, cmd);
962 + return phy_ethtool_ksettings_set(ndev->phydev, cmd);
963 }
964
965 static void mtk_get_drvinfo(struct net_device *dev,
966 @@ -2355,8 +2411,8 @@ static int mtk_set_rxnfc(struct net_devi
967 }
968
969 static const struct ethtool_ops mtk_ethtool_ops = {
970 - .get_link_ksettings = mtk_get_link_ksettings,
971 - .set_link_ksettings = mtk_set_link_ksettings,
972 + .get_link_ksettings = mtk_get_link_ksettings,
973 + .set_link_ksettings = mtk_set_link_ksettings,
974 .get_drvinfo = mtk_get_drvinfo,
975 .get_msglevel = mtk_get_msglevel,
976 .set_msglevel = mtk_set_msglevel,
977 @@ -2366,7 +2422,7 @@ static const struct ethtool_ops mtk_etht
978 .get_sset_count = mtk_get_sset_count,
979 .get_ethtool_stats = mtk_get_ethtool_stats,
980 .get_rxnfc = mtk_get_rxnfc,
981 - .set_rxnfc = mtk_set_rxnfc,
982 + .set_rxnfc = mtk_set_rxnfc,
983 };
984
985 static const struct net_device_ops mtk_netdev_ops = {
986 @@ -2463,6 +2519,7 @@ static int mtk_probe(struct platform_dev
987 {
988 struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
989 struct device_node *mac_np;
990 + const struct of_device_id *match;
991 struct mtk_eth *eth;
992 int err;
993 int i;
994 @@ -2471,7 +2528,8 @@ static int mtk_probe(struct platform_dev
995 if (!eth)
996 return -ENOMEM;
997
998 - eth->soc = of_device_get_match_data(&pdev->dev);
999 + match = of_match_device(of_mtk_match, &pdev->dev);
1000 + eth->soc = (struct mtk_soc_data *)match->data;
1001
1002 eth->dev = &pdev->dev;
1003 eth->base = devm_ioremap_resource(&pdev->dev, res);
1004 @@ -2489,26 +2547,37 @@ static int mtk_probe(struct platform_dev
1005 return PTR_ERR(eth->ethsys);
1006 }
1007
1008 - if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
1009 - eth->sgmiisys =
1010 - syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
1011 - "mediatek,sgmiisys");
1012 - if (IS_ERR(eth->sgmiisys)) {
1013 - dev_err(&pdev->dev, "no sgmiisys regmap found\n");
1014 - return PTR_ERR(eth->sgmiisys);
1015 + if (MTK_HAS_CAPS(eth->soc->caps, MTK_INFRA)) {
1016 + eth->infra = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
1017 + "mediatek,infracfg");
1018 + if (IS_ERR(eth->infra)) {
1019 + dev_info(&pdev->dev, "no infracfg regmap found\n");
1020 + return PTR_ERR(eth->infra);
1021 }
1022 }
1023
1024 + if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
1025 + eth->sgmii = devm_kzalloc(eth->dev, sizeof(*eth->sgmii),
1026 + GFP_KERNEL);
1027 + if (!eth->sgmii)
1028 + return -ENOMEM;
1029 +
1030 + err = mtk_sgmii_init(eth->sgmii, pdev->dev.of_node,
1031 + eth->soc->ana_rgc3);
1032 + if (err)
1033 + return err;
1034 + }
1035 +
1036 if (eth->soc->required_pctl) {
1037 eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
1038 "mediatek,pctl");
1039 if (IS_ERR(eth->pctl)) {
1040 - dev_err(&pdev->dev, "no pctl regmap found\n");
1041 + dev_info(&pdev->dev, "no pctl regmap found\n");
1042 return PTR_ERR(eth->pctl);
1043 }
1044 }
1045
1046 - for (i = 0; i < 3; i++) {
1047 + for (i = 0; i < eth->soc->irq_num; i++) {
1048 eth->irq[i] = platform_get_irq(pdev, i);
1049 if (eth->irq[i] < 0) {
1050 dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
1051 @@ -2552,15 +2621,22 @@ static int mtk_probe(struct platform_dev
1052 goto err_deinit_hw;
1053 }
1054
1055 - err = devm_request_irq(eth->dev, eth->irq[1], mtk_handle_irq_tx, 0,
1056 - dev_name(eth->dev), eth);
1057 - if (err)
1058 - goto err_free_dev;
1059 + if (eth->soc->irq_num > 1) {
1060 + err = devm_request_irq(eth->dev, eth->irq[1], mtk_handle_irq_tx, 0,
1061 + dev_name(eth->dev), eth);
1062 + if (err)
1063 + goto err_free_dev;
1064
1065 - err = devm_request_irq(eth->dev, eth->irq[2], mtk_handle_irq_rx, 0,
1066 - dev_name(eth->dev), eth);
1067 - if (err)
1068 - goto err_free_dev;
1069 + err = devm_request_irq(eth->dev, eth->irq[2], mtk_handle_irq_rx, 0,
1070 + dev_name(eth->dev), eth);
1071 + if (err)
1072 + goto err_free_dev;
1073 + } else {
1074 + err = devm_request_irq(eth->dev, eth->irq[0], mtk_handle_irq_tx_rx, 0,
1075 + dev_name(eth->dev), eth);
1076 + if (err)
1077 + goto err_free_dev;
1078 + }
1079
1080 err = mtk_mdio_init(eth);
1081 if (err)
1082 @@ -2626,27 +2702,48 @@ static int mtk_remove(struct platform_de
1083 }
1084
1085 static const struct mtk_soc_data mt2701_data = {
1086 - .caps = MTK_GMAC1_TRGMII | MTK_HWLRO,
1087 + .caps = MT7623_CAPS | MTK_HWLRO,
1088 .required_clks = MT7623_CLKS_BITMAP,
1089 .required_pctl = true,
1090 + .irq_num = 3,
1091 };
1092
1093 static const struct mtk_soc_data mt7622_data = {
1094 - .caps = MTK_DUAL_GMAC_SHARED_SGMII | MTK_GMAC1_ESW | MTK_HWLRO,
1095 + .ana_rgc3 = 0x2028,
1096 + .caps = MT7622_CAPS | MTK_HWLRO,
1097 .required_clks = MT7622_CLKS_BITMAP,
1098 .required_pctl = false,
1099 + .irq_num = 3,
1100 };
1101
1102 static const struct mtk_soc_data mt7623_data = {
1103 - .caps = MTK_GMAC1_TRGMII | MTK_HWLRO,
1104 + .caps = MT7623_CAPS | MTK_HWLRO,
1105 .required_clks = MT7623_CLKS_BITMAP,
1106 .required_pctl = true,
1107 + .irq_num = 3,
1108 +};
1109 +
1110 +static const struct mtk_soc_data leopard_data = {
1111 + .ana_rgc3 = 0x128,
1112 + .caps = LEOPARD_CAPS | MTK_HWLRO,
1113 + .required_clks = LEOPARD_CLKS_BITMAP,
1114 + .required_pctl = false,
1115 + .irq_num = 3,
1116 +};
1117 +
1118 +static const struct mtk_soc_data mt7621_data = {
1119 + .caps = MT7621_CAPS,
1120 + .required_clks = MT7621_CLKS_BITMAP,
1121 + .required_pctl = false,
1122 + .irq_num = 1,
1123 };
1124
1125 const struct of_device_id of_mtk_match[] = {
1126 { .compatible = "mediatek,mt2701-eth", .data = &mt2701_data},
1127 { .compatible = "mediatek,mt7622-eth", .data = &mt7622_data},
1128 { .compatible = "mediatek,mt7623-eth", .data = &mt7623_data},
1129 + { .compatible = "mediatek,mt7629-eth", .data = &leopard_data},
1130 + { .compatible = "mediatek,mt7621-eth", .data = &mt7621_data},
1131 {},
1132 };
1133 MODULE_DEVICE_TABLE(of, of_mtk_match);
1134 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
1135 +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
1136 @@ -15,13 +15,17 @@
1137 #ifndef MTK_ETH_H
1138 #define MTK_ETH_H
1139
1140 +#include <linux/dma-mapping.h>
1141 +#include <linux/netdevice.h>
1142 +#include <linux/of_net.h>
1143 +#include <linux/u64_stats_sync.h>
1144 #include <linux/refcount.h>
1145
1146 #define MTK_QDMA_PAGE_SIZE 2048
1147 #define MTK_MAX_RX_LENGTH 1536
1148 #define MTK_TX_DMA_BUF_LEN 0x3fff
1149 -#define MTK_DMA_SIZE 256
1150 -#define MTK_NAPI_WEIGHT 64
1151 +#define MTK_DMA_SIZE 2048
1152 +#define MTK_NAPI_WEIGHT 256
1153 #define MTK_MAC_COUNT 2
1154 #define MTK_RX_ETH_HLEN (VLAN_ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)
1155 #define MTK_RX_HLEN (NET_SKB_PAD + MTK_RX_ETH_HLEN + NET_IP_ALIGN)
1156 @@ -36,8 +40,6 @@
1157 NETIF_MSG_TX_ERR)
1158 #define MTK_HW_FEATURES (NETIF_F_IP_CSUM | \
1159 NETIF_F_RXCSUM | \
1160 - NETIF_F_HW_VLAN_CTAG_TX | \
1161 - NETIF_F_HW_VLAN_CTAG_RX | \
1162 NETIF_F_SG | NETIF_F_TSO | \
1163 NETIF_F_TSO6 | \
1164 NETIF_F_IPV6_CSUM)
1165 @@ -76,6 +78,9 @@
1166 #define MTK_CDMQ_IG_CTRL 0x1400
1167 #define MTK_CDMQ_STAG_EN BIT(0)
1168
1169 +/* CDMP Ingress Control Register */
1170 +#define MTK_CDMP_IG_CTRL 0x400
1171 +
1172 /* CDMP Exgress Control Register */
1173 #define MTK_CDMP_EG_CTRL 0x404
1174
1175 @@ -225,8 +230,9 @@
1176 #define MTK_TX_DONE_INT1 BIT(1)
1177 #define MTK_TX_DONE_INT0 BIT(0)
1178 #define MTK_RX_DONE_INT MTK_RX_DONE_DLY
1179 -#define MTK_TX_DONE_INT (MTK_TX_DONE_INT0 | MTK_TX_DONE_INT1 | \
1180 - MTK_TX_DONE_INT2 | MTK_TX_DONE_INT3)
1181 +#define MTK_TX_DONE_DLY BIT(28)
1182 +#define MTK_TX_DONE_INT MTK_TX_DONE_DLY
1183 +
1184
1185 /* QDMA Interrupt grouping registers */
1186 #define MTK_QDMA_INT_GRP1 0x1a20
1187 @@ -267,6 +273,12 @@
1188 #define MTK_GDM1_TX_GBCNT 0x2400
1189 #define MTK_STAT_OFFSET 0x40
1190
1191 +/* QDMA TX NUM */
1192 +#define MTK_QDMA_TX_NUM 16
1193 +#define MTK_QDMA_TX_MASK ((MTK_QDMA_TX_NUM / 2) - 1)
1194 +#define QID_LOW_BITS(x) ((x) & 0xf)
1195 +#define QID_HIGH_BITS(x) (((((x) >> 4) & 0x3) << 20) & GENMASK(21, 20))
1196 +
1197 /* QDMA descriptor txd4 */
1198 #define TX_DMA_CHKSUM (0x7 << 29)
1199 #define TX_DMA_TSO BIT(28)
1200 @@ -316,6 +328,8 @@
1201 #define MAC_MCR_RX_EN BIT(13)
1202 #define MAC_MCR_BACKOFF_EN BIT(9)
1203 #define MAC_MCR_BACKPR_EN BIT(8)
1204 +#define MAC_MCR_MDIO_EEE_1000T BIT(7)
1205 +#define MAC_MCR_MDIO_EEE_100TX BIT(6)
1206 #define MAC_MCR_FORCE_RX_FC BIT(5)
1207 #define MAC_MCR_FORCE_TX_FC BIT(4)
1208 #define MAC_MCR_SPEED_1000 BIT(3)
1209 @@ -368,9 +382,11 @@
1210 #define ETHSYS_SYSCFG0 0x14
1211 #define SYSCFG0_GE_MASK 0x3
1212 #define SYSCFG0_GE_MODE(x, y) (x << (12 + (y * 2)))
1213 -#define SYSCFG0_SGMII_MASK (3 << 8)
1214 -#define SYSCFG0_SGMII_GMAC1 ((2 << 8) & GENMASK(9, 8))
1215 -#define SYSCFG0_SGMII_GMAC2 ((3 << 8) & GENMASK(9, 8))
1216 +#define SYSCFG0_SGMII_MASK GENMASK(9, 8)
1217 +#define SYSCFG0_SGMII_GMAC1 ((2 << 8) & SYSCFG0_SGMII_MASK)
1218 +#define SYSCFG0_SGMII_GMAC2 ((3 << 8) & SYSCFG0_SGMII_MASK)
1219 +#define SYSCFG0_SGMII_GMAC1_V2 BIT(9)
1220 +#define SYSCFG0_SGMII_GMAC2_V2 BIT(8)
1221
1222 /* ethernet subsystem clock register */
1223 #define ETHSYS_CLKCFG0 0x2c
1224 @@ -398,6 +414,16 @@
1225 #define SGMSYS_QPHY_PWR_STATE_CTRL 0xe8
1226 #define SGMII_PHYA_PWD BIT(4)
1227
1228 +/* Infrasys subsystem config registers */
1229 +#define INFRA_MISC2 0x70c
1230 +#define CO_QPHY_SEL BIT(0)
1231 +#define GEPHY_MAC_SEL BIT(1)
1232 +
1233 +/* MDIO control */
1234 +#define MII_MMD_ACC_CTL_REG 0x0d
1235 +#define MII_MMD_ADDR_DATA_REG 0x0e
1236 +#define MMD_OP_MODE_DATA BIT(14)
1237 +
1238 struct mtk_rx_dma {
1239 unsigned int rxd1;
1240 unsigned int rxd2;
1241 @@ -462,15 +488,21 @@ enum mtk_tx_flags {
1242 */
1243 enum mtk_clks_map {
1244 MTK_CLK_ETHIF,
1245 + MTK_CLK_SGMIITOP,
1246 MTK_CLK_ESW,
1247 MTK_CLK_GP0,
1248 MTK_CLK_GP1,
1249 MTK_CLK_GP2,
1250 + MTK_CLK_FE,
1251 MTK_CLK_TRGPLL,
1252 MTK_CLK_SGMII_TX_250M,
1253 MTK_CLK_SGMII_RX_250M,
1254 MTK_CLK_SGMII_CDR_REF,
1255 MTK_CLK_SGMII_CDR_FB,
1256 + MTK_CLK_SGMII2_TX_250M,
1257 + MTK_CLK_SGMII2_RX_250M,
1258 + MTK_CLK_SGMII2_CDR_REF,
1259 + MTK_CLK_SGMII2_CDR_FB,
1260 MTK_CLK_SGMII_CK,
1261 MTK_CLK_ETH2PLL,
1262 MTK_CLK_MAX
1263 @@ -488,6 +520,22 @@ enum mtk_clks_map {
1264 BIT(MTK_CLK_SGMII_CDR_FB) | \
1265 BIT(MTK_CLK_SGMII_CK) | \
1266 BIT(MTK_CLK_ETH2PLL))
1267 +#define LEOPARD_CLKS_BITMAP (BIT(MTK_CLK_ETHIF) | BIT(MTK_CLK_ESW) | \
1268 + BIT(MTK_CLK_GP0) | BIT(MTK_CLK_GP1) | \
1269 + BIT(MTK_CLK_GP2) | BIT(MTK_CLK_FE) | \
1270 + BIT(MTK_CLK_SGMII_TX_250M) | \
1271 + BIT(MTK_CLK_SGMII_RX_250M) | \
1272 + BIT(MTK_CLK_SGMII_CDR_REF) | \
1273 + BIT(MTK_CLK_SGMII_CDR_FB) | \
1274 + BIT(MTK_CLK_SGMII2_TX_250M) | \
1275 + BIT(MTK_CLK_SGMII2_RX_250M) | \
1276 + BIT(MTK_CLK_SGMII2_CDR_REF) | \
1277 + BIT(MTK_CLK_SGMII2_CDR_FB) | \
1278 + BIT(MTK_CLK_SGMII_CK) | \
1279 + BIT(MTK_CLK_ETH2PLL) | BIT(MTK_CLK_SGMIITOP))
1280 +
1281 +#define MT7621_CLKS_BITMAP 0
1282 +
1283 enum mtk_dev_state {
1284 MTK_HW_INIT,
1285 MTK_RESETTING
1286 @@ -557,35 +605,149 @@ struct mtk_rx_ring {
1287 u32 crx_idx_reg;
1288 };
1289
1290 -#define MTK_TRGMII BIT(0)
1291 -#define MTK_GMAC1_TRGMII (BIT(1) | MTK_TRGMII)
1292 -#define MTK_ESW BIT(4)
1293 -#define MTK_GMAC1_ESW (BIT(5) | MTK_ESW)
1294 -#define MTK_SGMII BIT(8)
1295 -#define MTK_GMAC1_SGMII (BIT(9) | MTK_SGMII)
1296 -#define MTK_GMAC2_SGMII (BIT(10) | MTK_SGMII)
1297 -#define MTK_DUAL_GMAC_SHARED_SGMII (BIT(11) | MTK_GMAC1_SGMII | \
1298 - MTK_GMAC2_SGMII)
1299 +enum mtk_eth_mux {
1300 + MTK_ETH_MUX_GDM1_TO_GMAC1_ESW,
1301 + MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY,
1302 + MTK_ETH_MUX_U3_GMAC2_TO_QPHY,
1303 + MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII,
1304 + MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII,
1305 + MTK_ETH_MUX_MAX,
1306 +};
1307 +
1308 +enum mtk_eth_path {
1309 + MTK_ETH_PATH_GMAC1_RGMII,
1310 + MTK_ETH_PATH_GMAC1_TRGMII,
1311 + MTK_ETH_PATH_GMAC1_SGMII,
1312 + MTK_ETH_PATH_GMAC2_RGMII,
1313 + MTK_ETH_PATH_GMAC2_SGMII,
1314 + MTK_ETH_PATH_GMAC2_GEPHY,
1315 + MTK_ETH_PATH_GDM1_ESW,
1316 + MTK_ETH_PATH_MAX,
1317 +};
1318 +
1319 +/* Capability for function group */
1320 +#define MTK_RGMII BIT(0)
1321 +#define MTK_TRGMII BIT(1)
1322 +#define MTK_SGMII BIT(2)
1323 +#define MTK_ESW BIT(3)
1324 +#define MTK_GEPHY BIT(4)
1325 +#define MTK_MUX BIT(5)
1326 +#define MTK_INFRA BIT(6)
1327 +#define MTK_SHARED_SGMII BIT(7)
1328 +
1329 +/* Capability for features on SoCs */
1330 +#define MTK_PATH_BIT(x) BIT((x) + 10)
1331 +
1332 +#define MTK_GMAC1_RGMII \
1333 + (MTK_PATH_BIT(MTK_ETH_PATH_GMAC1_RGMII) | MTK_RGMII)
1334 +
1335 +#define MTK_GMAC1_TRGMII \
1336 + (MTK_PATH_BIT(MTK_ETH_PATH_GMAC1_TRGMII) | MTK_TRGMII)
1337 +
1338 +#define MTK_GMAC1_SGMII \
1339 + (MTK_PATH_BIT(MTK_ETH_PATH_GMAC1_SGMII) | MTK_SGMII)
1340 +
1341 +#define MTK_GMAC2_RGMII \
1342 + (MTK_PATH_BIT(MTK_ETH_PATH_GMAC2_RGMII) | MTK_RGMII)
1343 +
1344 +#define MTK_GMAC2_SGMII \
1345 + (MTK_PATH_BIT(MTK_ETH_PATH_GMAC2_SGMII) | MTK_SGMII)
1346 +
1347 +#define MTK_GMAC2_GEPHY \
1348 + (MTK_PATH_BIT(MTK_ETH_PATH_GMAC2_GEPHY) | MTK_GEPHY)
1349 +
1350 +#define MTK_GDM1_ESW \
1351 + (MTK_PATH_BIT(MTK_ETH_PATH_GDM1_ESW) | MTK_ESW)
1352 +
1353 +#define MTK_MUX_BIT(x) BIT((x) + 20)
1354 +
1355 +/* Capability for MUXes present on SoCs */
1356 +/* 0: GDM1 -> GMAC1, 1: GDM1 -> ESW */
1357 +#define MTK_MUX_GDM1_TO_GMAC1_ESW \
1358 + (MTK_MUX_BIT(MTK_ETH_MUX_GDM1_TO_GMAC1_ESW) | MTK_MUX)
1359 +
1360 +/* 0: GMAC2 -> GEPHY, 1: GMAC0 -> GePHY */
1361 +#define MTK_MUX_GMAC2_GMAC0_TO_GEPHY \
1362 + (MTK_MUX_BIT(MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY) | MTK_MUX | MTK_INFRA)
1363 +
1364 +/* 0: U3 -> QPHY, 1: GMAC2 -> QPHY */
1365 +#define MTK_MUX_U3_GMAC2_TO_QPHY \
1366 + (MTK_MUX_BIT(MTK_ETH_MUX_U3_GMAC2_TO_QPHY) | MTK_MUX | MTK_INFRA)
1367 +
1368 +/* 2: GMAC1 -> SGMII, 3: GMAC2 -> SGMII */
1369 +#define MTK_MUX_GMAC1_GMAC2_TO_SGMII_RGMII \
1370 + (MTK_MUX_BIT(MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII) | MTK_MUX | \
1371 + MTK_SHARED_SGMII)
1372 +
1373 +/* 0: GMACx -> GEPHY, 1: GMACx -> SGMII where x is 1 or 2 */
1374 +#define MTK_MUX_GMAC12_TO_GEPHY_SGMII \
1375 + (MTK_MUX_BIT(MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII) | MTK_MUX)
1376 +
1377 #define MTK_HWLRO BIT(12)
1378 +
1379 #define MTK_HAS_CAPS(caps, _x) (((caps) & (_x)) == (_x))
1380
1381 +#define MT7622_CAPS (MTK_GMAC1_RGMII | MTK_GMAC1_SGMII | MTK_GMAC2_RGMII | \
1382 + MTK_GMAC2_SGMII | MTK_GDM1_ESW | \
1383 + MTK_MUX_GDM1_TO_GMAC1_ESW | \
1384 + MTK_MUX_GMAC1_GMAC2_TO_SGMII_RGMII)
1385 +
1386 +#define MT7623_CAPS (MTK_GMAC1_RGMII | MTK_GMAC1_TRGMII | MTK_GMAC2_RGMII)
1387 +
1388 +#define LEOPARD_CAPS (MTK_GMAC1_SGMII | MTK_GMAC2_SGMII | MTK_GMAC2_GEPHY | \
1389 + MTK_GDM1_ESW | MTK_MUX_GDM1_TO_GMAC1_ESW | \
1390 + MTK_MUX_GMAC2_GMAC0_TO_GEPHY | \
1391 + MTK_MUX_U3_GMAC2_TO_QPHY | \
1392 + MTK_MUX_GMAC12_TO_GEPHY_SGMII)
1393 +
1394 +#define MT7621_CAPS (MTK_GMAC1_RGMII | MTK_GMAC1_TRGMII | MTK_GMAC2_RGMII)
1395 +
1396 /* struct mtk_eth_data - This is the structure holding all differences
1397 * among various plaforms
1398 + * @ana_rgc3: The offset for register ANA_RGC3 related to
1399 + * sgmiisys syscon
1400 * @caps Flags shown the extra capability for the SoC
1401 * @required_clks Flags shown the bitmap for required clocks on
1402 * the target SoC
1403 * @required_pctl A bool value to show whether the SoC requires
1404 * the extra setup for those pins used by GMAC.
1405 + * @irq_num total eth irq num support in target SoC
1406 */
1407 struct mtk_soc_data {
1408 + u32 ana_rgc3;
1409 u32 caps;
1410 u32 required_clks;
1411 bool required_pctl;
1412 + u32 irq_num;
1413 };
1414
1415 /* currently no SoC has more than 2 macs */
1416 #define MTK_MAX_DEVS 2
1417
1418 +struct mtk_eth_debug {
1419 + struct dentry *root;
1420 +};
1421 +
1422 +#define MTK_SGMII_PHYSPEED_AN BIT(31)
1423 +#define MTK_SGMII_PHYSPEED_MASK GENMASK(2, 0)
1424 +#define MTK_SGMII_PHYSPEED_1000 BIT(0)
1425 +#define MTK_SGMII_PHYSPEED_2500 BIT(1)
1426 +#define MTK_HAS_FLAGS(flags, _x) (((flags) & (_x)) == (_x))
1427 +
1428 +/* struct mtk_sgmii - This is the structure holding sgmii regmap and its
1429 + * characteristics
1430 + * @regmap: The register map pointing at the range used to setup
1431 + * SGMII modes
1432 + * @flags: The enum refers to which mode the sgmii wants to run on
1433 + * @ana_rgc3: The offset refers to register ANA_RGC3 related to regmap
1434 + */
1435 +
1436 +struct mtk_sgmii {
1437 + struct regmap *regmap[MTK_MAX_DEVS];
1438 + u32 flags[MTK_MAX_DEVS];
1439 + u32 ana_rgc3;
1440 +};
1441 +
1442 /* struct mtk_eth - This is the main datasructure for holding the state
1443 * of the driver
1444 * @dev: The device pointer
1445 @@ -601,14 +763,15 @@ struct mtk_soc_data {
1446 * @msg_enable: Ethtool msg level
1447 * @ethsys: The register map pointing at the range used to setup
1448 * MII modes
1449 - * @sgmiisys: The register map pointing at the range used to setup
1450 - * SGMII modes
1451 + * @infra: The register map pointing at the range used to setup
1452 + * SGMII and GePHY path
1453 * @pctl: The register map pointing at the range used to setup
1454 * GMAC port drive/slew values
1455 * @dma_refcnt: track how many netdevs are using the DMA engine
1456 * @tx_ring: Pointer to the memory holding info about the TX ring
1457 * @rx_ring: Pointer to the memory holding info about the RX ring
1458 - * @rx_ring_qdma: Pointer to the memory holding info about the QDMA RX ring
1459 + * @rx_ring_qdma: Pointer to the memory holding info about the QDMA RX
1460 + * ring
1461 * @tx_napi: The TX NAPI struct
1462 * @rx_napi: The RX NAPI struct
1463 * @scratch_ring: Newer SoCs need memory for a second HW managed TX ring
1464 @@ -619,13 +782,16 @@ struct mtk_soc_data {
1465 * @pending_work: The workqueue used to reset the dma ring
1466 * @state: Initialization and runtime state of the device
1467 * @soc: Holding specific data among vaious SoCs
1468 + * @debug: Holding specific data for mtk_eth_dbg usage.
1469 */
1470
1471 struct mtk_eth {
1472 struct device *dev;
1473 void __iomem *base;
1474 spinlock_t page_lock;
1475 + /* spin_lock for enable/disable tx irq critical section */
1476 spinlock_t tx_irq_lock;
1477 + /* spin_lock for enable/disable rx irq critical section */
1478 spinlock_t rx_irq_lock;
1479 struct net_device dummy_dev;
1480 struct net_device *netdev[MTK_MAX_DEVS];
1481 @@ -634,10 +800,11 @@ struct mtk_eth {
1482 u32 msg_enable;
1483 unsigned long sysclk;
1484 struct regmap *ethsys;
1485 - struct regmap *sgmiisys;
1486 + struct regmap *infra;
1487 + struct mtk_sgmii *sgmii;
1488 struct regmap *pctl;
1489 bool hwlro;
1490 - refcount_t dma_refcnt;
1491 + atomic_t dma_refcnt;
1492 struct mtk_tx_ring tx_ring;
1493 struct mtk_rx_ring rx_ring[MTK_MAX_RX_RING_NUM];
1494 struct mtk_rx_ring rx_ring_qdma;
1495 @@ -653,6 +820,7 @@ struct mtk_eth {
1496 unsigned long state;
1497
1498 const struct mtk_soc_data *soc;
1499 + struct mtk_eth_debug debug;
1500 };
1501
1502 /* struct mtk_mac - the structure that holds the info about the MACs of the
1503 @@ -664,6 +832,7 @@ struct mtk_eth {
1504 * @hw_stats: Packet statistics counter
1505 * @trgmii Indicate if the MAC uses TRGMII connected to internal
1506 switch
1507 + * @phy_dev: The attached PHY if available
1508 */
1509 struct mtk_mac {
1510 int id;
1511 @@ -674,6 +843,7 @@ struct mtk_mac {
1512 __be32 hwlro_ip[MTK_MAX_LRO_IP_CNT];
1513 int hwlro_ip_cnt;
1514 bool trgmii;
1515 + struct phy_device *phy_dev;
1516 };
1517
1518 /* the struct describing the SoC. these are declared in the soc_xyz.c files */
1519 @@ -685,4 +855,10 @@ void mtk_stats_update_mac(struct mtk_mac
1520 void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg);
1521 u32 mtk_r32(struct mtk_eth *eth, unsigned reg);
1522
1523 +int mtk_sgmii_init(struct mtk_sgmii *ss, struct device_node *np,
1524 + u32 ana_rgc3);
1525 +int mtk_sgmii_setup_mode_an(struct mtk_sgmii *ss, int id);
1526 +int mtk_sgmii_setup_mode_force(struct mtk_sgmii *ss, int id);
1527 +int mtk_setup_hw_path(struct mtk_eth *eth, int mac_id, int phymode);
1528 +
1529 #endif /* MTK_ETH_H */
1530 --- /dev/null
1531 +++ b/drivers/net/ethernet/mediatek/mtk_sgmii.c
1532 @@ -0,0 +1,114 @@
1533 +/*
1534 + * Copyright (C) 2018 MediaTek Inc.
1535 + *
1536 + * This program is free software; you can redistribute it and/or modify
1537 + * it under the terms of the GNU General Public License as published by
1538 + * the Free Software Foundation; version 2 of the License
1539 + *
1540 + * This program is distributed in the hope that it will be useful,
1541 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
1542 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1543 + * GNU General Public License for more details.
1544 + *
1545 + * Copyright (C) 2018 Sean Wang <sean.wang@mediatek.com>
1546 + */
1547 +
1548 +#include <linux/mfd/syscon.h>
1549 +#include <linux/of.h>
1550 +#include <linux/regmap.h>
1551 +
1552 +#include "mtk_eth_soc.h"
1553 +
1554 +int mtk_sgmii_init(struct mtk_sgmii *ss, struct device_node *r, u32 ana_rgc3)
1555 +{
1556 + struct device_node *np;
1557 + const char *str;
1558 + int i, err;
1559 +
1560 + ss->ana_rgc3 = ana_rgc3;
1561 +
1562 + for (i = 0; i < MTK_MAX_DEVS; i++) {
1563 + np = of_parse_phandle(r, "mediatek,sgmiisys", i);
1564 + if (!np)
1565 + break;
1566 +
1567 + ss->regmap[i] = syscon_node_to_regmap(np);
1568 + if (IS_ERR(ss->regmap[i]))
1569 + return PTR_ERR(ss->regmap[i]);
1570 +
1571 + err = of_property_read_string(np, "mediatek,physpeed", &str);
1572 + if (err)
1573 + return err;
1574 +
1575 + /* NOTE(review): dropped leftover developer debug pr_info() for the
1576 + * "2500" case; the speed flag is set in the chain below. */
1577 +
1578 + if (!strcmp(str, "2500"))
1579 + ss->flags[i] |= MTK_SGMII_PHYSPEED_2500;
1580 + else if (!strcmp(str, "1000"))
1581 + ss->flags[i] |= MTK_SGMII_PHYSPEED_1000;
1582 + else if (!strcmp(str, "auto"))
1583 + ss->flags[i] |= MTK_SGMII_PHYSPEED_AN;
1584 + else
1585 + return -EINVAL;
1586 + }
1587 +
1588 + return 0;
1589 +}
1590 +
1591 +int mtk_sgmii_setup_mode_an(struct mtk_sgmii *ss, int id)
1592 +{
1593 + unsigned int val;
1594 +
1595 + if (!ss->regmap[id])
1596 + return -EINVAL;
1597 +
1598 + /* Setup the link timer and QPHY power up inside SGMIISYS */
1599 + regmap_write(ss->regmap[id], SGMSYS_PCS_LINK_TIMER,
1600 + SGMII_LINK_TIMER_DEFAULT);
1601 +
1602 + regmap_read(ss->regmap[id], SGMSYS_SGMII_MODE, &val);
1603 + val |= SGMII_REMOTE_FAULT_DIS;
1604 + regmap_write(ss->regmap[id], SGMSYS_SGMII_MODE, val);
1605 +
1606 + regmap_read(ss->regmap[id], SGMSYS_PCS_CONTROL_1, &val);
1607 + val |= SGMII_AN_RESTART;
1608 + regmap_write(ss->regmap[id], SGMSYS_PCS_CONTROL_1, val);
1609 +
1610 + regmap_read(ss->regmap[id], SGMSYS_QPHY_PWR_STATE_CTRL, &val);
1611 + val &= ~SGMII_PHYA_PWD;
1612 + regmap_write(ss->regmap[id], SGMSYS_QPHY_PWR_STATE_CTRL, val);
1613 +
1614 + return 0;
1615 +}
1616 +
1617 +int mtk_sgmii_setup_mode_force(struct mtk_sgmii *ss, int id)
1618 +{
1619 + unsigned int val;
1620 + int mode;
1621 +
1622 + if (!ss->regmap[id])
1623 + return -EINVAL;
1624 +
1625 + regmap_read(ss->regmap[id], ss->ana_rgc3, &val);
1626 + val &= ~GENMASK(3, 2);
1627 + mode = ss->flags[id] & MTK_SGMII_PHYSPEED_MASK;
1628 + val |= (mode == MTK_SGMII_PHYSPEED_1000) ? 0 : BIT(2);
1629 + regmap_write(ss->regmap[id], ss->ana_rgc3, val);
1630 +
1631 + /* disable SGMII AN */
1632 + regmap_read(ss->regmap[id], SGMSYS_PCS_CONTROL_1, &val);
1633 + val &= ~BIT(12);
1634 + regmap_write(ss->regmap[id], SGMSYS_PCS_CONTROL_1, val);
1635 +
1636 + /* SGMII force mode setting */
1637 + val = 0x31120019;
1638 + regmap_write(ss->regmap[id], SGMSYS_SGMII_MODE, val);
1639 +
1640 + /* Release PHYA power down state */
1641 + regmap_read(ss->regmap[id], SGMSYS_QPHY_PWR_STATE_CTRL, &val);
1642 + val &= ~SGMII_PHYA_PWD;
1643 + regmap_write(ss->regmap[id], SGMSYS_QPHY_PWR_STATE_CTRL, val);
1644 +
1645 + return 0;
1646 +}