treewide: replace nbd@openwrt.org with nbd@nbd.name
1 From 8cc84aa65121135d7b120ce71b4f10f81230c818 Mon Sep 17 00:00:00 2001
2 From: John Crispin <blogic@openwrt.org>
3 Date: Wed, 2 Mar 2016 04:27:10 +0100
4 Subject: [PATCH 049/102] net-next: mediatek: add support for MT7623 ethernet
5
6 Add ethernet support for MediaTek SoCs from the MT7623 family. These have
7 two GMACs. Depending on the exact version, there may be a built-in
8 Gigabit switch (MT7530). The core does not use the typical DMA ring setup;
9 instead, descriptors are added to a linked list, and both MACs share that
10 single list. The TX descriptors carry a special field called the VQID,
11 which allows packets to be assigned to different internal queues. Using a
12 separate id for each MAC gives us deterministic results for BQL.
13 Additionally, we need to provide the core with a block of scratch memory
14 that is the same size as the RX ring and data buffer. This is required to
15 make the HW datapath work. Although the driver does not support that
16 datapath yet, we still need to assign the memory and tell the core about
17 it for RX to work.
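
The linked-list descriptor scheme described above can be pictured with a
minimal sketch (a simplified illustration; the field names mirror struct
mtk_tx_dma in the patch below, where txd2 holds the physical address of the
next descriptor):

	struct demo_txd {
		unsigned int txd1;	/* buffer physical address */
		unsigned int txd2;	/* physical address of the next descriptor */
		unsigned int txd3;	/* length and ownership bits */
		unsigned int txd4;	/* forward port / offload bits */
	};

	/* chain 'count' descriptors starting at physical address 'phys';
	 * the hardware follows txd2 instead of incrementing a ring index
	 */
	static void demo_chain(struct demo_txd *desc, unsigned int phys, int count)
	{
		int i;

		for (i = 0; i < count; i++)
			desc[i].txd2 = phys + ((i + 1) % count) * sizeof(*desc);
	}

mtk_tx_alloc() in the patch builds its TX chain the same way and then programs
the corresponding CPU and DMA pointer registers with the chain's physical
addresses.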
18
19 Signed-off-by: Felix Fietkau <nbd@nbd.name>
20 Signed-off-by: Michael Lee <igvtee@gmail.com>
21 Signed-off-by: John Crispin <blogic@openwrt.org>
22 ---
23 drivers/net/ethernet/mediatek/mtk_eth_soc.c | 1807 +++++++++++++++++++++++++++
24 drivers/net/ethernet/mediatek/mtk_eth_soc.h | 421 +++++++
25 2 files changed, 2228 insertions(+)
26 create mode 100644 drivers/net/ethernet/mediatek/mtk_eth_soc.c
27 create mode 100644 drivers/net/ethernet/mediatek/mtk_eth_soc.h
28
29 --- /dev/null
30 +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
31 @@ -0,0 +1,1807 @@
32 +/* This program is free software; you can redistribute it and/or modify
33 + * it under the terms of the GNU General Public License as published by
34 + * the Free Software Foundation; version 2 of the License
35 + *
36 + * This program is distributed in the hope that it will be useful,
37 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
38 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
39 + * GNU General Public License for more details.
40 + *
41 + * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
42 + * Copyright (C) 2009-2016 Felix Fietkau <nbd@nbd.name>
43 + * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
44 + */
45 +
46 +#include <linux/of_device.h>
47 +#include <linux/of_mdio.h>
48 +#include <linux/of_net.h>
49 +#include <linux/mfd/syscon.h>
50 +#include <linux/regmap.h>
51 +#include <linux/clk.h>
52 +#include <linux/if_vlan.h>
53 +#include <linux/reset.h>
54 +#include <linux/tcp.h>
55 +
56 +#include "mtk_eth_soc.h"
57 +
58 +static int mtk_msg_level = -1;
59 +module_param_named(msg_level, mtk_msg_level, int, 0);
60 +MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
61 +
62 +#define MTK_ETHTOOL_STAT(x) { #x, \
63 + offsetof(struct mtk_hw_stats, x) / sizeof(u64) }
64 +
65 +/* strings used by ethtool */
66 +static const struct mtk_ethtool_stats {
67 + char str[ETH_GSTRING_LEN];
68 + u32 offset;
69 +} mtk_ethtool_stats[] = {
70 + MTK_ETHTOOL_STAT(tx_bytes),
71 + MTK_ETHTOOL_STAT(tx_packets),
72 + MTK_ETHTOOL_STAT(tx_skip),
73 + MTK_ETHTOOL_STAT(tx_collisions),
74 + MTK_ETHTOOL_STAT(rx_bytes),
75 + MTK_ETHTOOL_STAT(rx_packets),
76 + MTK_ETHTOOL_STAT(rx_overflow),
77 + MTK_ETHTOOL_STAT(rx_fcs_errors),
78 + MTK_ETHTOOL_STAT(rx_short_errors),
79 + MTK_ETHTOOL_STAT(rx_long_errors),
80 + MTK_ETHTOOL_STAT(rx_checksum_errors),
81 + MTK_ETHTOOL_STAT(rx_flow_control_packets),
82 +};
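/* note: each entry above stores the counter's offset inside struct
 * mtk_hw_stats in units of u64 words; mtk_get_ethtool_stats() further down
 * uses these offsets to copy the counters out of the stats structure
 */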
83 +
84 +void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
85 +{
86 + __raw_writel(val, eth->base + reg);
87 +}
88 +
89 +u32 mtk_r32(struct mtk_eth *eth, unsigned reg)
90 +{
91 + return __raw_readl(eth->base + reg);
92 +}
93 +
94 +static int mtk_mdio_busy_wait(struct mtk_eth *eth)
95 +{
96 + unsigned long t_start = jiffies;
97 +
98 + while (1) {
99 + if (!(mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS))
100 + return 0;
101 + if (time_after(jiffies, t_start + PHY_IAC_TIMEOUT))
102 + break;
103 + usleep_range(10, 20);
104 + }
105 +
106 + dev_err(eth->dev, "mdio: MDIO timeout\n");
107 + return -1;
108 +}
109 +
110 +u32 _mtk_mdio_write(struct mtk_eth *eth, u32 phy_addr,
111 + u32 phy_register, u32 write_data)
112 +{
113 + if (mtk_mdio_busy_wait(eth))
114 + return -1;
115 +
116 + write_data &= 0xffff;
117 +
118 + mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_WRITE |
119 + (phy_register << PHY_IAC_REG_SHIFT) |
120 + (phy_addr << PHY_IAC_ADDR_SHIFT) | write_data,
121 + MTK_PHY_IAC);
122 +
123 + if (mtk_mdio_busy_wait(eth))
124 + return -1;
125 +
126 + return 0;
127 +}
128 +
129 +u32 _mtk_mdio_read(struct mtk_eth *eth, int phy_addr, int phy_reg)
130 +{
131 + u32 d;
132 +
133 + if (mtk_mdio_busy_wait(eth))
134 + return 0xffff;
135 +
136 + mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_READ |
137 + (phy_reg << PHY_IAC_REG_SHIFT) |
138 + (phy_addr << PHY_IAC_ADDR_SHIFT),
139 + MTK_PHY_IAC);
140 +
141 + if (mtk_mdio_busy_wait(eth))
142 + return 0xffff;
143 +
144 + d = mtk_r32(eth, MTK_PHY_IAC) & 0xffff;
145 +
146 + return d;
147 +}
148 +
149 +static int mtk_mdio_write(struct mii_bus *bus, int phy_addr,
150 + int phy_reg, u16 val)
151 +{
152 + struct mtk_eth *eth = bus->priv;
153 +
154 + return _mtk_mdio_write(eth, phy_addr, phy_reg, val);
155 +}
156 +
157 +static int mtk_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg)
158 +{
159 + struct mtk_eth *eth = bus->priv;
160 +
161 + return _mtk_mdio_read(eth, phy_addr, phy_reg);
162 +}
163 +
164 +static void mtk_phy_link_adjust(struct net_device *dev)
165 +{
166 + struct mtk_mac *mac = netdev_priv(dev);
167 + u32 mcr = MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG |
168 + MAC_MCR_FORCE_MODE | MAC_MCR_TX_EN |
169 + MAC_MCR_RX_EN | MAC_MCR_BACKOFF_EN |
170 + MAC_MCR_BACKPR_EN;
171 +
172 + switch (mac->phy_dev->speed) {
173 + case SPEED_1000:
174 + mcr |= MAC_MCR_SPEED_1000;
175 + break;
176 + case SPEED_100:
177 + mcr |= MAC_MCR_SPEED_100;
178 + break;
179 +	}
180 +
181 + if (mac->phy_dev->link)
182 + mcr |= MAC_MCR_FORCE_LINK;
183 +
184 + if (mac->phy_dev->duplex)
185 + mcr |= MAC_MCR_FORCE_DPX;
186 +
187 + if (mac->phy_dev->pause)
188 + mcr |= MAC_MCR_FORCE_RX_FC | MAC_MCR_FORCE_TX_FC;
189 +
190 + mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
191 +
192 + if (mac->phy_dev->link)
193 + netif_carrier_on(dev);
194 + else
195 + netif_carrier_off(dev);
196 +}
197 +
198 +static int mtk_phy_connect_node(struct mtk_eth *eth, struct mtk_mac *mac,
199 + struct device_node *phy_node)
200 +{
201 + const __be32 *_addr = NULL;
202 + struct phy_device *phydev;
203 + int phy_mode, addr;
204 +
205 + _addr = of_get_property(phy_node, "reg", NULL);
206 +
207 + if (!_addr || (be32_to_cpu(*_addr) >= 0x20)) {
208 + pr_err("%s: invalid phy address\n", phy_node->name);
209 + return -EINVAL;
210 + }
211 + addr = be32_to_cpu(*_addr);
212 + phy_mode = of_get_phy_mode(phy_node);
213 + if (phy_mode < 0) {
214 + dev_err(eth->dev, "incorrect phy-mode %d\n", phy_mode);
215 + return -EINVAL;
216 + }
217 +
218 + phydev = of_phy_connect(eth->netdev[mac->id], phy_node,
219 + mtk_phy_link_adjust, 0, phy_mode);
220 + if (IS_ERR(phydev)) {
221 + dev_err(eth->dev, "could not connect to PHY\n");
222 + return PTR_ERR(phydev);
223 + }
224 +
225 + dev_info(eth->dev,
226 + "connected mac %d to PHY at %s [uid=%08x, driver=%s]\n",
227 + mac->id, phydev_name(phydev), phydev->phy_id,
228 + phydev->drv->name);
229 +
230 + mac->phy_dev = phydev;
231 +
232 + return 0;
233 +}
234 +
235 +static int mtk_phy_connect(struct mtk_mac *mac)
236 +{
237 + struct mtk_eth *eth = mac->hw;
238 + struct device_node *np;
239 + u32 val, ge_mode;
240 +
241 + np = of_parse_phandle(mac->of_node, "phy-handle", 0);
242 + if (!np)
243 + return -ENODEV;
244 +
245 + switch (of_get_phy_mode(np)) {
246 + case PHY_INTERFACE_MODE_RGMII:
247 + ge_mode = 0;
248 + break;
249 + case PHY_INTERFACE_MODE_MII:
250 + ge_mode = 1;
251 + break;
252 + case PHY_INTERFACE_MODE_RMII:
253 + ge_mode = 2;
254 + break;
255 + default:
256 + dev_err(eth->dev, "invalid phy_mode\n");
257 + return -1;
258 + }
259 +
260 + /* put the gmac into the right mode */
261 + regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
262 + val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id);
263 + val |= SYSCFG0_GE_MODE(ge_mode, mac->id);
264 + regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
265 +
266 + mtk_phy_connect_node(eth, mac, np);
267 + mac->phy_dev->autoneg = AUTONEG_ENABLE;
268 + mac->phy_dev->speed = 0;
269 + mac->phy_dev->duplex = 0;
270 + mac->phy_dev->supported &= PHY_BASIC_FEATURES;
271 + mac->phy_dev->advertising = mac->phy_dev->supported |
272 + ADVERTISED_Autoneg;
273 + phy_start_aneg(mac->phy_dev);
274 +
275 + return 0;
276 +}
277 +
278 +static int mtk_mdio_init(struct mtk_eth *eth)
279 +{
280 + struct device_node *mii_np;
281 + int err;
282 +
283 + mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
284 + if (!mii_np) {
285 + dev_err(eth->dev, "no %s child node found", "mdio-bus");
286 + return -ENODEV;
287 + }
288 +
289 + if (!of_device_is_available(mii_np)) {
290 + err = 0;
291 + goto err_put_node;
292 + }
293 +
294 + eth->mii_bus = mdiobus_alloc();
295 + if (!eth->mii_bus) {
296 + err = -ENOMEM;
297 + goto err_put_node;
298 + }
299 +
300 + eth->mii_bus->name = "mdio";
301 + eth->mii_bus->read = mtk_mdio_read;
302 + eth->mii_bus->write = mtk_mdio_write;
303 + eth->mii_bus->priv = eth;
304 + eth->mii_bus->parent = eth->dev;
305 +
306 + snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%s", mii_np->name);
307 + err = of_mdiobus_register(eth->mii_bus, mii_np);
308 + if (err)
309 + goto err_free_bus;
310 +
311 + return 0;
312 +
313 +err_free_bus:
314 + kfree(eth->mii_bus);
315 +
316 +err_put_node:
317 + of_node_put(mii_np);
318 + eth->mii_bus = NULL;
319 + return err;
320 +}
321 +
322 +static void mtk_mdio_cleanup(struct mtk_eth *eth)
323 +{
324 + if (!eth->mii_bus)
325 + return;
326 +
327 + mdiobus_unregister(eth->mii_bus);
328 + of_node_put(eth->mii_bus->dev.of_node);
329 + kfree(eth->mii_bus);
330 +}
331 +
332 +static inline void mtk_irq_disable(struct mtk_eth *eth, u32 mask)
333 +{
334 + u32 val;
335 +
336 + val = mtk_r32(eth, MTK_QDMA_INT_MASK);
337 + mtk_w32(eth, val & ~mask, MTK_QDMA_INT_MASK);
338 + /* flush write */
339 + mtk_r32(eth, MTK_QDMA_INT_MASK);
340 +}
341 +
342 +static inline void mtk_irq_enable(struct mtk_eth *eth, u32 mask)
343 +{
344 + u32 val;
345 +
346 + val = mtk_r32(eth, MTK_QDMA_INT_MASK);
347 + mtk_w32(eth, val | mask, MTK_QDMA_INT_MASK);
348 + /* flush write */
349 + mtk_r32(eth, MTK_QDMA_INT_MASK);
350 +}
351 +
352 +static int mtk_set_mac_address(struct net_device *dev, void *p)
353 +{
354 + int ret = eth_mac_addr(dev, p);
355 + struct mtk_mac *mac = netdev_priv(dev);
356 + const char *macaddr = dev->dev_addr;
357 + unsigned long flags;
358 +
359 + if (ret)
360 + return ret;
361 +
362 + spin_lock_irqsave(&mac->hw->page_lock, flags);
363 + mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
364 + MTK_GDMA_MAC_ADRH(mac->id));
365 + mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
366 + (macaddr[4] << 8) | macaddr[5],
367 + MTK_GDMA_MAC_ADRL(mac->id));
368 + spin_unlock_irqrestore(&mac->hw->page_lock, flags);
369 +
370 + return 0;
371 +}
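/* worked example: for the MAC address 00:11:22:33:44:55 the two writes above
 * set MTK_GDMA_MAC_ADRH to 0x00000011 and MTK_GDMA_MAC_ADRL to 0x22334455
 */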
372 +
373 +void mtk_stats_update_mac(struct mtk_mac *mac)
374 +{
375 + struct mtk_hw_stats *hw_stats = mac->hw_stats;
376 + unsigned int base = MTK_GDM1_TX_GBCNT;
377 + u64 stats;
378 +
379 + base += hw_stats->reg_offset;
380 +
381 + u64_stats_update_begin(&hw_stats->syncp);
382 +
383 + hw_stats->rx_bytes += mtk_r32(mac->hw, base);
384 + stats = mtk_r32(mac->hw, base + 0x04);
385 + if (stats)
386 + hw_stats->rx_bytes += (stats << 32);
387 + hw_stats->rx_packets += mtk_r32(mac->hw, base + 0x08);
388 + hw_stats->rx_overflow += mtk_r32(mac->hw, base + 0x10);
389 + hw_stats->rx_fcs_errors += mtk_r32(mac->hw, base + 0x14);
390 + hw_stats->rx_short_errors += mtk_r32(mac->hw, base + 0x18);
391 + hw_stats->rx_long_errors += mtk_r32(mac->hw, base + 0x1c);
392 + hw_stats->rx_checksum_errors += mtk_r32(mac->hw, base + 0x20);
393 + hw_stats->rx_flow_control_packets +=
394 + mtk_r32(mac->hw, base + 0x24);
395 + hw_stats->tx_skip += mtk_r32(mac->hw, base + 0x28);
396 + hw_stats->tx_collisions += mtk_r32(mac->hw, base + 0x2c);
397 + hw_stats->tx_bytes += mtk_r32(mac->hw, base + 0x30);
398 + stats = mtk_r32(mac->hw, base + 0x34);
399 + if (stats)
400 + hw_stats->tx_bytes += (stats << 32);
401 + hw_stats->tx_packets += mtk_r32(mac->hw, base + 0x38);
402 + u64_stats_update_end(&hw_stats->syncp);
403 +}
404 +
405 +static void mtk_stats_update(struct mtk_eth *eth)
406 +{
407 + int i;
408 +
409 + for (i = 0; i < MTK_MAC_COUNT; i++) {
410 + if (!eth->mac[i] || !eth->mac[i]->hw_stats)
411 + continue;
412 + if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) {
413 + mtk_stats_update_mac(eth->mac[i]);
414 + spin_unlock(&eth->mac[i]->hw_stats->stats_lock);
415 + }
416 + }
417 +}
418 +
419 +static struct rtnl_link_stats64 *mtk_get_stats64(struct net_device *dev,
420 + struct rtnl_link_stats64 *storage)
421 +{
422 + struct mtk_mac *mac = netdev_priv(dev);
423 + struct mtk_hw_stats *hw_stats = mac->hw_stats;
424 + unsigned int start;
425 +
426 + if (netif_running(dev) && netif_device_present(dev)) {
427 + if (spin_trylock(&hw_stats->stats_lock)) {
428 + mtk_stats_update_mac(mac);
429 + spin_unlock(&hw_stats->stats_lock);
430 + }
431 + }
432 +
433 + do {
434 + start = u64_stats_fetch_begin_irq(&hw_stats->syncp);
435 + storage->rx_packets = hw_stats->rx_packets;
436 + storage->tx_packets = hw_stats->tx_packets;
437 + storage->rx_bytes = hw_stats->rx_bytes;
438 + storage->tx_bytes = hw_stats->tx_bytes;
439 + storage->collisions = hw_stats->tx_collisions;
440 + storage->rx_length_errors = hw_stats->rx_short_errors +
441 + hw_stats->rx_long_errors;
442 + storage->rx_over_errors = hw_stats->rx_overflow;
443 + storage->rx_crc_errors = hw_stats->rx_fcs_errors;
444 + storage->rx_errors = hw_stats->rx_checksum_errors;
445 + storage->tx_aborted_errors = hw_stats->tx_skip;
446 + } while (u64_stats_fetch_retry_irq(&hw_stats->syncp, start));
447 +
448 + storage->tx_errors = dev->stats.tx_errors;
449 + storage->rx_dropped = dev->stats.rx_dropped;
450 + storage->tx_dropped = dev->stats.tx_dropped;
451 +
452 + return storage;
453 +}
454 +
455 +static inline int mtk_max_frag_size(int mtu)
456 +{
457 + /* make sure buf_size will be at least MTK_MAX_RX_LENGTH */
458 + if (mtu + MTK_RX_ETH_HLEN < MTK_MAX_RX_LENGTH)
459 + mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;
460 +
461 + return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) +
462 + SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
463 +}
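/* example: with the default 1500 byte MTU, mtu is first raised to
 * MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN so that the allocated fragment can
 * always hold a full MTK_MAX_RX_LENGTH frame plus padding and shared info
 */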
464 +
465 +static inline int mtk_max_buf_size(int frag_size)
466 +{
467 + int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN -
468 + SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
469 +
470 + WARN_ON(buf_size < MTK_MAX_RX_LENGTH);
471 +
472 + return buf_size;
473 +}
474 +
475 +static inline void mtk_rx_get_desc(struct mtk_rx_dma *rxd,
476 + struct mtk_rx_dma *dma_rxd)
477 +{
478 + rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
479 + rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
480 + rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
481 + rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
482 +}
483 +
484 +/* the qdma core needs scratch memory to be set up */
485 +static int mtk_init_fq_dma(struct mtk_eth *eth)
486 +{
487 + unsigned int phy_ring_head, phy_ring_tail;
488 + int cnt = MTK_DMA_SIZE;
489 + dma_addr_t dma_addr;
490 + int i;
491 +
492 + eth->scratch_ring = dma_alloc_coherent(eth->dev,
493 + cnt * sizeof(struct mtk_tx_dma),
494 + &phy_ring_head,
495 + GFP_ATOMIC | __GFP_ZERO);
496 + if (unlikely(!eth->scratch_ring))
497 + return -ENOMEM;
498 +
499 + eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE,
500 + GFP_KERNEL);
501 + dma_addr = dma_map_single(eth->dev,
502 + eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
503 + DMA_FROM_DEVICE);
504 + if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
505 + return -ENOMEM;
506 +
507 + memset(eth->scratch_ring, 0x0, sizeof(struct mtk_tx_dma) * cnt);
508 + phy_ring_tail = phy_ring_head +
509 + (sizeof(struct mtk_tx_dma) * (cnt - 1));
510 +
511 + for (i = 0; i < cnt; i++) {
512 + eth->scratch_ring[i].txd1 =
513 + (dma_addr + (i * MTK_QDMA_PAGE_SIZE));
514 + if (i < cnt - 1)
515 + eth->scratch_ring[i].txd2 = (phy_ring_head +
516 + ((i + 1) * sizeof(struct mtk_tx_dma)));
517 + eth->scratch_ring[i].txd3 = TX_DMA_SDL(MTK_QDMA_PAGE_SIZE);
518 + }
519 +
520 + mtk_w32(eth, phy_ring_head, MTK_QDMA_FQ_HEAD);
521 + mtk_w32(eth, phy_ring_tail, MTK_QDMA_FQ_TAIL);
522 + mtk_w32(eth, (cnt << 16) | cnt, MTK_QDMA_FQ_CNT);
523 + mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, MTK_QDMA_FQ_BLEN);
524 +
525 + return 0;
526 +}
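/* mtk_init_fq_dma() leaves the hardware free queue as MTK_DMA_SIZE
 * descriptors, each owning one MTK_QDMA_PAGE_SIZE chunk of eth->scratch_head
 * and linked to the next descriptor via txd2; the head and tail physical
 * addresses of the chain are programmed into MTK_QDMA_FQ_HEAD/FQ_TAIL
 */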
527 +
528 +static inline void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
529 +{
530 + void *ret = ring->dma;
531 +
532 + return ret + (desc - ring->phys);
533 +}
534 +
535 +static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
536 + struct mtk_tx_dma *txd)
537 +{
538 + int idx = txd - ring->dma;
539 +
540 + return &ring->buf[idx];
541 +}
542 +
543 +static void mtk_tx_unmap(struct device *dev, struct mtk_tx_buf *tx_buf)
544 +{
545 + if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
546 + dma_unmap_single(dev,
547 + dma_unmap_addr(tx_buf, dma_addr0),
548 + dma_unmap_len(tx_buf, dma_len0),
549 + DMA_TO_DEVICE);
550 + } else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
551 + dma_unmap_page(dev,
552 + dma_unmap_addr(tx_buf, dma_addr0),
553 + dma_unmap_len(tx_buf, dma_len0),
554 + DMA_TO_DEVICE);
555 + }
556 + tx_buf->flags = 0;
557 + if (tx_buf->skb &&
558 + (tx_buf->skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC))
559 + dev_kfree_skb_any(tx_buf->skb);
560 + tx_buf->skb = NULL;
561 +}
562 +
563 +static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
564 + int tx_num, struct mtk_tx_ring *ring, bool gso)
565 +{
566 + struct mtk_mac *mac = netdev_priv(dev);
567 + struct mtk_eth *eth = mac->hw;
568 + struct mtk_tx_dma *itxd, *txd;
569 + struct mtk_tx_buf *tx_buf;
570 + unsigned long flags;
571 + dma_addr_t mapped_addr;
572 + unsigned int nr_frags;
573 + int i, n_desc = 1;
574 + u32 txd4 = 0;
575 +
576 + itxd = ring->next_free;
577 + if (itxd == ring->last_free)
578 + return -ENOMEM;
579 +
580 + /* set the forward port */
581 + txd4 |= (mac->id + 1) << TX_DMA_FPORT_SHIFT;
582 +
583 + tx_buf = mtk_desc_to_tx_buf(ring, itxd);
584 + memset(tx_buf, 0, sizeof(*tx_buf));
585 +
586 + if (gso)
587 + txd4 |= TX_DMA_TSO;
588 +
589 + /* TX Checksum offload */
590 + if (skb->ip_summed == CHECKSUM_PARTIAL)
591 + txd4 |= TX_DMA_CHKSUM;
592 +
593 + /* VLAN header offload */
594 + if (skb_vlan_tag_present(skb))
595 + txd4 |= TX_DMA_INS_VLAN | skb_vlan_tag_get(skb);
596 +
597 + mapped_addr = dma_map_single(&dev->dev, skb->data,
598 + skb_headlen(skb), DMA_TO_DEVICE);
599 + if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
600 + return -ENOMEM;
601 +
602 + /* normally we can rely on the stack not calling this more than once,
603 +	 * however we have 2 queues running on the same ring so we need to lock
604 + * the ring access
605 + */
606 + spin_lock_irqsave(&eth->page_lock, flags);
607 + WRITE_ONCE(itxd->txd1, mapped_addr);
608 + tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
609 + dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
610 + dma_unmap_len_set(tx_buf, dma_len0, skb_headlen(skb));
611 +
612 + /* TX SG offload */
613 + txd = itxd;
614 + nr_frags = skb_shinfo(skb)->nr_frags;
615 + for (i = 0; i < nr_frags; i++) {
616 + struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
617 + unsigned int offset = 0;
618 + int frag_size = skb_frag_size(frag);
619 +
620 + while (frag_size) {
621 + bool last_frag = false;
622 + unsigned int frag_map_size;
623 +
624 + txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
625 + if (txd == ring->last_free)
626 + goto err_dma;
627 +
628 + n_desc++;
629 + frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN);
630 + mapped_addr = skb_frag_dma_map(&dev->dev, frag, offset,
631 + frag_map_size,
632 + DMA_TO_DEVICE);
633 + if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
634 + goto err_dma;
635 +
636 + if (i == nr_frags - 1 &&
637 + (frag_size - frag_map_size) == 0)
638 + last_frag = true;
639 +
640 + WRITE_ONCE(txd->txd1, mapped_addr);
641 + WRITE_ONCE(txd->txd3, (TX_DMA_SWC |
642 + TX_DMA_PLEN0(frag_map_size) |
643 + last_frag * TX_DMA_LS0) |
644 + mac->id);
645 + WRITE_ONCE(txd->txd4, 0);
646 +
647 + tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
648 + tx_buf = mtk_desc_to_tx_buf(ring, txd);
649 + memset(tx_buf, 0, sizeof(*tx_buf));
650 +
651 + tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
652 + dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
653 + dma_unmap_len_set(tx_buf, dma_len0, frag_map_size);
654 + frag_size -= frag_map_size;
655 + offset += frag_map_size;
656 + }
657 + }
658 +
659 + /* store skb to cleanup */
660 + tx_buf->skb = skb;
661 +
662 + WRITE_ONCE(itxd->txd4, txd4);
663 + WRITE_ONCE(itxd->txd3, (TX_DMA_SWC | TX_DMA_PLEN0(skb_headlen(skb)) |
664 + (!nr_frags * TX_DMA_LS0)));
665 +
666 + spin_unlock_irqrestore(&eth->page_lock, flags);
667 +
668 + netdev_sent_queue(dev, skb->len);
669 + skb_tx_timestamp(skb);
670 +
671 + ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
672 + atomic_sub(n_desc, &ring->free_count);
673 +
674 + /* make sure that all changes to the dma ring are flushed before we
675 + * continue
676 + */
677 + wmb();
678 +
679 + if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) || !skb->xmit_more)
680 + mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR);
681 +
682 + return 0;
683 +
684 +err_dma:
685 + do {
686 + tx_buf = mtk_desc_to_tx_buf(ring, txd);
687 +
688 + /* unmap dma */
689 + mtk_tx_unmap(&dev->dev, tx_buf);
690 +
691 + itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
692 + itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
693 + } while (itxd != txd);
694 +
695 + return -ENOMEM;
696 +}
697 +
698 +static inline int mtk_cal_txd_req(struct sk_buff *skb)
699 +{
700 + int i, nfrags;
701 + struct skb_frag_struct *frag;
702 +
703 + nfrags = 1;
704 + if (skb_is_gso(skb)) {
705 + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
706 + frag = &skb_shinfo(skb)->frags[i];
707 + nfrags += DIV_ROUND_UP(frag->size, MTK_TX_DMA_BUF_LEN);
708 + }
709 + } else {
710 + nfrags += skb_shinfo(skb)->nr_frags;
711 + }
712 +
713 + return DIV_ROUND_UP(nfrags, 2);
714 +}
715 +
716 +static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
717 +{
718 + struct mtk_mac *mac = netdev_priv(dev);
719 + struct mtk_eth *eth = mac->hw;
720 + struct mtk_tx_ring *ring = &eth->tx_ring;
721 + struct net_device_stats *stats = &dev->stats;
722 + bool gso = false;
723 + int tx_num;
724 +
725 + tx_num = mtk_cal_txd_req(skb);
726 + if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
727 + netif_stop_queue(dev);
728 + netif_err(eth, tx_queued, dev,
729 + "Tx Ring full when queue awake!\n");
730 + return NETDEV_TX_BUSY;
731 + }
732 +
733 + /* TSO: fill MSS info in tcp checksum field */
734 + if (skb_is_gso(skb)) {
735 + if (skb_cow_head(skb, 0)) {
736 + netif_warn(eth, tx_err, dev,
737 + "GSO expand head fail.\n");
738 + goto drop;
739 + }
740 +
741 + if (skb_shinfo(skb)->gso_type &
742 + (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
743 + gso = true;
744 + tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size);
745 + }
746 + }
747 +
748 + if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0)
749 + goto drop;
750 +
751 + if (unlikely(atomic_read(&ring->free_count) <= ring->thresh)) {
752 + netif_stop_queue(dev);
753 + if (unlikely(atomic_read(&ring->free_count) >
754 + ring->thresh))
755 + netif_wake_queue(dev);
756 + }
757 +
758 + return NETDEV_TX_OK;
759 +
760 +drop:
761 + stats->tx_dropped++;
762 + dev_kfree_skb(skb);
763 + return NETDEV_TX_OK;
764 +}
765 +
766 +static int mtk_poll_rx(struct napi_struct *napi, int budget,
767 + struct mtk_eth *eth, u32 rx_intr)
768 +{
769 + struct mtk_rx_ring *ring = &eth->rx_ring;
770 + int idx = ring->calc_idx;
771 + struct sk_buff *skb;
772 + u8 *data, *new_data;
773 + struct mtk_rx_dma *rxd, trxd;
774 + int done = 0;
775 +
776 + while (done < budget) {
777 + struct net_device *netdev;
778 + unsigned int pktlen;
779 + dma_addr_t dma_addr;
780 + int mac = 0;
781 +
782 + idx = NEXT_RX_DESP_IDX(idx);
783 + rxd = &ring->dma[idx];
784 + data = ring->data[idx];
785 +
786 + mtk_rx_get_desc(&trxd, rxd);
787 + if (!(trxd.rxd2 & RX_DMA_DONE))
788 + break;
789 +
790 + /* find out which mac the packet come from. values start at 1 */
791 + mac = (trxd.rxd4 >> RX_DMA_FPORT_SHIFT) &
792 + RX_DMA_FPORT_MASK;
793 + mac--;
794 +
795 + netdev = eth->netdev[mac];
796 +
797 + /* alloc new buffer */
798 + new_data = napi_alloc_frag(ring->frag_size);
799 + if (unlikely(!new_data)) {
800 + netdev->stats.rx_dropped++;
801 + goto release_desc;
802 + }
803 + dma_addr = dma_map_single(&eth->netdev[mac]->dev,
804 + new_data + NET_SKB_PAD,
805 + ring->buf_size,
806 + DMA_FROM_DEVICE);
807 + if (unlikely(dma_mapping_error(&netdev->dev, dma_addr))) {
808 + skb_free_frag(new_data);
809 + goto release_desc;
810 + }
811 +
812 + /* receive data */
813 + skb = build_skb(data, ring->frag_size);
814 + if (unlikely(!skb)) {
815 + put_page(virt_to_head_page(new_data));
816 + goto release_desc;
817 + }
818 + skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
819 +
820 + dma_unmap_single(&netdev->dev, trxd.rxd1,
821 + ring->buf_size, DMA_FROM_DEVICE);
822 + pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
823 + skb->dev = netdev;
824 + skb_put(skb, pktlen);
825 + if (trxd.rxd4 & RX_DMA_L4_VALID)
826 + skb->ip_summed = CHECKSUM_UNNECESSARY;
827 + else
828 + skb_checksum_none_assert(skb);
829 + skb->protocol = eth_type_trans(skb, netdev);
830 +
831 + if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX &&
832 + RX_DMA_VID(trxd.rxd3))
833 + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
834 + RX_DMA_VID(trxd.rxd3));
835 + napi_gro_receive(napi, skb);
836 +
837 + ring->data[idx] = new_data;
838 + rxd->rxd1 = (unsigned int)dma_addr;
839 +
840 +release_desc:
841 + rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size);
842 +
843 + ring->calc_idx = idx;
844 + /* make sure that all changes to the dma ring are flushed before
845 + * we continue
846 + */
847 + wmb();
848 + mtk_w32(eth, ring->calc_idx, MTK_QRX_CRX_IDX0);
849 + done++;
850 + }
851 +
852 + if (done < budget)
853 + mtk_w32(eth, rx_intr, MTK_QMTK_INT_STATUS);
854 +
855 + return done;
856 +}
857 +
858 +static int mtk_poll_tx(struct mtk_eth *eth, int budget, bool *tx_again)
859 +{
860 + struct mtk_tx_ring *ring = &eth->tx_ring;
861 + struct mtk_tx_dma *desc;
862 + struct sk_buff *skb;
863 + struct mtk_tx_buf *tx_buf;
864 + int total = 0, done[MTK_MAX_DEVS];
865 + unsigned int bytes[MTK_MAX_DEVS];
866 + u32 cpu, dma;
867 + static int condition;
868 + int i;
869 +
870 + memset(done, 0, sizeof(done));
871 + memset(bytes, 0, sizeof(bytes));
872 +
873 + cpu = mtk_r32(eth, MTK_QTX_CRX_PTR);
874 + dma = mtk_r32(eth, MTK_QTX_DRX_PTR);
875 +
876 + desc = mtk_qdma_phys_to_virt(ring, cpu);
877 +
878 + while ((cpu != dma) && budget) {
879 + u32 next_cpu = desc->txd2;
880 + int mac;
881 +
882 + desc = mtk_qdma_phys_to_virt(ring, desc->txd2);
883 + if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
884 + break;
885 +
886 + mac = (desc->txd4 >> TX_DMA_FPORT_SHIFT) &
887 + TX_DMA_FPORT_MASK;
888 + mac--;
889 +
890 + tx_buf = mtk_desc_to_tx_buf(ring, desc);
891 + skb = tx_buf->skb;
892 + if (!skb) {
893 + condition = 1;
894 + break;
895 + }
896 +
897 + if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
898 + bytes[mac] += skb->len;
899 + done[mac]++;
900 + budget--;
901 + }
902 + mtk_tx_unmap(eth->dev, tx_buf);
903 +
904 + ring->last_free->txd2 = next_cpu;
905 + ring->last_free = desc;
906 + atomic_inc(&ring->free_count);
907 +
908 + cpu = next_cpu;
909 + }
910 +
911 + mtk_w32(eth, cpu, MTK_QTX_CRX_PTR);
912 +
913 + for (i = 0; i < MTK_MAC_COUNT; i++) {
914 + if (!eth->netdev[i] || !done[i])
915 + continue;
916 + netdev_completed_queue(eth->netdev[i], done[i], bytes[i]);
917 + total += done[i];
918 + }
919 +
920 + /* read hw index again make sure no new tx packet */
921 + if (cpu != dma || cpu != mtk_r32(eth, MTK_QTX_DRX_PTR))
922 + *tx_again = true;
923 + else
924 + mtk_w32(eth, MTK_TX_DONE_INT, MTK_QMTK_INT_STATUS);
925 +
926 + if (!total)
927 + return 0;
928 +
929 + for (i = 0; i < MTK_MAC_COUNT; i++) {
930 + if (!eth->netdev[i] ||
931 + unlikely(!netif_queue_stopped(eth->netdev[i])))
932 + continue;
933 + if (atomic_read(&ring->free_count) > ring->thresh)
934 + netif_wake_queue(eth->netdev[i]);
935 + }
936 +
937 + return total;
938 +}
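/* the per-MAC done/bytes counters gathered above feed netdev_completed_queue(),
 * pairing with the netdev_sent_queue() call in mtk_tx_map(); this is the BQL
 * bookkeeping mentioned in the commit message
 */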
939 +
940 +static int mtk_poll(struct napi_struct *napi, int budget)
941 +{
942 + struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
943 + u32 status, status2, mask, tx_intr, rx_intr, status_intr;
944 + int tx_done, rx_done;
945 + bool tx_again = false;
946 +
947 + status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
948 + status2 = mtk_r32(eth, MTK_INT_STATUS2);
949 + tx_intr = MTK_TX_DONE_INT;
950 + rx_intr = MTK_RX_DONE_INT;
951 + status_intr = (MTK_GDM1_AF | MTK_GDM2_AF);
952 + tx_done = 0;
953 + rx_done = 0;
954 + tx_again = 0;
955 +
956 + if (status & tx_intr)
957 + tx_done = mtk_poll_tx(eth, budget, &tx_again);
958 +
959 + if (status & rx_intr)
960 + rx_done = mtk_poll_rx(napi, budget, eth, rx_intr);
961 +
962 + if (unlikely(status2 & status_intr)) {
963 + mtk_stats_update(eth);
964 + mtk_w32(eth, status_intr, MTK_INT_STATUS2);
965 + }
966 +
967 + if (unlikely(netif_msg_intr(eth))) {
968 + mask = mtk_r32(eth, MTK_QDMA_INT_MASK);
969 + netdev_info(eth->netdev[0],
970 + "done tx %d, rx %d, intr 0x%08x/0x%x\n",
971 + tx_done, rx_done, status, mask);
972 + }
973 +
974 + if (tx_again || rx_done == budget)
975 + return budget;
976 +
977 + status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
978 + if (status & (tx_intr | rx_intr))
979 + return budget;
980 +
981 + napi_complete(napi);
982 + mtk_irq_enable(eth, tx_intr | rx_intr);
983 +
984 + return rx_done;
985 +}
986 +
987 +static int mtk_tx_alloc(struct mtk_eth *eth)
988 +{
989 + struct mtk_tx_ring *ring = &eth->tx_ring;
990 + int i, sz = sizeof(*ring->dma);
991 +
992 + ring->buf = kcalloc(MTK_DMA_SIZE, sizeof(*ring->buf),
993 + GFP_KERNEL);
994 + if (!ring->buf)
995 + goto no_tx_mem;
996 +
997 + ring->dma = dma_alloc_coherent(eth->dev,
998 + MTK_DMA_SIZE * sz,
999 + &ring->phys,
1000 + GFP_ATOMIC | __GFP_ZERO);
1001 + if (!ring->dma)
1002 + goto no_tx_mem;
1003 +
1004 + memset(ring->dma, 0, MTK_DMA_SIZE * sz);
1005 + for (i = 0; i < MTK_DMA_SIZE; i++) {
1006 + int next = (i + 1) % MTK_DMA_SIZE;
1007 + u32 next_ptr = ring->phys + next * sz;
1008 +
1009 + ring->dma[i].txd2 = next_ptr;
1010 + ring->dma[i].txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
1011 + }
1012 +
1013 + atomic_set(&ring->free_count, MTK_DMA_SIZE - 2);
1014 + ring->next_free = &ring->dma[0];
1015 + ring->last_free = &ring->dma[MTK_DMA_SIZE - 2];
1016 + ring->thresh = max((unsigned long)MTK_DMA_SIZE >> 2,
1017 + MAX_SKB_FRAGS);
1018 +
1019 + /* make sure that all changes to the dma ring are flushed before we
1020 + * continue
1021 + */
1022 + wmb();
1023 +
1024 + mtk_w32(eth, ring->phys, MTK_QTX_CTX_PTR);
1025 + mtk_w32(eth, ring->phys, MTK_QTX_DTX_PTR);
1026 + mtk_w32(eth,
1027 + ring->phys + ((MTK_DMA_SIZE - 1) * sz),
1028 + MTK_QTX_CRX_PTR);
1029 + mtk_w32(eth,
1030 + ring->phys + ((MTK_DMA_SIZE - 1) * sz),
1031 + MTK_QTX_DRX_PTR);
1032 +
1033 + return 0;
1034 +
1035 +no_tx_mem:
1036 + return -ENOMEM;
1037 +}
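/* mtk_tx_alloc() leaves the TX ring as a circular chain of MTK_DMA_SIZE
 * descriptors: next_free starts at descriptor 0, last_free at descriptor
 * MTK_DMA_SIZE - 2, and the CPU/DMA release pointers are initialized to the
 * last descriptor in the chain
 */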
1038 +
1039 +static void mtk_tx_clean(struct mtk_eth *eth)
1040 +{
1041 + struct mtk_tx_ring *ring = &eth->tx_ring;
1042 + int i;
1043 +
1044 + if (ring->buf) {
1045 + for (i = 0; i < MTK_DMA_SIZE; i++)
1046 + mtk_tx_unmap(eth->dev, &ring->buf[i]);
1047 + kfree(ring->buf);
1048 + ring->buf = NULL;
1049 + }
1050 +
1051 + if (ring->dma) {
1052 + dma_free_coherent(eth->dev,
1053 + MTK_DMA_SIZE * sizeof(*ring->dma),
1054 + ring->dma,
1055 + ring->phys);
1056 + ring->dma = NULL;
1057 + }
1058 +}
1059 +
1060 +static int mtk_rx_alloc(struct mtk_eth *eth)
1061 +{
1062 + struct mtk_rx_ring *ring = &eth->rx_ring;
1063 + int i;
1064 +
1065 + ring->frag_size = mtk_max_frag_size(ETH_DATA_LEN);
1066 + ring->buf_size = mtk_max_buf_size(ring->frag_size);
1067 + ring->data = kcalloc(MTK_DMA_SIZE, sizeof(*ring->data),
1068 + GFP_KERNEL);
1069 + if (!ring->data)
1070 + return -ENOMEM;
1071 +
1072 + for (i = 0; i < MTK_DMA_SIZE; i++) {
1073 + ring->data[i] = netdev_alloc_frag(ring->frag_size);
1074 + if (!ring->data[i])
1075 + return -ENOMEM;
1076 + }
1077 +
1078 + ring->dma = dma_alloc_coherent(eth->dev,
1079 + MTK_DMA_SIZE * sizeof(*ring->dma),
1080 + &ring->phys,
1081 + GFP_ATOMIC | __GFP_ZERO);
1082 + if (!ring->dma)
1083 + return -ENOMEM;
1084 +
1085 + for (i = 0; i < MTK_DMA_SIZE; i++) {
1086 + dma_addr_t dma_addr = dma_map_single(eth->dev,
1087 + ring->data[i] + NET_SKB_PAD,
1088 + ring->buf_size,
1089 + DMA_FROM_DEVICE);
1090 + if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
1091 + return -ENOMEM;
1092 + ring->dma[i].rxd1 = (unsigned int)dma_addr;
1093 +
1094 + ring->dma[i].rxd2 = RX_DMA_PLEN0(ring->buf_size);
1095 + }
1096 + ring->calc_idx = MTK_DMA_SIZE - 1;
1097 + /* make sure that all changes to the dma ring are flushed before we
1098 + * continue
1099 + */
1100 + wmb();
1101 +
1102 + mtk_w32(eth, eth->rx_ring.phys, MTK_QRX_BASE_PTR0);
1103 + mtk_w32(eth, MTK_DMA_SIZE, MTK_QRX_MAX_CNT0);
1104 + mtk_w32(eth, eth->rx_ring.calc_idx, MTK_QRX_CRX_IDX0);
1105 + mtk_w32(eth, MTK_PST_DRX_IDX0, MTK_QDMA_RST_IDX);
1106 + mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES, MTK_QTX_CFG(0));
1107 +
1108 + return 0;
1109 +}
1110 +
1111 +static void mtk_rx_clean(struct mtk_eth *eth)
1112 +{
1113 + struct mtk_rx_ring *ring = &eth->rx_ring;
1114 + int i;
1115 +
1116 + if (ring->data && ring->dma) {
1117 + for (i = 0; i < MTK_DMA_SIZE; i++) {
1118 + if (!ring->data[i])
1119 + continue;
1120 + if (!ring->dma[i].rxd1)
1121 + continue;
1122 + dma_unmap_single(eth->dev,
1123 + ring->dma[i].rxd1,
1124 + ring->buf_size,
1125 + DMA_FROM_DEVICE);
1126 + skb_free_frag(ring->data[i]);
1127 + }
1128 + kfree(ring->data);
1129 + ring->data = NULL;
1130 + }
1131 +
1132 + if (ring->dma) {
1133 + dma_free_coherent(eth->dev,
1134 + MTK_DMA_SIZE * sizeof(*ring->dma),
1135 + ring->dma,
1136 + ring->phys);
1137 + ring->dma = NULL;
1138 + }
1139 +}
1140 +
1141 +/* wait for DMA to finish whatever it is doing before we start using it again */
1142 +static int mtk_dma_busy_wait(struct mtk_eth *eth)
1143 +{
1144 + unsigned long t_start = jiffies;
1145 +
1146 + while (1) {
1147 + if (!(mtk_r32(eth, MTK_QDMA_GLO_CFG) &
1148 + (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)))
1149 + return 0;
1150 + if (time_after(jiffies, t_start + MTK_DMA_BUSY_TIMEOUT))
1151 + break;
1152 + }
1153 +
1154 + dev_err(eth->dev, "DMA init timeout\n");
1155 + return -1;
1156 +}
1157 +
1158 +static int mtk_dma_init(struct mtk_eth *eth)
1159 +{
1160 + int err;
1161 +
1162 + if (mtk_dma_busy_wait(eth))
1163 + return -EBUSY;
1164 +
1165 + /* QDMA needs scratch memory for internal reordering of the
1166 + * descriptors
1167 + */
1168 + err = mtk_init_fq_dma(eth);
1169 + if (err)
1170 + return err;
1171 +
1172 + err = mtk_tx_alloc(eth);
1173 + if (err)
1174 + return err;
1175 +
1176 + err = mtk_rx_alloc(eth);
1177 + if (err)
1178 + return err;
1179 +
1180 + /* Enable random early drop and set drop threshold automatically */
1181 + mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN | FC_THRES_MIN,
1182 + MTK_QDMA_FC_THRES);
1183 + mtk_w32(eth, 0x0, MTK_QDMA_HRED2);
1184 +
1185 + return 0;
1186 +}
1187 +
1188 +static void mtk_dma_free(struct mtk_eth *eth)
1189 +{
1190 + int i;
1191 +
1192 + for (i = 0; i < MTK_MAC_COUNT; i++)
1193 + if (eth->netdev[i])
1194 + netdev_reset_queue(eth->netdev[i]);
1195 + mtk_tx_clean(eth);
1196 + mtk_rx_clean(eth);
1197 + kfree(eth->scratch_head);
1198 +}
1199 +
1200 +static void mtk_tx_timeout(struct net_device *dev)
1201 +{
1202 + struct mtk_mac *mac = netdev_priv(dev);
1203 + struct mtk_eth *eth = mac->hw;
1204 +
1205 + eth->netdev[mac->id]->stats.tx_errors++;
1206 + netif_err(eth, tx_err, dev,
1207 + "transmit timed out\n");
1208 + schedule_work(&mac->pending_work);
1209 +}
1210 +
1211 +static irqreturn_t mtk_handle_irq(int irq, void *_eth)
1212 +{
1213 + struct mtk_eth *eth = _eth;
1214 + u32 status;
1215 +
1216 + status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
1217 + if (unlikely(!status))
1218 + return IRQ_NONE;
1219 +
1220 + if (likely(status & (MTK_RX_DONE_INT | MTK_TX_DONE_INT))) {
1221 + if (likely(napi_schedule_prep(&eth->rx_napi)))
1222 + __napi_schedule(&eth->rx_napi);
1223 + } else {
1224 + mtk_w32(eth, status, MTK_QMTK_INT_STATUS);
1225 + }
1226 + mtk_irq_disable(eth, (MTK_RX_DONE_INT | MTK_TX_DONE_INT));
1227 +
1228 + return IRQ_HANDLED;
1229 +}
1230 +
1231 +#ifdef CONFIG_NET_POLL_CONTROLLER
1232 +static void mtk_poll_controller(struct net_device *dev)
1233 +{
1234 + struct mtk_mac *mac = netdev_priv(dev);
1235 + struct mtk_eth *eth = mac->hw;
1236 + u32 int_mask = MTK_TX_DONE_INT | MTK_RX_DONE_INT;
1237 +
1238 + mtk_irq_disable(eth, int_mask);
1239 + mtk_handle_irq(dev->irq, dev);
1240 + mtk_irq_enable(eth, int_mask);
1241 +}
1242 +#endif
1243 +
1244 +static int mtk_start_dma(struct mtk_eth *eth)
1245 +{
1246 + int err;
1247 +
1248 + err = mtk_dma_init(eth);
1249 + if (err) {
1250 + mtk_dma_free(eth);
1251 + return err;
1252 + }
1253 +
1254 + mtk_w32(eth,
1255 + MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN |
1256 + MTK_RX_2B_OFFSET | MTK_DMA_SIZE_16DWORDS |
1257 + MTK_RX_BT_32DWORDS,
1258 + MTK_QDMA_GLO_CFG);
1259 +
1260 + return 0;
1261 +}
1262 +
1263 +static int mtk_open(struct net_device *dev)
1264 +{
1265 + struct mtk_mac *mac = netdev_priv(dev);
1266 + struct mtk_eth *eth = mac->hw;
1267 +
1268 + /* we run 2 netdevs on the same dma ring so we only bring it up once */
1269 + if (!atomic_read(&eth->dma_refcnt)) {
1270 + int err = mtk_start_dma(eth);
1271 +
1272 + if (err)
1273 + return err;
1274 +
1275 + napi_enable(&eth->rx_napi);
1276 + mtk_irq_enable(eth, MTK_TX_DONE_INT | MTK_RX_DONE_INT);
1277 + }
1278 + atomic_inc(&eth->dma_refcnt);
1279 +
1280 + phy_start(mac->phy_dev);
1281 + netif_start_queue(dev);
1282 +
1283 + return 0;
1284 +}
1285 +
1286 +static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg)
1287 +{
1288 + unsigned long flags;
1289 + u32 val;
1290 + int i;
1291 +
1292 + /* stop the dma engine */
1293 + spin_lock_irqsave(&eth->page_lock, flags);
1294 + val = mtk_r32(eth, glo_cfg);
1295 + mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN),
1296 + glo_cfg);
1297 + spin_unlock_irqrestore(&eth->page_lock, flags);
1298 +
1299 + /* wait for dma stop */
1300 + for (i = 0; i < 10; i++) {
1301 + val = mtk_r32(eth, glo_cfg);
1302 + if (val & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)) {
1303 + msleep(20);
1304 + continue;
1305 + }
1306 + break;
1307 + }
1308 +}
1309 +
1310 +static int mtk_stop(struct net_device *dev)
1311 +{
1312 + struct mtk_mac *mac = netdev_priv(dev);
1313 + struct mtk_eth *eth = mac->hw;
1314 +
1315 + netif_tx_disable(dev);
1316 + phy_stop(mac->phy_dev);
1317 +
1318 + /* only shutdown DMA if this is the last user */
1319 + if (!atomic_dec_and_test(&eth->dma_refcnt))
1320 + return 0;
1321 +
1322 + mtk_irq_disable(eth, MTK_TX_DONE_INT | MTK_RX_DONE_INT);
1323 + napi_disable(&eth->rx_napi);
1324 +
1325 + mtk_stop_dma(eth, MTK_QDMA_GLO_CFG);
1326 +
1327 + mtk_dma_free(eth);
1328 +
1329 + return 0;
1330 +}
1331 +
1332 +static int __init mtk_hw_init(struct mtk_eth *eth)
1333 +{
1334 + int err, i;
1335 +
1336 + /* reset the frame engine */
1337 + reset_control_assert(eth->rstc);
1338 + usleep_range(10, 20);
1339 + reset_control_deassert(eth->rstc);
1340 + usleep_range(10, 20);
1341 +
1342 + /* Set GE2 driving and slew rate */
1343 + regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00);
1344 +
1345 + /* set GE2 TDSEL */
1346 + regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5);
1347 +
1348 + /* set GE2 TUNE */
1349 + regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0);
1350 +
1351 + /* GE1, Force 1000M/FD, FC ON */
1352 + mtk_w32(eth, MAC_MCR_FIXED_LINK, MTK_MAC_MCR(0));
1353 +
1354 + /* GE2, Force 1000M/FD, FC ON */
1355 + mtk_w32(eth, MAC_MCR_FIXED_LINK, MTK_MAC_MCR(1));
1356 +
1357 +	/* Enable RX VLAN Offloading */
1358 + mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
1359 +
1360 + err = devm_request_irq(eth->dev, eth->irq, mtk_handle_irq, 0,
1361 + dev_name(eth->dev), eth);
1362 + if (err)
1363 + return err;
1364 +
1365 + err = mtk_mdio_init(eth);
1366 + if (err)
1367 + return err;
1368 +
1369 + /* disable delay and normal interrupt */
1370 + mtk_w32(eth, 0, MTK_QDMA_DELAY_INT);
1371 + mtk_irq_disable(eth, MTK_TX_DONE_INT | MTK_RX_DONE_INT);
1372 + mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
1373 + mtk_w32(eth, 0, MTK_RST_GL);
1374 +
1375 + /* FE int grouping */
1376 + mtk_w32(eth, 0, MTK_FE_INT_GRP);
1377 +
1378 + for (i = 0; i < 2; i++) {
1379 + u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i));
1380 +
1381 + /* setup the forward port to send frame to QDMA */
1382 + val &= ~0xffff;
1383 + val |= 0x5555;
1384 +
1385 + /* Enable RX checksum */
1386 + val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;
1387 +
1388 + /* setup the mac dma */
1389 + mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i));
1390 + }
1391 +
1392 + return 0;
1393 +}
1394 +
1395 +static int __init mtk_init(struct net_device *dev)
1396 +{
1397 + struct mtk_mac *mac = netdev_priv(dev);
1398 + struct mtk_eth *eth = mac->hw;
1399 + const char *mac_addr;
1400 +
1401 + mac_addr = of_get_mac_address(mac->of_node);
1402 + if (mac_addr)
1403 + ether_addr_copy(dev->dev_addr, mac_addr);
1404 +
1405 + /* If the mac address is invalid, use random mac address */
1406 + if (!is_valid_ether_addr(dev->dev_addr)) {
1407 + random_ether_addr(dev->dev_addr);
1408 + dev_err(eth->dev, "generated random MAC address %pM\n",
1409 + dev->dev_addr);
1410 + dev->addr_assign_type = NET_ADDR_RANDOM;
1411 + }
1412 +
1413 + return mtk_phy_connect(mac);
1414 +}
1415 +
1416 +static void mtk_uninit(struct net_device *dev)
1417 +{
1418 + struct mtk_mac *mac = netdev_priv(dev);
1419 + struct mtk_eth *eth = mac->hw;
1420 +
1421 + phy_disconnect(mac->phy_dev);
1422 + mtk_mdio_cleanup(eth);
1423 + mtk_irq_disable(eth, ~0);
1424 + free_irq(dev->irq, dev);
1425 +}
1426 +
1427 +static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1428 +{
1429 + struct mtk_mac *mac = netdev_priv(dev);
1430 +
1431 + switch (cmd) {
1432 + case SIOCGMIIPHY:
1433 + case SIOCGMIIREG:
1434 + case SIOCSMIIREG:
1435 + return phy_mii_ioctl(mac->phy_dev, ifr, cmd);
1436 + default:
1437 + break;
1438 + }
1439 +
1440 + return -EOPNOTSUPP;
1441 +}
1442 +
1443 +static void mtk_pending_work(struct work_struct *work)
1444 +{
1445 + struct mtk_mac *mac = container_of(work, struct mtk_mac, pending_work);
1446 + struct mtk_eth *eth = mac->hw;
1447 + struct net_device *dev = eth->netdev[mac->id];
1448 + int err;
1449 +
1450 + rtnl_lock();
1451 + mtk_stop(dev);
1452 +
1453 + err = mtk_open(dev);
1454 + if (err) {
1455 + netif_alert(eth, ifup, dev,
1456 + "Driver up/down cycle failed, closing device.\n");
1457 + dev_close(dev);
1458 + }
1459 + rtnl_unlock();
1460 +}
1461 +
1462 +static int mtk_cleanup(struct mtk_eth *eth)
1463 +{
1464 + int i;
1465 +
1466 + for (i = 0; i < MTK_MAC_COUNT; i++) {
1467 + struct mtk_mac *mac = netdev_priv(eth->netdev[i]);
1468 +
1469 + if (!eth->netdev[i])
1470 + continue;
1471 +
1472 + unregister_netdev(eth->netdev[i]);
1473 + free_netdev(eth->netdev[i]);
1474 + cancel_work_sync(&mac->pending_work);
1475 + }
1476 +
1477 + return 0;
1478 +}
1479 +
1480 +static int mtk_get_settings(struct net_device *dev,
1481 + struct ethtool_cmd *cmd)
1482 +{
1483 + struct mtk_mac *mac = netdev_priv(dev);
1484 + int err;
1485 +
1486 + err = phy_read_status(mac->phy_dev);
1487 + if (err)
1488 + return -ENODEV;
1489 +
1490 + return phy_ethtool_gset(mac->phy_dev, cmd);
1491 +}
1492 +
1493 +static int mtk_set_settings(struct net_device *dev,
1494 + struct ethtool_cmd *cmd)
1495 +{
1496 + struct mtk_mac *mac = netdev_priv(dev);
1497 +
1498 + if (cmd->phy_address != mac->phy_dev->mdio.addr) {
1499 + mac->phy_dev = mdiobus_get_phy(mac->hw->mii_bus,
1500 + cmd->phy_address);
1501 + if (!mac->phy_dev)
1502 + return -ENODEV;
1503 + }
1504 +
1505 + return phy_ethtool_sset(mac->phy_dev, cmd);
1506 +}
1507 +
1508 +static void mtk_get_drvinfo(struct net_device *dev,
1509 + struct ethtool_drvinfo *info)
1510 +{
1511 + struct mtk_mac *mac = netdev_priv(dev);
1512 +
1513 + strlcpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver));
1514 + strlcpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info));
1515 + info->n_stats = ARRAY_SIZE(mtk_ethtool_stats);
1516 +}
1517 +
1518 +static u32 mtk_get_msglevel(struct net_device *dev)
1519 +{
1520 + struct mtk_mac *mac = netdev_priv(dev);
1521 +
1522 + return mac->hw->msg_enable;
1523 +}
1524 +
1525 +static void mtk_set_msglevel(struct net_device *dev, u32 value)
1526 +{
1527 + struct mtk_mac *mac = netdev_priv(dev);
1528 +
1529 + mac->hw->msg_enable = value;
1530 +}
1531 +
1532 +static int mtk_nway_reset(struct net_device *dev)
1533 +{
1534 + struct mtk_mac *mac = netdev_priv(dev);
1535 +
1536 + return genphy_restart_aneg(mac->phy_dev);
1537 +}
1538 +
1539 +static u32 mtk_get_link(struct net_device *dev)
1540 +{
1541 + struct mtk_mac *mac = netdev_priv(dev);
1542 + int err;
1543 +
1544 + err = genphy_update_link(mac->phy_dev);
1545 + if (err)
1546 + return ethtool_op_get_link(dev);
1547 +
1548 + return mac->phy_dev->link;
1549 +}
1550 +
1551 +static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1552 +{
1553 + int i;
1554 +
1555 + switch (stringset) {
1556 + case ETH_SS_STATS:
1557 + for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++) {
1558 + memcpy(data, mtk_ethtool_stats[i].str, ETH_GSTRING_LEN);
1559 + data += ETH_GSTRING_LEN;
1560 + }
1561 + break;
1562 + }
1563 +}
1564 +
1565 +static int mtk_get_sset_count(struct net_device *dev, int sset)
1566 +{
1567 + switch (sset) {
1568 + case ETH_SS_STATS:
1569 + return ARRAY_SIZE(mtk_ethtool_stats);
1570 + default:
1571 + return -EOPNOTSUPP;
1572 + }
1573 +}
1574 +
1575 +static void mtk_get_ethtool_stats(struct net_device *dev,
1576 + struct ethtool_stats *stats, u64 *data)
1577 +{
1578 + struct mtk_mac *mac = netdev_priv(dev);
1579 + struct mtk_hw_stats *hwstats = mac->hw_stats;
1580 + u64 *data_src, *data_dst;
1581 + unsigned int start;
1582 + int i;
1583 +
1584 + if (netif_running(dev) && netif_device_present(dev)) {
1585 + if (spin_trylock(&hwstats->stats_lock)) {
1586 + mtk_stats_update_mac(mac);
1587 + spin_unlock(&hwstats->stats_lock);
1588 + }
1589 + }
1590 +
1591 + do {
1592 + data_src = (u64*)hwstats;
1593 + data_dst = data;
1594 + start = u64_stats_fetch_begin_irq(&hwstats->syncp);
1595 +
1596 + for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++)
1597 + *data_dst++ = *(data_src + mtk_ethtool_stats[i].offset);
1598 + } while (u64_stats_fetch_retry_irq(&hwstats->syncp, start));
1599 +}
1600 +
1601 +static struct ethtool_ops mtk_ethtool_ops = {
1602 + .get_settings = mtk_get_settings,
1603 + .set_settings = mtk_set_settings,
1604 + .get_drvinfo = mtk_get_drvinfo,
1605 + .get_msglevel = mtk_get_msglevel,
1606 + .set_msglevel = mtk_set_msglevel,
1607 + .nway_reset = mtk_nway_reset,
1608 + .get_link = mtk_get_link,
1609 + .get_strings = mtk_get_strings,
1610 + .get_sset_count = mtk_get_sset_count,
1611 + .get_ethtool_stats = mtk_get_ethtool_stats,
1612 +};
1613 +
1614 +static const struct net_device_ops mtk_netdev_ops = {
1615 + .ndo_init = mtk_init,
1616 + .ndo_uninit = mtk_uninit,
1617 + .ndo_open = mtk_open,
1618 + .ndo_stop = mtk_stop,
1619 + .ndo_start_xmit = mtk_start_xmit,
1620 + .ndo_set_mac_address = mtk_set_mac_address,
1621 + .ndo_validate_addr = eth_validate_addr,
1622 + .ndo_do_ioctl = mtk_do_ioctl,
1623 + .ndo_change_mtu = eth_change_mtu,
1624 + .ndo_tx_timeout = mtk_tx_timeout,
1625 + .ndo_get_stats64 = mtk_get_stats64,
1626 +#ifdef CONFIG_NET_POLL_CONTROLLER
1627 + .ndo_poll_controller = mtk_poll_controller,
1628 +#endif
1629 +};
1630 +
1631 +static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
1632 +{
1633 + struct mtk_mac *mac;
1634 + const __be32 *_id = of_get_property(np, "reg", NULL);
1635 + int id, err;
1636 +
1637 + if (!_id) {
1638 + dev_err(eth->dev, "missing mac id\n");
1639 + return -EINVAL;
1640 + }
1641 +
1642 + id = be32_to_cpup(_id);
1643 + if (id >= MTK_MAC_COUNT) {
1644 + dev_err(eth->dev, "%d is not a valid mac id\n", id);
1645 + return -EINVAL;
1646 + }
1647 +
1648 + if (eth->netdev[id]) {
1649 + dev_err(eth->dev, "duplicate mac id found: %d\n", id);
1650 + return -EINVAL;
1651 + }
1652 +
1653 + eth->netdev[id] = alloc_etherdev(sizeof(*mac));
1654 + if (!eth->netdev[id]) {
1655 + dev_err(eth->dev, "alloc_etherdev failed\n");
1656 + return -ENOMEM;
1657 + }
1658 + mac = netdev_priv(eth->netdev[id]);
1659 + eth->mac[id] = mac;
1660 + mac->id = id;
1661 + mac->hw = eth;
1662 + mac->of_node = np;
1663 + INIT_WORK(&mac->pending_work, mtk_pending_work);
1664 +
1665 + mac->hw_stats = devm_kzalloc(eth->dev,
1666 + sizeof(*mac->hw_stats),
1667 + GFP_KERNEL);
1668 + if (!mac->hw_stats) {
1669 + dev_err(eth->dev, "failed to allocate counter memory\n");
1670 + err = -ENOMEM;
1671 + goto free_netdev;
1672 + }
1673 + spin_lock_init(&mac->hw_stats->stats_lock);
1674 + mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;
1675 +
1676 + SET_NETDEV_DEV(eth->netdev[id], eth->dev);
1677 + eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
1678 + eth->netdev[id]->base_addr = (unsigned long)eth->base;
1679 + eth->netdev[id]->vlan_features = MTK_HW_FEATURES &
1680 + ~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
1681 + eth->netdev[id]->features |= MTK_HW_FEATURES;
1682 + eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops;
1683 +
1684 + err = register_netdev(eth->netdev[id]);
1685 + if (err) {
1686 + dev_err(eth->dev, "error bringing up device\n");
1687 + goto free_netdev;
1688 + }
1689 + eth->netdev[id]->irq = eth->irq;
1690 + netif_info(eth, probe, eth->netdev[id],
1691 + "mediatek frame engine at 0x%08lx, irq %d\n",
1692 + eth->netdev[id]->base_addr, eth->netdev[id]->irq);
1693 +
1694 + return 0;
1695 +
1696 +free_netdev:
1697 + free_netdev(eth->netdev[id]);
1698 + return err;
1699 +}
1700 +
1701 +static int mtk_probe(struct platform_device *pdev)
1702 +{
1703 + struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1704 + struct device_node *mac_np;
1705 + const struct of_device_id *match;
1706 + struct mtk_soc_data *soc;
1707 + struct mtk_eth *eth;
1708 + int err;
1709 +
1710 + pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
1711 + pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
1712 +
1713 + device_reset(&pdev->dev);
1714 +
1715 + match = of_match_device(of_mtk_match, &pdev->dev);
1716 + soc = (struct mtk_soc_data *)match->data;
1717 +
1718 + eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
1719 + if (!eth)
1720 + return -ENOMEM;
1721 +
1722 + eth->base = devm_ioremap_resource(&pdev->dev, res);
1723 + if (!eth->base)
1724 + return -EADDRNOTAVAIL;
1725 +
1726 + spin_lock_init(&eth->page_lock);
1727 +
1728 + eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
1729 + "mediatek,ethsys");
1730 + if (IS_ERR(eth->ethsys)) {
1731 + dev_err(&pdev->dev, "no ethsys regmap found\n");
1732 + return PTR_ERR(eth->ethsys);
1733 + }
1734 +
1735 + eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
1736 + "mediatek,pctl");
1737 + if (IS_ERR(eth->pctl)) {
1738 + dev_err(&pdev->dev, "no pctl regmap found\n");
1739 + return PTR_ERR(eth->pctl);
1740 + }
1741 +
1742 + eth->rstc = devm_reset_control_get(&pdev->dev, "eth");
1743 + if (IS_ERR(eth->rstc)) {
1744 + dev_err(&pdev->dev, "no eth reset found\n");
1745 + return PTR_ERR(eth->rstc);
1746 + }
1747 +
1748 + eth->irq = platform_get_irq(pdev, 0);
1749 + if (eth->irq < 0) {
1750 + dev_err(&pdev->dev, "no IRQ resource found\n");
1751 + return -ENXIO;
1752 + }
1753 +
1754 + eth->clk_ethif = devm_clk_get(&pdev->dev, "ethif");
1755 + eth->clk_esw = devm_clk_get(&pdev->dev, "esw");
1756 + eth->clk_gp1 = devm_clk_get(&pdev->dev, "gp1");
1757 + eth->clk_gp2 = devm_clk_get(&pdev->dev, "gp2");
1758 + if (IS_ERR(eth->clk_esw) || IS_ERR(eth->clk_gp1) ||
1759 + IS_ERR(eth->clk_gp2) || IS_ERR(eth->clk_ethif))
1760 + return -ENODEV;
1761 +
1762 + clk_prepare_enable(eth->clk_ethif);
1763 + clk_prepare_enable(eth->clk_esw);
1764 + clk_prepare_enable(eth->clk_gp1);
1765 + clk_prepare_enable(eth->clk_gp2);
1766 +
1767 + eth->dev = &pdev->dev;
1768 + eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
1769 +
1770 + err = mtk_hw_init(eth);
1771 + if (err)
1772 + return err;
1773 +
1774 + for_each_child_of_node(pdev->dev.of_node, mac_np) {
1775 + if (!of_device_is_compatible(mac_np,
1776 + "mediatek,eth-mac"))
1777 + continue;
1778 +
1779 + if (!of_device_is_available(mac_np))
1780 + continue;
1781 +
1782 + err = mtk_add_mac(eth, mac_np);
1783 + if (err)
1784 + goto err_free_dev;
1785 + }
1786 +
1787 + /* we run 2 devices on the same DMA ring so we need a dummy device
1788 + * for NAPI to work
1789 + */
1790 + init_dummy_netdev(&eth->dummy_dev);
1791 + netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_poll,
1792 + MTK_NAPI_WEIGHT);
1793 +
1794 + platform_set_drvdata(pdev, eth);
1795 +
1796 + return 0;
1797 +
1798 +err_free_dev:
1799 + mtk_cleanup(eth);
1800 + return err;
1801 +}
1802 +
1803 +static int mtk_remove(struct platform_device *pdev)
1804 +{
1805 + struct mtk_eth *eth = platform_get_drvdata(pdev);
1806 +
1807 + clk_disable_unprepare(eth->clk_ethif);
1808 + clk_disable_unprepare(eth->clk_esw);
1809 + clk_disable_unprepare(eth->clk_gp1);
1810 + clk_disable_unprepare(eth->clk_gp2);
1811 +
1812 + netif_napi_del(&eth->rx_napi);
1813 + mtk_cleanup(eth);
1814 + platform_set_drvdata(pdev, NULL);
1815 +
1816 + return 0;
1817 +}
1818 +
1819 +const struct of_device_id of_mtk_match[] = {
1820 + { .compatible = "mediatek,mt7623-eth" },
1821 + {},
1822 +};
1823 +
1824 +static struct platform_driver mtk_driver = {
1825 + .probe = mtk_probe,
1826 + .remove = mtk_remove,
1827 + .driver = {
1828 + .name = "mtk_soc_eth",
1829 + .owner = THIS_MODULE,
1830 + .of_match_table = of_mtk_match,
1831 + },
1832 +};
1833 +
1834 +module_platform_driver(mtk_driver);
1835 +
1836 +MODULE_LICENSE("GPL");
1837 +MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
1838 +MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC");
1839 --- /dev/null
1840 +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
1841 @@ -0,0 +1,421 @@
1842 +/* This program is free software; you can redistribute it and/or modify
1843 + * it under the terms of the GNU General Public License as published by
1844 + * the Free Software Foundation; version 2 of the License
1845 + *
1846 + * This program is distributed in the hope that it will be useful,
1847 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
1848 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1849 + * GNU General Public License for more details.
1850 + *
1851 + * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
1852 + * Copyright (C) 2009-2016 Felix Fietkau <nbd@nbd.name>
1853 + * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
1854 + */
1855 +
1856 +#ifndef MTK_ETH_H
1857 +#define MTK_ETH_H
1858 +
1859 +#define MTK_QDMA_PAGE_SIZE 2048
1860 +#define MTK_MAX_RX_LENGTH 1536
1861 +#define MTK_TX_DMA_BUF_LEN 0x3fff
1862 +#define MTK_DMA_SIZE 256
1863 +#define MTK_NAPI_WEIGHT 64
1864 +#define MTK_MAC_COUNT 2
1865 +#define MTK_RX_ETH_HLEN (VLAN_ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)
1866 +#define MTK_RX_HLEN (NET_SKB_PAD + MTK_RX_ETH_HLEN + NET_IP_ALIGN)
1867 +#define MTK_DMA_DUMMY_DESC 0xffffffff
1868 +#define MTK_DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | \
1869 + NETIF_MSG_PROBE | \
1870 + NETIF_MSG_LINK | \
1871 + NETIF_MSG_TIMER | \
1872 + NETIF_MSG_IFDOWN | \
1873 + NETIF_MSG_IFUP | \
1874 + NETIF_MSG_RX_ERR | \
1875 + NETIF_MSG_TX_ERR)
1876 +#define MTK_HW_FEATURES (NETIF_F_IP_CSUM | \
1877 + NETIF_F_RXCSUM | \
1878 + NETIF_F_HW_VLAN_CTAG_TX | \
1879 + NETIF_F_HW_VLAN_CTAG_RX | \
1880 + NETIF_F_SG | NETIF_F_TSO | \
1881 + NETIF_F_TSO6 | \
1882 + NETIF_F_IPV6_CSUM)
1883 +#define NEXT_RX_DESP_IDX(X) (((X) + 1) & (MTK_DMA_SIZE - 1))
1884 +
1885 +/* Frame Engine Global Reset Register */
1886 +#define MTK_RST_GL 0x04
1887 +#define RST_GL_PSE BIT(0)
1888 +
1889 +/* Frame Engine Interrupt Status Register */
1890 +#define MTK_INT_STATUS2 0x08
1891 +#define MTK_GDM1_AF BIT(28)
1892 +#define MTK_GDM2_AF BIT(29)
1893 +
1894 +/* Frame Engine Interrupt Grouping Register */
1895 +#define MTK_FE_INT_GRP 0x20
1896 +
1897 +/* CDMP Egress Control Register */
1898 +#define MTK_CDMP_EG_CTRL 0x404
1899 +
1900 +/* GDM Egress Control Register */
1901 +#define MTK_GDMA_FWD_CFG(x) (0x500 + (x * 0x1000))
1902 +#define MTK_GDMA_ICS_EN BIT(22)
1903 +#define MTK_GDMA_TCS_EN BIT(21)
1904 +#define MTK_GDMA_UCS_EN BIT(20)
1905 +
1906 +/* Unicast Filter MAC Address Register - Low */
1907 +#define MTK_GDMA_MAC_ADRL(x) (0x508 + (x * 0x1000))
1908 +
1909 +/* Unicast Filter MAC Address Register - High */
1910 +#define MTK_GDMA_MAC_ADRH(x) (0x50C + (x * 0x1000))
1911 +
1912 +/* QDMA TX Queue Configuration Registers */
1913 +#define MTK_QTX_CFG(x) (0x1800 + (x * 0x10))
1914 +#define QDMA_RES_THRES 4
1915 +
1916 +/* QDMA TX Queue Scheduler Registers */
1917 +#define MTK_QTX_SCH(x) (0x1804 + (x * 0x10))
1918 +
1919 +/* QDMA RX Base Pointer Register */
1920 +#define MTK_QRX_BASE_PTR0 0x1900
1921 +
1922 +/* QDMA RX Maximum Count Register */
1923 +#define MTK_QRX_MAX_CNT0 0x1904
1924 +
1925 +/* QDMA RX CPU Pointer Register */
1926 +#define MTK_QRX_CRX_IDX0 0x1908
1927 +
1928 +/* QDMA RX DMA Pointer Register */
1929 +#define MTK_QRX_DRX_IDX0 0x190C
1930 +
1931 +/* QDMA Global Configuration Register */
1932 +#define MTK_QDMA_GLO_CFG 0x1A04
1933 +#define MTK_RX_2B_OFFSET BIT(31)
1934 +#define MTK_RX_BT_32DWORDS (3 << 11)
1935 +#define MTK_TX_WB_DDONE BIT(6)
1936 +#define MTK_DMA_SIZE_16DWORDS (2 << 4)
1937 +#define MTK_RX_DMA_BUSY BIT(3)
1938 +#define MTK_TX_DMA_BUSY BIT(1)
1939 +#define MTK_RX_DMA_EN BIT(2)
1940 +#define MTK_TX_DMA_EN BIT(0)
1941 +#define MTK_DMA_BUSY_TIMEOUT HZ
1942 +
1943 +/* QDMA Reset Index Register */
1944 +#define MTK_QDMA_RST_IDX 0x1A08
1945 +#define MTK_PST_DRX_IDX0 BIT(16)
1946 +
1947 +/* QDMA Delay Interrupt Register */
1948 +#define MTK_QDMA_DELAY_INT 0x1A0C
1949 +
1950 +/* QDMA Flow Control Register */
1951 +#define MTK_QDMA_FC_THRES 0x1A10
1952 +#define FC_THRES_DROP_MODE BIT(20)
1953 +#define FC_THRES_DROP_EN (7 << 16)
1954 +#define FC_THRES_MIN 0x4444
1955 +
1956 +/* QDMA Interrupt Status Register */
1957 +#define MTK_QMTK_INT_STATUS 0x1A18
1958 +#define MTK_RX_DONE_INT1 BIT(17)
1959 +#define MTK_RX_DONE_INT0 BIT(16)
1960 +#define MTK_TX_DONE_INT3 BIT(3)
1961 +#define MTK_TX_DONE_INT2 BIT(2)
1962 +#define MTK_TX_DONE_INT1 BIT(1)
1963 +#define MTK_TX_DONE_INT0 BIT(0)
1964 +#define MTK_RX_DONE_INT (MTK_RX_DONE_INT0 | MTK_RX_DONE_INT1)
1965 +#define MTK_TX_DONE_INT (MTK_TX_DONE_INT0 | MTK_TX_DONE_INT1 | \
1966 + MTK_TX_DONE_INT2 | MTK_TX_DONE_INT3)
1967 +
1968 +/* QDMA Interrupt Mask Register */
1969 +#define MTK_QDMA_INT_MASK 0x1A1C
1970 +
1971 +/* QDMA HRED2 Register */
1972 +#define MTK_QDMA_HRED2 0x1A44
1973 +
1974 +/* QDMA TX Forward CPU Pointer Register */
1975 +#define MTK_QTX_CTX_PTR 0x1B00
1976 +
1977 +/* QDMA TX Forward DMA Pointer Register */
1978 +#define MTK_QTX_DTX_PTR 0x1B04
1979 +
1980 +/* QDMA TX Release CPU Pointer Register */
1981 +#define MTK_QTX_CRX_PTR 0x1B10
1982 +
1983 +/* QDMA TX Release DMA Pointer Register */
1984 +#define MTK_QTX_DRX_PTR 0x1B14
1985 +
1986 +/* QDMA FQ Head Pointer Register */
1987 +#define MTK_QDMA_FQ_HEAD 0x1B20
1988 +
1989 +/* QDMA FQ Tail Pointer Register */
1990 +#define MTK_QDMA_FQ_TAIL 0x1B24
1991 +
1992 +/* QDMA FQ Free Page Counter Register */
1993 +#define MTK_QDMA_FQ_CNT 0x1B28
1994 +
1995 +/* QDMA FQ Free Page Buffer Length Register */
1996 +#define MTK_QDMA_FQ_BLEN 0x1B2C
1997 +
1998 +/* GMA1 Received Good Byte Count Register */
1999 +#define MTK_GDM1_TX_GBCNT 0x2400
2000 +#define MTK_STAT_OFFSET 0x40
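+/* The hardware keeps one counter block per GDMA port, MTK_STAT_OFFSET bytes
+ * apart; a MAC's counters are presumably read starting at
+ * MTK_GDM1_TX_GBCNT + mac_id * MTK_STAT_OFFSET (see hw_stats->reg_offset).
+ */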
2001 +
2002 +/* QDMA descriptor txd4 */
2003 +#define TX_DMA_CHKSUM (0x7 << 29)
2004 +#define TX_DMA_TSO BIT(28)
2005 +#define TX_DMA_FPORT_SHIFT 25
2006 +#define TX_DMA_FPORT_MASK 0x7
2007 +#define TX_DMA_INS_VLAN BIT(16)
2008 +
2009 +/* QDMA descriptor txd3 */
2010 +#define TX_DMA_OWNER_CPU BIT(31)
2011 +#define TX_DMA_LS0 BIT(30)
2012 +#define TX_DMA_PLEN0(_x) (((_x) & MTK_TX_DMA_BUF_LEN) << 16)
2013 +#define TX_DMA_SWC BIT(14)
2014 +#define TX_DMA_SDL(_x) (((_x) & 0x3fff) << 16)
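+/* Sketch of how a single-segment TX descriptor would be armed with the
+ * txd3 bits above (illustrative only, not a datasheet definition):
+ *
+ *	txd3 = TX_DMA_SWC | TX_DMA_PLEN0(len) | TX_DMA_LS0;
+ *
+ * TX_DMA_OWNER_CPU is left clear while the hardware owns the descriptor.
+ */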
2015 +
2016 +/* QDMA descriptor rxd2 */
2017 +#define RX_DMA_DONE BIT(31)
2018 +#define RX_DMA_PLEN0(_x) (((_x) & 0x3fff) << 16)
2019 +#define RX_DMA_GET_PLEN0(_x) (((_x) >> 16) & 0x3fff)
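+/* RX_DMA_PLEN0() and RX_DMA_GET_PLEN0() are inverses for lengths that fit
+ * in 14 bits: RX_DMA_GET_PLEN0(RX_DMA_PLEN0(n)) == (n & 0x3fff).
+ */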
2020 +
2021 +/* QDMA descriptor rxd3 */
2022 +#define RX_DMA_VID(_x) ((_x) & 0xfff)
2023 +
2024 +/* QDMA descriptor rxd4 */
2025 +#define RX_DMA_L4_VALID BIT(24)
2026 +#define RX_DMA_FPORT_SHIFT 19
2027 +#define RX_DMA_FPORT_MASK 0x7
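+/* The forward-port field in rxd4 identifies the GDMA port a frame came in
+ * on; the RX path can recover it as
+ *
+ *	port = (rxd4 >> RX_DMA_FPORT_SHIFT) & RX_DMA_FPORT_MASK;
+ *
+ * and use it to pick the netdev that owns the packet.
+ */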
2028 +
2029 +/* PHY Indirect Access Control registers */
2030 +#define MTK_PHY_IAC 0x10004
2031 +#define PHY_IAC_ACCESS BIT(31)
2032 +#define PHY_IAC_READ BIT(19)
2033 +#define PHY_IAC_WRITE BIT(18)
2034 +#define PHY_IAC_START BIT(16)
2035 +#define PHY_IAC_ADDR_SHIFT 20
2036 +#define PHY_IAC_REG_SHIFT 25
2037 +#define PHY_IAC_TIMEOUT HZ
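+/* An indirect PHY access is presumably kicked off by writing a command word
+ * such as PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_READ, together with the
+ * address/register fields at the two shifts above, to MTK_PHY_IAC, then
+ * polling until PHY_IAC_ACCESS clears (bounded by PHY_IAC_TIMEOUT); the mdio
+ * helpers earlier in mtk_eth_soc.c implement this sequence.
+ */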
2038 +
2039 +/* Mac control registers */
2040 +#define MTK_MAC_MCR(x) (0x10100 + (x * 0x100))
2041 +#define MAC_MCR_MAX_RX_1536 BIT(24)
2042 +#define MAC_MCR_IPG_CFG (BIT(18) | BIT(16))
2043 +#define MAC_MCR_FORCE_MODE BIT(15)
2044 +#define MAC_MCR_TX_EN BIT(14)
2045 +#define MAC_MCR_RX_EN BIT(13)
2046 +#define MAC_MCR_BACKOFF_EN BIT(9)
2047 +#define MAC_MCR_BACKPR_EN BIT(8)
2048 +#define MAC_MCR_FORCE_RX_FC BIT(5)
2049 +#define MAC_MCR_FORCE_TX_FC BIT(4)
2050 +#define MAC_MCR_SPEED_1000 BIT(3)
2051 +#define MAC_MCR_SPEED_100 BIT(2)
2052 +#define MAC_MCR_FORCE_DPX BIT(1)
2053 +#define MAC_MCR_FORCE_LINK BIT(0)
2054 +#define MAC_MCR_FIXED_LINK (MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG | \
2055 + MAC_MCR_FORCE_MODE | MAC_MCR_TX_EN | \
2056 + MAC_MCR_RX_EN | MAC_MCR_BACKOFF_EN | \
2057 + MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_RX_FC | \
2058 + MAC_MCR_FORCE_TX_FC | MAC_MCR_SPEED_1000 | \
2059 + MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_LINK)
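+/* Taken together, MAC_MCR_FIXED_LINK forces the MAC up at 1000 Mbit/s full
+ * duplex with RX/TX flow control, which suits the fixed-link case of a GMAC
+ * wired straight to an internal switch rather than to a PHY.
+ */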
2060 +
2061 +/* GPIO port control registers for GMAC 2 */
2062 +#define GPIO_OD33_CTRL8 0x4c0
2063 +#define GPIO_BIAS_CTRL 0xed0
2064 +#define GPIO_DRV_SEL10 0xf00
2065 +
2066 +/* ethernet subsystem config register */
2067 +#define ETHSYS_SYSCFG0 0x14
2068 +#define SYSCFG0_GE_MASK 0x3
2069 +#define SYSCFG0_GE_MODE(x, y) (x << (12 + (y * 2)))
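+/* SYSCFG0 carries one 2-bit interface mode field per GMAC starting at bit
+ * 12, so SYSCFG0_GE_MODE(m, 0) lands in bits 13:12 (GMAC1) and
+ * SYSCFG0_GE_MODE(m, 1) in bits 15:14 (GMAC2).
+ */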
2070 +
2071 +struct mtk_rx_dma {
2072 + unsigned int rxd1;
2073 + unsigned int rxd2;
2074 + unsigned int rxd3;
2075 + unsigned int rxd4;
2076 +} __packed __aligned(4);
2077 +
2078 +struct mtk_tx_dma {
2079 + unsigned int txd1;
2080 + unsigned int txd2;
2081 + unsigned int txd3;
2082 + unsigned int txd4;
2083 +} __packed __aligned(4);
2084 +
2085 +struct mtk_eth;
2086 +struct mtk_mac;
2087 +
2088 +/* struct mtk_hw_stats - the structure that holds the traffic statistics.
2089 + * @stats_lock: make sure that stats operations are atomic
2090 + * @reg_offset: the status register offset of the SoC
2091 + * @syncp: the u64_stats_sync that allows consistent reads of the 64 bit counters
2092 + *
2093 + * All of the supported SoCs have hardware counters for traffic statistics.
2094 + * Whenever the status IRQ triggers we can read the latest stats from these
2095 + * counters and store them in this struct.
2096 + */
2097 +struct mtk_hw_stats {
2098 + u64 tx_bytes;
2099 + u64 tx_packets;
2100 + u64 tx_skip;
2101 + u64 tx_collisions;
2102 + u64 rx_bytes;
2103 + u64 rx_packets;
2104 + u64 rx_overflow;
2105 + u64 rx_fcs_errors;
2106 + u64 rx_short_errors;
2107 + u64 rx_long_errors;
2108 + u64 rx_checksum_errors;
2109 + u64 rx_flow_control_packets;
2110 +
2111 + spinlock_t stats_lock;
2112 + u32 reg_offset;
2113 + struct u64_stats_sync syncp;
2114 +};
2115 +
2116 +/* PDMA descriptor can point at 1-2 segments. This enum allows us to track how
2117 + * memory was allocated so that it can be freed properly
2118 + */
2119 +enum mtk_tx_flags {
2120 + MTK_TX_FLAGS_SINGLE0 = 0x01,
2121 + MTK_TX_FLAGS_PAGE0 = 0x02,
2122 +};
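+/* The flags record whether a segment was mapped with dma_map_single() or
+ * via skb_frag_dma_map(), so the unmap path can call the matching
+ * dma_unmap_single()/dma_unmap_page() variant.
+ */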
2123 +
2124 +/* struct mtk_tx_buf - This struct holds the pointers to the memory pointed at
2125 + * by the TX descriptors
2126 + * @skb: The SKB pointer of the packet being sent
+ * @flags: Flags recording how the buffers were DMA mapped (enum mtk_tx_flags)
2127 + * @dma_addr0: The base addr of the first segment
2128 + * @dma_len0: The length of the first segment
2129 + * @dma_addr1: The base addr of the second segment
2130 + * @dma_len1: The length of the second segment
2131 + */
2132 +struct mtk_tx_buf {
2133 + struct sk_buff *skb;
2134 + u32 flags;
2135 + DEFINE_DMA_UNMAP_ADDR(dma_addr0);
2136 + DEFINE_DMA_UNMAP_LEN(dma_len0);
2137 + DEFINE_DMA_UNMAP_ADDR(dma_addr1);
2138 + DEFINE_DMA_UNMAP_LEN(dma_len1);
2139 +};
2140 +
2141 +/* struct mtk_tx_ring - This struct holds info describing a TX ring
2142 + * @dma: The descriptor ring
2143 + * @buf: The memory pointed at by the ring
2144 + * @phys: The physical addr of the descriptor ring
2145 + * @next_free: Pointer to the next free descriptor
2146 + * @last_free: Pointer to the last free descriptor
2147 + * @thresh: The minimum number of free descriptors to keep available
2148 + * @free_count: QDMA uses a linked list. Track how many free descriptors
2149 + * are present
2150 + */
2151 +struct mtk_tx_ring {
2152 + struct mtk_tx_dma *dma;
2153 + struct mtk_tx_buf *buf;
2154 + dma_addr_t phys;
2155 + struct mtk_tx_dma *next_free;
2156 + struct mtk_tx_dma *last_free;
2157 + u16 thresh;
2158 + atomic_t free_count;
2159 +};
2160 +
2161 +/* struct mtk_rx_ring - This struct holds info describing a RX ring
2162 + * @dma: The descriptor ring
2163 + * @data: The memory pointed at by the ring
2164 + * @phys: The physical addr of the descriptor ring
2165 + * @frag_size: The maximum size of each fragment
2166 + * @buf_size: The size of each packet buffer
2167 + * @calc_idx: The current head of ring
2168 + */
2169 +struct mtk_rx_ring {
2170 + struct mtk_rx_dma *dma;
2171 + u8 **data;
2172 + dma_addr_t phys;
2173 + u16 frag_size;
2174 + u16 buf_size;
2175 + u16 calc_idx;
2176 +};
2177 +
2178 +/* currently no SoC has more than 2 macs */
2179 +#define MTK_MAX_DEVS 2
2180 +
2181 +/* struct mtk_eth - This is the main data structure for holding the state
2182 + * of the driver
2183 + * @dev: The device pointer
2184 + * @base: The mapped register i/o base
2185 + * @page_lock: Make sure that register operations are atomic
2186 + * @dummy_dev: we run 2 netdevs on 1 physical DMA ring and need a
2187 + * dummy for NAPI to work
2188 + * @netdev: The netdev instances
2189 + * @mac: Each netdev is linked to a physical MAC
2190 + * @irq: The IRQ that we are using
2191 + * @msg_enable: Ethtool msg level
2192 + * @ethsys: The register map pointing at the range used to setup
2193 + * MII modes
2194 + * @pctl: The register map pointing at the range used to setup
2195 + * GMAC port drive/slew values
2196 + * @dma_refcnt: track how many netdevs are using the DMA engine
2197 + * @tx_ring: Pointer to the memory holding info about the TX ring
2198 + * @rx_ring: Pointer to the memory holding info about the RX ring
2199 + * @rx_napi: The NAPI struct
2200 + * @scratch_ring: Newer SoCs need memory for a second HW managed TX ring
2201 + * @scratch_head: The scratch memory that scratch_ring points to.
2202 + * @clk_ethif: The ethif clock
2203 + * @clk_esw: The switch clock
2204 + * @clk_gp1: The gmac1 clock
2205 + * @clk_gp2: The gmac2 clock
2206 + * @mii_bus: If there is a bus we need to create an instance for it
2207 + */
2208 +
2209 +struct mtk_eth {
2210 + struct device *dev;
2211 + void __iomem *base;
2212 + struct reset_control *rstc;
2213 + spinlock_t page_lock;
2214 + struct net_device dummy_dev;
2215 + struct net_device *netdev[MTK_MAX_DEVS];
2216 + struct mtk_mac *mac[MTK_MAX_DEVS];
2217 + int irq;
2218 + u32 msg_enable;
2219 + unsigned long sysclk;
2220 + struct regmap *ethsys;
2221 + struct regmap *pctl;
2222 + atomic_t dma_refcnt;
2223 + struct mtk_tx_ring tx_ring;
2224 + struct mtk_rx_ring rx_ring;
2225 + struct napi_struct rx_napi;
2226 + struct mtk_tx_dma *scratch_ring;
2227 + void *scratch_head;
2228 + struct clk *clk_ethif;
2229 + struct clk *clk_esw;
2230 + struct clk *clk_gp1;
2231 + struct clk *clk_gp2;
2232 + struct mii_bus *mii_bus;
2233 +};
2234 +
2235 +/* struct mtk_mac - the structure that holds the info about the MACs of the
2236 + * SoC
2237 + * @id: The number of the MAC
2238 + * @of_node: Our devicetree node
2239 + * @hw: Backpointer to our main data structure
2240 + * @hw_stats: Packet statistics counter
2241 + * @phy_dev: The attached PHY if available
2242 + * @pending_work: The workqueue used to reset the dma ring
2243 + */
2244 +struct mtk_mac {
2245 + int id;
2246 + struct device_node *of_node;
2247 + struct mtk_eth *hw;
2248 + struct mtk_hw_stats *hw_stats;
2249 + struct phy_device *phy_dev;
2250 + struct work_struct pending_work;
2251 +};
2252 +
2253 +/* the devicetree match table for the supported SoCs, defined in mtk_eth_soc.c */
2254 +extern const struct of_device_id of_mtk_match[];
2255 +
2256 +/* read the hardware statistics counters of a MAC */
2257 +void mtk_stats_update_mac(struct mtk_mac *mac);
2258 +
2259 +void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg);
2260 +u32 mtk_r32(struct mtk_eth *eth, unsigned reg);
2261 +
2262 +#endif /* MTK_ETH_H */