mediatek: update patches
[openwrt/staging/chunkeey.git] / target / linux / mediatek / patches-4.4 / 0049-net-next-mediatek-add-support-for-MT7623-ethernet.patch
1 From 15f1cb9603c22910f1cd6a8c63fd279a6d5acfd4 Mon Sep 17 00:00:00 2001
2 From: John Crispin <blogic@openwrt.org>
3 Date: Wed, 2 Mar 2016 04:27:10 +0100
4 Subject: [PATCH 49/91] net-next: mediatek: add support for MT7623 ethernet
5
6 Add ethernet support for MediaTek SoCs from the MT7623 family. These have
7 dual GMAC. Depending on the exact version, there might be a built-in
8 Gigabit switch (MT7530). The core does not have the typical DMA ring setup.
9 Instead there is a linked list that we add descriptors to. There is only
10 one linked list that both MACs use together. There is a special field
11 inside the TX descriptors called the VQID. This allows us to assign packets
12 to different internal queues. By using a separate id for each MAC we are
13 able to get deterministic results for BQL. Additionally we need to
14 provide the core with a block of scratch memory that is the same size as
15 the RX ring and data buffer. This is really needed to make the HW datapath
16 work. Although the driver does not support this yet, we still need to
17 assign the memory and tell the core about it for RX to work.
18
19 Signed-off-by: Felix Fietkau <nbd@openwrt.org>
20 Signed-off-by: Michael Lee <igvtee@gmail.com>
21 Signed-off-by: John Crispin <blogic@openwrt.org>
22 ---
23 drivers/net/ethernet/mediatek/mtk_eth_soc.c | 1807 +++++++++++++++++++++++++++
24 drivers/net/ethernet/mediatek/mtk_eth_soc.h | 421 +++++++
25 2 files changed, 2228 insertions(+)
26 create mode 100644 drivers/net/ethernet/mediatek/mtk_eth_soc.c
27 create mode 100644 drivers/net/ethernet/mediatek/mtk_eth_soc.h
28
29 diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
30 new file mode 100644
31 index 0000000..ba3afa5
32 --- /dev/null
33 +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
34 @@ -0,0 +1,1807 @@
35 +/* This program is free software; you can redistribute it and/or modify
36 + * it under the terms of the GNU General Public License as published by
37 + * the Free Software Foundation; version 2 of the License
38 + *
39 + * This program is distributed in the hope that it will be useful,
40 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
41 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
42 + * GNU General Public License for more details.
43 + *
44 + * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
45 + * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
46 + * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
47 + */
48 +
49 +#include <linux/of_device.h>
50 +#include <linux/of_mdio.h>
51 +#include <linux/of_net.h>
52 +#include <linux/mfd/syscon.h>
53 +#include <linux/regmap.h>
54 +#include <linux/clk.h>
55 +#include <linux/if_vlan.h>
56 +#include <linux/reset.h>
57 +#include <linux/tcp.h>
58 +
59 +#include "mtk_eth_soc.h"
60 +
61 +static int mtk_msg_level = -1;
62 +module_param_named(msg_level, mtk_msg_level, int, 0);
63 +MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
64 +
65 +#define MTK_ETHTOOL_STAT(x) { #x, \
66 + offsetof(struct mtk_hw_stats, x) / sizeof(u64) }
67 +
68 +/* strings used by ethtool */
69 +static const struct mtk_ethtool_stats {
70 + char str[ETH_GSTRING_LEN];
71 + u32 offset;
72 +} mtk_ethtool_stats[] = {
73 + MTK_ETHTOOL_STAT(tx_bytes),
74 + MTK_ETHTOOL_STAT(tx_packets),
75 + MTK_ETHTOOL_STAT(tx_skip),
76 + MTK_ETHTOOL_STAT(tx_collisions),
77 + MTK_ETHTOOL_STAT(rx_bytes),
78 + MTK_ETHTOOL_STAT(rx_packets),
79 + MTK_ETHTOOL_STAT(rx_overflow),
80 + MTK_ETHTOOL_STAT(rx_fcs_errors),
81 + MTK_ETHTOOL_STAT(rx_short_errors),
82 + MTK_ETHTOOL_STAT(rx_long_errors),
83 + MTK_ETHTOOL_STAT(rx_checksum_errors),
84 + MTK_ETHTOOL_STAT(rx_flow_control_packets),
85 +};
86 +
87 +void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
88 +{
89 + __raw_writel(val, eth->base + reg);
90 +}
91 +
92 +u32 mtk_r32(struct mtk_eth *eth, unsigned reg)
93 +{
94 + return __raw_readl(eth->base + reg);
95 +}
96 +
97 +static int mtk_mdio_busy_wait(struct mtk_eth *eth)
98 +{
99 + unsigned long t_start = jiffies;
100 +
101 + while (1) {
102 + if (!(mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS))
103 + return 0;
104 + if (time_after(jiffies, t_start + PHY_IAC_TIMEOUT))
105 + break;
106 + usleep_range(10, 20);
107 + }
108 +
109 + dev_err(eth->dev, "mdio: MDIO timeout\n");
110 + return -1;
111 +}
112 +
113 +u32 _mtk_mdio_write(struct mtk_eth *eth, u32 phy_addr,
114 + u32 phy_register, u32 write_data)
115 +{
116 + if (mtk_mdio_busy_wait(eth))
117 + return -1;
118 +
119 + write_data &= 0xffff;
120 +
121 + mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_WRITE |
122 + (phy_register << PHY_IAC_REG_SHIFT) |
123 + (phy_addr << PHY_IAC_ADDR_SHIFT) | write_data,
124 + MTK_PHY_IAC);
125 +
126 + if (mtk_mdio_busy_wait(eth))
127 + return -1;
128 +
129 + return 0;
130 +}
131 +
132 +u32 _mtk_mdio_read(struct mtk_eth *eth, int phy_addr, int phy_reg)
133 +{
134 + u32 d;
135 +
136 + if (mtk_mdio_busy_wait(eth))
137 + return 0xffff;
138 +
139 + mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_READ |
140 + (phy_reg << PHY_IAC_REG_SHIFT) |
141 + (phy_addr << PHY_IAC_ADDR_SHIFT),
142 + MTK_PHY_IAC);
143 +
144 + if (mtk_mdio_busy_wait(eth))
145 + return 0xffff;
146 +
147 + d = mtk_r32(eth, MTK_PHY_IAC) & 0xffff;
148 +
149 + return d;
150 +}
151 +
152 +static int mtk_mdio_write(struct mii_bus *bus, int phy_addr,
153 + int phy_reg, u16 val)
154 +{
155 + struct mtk_eth *eth = bus->priv;
156 +
157 + return _mtk_mdio_write(eth, phy_addr, phy_reg, val);
158 +}
159 +
160 +static int mtk_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg)
161 +{
162 + struct mtk_eth *eth = bus->priv;
163 +
164 + return _mtk_mdio_read(eth, phy_addr, phy_reg);
165 +}
166 +
167 +static void mtk_phy_link_adjust(struct net_device *dev)
168 +{
169 + struct mtk_mac *mac = netdev_priv(dev);
170 + u32 mcr = MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG |
171 + MAC_MCR_FORCE_MODE | MAC_MCR_TX_EN |
172 + MAC_MCR_RX_EN | MAC_MCR_BACKOFF_EN |
173 + MAC_MCR_BACKPR_EN;
174 +
175 + switch (mac->phy_dev->speed) {
176 + case SPEED_1000:
177 + mcr |= MAC_MCR_SPEED_1000;
178 + break;
179 + case SPEED_100:
180 + mcr |= MAC_MCR_SPEED_100;
181 + break;
182 + };
183 +
184 + if (mac->phy_dev->link)
185 + mcr |= MAC_MCR_FORCE_LINK;
186 +
187 + if (mac->phy_dev->duplex)
188 + mcr |= MAC_MCR_FORCE_DPX;
189 +
190 + if (mac->phy_dev->pause)
191 + mcr |= MAC_MCR_FORCE_RX_FC | MAC_MCR_FORCE_TX_FC;
192 +
193 + mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
194 +
195 + if (mac->phy_dev->link)
196 + netif_carrier_on(dev);
197 + else
198 + netif_carrier_off(dev);
199 +}
200 +
201 +static int mtk_phy_connect_node(struct mtk_eth *eth, struct mtk_mac *mac,
202 + struct device_node *phy_node)
203 +{
204 + const __be32 *_addr = NULL;
205 + struct phy_device *phydev;
206 + int phy_mode, addr;
207 +
208 + _addr = of_get_property(phy_node, "reg", NULL);
209 +
210 + if (!_addr || (be32_to_cpu(*_addr) >= 0x20)) {
211 + pr_err("%s: invalid phy address\n", phy_node->name);
212 + return -EINVAL;
213 + }
214 + addr = be32_to_cpu(*_addr);
215 + phy_mode = of_get_phy_mode(phy_node);
216 + if (phy_mode < 0) {
217 + dev_err(eth->dev, "incorrect phy-mode %d\n", phy_mode);
218 + return -EINVAL;
219 + }
220 +
221 + phydev = of_phy_connect(eth->netdev[mac->id], phy_node,
222 + mtk_phy_link_adjust, 0, phy_mode);
223 + if (IS_ERR(phydev)) {
224 + dev_err(eth->dev, "could not connect to PHY\n");
225 + return PTR_ERR(phydev);
226 + }
227 +
228 + dev_info(eth->dev,
229 + "connected mac %d to PHY at %s [uid=%08x, driver=%s]\n",
230 + mac->id, phydev_name(phydev), phydev->phy_id,
231 + phydev->drv->name);
232 +
233 + mac->phy_dev = phydev;
234 +
235 + return 0;
236 +}
237 +
238 +static int mtk_phy_connect(struct mtk_mac *mac)
239 +{
240 + struct mtk_eth *eth = mac->hw;
241 + struct device_node *np;
242 + u32 val, ge_mode;
243 +
244 + np = of_parse_phandle(mac->of_node, "phy-handle", 0);
245 + if (!np)
246 + return -ENODEV;
247 +
248 + switch (of_get_phy_mode(np)) {
249 + case PHY_INTERFACE_MODE_RGMII:
250 + ge_mode = 0;
251 + break;
252 + case PHY_INTERFACE_MODE_MII:
253 + ge_mode = 1;
254 + break;
255 + case PHY_INTERFACE_MODE_RMII:
256 + ge_mode = 2;
257 + break;
258 + default:
259 + dev_err(eth->dev, "invalid phy_mode\n");
260 + return -1;
261 + }
262 +
263 + /* put the gmac into the right mode */
264 + regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
265 + val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id);
266 + val |= SYSCFG0_GE_MODE(ge_mode, mac->id);
267 + regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
268 +
269 + mtk_phy_connect_node(eth, mac, np);
270 + mac->phy_dev->autoneg = AUTONEG_ENABLE;
271 + mac->phy_dev->speed = 0;
272 + mac->phy_dev->duplex = 0;
273 + mac->phy_dev->supported &= PHY_BASIC_FEATURES;
274 + mac->phy_dev->advertising = mac->phy_dev->supported |
275 + ADVERTISED_Autoneg;
276 + phy_start_aneg(mac->phy_dev);
277 +
278 + return 0;
279 +}
280 +
281 +static int mtk_mdio_init(struct mtk_eth *eth)
282 +{
283 + struct device_node *mii_np;
284 + int err;
285 +
286 + mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
287 + if (!mii_np) {
288 + dev_err(eth->dev, "no %s child node found", "mdio-bus");
289 + return -ENODEV;
290 + }
291 +
292 + if (!of_device_is_available(mii_np)) {
293 + err = 0;
294 + goto err_put_node;
295 + }
296 +
297 + eth->mii_bus = mdiobus_alloc();
298 + if (!eth->mii_bus) {
299 + err = -ENOMEM;
300 + goto err_put_node;
301 + }
302 +
303 + eth->mii_bus->name = "mdio";
304 + eth->mii_bus->read = mtk_mdio_read;
305 + eth->mii_bus->write = mtk_mdio_write;
306 + eth->mii_bus->priv = eth;
307 + eth->mii_bus->parent = eth->dev;
308 +
309 + snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%s", mii_np->name);
310 + err = of_mdiobus_register(eth->mii_bus, mii_np);
311 + if (err)
312 + goto err_free_bus;
313 +
314 + return 0;
315 +
316 +err_free_bus:
317 + kfree(eth->mii_bus);
318 +
319 +err_put_node:
320 + of_node_put(mii_np);
321 + eth->mii_bus = NULL;
322 + return err;
323 +}
324 +
325 +static void mtk_mdio_cleanup(struct mtk_eth *eth)
326 +{
327 + if (!eth->mii_bus)
328 + return;
329 +
330 + mdiobus_unregister(eth->mii_bus);
331 + of_node_put(eth->mii_bus->dev.of_node);
332 + kfree(eth->mii_bus);
333 +}
334 +
335 +static inline void mtk_irq_disable(struct mtk_eth *eth, u32 mask)
336 +{
337 + u32 val;
338 +
339 + val = mtk_r32(eth, MTK_QDMA_INT_MASK);
340 + mtk_w32(eth, val & ~mask, MTK_QDMA_INT_MASK);
341 + /* flush write */
342 + mtk_r32(eth, MTK_QDMA_INT_MASK);
343 +}
344 +
345 +static inline void mtk_irq_enable(struct mtk_eth *eth, u32 mask)
346 +{
347 + u32 val;
348 +
349 + val = mtk_r32(eth, MTK_QDMA_INT_MASK);
350 + mtk_w32(eth, val | mask, MTK_QDMA_INT_MASK);
351 + /* flush write */
352 + mtk_r32(eth, MTK_QDMA_INT_MASK);
353 +}
354 +
355 +static int mtk_set_mac_address(struct net_device *dev, void *p)
356 +{
357 + int ret = eth_mac_addr(dev, p);
358 + struct mtk_mac *mac = netdev_priv(dev);
359 + const char *macaddr = dev->dev_addr;
360 + unsigned long flags;
361 +
362 + if (ret)
363 + return ret;
364 +
365 + spin_lock_irqsave(&mac->hw->page_lock, flags);
366 + mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
367 + MTK_GDMA_MAC_ADRH(mac->id));
368 + mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
369 + (macaddr[4] << 8) | macaddr[5],
370 + MTK_GDMA_MAC_ADRL(mac->id));
371 + spin_unlock_irqrestore(&mac->hw->page_lock, flags);
372 +
373 + return 0;
374 +}
375 +
376 +void mtk_stats_update_mac(struct mtk_mac *mac)
377 +{
378 + struct mtk_hw_stats *hw_stats = mac->hw_stats;
379 + unsigned int base = MTK_GDM1_TX_GBCNT;
380 + u64 stats;
381 +
382 + base += hw_stats->reg_offset;
383 +
384 + u64_stats_update_begin(&hw_stats->syncp);
385 +
386 + hw_stats->rx_bytes += mtk_r32(mac->hw, base);
387 + stats = mtk_r32(mac->hw, base + 0x04);
388 + if (stats)
389 + hw_stats->rx_bytes += (stats << 32);
390 + hw_stats->rx_packets += mtk_r32(mac->hw, base + 0x08);
391 + hw_stats->rx_overflow += mtk_r32(mac->hw, base + 0x10);
392 + hw_stats->rx_fcs_errors += mtk_r32(mac->hw, base + 0x14);
393 + hw_stats->rx_short_errors += mtk_r32(mac->hw, base + 0x18);
394 + hw_stats->rx_long_errors += mtk_r32(mac->hw, base + 0x1c);
395 + hw_stats->rx_checksum_errors += mtk_r32(mac->hw, base + 0x20);
396 + hw_stats->rx_flow_control_packets +=
397 + mtk_r32(mac->hw, base + 0x24);
398 + hw_stats->tx_skip += mtk_r32(mac->hw, base + 0x28);
399 + hw_stats->tx_collisions += mtk_r32(mac->hw, base + 0x2c);
400 + hw_stats->tx_bytes += mtk_r32(mac->hw, base + 0x30);
401 + stats = mtk_r32(mac->hw, base + 0x34);
402 + if (stats)
403 + hw_stats->tx_bytes += (stats << 32);
404 + hw_stats->tx_packets += mtk_r32(mac->hw, base + 0x38);
405 + u64_stats_update_end(&hw_stats->syncp);
406 +}
407 +
408 +static void mtk_stats_update(struct mtk_eth *eth)
409 +{
410 + int i;
411 +
412 + for (i = 0; i < MTK_MAC_COUNT; i++) {
413 + if (!eth->mac[i] || !eth->mac[i]->hw_stats)
414 + continue;
415 + if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) {
416 + mtk_stats_update_mac(eth->mac[i]);
417 + spin_unlock(&eth->mac[i]->hw_stats->stats_lock);
418 + }
419 + }
420 +}
421 +
422 +static struct rtnl_link_stats64 *mtk_get_stats64(struct net_device *dev,
423 + struct rtnl_link_stats64 *storage)
424 +{
425 + struct mtk_mac *mac = netdev_priv(dev);
426 + struct mtk_hw_stats *hw_stats = mac->hw_stats;
427 + unsigned int start;
428 +
429 + if (netif_running(dev) && netif_device_present(dev)) {
430 + if (spin_trylock(&hw_stats->stats_lock)) {
431 + mtk_stats_update_mac(mac);
432 + spin_unlock(&hw_stats->stats_lock);
433 + }
434 + }
435 +
436 + do {
437 + start = u64_stats_fetch_begin_irq(&hw_stats->syncp);
438 + storage->rx_packets = hw_stats->rx_packets;
439 + storage->tx_packets = hw_stats->tx_packets;
440 + storage->rx_bytes = hw_stats->rx_bytes;
441 + storage->tx_bytes = hw_stats->tx_bytes;
442 + storage->collisions = hw_stats->tx_collisions;
443 + storage->rx_length_errors = hw_stats->rx_short_errors +
444 + hw_stats->rx_long_errors;
445 + storage->rx_over_errors = hw_stats->rx_overflow;
446 + storage->rx_crc_errors = hw_stats->rx_fcs_errors;
447 + storage->rx_errors = hw_stats->rx_checksum_errors;
448 + storage->tx_aborted_errors = hw_stats->tx_skip;
449 + } while (u64_stats_fetch_retry_irq(&hw_stats->syncp, start));
450 +
451 + storage->tx_errors = dev->stats.tx_errors;
452 + storage->rx_dropped = dev->stats.rx_dropped;
453 + storage->tx_dropped = dev->stats.tx_dropped;
454 +
455 + return storage;
456 +}
457 +
458 +static inline int mtk_max_frag_size(int mtu)
459 +{
460 + /* make sure buf_size will be at least MTK_MAX_RX_LENGTH */
461 + if (mtu + MTK_RX_ETH_HLEN < MTK_MAX_RX_LENGTH)
462 + mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;
463 +
464 + return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) +
465 + SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
466 +}
467 +
468 +static inline int mtk_max_buf_size(int frag_size)
469 +{
470 + int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN -
471 + SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
472 +
473 + WARN_ON(buf_size < MTK_MAX_RX_LENGTH);
474 +
475 + return buf_size;
476 +}
477 +
478 +static inline void mtk_rx_get_desc(struct mtk_rx_dma *rxd,
479 + struct mtk_rx_dma *dma_rxd)
480 +{
481 + rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
482 + rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
483 + rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
484 + rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
485 +}
486 +
487 +/* the qdma core needs scratch memory to be setup */
488 +static int mtk_init_fq_dma(struct mtk_eth *eth)
489 +{
490 + unsigned int phy_ring_head, phy_ring_tail;
491 + int cnt = MTK_DMA_SIZE;
492 + dma_addr_t dma_addr;
493 + int i;
494 +
495 + eth->scratch_ring = dma_alloc_coherent(eth->dev,
496 + cnt * sizeof(struct mtk_tx_dma),
497 + &phy_ring_head,
498 + GFP_ATOMIC | __GFP_ZERO);
499 + if (unlikely(!eth->scratch_ring))
500 + return -ENOMEM;
501 +
502 + eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE,
503 + GFP_KERNEL);
504 + dma_addr = dma_map_single(eth->dev,
505 + eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
506 + DMA_FROM_DEVICE);
507 + if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
508 + return -ENOMEM;
509 +
510 + memset(eth->scratch_ring, 0x0, sizeof(struct mtk_tx_dma) * cnt);
511 + phy_ring_tail = phy_ring_head +
512 + (sizeof(struct mtk_tx_dma) * (cnt - 1));
513 +
514 + for (i = 0; i < cnt; i++) {
515 + eth->scratch_ring[i].txd1 =
516 + (dma_addr + (i * MTK_QDMA_PAGE_SIZE));
517 + if (i < cnt - 1)
518 + eth->scratch_ring[i].txd2 = (phy_ring_head +
519 + ((i + 1) * sizeof(struct mtk_tx_dma)));
520 + eth->scratch_ring[i].txd3 = TX_DMA_SDL(MTK_QDMA_PAGE_SIZE);
521 + }
522 +
523 + mtk_w32(eth, phy_ring_head, MTK_QDMA_FQ_HEAD);
524 + mtk_w32(eth, phy_ring_tail, MTK_QDMA_FQ_TAIL);
525 + mtk_w32(eth, (cnt << 16) | cnt, MTK_QDMA_FQ_CNT);
526 + mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, MTK_QDMA_FQ_BLEN);
527 +
528 + return 0;
529 +}
530 +
531 +static inline void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
532 +{
533 + void *ret = ring->dma;
534 +
535 + return ret + (desc - ring->phys);
536 +}
537 +
538 +static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
539 + struct mtk_tx_dma *txd)
540 +{
541 + int idx = txd - ring->dma;
542 +
543 + return &ring->buf[idx];
544 +}
545 +
546 +static void mtk_tx_unmap(struct device *dev, struct mtk_tx_buf *tx_buf)
547 +{
548 + if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
549 + dma_unmap_single(dev,
550 + dma_unmap_addr(tx_buf, dma_addr0),
551 + dma_unmap_len(tx_buf, dma_len0),
552 + DMA_TO_DEVICE);
553 + } else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
554 + dma_unmap_page(dev,
555 + dma_unmap_addr(tx_buf, dma_addr0),
556 + dma_unmap_len(tx_buf, dma_len0),
557 + DMA_TO_DEVICE);
558 + }
559 + tx_buf->flags = 0;
560 + if (tx_buf->skb &&
561 + (tx_buf->skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC))
562 + dev_kfree_skb_any(tx_buf->skb);
563 + tx_buf->skb = NULL;
564 +}
565 +
566 +static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
567 + int tx_num, struct mtk_tx_ring *ring, bool gso)
568 +{
569 + struct mtk_mac *mac = netdev_priv(dev);
570 + struct mtk_eth *eth = mac->hw;
571 + struct mtk_tx_dma *itxd, *txd;
572 + struct mtk_tx_buf *tx_buf;
573 + unsigned long flags;
574 + dma_addr_t mapped_addr;
575 + unsigned int nr_frags;
576 + int i, n_desc = 1;
577 + u32 txd4 = 0;
578 +
579 + itxd = ring->next_free;
580 + if (itxd == ring->last_free)
581 + return -ENOMEM;
582 +
583 + /* set the forward port */
584 + txd4 |= (mac->id + 1) << TX_DMA_FPORT_SHIFT;
585 +
586 + tx_buf = mtk_desc_to_tx_buf(ring, itxd);
587 + memset(tx_buf, 0, sizeof(*tx_buf));
588 +
589 + if (gso)
590 + txd4 |= TX_DMA_TSO;
591 +
592 + /* TX Checksum offload */
593 + if (skb->ip_summed == CHECKSUM_PARTIAL)
594 + txd4 |= TX_DMA_CHKSUM;
595 +
596 + /* VLAN header offload */
597 + if (skb_vlan_tag_present(skb))
598 + txd4 |= TX_DMA_INS_VLAN | skb_vlan_tag_get(skb);
599 +
600 + mapped_addr = dma_map_single(&dev->dev, skb->data,
601 + skb_headlen(skb), DMA_TO_DEVICE);
602 + if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
603 + return -ENOMEM;
604 +
605 + /* normally we can rely on the stack not calling this more than once,
606 + * however we have 2 queues running on the same ring so we need to lock
607 + * the ring access
608 + */
609 + spin_lock_irqsave(&eth->page_lock, flags);
610 + WRITE_ONCE(itxd->txd1, mapped_addr);
611 + tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
612 + dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
613 + dma_unmap_len_set(tx_buf, dma_len0, skb_headlen(skb));
614 +
615 + /* TX SG offload */
616 + txd = itxd;
617 + nr_frags = skb_shinfo(skb)->nr_frags;
618 + for (i = 0; i < nr_frags; i++) {
619 + struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
620 + unsigned int offset = 0;
621 + int frag_size = skb_frag_size(frag);
622 +
623 + while (frag_size) {
624 + bool last_frag = false;
625 + unsigned int frag_map_size;
626 +
627 + txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
628 + if (txd == ring->last_free)
629 + goto err_dma;
630 +
631 + n_desc++;
632 + frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN);
633 + mapped_addr = skb_frag_dma_map(&dev->dev, frag, offset,
634 + frag_map_size,
635 + DMA_TO_DEVICE);
636 + if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
637 + goto err_dma;
638 +
639 + if (i == nr_frags - 1 &&
640 + (frag_size - frag_map_size) == 0)
641 + last_frag = true;
642 +
643 + WRITE_ONCE(txd->txd1, mapped_addr);
644 + WRITE_ONCE(txd->txd3, (TX_DMA_SWC |
645 + TX_DMA_PLEN0(frag_map_size) |
646 + last_frag * TX_DMA_LS0) |
647 + mac->id);
648 + WRITE_ONCE(txd->txd4, 0);
649 +
650 + tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
651 + tx_buf = mtk_desc_to_tx_buf(ring, txd);
652 + memset(tx_buf, 0, sizeof(*tx_buf));
653 +
654 + tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
655 + dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
656 + dma_unmap_len_set(tx_buf, dma_len0, frag_map_size);
657 + frag_size -= frag_map_size;
658 + offset += frag_map_size;
659 + }
660 + }
661 +
662 + /* store skb to cleanup */
663 + tx_buf->skb = skb;
664 +
665 + WRITE_ONCE(itxd->txd4, txd4);
666 + WRITE_ONCE(itxd->txd3, (TX_DMA_SWC | TX_DMA_PLEN0(skb_headlen(skb)) |
667 + (!nr_frags * TX_DMA_LS0)));
668 +
669 + spin_unlock_irqrestore(&eth->page_lock, flags);
670 +
671 + netdev_sent_queue(dev, skb->len);
672 + skb_tx_timestamp(skb);
673 +
674 + ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
675 + atomic_sub(n_desc, &ring->free_count);
676 +
677 + /* make sure that all changes to the dma ring are flushed before we
678 + * continue
679 + */
680 + wmb();
681 +
682 + if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) || !skb->xmit_more)
683 + mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR);
684 +
685 + return 0;
686 +
687 +err_dma:
688 + do {
689 + tx_buf = mtk_desc_to_tx_buf(ring, txd);
690 +
691 + /* unmap dma */
692 + mtk_tx_unmap(&dev->dev, tx_buf);
693 +
694 + itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
695 + itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
696 + } while (itxd != txd);
697 +
698 + return -ENOMEM;
699 +}
700 +
701 +static inline int mtk_cal_txd_req(struct sk_buff *skb)
702 +{
703 + int i, nfrags;
704 + struct skb_frag_struct *frag;
705 +
706 + nfrags = 1;
707 + if (skb_is_gso(skb)) {
708 + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
709 + frag = &skb_shinfo(skb)->frags[i];
710 + nfrags += DIV_ROUND_UP(frag->size, MTK_TX_DMA_BUF_LEN);
711 + }
712 + } else {
713 + nfrags += skb_shinfo(skb)->nr_frags;
714 + }
715 +
716 + return DIV_ROUND_UP(nfrags, 2);
717 +}
718 +
719 +static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
720 +{
721 + struct mtk_mac *mac = netdev_priv(dev);
722 + struct mtk_eth *eth = mac->hw;
723 + struct mtk_tx_ring *ring = &eth->tx_ring;
724 + struct net_device_stats *stats = &dev->stats;
725 + bool gso = false;
726 + int tx_num;
727 +
728 + tx_num = mtk_cal_txd_req(skb);
729 + if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
730 + netif_stop_queue(dev);
731 + netif_err(eth, tx_queued, dev,
732 + "Tx Ring full when queue awake!\n");
733 + return NETDEV_TX_BUSY;
734 + }
735 +
736 + /* TSO: fill MSS info in tcp checksum field */
737 + if (skb_is_gso(skb)) {
738 + if (skb_cow_head(skb, 0)) {
739 + netif_warn(eth, tx_err, dev,
740 + "GSO expand head fail.\n");
741 + goto drop;
742 + }
743 +
744 + if (skb_shinfo(skb)->gso_type &
745 + (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
746 + gso = true;
747 + tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size);
748 + }
749 + }
750 +
751 + if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0)
752 + goto drop;
753 +
754 + if (unlikely(atomic_read(&ring->free_count) <= ring->thresh)) {
755 + netif_stop_queue(dev);
756 + if (unlikely(atomic_read(&ring->free_count) >
757 + ring->thresh))
758 + netif_wake_queue(dev);
759 + }
760 +
761 + return NETDEV_TX_OK;
762 +
763 +drop:
764 + stats->tx_dropped++;
765 + dev_kfree_skb(skb);
766 + return NETDEV_TX_OK;
767 +}
768 +
769 +static int mtk_poll_rx(struct napi_struct *napi, int budget,
770 + struct mtk_eth *eth, u32 rx_intr)
771 +{
772 + struct mtk_rx_ring *ring = &eth->rx_ring;
773 + int idx = ring->calc_idx;
774 + struct sk_buff *skb;
775 + u8 *data, *new_data;
776 + struct mtk_rx_dma *rxd, trxd;
777 + int done = 0;
778 +
779 + while (done < budget) {
780 + struct net_device *netdev;
781 + unsigned int pktlen;
782 + dma_addr_t dma_addr;
783 + int mac = 0;
784 +
785 + idx = NEXT_RX_DESP_IDX(idx);
786 + rxd = &ring->dma[idx];
787 + data = ring->data[idx];
788 +
789 + mtk_rx_get_desc(&trxd, rxd);
790 + if (!(trxd.rxd2 & RX_DMA_DONE))
791 + break;
792 +
793 + /* find out which mac the packet come from. values start at 1 */
794 + mac = (trxd.rxd4 >> RX_DMA_FPORT_SHIFT) &
795 + RX_DMA_FPORT_MASK;
796 + mac--;
797 +
798 + netdev = eth->netdev[mac];
799 +
800 + /* alloc new buffer */
801 + new_data = napi_alloc_frag(ring->frag_size);
802 + if (unlikely(!new_data)) {
803 + netdev->stats.rx_dropped++;
804 + goto release_desc;
805 + }
806 + dma_addr = dma_map_single(&eth->netdev[mac]->dev,
807 + new_data + NET_SKB_PAD,
808 + ring->buf_size,
809 + DMA_FROM_DEVICE);
810 + if (unlikely(dma_mapping_error(&netdev->dev, dma_addr))) {
811 + skb_free_frag(new_data);
812 + goto release_desc;
813 + }
814 +
815 + /* receive data */
816 + skb = build_skb(data, ring->frag_size);
817 + if (unlikely(!skb)) {
818 + put_page(virt_to_head_page(new_data));
819 + goto release_desc;
820 + }
821 + skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
822 +
823 + dma_unmap_single(&netdev->dev, trxd.rxd1,
824 + ring->buf_size, DMA_FROM_DEVICE);
825 + pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
826 + skb->dev = netdev;
827 + skb_put(skb, pktlen);
828 + if (trxd.rxd4 & RX_DMA_L4_VALID)
829 + skb->ip_summed = CHECKSUM_UNNECESSARY;
830 + else
831 + skb_checksum_none_assert(skb);
832 + skb->protocol = eth_type_trans(skb, netdev);
833 +
834 + if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX &&
835 + RX_DMA_VID(trxd.rxd3))
836 + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
837 + RX_DMA_VID(trxd.rxd3));
838 + napi_gro_receive(napi, skb);
839 +
840 + ring->data[idx] = new_data;
841 + rxd->rxd1 = (unsigned int)dma_addr;
842 +
843 +release_desc:
844 + rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size);
845 +
846 + ring->calc_idx = idx;
847 + /* make sure that all changes to the dma ring are flushed before
848 + * we continue
849 + */
850 + wmb();
851 + mtk_w32(eth, ring->calc_idx, MTK_QRX_CRX_IDX0);
852 + done++;
853 + }
854 +
855 + if (done < budget)
856 + mtk_w32(eth, rx_intr, MTK_QMTK_INT_STATUS);
857 +
858 + return done;
859 +}
860 +
861 +static int mtk_poll_tx(struct mtk_eth *eth, int budget, bool *tx_again)
862 +{
863 + struct mtk_tx_ring *ring = &eth->tx_ring;
864 + struct mtk_tx_dma *desc;
865 + struct sk_buff *skb;
866 + struct mtk_tx_buf *tx_buf;
867 + int total = 0, done[MTK_MAX_DEVS];
868 + unsigned int bytes[MTK_MAX_DEVS];
869 + u32 cpu, dma;
870 + static int condition;
871 + int i;
872 +
873 + memset(done, 0, sizeof(done));
874 + memset(bytes, 0, sizeof(bytes));
875 +
876 + cpu = mtk_r32(eth, MTK_QTX_CRX_PTR);
877 + dma = mtk_r32(eth, MTK_QTX_DRX_PTR);
878 +
879 + desc = mtk_qdma_phys_to_virt(ring, cpu);
880 +
881 + while ((cpu != dma) && budget) {
882 + u32 next_cpu = desc->txd2;
883 + int mac;
884 +
885 + desc = mtk_qdma_phys_to_virt(ring, desc->txd2);
886 + if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
887 + break;
888 +
889 + mac = (desc->txd4 >> TX_DMA_FPORT_SHIFT) &
890 + TX_DMA_FPORT_MASK;
891 + mac--;
892 +
893 + tx_buf = mtk_desc_to_tx_buf(ring, desc);
894 + skb = tx_buf->skb;
895 + if (!skb) {
896 + condition = 1;
897 + break;
898 + }
899 +
900 + if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
901 + bytes[mac] += skb->len;
902 + done[mac]++;
903 + budget--;
904 + }
905 + mtk_tx_unmap(eth->dev, tx_buf);
906 +
907 + ring->last_free->txd2 = next_cpu;
908 + ring->last_free = desc;
909 + atomic_inc(&ring->free_count);
910 +
911 + cpu = next_cpu;
912 + }
913 +
914 + mtk_w32(eth, cpu, MTK_QTX_CRX_PTR);
915 +
916 + for (i = 0; i < MTK_MAC_COUNT; i++) {
917 + if (!eth->netdev[i] || !done[i])
918 + continue;
919 + netdev_completed_queue(eth->netdev[i], done[i], bytes[i]);
920 + total += done[i];
921 + }
922 +
923 + /* read hw index again make sure no new tx packet */
924 + if (cpu != dma || cpu != mtk_r32(eth, MTK_QTX_DRX_PTR))
925 + *tx_again = true;
926 + else
927 + mtk_w32(eth, MTK_TX_DONE_INT, MTK_QMTK_INT_STATUS);
928 +
929 + if (!total)
930 + return 0;
931 +
932 + for (i = 0; i < MTK_MAC_COUNT; i++) {
933 + if (!eth->netdev[i] ||
934 + unlikely(!netif_queue_stopped(eth->netdev[i])))
935 + continue;
936 + if (atomic_read(&ring->free_count) > ring->thresh)
937 + netif_wake_queue(eth->netdev[i]);
938 + }
939 +
940 + return total;
941 +}
942 +
943 +static int mtk_poll(struct napi_struct *napi, int budget)
944 +{
945 + struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
946 + u32 status, status2, mask, tx_intr, rx_intr, status_intr;
947 + int tx_done, rx_done;
948 + bool tx_again = false;
949 +
950 + status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
951 + status2 = mtk_r32(eth, MTK_INT_STATUS2);
952 + tx_intr = MTK_TX_DONE_INT;
953 + rx_intr = MTK_RX_DONE_INT;
954 + status_intr = (MTK_GDM1_AF | MTK_GDM2_AF);
955 + tx_done = 0;
956 + rx_done = 0;
957 + tx_again = 0;
958 +
959 + if (status & tx_intr)
960 + tx_done = mtk_poll_tx(eth, budget, &tx_again);
961 +
962 + if (status & rx_intr)
963 + rx_done = mtk_poll_rx(napi, budget, eth, rx_intr);
964 +
965 + if (unlikely(status2 & status_intr)) {
966 + mtk_stats_update(eth);
967 + mtk_w32(eth, status_intr, MTK_INT_STATUS2);
968 + }
969 +
970 + if (unlikely(netif_msg_intr(eth))) {
971 + mask = mtk_r32(eth, MTK_QDMA_INT_MASK);
972 + netdev_info(eth->netdev[0],
973 + "done tx %d, rx %d, intr 0x%08x/0x%x\n",
974 + tx_done, rx_done, status, mask);
975 + }
976 +
977 + if (tx_again || rx_done == budget)
978 + return budget;
979 +
980 + status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
981 + if (status & (tx_intr | rx_intr))
982 + return budget;
983 +
984 + napi_complete(napi);
985 + mtk_irq_enable(eth, tx_intr | rx_intr);
986 +
987 + return rx_done;
988 +}
989 +
990 +static int mtk_tx_alloc(struct mtk_eth *eth)
991 +{
992 + struct mtk_tx_ring *ring = &eth->tx_ring;
993 + int i, sz = sizeof(*ring->dma);
994 +
995 + ring->buf = kcalloc(MTK_DMA_SIZE, sizeof(*ring->buf),
996 + GFP_KERNEL);
997 + if (!ring->buf)
998 + goto no_tx_mem;
999 +
1000 + ring->dma = dma_alloc_coherent(eth->dev,
1001 + MTK_DMA_SIZE * sz,
1002 + &ring->phys,
1003 + GFP_ATOMIC | __GFP_ZERO);
1004 + if (!ring->dma)
1005 + goto no_tx_mem;
1006 +
1007 + memset(ring->dma, 0, MTK_DMA_SIZE * sz);
1008 + for (i = 0; i < MTK_DMA_SIZE; i++) {
1009 + int next = (i + 1) % MTK_DMA_SIZE;
1010 + u32 next_ptr = ring->phys + next * sz;
1011 +
1012 + ring->dma[i].txd2 = next_ptr;
1013 + ring->dma[i].txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
1014 + }
1015 +
1016 + atomic_set(&ring->free_count, MTK_DMA_SIZE - 2);
1017 + ring->next_free = &ring->dma[0];
1018 + ring->last_free = &ring->dma[MTK_DMA_SIZE - 2];
1019 + ring->thresh = max((unsigned long)MTK_DMA_SIZE >> 2,
1020 + MAX_SKB_FRAGS);
1021 +
1022 + /* make sure that all changes to the dma ring are flushed before we
1023 + * continue
1024 + */
1025 + wmb();
1026 +
1027 + mtk_w32(eth, ring->phys, MTK_QTX_CTX_PTR);
1028 + mtk_w32(eth, ring->phys, MTK_QTX_DTX_PTR);
1029 + mtk_w32(eth,
1030 + ring->phys + ((MTK_DMA_SIZE - 1) * sz),
1031 + MTK_QTX_CRX_PTR);
1032 + mtk_w32(eth,
1033 + ring->phys + ((MTK_DMA_SIZE - 1) * sz),
1034 + MTK_QTX_DRX_PTR);
1035 +
1036 + return 0;
1037 +
1038 +no_tx_mem:
1039 + return -ENOMEM;
1040 +}
1041 +
1042 +static void mtk_tx_clean(struct mtk_eth *eth)
1043 +{
1044 + struct mtk_tx_ring *ring = &eth->tx_ring;
1045 + int i;
1046 +
1047 + if (ring->buf) {
1048 + for (i = 0; i < MTK_DMA_SIZE; i++)
1049 + mtk_tx_unmap(eth->dev, &ring->buf[i]);
1050 + kfree(ring->buf);
1051 + ring->buf = NULL;
1052 + }
1053 +
1054 + if (ring->dma) {
1055 + dma_free_coherent(eth->dev,
1056 + MTK_DMA_SIZE * sizeof(*ring->dma),
1057 + ring->dma,
1058 + ring->phys);
1059 + ring->dma = NULL;
1060 + }
1061 +}
1062 +
1063 +static int mtk_rx_alloc(struct mtk_eth *eth)
1064 +{
1065 + struct mtk_rx_ring *ring = &eth->rx_ring;
1066 + int i;
1067 +
1068 + ring->frag_size = mtk_max_frag_size(ETH_DATA_LEN);
1069 + ring->buf_size = mtk_max_buf_size(ring->frag_size);
1070 + ring->data = kcalloc(MTK_DMA_SIZE, sizeof(*ring->data),
1071 + GFP_KERNEL);
1072 + if (!ring->data)
1073 + return -ENOMEM;
1074 +
1075 + for (i = 0; i < MTK_DMA_SIZE; i++) {
1076 + ring->data[i] = netdev_alloc_frag(ring->frag_size);
1077 + if (!ring->data[i])
1078 + return -ENOMEM;
1079 + }
1080 +
1081 + ring->dma = dma_alloc_coherent(eth->dev,
1082 + MTK_DMA_SIZE * sizeof(*ring->dma),
1083 + &ring->phys,
1084 + GFP_ATOMIC | __GFP_ZERO);
1085 + if (!ring->dma)
1086 + return -ENOMEM;
1087 +
1088 + for (i = 0; i < MTK_DMA_SIZE; i++) {
1089 + dma_addr_t dma_addr = dma_map_single(eth->dev,
1090 + ring->data[i] + NET_SKB_PAD,
1091 + ring->buf_size,
1092 + DMA_FROM_DEVICE);
1093 + if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
1094 + return -ENOMEM;
1095 + ring->dma[i].rxd1 = (unsigned int)dma_addr;
1096 +
1097 + ring->dma[i].rxd2 = RX_DMA_PLEN0(ring->buf_size);
1098 + }
1099 + ring->calc_idx = MTK_DMA_SIZE - 1;
1100 + /* make sure that all changes to the dma ring are flushed before we
1101 + * continue
1102 + */
1103 + wmb();
1104 +
1105 + mtk_w32(eth, eth->rx_ring.phys, MTK_QRX_BASE_PTR0);
1106 + mtk_w32(eth, MTK_DMA_SIZE, MTK_QRX_MAX_CNT0);
1107 + mtk_w32(eth, eth->rx_ring.calc_idx, MTK_QRX_CRX_IDX0);
1108 + mtk_w32(eth, MTK_PST_DRX_IDX0, MTK_QDMA_RST_IDX);
1109 + mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES, MTK_QTX_CFG(0));
1110 +
1111 + return 0;
1112 +}
1113 +
1114 +static void mtk_rx_clean(struct mtk_eth *eth)
1115 +{
1116 + struct mtk_rx_ring *ring = &eth->rx_ring;
1117 + int i;
1118 +
1119 + if (ring->data && ring->dma) {
1120 + for (i = 0; i < MTK_DMA_SIZE; i++) {
1121 + if (!ring->data[i])
1122 + continue;
1123 + if (!ring->dma[i].rxd1)
1124 + continue;
1125 + dma_unmap_single(eth->dev,
1126 + ring->dma[i].rxd1,
1127 + ring->buf_size,
1128 + DMA_FROM_DEVICE);
1129 + skb_free_frag(ring->data[i]);
1130 + }
1131 + kfree(ring->data);
1132 + ring->data = NULL;
1133 + }
1134 +
1135 + if (ring->dma) {
1136 + dma_free_coherent(eth->dev,
1137 + MTK_DMA_SIZE * sizeof(*ring->dma),
1138 + ring->dma,
1139 + ring->phys);
1140 + ring->dma = NULL;
1141 + }
1142 +}
1143 +
1144 +/* wait for DMA to finish whatever it is doing before we start using it again */
1145 +static int mtk_dma_busy_wait(struct mtk_eth *eth)
1146 +{
1147 + unsigned long t_start = jiffies;
1148 +
1149 + while (1) {
1150 + if (!(mtk_r32(eth, MTK_QDMA_GLO_CFG) &
1151 + (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)))
1152 + return 0;
1153 + if (time_after(jiffies, t_start + MTK_DMA_BUSY_TIMEOUT))
1154 + break;
1155 + }
1156 +
1157 + dev_err(eth->dev, "DMA init timeout\n");
1158 + return -1;
1159 +}
1160 +
1161 +static int mtk_dma_init(struct mtk_eth *eth)
1162 +{
1163 + int err;
1164 +
1165 + if (mtk_dma_busy_wait(eth))
1166 + return -EBUSY;
1167 +
1168 + /* QDMA needs scratch memory for internal reordering of the
1169 + * descriptors
1170 + */
1171 + err = mtk_init_fq_dma(eth);
1172 + if (err)
1173 + return err;
1174 +
1175 + err = mtk_tx_alloc(eth);
1176 + if (err)
1177 + return err;
1178 +
1179 + err = mtk_rx_alloc(eth);
1180 + if (err)
1181 + return err;
1182 +
1183 + /* Enable random early drop and set drop threshold automatically */
1184 + mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN | FC_THRES_MIN,
1185 + MTK_QDMA_FC_THRES);
1186 + mtk_w32(eth, 0x0, MTK_QDMA_HRED2);
1187 +
1188 + return 0;
1189 +}
1190 +
1191 +static void mtk_dma_free(struct mtk_eth *eth)
1192 +{
1193 + int i;
1194 +
1195 + for (i = 0; i < MTK_MAC_COUNT; i++)
1196 + if (eth->netdev[i])
1197 + netdev_reset_queue(eth->netdev[i]);
1198 + mtk_tx_clean(eth);
1199 + mtk_rx_clean(eth);
1200 + kfree(eth->scratch_head);
1201 +}
1202 +
1203 +static void mtk_tx_timeout(struct net_device *dev)
1204 +{
1205 + struct mtk_mac *mac = netdev_priv(dev);
1206 + struct mtk_eth *eth = mac->hw;
1207 +
1208 + eth->netdev[mac->id]->stats.tx_errors++;
1209 + netif_err(eth, tx_err, dev,
1210 + "transmit timed out\n");
1211 + schedule_work(&mac->pending_work);
1212 +}
1213 +
1214 +static irqreturn_t mtk_handle_irq(int irq, void *_eth)
1215 +{
1216 + struct mtk_eth *eth = _eth;
1217 + u32 status;
1218 +
1219 + status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
1220 + if (unlikely(!status))
1221 + return IRQ_NONE;
1222 +
1223 + if (likely(status & (MTK_RX_DONE_INT | MTK_TX_DONE_INT))) {
1224 + if (likely(napi_schedule_prep(&eth->rx_napi)))
1225 + __napi_schedule(&eth->rx_napi);
1226 + } else {
1227 + mtk_w32(eth, status, MTK_QMTK_INT_STATUS);
1228 + }
1229 + mtk_irq_disable(eth, (MTK_RX_DONE_INT | MTK_TX_DONE_INT));
1230 +
1231 + return IRQ_HANDLED;
1232 +}
1233 +
1234 +#ifdef CONFIG_NET_POLL_CONTROLLER
1235 +static void mtk_poll_controller(struct net_device *dev)
1236 +{
1237 +	struct mtk_mac *mac = netdev_priv(dev);
1238 +	struct mtk_eth *eth = mac->hw;
1239 +	u32 int_mask = MTK_TX_DONE_INT | MTK_RX_DONE_INT;
1240 +
1241 +	mtk_irq_disable(eth, int_mask);
1242 +	mtk_handle_irq(dev->irq, eth);
1243 +	mtk_irq_enable(eth, int_mask);
1244 +}
1245 +#endif
1246 +
1247 +static int mtk_start_dma(struct mtk_eth *eth)
1248 +{
1249 + int err;
1250 +
1251 + err = mtk_dma_init(eth);
1252 + if (err) {
1253 + mtk_dma_free(eth);
1254 + return err;
1255 + }
1256 +
1257 + mtk_w32(eth,
1258 + MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN |
1259 + MTK_RX_2B_OFFSET | MTK_DMA_SIZE_16DWORDS |
1260 + MTK_RX_BT_32DWORDS,
1261 + MTK_QDMA_GLO_CFG);
1262 +
1263 + return 0;
1264 +}
1265 +
1266 +static int mtk_open(struct net_device *dev)
1267 +{
1268 + struct mtk_mac *mac = netdev_priv(dev);
1269 + struct mtk_eth *eth = mac->hw;
1270 +
1271 + /* we run 2 netdevs on the same dma ring so we only bring it up once */
1272 + if (!atomic_read(&eth->dma_refcnt)) {
1273 + int err = mtk_start_dma(eth);
1274 +
1275 + if (err)
1276 + return err;
1277 +
1278 + napi_enable(&eth->rx_napi);
1279 + mtk_irq_enable(eth, MTK_TX_DONE_INT | MTK_RX_DONE_INT);
1280 + }
1281 + atomic_inc(&eth->dma_refcnt);
1282 +
1283 + phy_start(mac->phy_dev);
1284 + netif_start_queue(dev);
1285 +
1286 + return 0;
1287 +}
1288 +
1289 +static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg)
1290 +{
1291 + unsigned long flags;
1292 + u32 val;
1293 + int i;
1294 +
1295 + /* stop the dma engine */
1296 + spin_lock_irqsave(&eth->page_lock, flags);
1297 + val = mtk_r32(eth, glo_cfg);
1298 + mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN),
1299 + glo_cfg);
1300 + spin_unlock_irqrestore(&eth->page_lock, flags);
1301 +
1302 + /* wait for dma stop */
1303 + for (i = 0; i < 10; i++) {
1304 + val = mtk_r32(eth, glo_cfg);
1305 + if (val & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)) {
1306 + msleep(20);
1307 + continue;
1308 + }
1309 + break;
1310 + }
1311 +}
1312 +
1313 +static int mtk_stop(struct net_device *dev)
1314 +{
1315 + struct mtk_mac *mac = netdev_priv(dev);
1316 + struct mtk_eth *eth = mac->hw;
1317 +
1318 + netif_tx_disable(dev);
1319 + phy_stop(mac->phy_dev);
1320 +
1321 + /* only shutdown DMA if this is the last user */
1322 + if (!atomic_dec_and_test(&eth->dma_refcnt))
1323 + return 0;
1324 +
1325 + mtk_irq_disable(eth, MTK_TX_DONE_INT | MTK_RX_DONE_INT);
1326 + napi_disable(&eth->rx_napi);
1327 +
1328 + mtk_stop_dma(eth, MTK_QDMA_GLO_CFG);
1329 +
1330 + mtk_dma_free(eth);
1331 +
1332 + return 0;
1333 +}
1334 +
1335 +static int __init mtk_hw_init(struct mtk_eth *eth)
1336 +{
1337 + int err, i;
1338 +
1339 + /* reset the frame engine */
1340 + reset_control_assert(eth->rstc);
1341 + usleep_range(10, 20);
1342 + reset_control_deassert(eth->rstc);
1343 + usleep_range(10, 20);
1344 +
1345 + /* Set GE2 driving and slew rate */
1346 + regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00);
1347 +
1348 + /* set GE2 TDSEL */
1349 + regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5);
1350 +
1351 + /* set GE2 TUNE */
1352 + regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0);
1353 +
1354 + /* GE1, Force 1000M/FD, FC ON */
1355 + mtk_w32(eth, MAC_MCR_FIXED_LINK, MTK_MAC_MCR(0));
1356 +
1357 + /* GE2, Force 1000M/FD, FC ON */
1358 + mtk_w32(eth, MAC_MCR_FIXED_LINK, MTK_MAC_MCR(1));
1359 +
1360 + /* Enable RX VLan Offloading */
1361 + mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
1362 +
1363 + err = devm_request_irq(eth->dev, eth->irq, mtk_handle_irq, 0,
1364 + dev_name(eth->dev), eth);
1365 + if (err)
1366 + return err;
1367 +
1368 + err = mtk_mdio_init(eth);
1369 + if (err)
1370 + return err;
1371 +
1372 + /* disable delay and normal interrupt */
1373 + mtk_w32(eth, 0, MTK_QDMA_DELAY_INT);
1374 + mtk_irq_disable(eth, MTK_TX_DONE_INT | MTK_RX_DONE_INT);
1375 + mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
1376 + mtk_w32(eth, 0, MTK_RST_GL);
1377 +
1378 + /* FE int grouping */
1379 + mtk_w32(eth, 0, MTK_FE_INT_GRP);
1380 +
1381 + for (i = 0; i < 2; i++) {
1382 + u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i));
1383 +
1384 + /* setup the forward port to send frame to QDMA */
1385 + val &= ~0xffff;
1386 + val |= 0x5555;
1387 +
1388 + /* Enable RX checksum */
1389 + val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;
1390 +
1391 + /* setup the mac dma */
1392 + mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i));
1393 + }
1394 +
1395 + return 0;
1396 +}
1397 +
1398 +static int __init mtk_init(struct net_device *dev)
1399 +{
1400 + struct mtk_mac *mac = netdev_priv(dev);
1401 + struct mtk_eth *eth = mac->hw;
1402 + const char *mac_addr;
1403 +
1404 + mac_addr = of_get_mac_address(mac->of_node);
1405 + if (mac_addr)
1406 + ether_addr_copy(dev->dev_addr, mac_addr);
1407 +
1408 + /* If the mac address is invalid, use random mac address */
1409 + if (!is_valid_ether_addr(dev->dev_addr)) {
1410 + random_ether_addr(dev->dev_addr);
1411 + dev_err(eth->dev, "generated random MAC address %pM\n",
1412 + dev->dev_addr);
1413 + dev->addr_assign_type = NET_ADDR_RANDOM;
1414 + }
1415 +
1416 + return mtk_phy_connect(mac);
1417 +}
1418 +
1419 +static void mtk_uninit(struct net_device *dev)
1420 +{
1421 +	struct mtk_mac *mac = netdev_priv(dev);
1422 +	struct mtk_eth *eth = mac->hw;
1423 +
1424 +	phy_disconnect(mac->phy_dev);
1425 +	mtk_mdio_cleanup(eth);
1426 +	mtk_irq_disable(eth, ~0);
1427 +	devm_free_irq(eth->dev, dev->irq, eth);
1428 +}
1429 +
1430 +static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1431 +{
1432 + struct mtk_mac *mac = netdev_priv(dev);
1433 +
1434 + switch (cmd) {
1435 + case SIOCGMIIPHY:
1436 + case SIOCGMIIREG:
1437 + case SIOCSMIIREG:
1438 + return phy_mii_ioctl(mac->phy_dev, ifr, cmd);
1439 + default:
1440 + break;
1441 + }
1442 +
1443 + return -EOPNOTSUPP;
1444 +}
1445 +
1446 +static void mtk_pending_work(struct work_struct *work)
1447 +{
1448 + struct mtk_mac *mac = container_of(work, struct mtk_mac, pending_work);
1449 + struct mtk_eth *eth = mac->hw;
1450 + struct net_device *dev = eth->netdev[mac->id];
1451 + int err;
1452 +
1453 + rtnl_lock();
1454 + mtk_stop(dev);
1455 +
1456 + err = mtk_open(dev);
1457 + if (err) {
1458 + netif_alert(eth, ifup, dev,
1459 + "Driver up/down cycle failed, closing device.\n");
1460 + dev_close(dev);
1461 + }
1462 + rtnl_unlock();
1463 +}
1464 +
1465 +static int mtk_cleanup(struct mtk_eth *eth)
1466 +{
1467 +	int i;
1468 +
1469 +	for (i = 0; i < MTK_MAC_COUNT; i++) {
1470 +		struct mtk_mac *mac;
1471 +
1472 +		if (!eth->netdev[i])
1473 +			continue;
1474 +		mac = netdev_priv(eth->netdev[i]);
1475 +		cancel_work_sync(&mac->pending_work);
1476 +		unregister_netdev(eth->netdev[i]);
1477 +		free_netdev(eth->netdev[i]);
1478 +	}
1479 +
1480 +	return 0;
1481 +}
1482 +
1483 +static int mtk_get_settings(struct net_device *dev,
1484 + struct ethtool_cmd *cmd)
1485 +{
1486 + struct mtk_mac *mac = netdev_priv(dev);
1487 + int err;
1488 +
1489 + err = phy_read_status(mac->phy_dev);
1490 + if (err)
1491 + return -ENODEV;
1492 +
1493 + return phy_ethtool_gset(mac->phy_dev, cmd);
1494 +}
1495 +
1496 +static int mtk_set_settings(struct net_device *dev,
1497 + struct ethtool_cmd *cmd)
1498 +{
1499 + struct mtk_mac *mac = netdev_priv(dev);
1500 +
1501 + if (cmd->phy_address != mac->phy_dev->mdio.addr) {
1502 + mac->phy_dev = mdiobus_get_phy(mac->hw->mii_bus,
1503 + cmd->phy_address);
1504 + if (!mac->phy_dev)
1505 + return -ENODEV;
1506 + }
1507 +
1508 + return phy_ethtool_sset(mac->phy_dev, cmd);
1509 +}
1510 +
1511 +static void mtk_get_drvinfo(struct net_device *dev,
1512 + struct ethtool_drvinfo *info)
1513 +{
1514 + struct mtk_mac *mac = netdev_priv(dev);
1515 +
1516 + strlcpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver));
1517 + strlcpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info));
1518 + info->n_stats = ARRAY_SIZE(mtk_ethtool_stats);
1519 +}
1520 +
1521 +static u32 mtk_get_msglevel(struct net_device *dev)
1522 +{
1523 + struct mtk_mac *mac = netdev_priv(dev);
1524 +
1525 + return mac->hw->msg_enable;
1526 +}
1527 +
1528 +static void mtk_set_msglevel(struct net_device *dev, u32 value)
1529 +{
1530 + struct mtk_mac *mac = netdev_priv(dev);
1531 +
1532 + mac->hw->msg_enable = value;
1533 +}
1534 +
1535 +static int mtk_nway_reset(struct net_device *dev)
1536 +{
1537 + struct mtk_mac *mac = netdev_priv(dev);
1538 +
1539 + return genphy_restart_aneg(mac->phy_dev);
1540 +}
1541 +
1542 +static u32 mtk_get_link(struct net_device *dev)
1543 +{
1544 + struct mtk_mac *mac = netdev_priv(dev);
1545 + int err;
1546 +
1547 + err = genphy_update_link(mac->phy_dev);
1548 + if (err)
1549 + return ethtool_op_get_link(dev);
1550 +
1551 + return mac->phy_dev->link;
1552 +}
1553 +
1554 +static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1555 +{
1556 + int i;
1557 +
1558 + switch (stringset) {
1559 + case ETH_SS_STATS:
1560 + for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++) {
1561 + memcpy(data, mtk_ethtool_stats[i].str, ETH_GSTRING_LEN);
1562 + data += ETH_GSTRING_LEN;
1563 + }
1564 + break;
1565 + }
1566 +}
1567 +
1568 +static int mtk_get_sset_count(struct net_device *dev, int sset)
1569 +{
1570 + switch (sset) {
1571 + case ETH_SS_STATS:
1572 + return ARRAY_SIZE(mtk_ethtool_stats);
1573 + default:
1574 + return -EOPNOTSUPP;
1575 + }
1576 +}
1577 +
1578 +static void mtk_get_ethtool_stats(struct net_device *dev,
1579 + struct ethtool_stats *stats, u64 *data)
1580 +{
1581 + struct mtk_mac *mac = netdev_priv(dev);
1582 + struct mtk_hw_stats *hwstats = mac->hw_stats;
1583 + u64 *data_src, *data_dst;
1584 + unsigned int start;
1585 + int i;
1586 +
1587 + if (netif_running(dev) && netif_device_present(dev)) {
1588 + if (spin_trylock(&hwstats->stats_lock)) {
1589 + mtk_stats_update_mac(mac);
1590 + spin_unlock(&hwstats->stats_lock);
1591 + }
1592 + }
1593 +
1594 + do {
1595 + data_src = (u64*)hwstats;
1596 + data_dst = data;
1597 + start = u64_stats_fetch_begin_irq(&hwstats->syncp);
1598 +
1599 + for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++)
1600 + *data_dst++ = *(data_src + mtk_ethtool_stats[i].offset);
1601 + } while (u64_stats_fetch_retry_irq(&hwstats->syncp, start));
1602 +}
1603 +
1604 +static struct ethtool_ops mtk_ethtool_ops = {
1605 + .get_settings = mtk_get_settings,
1606 + .set_settings = mtk_set_settings,
1607 + .get_drvinfo = mtk_get_drvinfo,
1608 + .get_msglevel = mtk_get_msglevel,
1609 + .set_msglevel = mtk_set_msglevel,
1610 + .nway_reset = mtk_nway_reset,
1611 + .get_link = mtk_get_link,
1612 + .get_strings = mtk_get_strings,
1613 + .get_sset_count = mtk_get_sset_count,
1614 + .get_ethtool_stats = mtk_get_ethtool_stats,
1615 +};
1616 +
1617 +static const struct net_device_ops mtk_netdev_ops = {
1618 + .ndo_init = mtk_init,
1619 + .ndo_uninit = mtk_uninit,
1620 + .ndo_open = mtk_open,
1621 + .ndo_stop = mtk_stop,
1622 + .ndo_start_xmit = mtk_start_xmit,
1623 + .ndo_set_mac_address = mtk_set_mac_address,
1624 + .ndo_validate_addr = eth_validate_addr,
1625 + .ndo_do_ioctl = mtk_do_ioctl,
1626 + .ndo_change_mtu = eth_change_mtu,
1627 + .ndo_tx_timeout = mtk_tx_timeout,
1628 + .ndo_get_stats64 = mtk_get_stats64,
1629 +#ifdef CONFIG_NET_POLL_CONTROLLER
1630 + .ndo_poll_controller = mtk_poll_controller,
1631 +#endif
1632 +};
1633 +
1634 +static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
1635 +{
1636 + struct mtk_mac *mac;
1637 + const __be32 *_id = of_get_property(np, "reg", NULL);
1638 + int id, err;
1639 +
1640 + if (!_id) {
1641 + dev_err(eth->dev, "missing mac id\n");
1642 + return -EINVAL;
1643 + }
1644 +
1645 + id = be32_to_cpup(_id);
1646 + if (id >= MTK_MAC_COUNT) {
1647 + dev_err(eth->dev, "%d is not a valid mac id\n", id);
1648 + return -EINVAL;
1649 + }
1650 +
1651 + if (eth->netdev[id]) {
1652 + dev_err(eth->dev, "duplicate mac id found: %d\n", id);
1653 + return -EINVAL;
1654 + }
1655 +
1656 + eth->netdev[id] = alloc_etherdev(sizeof(*mac));
1657 + if (!eth->netdev[id]) {
1658 + dev_err(eth->dev, "alloc_etherdev failed\n");
1659 + return -ENOMEM;
1660 + }
1661 + mac = netdev_priv(eth->netdev[id]);
1662 + eth->mac[id] = mac;
1663 + mac->id = id;
1664 + mac->hw = eth;
1665 + mac->of_node = np;
1666 + INIT_WORK(&mac->pending_work, mtk_pending_work);
1667 +
1668 + mac->hw_stats = devm_kzalloc(eth->dev,
1669 + sizeof(*mac->hw_stats),
1670 + GFP_KERNEL);
1671 + if (!mac->hw_stats) {
1672 + dev_err(eth->dev, "failed to allocate counter memory\n");
1673 + err = -ENOMEM;
1674 + goto free_netdev;
1675 + }
1676 + spin_lock_init(&mac->hw_stats->stats_lock);
1677 + mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;
1678 +
1679 + SET_NETDEV_DEV(eth->netdev[id], eth->dev);
1680 + eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
1681 + eth->netdev[id]->base_addr = (unsigned long)eth->base;
1682 + eth->netdev[id]->vlan_features = MTK_HW_FEATURES &
1683 + ~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
1684 + eth->netdev[id]->features |= MTK_HW_FEATURES;
1685 + eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops;
1686 +
1687 + err = register_netdev(eth->netdev[id]);
1688 + if (err) {
1689 + dev_err(eth->dev, "error bringing up device\n");
1690 + goto free_netdev;
1691 + }
1692 + eth->netdev[id]->irq = eth->irq;
1693 + netif_info(eth, probe, eth->netdev[id],
1694 + "mediatek frame engine at 0x%08lx, irq %d\n",
1695 + eth->netdev[id]->base_addr, eth->netdev[id]->irq);
1696 +
1697 + return 0;
1698 +
1699 +free_netdev:
1700 + free_netdev(eth->netdev[id]);
1701 + return err;
1702 +}
1703 +
1704 +static int mtk_probe(struct platform_device *pdev)
1705 +{
1706 + struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1707 + struct device_node *mac_np;
1708 + const struct of_device_id *match;
1709 + struct mtk_soc_data *soc;
1710 + struct mtk_eth *eth;
1711 + int err;
1712 +
1713 + pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
1714 + pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
1715 +
1716 + device_reset(&pdev->dev);
1717 +
1718 + match = of_match_device(of_mtk_match, &pdev->dev);
1719 + soc = (struct mtk_soc_data *)match->data;
1720 +
1721 + eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
1722 + if (!eth)
1723 + return -ENOMEM;
1724 +
1725 +	eth->base = devm_ioremap_resource(&pdev->dev, res);
1726 +	if (IS_ERR(eth->base))
1727 +		return PTR_ERR(eth->base);
1728 +
1729 + spin_lock_init(&eth->page_lock);
1730 +
1731 + eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
1732 + "mediatek,ethsys");
1733 + if (IS_ERR(eth->ethsys)) {
1734 + dev_err(&pdev->dev, "no ethsys regmap found\n");
1735 + return PTR_ERR(eth->ethsys);
1736 + }
1737 +
1738 + eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
1739 + "mediatek,pctl");
1740 + if (IS_ERR(eth->pctl)) {
1741 + dev_err(&pdev->dev, "no pctl regmap found\n");
1742 + return PTR_ERR(eth->pctl);
1743 + }
1744 +
1745 + eth->rstc = devm_reset_control_get(&pdev->dev, "eth");
1746 + if (IS_ERR(eth->rstc)) {
1747 + dev_err(&pdev->dev, "no eth reset found\n");
1748 + return PTR_ERR(eth->rstc);
1749 + }
1750 +
1751 + eth->irq = platform_get_irq(pdev, 0);
1752 + if (eth->irq < 0) {
1753 + dev_err(&pdev->dev, "no IRQ resource found\n");
1754 + return -ENXIO;
1755 + }
1756 +
1757 + eth->clk_ethif = devm_clk_get(&pdev->dev, "ethif");
1758 + eth->clk_esw = devm_clk_get(&pdev->dev, "esw");
1759 + eth->clk_gp1 = devm_clk_get(&pdev->dev, "gp1");
1760 + eth->clk_gp2 = devm_clk_get(&pdev->dev, "gp2");
1761 + if (IS_ERR(eth->clk_esw) || IS_ERR(eth->clk_gp1) ||
1762 + IS_ERR(eth->clk_gp2) || IS_ERR(eth->clk_ethif))
1763 + return -ENODEV;
1764 +
1765 + clk_prepare_enable(eth->clk_ethif);
1766 + clk_prepare_enable(eth->clk_esw);
1767 + clk_prepare_enable(eth->clk_gp1);
1768 + clk_prepare_enable(eth->clk_gp2);
1769 +
1770 + eth->dev = &pdev->dev;
1771 + eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
1772 +
1773 + err = mtk_hw_init(eth);
1774 + if (err)
1775 + return err;
1776 +
1777 + for_each_child_of_node(pdev->dev.of_node, mac_np) {
1778 + if (!of_device_is_compatible(mac_np,
1779 + "mediatek,eth-mac"))
1780 + continue;
1781 +
1782 + if (!of_device_is_available(mac_np))
1783 + continue;
1784 +
1785 + err = mtk_add_mac(eth, mac_np);
1786 + if (err)
1787 + goto err_free_dev;
1788 + }
1789 +
1790 + /* we run 2 devices on the same DMA ring so we need a dummy device
1791 + * for NAPI to work
1792 + */
1793 + init_dummy_netdev(&eth->dummy_dev);
1794 + netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_poll,
1795 + MTK_NAPI_WEIGHT);
1796 +
1797 + platform_set_drvdata(pdev, eth);
1798 +
1799 + return 0;
1800 +
1801 +err_free_dev:
1802 + mtk_cleanup(eth);
1803 + return err;
1804 +}
1805 +
1806 +static int mtk_remove(struct platform_device *pdev)
1807 +{
1808 + struct mtk_eth *eth = platform_get_drvdata(pdev);
1809 +
1810 + clk_disable_unprepare(eth->clk_ethif);
1811 + clk_disable_unprepare(eth->clk_esw);
1812 + clk_disable_unprepare(eth->clk_gp1);
1813 + clk_disable_unprepare(eth->clk_gp2);
1814 +
1815 + netif_napi_del(&eth->rx_napi);
1816 + mtk_cleanup(eth);
1817 + platform_set_drvdata(pdev, NULL);
1818 +
1819 + return 0;
1820 +}
1821 +
1822 +const struct of_device_id of_mtk_match[] = {
1823 + { .compatible = "mediatek,mt7623-eth" },
1824 + {},
1825 +};
1826 +
1827 +static struct platform_driver mtk_driver = {
1828 + .probe = mtk_probe,
1829 + .remove = mtk_remove,
1830 + .driver = {
1831 + .name = "mtk_soc_eth",
1832 + .owner = THIS_MODULE,
1833 + .of_match_table = of_mtk_match,
1834 + },
1835 +};
1836 +
1837 +module_platform_driver(mtk_driver);
1838 +
1839 +MODULE_LICENSE("GPL");
1840 +MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
1841 +MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC");
1842 diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
1843 new file mode 100644
1844 index 0000000..48a5292
1845 --- /dev/null
1846 +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
1847 @@ -0,0 +1,421 @@
1848 +/* This program is free software; you can redistribute it and/or modify
1849 + * it under the terms of the GNU General Public License as published by
1850 + * the Free Software Foundation; version 2 of the License
1851 + *
1852 + * This program is distributed in the hope that it will be useful,
1853 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
1854 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1855 + * GNU General Public License for more details.
1856 + *
1857 + * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
1858 + * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
1859 + * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
1860 + */
1861 +
1862 +#ifndef MTK_ETH_H
1863 +#define MTK_ETH_H
1864 +
1865 +#define MTK_QDMA_PAGE_SIZE 2048
1866 +#define MTK_MAX_RX_LENGTH 1536
1867 +#define MTK_TX_DMA_BUF_LEN 0x3fff
1868 +#define MTK_DMA_SIZE 256
1869 +#define MTK_NAPI_WEIGHT 64
1870 +#define MTK_MAC_COUNT 2
1871 +#define MTK_RX_ETH_HLEN (VLAN_ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)
1872 +#define MTK_RX_HLEN (NET_SKB_PAD + MTK_RX_ETH_HLEN + NET_IP_ALIGN)
1873 +#define MTK_DMA_DUMMY_DESC 0xffffffff
1874 +#define MTK_DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | \
1875 + NETIF_MSG_PROBE | \
1876 + NETIF_MSG_LINK | \
1877 + NETIF_MSG_TIMER | \
1878 + NETIF_MSG_IFDOWN | \
1879 + NETIF_MSG_IFUP | \
1880 + NETIF_MSG_RX_ERR | \
1881 + NETIF_MSG_TX_ERR)
1882 +#define MTK_HW_FEATURES (NETIF_F_IP_CSUM | \
1883 + NETIF_F_RXCSUM | \
1884 + NETIF_F_HW_VLAN_CTAG_TX | \
1885 + NETIF_F_HW_VLAN_CTAG_RX | \
1886 + NETIF_F_SG | NETIF_F_TSO | \
1887 + NETIF_F_TSO6 | \
1888 + NETIF_F_IPV6_CSUM)
1889 +#define NEXT_RX_DESP_IDX(X) (((X) + 1) & (MTK_DMA_SIZE - 1))
1890 +
1891 +/* Frame Engine Global Reset Register */
1892 +#define MTK_RST_GL 0x04
1893 +#define RST_GL_PSE BIT(0)
1894 +
1895 +/* Frame Engine Interrupt Status Register */
1896 +#define MTK_INT_STATUS2 0x08
1897 +#define MTK_GDM1_AF BIT(28)
1898 +#define MTK_GDM2_AF BIT(29)
1899 +
1900 +/* Frame Engine Interrupt Grouping Register */
1901 +#define MTK_FE_INT_GRP 0x20
1902 +
1903 +/* CDMP Exgress Control Register */
1904 +#define MTK_CDMP_EG_CTRL 0x404
1905 +
1906 +/* GDM Exgress Control Register */
1907 +#define MTK_GDMA_FWD_CFG(x) (0x500 + (x * 0x1000))
1908 +#define MTK_GDMA_ICS_EN BIT(22)
1909 +#define MTK_GDMA_TCS_EN BIT(21)
1910 +#define MTK_GDMA_UCS_EN BIT(20)
1911 +
1912 +/* Unicast Filter MAC Address Register - Low */
1913 +#define MTK_GDMA_MAC_ADRL(x) (0x508 + (x * 0x1000))
1914 +
1915 +/* Unicast Filter MAC Address Register - High */
1916 +#define MTK_GDMA_MAC_ADRH(x) (0x50C + (x * 0x1000))
1917 +
1918 +/* QDMA TX Queue Configuration Registers */
1919 +#define MTK_QTX_CFG(x) (0x1800 + (x * 0x10))
1920 +#define QDMA_RES_THRES 4
1921 +
1922 +/* QDMA TX Queue Scheduler Registers */
1923 +#define MTK_QTX_SCH(x) (0x1804 + (x * 0x10))
1924 +
1925 +/* QDMA RX Base Pointer Register */
1926 +#define MTK_QRX_BASE_PTR0 0x1900
1927 +
1928 +/* QDMA RX Maximum Count Register */
1929 +#define MTK_QRX_MAX_CNT0 0x1904
1930 +
1931 +/* QDMA RX CPU Pointer Register */
1932 +#define MTK_QRX_CRX_IDX0 0x1908
1933 +
1934 +/* QDMA RX DMA Pointer Register */
1935 +#define MTK_QRX_DRX_IDX0 0x190C
1936 +
1937 +/* QDMA Global Configuration Register */
1938 +#define MTK_QDMA_GLO_CFG 0x1A04
1939 +#define MTK_RX_2B_OFFSET BIT(31)
1940 +#define MTK_RX_BT_32DWORDS (3 << 11)
1941 +#define MTK_TX_WB_DDONE BIT(6)
1942 +#define MTK_DMA_SIZE_16DWORDS (2 << 4)
1943 +#define MTK_RX_DMA_BUSY BIT(3)
1944 +#define MTK_TX_DMA_BUSY BIT(1)
1945 +#define MTK_RX_DMA_EN BIT(2)
1946 +#define MTK_TX_DMA_EN BIT(0)
1947 +#define MTK_DMA_BUSY_TIMEOUT HZ
1948 +
1949 +/* QDMA Reset Index Register */
1950 +#define MTK_QDMA_RST_IDX 0x1A08
1951 +#define MTK_PST_DRX_IDX0 BIT(16)
1952 +
1953 +/* QDMA Delay Interrupt Register */
1954 +#define MTK_QDMA_DELAY_INT 0x1A0C
1955 +
1956 +/* QDMA Flow Control Register */
1957 +#define MTK_QDMA_FC_THRES 0x1A10
1958 +#define FC_THRES_DROP_MODE BIT(20)
1959 +#define FC_THRES_DROP_EN (7 << 16)
1960 +#define FC_THRES_MIN 0x4444
1961 +
1962 +/* QDMA Interrupt Status Register */
1963 +#define MTK_QMTK_INT_STATUS 0x1A18
1964 +#define MTK_RX_DONE_INT1 BIT(17)
1965 +#define MTK_RX_DONE_INT0 BIT(16)
1966 +#define MTK_TX_DONE_INT3 BIT(3)
1967 +#define MTK_TX_DONE_INT2 BIT(2)
1968 +#define MTK_TX_DONE_INT1 BIT(1)
1969 +#define MTK_TX_DONE_INT0 BIT(0)
1970 +#define MTK_RX_DONE_INT (MTK_RX_DONE_INT0 | MTK_RX_DONE_INT1)
1971 +#define MTK_TX_DONE_INT (MTK_TX_DONE_INT0 | MTK_TX_DONE_INT1 | \
1972 + MTK_TX_DONE_INT2 | MTK_TX_DONE_INT3)
1973 +
1974 +/* QDMA Interrupt Status Register */
1975 +#define MTK_QDMA_INT_MASK 0x1A1C
1976 +
1977 +/* QDMA Interrupt Mask Register */
1978 +#define MTK_QDMA_HRED2 0x1A44
1979 +
1980 +/* QDMA TX Forward CPU Pointer Register */
1981 +#define MTK_QTX_CTX_PTR 0x1B00
1982 +
1983 +/* QDMA TX Forward DMA Pointer Register */
1984 +#define MTK_QTX_DTX_PTR 0x1B04
1985 +
1986 +/* QDMA TX Release CPU Pointer Register */
1987 +#define MTK_QTX_CRX_PTR 0x1B10
1988 +
1989 +/* QDMA TX Release DMA Pointer Register */
1990 +#define MTK_QTX_DRX_PTR 0x1B14
1991 +
1992 +/* QDMA FQ Head Pointer Register */
1993 +#define MTK_QDMA_FQ_HEAD 0x1B20
1994 +
1995 +/* QDMA FQ Head Pointer Register */
1996 +#define MTK_QDMA_FQ_TAIL 0x1B24
1997 +
1998 +/* QDMA FQ Free Page Counter Register */
1999 +#define MTK_QDMA_FQ_CNT 0x1B28
2000 +
2001 +/* QDMA FQ Free Page Buffer Length Register */
2002 +#define MTK_QDMA_FQ_BLEN 0x1B2C
2003 +
2004 +/* GMA1 Received Good Byte Count Register */
2005 +#define MTK_GDM1_TX_GBCNT 0x2400
2006 +#define MTK_STAT_OFFSET 0x40
2007 +
2008 +/* QDMA descriptor txd4 */
2009 +#define TX_DMA_CHKSUM (0x7 << 29)
2010 +#define TX_DMA_TSO BIT(28)
2011 +#define TX_DMA_FPORT_SHIFT 25
2012 +#define TX_DMA_FPORT_MASK 0x7
2013 +#define TX_DMA_INS_VLAN BIT(16)
2014 +
2015 +/* QDMA descriptor txd3 */
2016 +#define TX_DMA_OWNER_CPU BIT(31)
2017 +#define TX_DMA_LS0 BIT(30)
2018 +#define TX_DMA_PLEN0(_x) (((_x) & MTK_TX_DMA_BUF_LEN) << 16)
2019 +#define TX_DMA_SWC BIT(14)
2020 +#define TX_DMA_SDL(_x) (((_x) & 0x3fff) << 16)
2021 +
2022 +/* QDMA descriptor rxd2 */
2023 +#define RX_DMA_DONE BIT(31)
2024 +#define RX_DMA_PLEN0(_x) (((_x) & 0x3fff) << 16)
2025 +#define RX_DMA_GET_PLEN0(_x) (((_x) >> 16) & 0x3fff)
2026 +
2027 +/* QDMA descriptor rxd3 */
2028 +#define RX_DMA_VID(_x) ((_x) & 0xfff)
2029 +
2030 +/* QDMA descriptor rxd4 */
2031 +#define RX_DMA_L4_VALID BIT(24)
2032 +#define RX_DMA_FPORT_SHIFT 19
2033 +#define RX_DMA_FPORT_MASK 0x7
2034 +
2035 +/* PHY Indirect Access Control registers */
2036 +#define MTK_PHY_IAC 0x10004
2037 +#define PHY_IAC_ACCESS BIT(31)
2038 +#define PHY_IAC_READ BIT(19)
2039 +#define PHY_IAC_WRITE BIT(18)
2040 +#define PHY_IAC_START BIT(16)
2041 +#define PHY_IAC_ADDR_SHIFT 20
2042 +#define PHY_IAC_REG_SHIFT 25
2043 +#define PHY_IAC_TIMEOUT HZ
2044 +
2045 +/* Mac control registers */
2046 +#define MTK_MAC_MCR(x) (0x10100 + (x * 0x100))
2047 +#define MAC_MCR_MAX_RX_1536 BIT(24)
2048 +#define MAC_MCR_IPG_CFG (BIT(18) | BIT(16))
2049 +#define MAC_MCR_FORCE_MODE BIT(15)
2050 +#define MAC_MCR_TX_EN BIT(14)
2051 +#define MAC_MCR_RX_EN BIT(13)
2052 +#define MAC_MCR_BACKOFF_EN BIT(9)
2053 +#define MAC_MCR_BACKPR_EN BIT(8)
2054 +#define MAC_MCR_FORCE_RX_FC BIT(5)
2055 +#define MAC_MCR_FORCE_TX_FC BIT(4)
2056 +#define MAC_MCR_SPEED_1000 BIT(3)
2057 +#define MAC_MCR_SPEED_100 BIT(2)
2058 +#define MAC_MCR_FORCE_DPX BIT(1)
2059 +#define MAC_MCR_FORCE_LINK BIT(0)
2060 +#define MAC_MCR_FIXED_LINK (MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG | \
2061 + MAC_MCR_FORCE_MODE | MAC_MCR_TX_EN | \
2062 + MAC_MCR_RX_EN | MAC_MCR_BACKOFF_EN | \
2063 + MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_RX_FC | \
2064 + MAC_MCR_FORCE_TX_FC | MAC_MCR_SPEED_1000 | \
2065 + MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_LINK)
2066 +
2067 +/* GPIO port control registers for GMAC 2*/
2068 +#define GPIO_OD33_CTRL8 0x4c0
2069 +#define GPIO_BIAS_CTRL 0xed0
2070 +#define GPIO_DRV_SEL10 0xf00
2071 +
2072 +/* ethernet subsystem config register */
2073 +#define ETHSYS_SYSCFG0 0x14
2074 +#define SYSCFG0_GE_MASK 0x3
2075 +#define SYSCFG0_GE_MODE(x, y) (x << (12 + (y * 2)))
2076 +
2077 +struct mtk_rx_dma {
2078 + unsigned int rxd1;
2079 + unsigned int rxd2;
2080 + unsigned int rxd3;
2081 + unsigned int rxd4;
2082 +} __packed __aligned(4);
2083 +
2084 +struct mtk_tx_dma {
2085 + unsigned int txd1;
2086 + unsigned int txd2;
2087 + unsigned int txd3;
2088 + unsigned int txd4;
2089 +} __packed __aligned(4);
2090 +
2091 +struct mtk_eth;
2092 +struct mtk_mac;
2093 +
2094 +/* struct mtk_hw_stats - the structure that holds the traffic statistics.
2095 + * @stats_lock: make sure that stats operations are atomic
2096 + * @reg_offset: the status register offset of the SoC
2097 + * @syncp: the refcount
2098 + *
2099 + * All of the supported SoCs have hardware counters for traffic statistics.
2100 + * Whenever the status IRQ triggers we can read the latest stats from these
2101 + * counters and store them in this struct.
2102 + */
2103 +struct mtk_hw_stats {
2104 + u64 tx_bytes;
2105 + u64 tx_packets;
2106 + u64 tx_skip;
2107 + u64 tx_collisions;
2108 + u64 rx_bytes;
2109 + u64 rx_packets;
2110 + u64 rx_overflow;
2111 + u64 rx_fcs_errors;
2112 + u64 rx_short_errors;
2113 + u64 rx_long_errors;
2114 + u64 rx_checksum_errors;
2115 + u64 rx_flow_control_packets;
2116 +
2117 + spinlock_t stats_lock;
2118 + u32 reg_offset;
2119 + struct u64_stats_sync syncp;
2120 +};
2121 +
2122 +/* PDMA descriptor can point at 1-2 segments. This enum allows us to track how
2123 + * memory was allocated so that it can be freed properly
2124 + */
2125 +enum mtk_tx_flags {
2126 + MTK_TX_FLAGS_SINGLE0 = 0x01,
2127 + MTK_TX_FLAGS_PAGE0 = 0x02,
2128 +};
2129 +
2130 +/* struct mtk_tx_buf - This struct holds the pointers to the memory pointed at
2131 + *			by the TX descriptors
2132 + * @skb: The SKB pointer of the packet being sent
2133 + * @dma_addr0: The base addr of the first segment
2134 + * @dma_len0: The length of the first segment
2135 + * @dma_addr1: The base addr of the second segment
2136 + * @dma_len1: The length of the second segment
2137 + */
2138 +struct mtk_tx_buf {
2139 + struct sk_buff *skb;
2140 + u32 flags;
2141 + DEFINE_DMA_UNMAP_ADDR(dma_addr0);
2142 + DEFINE_DMA_UNMAP_LEN(dma_len0);
2143 + DEFINE_DMA_UNMAP_ADDR(dma_addr1);
2144 + DEFINE_DMA_UNMAP_LEN(dma_len1);
2145 +};
2146 +
2147 +/* struct mtk_tx_ring - This struct holds info describing a TX ring
2148 + * @dma: The descriptor ring
2149 + * @buf: The memory pointed at by the ring
2150 + * @phys: The physical addr of tx_buf
2151 + * @next_free: Pointer to the next free descriptor
2152 + * @last_free: Pointer to the last free descriptor
2153 + * @thresh: The threshold of minimum amount of free descriptors
2154 + * @free_count: QDMA uses a linked list. Track how many free descriptors
2155 + * are present
2156 + */
2157 +struct mtk_tx_ring {
2158 + struct mtk_tx_dma *dma;
2159 + struct mtk_tx_buf *buf;
2160 + dma_addr_t phys;
2161 + struct mtk_tx_dma *next_free;
2162 + struct mtk_tx_dma *last_free;
2163 + u16 thresh;
2164 + atomic_t free_count;
2165 +};
2166 +
2167 +/* struct mtk_rx_ring - This struct holds info describing a RX ring
2168 + * @dma: The descriptor ring
2169 + * @data: The memory pointed at by the ring
2170 + * @phys: The physical addr of rx_buf
2171 + * @frag_size: How big can each fragment be
2172 + * @buf_size: The size of each packet buffer
2173 + * @calc_idx: The current head of ring
2174 + */
2175 +struct mtk_rx_ring {
2176 + struct mtk_rx_dma *dma;
2177 + u8 **data;
2178 + dma_addr_t phys;
2179 + u16 frag_size;
2180 + u16 buf_size;
2181 + u16 calc_idx;
2182 +};
2183 +
2184 +/* currently no SoC has more than 2 macs */
2185 +#define MTK_MAX_DEVS 2
2186 +
2187 +/* struct mtk_eth -	This is the main data structure for holding the state
2188 + * of the driver
2189 + * @dev: The device pointer
2190 + * @base: The mapped register i/o base
2191 + * @page_lock: Make sure that register operations are atomic
2192 + * @dummy_dev: we run 2 netdevs on 1 physical DMA ring and need a
2193 + * dummy for NAPI to work
2194 + * @netdev: The netdev instances
2195 + * @mac: Each netdev is linked to a physical MAC
2196 + * @irq: The IRQ that we are using
2197 + * @msg_enable: Ethtool msg level
2198 + * @ethsys: The register map pointing at the range used to setup
2199 + * MII modes
2200 + * @pctl: The register map pointing at the range used to setup
2201 + * GMAC port drive/slew values
2202 + * @dma_refcnt: track how many netdevs are using the DMA engine
2203 + * @tx_ring:		Pointer to the memory holding info about the TX ring
2204 + * @rx_ring:		Pointer to the memory holding info about the RX ring
2205 + * @rx_napi: The NAPI struct
2206 + * @scratch_ring: Newer SoCs need memory for a second HW managed TX ring
2207 + * @scratch_head: The scratch memory that scratch_ring points to.
2208 + * @clk_ethif: The ethif clock
2209 + * @clk_esw: The switch clock
2210 + * @clk_gp1: The gmac1 clock
2211 + * @clk_gp2: The gmac2 clock
2212 + * @mii_bus: If there is a bus we need to create an instance for it
2213 + */
2214 +
2215 +struct mtk_eth {
2216 + struct device *dev;
2217 + void __iomem *base;
2218 + struct reset_control *rstc;
2219 + spinlock_t page_lock;
2220 + struct net_device dummy_dev;
2221 + struct net_device *netdev[MTK_MAX_DEVS];
2222 + struct mtk_mac *mac[MTK_MAX_DEVS];
2223 + int irq;
2224 + u32 msg_enable;
2225 + unsigned long sysclk;
2226 + struct regmap *ethsys;
2227 + struct regmap *pctl;
2228 + atomic_t dma_refcnt;
2229 + struct mtk_tx_ring tx_ring;
2230 + struct mtk_rx_ring rx_ring;
2231 + struct napi_struct rx_napi;
2232 + struct mtk_tx_dma *scratch_ring;
2233 + void *scratch_head;
2234 + struct clk *clk_ethif;
2235 + struct clk *clk_esw;
2236 + struct clk *clk_gp1;
2237 + struct clk *clk_gp2;
2238 + struct mii_bus *mii_bus;
2239 +};
2240 +
2241 +/* struct mtk_mac - the structure that holds the info about the MACs of the
2242 + * SoC
2243 + * @id: The number of the MAC
2244 + * @of_node: Our devicetree node
2245 + * @hw:			Backpointer to our main data structure
2246 + * @hw_stats: Packet statistics counter
2247 + * @phy_dev: The attached PHY if available
2248 + * @pending_work: The workqueue used to reset the dma ring
2249 + */
2250 +struct mtk_mac {
2251 + int id;
2252 + struct device_node *of_node;
2253 + struct mtk_eth *hw;
2254 + struct mtk_hw_stats *hw_stats;
2255 + struct phy_device *phy_dev;
2256 + struct work_struct pending_work;
2257 +};
2258 +
2259 +/* the struct describing the SoC. these are declared in the soc_xyz.c files */
2260 +extern const struct of_device_id of_mtk_match[];
2261 +
2262 +/* read the hardware status register */
2263 +void mtk_stats_update_mac(struct mtk_mac *mac);
2264 +
2265 +void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg);
2266 +u32 mtk_r32(struct mtk_eth *eth, unsigned reg);
2267 +
2268 +#endif /* MTK_ETH_H */
2269 --
2270 1.7.10.4
2271