/* This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Copyright (C) 2009-2015 John Crispin <blogic@openwrt.org>
 * Copyright (C) 2009-2015 Felix Fietkau <nbd@nbd.name>
 * Copyright (C) 2013-2015 Michael Lee <igvtee@gmail.com>
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/platform_device.h>
#include <linux/of_device.h>
#include <linux/clk.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/if_vlan.h>
#include <linux/reset.h>
#include <linux/tcp.h>
#include <linux/io.h>
#include <linux/bug.h>
#include <linux/netfilter.h>
#include <net/netfilter/nf_flow_table.h>
#include <linux/of_gpio.h>
#include <linux/gpio.h>
#include <linux/gpio/consumer.h>

#include <asm/mach-ralink/ralink_regs.h>

#include "mtk_eth_soc.h"
#define MAX_RX_LENGTH		1536
#define FE_RX_ETH_HLEN		(VLAN_ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)
#define FE_RX_HLEN		(NET_SKB_PAD + FE_RX_ETH_HLEN + NET_IP_ALIGN)
#define DMA_DUMMY_DESC		0xffffffff
#define FE_DEFAULT_MSG_ENABLE \
		(NETIF_MSG_DRV | \
		NETIF_MSG_PROBE | \
		NETIF_MSG_LINK | \
		NETIF_MSG_TIMER | \
		NETIF_MSG_IFDOWN | \
		NETIF_MSG_IFUP | \
		NETIF_MSG_RX_ERR | \
		NETIF_MSG_TX_ERR)

#define TX_DMA_DESP2_DEF	(TX_DMA_LS0 | TX_DMA_DONE)
#define TX_DMA_DESP4_DEF	(TX_DMA_QN(3) | TX_DMA_PN(1))
#define NEXT_TX_DESP_IDX(X)	(((X) + 1) & (ring->tx_ring_size - 1))
#define NEXT_RX_DESP_IDX(X)	(((X) + 1) & (ring->rx_ring_size - 1))
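/* NEXT_TX_DESP_IDX()/NEXT_RX_DESP_IDX() wrap the ring index with a mask
 * instead of a modulo, so both ring sizes must be powers of two.
 */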
#define SYSC_REG_RSTCTRL	0x34

static int fe_msg_level = -1;
module_param_named(msg_level, fe_msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
static const u16 fe_reg_table_default[FE_REG_COUNT] = {
	[FE_REG_PDMA_GLO_CFG] = FE_PDMA_GLO_CFG,
	[FE_REG_PDMA_RST_CFG] = FE_PDMA_RST_CFG,
	[FE_REG_DLY_INT_CFG] = FE_DLY_INT_CFG,
	[FE_REG_TX_BASE_PTR0] = FE_TX_BASE_PTR0,
	[FE_REG_TX_MAX_CNT0] = FE_TX_MAX_CNT0,
	[FE_REG_TX_CTX_IDX0] = FE_TX_CTX_IDX0,
	[FE_REG_TX_DTX_IDX0] = FE_TX_DTX_IDX0,
	[FE_REG_RX_BASE_PTR0] = FE_RX_BASE_PTR0,
	[FE_REG_RX_MAX_CNT0] = FE_RX_MAX_CNT0,
	[FE_REG_RX_CALC_IDX0] = FE_RX_CALC_IDX0,
	[FE_REG_RX_DRX_IDX0] = FE_RX_DRX_IDX0,
	[FE_REG_FE_INT_ENABLE] = FE_FE_INT_ENABLE,
	[FE_REG_FE_INT_STATUS] = FE_FE_INT_STATUS,
	[FE_REG_FE_DMA_VID_BASE] = FE_DMA_VID0,
	[FE_REG_FE_COUNTER_BASE] = FE_GDMA1_TX_GBCNT,
	[FE_REG_FE_RST_GL] = FE_FE_RST_GL,
};

static const u16 *fe_reg_table = fe_reg_table_default;

struct fe_work_t {
	int bitnr;
	void (*action)(struct fe_priv *);
};

static void __iomem *fe_base;
void fe_w32(u32 val, unsigned reg)
{
	__raw_writel(val, fe_base + reg);
}

u32 fe_r32(unsigned reg)
{
	return __raw_readl(fe_base + reg);
}

void fe_reg_w32(u32 val, enum fe_reg reg)
{
	fe_w32(val, fe_reg_table[reg]);
}

u32 fe_reg_r32(enum fe_reg reg)
{
	return fe_r32(fe_reg_table[reg]);
}

void fe_m32(struct fe_priv *eth, u32 clear, u32 set, unsigned reg)
{
	u32 val;

	spin_lock(&eth->page_lock);
	val = __raw_readl(fe_base + reg);
	val &= ~clear;
	val |= set;
	__raw_writel(val, fe_base + reg);
	spin_unlock(&eth->page_lock);
}
void fe_reset(u32 reset_bits)
{
	u32 t;

	t = rt_sysc_r32(SYSC_REG_RSTCTRL);
	t |= reset_bits;
	rt_sysc_w32(t, SYSC_REG_RSTCTRL);
	usleep_range(10, 20);

	t &= ~reset_bits;
	rt_sysc_w32(t, SYSC_REG_RSTCTRL);
	usleep_range(10, 20);
}

static inline void fe_int_disable(u32 mask)
{
	fe_reg_w32(fe_reg_r32(FE_REG_FE_INT_ENABLE) & ~mask,
		   FE_REG_FE_INT_ENABLE);
	/* flush write */
	fe_reg_r32(FE_REG_FE_INT_ENABLE);
}

static inline void fe_int_enable(u32 mask)
{
	fe_reg_w32(fe_reg_r32(FE_REG_FE_INT_ENABLE) | mask,
		   FE_REG_FE_INT_ENABLE);
	/* flush write */
	fe_reg_r32(FE_REG_FE_INT_ENABLE);
}
static inline void fe_hw_set_macaddr(struct fe_priv *priv, unsigned char *mac)
{
	unsigned long flags;

	spin_lock_irqsave(&priv->page_lock, flags);
	fe_w32((mac[0] << 8) | mac[1], FE_GDMA1_MAC_ADRH);
	fe_w32((mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5],
	       FE_GDMA1_MAC_ADRL);
	spin_unlock_irqrestore(&priv->page_lock, flags);
}

static int fe_set_mac_address(struct net_device *dev, void *p)
{
	int ret = eth_mac_addr(dev, p);

	if (!ret) {
		struct fe_priv *priv = netdev_priv(dev);

		if (priv->soc->set_mac)
			priv->soc->set_mac(priv, dev->dev_addr);
		else
			fe_hw_set_macaddr(priv, p);
	}

	return ret;
}
static inline int fe_max_frag_size(int mtu)
{
	/* make sure buf_size will be at least MAX_RX_LENGTH */
	if (mtu + FE_RX_ETH_HLEN < MAX_RX_LENGTH)
		mtu = MAX_RX_LENGTH - FE_RX_ETH_HLEN;

	return SKB_DATA_ALIGN(FE_RX_HLEN + mtu) +
	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}

static inline int fe_max_buf_size(int frag_size)
{
	int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN -
		       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	BUG_ON(buf_size < MAX_RX_LENGTH);
	return buf_size;
}
static inline void fe_get_rxd(struct fe_rx_dma *rxd, struct fe_rx_dma *dma_rxd)
{
	rxd->rxd1 = dma_rxd->rxd1;
	rxd->rxd2 = dma_rxd->rxd2;
	rxd->rxd3 = dma_rxd->rxd3;
	rxd->rxd4 = dma_rxd->rxd4;
}

static inline void fe_set_txd(struct fe_tx_dma *txd, struct fe_tx_dma *dma_txd)
{
	dma_txd->txd1 = txd->txd1;
	dma_txd->txd3 = txd->txd3;
	dma_txd->txd4 = txd->txd4;
	/* clean dma done flag last */
	dma_txd->txd2 = txd->txd2;
}
static void fe_clean_rx(struct fe_priv *priv)
{
	struct fe_rx_ring *ring = &priv->rx_ring;
	struct page *page;
	int i;

	if (ring->rx_data) {
		for (i = 0; i < ring->rx_ring_size; i++)
			if (ring->rx_data[i]) {
				if (ring->rx_dma && ring->rx_dma[i].rxd1)
					dma_unmap_single(priv->dev,
							 ring->rx_dma[i].rxd1,
							 ring->rx_buf_size,
							 DMA_FROM_DEVICE);
				skb_free_frag(ring->rx_data[i]);
			}

		kfree(ring->rx_data);
		ring->rx_data = NULL;
	}

	if (ring->rx_dma) {
		dma_free_coherent(priv->dev,
				  ring->rx_ring_size * sizeof(*ring->rx_dma),
				  ring->rx_dma,
				  ring->rx_phys);
		ring->rx_dma = NULL;
	}

	if (!ring->frag_cache.va)
		return;

	page = virt_to_page(ring->frag_cache.va);
	__page_frag_cache_drain(page, ring->frag_cache.pagecnt_bias);
	memset(&ring->frag_cache, 0, sizeof(ring->frag_cache));
}
static int fe_alloc_rx(struct fe_priv *priv)
{
	struct fe_rx_ring *ring = &priv->rx_ring;
	int i, pad;

	ring->rx_data = kcalloc(ring->rx_ring_size, sizeof(*ring->rx_data),
				GFP_KERNEL);
	if (!ring->rx_data)
		goto no_rx_mem;

	for (i = 0; i < ring->rx_ring_size; i++) {
		ring->rx_data[i] = page_frag_alloc(&ring->frag_cache,
						   ring->frag_size,
						   GFP_KERNEL);
		if (!ring->rx_data[i])
			goto no_rx_mem;
	}

	ring->rx_dma = dma_alloc_coherent(priv->dev,
					  ring->rx_ring_size * sizeof(*ring->rx_dma),
					  &ring->rx_phys,
					  GFP_ATOMIC | __GFP_ZERO);
	if (!ring->rx_dma)
		goto no_rx_mem;

	if (priv->flags & FE_FLAG_RX_2B_OFFSET)
		pad = 0;
	else
		pad = NET_IP_ALIGN;

	for (i = 0; i < ring->rx_ring_size; i++) {
		dma_addr_t dma_addr = dma_map_single(priv->dev,
						     ring->rx_data[i] + NET_SKB_PAD + pad,
						     ring->rx_buf_size,
						     DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(priv->dev, dma_addr)))
			goto no_rx_mem;
		ring->rx_dma[i].rxd1 = (unsigned int)dma_addr;

		if (priv->flags & FE_FLAG_RX_SG_DMA)
			ring->rx_dma[i].rxd2 = RX_DMA_PLEN0(ring->rx_buf_size);
		else
			ring->rx_dma[i].rxd2 = RX_DMA_LSO;
	}
	ring->rx_calc_idx = ring->rx_ring_size - 1;
	/* make sure that all changes to the dma ring are flushed before we
	 * continue
	 */
	wmb();

	fe_reg_w32(ring->rx_phys, FE_REG_RX_BASE_PTR0);
	fe_reg_w32(ring->rx_ring_size, FE_REG_RX_MAX_CNT0);
	fe_reg_w32(ring->rx_calc_idx, FE_REG_RX_CALC_IDX0);
	fe_reg_w32(FE_PST_DRX_IDX0, FE_REG_PDMA_RST_CFG);

	return 0;

no_rx_mem:
	return -ENOMEM;
}
static void fe_txd_unmap(struct device *dev, struct fe_tx_buf *tx_buf)
{
	if (dma_unmap_len(tx_buf, dma_len0))
		dma_unmap_page(dev,
			       dma_unmap_addr(tx_buf, dma_addr0),
			       dma_unmap_len(tx_buf, dma_len0),
			       DMA_TO_DEVICE);

	if (dma_unmap_len(tx_buf, dma_len1))
		dma_unmap_page(dev,
			       dma_unmap_addr(tx_buf, dma_addr1),
			       dma_unmap_len(tx_buf, dma_len1),
			       DMA_TO_DEVICE);

	dma_unmap_len_set(tx_buf, dma_addr0, 0);
	dma_unmap_len_set(tx_buf, dma_addr1, 0);
	if (tx_buf->skb && (tx_buf->skb != (struct sk_buff *)DMA_DUMMY_DESC))
		dev_kfree_skb_any(tx_buf->skb);
	tx_buf->skb = NULL;
}
static void fe_clean_tx(struct fe_priv *priv)
{
	int i;
	struct device *dev = priv->dev;
	struct fe_tx_ring *ring = &priv->tx_ring;

	if (ring->tx_buf) {
		for (i = 0; i < ring->tx_ring_size; i++)
			fe_txd_unmap(dev, &ring->tx_buf[i]);
		kfree(ring->tx_buf);
		ring->tx_buf = NULL;
	}

	if (ring->tx_dma) {
		dma_free_coherent(dev,
				  ring->tx_ring_size * sizeof(*ring->tx_dma),
				  ring->tx_dma,
				  ring->tx_phys);
		ring->tx_dma = NULL;
	}

	netdev_reset_queue(priv->netdev);
}
static int fe_alloc_tx(struct fe_priv *priv)
{
	int i;
	struct fe_tx_ring *ring = &priv->tx_ring;

	ring->tx_free_idx = 0;
	ring->tx_next_idx = 0;
	ring->tx_thresh = max((unsigned long)ring->tx_ring_size >> 2,
			      MAX_SKB_FRAGS);

	ring->tx_buf = kcalloc(ring->tx_ring_size, sizeof(*ring->tx_buf),
			       GFP_KERNEL);
	if (!ring->tx_buf)
		goto no_tx_mem;

	ring->tx_dma = dma_alloc_coherent(priv->dev,
					  ring->tx_ring_size * sizeof(*ring->tx_dma),
					  &ring->tx_phys,
					  GFP_ATOMIC | __GFP_ZERO);
	if (!ring->tx_dma)
		goto no_tx_mem;

	for (i = 0; i < ring->tx_ring_size; i++) {
		if (priv->soc->tx_dma)
			priv->soc->tx_dma(&ring->tx_dma[i]);
		ring->tx_dma[i].txd2 = TX_DMA_DESP2_DEF;
	}

	/* make sure that all changes to the dma ring are flushed before we
	 * continue
	 */
	wmb();

	fe_reg_w32(ring->tx_phys, FE_REG_TX_BASE_PTR0);
	fe_reg_w32(ring->tx_ring_size, FE_REG_TX_MAX_CNT0);
	fe_reg_w32(0, FE_REG_TX_CTX_IDX0);
	fe_reg_w32(FE_PST_DTX_IDX0, FE_REG_PDMA_RST_CFG);

	return 0;

no_tx_mem:
	return -ENOMEM;
}
static int fe_init_dma(struct fe_priv *priv)
{
	int err;

	err = fe_alloc_tx(priv);
	if (err)
		return err;

	err = fe_alloc_rx(priv);
	if (err)
		return err;

	return 0;
}

static void fe_free_dma(struct fe_priv *priv)
{
	fe_clean_tx(priv);
	fe_clean_rx(priv);
}
void fe_stats_update(struct fe_priv *priv)
{
	struct fe_hw_stats *hwstats = priv->hw_stats;
	unsigned int base = fe_reg_table[FE_REG_FE_COUNTER_BASE];
	u64 stats;

	u64_stats_update_begin(&hwstats->syncp);

	if (IS_ENABLED(CONFIG_SOC_MT7621)) {
		hwstats->rx_bytes += fe_r32(base);
		stats = fe_r32(base + 0x04);
		if (stats)
			hwstats->rx_bytes += (stats << 32);
		hwstats->rx_packets += fe_r32(base + 0x08);
		hwstats->rx_overflow += fe_r32(base + 0x10);
		hwstats->rx_fcs_errors += fe_r32(base + 0x14);
		hwstats->rx_short_errors += fe_r32(base + 0x18);
		hwstats->rx_long_errors += fe_r32(base + 0x1c);
		hwstats->rx_checksum_errors += fe_r32(base + 0x20);
		hwstats->rx_flow_control_packets += fe_r32(base + 0x24);
		hwstats->tx_skip += fe_r32(base + 0x28);
		hwstats->tx_collisions += fe_r32(base + 0x2c);
		hwstats->tx_bytes += fe_r32(base + 0x30);
		stats = fe_r32(base + 0x34);
		if (stats)
			hwstats->tx_bytes += (stats << 32);
		hwstats->tx_packets += fe_r32(base + 0x38);
	} else {
		hwstats->tx_bytes += fe_r32(base);
		hwstats->tx_packets += fe_r32(base + 0x04);
		hwstats->tx_skip += fe_r32(base + 0x08);
		hwstats->tx_collisions += fe_r32(base + 0x0c);
		hwstats->rx_bytes += fe_r32(base + 0x20);
		hwstats->rx_packets += fe_r32(base + 0x24);
		hwstats->rx_overflow += fe_r32(base + 0x28);
		hwstats->rx_fcs_errors += fe_r32(base + 0x2c);
		hwstats->rx_short_errors += fe_r32(base + 0x30);
		hwstats->rx_long_errors += fe_r32(base + 0x34);
		hwstats->rx_checksum_errors += fe_r32(base + 0x38);
		hwstats->rx_flow_control_packets += fe_r32(base + 0x3c);
	}

	u64_stats_update_end(&hwstats->syncp);
}
static void fe_get_stats64(struct net_device *dev,
			   struct rtnl_link_stats64 *storage)
{
	struct fe_priv *priv = netdev_priv(dev);
	struct fe_hw_stats *hwstats = priv->hw_stats;
	unsigned int base = fe_reg_table[FE_REG_FE_COUNTER_BASE];
	unsigned int start;

	if (!base) {
		netdev_stats_to_stats64(storage, &dev->stats);
		return;
	}

	if (netif_running(dev) && netif_device_present(dev)) {
		if (spin_trylock_bh(&hwstats->stats_lock)) {
			fe_stats_update(priv);
			spin_unlock_bh(&hwstats->stats_lock);
		}
	}

	do {
		start = u64_stats_fetch_begin_irq(&hwstats->syncp);
		storage->rx_packets = hwstats->rx_packets;
		storage->tx_packets = hwstats->tx_packets;
		storage->rx_bytes = hwstats->rx_bytes;
		storage->tx_bytes = hwstats->tx_bytes;
		storage->collisions = hwstats->tx_collisions;
		storage->rx_length_errors = hwstats->rx_short_errors +
					    hwstats->rx_long_errors;
		storage->rx_over_errors = hwstats->rx_overflow;
		storage->rx_crc_errors = hwstats->rx_fcs_errors;
		storage->rx_errors = hwstats->rx_checksum_errors;
		storage->tx_aborted_errors = hwstats->tx_skip;
	} while (u64_stats_fetch_retry_irq(&hwstats->syncp, start));

	storage->tx_errors = priv->netdev->stats.tx_errors;
	storage->rx_dropped = priv->netdev->stats.rx_dropped;
	storage->tx_dropped = priv->netdev->stats.tx_dropped;
}
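/* TX VLAN offload uses 16 hardware VID slots; vid & 0xf selects the slot,
 * two 16-bit VIDs are packed per 32-bit register. If a slot is already in
 * use, TX VLAN offload is disabled for the device instead.
 */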
static int fe_vlan_rx_add_vid(struct net_device *dev,
			      __be16 proto, u16 vid)
{
	struct fe_priv *priv = netdev_priv(dev);
	u32 idx = (vid & 0xf);
	u32 vlan_cfg;

	if (!((fe_reg_table[FE_REG_FE_DMA_VID_BASE]) &&
	      (dev->features & NETIF_F_HW_VLAN_CTAG_TX)))
		return 0;

	if (test_bit(idx, &priv->vlan_map)) {
		netdev_warn(dev, "disable tx vlan offload\n");
		dev->wanted_features &= ~NETIF_F_HW_VLAN_CTAG_TX;
		netdev_update_features(dev);
	} else {
		vlan_cfg = fe_r32(fe_reg_table[FE_REG_FE_DMA_VID_BASE] +
				  ((idx >> 1) << 2));
		if (idx & 0x1) {
			vlan_cfg &= 0xffff;
			vlan_cfg |= (vid << 16);
		} else {
			vlan_cfg &= 0xffff0000;
			vlan_cfg |= vid;
		}
		fe_w32(vlan_cfg, fe_reg_table[FE_REG_FE_DMA_VID_BASE] +
		       ((idx >> 1) << 2));
		set_bit(idx, &priv->vlan_map);
	}

	return 0;
}

static int fe_vlan_rx_kill_vid(struct net_device *dev,
			       __be16 proto, u16 vid)
{
	struct fe_priv *priv = netdev_priv(dev);
	u32 idx = (vid & 0xf);

	if (!((fe_reg_table[FE_REG_FE_DMA_VID_BASE]) &&
	      (dev->features & NETIF_F_HW_VLAN_CTAG_TX)))
		return 0;

	clear_bit(idx, &priv->vlan_map);

	return 0;
}
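/* Number of free TX descriptors: ring size minus the distance between the
 * producer (tx_next_idx) and consumer (tx_free_idx); the mask handles
 * wrap-around.
 */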
static inline u32 fe_empty_txd(struct fe_tx_ring *ring)
{
	return (u32)(ring->tx_ring_size -
		     ((ring->tx_next_idx - ring->tx_free_idx) &
		      (ring->tx_ring_size - 1)));
}

struct fe_map_state {
	struct device *dev;
	struct fe_tx_dma txd;
	u32 def_txd4;
	int ring_idx;
	int i;
};
static void fe_tx_dma_write_desc(struct fe_tx_ring *ring, struct fe_map_state *st)
{
	fe_set_txd(&st->txd, &ring->tx_dma[st->ring_idx]);
	memset(&st->txd, 0, sizeof(st->txd));
	st->txd.txd4 = st->def_txd4;
	st->ring_idx = NEXT_TX_DESP_IDX(st->ring_idx);
}
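/* A TX descriptor carries up to two buffers: the first goes into txd1 with
 * its length in PLEN0, the second into txd3 with its length in PLEN1. The
 * descriptor is written out to the ring once both slots are in use, i.e.
 * after every second buffer.
 */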
static int __fe_tx_dma_map_page(struct fe_tx_ring *ring, struct fe_map_state *st,
				struct page *page, size_t offset, size_t size)
{
	struct device *dev = st->dev;
	struct fe_tx_buf *tx_buf;
	dma_addr_t mapped_addr;

	mapped_addr = dma_map_page(dev, page, offset, size, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, mapped_addr)))
		return -EIO;

	if (st->i && !(st->i & 1))
		fe_tx_dma_write_desc(ring, st);

	tx_buf = &ring->tx_buf[st->ring_idx];
	if (st->i & 1) {
		st->txd.txd3 = mapped_addr;
		st->txd.txd2 |= TX_DMA_PLEN1(size);
		dma_unmap_addr_set(tx_buf, dma_addr1, mapped_addr);
		dma_unmap_len_set(tx_buf, dma_len1, size);
	} else {
		tx_buf->skb = (struct sk_buff *)DMA_DUMMY_DESC;
		st->txd.txd1 = mapped_addr;
		st->txd.txd2 = TX_DMA_PLEN0(size);
		dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
		dma_unmap_len_set(tx_buf, dma_len0, size);
	}

	st->i++;

	return 0;
}
static int fe_tx_dma_map_page(struct fe_tx_ring *ring, struct fe_map_state *st,
			      struct page *page, size_t offset, size_t size)
{
	int cur_size;
	int ret;

	while (size > 0) {
		cur_size = min_t(size_t, size, TX_DMA_BUF_LEN);

		ret = __fe_tx_dma_map_page(ring, st, page, offset, cur_size);
		if (ret)
			return ret;

		size -= cur_size;
		offset += cur_size;
	}

	return 0;
}

static int fe_tx_dma_map_skb(struct fe_tx_ring *ring, struct fe_map_state *st,
			     struct sk_buff *skb)
{
	struct page *page = virt_to_page(skb->data);
	size_t offset = offset_in_page(skb->data);
	size_t size = skb_headlen(skb);

	return fe_tx_dma_map_page(ring, st, page, offset, size);
}

static inline struct sk_buff *
fe_next_frag(struct sk_buff *head, struct sk_buff *skb)
{
	if (skb != head)
		return skb->next;

	if (skb_has_frag_list(skb))
		return skb_shinfo(skb)->frag_list;

	return NULL;
}
static int fe_tx_map_dma(struct sk_buff *skb, struct net_device *dev,
			 int tx_num, struct fe_tx_ring *ring)
{
	struct fe_priv *priv = netdev_priv(dev);
	struct fe_map_state st = {
		.dev = priv->dev,
		.ring_idx = ring->tx_next_idx,
	};
	struct sk_buff *head = skb;
	struct fe_tx_buf *tx_buf;
	unsigned int nr_frags;
	int i, j;

	/* init tx descriptor */
	if (priv->soc->tx_dma)
		priv->soc->tx_dma(&st.txd);
	else
		st.txd.txd4 = TX_DMA_DESP4_DEF;
	st.def_txd4 = st.txd.txd4;

	/* TX Checksum offload */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		st.txd.txd4 |= TX_DMA_CHKSUM;

	/* VLAN header offload */
	if (skb_vlan_tag_present(skb)) {
		u16 tag = skb_vlan_tag_get(skb);

		if (IS_ENABLED(CONFIG_SOC_MT7621))
			st.txd.txd4 |= TX_DMA_INS_VLAN_MT7621 | tag;
		else
			st.txd.txd4 |= TX_DMA_INS_VLAN |
				       ((tag >> VLAN_PRIO_SHIFT) << 4) |
				       (tag & 0xF);
	}

	/* TSO: fill MSS info in tcp checksum field */
	if (skb_is_gso(skb)) {
		if (skb_cow_head(skb, 0)) {
			netif_warn(priv, tx_err, dev,
				   "GSO expand head fail.\n");
			goto err_out;
		}
		if (skb_shinfo(skb)->gso_type &
		    (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
			st.txd.txd4 |= TX_DMA_TSO;
			tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size);
		}
	}

next_frag:
	if (skb_headlen(skb) && fe_tx_dma_map_skb(ring, &st, skb))
		goto err_dma;

	/* TX SG offload */
	nr_frags = skb_shinfo(skb)->nr_frags;
	for (i = 0; i < nr_frags; i++) {
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0)
		struct skb_frag_struct *frag;
#else
		skb_frag_t *frag;
#endif

		frag = &skb_shinfo(skb)->frags[i];
		if (fe_tx_dma_map_page(ring, &st, skb_frag_page(frag),
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0)
				       frag->page_offset, skb_frag_size(frag)))
#else
				       skb_frag_off(frag), skb_frag_size(frag)))
#endif
			goto err_dma;
	}

	skb = fe_next_frag(head, skb);
	if (skb)
		goto next_frag;

	/* set last segment */
	if (st.i & 0x1)
		st.txd.txd2 |= TX_DMA_LS0;
	else
		st.txd.txd2 |= TX_DMA_LS1;

	/* store skb to cleanup */
	tx_buf = &ring->tx_buf[st.ring_idx];
	tx_buf->skb = head;

	netdev_sent_queue(dev, head->len);
	skb_tx_timestamp(head);

	fe_tx_dma_write_desc(ring, &st);
	ring->tx_next_idx = st.ring_idx;

	/* make sure that all changes to the dma ring are flushed before we
	 * continue
	 */
	wmb();
	if (unlikely(fe_empty_txd(ring) <= ring->tx_thresh)) {
		netif_stop_queue(dev);
		smp_mb();
		if (unlikely(fe_empty_txd(ring) > ring->tx_thresh))
			netif_wake_queue(dev);
	}

#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 2, 0)
	if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) || !head->xmit_more)
#else
	if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) || !netdev_xmit_more())
#endif
		fe_reg_w32(ring->tx_next_idx, FE_REG_TX_CTX_IDX0);

	return 0;

err_dma:
	j = ring->tx_next_idx;
	for (i = 0; i < tx_num; i++) {
		/* unmap dma */
		fe_txd_unmap(priv->dev, &ring->tx_buf[j]);
		ring->tx_dma[j].txd2 = TX_DMA_DESP2_DEF;

		j = NEXT_TX_DESP_IDX(j);
	}
	/* make sure that all changes to the dma ring are flushed before we
	 * continue
	 */
	wmb();

err_out:
	return -1;
}
static inline int fe_skb_padto(struct sk_buff *skb, struct fe_priv *priv)
{
	unsigned int len;
	int ret;

	ret = 0;
	if (unlikely(skb->len < VLAN_ETH_ZLEN)) {
		if ((priv->flags & FE_FLAG_PADDING_64B) &&
		    !(priv->flags & FE_FLAG_PADDING_BUG))
			return ret;

		if (skb_vlan_tag_present(skb))
			len = ETH_ZLEN;
		else if (skb->protocol == cpu_to_be16(ETH_P_8021Q))
			len = VLAN_ETH_ZLEN;
		else if (!(priv->flags & FE_FLAG_PADDING_64B))
			len = ETH_ZLEN;
		else
			return ret;

		if (skb->len < len) {
			ret = skb_pad(skb, len - skb->len);
			if (ret < 0)
				return ret;
			skb->len = len;
			skb_set_tail_pointer(skb, len);
		}
	}

	return ret;
}
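/* Worst-case descriptor demand for an skb: every buffer (head, page
 * fragments split into TX_DMA_BUF_LEN chunks, frag-list segments) takes one
 * of the two slots in a descriptor, so the total is rounded up by two.
 */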
static inline int fe_cal_txd_req(struct sk_buff *skb)
{
	struct sk_buff *head = skb;
	int i, nfrags = 1;
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0)
	struct skb_frag_struct *frag;
#else
	skb_frag_t *frag;
#endif

next_frag:
	nfrags++;
	if (skb_is_gso(skb)) {
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			frag = &skb_shinfo(skb)->frags[i];
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0)
			nfrags += DIV_ROUND_UP(frag->size, TX_DMA_BUF_LEN);
#else
			nfrags += DIV_ROUND_UP(skb_frag_size(frag), TX_DMA_BUF_LEN);
#endif
		}
	} else {
		nfrags += skb_shinfo(skb)->nr_frags;
	}

	skb = fe_next_frag(head, skb);
	if (skb)
		goto next_frag;

	return DIV_ROUND_UP(nfrags, 2);
}
static int fe_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct fe_priv *priv = netdev_priv(dev);
	struct fe_tx_ring *ring = &priv->tx_ring;
	struct net_device_stats *stats = &dev->stats;
	int tx_num;
	int len = skb->len;

	if (fe_skb_padto(skb, priv)) {
		netif_warn(priv, tx_err, dev, "tx padding failed!\n");
		return NETDEV_TX_OK;
	}

	tx_num = fe_cal_txd_req(skb);
	if (unlikely(fe_empty_txd(ring) <= tx_num)) {
		netif_stop_queue(dev);
		netif_err(priv, tx_queued, dev,
			  "Tx Ring full when queue awake!\n");
		return NETDEV_TX_BUSY;
	}

	if (fe_tx_map_dma(skb, dev, tx_num, ring) < 0) {
		stats->tx_dropped++;
	} else {
		stats->tx_packets++;
		stats->tx_bytes += len;
	}

	return NETDEV_TX_OK;
}
static int fe_poll_rx(struct napi_struct *napi, int budget,
		      struct fe_priv *priv, u32 rx_intr)
{
	struct net_device *netdev = priv->netdev;
	struct net_device_stats *stats = &netdev->stats;
	struct fe_soc_data *soc = priv->soc;
	struct fe_rx_ring *ring = &priv->rx_ring;
	int idx = ring->rx_calc_idx;
	u32 checksum_bit;
	struct sk_buff *skb;
	u8 *data, *new_data;
	struct fe_rx_dma *rxd, trxd;
	int done = 0, pad;

	if (netdev->features & NETIF_F_RXCSUM)
		checksum_bit = soc->checksum_bit;
	else
		checksum_bit = 0;

	if (priv->flags & FE_FLAG_RX_2B_OFFSET)
		pad = 0;
	else
		pad = NET_IP_ALIGN;

	while (done < budget) {
		unsigned int pktlen;
		dma_addr_t dma_addr;

		idx = NEXT_RX_DESP_IDX(idx);
		rxd = &ring->rx_dma[idx];
		data = ring->rx_data[idx];

		fe_get_rxd(&trxd, rxd);
		if (!(trxd.rxd2 & RX_DMA_DONE))
			break;

		/* alloc new buffer */
		new_data = page_frag_alloc(&ring->frag_cache, ring->frag_size,
					   GFP_ATOMIC);
		if (unlikely(!new_data)) {
			stats->rx_dropped++;
			goto release_desc;
		}
		dma_addr = dma_map_single(priv->dev,
					  new_data + NET_SKB_PAD + pad,
					  ring->rx_buf_size,
					  DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(priv->dev, dma_addr))) {
			skb_free_frag(new_data);
			goto release_desc;
		}

		/* receive data */
		skb = build_skb(data, ring->frag_size);
		if (unlikely(!skb)) {
			skb_free_frag(new_data);
			goto release_desc;
		}
		skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);

		dma_unmap_single(priv->dev, trxd.rxd1,
				 ring->rx_buf_size, DMA_FROM_DEVICE);
		pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
		skb->dev = netdev;
		skb_put(skb, pktlen);
		if (trxd.rxd4 & checksum_bit)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);
		skb->protocol = eth_type_trans(skb, netdev);

		if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX &&
		    RX_DMA_VID(trxd.rxd3))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       RX_DMA_VID(trxd.rxd3));

		stats->rx_packets++;
		stats->rx_bytes += pktlen;

		napi_gro_receive(napi, skb);

		ring->rx_data[idx] = new_data;
		rxd->rxd1 = (unsigned int)dma_addr;

release_desc:
		if (priv->flags & FE_FLAG_RX_SG_DMA)
			rxd->rxd2 = RX_DMA_PLEN0(ring->rx_buf_size);
		else
			rxd->rxd2 = RX_DMA_LSO;

		ring->rx_calc_idx = idx;
		/* make sure that all changes to the dma ring are flushed before
		 * we continue
		 */
		wmb();
		fe_reg_w32(ring->rx_calc_idx, FE_REG_RX_CALC_IDX0);
		done++;
	}

	if (done < budget)
		fe_reg_w32(rx_intr, FE_REG_FE_INT_STATUS);

	return done;
}
static int fe_poll_tx(struct fe_priv *priv, int budget, u32 tx_intr,
		      int *tx_again)
{
	struct net_device *netdev = priv->netdev;
	unsigned int bytes_compl = 0;
	struct sk_buff *skb;
	struct fe_tx_buf *tx_buf;
	int done = 0;
	u32 idx, hwidx;
	struct fe_tx_ring *ring = &priv->tx_ring;

	idx = ring->tx_free_idx;
	hwidx = fe_reg_r32(FE_REG_TX_DTX_IDX0);

	while ((idx != hwidx) && budget) {
		tx_buf = &ring->tx_buf[idx];
		skb = tx_buf->skb;

		if (!skb)
			break;

		if (skb != (struct sk_buff *)DMA_DUMMY_DESC) {
			bytes_compl += skb->len;
			done++;
			budget--;
		}
		fe_txd_unmap(priv->dev, tx_buf);
		idx = NEXT_TX_DESP_IDX(idx);
	}
	ring->tx_free_idx = idx;

	if (idx == hwidx) {
		/* read hw index again make sure no new tx packet */
		hwidx = fe_reg_r32(FE_REG_TX_DTX_IDX0);
		if (idx == hwidx)
			fe_reg_w32(tx_intr, FE_REG_FE_INT_STATUS);
		else
			*tx_again = 1;
	} else {
		*tx_again = 1;
	}

	if (done) {
		netdev_completed_queue(netdev, done, bytes_compl);
		smp_mb();
		if (unlikely(netif_queue_stopped(netdev) &&
			     (fe_empty_txd(ring) > ring->tx_thresh)))
			netif_wake_queue(netdev);
	}

	return done;
}
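/* NAPI handler: reap TX completions first, then RX. Interrupts are only
 * re-enabled after the status register has been re-checked, so events that
 * arrive while polling are not lost.
 */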
static int fe_poll(struct napi_struct *napi, int budget)
{
	struct fe_priv *priv = container_of(napi, struct fe_priv, rx_napi);
	struct fe_hw_stats *hwstat = priv->hw_stats;
	int tx_done, rx_done, tx_again;
	u32 status, fe_status, status_reg, mask;
	u32 tx_intr, rx_intr, status_intr;

	status = fe_reg_r32(FE_REG_FE_INT_STATUS);
	fe_status = status;
	tx_intr = priv->soc->tx_int;
	rx_intr = priv->soc->rx_int;
	status_intr = priv->soc->status_int;
	tx_done = 0;
	rx_done = 0;
	tx_again = 0;

	if (fe_reg_table[FE_REG_FE_INT_STATUS2]) {
		fe_status = fe_reg_r32(FE_REG_FE_INT_STATUS2);
		status_reg = FE_REG_FE_INT_STATUS2;
	} else {
		status_reg = FE_REG_FE_INT_STATUS;
	}

	if (status & tx_intr)
		tx_done = fe_poll_tx(priv, budget, tx_intr, &tx_again);

	if (status & rx_intr)
		rx_done = fe_poll_rx(napi, budget, priv, rx_intr);

	if (unlikely(fe_status & status_intr)) {
		if (hwstat && spin_trylock(&hwstat->stats_lock)) {
			fe_stats_update(priv);
			spin_unlock(&hwstat->stats_lock);
		}
		fe_reg_w32(status_intr, status_reg);
	}

	if (unlikely(netif_msg_intr(priv))) {
		mask = fe_reg_r32(FE_REG_FE_INT_ENABLE);
		netdev_info(priv->netdev,
			    "done tx %d, rx %d, intr 0x%08x/0x%x\n",
			    tx_done, rx_done, status, mask);
	}

	if (!tx_again && (rx_done < budget)) {
		status = fe_reg_r32(FE_REG_FE_INT_STATUS);
		if (status & (tx_intr | rx_intr)) {
			/* let napi poll again */
			rx_done = budget;
			goto poll_again;
		}

		napi_complete_done(napi, rx_done);
		fe_int_enable(tx_intr | rx_intr);
	} else {
		rx_done = budget;
	}

poll_again:
	return rx_done;
}
static void fe_tx_timeout(struct net_device *dev)
{
	struct fe_priv *priv = netdev_priv(dev);
	struct fe_tx_ring *ring = &priv->tx_ring;

	priv->netdev->stats.tx_errors++;
	netif_err(priv, tx_err, dev,
		  "transmit timed out\n");
	netif_info(priv, drv, dev, "dma_cfg:%08x\n",
		   fe_reg_r32(FE_REG_PDMA_GLO_CFG));
	netif_info(priv, drv, dev, "tx_ring=%d, "
		   "base=%08x, max=%u, ctx=%u, dtx=%u, fdx=%hu, next=%hu\n",
		   0, fe_reg_r32(FE_REG_TX_BASE_PTR0),
		   fe_reg_r32(FE_REG_TX_MAX_CNT0),
		   fe_reg_r32(FE_REG_TX_CTX_IDX0),
		   fe_reg_r32(FE_REG_TX_DTX_IDX0),
		   ring->tx_free_idx,
		   ring->tx_next_idx);
	netif_info(priv, drv, dev,
		   "rx_ring=%d, base=%08x, max=%u, calc=%u, drx=%u\n",
		   0, fe_reg_r32(FE_REG_RX_BASE_PTR0),
		   fe_reg_r32(FE_REG_RX_MAX_CNT0),
		   fe_reg_r32(FE_REG_RX_CALC_IDX0),
		   fe_reg_r32(FE_REG_RX_DRX_IDX0));

	if (!test_and_set_bit(FE_FLAG_RESET_PENDING, priv->pending_flags))
		schedule_work(&priv->pending_work);
}
static irqreturn_t fe_handle_irq(int irq, void *dev)
{
	struct fe_priv *priv = netdev_priv(dev);
	u32 status, int_mask;

	status = fe_reg_r32(FE_REG_FE_INT_STATUS);

	if (unlikely(!status))
		return IRQ_NONE;

	int_mask = (priv->soc->rx_int | priv->soc->tx_int);
	if (likely(status & int_mask)) {
		if (likely(napi_schedule_prep(&priv->rx_napi))) {
			fe_int_disable(int_mask);
			__napi_schedule(&priv->rx_napi);
		}
	} else {
		fe_reg_w32(status, FE_REG_FE_INT_STATUS);
	}

	return IRQ_HANDLED;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void fe_poll_controller(struct net_device *dev)
{
	struct fe_priv *priv = netdev_priv(dev);
	u32 int_mask = priv->soc->tx_int | priv->soc->rx_int;

	fe_int_disable(int_mask);
	fe_handle_irq(dev->irq, dev);
	fe_int_enable(int_mask);
}
#endif
int fe_set_clock_cycle(struct fe_priv *priv)
{
	unsigned long sysclk = priv->sysclk;

	sysclk /= FE_US_CYC_CNT_DIVISOR;
	sysclk <<= FE_US_CYC_CNT_SHIFT;

	fe_w32((fe_r32(FE_FE_GLO_CFG) &
		~(FE_US_CYC_CNT_MASK << FE_US_CYC_CNT_SHIFT)) |
	       sysclk,
	       FE_FE_GLO_CFG);
	return 0;
}
void fe_fwd_config(struct fe_priv *priv)
{
	u32 fwd_cfg;

	fwd_cfg = fe_r32(FE_GDMA1_FWD_CFG);

	/* disable jumbo frame */
	if (priv->flags & FE_FLAG_JUMBO_FRAME)
		fwd_cfg &= ~FE_GDM1_JMB_EN;

	/* set unicast/multicast/broadcast frame to cpu */
	fwd_cfg &= ~0xffff;

	fe_w32(fwd_cfg, FE_GDMA1_FWD_CFG);
}
static void fe_rxcsum_config(bool enable)
{
	if (enable)
		fe_w32(fe_r32(FE_GDMA1_FWD_CFG) | (FE_GDM1_ICS_EN |
		       FE_GDM1_TCS_EN | FE_GDM1_UCS_EN),
		       FE_GDMA1_FWD_CFG);
	else
		fe_w32(fe_r32(FE_GDMA1_FWD_CFG) & ~(FE_GDM1_ICS_EN |
		       FE_GDM1_TCS_EN | FE_GDM1_UCS_EN),
		       FE_GDMA1_FWD_CFG);
}

static void fe_txcsum_config(bool enable)
{
	if (enable)
		fe_w32(fe_r32(FE_CDMA_CSG_CFG) | (FE_ICS_GEN_EN |
		       FE_TCS_GEN_EN | FE_UCS_GEN_EN),
		       FE_CDMA_CSG_CFG);
	else
		fe_w32(fe_r32(FE_CDMA_CSG_CFG) & ~(FE_ICS_GEN_EN |
		       FE_TCS_GEN_EN | FE_UCS_GEN_EN),
		       FE_CDMA_CSG_CFG);
}

void fe_csum_config(struct fe_priv *priv)
{
	struct net_device *dev = priv_netdev(priv);

	fe_txcsum_config((dev->features & NETIF_F_IP_CSUM));
	fe_rxcsum_config((dev->features & NETIF_F_RXCSUM));
}
static int fe_hw_init(struct net_device *dev)
{
	struct fe_priv *priv = netdev_priv(dev);
	int i, err;

	err = devm_request_irq(priv->dev, dev->irq, fe_handle_irq, 0,
			       dev_name(priv->dev), dev);
	if (err)
		return err;

	if (priv->soc->set_mac)
		priv->soc->set_mac(priv, dev->dev_addr);
	else
		fe_hw_set_macaddr(priv, dev->dev_addr);

	/* disable delay interrupt */
	fe_reg_w32(0, FE_REG_DLY_INT_CFG);

	fe_int_disable(priv->soc->tx_int | priv->soc->rx_int);

	/* frame engine will push VLAN tag regarding to VIDX field in Tx desc */
	if (fe_reg_table[FE_REG_FE_DMA_VID_BASE])
		for (i = 0; i < 16; i += 2)
			fe_w32(((i + 1) << 16) + i,
			       fe_reg_table[FE_REG_FE_DMA_VID_BASE] +
			       (i * 2));

	if (priv->soc->fwd_config(priv))
		netdev_err(dev, "unable to get clock\n");

	if (fe_reg_table[FE_REG_FE_RST_GL]) {
		fe_reg_w32(1, FE_REG_FE_RST_GL);
		fe_reg_w32(0, FE_REG_FE_RST_GL);
	}

	return 0;
}
static int fe_open(struct net_device *dev)
{
	struct fe_priv *priv = netdev_priv(dev);
	unsigned long flags;
	u32 val;
	int err;

	err = fe_init_dma(priv);
	if (err) {
		fe_free_dma(priv);
		return err;
	}

	spin_lock_irqsave(&priv->page_lock, flags);

	val = FE_TX_WB_DDONE | FE_RX_DMA_EN | FE_TX_DMA_EN;
	if (priv->flags & FE_FLAG_RX_2B_OFFSET)
		val |= FE_RX_2B_OFFSET;
	val |= priv->soc->pdma_glo_cfg;
	fe_reg_w32(val, FE_REG_PDMA_GLO_CFG);

	spin_unlock_irqrestore(&priv->page_lock, flags);

	if (priv->phy)
		priv->phy->start(priv);

	if (priv->soc->has_carrier && priv->soc->has_carrier(priv))
		netif_carrier_on(dev);

	napi_enable(&priv->rx_napi);
	fe_int_enable(priv->soc->tx_int | priv->soc->rx_int);
	netif_start_queue(dev);

	return 0;
}
static int fe_stop(struct net_device *dev)
{
	struct fe_priv *priv = netdev_priv(dev);
	unsigned long flags;
	int i;

	netif_tx_disable(dev);
	fe_int_disable(priv->soc->tx_int | priv->soc->rx_int);
	napi_disable(&priv->rx_napi);

	if (priv->phy)
		priv->phy->stop(priv);

	spin_lock_irqsave(&priv->page_lock, flags);

	fe_reg_w32(fe_reg_r32(FE_REG_PDMA_GLO_CFG) &
		   ~(FE_TX_WB_DDONE | FE_RX_DMA_EN | FE_TX_DMA_EN),
		   FE_REG_PDMA_GLO_CFG);
	spin_unlock_irqrestore(&priv->page_lock, flags);

	/* wait for the dma engine to become idle */
	for (i = 0; i < 10; i++) {
		if (fe_reg_r32(FE_REG_PDMA_GLO_CFG) &
		    (FE_TX_DMA_BUSY | FE_RX_DMA_BUSY)) {
			msleep(20);
			continue;
		}
		break;
	}

	fe_free_dma(priv);

	return 0;
}
static void fe_reset_phy(struct fe_priv *priv)
{
	int err, msec = 30;
	struct gpio_desc *phy_reset;

	phy_reset = devm_gpiod_get_optional(priv->dev, "phy-reset",
					    GPIOD_OUT_HIGH);
	if (!phy_reset)
		return;

	if (IS_ERR(phy_reset)) {
		dev_err(priv->dev, "Error acquiring reset gpio pins: %ld\n",
			PTR_ERR(phy_reset));
		return;
	}

	err = of_property_read_u32(priv->dev->of_node, "phy-reset-duration",
				   &msec);
	if (!err && msec > 1000)
		msec = 30;

	if (msec > 20)
		msleep(msec);
	else
		usleep_range(msec * 1000, msec * 1000 + 1000);

	gpiod_set_value(phy_reset, 0);
}
static int __init fe_init(struct net_device *dev)
{
	struct fe_priv *priv = netdev_priv(dev);
	struct device_node *port;
	const char *mac_addr;
	int err;

	priv->soc->reset_fe();

	if (priv->soc->switch_init)
		if (priv->soc->switch_init(priv)) {
			netdev_err(dev, "failed to initialize switch core\n");
			return -ENODEV;
		}

	fe_reset_phy(priv);

	mac_addr = of_get_mac_address(priv->dev->of_node);
	if (!IS_ERR_OR_NULL(mac_addr))
		ether_addr_copy(dev->dev_addr, mac_addr);

	/* If the mac address is invalid, use random mac address */
	if (!is_valid_ether_addr(dev->dev_addr)) {
		eth_hw_addr_random(dev);
		dev_err(priv->dev, "generated random MAC address %pM\n",
			dev->dev_addr);
	}

	err = fe_mdio_init(priv);
	if (err)
		return err;

	if (priv->soc->port_init)
		for_each_child_of_node(priv->dev->of_node, port)
			if (of_device_is_compatible(port, "mediatek,eth-port") &&
			    of_device_is_available(port))
				priv->soc->port_init(priv, port);

	if (priv->phy) {
		err = priv->phy->connect(priv);
		if (err)
			goto err_phy_disconnect;
	}

	err = fe_hw_init(dev);
	if (err)
		goto err_phy_disconnect;

	if ((priv->flags & FE_FLAG_HAS_SWITCH) && priv->soc->switch_config)
		priv->soc->switch_config(priv);

	return 0;

err_phy_disconnect:
	if (priv->phy)
		priv->phy->disconnect(priv);
	fe_mdio_cleanup(priv);

	return err;
}

static void fe_uninit(struct net_device *dev)
{
	struct fe_priv *priv = netdev_priv(dev);

	if (priv->phy)
		priv->phy->disconnect(priv);
	fe_mdio_cleanup(priv);

	fe_reg_w32(0, FE_REG_FE_INT_ENABLE);
	free_irq(dev->irq, dev);
}
static int fe_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct fe_priv *priv = netdev_priv(dev);

	if (!priv->phy_dev)
		return -ENODEV;

	return phy_mii_ioctl(priv->phy_dev, ifr, cmd);
}
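/* Jumbo MTUs switch the RX fragment size from the default to a full page;
 * when the interface is running, the rings are rebuilt by closing and
 * reopening the device.
 */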
static int fe_change_mtu(struct net_device *dev, int new_mtu)
{
	struct fe_priv *priv = netdev_priv(dev);
	int frag_size, old_mtu;
	u32 fwd_cfg;

	old_mtu = dev->mtu;
	dev->mtu = new_mtu;

	if (!(priv->flags & FE_FLAG_JUMBO_FRAME))
		return 0;

	/* return early if the buffer sizes will not change */
	if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN)
		return 0;
	if (old_mtu > ETH_DATA_LEN && new_mtu > ETH_DATA_LEN)
		return 0;

	if (new_mtu <= ETH_DATA_LEN)
		priv->rx_ring.frag_size = fe_max_frag_size(ETH_DATA_LEN);
	else
		priv->rx_ring.frag_size = PAGE_SIZE;
	priv->rx_ring.rx_buf_size = fe_max_buf_size(priv->rx_ring.frag_size);

	if (!netif_running(dev))
		return 0;

	fe_stop(dev);
	if (!IS_ENABLED(CONFIG_SOC_MT7621)) {
		fwd_cfg = fe_r32(FE_GDMA1_FWD_CFG);
		if (new_mtu <= ETH_DATA_LEN) {
			fwd_cfg &= ~FE_GDM1_JMB_EN;
		} else {
			frag_size = fe_max_frag_size(new_mtu);
			fwd_cfg &= ~(FE_GDM1_JMB_LEN_MASK << FE_GDM1_JMB_LEN_SHIFT);
			fwd_cfg |= (DIV_ROUND_UP(frag_size, 1024) <<
				    FE_GDM1_JMB_LEN_SHIFT) | FE_GDM1_JMB_EN;
		}
		fe_w32(fwd_cfg, FE_GDMA1_FWD_CFG);
	}

	return fe_open(dev);
}
static const struct net_device_ops fe_netdev_ops = {
	.ndo_init		= fe_init,
	.ndo_uninit		= fe_uninit,
	.ndo_open		= fe_open,
	.ndo_stop		= fe_stop,
	.ndo_start_xmit		= fe_start_xmit,
	.ndo_set_mac_address	= fe_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= fe_do_ioctl,
	.ndo_change_mtu		= fe_change_mtu,
	.ndo_tx_timeout		= fe_tx_timeout,
	.ndo_get_stats64	= fe_get_stats64,
	.ndo_vlan_rx_add_vid	= fe_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= fe_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= fe_poll_controller,
#endif
};
static void fe_reset_pending(struct fe_priv *priv)
{
	struct net_device *dev = priv->netdev;
	int err;

	rtnl_lock();
	fe_stop(dev);

	err = fe_open(dev);
	if (err) {
		netif_alert(priv, ifup, dev,
			    "Driver up/down cycle failed, closing device.\n");
		dev_close(dev);
	}
	rtnl_unlock();
}

static const struct fe_work_t fe_work[] = {
	{FE_FLAG_RESET_PENDING, fe_reset_pending},
};

static void fe_pending_work(struct work_struct *work)
{
	struct fe_priv *priv = container_of(work, struct fe_priv, pending_work);
	int i;
	bool pending;

	for (i = 0; i < ARRAY_SIZE(fe_work); i++) {
		pending = test_and_clear_bit(fe_work[i].bitnr,
					     priv->pending_flags);
		if (pending)
			fe_work[i].action(priv);
	}
}
static int fe_probe(struct platform_device *pdev)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	const struct of_device_id *match;
	struct fe_soc_data *soc;
	struct net_device *netdev;
	struct fe_priv *priv;
	struct clk *sysclk;
	int err, napi_weight;

	device_reset(&pdev->dev);

	match = of_match_device(of_fe_match, &pdev->dev);
	soc = (struct fe_soc_data *)match->data;

	if (soc->reg_table)
		fe_reg_table = soc->reg_table;
	else
		soc->reg_table = fe_reg_table;

	fe_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(fe_base)) {
		err = -EADDRNOTAVAIL;
		goto err_out;
	}

	netdev = alloc_etherdev(sizeof(*priv));
	if (!netdev) {
		dev_err(&pdev->dev, "alloc_etherdev failed\n");
		err = -ENOMEM;
		goto err_iounmap;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);
	netdev->netdev_ops = &fe_netdev_ops;
	netdev->base_addr = (unsigned long)fe_base;

	netdev->irq = platform_get_irq(pdev, 0);
	if (netdev->irq < 0) {
		dev_err(&pdev->dev, "no IRQ resource found\n");
		err = -ENXIO;
		goto err_free_dev;
	}

	soc->init_data(soc, netdev);
	netdev->vlan_features = netdev->hw_features &
				~(NETIF_F_HW_VLAN_CTAG_TX |
				  NETIF_F_HW_VLAN_CTAG_RX);
	netdev->features |= netdev->hw_features;

	if (IS_ENABLED(CONFIG_SOC_MT7621))
		netdev->max_mtu = 2048;

	/* fake rx vlan filter func. to support tx vlan offload func */
	if (fe_reg_table[FE_REG_FE_DMA_VID_BASE])
		netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;

	priv = netdev_priv(netdev);
	spin_lock_init(&priv->page_lock);
	if (fe_reg_table[FE_REG_FE_COUNTER_BASE]) {
		priv->hw_stats = kzalloc(sizeof(*priv->hw_stats), GFP_KERNEL);
		if (!priv->hw_stats) {
			err = -ENOMEM;
			goto err_free_dev;
		}
		spin_lock_init(&priv->hw_stats->stats_lock);
	}

	sysclk = devm_clk_get(&pdev->dev, NULL);
	if (!IS_ERR(sysclk)) {
		priv->sysclk = clk_get_rate(sysclk);
	} else if ((priv->flags & FE_FLAG_CALIBRATE_CLK)) {
		dev_err(&pdev->dev, "this soc needs a clk for calibration\n");
		err = -ENXIO;
		goto err_free_dev;
	}

	priv->switch_np = of_parse_phandle(pdev->dev.of_node, "mediatek,switch", 0);
	if ((priv->flags & FE_FLAG_HAS_SWITCH) && !priv->switch_np) {
		dev_err(&pdev->dev, "failed to read switch phandle\n");
		err = -ENODEV;
		goto err_free_dev;
	}

	priv->netdev = netdev;
	priv->dev = &pdev->dev;
	priv->soc = soc;
	priv->msg_enable = netif_msg_init(fe_msg_level, FE_DEFAULT_MSG_ENABLE);
	priv->rx_ring.frag_size = fe_max_frag_size(ETH_DATA_LEN);
	priv->rx_ring.rx_buf_size = fe_max_buf_size(priv->rx_ring.frag_size);
	priv->tx_ring.tx_ring_size = NUM_DMA_DESC;
	priv->rx_ring.rx_ring_size = NUM_DMA_DESC;
	INIT_WORK(&priv->pending_work, fe_pending_work);
	u64_stats_init(&priv->hw_stats->syncp);

	napi_weight = 16;
	if (priv->flags & FE_FLAG_NAPI_WEIGHT) {
		napi_weight *= 4;
		priv->tx_ring.tx_ring_size *= 4;
		priv->rx_ring.rx_ring_size *= 4;
	}
	netif_napi_add(netdev, &priv->rx_napi, fe_poll, napi_weight);
	fe_set_ethtool_ops(netdev);

	err = register_netdev(netdev);
	if (err) {
		dev_err(&pdev->dev, "error bringing up device\n");
		goto err_free_dev;
	}

	platform_set_drvdata(pdev, netdev);

	netif_info(priv, probe, netdev, "mediatek frame engine at 0x%08lx, irq %d\n",
		   netdev->base_addr, netdev->irq);

	return 0;

err_free_dev:
	free_netdev(netdev);
err_iounmap:
	devm_iounmap(&pdev->dev, fe_base);
err_out:
	return err;
}
static int fe_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct fe_priv *priv = netdev_priv(dev);

	netif_napi_del(&priv->rx_napi);
	kfree(priv->hw_stats);

	cancel_work_sync(&priv->pending_work);

	unregister_netdev(dev);
	free_netdev(dev);
	platform_set_drvdata(pdev, NULL);

	return 0;
}
static struct platform_driver fe_driver = {
	.probe = fe_probe,
	.remove = fe_remove,
	.driver = {
		.name = "mtk_soc_eth",
		.owner = THIS_MODULE,
		.of_match_table = of_fe_match,
	},
};

module_platform_driver(fe_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
MODULE_DESCRIPTION("Ethernet driver for Ralink SoC");
MODULE_VERSION(MTK_FE_DRV_VERSION);