/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2009-2013 John Crispin <blogic@openwrt.org>
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/platform_device.h>
#include <linux/of_device.h>
#include <linux/clk.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/if_vlan.h>
#include <linux/reset.h>
#include <linux/tcp.h>
#include <linux/bug.h>

#include <asm/mach-ralink/ralink_regs.h>

#include "ralink_soc_eth.h"
#include "esw_rt3052.h"
#include "ralink_ethtool.h"
#define MAX_RX_LENGTH		1536
#define FE_RX_ETH_HLEN		(VLAN_ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)
#define FE_RX_HLEN		(NET_SKB_PAD + FE_RX_ETH_HLEN + NET_IP_ALIGN)
#define DMA_DUMMY_DESC		0xffffffff
#define FE_DEFAULT_MSG_ENABLE \
		(NETIF_MSG_DRV | \
		NETIF_MSG_PROBE | \
		NETIF_MSG_LINK | \
		NETIF_MSG_TIMER | \
		NETIF_MSG_IFDOWN | \
		NETIF_MSG_IFUP | \
		NETIF_MSG_RX_ERR | \
		NETIF_MSG_TX_ERR)
#define TX_DMA_DESP2_DEF	(TX_DMA_LS0 | TX_DMA_DONE)
#define TX_DMA_DESP4_DEF	(TX_DMA_QN(3) | TX_DMA_PN(1))
#define NEXT_TX_DESP_IDX(X)	(((X) + 1) & (ring->tx_ring_size - 1))
#define NEXT_RX_DESP_IDX(X)	(((X) + 1) & (ring->rx_ring_size - 1))

#define SYSC_REG_RSTCTRL	0x34
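/*
 * Note: NEXT_TX_DESP_IDX()/NEXT_RX_DESP_IDX() wrap the ring index by
 * masking with (ring size - 1), which only works while the TX and RX
 * ring sizes are powers of two.
 */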
static int fe_msg_level = -1;
module_param_named(msg_level, fe_msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
static const u16 fe_reg_table_default[FE_REG_COUNT] = {
	[FE_REG_PDMA_GLO_CFG] = FE_PDMA_GLO_CFG,
	[FE_REG_PDMA_RST_CFG] = FE_PDMA_RST_CFG,
	[FE_REG_DLY_INT_CFG] = FE_DLY_INT_CFG,
	[FE_REG_TX_BASE_PTR0] = FE_TX_BASE_PTR0,
	[FE_REG_TX_MAX_CNT0] = FE_TX_MAX_CNT0,
	[FE_REG_TX_CTX_IDX0] = FE_TX_CTX_IDX0,
	[FE_REG_TX_DTX_IDX0] = FE_TX_DTX_IDX0,
	[FE_REG_RX_BASE_PTR0] = FE_RX_BASE_PTR0,
	[FE_REG_RX_MAX_CNT0] = FE_RX_MAX_CNT0,
	[FE_REG_RX_CALC_IDX0] = FE_RX_CALC_IDX0,
	[FE_REG_RX_DRX_IDX0] = FE_RX_DRX_IDX0,
	[FE_REG_FE_INT_ENABLE] = FE_FE_INT_ENABLE,
	[FE_REG_FE_INT_STATUS] = FE_FE_INT_STATUS,
	[FE_REG_FE_DMA_VID_BASE] = FE_DMA_VID0,
	[FE_REG_FE_COUNTER_BASE] = FE_GDMA1_TX_GBCNT,
	[FE_REG_FE_RST_GL] = FE_FE_RST_GL,
};

static const u16 *fe_reg_table = fe_reg_table_default;
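/*
 * Register access goes through fe_reg_table so that logical register
 * indices (enum fe_reg) can be translated to SoC-specific offsets; a
 * SoC variant may install its own table in fe_probe(), and entries left
 * at 0 mark features the SoC does not provide.
 */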
struct fe_work_t {
	int bitnr;
	void (*action)(struct fe_priv *);
};
static void __iomem *fe_base = 0;
void fe_w32(u32 val, unsigned reg)
{
	__raw_writel(val, fe_base + reg);
}
u32 fe_r32(unsigned reg)
{
	return __raw_readl(fe_base + reg);
}
void fe_reg_w32(u32 val, enum fe_reg reg)
{
	fe_w32(val, fe_reg_table[reg]);
}
u32 fe_reg_r32(enum fe_reg reg)
{
	return fe_r32(fe_reg_table[reg]);
}
void fe_reset(u32 reset_bits)
{
	u32 t;

	t = rt_sysc_r32(SYSC_REG_RSTCTRL);
	t |= reset_bits;
	rt_sysc_w32(t, SYSC_REG_RSTCTRL);
	usleep_range(10, 20);

	t &= ~reset_bits;
	rt_sysc_w32(t, SYSC_REG_RSTCTRL);
	usleep_range(10, 20);
}
static inline void fe_int_disable(u32 mask)
{
	fe_reg_w32(fe_reg_r32(FE_REG_FE_INT_ENABLE) & ~mask,
		   FE_REG_FE_INT_ENABLE);
	fe_reg_r32(FE_REG_FE_INT_ENABLE);
}
static inline void fe_int_enable(u32 mask)
{
	fe_reg_w32(fe_reg_r32(FE_REG_FE_INT_ENABLE) | mask,
		   FE_REG_FE_INT_ENABLE);
	fe_reg_r32(FE_REG_FE_INT_ENABLE);
}
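/* The read-back of FE_REG_FE_INT_ENABLE in the two helpers above flushes
 * the posted write, so the new interrupt mask has taken effect before the
 * caller continues.
 */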
static inline void fe_hw_set_macaddr(struct fe_priv *priv, unsigned char *mac)
{
	unsigned long flags;

	spin_lock_irqsave(&priv->page_lock, flags);
	fe_w32((mac[0] << 8) | mac[1], FE_GDMA1_MAC_ADRH);
	fe_w32((mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5],
	       FE_GDMA1_MAC_ADRL);
	spin_unlock_irqrestore(&priv->page_lock, flags);
}
static int fe_set_mac_address(struct net_device *dev, void *p)
{
	int ret = eth_mac_addr(dev, p);

	if (!ret) {
		struct fe_priv *priv = netdev_priv(dev);

		if (priv->soc->set_mac)
			priv->soc->set_mac(priv, dev->dev_addr);
		else
			fe_hw_set_macaddr(priv, p);
	}

	return ret;
}
static inline int fe_max_frag_size(int mtu)
{
	/* make sure buf_size will be at least MAX_RX_LENGTH */
	if (mtu + FE_RX_ETH_HLEN < MAX_RX_LENGTH)
		mtu = MAX_RX_LENGTH - FE_RX_ETH_HLEN;

	return SKB_DATA_ALIGN(FE_RX_HLEN + mtu) +
	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}
static inline int fe_max_buf_size(int frag_size)
{
	int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN -
		       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	BUG_ON(buf_size < MAX_RX_LENGTH);
	return buf_size;
}
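/*
 * A receive fragment holds NET_SKB_PAD + NET_IP_ALIGN of headroom, the
 * DMA buffer itself and a trailing struct skb_shared_info; keeping the
 * two helpers above consistent is what allows build_skb() to be used
 * directly on ring->rx_data[] entries in fe_poll_rx().
 */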
static inline void fe_get_rxd(struct fe_rx_dma *rxd, struct fe_rx_dma *dma_rxd)
{
	rxd->rxd1 = dma_rxd->rxd1;
	rxd->rxd2 = dma_rxd->rxd2;
	rxd->rxd3 = dma_rxd->rxd3;
	rxd->rxd4 = dma_rxd->rxd4;
}
static inline void fe_set_txd(struct fe_tx_dma *txd, struct fe_tx_dma *dma_txd)
{
	dma_txd->txd1 = txd->txd1;
	dma_txd->txd3 = txd->txd3;
	dma_txd->txd4 = txd->txd4;
	/* clean dma done flag last */
	dma_txd->txd2 = txd->txd2;
}
static void fe_clean_rx(struct fe_priv *priv)
{
	int i;
	struct fe_rx_ring *ring = &priv->rx_ring;

	if (ring->rx_data) {
		for (i = 0; i < ring->rx_ring_size; i++)
			if (ring->rx_data[i]) {
				if (ring->rx_dma && ring->rx_dma[i].rxd1)
					dma_unmap_single(&priv->netdev->dev,
							 ring->rx_dma[i].rxd1,
							 ring->rx_buf_size,
							 DMA_FROM_DEVICE);
				put_page(virt_to_head_page(ring->rx_data[i]));
			}

		kfree(ring->rx_data);
		ring->rx_data = NULL;
	}

	if (ring->rx_dma) {
		dma_free_coherent(&priv->netdev->dev,
				  ring->rx_ring_size * sizeof(*ring->rx_dma),
				  ring->rx_dma,
				  ring->rx_phys);
		ring->rx_dma = NULL;
	}
}
static int fe_alloc_rx(struct fe_priv *priv)
{
	struct net_device *netdev = priv->netdev;
	struct fe_rx_ring *ring = &priv->rx_ring;
	int i, pad;

	ring->rx_data = kcalloc(ring->rx_ring_size, sizeof(*ring->rx_data),
				GFP_KERNEL);
	if (!ring->rx_data)
		goto no_rx_mem;

	for (i = 0; i < ring->rx_ring_size; i++) {
		ring->rx_data[i] = netdev_alloc_frag(ring->frag_size);
		if (!ring->rx_data[i])
			goto no_rx_mem;
	}

	ring->rx_dma = dma_alloc_coherent(&netdev->dev,
					  ring->rx_ring_size * sizeof(*ring->rx_dma),
					  &ring->rx_phys,
					  GFP_ATOMIC | __GFP_ZERO);
	if (!ring->rx_dma)
		goto no_rx_mem;

	if (priv->flags & FE_FLAG_RX_2B_OFFSET)
		pad = 0;
	else
		pad = NET_IP_ALIGN;
	for (i = 0; i < ring->rx_ring_size; i++) {
		dma_addr_t dma_addr = dma_map_single(&netdev->dev,
						     ring->rx_data[i] + NET_SKB_PAD + pad,
						     ring->rx_buf_size,
						     DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(&netdev->dev, dma_addr)))
			goto no_rx_mem;
		ring->rx_dma[i].rxd1 = (unsigned int)dma_addr;

		if (priv->flags & FE_FLAG_RX_SG_DMA)
			ring->rx_dma[i].rxd2 = RX_DMA_PLEN0(ring->rx_buf_size);
		else
			ring->rx_dma[i].rxd2 = RX_DMA_LSO;
	}
	ring->rx_calc_idx = ring->rx_ring_size - 1;
	wmb();

	fe_reg_w32(ring->rx_phys, FE_REG_RX_BASE_PTR0);
	fe_reg_w32(ring->rx_ring_size, FE_REG_RX_MAX_CNT0);
	fe_reg_w32(ring->rx_calc_idx, FE_REG_RX_CALC_IDX0);
	fe_reg_w32(FE_PST_DRX_IDX0, FE_REG_PDMA_RST_CFG);

	return 0;

no_rx_mem:
	return -ENOMEM;
}
static void fe_txd_unmap(struct device *dev, struct fe_tx_buf *tx_buf)
{
	if (tx_buf->flags & FE_TX_FLAGS_SINGLE0) {
		dma_unmap_single(dev,
				 dma_unmap_addr(tx_buf, dma_addr0),
				 dma_unmap_len(tx_buf, dma_len0),
				 DMA_TO_DEVICE);
	} else if (tx_buf->flags & FE_TX_FLAGS_PAGE0) {
		dma_unmap_page(dev,
			       dma_unmap_addr(tx_buf, dma_addr0),
			       dma_unmap_len(tx_buf, dma_len0),
			       DMA_TO_DEVICE);
	}
	if (tx_buf->flags & FE_TX_FLAGS_PAGE1)
		dma_unmap_page(dev,
			       dma_unmap_addr(tx_buf, dma_addr1),
			       dma_unmap_len(tx_buf, dma_len1),
			       DMA_TO_DEVICE);

	if (tx_buf->skb && (tx_buf->skb != (struct sk_buff *)DMA_DUMMY_DESC))
		dev_kfree_skb_any(tx_buf->skb);
	tx_buf->skb = NULL;
}
static void fe_clean_tx(struct fe_priv *priv)
{
	int i;
	struct device *dev = &priv->netdev->dev;
	struct fe_tx_ring *ring = &priv->tx_ring;

	if (ring->tx_buf) {
		for (i = 0; i < ring->tx_ring_size; i++)
			fe_txd_unmap(dev, &ring->tx_buf[i]);
		kfree(ring->tx_buf);
		ring->tx_buf = NULL;
	}

	if (ring->tx_dma) {
		dma_free_coherent(dev,
				  ring->tx_ring_size * sizeof(*ring->tx_dma),
				  ring->tx_dma,
				  ring->tx_phys);
		ring->tx_dma = NULL;
	}

	netdev_reset_queue(priv->netdev);
}
static int fe_alloc_tx(struct fe_priv *priv)
{
	int i;
	struct fe_tx_ring *ring = &priv->tx_ring;

	ring->tx_free_idx = 0;
	ring->tx_next_idx = 0;
	ring->tx_thresh = max((unsigned long)ring->tx_ring_size >> 2,
			      MAX_SKB_FRAGS);

	ring->tx_buf = kcalloc(ring->tx_ring_size, sizeof(*ring->tx_buf),
			       GFP_KERNEL);
	if (!ring->tx_buf)
		goto no_tx_mem;

	ring->tx_dma = dma_alloc_coherent(&priv->netdev->dev,
					  ring->tx_ring_size * sizeof(*ring->tx_dma),
					  &ring->tx_phys,
					  GFP_ATOMIC | __GFP_ZERO);
	if (!ring->tx_dma)
		goto no_tx_mem;

	for (i = 0; i < ring->tx_ring_size; i++) {
		if (priv->soc->tx_dma)
			priv->soc->tx_dma(&ring->tx_dma[i]);
		ring->tx_dma[i].txd2 = TX_DMA_DESP2_DEF;
	}
	wmb();

	fe_reg_w32(ring->tx_phys, FE_REG_TX_BASE_PTR0);
	fe_reg_w32(ring->tx_ring_size, FE_REG_TX_MAX_CNT0);
	fe_reg_w32(0, FE_REG_TX_CTX_IDX0);
	fe_reg_w32(FE_PST_DTX_IDX0, FE_REG_PDMA_RST_CFG);

	return 0;

no_tx_mem:
	return -ENOMEM;
}
static int fe_init_dma(struct fe_priv *priv)
{
	int err;

	err = fe_alloc_tx(priv);
	if (err)
		return err;

	err = fe_alloc_rx(priv);
	if (err)
		return err;

	return 0;
}

static void fe_free_dma(struct fe_priv *priv)
{
	fe_clean_tx(priv);
	fe_clean_rx(priv);
}
void fe_stats_update(struct fe_priv *priv)
{
	struct fe_hw_stats *hwstats = priv->hw_stats;
	unsigned int base = fe_reg_table[FE_REG_FE_COUNTER_BASE];
	u64 stats;

	u64_stats_update_begin(&hwstats->syncp);

	if (IS_ENABLED(CONFIG_SOC_MT7621)) {
		hwstats->rx_bytes += fe_r32(base);
		stats = fe_r32(base + 0x04);
		if (stats)
			hwstats->rx_bytes += (stats << 32);
		hwstats->rx_packets += fe_r32(base + 0x08);
		hwstats->rx_overflow += fe_r32(base + 0x10);
		hwstats->rx_fcs_errors += fe_r32(base + 0x14);
		hwstats->rx_short_errors += fe_r32(base + 0x18);
		hwstats->rx_long_errors += fe_r32(base + 0x1c);
		hwstats->rx_checksum_errors += fe_r32(base + 0x20);
		hwstats->rx_flow_control_packets += fe_r32(base + 0x24);
		hwstats->tx_skip += fe_r32(base + 0x28);
		hwstats->tx_collisions += fe_r32(base + 0x2c);
		hwstats->tx_bytes += fe_r32(base + 0x30);
		stats = fe_r32(base + 0x34);
		if (stats)
			hwstats->tx_bytes += (stats << 32);
		hwstats->tx_packets += fe_r32(base + 0x38);
	} else {
		hwstats->tx_bytes += fe_r32(base);
		hwstats->tx_packets += fe_r32(base + 0x04);
		hwstats->tx_skip += fe_r32(base + 0x08);
		hwstats->tx_collisions += fe_r32(base + 0x0c);
		hwstats->rx_bytes += fe_r32(base + 0x20);
		hwstats->rx_packets += fe_r32(base + 0x24);
		hwstats->rx_overflow += fe_r32(base + 0x28);
		hwstats->rx_fcs_errors += fe_r32(base + 0x2c);
		hwstats->rx_short_errors += fe_r32(base + 0x30);
		hwstats->rx_long_errors += fe_r32(base + 0x34);
		hwstats->rx_checksum_errors += fe_r32(base + 0x38);
		hwstats->rx_flow_control_packets += fe_r32(base + 0x3c);
	}

	u64_stats_update_end(&hwstats->syncp);
}
static struct rtnl_link_stats64 *fe_get_stats64(struct net_device *dev,
		struct rtnl_link_stats64 *storage)
{
	struct fe_priv *priv = netdev_priv(dev);
	struct fe_hw_stats *hwstats = priv->hw_stats;
	unsigned int base = fe_reg_table[FE_REG_FE_COUNTER_BASE];
	unsigned int start;

	if (!base) {
		netdev_stats_to_stats64(storage, &dev->stats);
		return storage;
	}

	if (netif_running(dev) && netif_device_present(dev)) {
		if (spin_trylock(&hwstats->stats_lock)) {
			fe_stats_update(priv);
			spin_unlock(&hwstats->stats_lock);
		}
	}

	do {
		start = u64_stats_fetch_begin_irq(&hwstats->syncp);
		storage->rx_packets = hwstats->rx_packets;
		storage->tx_packets = hwstats->tx_packets;
		storage->rx_bytes = hwstats->rx_bytes;
		storage->tx_bytes = hwstats->tx_bytes;
		storage->collisions = hwstats->tx_collisions;
		storage->rx_length_errors = hwstats->rx_short_errors +
			hwstats->rx_long_errors;
		storage->rx_over_errors = hwstats->rx_overflow;
		storage->rx_crc_errors = hwstats->rx_fcs_errors;
		storage->rx_errors = hwstats->rx_checksum_errors;
		storage->tx_aborted_errors = hwstats->tx_skip;
	} while (u64_stats_fetch_retry_irq(&hwstats->syncp, start));

	storage->tx_errors = priv->netdev->stats.tx_errors;
	storage->rx_dropped = priv->netdev->stats.rx_dropped;
	storage->tx_dropped = priv->netdev->stats.tx_dropped;

	return storage;
}
static int fe_vlan_rx_add_vid(struct net_device *dev,
			      __be16 proto, u16 vid)
{
	struct fe_priv *priv = netdev_priv(dev);
	u32 idx = (vid & 0xf);
	u32 vlan_cfg;

	if (!((fe_reg_table[FE_REG_FE_DMA_VID_BASE]) &&
	      (dev->features & NETIF_F_HW_VLAN_CTAG_TX)))
		return 0;

	if (test_bit(idx, &priv->vlan_map)) {
		netdev_warn(dev, "disable tx vlan offload\n");
		dev->wanted_features &= ~NETIF_F_HW_VLAN_CTAG_TX;
		netdev_update_features(dev);
	} else {
		vlan_cfg = fe_r32(fe_reg_table[FE_REG_FE_DMA_VID_BASE] +
				  ((idx >> 1) << 2));
		if (idx & 0x1) {
			vlan_cfg &= 0xffff;
			vlan_cfg |= (vid << 16);
		} else {
			vlan_cfg &= 0xffff0000;
			vlan_cfg |= vid;
		}
		fe_w32(vlan_cfg, fe_reg_table[FE_REG_FE_DMA_VID_BASE] +
		       ((idx >> 1) << 2));
		set_bit(idx, &priv->vlan_map);
	}

	return 0;
}
static int fe_vlan_rx_kill_vid(struct net_device *dev,
			       __be16 proto, u16 vid)
{
	struct fe_priv *priv = netdev_priv(dev);
	u32 idx = (vid & 0xf);

	if (!((fe_reg_table[FE_REG_FE_DMA_VID_BASE]) &&
	      (dev->features & NETIF_F_HW_VLAN_CTAG_TX)))
		return 0;

	clear_bit(idx, &priv->vlan_map);

	return 0;
}
static inline u32 fe_empty_txd(struct fe_tx_ring *ring)
{
	return (u32)(ring->tx_ring_size -
		     ((ring->tx_next_idx - ring->tx_free_idx) &
		      (ring->tx_ring_size - 1)));
}
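/*
 * Example: with tx_ring_size = 8, tx_next_idx = 2 and tx_free_idx = 7,
 * (2 - 7) & 7 = 3 descriptors are in flight, so fe_empty_txd() returns
 * 5 free slots. The masking again relies on a power-of-two ring size.
 */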
static int fe_tx_map_dma(struct sk_buff *skb, struct net_device *dev,
			 int tx_num, struct fe_tx_ring *ring)
{
	struct fe_priv *priv = netdev_priv(dev);
	struct skb_frag_struct *frag;
	struct fe_tx_dma txd, *ptxd;
	struct fe_tx_buf *tx_buf;
	dma_addr_t mapped_addr;
	unsigned int nr_frags;
	int i, j, k, frag_size, frag_map_size, offset;

	tx_buf = &ring->tx_buf[ring->tx_next_idx];
	memset(tx_buf, 0, sizeof(*tx_buf));
	memset(&txd, 0, sizeof(txd));
	nr_frags = skb_shinfo(skb)->nr_frags;

	/* init tx descriptor */
	if (priv->soc->tx_dma)
		priv->soc->tx_dma(&txd);
	else
		txd.txd4 = TX_DMA_DESP4_DEF;

	/* TX Checksum offload */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		txd.txd4 |= TX_DMA_CHKSUM;

	/* VLAN header offload */
	if (skb_vlan_tag_present(skb)) {
		u16 tag = skb_vlan_tag_get(skb);

		if (IS_ENABLED(CONFIG_SOC_MT7621))
			txd.txd4 |= TX_DMA_INS_VLAN_MT7621 | tag;
		else
			txd.txd4 |= TX_DMA_INS_VLAN |
				((tag >> VLAN_PRIO_SHIFT) << 4) |
				(tag & 0xF);
	}

	/* TSO: fill MSS info in tcp checksum field */
	if (skb_is_gso(skb)) {
		if (skb_cow_head(skb, 0)) {
			netif_warn(priv, tx_err, dev,
				   "GSO expand head fail.\n");
			goto err_out;
		}
		if (skb_shinfo(skb)->gso_type &
		    (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
			txd.txd4 |= TX_DMA_TSO;
			tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size);
		}
	}

	mapped_addr = dma_map_single(&dev->dev, skb->data,
				     skb_headlen(skb), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
		goto err_out;
	txd.txd1 = mapped_addr;
	txd.txd2 = TX_DMA_PLEN0(skb_headlen(skb));

	tx_buf->flags |= FE_TX_FLAGS_SINGLE0;
	dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
	dma_unmap_len_set(tx_buf, dma_len0, skb_headlen(skb));

	/* TX SG offload */
	j = ring->tx_next_idx;
	k = 0;
	for (i = 0; i < nr_frags; i++) {
		offset = 0;
		frag = &skb_shinfo(skb)->frags[i];
		frag_size = skb_frag_size(frag);

		while (frag_size > 0) {
			frag_map_size = min(frag_size, TX_DMA_BUF_LEN);
			mapped_addr = skb_frag_dma_map(&dev->dev, frag, offset,
						       frag_map_size, DMA_TO_DEVICE);
			if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
				goto err_dma;

			if (k & 0x1) {
				j = NEXT_TX_DESP_IDX(j);
				txd.txd1 = mapped_addr;
				txd.txd2 = TX_DMA_PLEN0(frag_map_size);

				tx_buf = &ring->tx_buf[j];
				memset(tx_buf, 0, sizeof(*tx_buf));

				tx_buf->flags |= FE_TX_FLAGS_PAGE0;
				dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
				dma_unmap_len_set(tx_buf, dma_len0, frag_map_size);
			} else {
				txd.txd3 = mapped_addr;
				txd.txd2 |= TX_DMA_PLEN1(frag_map_size);

				tx_buf->skb = (struct sk_buff *)DMA_DUMMY_DESC;
				tx_buf->flags |= FE_TX_FLAGS_PAGE1;
				dma_unmap_addr_set(tx_buf, dma_addr1, mapped_addr);
				dma_unmap_len_set(tx_buf, dma_len1, frag_map_size);
			}

			if (!((i == (nr_frags - 1)) &&
			      (frag_map_size == frag_size))) {
				fe_set_txd(&txd, &ring->tx_dma[j]);
				memset(&txd, 0, sizeof(txd));
			}

			frag_size -= frag_map_size;
			offset += frag_map_size;
			k++;
		}
	}

	/* set last segment */
	if (k & 0x1)
		txd.txd2 |= TX_DMA_LS1;
	else
		txd.txd2 |= TX_DMA_LS0;
	fe_set_txd(&txd, &ring->tx_dma[j]);

	/* store skb to cleanup */
	tx_buf->skb = skb;

	netdev_sent_queue(dev, skb->len);
	skb_tx_timestamp(skb);

	ring->tx_next_idx = NEXT_TX_DESP_IDX(j);

	if (unlikely(fe_empty_txd(ring) <= ring->tx_thresh)) {
		netif_stop_queue(dev);
		if (unlikely(fe_empty_txd(ring) > ring->tx_thresh))
			netif_wake_queue(dev);
	}

	if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) || !skb->xmit_more)
		fe_reg_w32(ring->tx_next_idx, FE_REG_TX_CTX_IDX0);

	return 0;

err_dma:
	j = ring->tx_next_idx;
	for (i = 0; i < tx_num; i++) {
		ptxd = &ring->tx_dma[j];
		tx_buf = &ring->tx_buf[j];

		fe_txd_unmap(&dev->dev, tx_buf);

		ptxd->txd2 = TX_DMA_DESP2_DEF;
		j = NEXT_TX_DESP_IDX(j);
	}

err_out:
	return -1;
}
static inline int fe_skb_padto(struct sk_buff *skb, struct fe_priv *priv)
{
	unsigned int len;
	int ret;

	ret = 0;
	if (unlikely(skb->len < VLAN_ETH_ZLEN)) {
		if ((priv->flags & FE_FLAG_PADDING_64B) &&
		    !(priv->flags & FE_FLAG_PADDING_BUG))
			return ret;

		if (skb_vlan_tag_present(skb))
			len = ETH_ZLEN;
		else if (skb->protocol == cpu_to_be16(ETH_P_8021Q))
			len = VLAN_ETH_ZLEN;
		else if (!(priv->flags & FE_FLAG_PADDING_64B))
			len = ETH_ZLEN;
		else
			return ret;

		if (skb->len < len) {
			ret = skb_pad(skb, len - skb->len);
			if (ret < 0)
				return ret;
			skb->len = len;
			skb_set_tail_pointer(skb, len);
		}
	}

	return ret;
}
static inline int fe_cal_txd_req(struct sk_buff *skb)
{
	int i, nfrags;
	struct skb_frag_struct *frag;

	nfrags = 1;
	if (skb_is_gso(skb)) {
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			frag = &skb_shinfo(skb)->frags[i];
			nfrags += DIV_ROUND_UP(frag->size, TX_DMA_BUF_LEN);
		}
	} else {
		nfrags += skb_shinfo(skb)->nr_frags;
	}

	return DIV_ROUND_UP(nfrags, 2);
}
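/* Each TX descriptor carries up to two buffers (PLEN0/PLEN1) of at most
 * TX_DMA_BUF_LEN bytes, hence the final DIV_ROUND_UP(nfrags, 2).
 */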
static int fe_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct fe_priv *priv = netdev_priv(dev);
	struct fe_tx_ring *ring = &priv->tx_ring;
	struct net_device_stats *stats = &dev->stats;
	int tx_num;
	int len = skb->len;

	if (fe_skb_padto(skb, priv)) {
		netif_warn(priv, tx_err, dev, "tx padding failed!\n");
		return NETDEV_TX_OK;
	}

	tx_num = fe_cal_txd_req(skb);
	if (unlikely(fe_empty_txd(ring) <= tx_num)) {
		netif_stop_queue(dev);
		netif_err(priv, tx_queued, dev,
			  "Tx Ring full when queue awake!\n");
		return NETDEV_TX_BUSY;
	}

	if (fe_tx_map_dma(skb, dev, tx_num, ring) < 0) {
		stats->tx_dropped++;
	} else {
		stats->tx_packets++;
		stats->tx_bytes += len;
	}

	return NETDEV_TX_OK;
}
static inline void fe_rx_vlan(struct sk_buff *skb)
{
	struct ethhdr *ehdr;
	u16 vlanid;

	if (!__vlan_get_tag(skb, &vlanid)) {
		/* pop the vlan tag */
		ehdr = (struct ethhdr *)skb->data;
		memmove(skb->data + VLAN_HLEN, ehdr, ETH_ALEN * 2);
		skb_pull(skb, VLAN_HLEN);
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlanid);
	}
}
static int fe_poll_rx(struct napi_struct *napi, int budget,
		      struct fe_priv *priv, u32 rx_intr)
{
	struct net_device *netdev = priv->netdev;
	struct net_device_stats *stats = &netdev->stats;
	struct fe_soc_data *soc = priv->soc;
	struct fe_rx_ring *ring = &priv->rx_ring;
	int idx = ring->rx_calc_idx;
	u32 checksum_bit;
	struct sk_buff *skb;
	u8 *data, *new_data;
	struct fe_rx_dma *rxd, trxd;
	int done = 0, pad;
	bool rx_vlan = netdev->features & NETIF_F_HW_VLAN_CTAG_RX;

	if (netdev->features & NETIF_F_RXCSUM)
		checksum_bit = soc->checksum_bit;
	else
		checksum_bit = 0;

	if (priv->flags & FE_FLAG_RX_2B_OFFSET)
		pad = 0;
	else
		pad = NET_IP_ALIGN;

	while (done < budget) {
		unsigned int pktlen;
		dma_addr_t dma_addr;

		idx = NEXT_RX_DESP_IDX(idx);
		rxd = &ring->rx_dma[idx];
		data = ring->rx_data[idx];

		fe_get_rxd(&trxd, rxd);
		if (!(trxd.rxd2 & RX_DMA_DONE))
			break;

		/* alloc new buffer */
		new_data = netdev_alloc_frag(ring->frag_size);
		if (unlikely(!new_data)) {
			stats->rx_dropped++;
			goto release_desc;
		}
		dma_addr = dma_map_single(&netdev->dev,
					  new_data + NET_SKB_PAD + pad,
					  ring->rx_buf_size,
					  DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(&netdev->dev, dma_addr))) {
			put_page(virt_to_head_page(new_data));
			goto release_desc;
		}

		/* receive data */
		skb = build_skb(data, ring->frag_size);
		if (unlikely(!skb)) {
			put_page(virt_to_head_page(new_data));
			goto release_desc;
		}
		skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);

		dma_unmap_single(&netdev->dev, trxd.rxd1,
				 ring->rx_buf_size, DMA_FROM_DEVICE);
		pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
		skb_put(skb, pktlen);
		if (trxd.rxd4 & checksum_bit) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else {
			skb_checksum_none_assert(skb);
		}
		if (rx_vlan)
			fe_rx_vlan(skb);
		skb->protocol = eth_type_trans(skb, netdev);

		stats->rx_packets++;
		stats->rx_bytes += pktlen;

		napi_gro_receive(napi, skb);

		ring->rx_data[idx] = new_data;
		rxd->rxd1 = (unsigned int)dma_addr;

release_desc:
		if (priv->flags & FE_FLAG_RX_SG_DMA)
			rxd->rxd2 = RX_DMA_PLEN0(ring->rx_buf_size);
		else
			rxd->rxd2 = RX_DMA_LSO;

		ring->rx_calc_idx = idx;
		wmb();
		fe_reg_w32(ring->rx_calc_idx, FE_REG_RX_CALC_IDX0);
		done++;
	}

	if (done < budget)
		fe_reg_w32(rx_intr, FE_REG_FE_INT_STATUS);

	return done;
}
static int fe_poll_tx(struct fe_priv *priv, int budget, u32 tx_intr,
		      int *tx_again)
{
	struct net_device *netdev = priv->netdev;
	struct device *dev = &netdev->dev;
	unsigned int bytes_compl = 0;
	struct sk_buff *skb;
	struct fe_tx_buf *tx_buf;
	int done = 0;
	u32 idx, hwidx;
	struct fe_tx_ring *ring = &priv->tx_ring;

	idx = ring->tx_free_idx;
	hwidx = fe_reg_r32(FE_REG_TX_DTX_IDX0);

	while ((idx != hwidx) && budget) {
		tx_buf = &ring->tx_buf[idx];
		skb = tx_buf->skb;

		if (!skb)
			break;

		if (skb != (struct sk_buff *)DMA_DUMMY_DESC) {
			bytes_compl += skb->len;
			done++;
			budget--;
		}
		fe_txd_unmap(dev, tx_buf);
		idx = NEXT_TX_DESP_IDX(idx);
	}
	ring->tx_free_idx = idx;

	/* read hw index again make sure no new tx packet */
	hwidx = fe_reg_r32(FE_REG_TX_DTX_IDX0);
	if (idx == hwidx)
		fe_reg_w32(tx_intr, FE_REG_FE_INT_STATUS);
	else
		*tx_again = 1;

	netdev_completed_queue(netdev, done, bytes_compl);

	if (unlikely(netif_queue_stopped(netdev) &&
		     (fe_empty_txd(ring) > ring->tx_thresh)))
		netif_wake_queue(netdev);

	return done;
}
static int fe_poll(struct napi_struct *napi, int budget)
{
	struct fe_priv *priv = container_of(napi, struct fe_priv, rx_napi);
	struct fe_hw_stats *hwstat = priv->hw_stats;
	int tx_done, rx_done, tx_again;
	u32 status, fe_status, status_reg, mask;
	u32 tx_intr, rx_intr, status_intr;

	fe_status = status = fe_reg_r32(FE_REG_FE_INT_STATUS);
	tx_intr = priv->soc->tx_int;
	rx_intr = priv->soc->rx_int;
	status_intr = priv->soc->status_int;
	tx_done = rx_done = tx_again = 0;

	if (fe_reg_table[FE_REG_FE_INT_STATUS2]) {
		fe_status = fe_reg_r32(FE_REG_FE_INT_STATUS2);
		status_reg = FE_REG_FE_INT_STATUS2;
	} else {
		status_reg = FE_REG_FE_INT_STATUS;
	}

	if (status & tx_intr)
		tx_done = fe_poll_tx(priv, budget, tx_intr, &tx_again);

	if (status & rx_intr)
		rx_done = fe_poll_rx(napi, budget, priv, rx_intr);

	if (unlikely(fe_status & status_intr)) {
		if (hwstat && spin_trylock(&hwstat->stats_lock)) {
			fe_stats_update(priv);
			spin_unlock(&hwstat->stats_lock);
		}
		fe_reg_w32(status_intr, status_reg);
	}

	if (unlikely(netif_msg_intr(priv))) {
		mask = fe_reg_r32(FE_REG_FE_INT_ENABLE);
		netdev_info(priv->netdev,
			    "done tx %d, rx %d, intr 0x%08x/0x%x\n",
			    tx_done, rx_done, status, mask);
	}

	if (!tx_again && (rx_done < budget)) {
		status = fe_reg_r32(FE_REG_FE_INT_STATUS);
		if (status & (tx_intr | rx_intr)) {
			/* let napi poll again */
			rx_done = budget;
			goto poll_again;
		}

		napi_complete(napi);
		fe_int_enable(tx_intr | rx_intr);
	}

poll_again:
	return rx_done;
}
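/* Before completing NAPI, fe_poll() re-reads the interrupt status: if new
 * tx/rx work was signalled after the ring scan it keeps polling instead of
 * calling napi_complete(), closing the lost-interrupt window.
 */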
static void fe_tx_timeout(struct net_device *dev)
{
	struct fe_priv *priv = netdev_priv(dev);
	struct fe_tx_ring *ring = &priv->tx_ring;

	priv->netdev->stats.tx_errors++;
	netif_err(priv, tx_err, dev,
		  "transmit timed out\n");
	netif_info(priv, drv, dev, "dma_cfg:%08x\n",
		   fe_reg_r32(FE_REG_PDMA_GLO_CFG));
	netif_info(priv, drv, dev, "tx_ring=%d, " \
		   "base=%08x, max=%u, ctx=%u, dtx=%u, fdx=%hu, next=%hu\n", 0,
		   fe_reg_r32(FE_REG_TX_BASE_PTR0),
		   fe_reg_r32(FE_REG_TX_MAX_CNT0),
		   fe_reg_r32(FE_REG_TX_CTX_IDX0),
		   fe_reg_r32(FE_REG_TX_DTX_IDX0),
		   ring->tx_free_idx,
		   ring->tx_next_idx);
	netif_info(priv, drv, dev, "rx_ring=%d, " \
		   "base=%08x, max=%u, calc=%u, drx=%u\n", 0,
		   fe_reg_r32(FE_REG_RX_BASE_PTR0),
		   fe_reg_r32(FE_REG_RX_MAX_CNT0),
		   fe_reg_r32(FE_REG_RX_CALC_IDX0),
		   fe_reg_r32(FE_REG_RX_DRX_IDX0));

	if (!test_and_set_bit(FE_FLAG_RESET_PENDING, priv->pending_flags))
		schedule_work(&priv->pending_work);
}
static irqreturn_t fe_handle_irq(int irq, void *dev)
{
	struct fe_priv *priv = netdev_priv(dev);
	u32 status, int_mask;

	status = fe_reg_r32(FE_REG_FE_INT_STATUS);

	if (unlikely(!status))
		return IRQ_NONE;

	int_mask = (priv->soc->rx_int | priv->soc->tx_int);
	if (likely(status & int_mask)) {
		if (likely(napi_schedule_prep(&priv->rx_napi))) {
			fe_int_disable(int_mask);
			__napi_schedule(&priv->rx_napi);
		}
	} else {
		fe_reg_w32(status, FE_REG_FE_INT_STATUS);
	}

	return IRQ_HANDLED;
}
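/* Hard IRQ path: tx/rx interrupts are masked and the remaining work is
 * deferred to fe_poll() via NAPI; fe_int_enable() is called again once the
 * poll has drained the rings.
 */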
#ifdef CONFIG_NET_POLL_CONTROLLER
static void fe_poll_controller(struct net_device *dev)
{
	struct fe_priv *priv = netdev_priv(dev);
	u32 int_mask = priv->soc->tx_int | priv->soc->rx_int;

	fe_int_disable(int_mask);
	fe_handle_irq(dev->irq, dev);
	fe_int_enable(int_mask);
}
#endif
int fe_set_clock_cycle(struct fe_priv *priv)
{
	unsigned long sysclk = priv->sysclk;

	if (!sysclk)
		return -EINVAL;

	sysclk /= FE_US_CYC_CNT_DIVISOR;
	sysclk <<= FE_US_CYC_CNT_SHIFT;

	fe_w32((fe_r32(FE_FE_GLO_CFG) &
		~(FE_US_CYC_CNT_MASK << FE_US_CYC_CNT_SHIFT)) |
	       sysclk,
	       FE_FE_GLO_CFG);
	return 0;
}
void fe_fwd_config(struct fe_priv *priv)
{
	u32 fwd_cfg;

	fwd_cfg = fe_r32(FE_GDMA1_FWD_CFG);

	/* disable jumbo frame */
	if (priv->flags & FE_FLAG_JUMBO_FRAME)
		fwd_cfg &= ~FE_GDM1_JMB_EN;

	/* set unicast/multicast/broadcast frame to cpu */
	fe_w32(fwd_cfg, FE_GDMA1_FWD_CFG);
}
static void fe_rxcsum_config(bool enable)
{
	if (enable)
		fe_w32(fe_r32(FE_GDMA1_FWD_CFG) | (FE_GDM1_ICS_EN |
		       FE_GDM1_TCS_EN | FE_GDM1_UCS_EN),
		       FE_GDMA1_FWD_CFG);
	else
		fe_w32(fe_r32(FE_GDMA1_FWD_CFG) & ~(FE_GDM1_ICS_EN |
		       FE_GDM1_TCS_EN | FE_GDM1_UCS_EN),
		       FE_GDMA1_FWD_CFG);
}
static void fe_txcsum_config(bool enable)
{
	if (enable)
		fe_w32(fe_r32(FE_CDMA_CSG_CFG) | (FE_ICS_GEN_EN |
		       FE_TCS_GEN_EN | FE_UCS_GEN_EN),
		       FE_CDMA_CSG_CFG);
	else
		fe_w32(fe_r32(FE_CDMA_CSG_CFG) & ~(FE_ICS_GEN_EN |
		       FE_TCS_GEN_EN | FE_UCS_GEN_EN),
		       FE_CDMA_CSG_CFG);
}
void fe_csum_config(struct fe_priv *priv)
{
	struct net_device *dev = priv_netdev(priv);

	fe_txcsum_config((dev->features & NETIF_F_IP_CSUM));
	fe_rxcsum_config((dev->features & NETIF_F_RXCSUM));
}
static int fe_hw_init(struct net_device *dev)
{
	struct fe_priv *priv = netdev_priv(dev);
	int i, err;

	err = devm_request_irq(priv->device, dev->irq, fe_handle_irq, 0,
			       dev_name(priv->device), dev);
	if (err)
		return err;

	if (priv->soc->set_mac)
		priv->soc->set_mac(priv, dev->dev_addr);
	else
		fe_hw_set_macaddr(priv, dev->dev_addr);

	/* disable delay interrupt */
	fe_reg_w32(0, FE_REG_DLY_INT_CFG);

	fe_int_disable(priv->soc->tx_int | priv->soc->rx_int);

	/* the frame engine inserts the VLAN tag selected by the VIDX field
	 * in the Tx descriptor, so pre-load the VID table
	 */
	if (fe_reg_table[FE_REG_FE_DMA_VID_BASE])
		for (i = 0; i < 16; i += 2)
			fe_w32(((i + 1) << 16) + i,
			       fe_reg_table[FE_REG_FE_DMA_VID_BASE] +
			       (i * 2));

	BUG_ON(!priv->soc->fwd_config);
	if (priv->soc->fwd_config(priv))
		netdev_err(dev, "unable to get clock\n");

	if (fe_reg_table[FE_REG_FE_RST_GL]) {
		fe_reg_w32(1, FE_REG_FE_RST_GL);
		fe_reg_w32(0, FE_REG_FE_RST_GL);
	}

	return 0;
}
static int fe_open(struct net_device *dev)
{
	struct fe_priv *priv = netdev_priv(dev);
	unsigned long flags;
	u32 val;
	int err;

	err = fe_init_dma(priv);
	if (err)
		return err;

	spin_lock_irqsave(&priv->page_lock, flags);

	val = FE_TX_WB_DDONE | FE_RX_DMA_EN | FE_TX_DMA_EN;
	if (priv->flags & FE_FLAG_RX_2B_OFFSET)
		val |= FE_RX_2B_OFFSET;
	val |= priv->soc->pdma_glo_cfg;
	fe_reg_w32(val, FE_REG_PDMA_GLO_CFG);

	spin_unlock_irqrestore(&priv->page_lock, flags);

	priv->phy->start(priv);

	if (priv->soc->has_carrier && priv->soc->has_carrier(priv))
		netif_carrier_on(dev);

	napi_enable(&priv->rx_napi);
	fe_int_enable(priv->soc->tx_int | priv->soc->rx_int);
	netif_start_queue(dev);

	return 0;
}
static int fe_stop(struct net_device *dev)
{
	struct fe_priv *priv = netdev_priv(dev);
	unsigned long flags;
	int i;

	netif_tx_disable(dev);
	fe_int_disable(priv->soc->tx_int | priv->soc->rx_int);
	napi_disable(&priv->rx_napi);

	priv->phy->stop(priv);

	spin_lock_irqsave(&priv->page_lock, flags);

	fe_reg_w32(fe_reg_r32(FE_REG_PDMA_GLO_CFG) &
		   ~(FE_TX_WB_DDONE | FE_RX_DMA_EN | FE_TX_DMA_EN),
		   FE_REG_PDMA_GLO_CFG);
	spin_unlock_irqrestore(&priv->page_lock, flags);

	/* wait for the dma engine to go idle */
	for (i = 0; i < 10; i++) {
		if (fe_reg_r32(FE_REG_PDMA_GLO_CFG) &
		    (FE_TX_DMA_BUSY | FE_RX_DMA_BUSY)) {
			msleep(10);
			continue;
		}
		break;
	}

	fe_free_dma(priv);

	return 0;
}
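/* fe_stop() loops until the TX/RX DMA busy bits clear (with a bounded
 * number of retries) before fe_free_dma() runs, so the engine can no
 * longer write into buffers that are about to be released.
 */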
static int __init fe_init(struct net_device *dev)
{
	struct fe_priv *priv = netdev_priv(dev);
	struct device_node *port;
	int err;

	BUG_ON(!priv->soc->reset_fe);
	priv->soc->reset_fe();

	if (priv->soc->switch_init)
		priv->soc->switch_init(priv);

	of_get_mac_address_mtd(priv->device->of_node, dev->dev_addr);
	/* if the mac address is invalid, use a random mac address */
	if (!is_valid_ether_addr(dev->dev_addr)) {
		random_ether_addr(dev->dev_addr);
		dev_err(priv->device, "generated random MAC address %pM\n",
			dev->dev_addr);
	}

	err = fe_mdio_init(priv);
	if (err)
		return err;

	if (priv->soc->port_init)
		for_each_child_of_node(priv->device->of_node, port)
			if (of_device_is_compatible(port, "ralink,eth-port") &&
			    of_device_is_available(port))
				priv->soc->port_init(priv, port);

	err = priv->phy->connect(priv);
	if (err)
		goto err_phy_disconnect;

	err = fe_hw_init(dev);
	if (err)
		goto err_phy_disconnect;

	if (priv->soc->switch_config)
		priv->soc->switch_config(priv);

	return 0;

err_phy_disconnect:
	priv->phy->disconnect(priv);
	fe_mdio_cleanup(priv);

	return err;
}
static void fe_uninit(struct net_device *dev)
{
	struct fe_priv *priv = netdev_priv(dev);

	priv->phy->disconnect(priv);
	fe_mdio_cleanup(priv);

	fe_reg_w32(0, FE_REG_FE_INT_ENABLE);
	free_irq(dev->irq, dev);
}
static int fe_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct fe_priv *priv = netdev_priv(dev);

	switch (cmd) {
	case SIOCETHTOOL:
		return phy_ethtool_ioctl(priv->phy_dev,
					 (void *) ifr->ifr_data);
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		return phy_mii_ioctl(priv->phy_dev, ifr, cmd);
	default:
		return -EOPNOTSUPP;
	}
}
static int fe_change_mtu(struct net_device *dev, int new_mtu)
{
	struct fe_priv *priv = netdev_priv(dev);
	int frag_size, old_mtu;
	u32 fwd_cfg;

	if (!(priv->flags & FE_FLAG_JUMBO_FRAME))
		return eth_change_mtu(dev, new_mtu);

	frag_size = fe_max_frag_size(new_mtu);
	if (new_mtu < 68 || frag_size > PAGE_SIZE)
		return -EINVAL;

	old_mtu = dev->mtu;
	dev->mtu = new_mtu;

	/* return early if the buffer sizes will not change */
	if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN)
		return 0;
	if (old_mtu > ETH_DATA_LEN && new_mtu > ETH_DATA_LEN)
		return 0;

	if (new_mtu <= ETH_DATA_LEN)
		priv->rx_ring.frag_size = fe_max_frag_size(ETH_DATA_LEN);
	else
		priv->rx_ring.frag_size = PAGE_SIZE;
	priv->rx_ring.rx_buf_size = fe_max_buf_size(priv->rx_ring.frag_size);

	if (!netif_running(dev))
		return 0;

	fe_stop(dev);
	fwd_cfg = fe_r32(FE_GDMA1_FWD_CFG);
	if (new_mtu <= ETH_DATA_LEN) {
		fwd_cfg &= ~FE_GDM1_JMB_EN;
	} else {
		fwd_cfg &= ~(FE_GDM1_JMB_LEN_MASK << FE_GDM1_JMB_LEN_SHIFT);
		fwd_cfg |= (DIV_ROUND_UP(frag_size, 1024) <<
			    FE_GDM1_JMB_LEN_SHIFT) | FE_GDM1_JMB_EN;
	}
	fe_w32(fwd_cfg, FE_GDMA1_FWD_CFG);

	return fe_open(dev);
}
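/* Switching in or out of jumbo mode changes the RX fragment geometry, so
 * the device is brought down and back up (fe_stop()/fe_open()) to
 * re-allocate the RX ring with the new buffer sizes.
 */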
static const struct net_device_ops fe_netdev_ops = {
	.ndo_init		= fe_init,
	.ndo_uninit		= fe_uninit,
	.ndo_open		= fe_open,
	.ndo_stop		= fe_stop,
	.ndo_start_xmit		= fe_start_xmit,
	.ndo_set_mac_address	= fe_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= fe_do_ioctl,
	.ndo_change_mtu		= fe_change_mtu,
	.ndo_tx_timeout		= fe_tx_timeout,
	.ndo_get_stats64	= fe_get_stats64,
	.ndo_vlan_rx_add_vid	= fe_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= fe_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= fe_poll_controller,
#endif
};
static void fe_reset_pending(struct fe_priv *priv)
{
	struct net_device *dev = priv->netdev;
	int err;

	rtnl_lock();
	fe_stop(dev);

	err = fe_open(dev);
	if (err) {
		netif_alert(priv, ifup, dev,
			    "Driver up/down cycle failed, closing device.\n");
		dev_close(dev);
	}
	rtnl_unlock();
}
static const struct fe_work_t fe_work[] = {
	{FE_FLAG_RESET_PENDING, fe_reset_pending},
};
static void fe_pending_work(struct work_struct *work)
{
	struct fe_priv *priv = container_of(work, struct fe_priv, pending_work);
	int i;
	bool pending;

	for (i = 0; i < ARRAY_SIZE(fe_work); i++) {
		pending = test_and_clear_bit(fe_work[i].bitnr,
					     priv->pending_flags);
		if (pending)
			fe_work[i].action(priv);
	}
}
static int fe_probe(struct platform_device *pdev)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	const struct of_device_id *match;
	struct fe_soc_data *soc;
	struct net_device *netdev;
	struct fe_priv *priv;
	struct clk *sysclk;
	int err, napi_weight;

	device_reset(&pdev->dev);

	match = of_match_device(of_fe_match, &pdev->dev);
	soc = (struct fe_soc_data *) match->data;

	if (soc->reg_table)
		fe_reg_table = soc->reg_table;
	else
		soc->reg_table = fe_reg_table;

	fe_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(fe_base)) {
		err = -EADDRNOTAVAIL;
		goto err_out;
	}

	netdev = alloc_etherdev(sizeof(*priv));
	if (!netdev) {
		dev_err(&pdev->dev, "alloc_etherdev failed\n");
		err = -ENOMEM;
		goto err_iounmap;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);
	netdev->netdev_ops = &fe_netdev_ops;
	netdev->base_addr = (unsigned long) fe_base;

	netdev->irq = platform_get_irq(pdev, 0);
	if (netdev->irq < 0) {
		dev_err(&pdev->dev, "no IRQ resource found\n");
		err = -ENXIO;
		goto err_free_dev;
	}

	soc->init_data(soc, netdev);
	/* fake NETIF_F_HW_VLAN_CTAG_RX for good GRO performance */
	netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
	netdev->vlan_features = netdev->hw_features &
		~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
	netdev->features |= netdev->hw_features;

	/* fake rx vlan filter func. to support tx vlan offload func */
	if (fe_reg_table[FE_REG_FE_DMA_VID_BASE])
		netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;

	priv = netdev_priv(netdev);
	spin_lock_init(&priv->page_lock);
	if (fe_reg_table[FE_REG_FE_COUNTER_BASE]) {
		priv->hw_stats = kzalloc(sizeof(*priv->hw_stats), GFP_KERNEL);
		if (!priv->hw_stats) {
			err = -ENOMEM;
			goto err_free_dev;
		}
		spin_lock_init(&priv->hw_stats->stats_lock);
	}

	sysclk = devm_clk_get(&pdev->dev, NULL);
	if (!IS_ERR(sysclk))
		priv->sysclk = clk_get_rate(sysclk);

	priv->netdev = netdev;
	priv->device = &pdev->dev;
	priv->soc = soc;
	priv->msg_enable = netif_msg_init(fe_msg_level, FE_DEFAULT_MSG_ENABLE);
	priv->rx_ring.frag_size = fe_max_frag_size(ETH_DATA_LEN);
	priv->rx_ring.rx_buf_size = fe_max_buf_size(priv->rx_ring.frag_size);
	priv->tx_ring.tx_ring_size = priv->rx_ring.rx_ring_size = NUM_DMA_DESC;
	INIT_WORK(&priv->pending_work, fe_pending_work);

	napi_weight = 32;
	if (priv->flags & FE_FLAG_NAPI_WEIGHT) {
		napi_weight *= 4;
		priv->tx_ring.tx_ring_size *= 4;
		priv->rx_ring.rx_ring_size *= 4;
	}
	netif_napi_add(netdev, &priv->rx_napi, fe_poll, napi_weight);
	fe_set_ethtool_ops(netdev);

	err = register_netdev(netdev);
	if (err) {
		dev_err(&pdev->dev, "error bringing up device\n");
		goto err_free_dev;
	}

	platform_set_drvdata(pdev, netdev);

	netif_info(priv, probe, netdev, "ralink at 0x%08lx, irq %d\n",
		   netdev->base_addr, netdev->irq);

	return 0;

err_free_dev:
	free_netdev(netdev);
err_iounmap:
	devm_iounmap(&pdev->dev, fe_base);
err_out:
	return err;
}
static int fe_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct fe_priv *priv = netdev_priv(dev);

	netif_napi_del(&priv->rx_napi);
	kfree(priv->hw_stats);

	cancel_work_sync(&priv->pending_work);

	unregister_netdev(dev);
	free_netdev(dev);
	platform_set_drvdata(pdev, NULL);

	return 0;
}
static struct platform_driver fe_driver = {
	.probe = fe_probe,
	.remove = fe_remove,
	.driver = {
		.name = "ralink_soc_eth",
		.owner = THIS_MODULE,
		.of_match_table = of_fe_match,
	},
};
static int __init init_rtfe(void)
{
	int ret;

	ret = platform_driver_register(&fe_driver);

	return ret;
}

static void __exit exit_rtfe(void)
{
	platform_driver_unregister(&fe_driver);
}

module_init(init_rtfe);
module_exit(exit_rtfe);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
MODULE_DESCRIPTION("Ethernet driver for Ralink SoC");
MODULE_VERSION(FE_DRV_VERSION);