/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2009-2013 John Crispin <blogic@openwrt.org>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/platform_device.h>
#include <linux/of_device.h>
#include <linux/clk.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/if_vlan.h>
#include <linux/reset.h>
#include <linux/tcp.h>
#include <linux/delay.h>

#include <asm/mach-ralink/ralink_regs.h>

#include "ralink_soc_eth.h"
#include "esw_rt3052.h"
#include "ralink_ethtool.h"

#define MAX_RX_LENGTH		1536
#define FE_RX_OFFSET		(NET_SKB_PAD + NET_IP_ALIGN)
#define FE_RX_HLEN		(FE_RX_OFFSET + VLAN_ETH_HLEN + VLAN_HLEN + \
				 ETH_FCS_LEN)
#define DMA_DUMMY_DESC		0xffffffff
#define FE_DEFAULT_MSG_ENABLE \
		(NETIF_MSG_DRV | \
		NETIF_MSG_PROBE | \
		NETIF_MSG_LINK | \
		NETIF_MSG_TIMER | \
		NETIF_MSG_IFDOWN | \
		NETIF_MSG_IFUP | \
		NETIF_MSG_RX_ERR | \
		NETIF_MSG_TX_ERR)

#define TX_DMA_DESP2_DEF	(TX_DMA_LS0 | TX_DMA_DONE)
#define TX_DMA_DESP4_DEF	(TX_DMA_QN(3) | TX_DMA_PN(1))
#define NEXT_TX_DESP_IDX(X)	(((X) + 1) & (NUM_DMA_DESC - 1))
#define NEXT_RX_DESP_IDX(X)	(((X) + 1) & (NUM_DMA_DESC - 1))
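
/* NEXT_TX_DESP_IDX()/NEXT_RX_DESP_IDX() assume NUM_DMA_DESC is a power
 * of two: the AND with (NUM_DMA_DESC - 1) wraps the index back to the
 * start of the ring without a modulo */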

static int fe_msg_level = -1;
module_param_named(msg_level, fe_msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");

static const u32 fe_reg_table_default[FE_REG_COUNT] = {
	[FE_REG_PDMA_GLO_CFG] = FE_PDMA_GLO_CFG,
	[FE_REG_PDMA_RST_CFG] = FE_PDMA_RST_CFG,
	[FE_REG_DLY_INT_CFG] = FE_DLY_INT_CFG,
	[FE_REG_TX_BASE_PTR0] = FE_TX_BASE_PTR0,
	[FE_REG_TX_MAX_CNT0] = FE_TX_MAX_CNT0,
	[FE_REG_TX_CTX_IDX0] = FE_TX_CTX_IDX0,
	[FE_REG_TX_DTX_IDX0] = FE_TX_DTX_IDX0,
	[FE_REG_RX_BASE_PTR0] = FE_RX_BASE_PTR0,
	[FE_REG_RX_MAX_CNT0] = FE_RX_MAX_CNT0,
	[FE_REG_RX_CALC_IDX0] = FE_RX_CALC_IDX0,
	[FE_REG_RX_DRX_IDX0] = FE_RX_DRX_IDX0,
	[FE_REG_FE_INT_ENABLE] = FE_FE_INT_ENABLE,
	[FE_REG_FE_INT_STATUS] = FE_FE_INT_STATUS,
	[FE_REG_FE_DMA_VID_BASE] = FE_DMA_VID0,
	[FE_REG_FE_COUNTER_BASE] = FE_GDMA1_TX_GBCNT,
	[FE_REG_FE_RST_GL] = FE_FE_RST_GL,
};

static const u32 *fe_reg_table = fe_reg_table_default;
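
/* the logical enum fe_reg indices used throughout the driver are
 * resolved through fe_reg_table, so a SoC backend can override the
 * default register layout above by providing its own soc->reg_table
 * (see fe_probe()) */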

struct fe_work_t {
	int bitnr;
	void (*action)(struct fe_priv *);
};

static void __iomem *fe_base = NULL;

void fe_w32(u32 val, unsigned reg)
{
	__raw_writel(val, fe_base + reg);
}

u32 fe_r32(unsigned reg)
{
	return __raw_readl(fe_base + reg);
}

void fe_reg_w32(u32 val, enum fe_reg reg)
{
	fe_w32(val, fe_reg_table[reg]);
}

u32 fe_reg_r32(enum fe_reg reg)
{
	return fe_r32(fe_reg_table[reg]);
}

static inline void fe_int_disable(u32 mask)
{
	fe_reg_w32(fe_reg_r32(FE_REG_FE_INT_ENABLE) & ~mask,
		   FE_REG_FE_INT_ENABLE);
	/* flush write */
	fe_reg_r32(FE_REG_FE_INT_ENABLE);
}

static inline void fe_int_enable(u32 mask)
{
	fe_reg_w32(fe_reg_r32(FE_REG_FE_INT_ENABLE) | mask,
		   FE_REG_FE_INT_ENABLE);
	/* flush write */
	fe_reg_r32(FE_REG_FE_INT_ENABLE);
}

static inline void fe_hw_set_macaddr(struct fe_priv *priv, unsigned char *mac)
{
	unsigned long flags;

	spin_lock_irqsave(&priv->page_lock, flags);
	fe_w32((mac[0] << 8) | mac[1], FE_GDMA1_MAC_ADRH);
	fe_w32((mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5],
	       FE_GDMA1_MAC_ADRL);
	spin_unlock_irqrestore(&priv->page_lock, flags);
}

static int fe_set_mac_address(struct net_device *dev, void *p)
{
	int ret = eth_mac_addr(dev, p);

	if (!ret) {
		struct fe_priv *priv = netdev_priv(dev);

		if (priv->soc->set_mac)
			priv->soc->set_mac(priv, dev->dev_addr);
		else
			fe_hw_set_macaddr(priv, p);
	}

	return ret;
}

static inline int fe_max_frag_size(int mtu)
{
	return SKB_DATA_ALIGN(FE_RX_HLEN + mtu) +
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}

static inline int fe_max_buf_size(int frag_size)
{
	return frag_size - FE_RX_HLEN -
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}
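
/* fe_max_frag_size() sizes an rx frag so that FE_RX_HLEN of headroom,
 * the mtu and the skb_shared_info placed at the buffer end by
 * build_skb() all fit; fe_max_buf_size() is the inverse and yields the
 * usable DMA length for a frag of the given size */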

static inline void fe_get_rxd(struct fe_rx_dma *rxd, struct fe_rx_dma *dma_rxd)
{
	rxd->rxd1 = dma_rxd->rxd1;
	rxd->rxd2 = dma_rxd->rxd2;
	rxd->rxd3 = dma_rxd->rxd3;
	rxd->rxd4 = dma_rxd->rxd4;
}

static inline void fe_get_txd(struct fe_tx_dma *txd, struct fe_tx_dma *dma_txd)
{
	txd->txd1 = dma_txd->txd1;
	txd->txd2 = dma_txd->txd2;
	txd->txd3 = dma_txd->txd3;
	txd->txd4 = dma_txd->txd4;
}
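
/* in fe_set_txd() below, txd2 is deliberately written last: it holds
 * the TX_DMA_DONE bit, so the hardware must not see it cleared before
 * the buffer pointers in txd1/txd3 and the flags in txd4 are valid */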
static inline void fe_set_txd(struct fe_tx_dma *txd, struct fe_tx_dma *dma_txd)
{
	dma_txd->txd1 = txd->txd1;
	dma_txd->txd3 = txd->txd3;
	dma_txd->txd4 = txd->txd4;
	/* clean dma done flag last */
	dma_txd->txd2 = txd->txd2;
}

static void fe_clean_rx(struct fe_priv *priv)
{
	int i;

	if (priv->rx_data) {
		for (i = 0; i < NUM_DMA_DESC; i++)
			if (priv->rx_data[i]) {
				if (priv->rx_dma && priv->rx_dma[i].rxd1)
					dma_unmap_single(&priv->netdev->dev,
							priv->rx_dma[i].rxd1,
							priv->rx_buf_size,
							DMA_FROM_DEVICE);
				put_page(virt_to_head_page(priv->rx_data[i]));
			}

		kfree(priv->rx_data);
		priv->rx_data = NULL;
	}

	if (priv->rx_dma) {
		dma_free_coherent(&priv->netdev->dev,
				NUM_DMA_DESC * sizeof(*priv->rx_dma),
				priv->rx_dma,
				priv->rx_phys);
		priv->rx_dma = NULL;
	}
}

static int fe_alloc_rx(struct fe_priv *priv)
{
	struct net_device *netdev = priv->netdev;
	int i;

	priv->rx_data = kcalloc(NUM_DMA_DESC, sizeof(*priv->rx_data),
			GFP_KERNEL);
	if (!priv->rx_data)
		goto no_rx_mem;

	for (i = 0; i < NUM_DMA_DESC; i++) {
		priv->rx_data[i] = netdev_alloc_frag(priv->frag_size);
		if (!priv->rx_data[i])
			goto no_rx_mem;
	}

	priv->rx_dma = dma_alloc_coherent(&netdev->dev,
			NUM_DMA_DESC * sizeof(*priv->rx_dma),
			&priv->rx_phys,
			GFP_ATOMIC | __GFP_ZERO);
	if (!priv->rx_dma)
		goto no_rx_mem;

	for (i = 0; i < NUM_DMA_DESC; i++) {
		dma_addr_t dma_addr = dma_map_single(&netdev->dev,
				priv->rx_data[i] + FE_RX_OFFSET,
				priv->rx_buf_size,
				DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(&netdev->dev, dma_addr)))
			goto no_rx_mem;
		priv->rx_dma[i].rxd1 = (unsigned int) dma_addr;

		if (priv->soc->rx_dma)
			priv->soc->rx_dma(&priv->rx_dma[i], priv->rx_buf_size);
		else
			priv->rx_dma[i].rxd2 = RX_DMA_LSO;
	}
	wmb();

	fe_reg_w32(priv->rx_phys, FE_REG_RX_BASE_PTR0);
	fe_reg_w32(NUM_DMA_DESC, FE_REG_RX_MAX_CNT0);
	fe_reg_w32((NUM_DMA_DESC - 1), FE_REG_RX_CALC_IDX0);
	fe_reg_w32(FE_PST_DRX_IDX0, FE_REG_PDMA_RST_CFG);

	return 0;

no_rx_mem:
	return -ENOMEM;
}
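
/* FE_REG_RX_CALC_IDX0 holds the last descriptor index handed back to
 * the hardware; initializing it to NUM_DMA_DESC - 1 gives the engine
 * the whole freshly filled ring to receive into */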

static void fe_clean_tx(struct fe_priv *priv)
{
	int i;

	if (priv->tx_skb) {
		for (i = 0; i < NUM_DMA_DESC; i++) {
			if (priv->tx_skb[i])
				dev_kfree_skb_any(priv->tx_skb[i]);
		}
		kfree(priv->tx_skb);
		priv->tx_skb = NULL;
	}

	if (priv->tx_dma) {
		dma_free_coherent(&priv->netdev->dev,
				NUM_DMA_DESC * sizeof(*priv->tx_dma),
				priv->tx_dma,
				priv->tx_phys);
		priv->tx_dma = NULL;
	}
}

static int fe_alloc_tx(struct fe_priv *priv)
{
	int i;

	priv->tx_free_idx = 0;

	priv->tx_skb = kcalloc(NUM_DMA_DESC, sizeof(*priv->tx_skb),
			GFP_KERNEL);
	if (!priv->tx_skb)
		goto no_tx_mem;

	priv->tx_dma = dma_alloc_coherent(&priv->netdev->dev,
			NUM_DMA_DESC * sizeof(*priv->tx_dma),
			&priv->tx_phys,
			GFP_ATOMIC | __GFP_ZERO);
	if (!priv->tx_dma)
		goto no_tx_mem;

	for (i = 0; i < NUM_DMA_DESC; i++) {
		if (priv->soc->tx_dma) {
			priv->soc->tx_dma(&priv->tx_dma[i]);
			continue;
		}
		priv->tx_dma[i].txd2 = TX_DMA_DESP2_DEF;
	}
	wmb();

	fe_reg_w32(priv->tx_phys, FE_REG_TX_BASE_PTR0);
	fe_reg_w32(NUM_DMA_DESC, FE_REG_TX_MAX_CNT0);
	fe_reg_w32(0, FE_REG_TX_CTX_IDX0);
	fe_reg_w32(FE_PST_DTX_IDX0, FE_REG_PDMA_RST_CFG);

	return 0;

no_tx_mem:
	return -ENOMEM;
}

static int fe_init_dma(struct fe_priv *priv)
{
	int err;

	err = fe_alloc_tx(priv);
	if (err)
		return err;

	err = fe_alloc_rx(priv);
	if (err)
		return err;

	return 0;
}

static void fe_free_dma(struct fe_priv *priv)
{
	fe_clean_tx(priv);
	fe_clean_rx(priv);

	netdev_reset_queue(priv->netdev);
}

static inline void txd_unmap_single(struct device *dev, struct fe_tx_dma *txd)
{
	if (txd->txd1 && TX_DMA_GET_PLEN0(txd->txd2))
		dma_unmap_single(dev, txd->txd1,
				TX_DMA_GET_PLEN0(txd->txd2),
				DMA_TO_DEVICE);
}

static inline void txd_unmap_page0(struct device *dev, struct fe_tx_dma *txd)
{
	if (txd->txd1 && TX_DMA_GET_PLEN0(txd->txd2))
		dma_unmap_page(dev, txd->txd1,
				TX_DMA_GET_PLEN0(txd->txd2),
				DMA_TO_DEVICE);
}

static inline void txd_unmap_page1(struct device *dev, struct fe_tx_dma *txd)
{
	if (txd->txd3 && TX_DMA_GET_PLEN1(txd->txd2))
		dma_unmap_page(dev, txd->txd3,
				TX_DMA_GET_PLEN1(txd->txd2),
				DMA_TO_DEVICE);
}
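
/* a fe_tx_dma descriptor carries up to two buffers: txd1 with length
 * PLEN0 and txd3 with length PLEN1, both encoded in txd2.  The skb
 * head is mapped with dma_map_single() and flagged via tx_udf_bit,
 * while fragments are mapped as pages, hence the three unmap variants
 * above */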

void fe_stats_update(struct fe_priv *priv)
{
	struct fe_hw_stats *hwstats = priv->hw_stats;
	unsigned int base = fe_reg_table[FE_REG_FE_COUNTER_BASE];

	u64_stats_update_begin(&hwstats->syncp);

	if (IS_ENABLED(CONFIG_SOC_MT7621)) {
		hwstats->rx_bytes += fe_r32(base);
		hwstats->rx_packets += fe_r32(base + 0x08);
		hwstats->rx_overflow += fe_r32(base + 0x10);
		hwstats->rx_fcs_errors += fe_r32(base + 0x14);
		hwstats->rx_short_errors += fe_r32(base + 0x18);
		hwstats->rx_long_errors += fe_r32(base + 0x1c);
		hwstats->rx_checksum_errors += fe_r32(base + 0x20);
		hwstats->rx_flow_control_packets += fe_r32(base + 0x24);
		hwstats->tx_skip += fe_r32(base + 0x28);
		hwstats->tx_collisions += fe_r32(base + 0x2c);
		hwstats->tx_bytes += fe_r32(base + 0x30);
		hwstats->tx_packets += fe_r32(base + 0x38);
	} else {
		hwstats->tx_bytes += fe_r32(base);
		hwstats->tx_packets += fe_r32(base + 0x04);
		hwstats->tx_skip += fe_r32(base + 0x08);
		hwstats->tx_collisions += fe_r32(base + 0x0c);
		hwstats->rx_bytes += fe_r32(base + 0x20);
		hwstats->rx_packets += fe_r32(base + 0x24);
		hwstats->rx_overflow += fe_r32(base + 0x28);
		hwstats->rx_fcs_errors += fe_r32(base + 0x2c);
		hwstats->rx_short_errors += fe_r32(base + 0x30);
		hwstats->rx_long_errors += fe_r32(base + 0x34);
		hwstats->rx_checksum_errors += fe_r32(base + 0x38);
		hwstats->rx_flow_control_packets += fe_r32(base + 0x3c);
	}

	u64_stats_update_end(&hwstats->syncp);
}

static struct rtnl_link_stats64 *fe_get_stats64(struct net_device *dev,
		struct rtnl_link_stats64 *storage)
{
	struct fe_priv *priv = netdev_priv(dev);
	struct fe_hw_stats *hwstats = priv->hw_stats;
	unsigned int base = fe_reg_table[FE_REG_FE_COUNTER_BASE];
	unsigned int start;

	if (!base) {
		netdev_stats_to_stats64(storage, &dev->stats);
		return storage;
	}

	if (netif_running(dev) && netif_device_present(dev)) {
		if (spin_trylock(&hwstats->stats_lock)) {
			fe_stats_update(priv);
			spin_unlock(&hwstats->stats_lock);
		}
	}

	do {
		start = u64_stats_fetch_begin_bh(&hwstats->syncp);
		storage->rx_packets = hwstats->rx_packets;
		storage->tx_packets = hwstats->tx_packets;
		storage->rx_bytes = hwstats->rx_bytes;
		storage->tx_bytes = hwstats->tx_bytes;
		storage->collisions = hwstats->tx_collisions;
		storage->rx_length_errors = hwstats->rx_short_errors +
			hwstats->rx_long_errors;
		storage->rx_over_errors = hwstats->rx_overflow;
		storage->rx_crc_errors = hwstats->rx_fcs_errors;
		storage->rx_errors = hwstats->rx_checksum_errors;
		storage->tx_aborted_errors = hwstats->tx_skip;
	} while (u64_stats_fetch_retry_bh(&hwstats->syncp, start));

	storage->tx_errors = priv->netdev->stats.tx_errors;
	storage->rx_dropped = priv->netdev->stats.rx_dropped;
	storage->tx_dropped = priv->netdev->stats.tx_dropped;

	return storage;
}

static int fe_vlan_rx_add_vid(struct net_device *dev,
		__be16 proto, u16 vid)
{
	struct fe_priv *priv = netdev_priv(dev);
	u32 idx = (vid & 0xf);
	u32 vlan_cfg;

	if (!((fe_reg_table[FE_REG_FE_DMA_VID_BASE]) &&
	      (dev->features & NETIF_F_HW_VLAN_CTAG_TX)))
		return 0;

	if (test_bit(idx, &priv->vlan_map)) {
		netdev_warn(dev, "disable tx vlan offload\n");
		dev->wanted_features &= ~NETIF_F_HW_VLAN_CTAG_TX;
		netdev_update_features(dev);
	} else {
		vlan_cfg = fe_r32(fe_reg_table[FE_REG_FE_DMA_VID_BASE] +
				((idx >> 1) << 2));
		if (idx & 0x1) {
			vlan_cfg &= 0xffff;
			vlan_cfg |= (vid << 16);
		} else {
			vlan_cfg &= 0xffff0000;
			vlan_cfg |= vid;
		}
		fe_w32(vlan_cfg, fe_reg_table[FE_REG_FE_DMA_VID_BASE] +
				((idx >> 1) << 2));
		set_bit(idx, &priv->vlan_map);
	}

	return 0;
}
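
/* the hardware VID table only has 16 slots (vid & 0xf); when two vids
 * collide on one slot, the code above disables tx vlan offload
 * entirely instead of transmitting a wrong tag */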

static int fe_vlan_rx_kill_vid(struct net_device *dev,
		__be16 proto, u16 vid)
{
	struct fe_priv *priv = netdev_priv(dev);
	u32 idx = (vid & 0xf);

	if (!((fe_reg_table[FE_REG_FE_DMA_VID_BASE]) &&
	      (dev->features & NETIF_F_HW_VLAN_CTAG_TX)))
		return 0;

	clear_bit(idx, &priv->vlan_map);

	return 0;
}

static int fe_tx_map_dma(struct sk_buff *skb, struct net_device *dev,
		int idx)
{
	struct fe_priv *priv = netdev_priv(dev);
	struct skb_frag_struct *frag;
	struct fe_tx_dma txd, *ptxd;
	dma_addr_t mapped_addr;
	unsigned int nr_frags;
	u32 def_txd4;
	int i, j, unmap_idx, tx_num;

	memset(&txd, 0, sizeof(txd));
	nr_frags = skb_shinfo(skb)->nr_frags;
	tx_num = 1 + (nr_frags >> 1);

	/* init tx descriptor */
	if (priv->soc->tx_dma)
		priv->soc->tx_dma(&txd);
	else
		txd.txd4 = TX_DMA_DESP4_DEF;
	def_txd4 = txd.txd4;

	/* use dma_unmap_single to free it */
	txd.txd4 |= priv->soc->tx_udf_bit;

	/* TX Checksum offload */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		txd.txd4 |= TX_DMA_CHKSUM;

	/* VLAN header offload */
	if (vlan_tx_tag_present(skb)) {
		if (IS_ENABLED(CONFIG_SOC_MT7621))
			txd.txd4 |= TX_DMA_INS_VLAN_MT7621 | vlan_tx_tag_get(skb);
		else
			txd.txd4 |= TX_DMA_INS_VLAN |
				((vlan_tx_tag_get(skb) >> VLAN_PRIO_SHIFT) << 4) |
				(vlan_tx_tag_get(skb) & 0xF);
	}

	/* TSO: fill MSS info in tcp checksum field */
	if (skb_is_gso(skb)) {
		if (skb_cow_head(skb, 0)) {
			netif_warn(priv, tx_err, dev,
					"GSO expand head fail.\n");
			goto err_out;
		}
		if (skb_shinfo(skb)->gso_type &
				(SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
			txd.txd4 |= TX_DMA_TSO;
			tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size);
		}
	}

	mapped_addr = dma_map_single(&dev->dev, skb->data,
			skb_headlen(skb), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
		goto err_out;
	txd.txd1 = mapped_addr;
	txd.txd2 = TX_DMA_PLEN0(skb_headlen(skb));

	/* TX SG offload */
	j = idx;
	for (i = 0; i < nr_frags; i++) {

		frag = &skb_shinfo(skb)->frags[i];
		mapped_addr = skb_frag_dma_map(&dev->dev, frag, 0,
				skb_frag_size(frag), DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
			goto err_dma;

		if (i & 0x1) {
			j = NEXT_TX_DESP_IDX(j);
			txd.txd1 = mapped_addr;
			txd.txd2 = TX_DMA_PLEN0(frag->size);
			txd.txd4 = def_txd4;
		} else {
			txd.txd3 = mapped_addr;
			txd.txd2 |= TX_DMA_PLEN1(frag->size);
			if (i != (nr_frags - 1)) {
				fe_set_txd(&txd, &priv->tx_dma[j]);
				memset(&txd, 0, sizeof(txd));
			}
			priv->tx_skb[j] = (struct sk_buff *) DMA_DUMMY_DESC;
		}
	}

	/* set last segment */
	if (nr_frags & 0x1)
		txd.txd2 |= TX_DMA_LS1;
	else
		txd.txd2 |= TX_DMA_LS0;
	fe_set_txd(&txd, &priv->tx_dma[j]);

	/* store skb to cleanup */
	priv->tx_skb[j] = skb;

	netdev_sent_queue(dev, skb->len);
	skb_tx_timestamp(skb);

	wmb();
	j = NEXT_TX_DESP_IDX(j);
	fe_reg_w32(j, FE_REG_TX_CTX_IDX0);

	return 0;

err_dma:
	/* unmap dma */
	ptxd = &priv->tx_dma[idx];
	txd_unmap_single(&dev->dev, ptxd);

	j = idx;
	unmap_idx = i;
	for (i = 0; i < unmap_idx; i++) {
		if (i & 0x1) {
			j = NEXT_TX_DESP_IDX(j);
			ptxd = &priv->tx_dma[j];
			txd_unmap_page0(&dev->dev, ptxd);
		} else {
			txd_unmap_page1(&dev->dev, ptxd);
		}
	}

err_out:
	/* reinit descriptors and skb */
	j = idx;
	for (i = 0; i < tx_num; i++) {
		priv->tx_dma[j].txd2 = TX_DMA_DESP2_DEF;
		priv->tx_skb[j] = NULL;
		j = NEXT_TX_DESP_IDX(j);
	}
	wmb();

	return -1;
}
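
/* descriptors that only carry fragments are marked with DMA_DUMMY_DESC
 * in tx_skb[], so fe_poll_tx() can tell continuation slots apart from
 * the final slot that owns the skb to be freed */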

static inline int fe_skb_padto(struct sk_buff *skb, struct fe_priv *priv)
{
	unsigned int len;
	int ret;

	ret = 0;
	if (unlikely(skb->len < VLAN_ETH_ZLEN)) {
		if ((priv->flags & FE_FLAG_PADDING_64B) &&
		    !(priv->flags & FE_FLAG_PADDING_BUG))
			return ret;

		if (vlan_tx_tag_present(skb))
			len = ETH_ZLEN;
		else if (skb->protocol == cpu_to_be16(ETH_P_8021Q))
			len = VLAN_ETH_ZLEN;
		else if (!(priv->flags & FE_FLAG_PADDING_64B))
			len = ETH_ZLEN;
		else
			return ret;

		if (skb->len < len) {
			if ((ret = skb_pad(skb, len - skb->len)) < 0)
				return ret;
			skb->len = len;
			skb_set_tail_pointer(skb, len);
		}
	}

	return ret;
}

static inline u32 fe_empty_txd(struct fe_priv *priv, u32 tx_fill_idx)
{
	return (u32)(NUM_DMA_DESC - ((tx_fill_idx - priv->tx_free_idx) &
				(NUM_DMA_DESC - 1)));
}
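
/* (tx_fill_idx - tx_free_idx) masked to the ring size is the number of
 * descriptors currently in flight; subtracting it from NUM_DMA_DESC
 * yields the free slots, which fe_start_xmit() compares against the
 * descriptors a packet needs */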

static int fe_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct fe_priv *priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	u32 tx;
	int tx_num;
	int len = skb->len;

	if (fe_skb_padto(skb, priv)) {
		netif_warn(priv, tx_err, dev, "tx padding failed!\n");
		return NETDEV_TX_OK;
	}

	tx_num = 1 + (skb_shinfo(skb)->nr_frags >> 1);
	tx = fe_reg_r32(FE_REG_TX_CTX_IDX0);
	if (unlikely(fe_empty_txd(priv, tx) <= tx_num)) {
		netif_stop_queue(dev);
		netif_err(priv, tx_queued, dev,
				"Tx Ring full when queue awake!\n");
		return NETDEV_TX_BUSY;
	}

	if (fe_tx_map_dma(skb, dev, tx) < 0) {
		stats->tx_dropped++;
	} else {
		stats->tx_packets++;
		stats->tx_bytes += len;
	}

	return NETDEV_TX_OK;
}

static inline void fe_rx_vlan(struct sk_buff *skb)
{
	struct ethhdr *ehdr;
	u16 vlanid;

	if (!__vlan_get_tag(skb, &vlanid)) {
		/* pop the vlan tag */
		ehdr = (struct ethhdr *)skb->data;
		memmove(skb->data + VLAN_HLEN, ehdr, ETH_ALEN * 2);
		skb_pull(skb, VLAN_HLEN);
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlanid);
	}
}

static int fe_poll_rx(struct napi_struct *napi, int budget,
		struct fe_priv *priv)
{
	struct net_device *netdev = priv->netdev;
	struct net_device_stats *stats = &netdev->stats;
	struct fe_soc_data *soc = priv->soc;
	u32 checksum_bit;
	int idx = fe_reg_r32(FE_REG_RX_CALC_IDX0);
	u8 *data, *new_data;
	struct sk_buff *skb;
	struct fe_rx_dma *rxd, trxd;
	dma_addr_t dma_addr;
	bool rx_vlan = netdev->features & NETIF_F_HW_VLAN_CTAG_RX;
	int done = 0, pktlen;

	if (netdev->features & NETIF_F_RXCSUM)
		checksum_bit = soc->checksum_bit;
	else
		checksum_bit = 0;

	while (done < budget) {

		idx = NEXT_RX_DESP_IDX(idx);
		rxd = &priv->rx_dma[idx];
		data = priv->rx_data[idx];

		fe_get_rxd(&trxd, rxd);
		if (!(trxd.rxd2 & RX_DMA_DONE))
			break;

		/* alloc new buffer */
		new_data = netdev_alloc_frag(priv->frag_size);
		if (unlikely(!new_data)) {
			stats->rx_dropped++;
			goto release_desc;
		}
		dma_addr = dma_map_single(&netdev->dev,
				new_data + FE_RX_OFFSET,
				priv->rx_buf_size,
				DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(&netdev->dev, dma_addr))) {
			put_page(virt_to_head_page(new_data));
			goto release_desc;
		}

		/* receive data */
		skb = build_skb(data, priv->frag_size);
		if (unlikely(!skb)) {
			put_page(virt_to_head_page(new_data));
			goto release_desc;
		}
		skb_reserve(skb, FE_RX_OFFSET);

		dma_unmap_single(&netdev->dev, trxd.rxd1,
				priv->rx_buf_size, DMA_FROM_DEVICE);
		pktlen = RX_DMA_PLEN0(trxd.rxd2);
		skb->dev = netdev;
		skb_put(skb, pktlen);
		if (trxd.rxd4 & checksum_bit) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else {
			skb_checksum_none_assert(skb);
		}
		if (rx_vlan)
			fe_rx_vlan(skb);
		skb->protocol = eth_type_trans(skb, netdev);

		stats->rx_packets++;
		stats->rx_bytes += pktlen;

		if (skb->ip_summed == CHECKSUM_NONE)
			netif_receive_skb(skb);
		else
			napi_gro_receive(napi, skb);

		priv->rx_data[idx] = new_data;
		rxd->rxd1 = (unsigned int) dma_addr;

release_desc:
		if (soc->rx_dma)
			soc->rx_dma(rxd, priv->rx_buf_size);
		else
			rxd->rxd2 = RX_DMA_LSO;

		wmb();
		fe_reg_w32(idx, FE_REG_RX_CALC_IDX0);
		done++;
	}

	return done;
}
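
/* the refill-before-receive order above is deliberate: a replacement
 * frag is allocated and mapped before the old buffer is passed up the
 * stack, so under memory pressure a packet is dropped via release_desc
 * but the ring never loses a buffer */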

static int fe_poll_tx(struct fe_priv *priv, int budget)
{
	struct net_device *netdev = priv->netdev;
	struct device *dev = &netdev->dev;
	unsigned int bytes_compl = 0;
	struct sk_buff *skb;
	struct fe_tx_dma txd;
	int done = 0, idx;
	u32 udf_bit = priv->soc->tx_udf_bit;

	idx = priv->tx_free_idx;
	while (done < budget) {
		fe_get_txd(&txd, &priv->tx_dma[idx]);
		skb = priv->tx_skb[idx];

		if (!(txd.txd2 & TX_DMA_DONE) || !skb)
			break;

		txd_unmap_page1(dev, &txd);

		if (txd.txd4 & udf_bit)
			txd_unmap_single(dev, &txd);
		else
			txd_unmap_page0(dev, &txd);

		if (skb != (struct sk_buff *) DMA_DUMMY_DESC) {
			bytes_compl += skb->len;
			dev_kfree_skb_any(skb);
			done++;
		}
		priv->tx_skb[idx] = NULL;
		idx = NEXT_TX_DESP_IDX(idx);
	}
	priv->tx_free_idx = idx;

	if (!done)
		return 0;

	netdev_completed_queue(netdev, done, bytes_compl);
	if (unlikely(netif_queue_stopped(netdev) &&
			netif_carrier_ok(netdev))) {
		netif_wake_queue(netdev);
	}

	return done;
}
*napi
, int budget
)
869 struct fe_priv
*priv
= container_of(napi
, struct fe_priv
, rx_napi
);
870 struct fe_hw_stats
*hwstat
= priv
->hw_stats
;
871 int tx_done
, rx_done
;
873 u32 tx_intr
, rx_intr
;
875 status
= fe_reg_r32(FE_REG_FE_INT_STATUS
);
876 tx_intr
= priv
->soc
->tx_int
;
877 rx_intr
= priv
->soc
->rx_int
;
878 tx_done
= rx_done
= 0;
881 if (status
& tx_intr
) {
882 tx_done
+= fe_poll_tx(priv
, budget
- tx_done
);
883 if (tx_done
< budget
) {
884 fe_reg_w32(tx_intr
, FE_REG_FE_INT_STATUS
);
886 status
= fe_reg_r32(FE_REG_FE_INT_STATUS
);
889 if (status
& rx_intr
) {
890 rx_done
+= fe_poll_rx(napi
, budget
- rx_done
, priv
);
891 if (rx_done
< budget
) {
892 fe_reg_w32(rx_intr
, FE_REG_FE_INT_STATUS
);
896 if (unlikely(hwstat
&& (status
& FE_CNT_GDM_AF
))) {
897 if (spin_trylock(&hwstat
->stats_lock
)) {
898 fe_stats_update(priv
);
899 spin_unlock(&hwstat
->stats_lock
);
901 fe_reg_w32(FE_CNT_GDM_AF
, FE_REG_FE_INT_STATUS
);
904 if (unlikely(netif_msg_intr(priv
))) {
905 mask
= fe_reg_r32(FE_REG_FE_INT_ENABLE
);
906 netdev_info(priv
->netdev
,
907 "done tx %d, rx %d, intr 0x%08x/0x%x\n",
908 tx_done
, rx_done
, status
, mask
);
911 if ((tx_done
< budget
) && (rx_done
< budget
)) {
912 status
= fe_reg_r32(FE_REG_FE_INT_STATUS
);
913 if (status
& (tx_intr
| rx_intr
)) {
917 fe_int_enable(tx_intr
| rx_intr
);
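
/* fe_poll() acks the tx/rx interrupt causes only after servicing them
 * and re-checks FE_REG_FE_INT_STATUS before napi_complete(), which
 * closes the race where new work arrives between the final poll and
 * re-enabling interrupts */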

static void fe_tx_timeout(struct net_device *dev)
{
	struct fe_priv *priv = netdev_priv(dev);

	priv->netdev->stats.tx_errors++;
	netif_err(priv, tx_err, dev,
			"transmit timed out\n");
	netif_info(priv, drv, dev, "dma_cfg:%08x\n",
			fe_reg_r32(FE_REG_PDMA_GLO_CFG));
	netif_info(priv, drv, dev, "tx_ring=%d, "
			"base=%08x, max=%u, ctx=%u, dtx=%u, fdx=%d\n", 0,
			fe_reg_r32(FE_REG_TX_BASE_PTR0),
			fe_reg_r32(FE_REG_TX_MAX_CNT0),
			fe_reg_r32(FE_REG_TX_CTX_IDX0),
			fe_reg_r32(FE_REG_TX_DTX_IDX0),
			priv->tx_free_idx);
	netif_info(priv, drv, dev, "rx_ring=%d, "
			"base=%08x, max=%u, calc=%u, drx=%u\n", 0,
			fe_reg_r32(FE_REG_RX_BASE_PTR0),
			fe_reg_r32(FE_REG_RX_MAX_CNT0),
			fe_reg_r32(FE_REG_RX_CALC_IDX0),
			fe_reg_r32(FE_REG_RX_DRX_IDX0));

	if (!test_and_set_bit(FE_FLAG_RESET_PENDING, priv->pending_flags))
		schedule_work(&priv->pending_work);
}

static irqreturn_t fe_handle_irq(int irq, void *dev)
{
	struct fe_priv *priv = netdev_priv(dev);
	u32 status, int_mask;

	status = fe_reg_r32(FE_REG_FE_INT_STATUS);

	if (unlikely(!status))
		return IRQ_NONE;

	int_mask = (priv->soc->rx_int | priv->soc->tx_int);
	if (likely(status & int_mask)) {
		fe_int_disable(int_mask);
		napi_schedule(&priv->rx_napi);
	} else {
		fe_reg_w32(status, FE_REG_FE_INT_STATUS);
	}

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void fe_poll_controller(struct net_device *dev)
{
	struct fe_priv *priv = netdev_priv(dev);
	u32 int_mask = priv->soc->tx_int | priv->soc->rx_int;

	fe_int_disable(int_mask);
	fe_handle_irq(dev->irq, dev);
	fe_int_enable(int_mask);
}
#endif

int fe_set_clock_cycle(struct fe_priv *priv)
{
	unsigned long sysclk = priv->sysclk;

	if (!sysclk)
		return -EINVAL;

	sysclk /= FE_US_CYC_CNT_DIVISOR;
	sysclk <<= FE_US_CYC_CNT_SHIFT;

	fe_w32((fe_r32(FE_FE_GLO_CFG) &
			~(FE_US_CYC_CNT_MASK << FE_US_CYC_CNT_SHIFT)) |
			sysclk,
			FE_FE_GLO_CFG);
	return 0;
}
*priv
)
1007 fwd_cfg
= fe_r32(FE_GDMA1_FWD_CFG
);
1009 /* disable jumbo frame */
1010 if (priv
->flags
& FE_FLAG_JUMBO_FRAME
)
1011 fwd_cfg
&= ~FE_GDM1_JMB_EN
;
1013 /* set unicast/multicast/broadcast frame to cpu */
1016 fe_w32(fwd_cfg
, FE_GDMA1_FWD_CFG
);

static void fe_rxcsum_config(bool enable)
{
	if (enable)
		fe_w32(fe_r32(FE_GDMA1_FWD_CFG) | (FE_GDM1_ICS_EN |
					FE_GDM1_TCS_EN | FE_GDM1_UCS_EN),
				FE_GDMA1_FWD_CFG);
	else
		fe_w32(fe_r32(FE_GDMA1_FWD_CFG) & ~(FE_GDM1_ICS_EN |
					FE_GDM1_TCS_EN | FE_GDM1_UCS_EN),
				FE_GDMA1_FWD_CFG);
}

static void fe_txcsum_config(bool enable)
{
	if (enable)
		fe_w32(fe_r32(FE_CDMA_CSG_CFG) | (FE_ICS_GEN_EN |
					FE_TCS_GEN_EN | FE_UCS_GEN_EN),
				FE_CDMA_CSG_CFG);
	else
		fe_w32(fe_r32(FE_CDMA_CSG_CFG) & ~(FE_ICS_GEN_EN |
					FE_TCS_GEN_EN | FE_UCS_GEN_EN),
				FE_CDMA_CSG_CFG);
}

void fe_csum_config(struct fe_priv *priv)
{
	struct net_device *dev = priv_netdev(priv);

	fe_txcsum_config((dev->features & NETIF_F_IP_CSUM));
	fe_rxcsum_config((dev->features & NETIF_F_RXCSUM));
}

static int fe_hw_init(struct net_device *dev)
{
	struct fe_priv *priv = netdev_priv(dev);
	int i, err;

	err = devm_request_irq(priv->device, dev->irq, fe_handle_irq, 0,
			dev_name(priv->device), dev);
	if (err)
		return err;

	if (priv->soc->set_mac)
		priv->soc->set_mac(priv, dev->dev_addr);
	else
		fe_hw_set_macaddr(priv, dev->dev_addr);

	fe_int_disable(priv->soc->tx_int | priv->soc->rx_int);

	/* frame engine will push VLAN tag according to the VIDX field in the tx desc */
	if (fe_reg_table[FE_REG_FE_DMA_VID_BASE])
		for (i = 0; i < 16; i += 2)
			fe_w32(((i + 1) << 16) + i,
					fe_reg_table[FE_REG_FE_DMA_VID_BASE] +
					(i * 2));

	BUG_ON(!priv->soc->fwd_config);
	if (priv->soc->fwd_config(priv))
		netdev_err(dev, "unable to get clock\n");

	if (fe_reg_table[FE_REG_FE_RST_GL]) {
		fe_reg_w32(1, FE_REG_FE_RST_GL);
		fe_reg_w32(0, FE_REG_FE_RST_GL);
	}

	return 0;
}

static int fe_open(struct net_device *dev)
{
	struct fe_priv *priv = netdev_priv(dev);
	unsigned long flags;
	u32 val;
	int err;

	err = fe_init_dma(priv);
	if (err)
		goto err_out;

	spin_lock_irqsave(&priv->page_lock, flags);
	napi_enable(&priv->rx_napi);

	val = FE_TX_WB_DDONE | FE_RX_DMA_EN | FE_TX_DMA_EN;
	val |= priv->soc->pdma_glo_cfg;
	fe_reg_w32(val, FE_REG_PDMA_GLO_CFG);

	spin_unlock_irqrestore(&priv->page_lock, flags);

	if (priv->phy)
		priv->phy->start(priv);

	if (priv->soc->has_carrier && priv->soc->has_carrier(priv))
		netif_carrier_on(dev);

	netif_start_queue(dev);
	fe_int_enable(priv->soc->tx_int | priv->soc->rx_int);

	return 0;

err_out:
	fe_free_dma(priv);
	return err;
}

static int fe_stop(struct net_device *dev)
{
	struct fe_priv *priv = netdev_priv(dev);
	unsigned long flags;
	int i;

	fe_int_disable(priv->soc->tx_int | priv->soc->rx_int);

	netif_tx_disable(dev);

	if (priv->phy)
		priv->phy->stop(priv);

	spin_lock_irqsave(&priv->page_lock, flags);
	napi_disable(&priv->rx_napi);

	fe_reg_w32(fe_reg_r32(FE_REG_PDMA_GLO_CFG) &
			~(FE_TX_WB_DDONE | FE_RX_DMA_EN | FE_TX_DMA_EN),
			FE_REG_PDMA_GLO_CFG);
	spin_unlock_irqrestore(&priv->page_lock, flags);

	/* wait for the dma engine to stop */
	for (i = 0; i < 10; i++) {
		if (fe_reg_r32(FE_REG_PDMA_GLO_CFG) &
				(FE_TX_DMA_BUSY | FE_RX_DMA_BUSY)) {
			msleep(10);
			continue;
		}
		break;
	}

	fe_free_dma(priv);

	return 0;
}

static int __init fe_init(struct net_device *dev)
{
	struct fe_priv *priv = netdev_priv(dev);
	struct device_node *port;
	int err;

	BUG_ON(!priv->soc->reset_fe);
	priv->soc->reset_fe();

	if (priv->soc->switch_init)
		priv->soc->switch_init(priv);

	memcpy(dev->dev_addr, priv->soc->mac, ETH_ALEN);
	of_get_mac_address_mtd(priv->device->of_node, dev->dev_addr);

	err = fe_mdio_init(priv);
	if (err)
		return err;

	if (priv->soc->port_init)
		for_each_child_of_node(priv->device->of_node, port)
			if (of_device_is_compatible(port, "ralink,eth-port") &&
			    of_device_is_available(port))
				priv->soc->port_init(priv, port);

	err = priv->phy->connect(priv);
	if (err)
		goto err_phy_disconnect;

	err = fe_hw_init(dev);
	if (err)
		goto err_phy_disconnect;

	if (priv->soc->switch_config)
		priv->soc->switch_config(priv);

	return 0;

err_phy_disconnect:
	priv->phy->disconnect(priv);
	fe_mdio_cleanup(priv);

	return err;
}

static void fe_uninit(struct net_device *dev)
{
	struct fe_priv *priv = netdev_priv(dev);

	if (priv->phy)
		priv->phy->disconnect(priv);
	fe_mdio_cleanup(priv);

	fe_reg_w32(0, FE_REG_FE_INT_ENABLE);
	free_irq(dev->irq, dev);
}

static int fe_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct fe_priv *priv = netdev_priv(dev);

	if (!priv->phy_dev)
		return -ENODEV;

	switch (cmd) {
	case SIOCETHTOOL:
		return phy_ethtool_ioctl(priv->phy_dev,
				(void *) ifr->ifr_data);
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		return phy_mii_ioctl(priv->phy_dev, ifr, cmd);
	default:
		break;
	}

	return -EOPNOTSUPP;
}

static int fe_change_mtu(struct net_device *dev, int new_mtu)
{
	struct fe_priv *priv = netdev_priv(dev);
	int frag_size, old_mtu;
	u32 fwd_cfg;

	if (!(priv->flags & FE_FLAG_JUMBO_FRAME))
		return eth_change_mtu(dev, new_mtu);

	frag_size = fe_max_frag_size(new_mtu);
	if (new_mtu < 68 || frag_size > PAGE_SIZE)
		return -EINVAL;

	old_mtu = dev->mtu;
	dev->mtu = new_mtu;

	/* return early if the buffer sizes will not change */
	if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN)
		return 0;
	if (old_mtu > ETH_DATA_LEN && new_mtu > ETH_DATA_LEN)
		return 0;

	if (new_mtu <= ETH_DATA_LEN) {
		priv->frag_size = fe_max_frag_size(ETH_DATA_LEN);
		priv->rx_buf_size = fe_max_buf_size(priv->frag_size);
	} else {
		priv->frag_size = PAGE_SIZE;
		priv->rx_buf_size = fe_max_buf_size(PAGE_SIZE);
	}

	if (!netif_running(dev))
		return 0;

	fe_stop(dev);
	fwd_cfg = fe_r32(FE_GDMA1_FWD_CFG);
	if (new_mtu <= ETH_DATA_LEN) {
		fwd_cfg &= ~FE_GDM1_JMB_EN;
	} else {
		fwd_cfg &= ~(FE_GDM1_JMB_LEN_MASK << FE_GDM1_JMB_LEN_SHIFT);
		fwd_cfg |= (DIV_ROUND_UP(frag_size, 1024) <<
				FE_GDM1_JMB_LEN_SHIFT) | FE_GDM1_JMB_EN;
	}
	fe_w32(fwd_cfg, FE_GDMA1_FWD_CFG);

	return fe_open(dev);
}
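
/* crossing the ETH_DATA_LEN boundary changes the rx frag size, so the
 * device is cycled through fe_stop()/fe_open() to rebuild the rings;
 * GDM1_JMB_LEN is programmed in 1 KiB units, hence
 * DIV_ROUND_UP(frag_size, 1024) */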

static const struct net_device_ops fe_netdev_ops = {
	.ndo_init		= fe_init,
	.ndo_uninit		= fe_uninit,
	.ndo_open		= fe_open,
	.ndo_stop		= fe_stop,
	.ndo_start_xmit		= fe_start_xmit,
	.ndo_set_mac_address	= fe_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= fe_do_ioctl,
	.ndo_change_mtu		= fe_change_mtu,
	.ndo_tx_timeout		= fe_tx_timeout,
	.ndo_get_stats64	= fe_get_stats64,
	.ndo_vlan_rx_add_vid	= fe_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= fe_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= fe_poll_controller,
#endif
};

static void fe_reset_pending(struct fe_priv *priv)
{
	struct net_device *dev = priv->netdev;
	int err;

	rtnl_lock();
	fe_stop(dev);

	err = fe_open(dev);
	if (err) {
		netif_alert(priv, ifup, dev,
				"Driver up/down cycle failed, closing device.\n");
		dev_close(dev);
	}
	rtnl_unlock();
}

static const struct fe_work_t fe_work[] = {
	{FE_FLAG_RESET_PENDING, fe_reset_pending},
};

static void fe_pending_work(struct work_struct *work)
{
	struct fe_priv *priv = container_of(work, struct fe_priv, pending_work);
	int i;
	bool pending;

	for (i = 0; i < ARRAY_SIZE(fe_work); i++) {
		pending = test_and_clear_bit(fe_work[i].bitnr,
				priv->pending_flags);
		if (pending)
			fe_work[i].action(priv);
	}
}

static int fe_probe(struct platform_device *pdev)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	const struct of_device_id *match;
	struct fe_soc_data *soc;
	struct net_device *netdev;
	struct fe_priv *priv;
	struct clk *sysclk;
	int err;

	device_reset(&pdev->dev);

	match = of_match_device(of_fe_match, &pdev->dev);
	soc = (struct fe_soc_data *) match->data;

	if (soc->reg_table)
		fe_reg_table = soc->reg_table;
	else
		soc->reg_table = fe_reg_table;

	fe_base = devm_request_and_ioremap(&pdev->dev, res);
	if (!fe_base) {
		err = -EADDRNOTAVAIL;
		goto err_out;
	}

	netdev = alloc_etherdev(sizeof(*priv));
	if (!netdev) {
		dev_err(&pdev->dev, "alloc_etherdev failed\n");
		err = -ENOMEM;
		goto err_iounmap;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);
	netdev->netdev_ops = &fe_netdev_ops;
	netdev->base_addr = (unsigned long) fe_base;

	netdev->irq = platform_get_irq(pdev, 0);
	if (netdev->irq < 0) {
		dev_err(&pdev->dev, "no IRQ resource found\n");
		err = -ENXIO;
		goto err_free_dev;
	}

	soc->init_data(soc, netdev);
	/* fake NETIF_F_HW_VLAN_CTAG_RX for good GRO performance */
	netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
	netdev->vlan_features = netdev->hw_features &
		~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
	netdev->features |= netdev->hw_features;

	/* fake rx vlan filter func. to support tx vlan offload func */
	if (fe_reg_table[FE_REG_FE_DMA_VID_BASE])
		netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;

	priv = netdev_priv(netdev);
	spin_lock_init(&priv->page_lock);
	if (fe_reg_table[FE_REG_FE_COUNTER_BASE]) {
		priv->hw_stats = kzalloc(sizeof(*priv->hw_stats), GFP_KERNEL);
		if (!priv->hw_stats) {
			err = -ENOMEM;
			goto err_free_dev;
		}
		spin_lock_init(&priv->hw_stats->stats_lock);
	}

	sysclk = devm_clk_get(&pdev->dev, NULL);
	if (!IS_ERR(sysclk))
		priv->sysclk = clk_get_rate(sysclk);

	priv->netdev = netdev;
	priv->device = &pdev->dev;
	priv->soc = soc;
	priv->msg_enable = netif_msg_init(fe_msg_level, FE_DEFAULT_MSG_ENABLE);
	priv->frag_size = fe_max_frag_size(ETH_DATA_LEN);
	priv->rx_buf_size = fe_max_buf_size(priv->frag_size);
	if (priv->frag_size > PAGE_SIZE) {
		dev_err(&pdev->dev, "error frag size.\n");
		err = -EINVAL;
		goto err_free_dev;
	}
	INIT_WORK(&priv->pending_work, fe_pending_work);

	netif_napi_add(netdev, &priv->rx_napi, fe_poll, 32);
	fe_set_ethtool_ops(netdev);

	err = register_netdev(netdev);
	if (err) {
		dev_err(&pdev->dev, "error bringing up device\n");
		goto err_free_dev;
	}

	platform_set_drvdata(pdev, netdev);

	netif_info(priv, probe, netdev, "ralink at 0x%08lx, irq %d\n",
			netdev->base_addr, netdev->irq);

	return 0;

err_free_dev:
	free_netdev(netdev);
err_iounmap:
	devm_iounmap(&pdev->dev, fe_base);
err_out:
	return err;
}

static int fe_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct fe_priv *priv = netdev_priv(dev);

	netif_napi_del(&priv->rx_napi);

	kfree(priv->hw_stats);

	cancel_work_sync(&priv->pending_work);

	unregister_netdev(dev);
	free_netdev(dev);
	platform_set_drvdata(pdev, NULL);

	return 0;
}

static struct platform_driver fe_driver = {
	.probe = fe_probe,
	.remove = fe_remove,
	.driver = {
		.name = "ralink_soc_eth",
		.owner = THIS_MODULE,
		.of_match_table = of_fe_match,
	},
};

static int __init init_rtfe(void)
{
	int ret;

	ret = rtesw_init();
	if (ret)
		return ret;

	ret = platform_driver_register(&fe_driver);
	if (ret)
		rtesw_exit();

	return ret;
}

static void __exit exit_rtfe(void)
{
	platform_driver_unregister(&fe_driver);
	rtesw_exit();
}

module_init(init_rtfe);
module_exit(exit_rtfe);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
MODULE_DESCRIPTION("Ethernet driver for Ralink SoC");
MODULE_VERSION(FE_DRV_VERSION);