/*
 * Atheros AR71xx built-in ethernet mac driver
 *
 * Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
 * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
 *
 * Based on Atheros' AG7100 driver
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include "ag71xx.h"
#define AG71XX_DEFAULT_MSG_ENABLE	\
	(NETIF_MSG_DRV			\
	| NETIF_MSG_PROBE		\
	| NETIF_MSG_LINK		\
	| NETIF_MSG_TIMER		\
	| NETIF_MSG_IFDOWN		\
	| NETIF_MSG_IFUP		\
	| NETIF_MSG_RX_ERR		\
	| NETIF_MSG_TX_ERR)

static int ag71xx_msg_level = -1;

module_param_named(msg_level, ag71xx_msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
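/*
 * A negative msg_level keeps the driver defaults: netif_msg_init() falls
 * back to AG71XX_DEFAULT_MSG_ENABLE in that case (see ag71xx_probe below).
 * Any other value is interpreted as a NETIF_MSG_* bitmask, e.g. loading
 * the module with "msg_level=0x3" would enable only NETIF_MSG_DRV and
 * NETIF_MSG_PROBE.
 */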
#define ETH_SWITCH_HEADER_LEN	2

static int ag71xx_tx_packets(struct ag71xx *ag, bool flush);
static void ag71xx_qca955x_sgmii_init(void);
static inline unsigned int ag71xx_max_frame_len(unsigned int mtu)
{
	return ETH_SWITCH_HEADER_LEN + ETH_HLEN + VLAN_HLEN + mtu + ETH_FCS_LEN;
}
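/*
 * Example: for the default MTU of 1500 this comes to
 * 2 (switch header) + 14 (ETH_HLEN) + 4 (VLAN_HLEN) + 1500 + 4 (ETH_FCS_LEN)
 * = 1524 bytes on the wire.
 */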
static void ag71xx_dump_dma_regs(struct ag71xx *ag)
{
	DBG("%s: dma_tx_ctrl=%08x, dma_tx_desc=%08x, dma_tx_status=%08x\n",
	    ag->dev->name,
	    ag71xx_rr(ag, AG71XX_REG_TX_CTRL),
	    ag71xx_rr(ag, AG71XX_REG_TX_DESC),
	    ag71xx_rr(ag, AG71XX_REG_TX_STATUS));

	DBG("%s: dma_rx_ctrl=%08x, dma_rx_desc=%08x, dma_rx_status=%08x\n",
	    ag->dev->name,
	    ag71xx_rr(ag, AG71XX_REG_RX_CTRL),
	    ag71xx_rr(ag, AG71XX_REG_RX_DESC),
	    ag71xx_rr(ag, AG71XX_REG_RX_STATUS));
}
static void ag71xx_dump_regs(struct ag71xx *ag)
{
	DBG("%s: mac_cfg1=%08x, mac_cfg2=%08x, ipg=%08x, hdx=%08x, mfl=%08x\n",
	    ag->dev->name,
	    ag71xx_rr(ag, AG71XX_REG_MAC_CFG1),
	    ag71xx_rr(ag, AG71XX_REG_MAC_CFG2),
	    ag71xx_rr(ag, AG71XX_REG_MAC_IPG),
	    ag71xx_rr(ag, AG71XX_REG_MAC_HDX),
	    ag71xx_rr(ag, AG71XX_REG_MAC_MFL));
	DBG("%s: mac_ifctl=%08x, mac_addr1=%08x, mac_addr2=%08x\n",
	    ag->dev->name,
	    ag71xx_rr(ag, AG71XX_REG_MAC_IFCTL),
	    ag71xx_rr(ag, AG71XX_REG_MAC_ADDR1),
	    ag71xx_rr(ag, AG71XX_REG_MAC_ADDR2));
	DBG("%s: fifo_cfg0=%08x, fifo_cfg1=%08x, fifo_cfg2=%08x\n",
	    ag->dev->name,
	    ag71xx_rr(ag, AG71XX_REG_FIFO_CFG0),
	    ag71xx_rr(ag, AG71XX_REG_FIFO_CFG1),
	    ag71xx_rr(ag, AG71XX_REG_FIFO_CFG2));
	DBG("%s: fifo_cfg3=%08x, fifo_cfg4=%08x, fifo_cfg5=%08x\n",
	    ag->dev->name,
	    ag71xx_rr(ag, AG71XX_REG_FIFO_CFG3),
	    ag71xx_rr(ag, AG71XX_REG_FIFO_CFG4),
	    ag71xx_rr(ag, AG71XX_REG_FIFO_CFG5));
}
static inline void ag71xx_dump_intr(struct ag71xx *ag, char *label, u32 intr)
{
	DBG("%s: %s intr=%08x %s%s%s%s%s%s\n",
	    ag->dev->name, label, intr,
	    (intr & AG71XX_INT_TX_PS) ? "TXPS " : "",
	    (intr & AG71XX_INT_TX_UR) ? "TXUR " : "",
	    (intr & AG71XX_INT_TX_BE) ? "TXBE " : "",
	    (intr & AG71XX_INT_RX_PR) ? "RXPR " : "",
	    (intr & AG71XX_INT_RX_OF) ? "RXOF " : "",
	    (intr & AG71XX_INT_RX_BE) ? "RXBE " : "");
}
static void ag71xx_ring_tx_clean(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->tx_ring;
	struct net_device *dev = ag->dev;
	int ring_mask = BIT(ring->order) - 1;
	u32 bytes_compl = 0, pkts_compl = 0;

	while (ring->curr != ring->dirty) {
		struct ag71xx_desc *desc;
		u32 i = ring->dirty & ring_mask;

		desc = ag71xx_ring_desc(ring, i);
		if (!ag71xx_desc_empty(desc)) {
			desc->ctrl = 0;
			dev->stats.tx_errors++;
		}

		if (ring->buf[i].skb) {
			bytes_compl += ring->buf[i].len;
			pkts_compl++;
			dev_kfree_skb_any(ring->buf[i].skb);
		}
		ring->buf[i].skb = NULL;
		ring->dirty++;
	}

	/* flush descriptors */
	wmb();

	netdev_completed_queue(dev, pkts_compl, bytes_compl);
}
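/*
 * Ring bookkeeping used throughout this file: curr and dirty are
 * free-running counters, not array indices. Since the ring size is a
 * power of two (BIT(ring->order)), "counter & ring_mask" yields the slot,
 * and "curr - dirty" is the number of in-flight entries even after the
 * counters wrap around.
 */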
static void ag71xx_ring_tx_init(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->tx_ring;
	int ring_size = BIT(ring->order);
	int ring_mask = BIT(ring->order) - 1;
	int i;

	for (i = 0; i < ring_size; i++) {
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);

		desc->next = (u32) (ring->descs_dma +
			AG71XX_DESC_SIZE * ((i + 1) & ring_mask));

		desc->ctrl = DESC_EMPTY;
		ring->buf[i].skb = NULL;
	}

	/* flush descriptors */
	wmb();

	ring->curr = 0;
	ring->dirty = 0;
	netdev_reset_queue(ag->dev);
}
static void ag71xx_ring_rx_clean(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->rx_ring;
	int ring_size = BIT(ring->order);
	int i;

	if (!ring->buf)
		return;

	for (i = 0; i < ring_size; i++)
		if (ring->buf[i].rx_buf) {
			dma_unmap_single(&ag->pdev->dev, ring->buf[i].dma_addr,
					 ag->rx_buf_size, DMA_FROM_DEVICE);
			skb_free_frag(ring->buf[i].rx_buf);
		}
}
static int ag71xx_buffer_offset(struct ag71xx *ag)
{
	int offset = NET_SKB_PAD;

	/*
	 * On AR71xx/AR91xx packets must be 4-byte aligned.
	 *
	 * When using builtin AR8216 support, hardware adds a 2-byte header,
	 * so we don't need any extra alignment in that case.
	 */
	if (!ag71xx_get_pdata(ag)->is_ar724x || ag71xx_has_ar8216(ag))
		return offset;

	return offset + NET_IP_ALIGN;
}
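/*
 * NET_SKB_PAD is the generic headroom reservation; the additional
 * NET_IP_ALIGN (normally 2 bytes) shifts the 14-byte ethernet header so
 * that the IP header behind it lands on a 4-byte boundary.
 */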
static int ag71xx_buffer_size(struct ag71xx *ag)
{
	return ag->rx_buf_size +
	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}
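/*
 * The tail room for struct skb_shared_info is what later allows
 * ag71xx_rx_packets() to wrap the raw buffer with build_skb() instead of
 * copying: build_skb() places the shared info right after the data area.
 */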
static bool ag71xx_fill_rx_buf(struct ag71xx *ag, struct ag71xx_buf *buf,
			       int offset,
			       void *(*alloc)(unsigned int size))
{
	struct ag71xx_ring *ring = &ag->rx_ring;
	struct ag71xx_desc *desc = ag71xx_ring_desc(ring, buf - &ring->buf[0]);
	void *data;

	data = alloc(ag71xx_buffer_size(ag));
	if (!data)
		return false;

	buf->rx_buf = data;
	buf->dma_addr = dma_map_single(&ag->pdev->dev, data, ag->rx_buf_size,
				       DMA_FROM_DEVICE);
	desc->data = (u32) buf->dma_addr + offset;
	return true;
}
static int ag71xx_ring_rx_init(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->rx_ring;
	int ring_size = BIT(ring->order);
	int ring_mask = BIT(ring->order) - 1;
	unsigned int i;
	int ret;
	int offset = ag71xx_buffer_offset(ag);

	ret = 0;
	for (i = 0; i < ring_size; i++) {
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);

		desc->next = (u32) (ring->descs_dma +
			AG71XX_DESC_SIZE * ((i + 1) & ring_mask));

		DBG("ag71xx: RX desc at %p, next is %08x\n",
		    desc, desc->next);
	}

	for (i = 0; i < ring_size; i++) {
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);

		if (!ag71xx_fill_rx_buf(ag, &ring->buf[i], offset,
					netdev_alloc_frag)) {
			ret = -ENOMEM;
			break;
		}

		desc->ctrl = DESC_EMPTY;
	}

	/* flush descriptors */
	wmb();

	ring->curr = 0;
	ring->dirty = 0;

	return ret;
}
static int ag71xx_ring_rx_refill(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->rx_ring;
	int ring_mask = BIT(ring->order) - 1;
	int offset = ag71xx_buffer_offset(ag);
	unsigned int count;

	count = 0;
	for (; ring->curr - ring->dirty > 0; ring->dirty++) {
		struct ag71xx_desc *desc;
		unsigned int i;

		i = ring->dirty & ring_mask;
		desc = ag71xx_ring_desc(ring, i);

		if (!ring->buf[i].rx_buf &&
		    !ag71xx_fill_rx_buf(ag, &ring->buf[i], offset,
					napi_alloc_frag))
			break;

		desc->ctrl = DESC_EMPTY;
		count++;
	}

	/* flush descriptors */
	wmb();

	DBG("%s: %u rx descriptors refilled\n", ag->dev->name, count);

	return count;
}
static int ag71xx_rings_init(struct ag71xx *ag)
{
	struct ag71xx_ring *tx = &ag->tx_ring;
	struct ag71xx_ring *rx = &ag->rx_ring;
	int ring_size = BIT(tx->order) + BIT(rx->order);
	int tx_size = BIT(tx->order);

	tx->buf = kzalloc(ring_size * sizeof(*tx->buf), GFP_KERNEL);
	if (!tx->buf)
		return -ENOMEM;

	tx->descs_cpu = dma_alloc_coherent(&ag->pdev->dev, ring_size * AG71XX_DESC_SIZE,
					   &tx->descs_dma, GFP_KERNEL);
	if (!tx->descs_cpu) {
		kfree(tx->buf);
		tx->buf = NULL;
		return -ENOMEM;
	}

	rx->buf = &tx->buf[tx_size];
	rx->descs_cpu = ((void *)tx->descs_cpu) + tx_size * AG71XX_DESC_SIZE;
	rx->descs_dma = tx->descs_dma + tx_size * AG71XX_DESC_SIZE;

	ag71xx_ring_tx_init(ag);
	return ag71xx_ring_rx_init(ag);
}
static void ag71xx_rings_free(struct ag71xx *ag)
{
	struct ag71xx_ring *tx = &ag->tx_ring;
	struct ag71xx_ring *rx = &ag->rx_ring;
	int ring_size = BIT(tx->order) + BIT(rx->order);

	if (tx->descs_cpu)
		dma_free_coherent(&ag->pdev->dev, ring_size * AG71XX_DESC_SIZE,
				  tx->descs_cpu, tx->descs_dma);

	kfree(tx->buf);

	tx->descs_cpu = NULL;
	rx->descs_cpu = NULL;
	tx->buf = NULL;
	rx->buf = NULL;
}
static void ag71xx_rings_cleanup(struct ag71xx *ag)
{
	ag71xx_ring_rx_clean(ag);
	ag71xx_ring_tx_clean(ag);
	ag71xx_rings_free(ag);

	netdev_reset_queue(ag->dev);
}
static unsigned char *ag71xx_speed_str(struct ag71xx *ag)
{
	switch (ag->speed) {
	case SPEED_1000:
		return "1000";
	case SPEED_100:
		return "100";
	case SPEED_10:
		return "10";
	}

	return "?";
}
static void ag71xx_hw_set_macaddr(struct ag71xx *ag, unsigned char *mac)
{
	u32 t;

	t = (((u32) mac[5]) << 24) | (((u32) mac[4]) << 16)
	  | (((u32) mac[3]) << 8) | ((u32) mac[2]);

	ag71xx_wr(ag, AG71XX_REG_MAC_ADDR1, t);

	t = (((u32) mac[1]) << 24) | (((u32) mac[0]) << 16);
	ag71xx_wr(ag, AG71XX_REG_MAC_ADDR2, t);
}
static void ag71xx_dma_reset(struct ag71xx *ag)
{
	u32 val;
	int i;

	ag71xx_dump_dma_regs(ag);

	/* stop RX and TX */
	ag71xx_wr(ag, AG71XX_REG_RX_CTRL, 0);
	ag71xx_wr(ag, AG71XX_REG_TX_CTRL, 0);

	/*
	 * give the hardware some time to really stop all rx/tx activity
	 * clearing the descriptors too early causes random memory corruption
	 */
	mdelay(1);

	/* clear descriptor addresses */
	ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->stop_desc_dma);
	ag71xx_wr(ag, AG71XX_REG_RX_DESC, ag->stop_desc_dma);

	/* clear pending RX/TX interrupts */
	for (i = 0; i < 256; i++) {
		ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_PR);
		ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_PS);
	}

	/* clear pending errors */
	ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_BE | RX_STATUS_OF);
	ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_BE | TX_STATUS_UR);

	val = ag71xx_rr(ag, AG71XX_REG_RX_STATUS);
	if (val)
		pr_alert("%s: unable to clear DMA Rx status: %08x\n",
			 ag->dev->name, val);

	val = ag71xx_rr(ag, AG71XX_REG_TX_STATUS);

	/* mask out reserved bits */
	val &= ~0xff000000;

	if (val)
		pr_alert("%s: unable to clear DMA Tx status: %08x\n",
			 ag->dev->name, val);

	ag71xx_dump_dma_regs(ag);
}
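/*
 * The 256 paired status writes above act as a brute-force drain: each
 * write acknowledges one received/sent-packet event, and 256 should
 * exceed anything the rings can have accumulated. stop_desc (set up in
 * ag71xx_probe) is a single self-linked dummy descriptor, giving the DMA
 * engine a harmless place to point while the real rings are swapped out.
 */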
#define MAC_CFG1_INIT	(MAC_CFG1_RXE | MAC_CFG1_TXE | \
			 MAC_CFG1_SRX | MAC_CFG1_STX)

#define FIFO_CFG0_INIT	(FIFO_CFG0_ALL << FIFO_CFG0_ENABLE_SHIFT)

#define FIFO_CFG4_INIT	(FIFO_CFG4_DE | FIFO_CFG4_DV | FIFO_CFG4_FC | \
			 FIFO_CFG4_CE | FIFO_CFG4_CR | FIFO_CFG4_LM | \
			 FIFO_CFG4_LO | FIFO_CFG4_OK | FIFO_CFG4_MC | \
			 FIFO_CFG4_BC | FIFO_CFG4_DR | FIFO_CFG4_LE | \
			 FIFO_CFG4_CF | FIFO_CFG4_PF | FIFO_CFG4_UO | \
			 FIFO_CFG4_VT)

#define FIFO_CFG5_INIT	(FIFO_CFG5_DE | FIFO_CFG5_DV | FIFO_CFG5_FC | \
			 FIFO_CFG5_CE | FIFO_CFG5_LO | FIFO_CFG5_OK | \
			 FIFO_CFG5_MC | FIFO_CFG5_BC | FIFO_CFG5_DR | \
			 FIFO_CFG5_CF | FIFO_CFG5_PF | FIFO_CFG5_VT | \
			 FIFO_CFG5_LE | FIFO_CFG5_FT | FIFO_CFG5_16 | \
			 FIFO_CFG5_17 | FIFO_CFG5_SF)
static void ag71xx_hw_stop(struct ag71xx *ag)
{
	/* disable all interrupts and stop the rx/tx engine */
	ag71xx_wr(ag, AG71XX_REG_INT_ENABLE, 0);
	ag71xx_wr(ag, AG71XX_REG_RX_CTRL, 0);
	ag71xx_wr(ag, AG71XX_REG_TX_CTRL, 0);
}
static void ag71xx_hw_setup(struct ag71xx *ag)
{
	struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
	u32 init = MAC_CFG1_INIT;

	/* setup MAC configuration registers */
	if (pdata->use_flow_control)
		init |= MAC_CFG1_TFC | MAC_CFG1_RFC;
	ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, init);

	ag71xx_sb(ag, AG71XX_REG_MAC_CFG2,
		  MAC_CFG2_PAD_CRC_EN | MAC_CFG2_LEN_CHECK);

	/* setup max frame length to zero */
	ag71xx_wr(ag, AG71XX_REG_MAC_MFL, 0);

	/* setup FIFO configuration registers */
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG0, FIFO_CFG0_INIT);
	if (pdata->is_ar724x) {
		ag71xx_wr(ag, AG71XX_REG_FIFO_CFG1, 0x0010ffff);
		ag71xx_wr(ag, AG71XX_REG_FIFO_CFG2, 0x015500aa);
	} else {
		ag71xx_wr(ag, AG71XX_REG_FIFO_CFG1, 0x0fff0000);
		ag71xx_wr(ag, AG71XX_REG_FIFO_CFG2, 0x00001fff);
	}
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG4, FIFO_CFG4_INIT);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG5, FIFO_CFG5_INIT);
}
static void ag71xx_hw_init(struct ag71xx *ag)
{
	struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
	u32 reset_mask = pdata->reset_bit;

	ag71xx_hw_stop(ag);

	if (pdata->is_ar724x) {
		u32 reset_phy = reset_mask;

		reset_phy &= AR71XX_RESET_GE0_PHY | AR71XX_RESET_GE1_PHY;
		reset_mask &= ~(AR71XX_RESET_GE0_PHY | AR71XX_RESET_GE1_PHY);

		ath79_device_reset_set(reset_phy);
		msleep(50);
		ath79_device_reset_clear(reset_phy);
		msleep(200);
	}

	ag71xx_sb(ag, AG71XX_REG_MAC_CFG1, MAC_CFG1_SR);
	udelay(20);

	ath79_device_reset_set(reset_mask);
	msleep(100);
	ath79_device_reset_clear(reset_mask);
	msleep(200);

	ag71xx_hw_setup(ag);

	ag71xx_dma_reset(ag);
}
static void ag71xx_fast_reset(struct ag71xx *ag)
{
	struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
	struct net_device *dev = ag->dev;
	u32 reset_mask = pdata->reset_bit;
	u32 rx_ds;
	u32 mii_reg;

	reset_mask &= AR71XX_RESET_GE0_MAC | AR71XX_RESET_GE1_MAC;

	ag71xx_hw_stop(ag);
	wmb();

	mii_reg = ag71xx_rr(ag, AG71XX_REG_MII_CFG);
	rx_ds = ag71xx_rr(ag, AG71XX_REG_RX_DESC);

	ag71xx_tx_packets(ag, true);

	ath79_device_reset_set(reset_mask);
	udelay(10);
	ath79_device_reset_clear(reset_mask);
	udelay(10);

	ag71xx_dma_reset(ag);
	ag71xx_hw_setup(ag);
	ag->tx_ring.curr = 0;
	ag->tx_ring.dirty = 0;
	netdev_reset_queue(ag->dev);

	/* setup max frame length */
	ag71xx_wr(ag, AG71XX_REG_MAC_MFL,
		  ag71xx_max_frame_len(ag->dev->mtu));

	ag71xx_wr(ag, AG71XX_REG_RX_DESC, rx_ds);
	ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->tx_ring.descs_dma);
	ag71xx_wr(ag, AG71XX_REG_MII_CFG, mii_reg);

	ag71xx_hw_set_macaddr(ag, dev->dev_addr);
}
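/*
 * Note the save/restore pattern above: only AG71XX_REG_MII_CFG and the
 * current RX descriptor pointer are preserved across the MAC reset;
 * everything else (FIFO setup, max frame length, MAC address, TX ring
 * base) is simply reprogrammed from scratch afterwards.
 */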
static void ag71xx_hw_start(struct ag71xx *ag)
{
	/* start RX engine */
	ag71xx_wr(ag, AG71XX_REG_RX_CTRL, RX_CTRL_RXE);

	/* enable interrupts */
	ag71xx_wr(ag, AG71XX_REG_INT_ENABLE, AG71XX_INT_INIT);

	netif_wake_queue(ag->dev);
}
static void
__ag71xx_link_adjust(struct ag71xx *ag, bool update)
{
	struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
	u32 cfg2;
	u32 ifctl;
	u32 fifo5;
	u32 fifo3;

	if (!ag->link && update) {
		ag71xx_hw_stop(ag);
		netif_carrier_off(ag->dev);
		if (netif_msg_link(ag))
			pr_info("%s: link down\n", ag->dev->name);
		return;
	}

	if (pdata->is_ar724x)
		ag71xx_fast_reset(ag);

	cfg2 = ag71xx_rr(ag, AG71XX_REG_MAC_CFG2);
	cfg2 &= ~(MAC_CFG2_IF_1000 | MAC_CFG2_IF_10_100 | MAC_CFG2_FDX);
	cfg2 |= (ag->duplex) ? MAC_CFG2_FDX : 0;

	ifctl = ag71xx_rr(ag, AG71XX_REG_MAC_IFCTL);
	ifctl &= ~(MAC_IFCTL_SPEED);

	fifo5 = ag71xx_rr(ag, AG71XX_REG_FIFO_CFG5);
	fifo5 &= ~FIFO_CFG5_BM;

	switch (ag->speed) {
	case SPEED_1000:
		cfg2 |= MAC_CFG2_IF_1000;
		fifo5 |= FIFO_CFG5_BM;
		break;
	case SPEED_100:
		cfg2 |= MAC_CFG2_IF_10_100;
		ifctl |= MAC_IFCTL_SPEED;
		break;
	case SPEED_10:
		cfg2 |= MAC_CFG2_IF_10_100;
		break;
	default:
		BUG();
		return;
	}

	if (pdata->is_ar91xx)
		fifo3 = 0x00780fff;
	else if (pdata->is_ar724x)
		fifo3 = 0x01f00140;
	else
		fifo3 = 0x008001ff;

	if (ag->tx_ring.desc_split) {
		fifo3 &= 0xffff;
		fifo3 |= ((2048 - ag->tx_ring.desc_split) / 4) << 16;
	}

	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG3, fifo3);

	if (update && pdata->set_speed)
		pdata->set_speed(ag->speed);

	if (update && pdata->enable_sgmii_fixup)
		ag71xx_qca955x_sgmii_init();

	ag71xx_wr(ag, AG71XX_REG_MAC_CFG2, cfg2);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG5, fifo5);
	ag71xx_wr(ag, AG71XX_REG_MAC_IFCTL, ifctl);

	if (pdata->disable_inline_checksum_engine) {
		/*
		 * The rx ring buffer can stall on small packets on QCA953x and
		 * QCA956x. Disabling the inline checksum engine fixes the stall.
		 * The wr, rr functions cannot be used since this hidden register
		 * is outside of the normal ag71xx register block.
		 */
		void __iomem *dam = ioremap_nocache(0xb90001bc, 0x4);

		if (dam) {
			__raw_writel(__raw_readl(dam) & ~BIT(27), dam);
			(void)__raw_readl(dam);
			iounmap(dam);
		}
	}

	ag71xx_hw_start(ag);

	netif_carrier_on(ag->dev);
	if (update && netif_msg_link(ag))
		pr_info("%s: link up (%sMbps/%s duplex)\n",
			ag->dev->name,
			ag71xx_speed_str(ag),
			(DUPLEX_FULL == ag->duplex) ? "Full" : "Half");

	ag71xx_dump_regs(ag);
}
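/*
 * The FIFO_CFG3 values above are per-SoC magic constants; the high half
 * is recomputed when TX descriptor splitting is in use, presumably so the
 * TX FIFO threshold matches the largest split segment (2048 - desc_split
 * bytes, expressed in 4-byte words).
 */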
void ag71xx_link_adjust(struct ag71xx *ag)
{
	__ag71xx_link_adjust(ag, true);
}
static int ag71xx_hw_enable(struct ag71xx *ag)
{
	int ret;

	ret = ag71xx_rings_init(ag);
	if (ret)
		return ret;

	napi_enable(&ag->napi);
	ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->tx_ring.descs_dma);
	ag71xx_wr(ag, AG71XX_REG_RX_DESC, ag->rx_ring.descs_dma);
	netif_start_queue(ag->dev);

	return 0;
}
static void ag71xx_hw_disable(struct ag71xx *ag)
{
	netif_stop_queue(ag->dev);

	ag71xx_hw_stop(ag);
	ag71xx_dma_reset(ag);

	napi_disable(&ag->napi);
	del_timer_sync(&ag->oom_timer);

	ag71xx_rings_cleanup(ag);
}
static int ag71xx_open(struct net_device *dev)
{
	struct ag71xx *ag = netdev_priv(dev);
	unsigned int max_frame_len;
	int ret;

	netif_carrier_off(dev);
	max_frame_len = ag71xx_max_frame_len(dev->mtu);
	ag->rx_buf_size = SKB_DATA_ALIGN(max_frame_len + NET_SKB_PAD + NET_IP_ALIGN);

	/* setup max frame length */
	ag71xx_wr(ag, AG71XX_REG_MAC_MFL, max_frame_len);
	ag71xx_hw_set_macaddr(ag, dev->dev_addr);

	ret = ag71xx_hw_enable(ag);
	if (ret)
		goto err;

	ag71xx_phy_start(ag);

	return 0;

err:
	ag71xx_rings_cleanup(ag);
	return ret;
}
static int ag71xx_stop(struct net_device *dev)
{
	struct ag71xx *ag = netdev_priv(dev);

	netif_carrier_off(dev);
	ag71xx_phy_stop(ag);
	ag71xx_hw_disable(ag);

	return 0;
}
static int ag71xx_fill_dma_desc(struct ag71xx_ring *ring, u32 addr, int len)
{
	int i;
	struct ag71xx_desc *desc;
	int ring_mask = BIT(ring->order) - 1;
	int ndesc = 0;
	int split = ring->desc_split;

	if (!split)
		split = len;

	while (len > 0) {
		unsigned int cur_len = len;

		i = (ring->curr + ndesc) & ring_mask;
		desc = ag71xx_ring_desc(ring, i);

		if (!ag71xx_desc_empty(desc))
			return -1;

		if (cur_len > split) {
			cur_len = split;

			/*
			 * TX will hang if DMA transfers <= 4 bytes,
			 * make sure next segment is more than 4 bytes long.
			 */
			if (len <= split + 4)
				cur_len -= 4;
		}

		desc->data = addr;
		addr += cur_len;
		len -= cur_len;

		if (len > 0)
			cur_len |= DESC_MORE;

		/* prevent early tx attempt of this descriptor */
		if (!ndesc)
			cur_len |= DESC_EMPTY;

		desc->ctrl = cur_len;
		ndesc++;
	}

	return ndesc;
}
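/*
 * Worked example, assuming desc_split = 512: a 1030-byte frame is emitted
 * as segments of 512, 512 and 6 bytes. For a 1028-byte frame the naive
 * split would end in a 4-byte tail, which the "len <= split + 4" check
 * avoids by shortening the middle segment: 512 + 508 + 8.
 */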
static netdev_tx_t ag71xx_hard_start_xmit(struct sk_buff *skb,
					  struct net_device *dev)
{
	struct ag71xx *ag = netdev_priv(dev);
	struct ag71xx_ring *ring = &ag->tx_ring;
	int ring_mask = BIT(ring->order) - 1;
	int ring_size = BIT(ring->order);
	struct ag71xx_desc *desc;
	dma_addr_t dma_addr;
	int i, n, ring_min;

	if (ag71xx_has_ar8216(ag))
		ag71xx_add_ar8216_header(ag, skb);

	if (skb->len <= 4) {
		DBG("%s: packet len is too small\n", ag->dev->name);
		goto err_drop;
	}

	dma_addr = dma_map_single(&ag->pdev->dev, skb->data, skb->len,
				  DMA_TO_DEVICE);

	i = ring->curr & ring_mask;
	desc = ag71xx_ring_desc(ring, i);

	/* setup descriptor fields */
	n = ag71xx_fill_dma_desc(ring, (u32) dma_addr,
				 skb->len & ag->desc_pktlen_mask);
	if (n < 0)
		goto err_drop_unmap;

	i = (ring->curr + n - 1) & ring_mask;
	ring->buf[i].len = skb->len;
	ring->buf[i].skb = skb;

	netdev_sent_queue(dev, skb->len);

	skb_tx_timestamp(skb);

	desc->ctrl &= ~DESC_EMPTY;
	ring->curr += n;

	/* flush descriptor */
	wmb();

	ring_min = 2;
	if (ring->desc_split)
		ring_min *= AG71XX_TX_RING_DS_PER_PKT;

	if (ring->curr - ring->dirty >= ring_size - ring_min) {
		DBG("%s: tx queue full\n", dev->name);
		netif_stop_queue(dev);
	}

	DBG("%s: packet injected into TX queue\n", ag->dev->name);

	/* enable TX engine */
	ag71xx_wr(ag, AG71XX_REG_TX_CTRL, TX_CTRL_TXE);

	return NETDEV_TX_OK;

err_drop_unmap:
	dma_unmap_single(&ag->pdev->dev, dma_addr, skb->len, DMA_TO_DEVICE);

err_drop:
	dev->stats.tx_dropped++;

	kfree_skb(skb);
	return NETDEV_TX_OK;
}
static int ag71xx_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct ag71xx *ag = netdev_priv(dev);

	switch (cmd) {
	case SIOCSIFHWADDR:
		if (copy_from_user
			(dev->dev_addr, ifr->ifr_data, sizeof(dev->dev_addr)))
			return -EFAULT;
		return 0;

	case SIOCGIFHWADDR:
		if (copy_to_user
			(ifr->ifr_data, dev->dev_addr, sizeof(dev->dev_addr)))
			return -EFAULT;
		return 0;

	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		if (ag->phy_dev == NULL)
			break;

		return phy_mii_ioctl(ag->phy_dev, ifr, cmd);

	default:
		break;
	}

	return -EOPNOTSUPP;
}
static void ag71xx_oom_timer_handler(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct ag71xx *ag = netdev_priv(dev);

	napi_schedule(&ag->napi);
}
static void ag71xx_tx_timeout(struct net_device *dev)
{
	struct ag71xx *ag = netdev_priv(dev);

	if (netif_msg_tx_err(ag))
		pr_info("%s: tx timeout\n", ag->dev->name);

	schedule_delayed_work(&ag->restart_work, 1);
}
static void ag71xx_bit_set(void __iomem *reg, u32 bit)
{
	u32 val = __raw_readl(reg) | bit;

	__raw_writel(val, reg);
	(void) __raw_readl(reg);	/* flush the write */
}

static void ag71xx_bit_clear(void __iomem *reg, u32 bit)
{
	u32 val = __raw_readl(reg) & ~bit;

	__raw_writel(val, reg);
	(void) __raw_readl(reg);	/* flush the write */
}
static void ag71xx_qca955x_sgmii_init(void)
{
	void __iomem *gmac_base;
	u32 mr_an_status, sgmii_status;
	u8 tries = 0;

	gmac_base = ioremap_nocache(QCA955X_GMAC_BASE, QCA955X_GMAC_SIZE);
	if (!gmac_base)
		return;

	mr_an_status = __raw_readl(gmac_base + QCA955X_GMAC_REG_MR_AN_STATUS);
	if (!(mr_an_status & QCA955X_MR_AN_STATUS_AN_ABILITY))
		goto out;

	__raw_writel(QCA955X_SGMII_RESET_RX_CLK_N_RESET,
		     gmac_base + QCA955X_GMAC_REG_SGMII_RESET);
	__raw_readl(gmac_base + QCA955X_GMAC_REG_SGMII_RESET); /* flush */
	udelay(10);

	/* SGMII reset sequence */
	ag71xx_bit_set(gmac_base + QCA955X_GMAC_REG_SGMII_RESET,
		       QCA955X_SGMII_RESET_HW_RX_125M_N);
	udelay(10);

	ag71xx_bit_set(gmac_base + QCA955X_GMAC_REG_SGMII_RESET,
		       QCA955X_SGMII_RESET_RX_125M_N);
	udelay(10);

	ag71xx_bit_set(gmac_base + QCA955X_GMAC_REG_SGMII_RESET,
		       QCA955X_SGMII_RESET_TX_125M_N);
	udelay(10);

	ag71xx_bit_set(gmac_base + QCA955X_GMAC_REG_SGMII_RESET,
		       QCA955X_SGMII_RESET_RX_CLK_N);
	udelay(10);

	ag71xx_bit_set(gmac_base + QCA955X_GMAC_REG_SGMII_RESET,
		       QCA955X_SGMII_RESET_TX_CLK_N);
	udelay(10);

	do {
		ag71xx_bit_set(gmac_base + QCA955X_GMAC_REG_MR_AN_CONTROL,
			       QCA955X_MR_AN_CONTROL_PHY_RESET |
			       QCA955X_MR_AN_CONTROL_AN_ENABLE);
		udelay(200);
		ag71xx_bit_clear(gmac_base + QCA955X_GMAC_REG_MR_AN_CONTROL,
				 QCA955X_MR_AN_CONTROL_PHY_RESET);
		mdelay(300);
		sgmii_status = __raw_readl(gmac_base + QCA955X_GMAC_REG_SGMII_DEBUG) & 0xF;

		if (tries++ >= QCA955X_SGMII_LINK_WAR_MAX_TRY) {
			pr_warn("ag71xx: max retries for SGMII fixup exceeded!\n");
			break;
		}
	} while (!(sgmii_status == 0xf || sgmii_status == 0x10));

out:
	iounmap(gmac_base);
}
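/*
 * The loop above is a link workaround for the QCA955x SGMII block: the
 * reset/autoneg cycle is repeated (up to QCA955X_SGMII_LINK_WAR_MAX_TRY
 * times) until the low bits of the SGMII debug register report a settled
 * state. The exact meaning of the 0xf/0x10 values appears to be
 * undocumented; they are carried over from the vendor code as-is.
 */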
static void ag71xx_restart_work_func(struct work_struct *work)
{
	struct ag71xx *ag = container_of(work, struct ag71xx, restart_work.work);

	rtnl_lock();
	ag71xx_hw_disable(ag);
	ag71xx_hw_enable(ag);
	if (ag->link)
		__ag71xx_link_adjust(ag, false);
	rtnl_unlock();
}
static bool ag71xx_check_dma_stuck(struct ag71xx *ag)
{
	unsigned long timestamp;
	u32 rx_sm, tx_sm, rx_fd;

	timestamp = netdev_get_tx_queue(ag->dev, 0)->trans_start;
	if (likely(time_before(jiffies, timestamp + HZ/10)))
		return false;

	if (!netif_carrier_ok(ag->dev))
		return false;

	rx_sm = ag71xx_rr(ag, AG71XX_REG_RX_SM);
	if ((rx_sm & 0x7) == 0x3 && ((rx_sm >> 4) & 0x7) == 0x6)
		return true;

	tx_sm = ag71xx_rr(ag, AG71XX_REG_TX_SM);
	rx_fd = ag71xx_rr(ag, AG71XX_REG_FIFO_DEPTH);
	if (((tx_sm >> 4) & 0x7) == 0 && ((rx_sm & 0x7) == 0) &&
	    ((rx_sm >> 4) & 0x7) == 0 && rx_fd == 0)
		return true;

	return false;
}
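/*
 * The nibble comparisons above are heuristics on undocumented DMA
 * state-machine registers. They are only consulted once a TX descriptor
 * has been pending for more than HZ/10 with carrier up, and a positive
 * result triggers a MAC restart via restart_work (see ag71xx_tx_packets).
 */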
static int ag71xx_tx_packets(struct ag71xx *ag, bool flush)
{
	struct ag71xx_ring *ring = &ag->tx_ring;
	struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
	bool dma_stuck = false;
	int ring_mask = BIT(ring->order) - 1;
	int ring_size = BIT(ring->order);
	int sent = 0;
	int bytes_compl = 0;
	int n = 0;

	DBG("%s: processing TX ring\n", ag->dev->name);

	while (ring->dirty + n != ring->curr) {
		unsigned int i = (ring->dirty + n) & ring_mask;
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);
		struct sk_buff *skb = ring->buf[i].skb;

		if (!flush && !ag71xx_desc_empty(desc)) {
			if (pdata->is_ar724x &&
			    ag71xx_check_dma_stuck(ag)) {
				schedule_delayed_work(&ag->restart_work, HZ / 2);
				dma_stuck = true;
			}
			break;
		}

		if (flush)
			desc->ctrl |= DESC_EMPTY;

		n++;
		if (!skb)
			continue;

		dev_kfree_skb_any(skb);
		ring->buf[i].skb = NULL;

		bytes_compl += ring->buf[i].len;

		sent++;
		ring->dirty += n;

		while (n > 0) {
			ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_PS);
			n--;
		}
	}

	DBG("%s: %d packets sent out\n", ag->dev->name, sent);

	if (!sent)
		return 0;

	ag->dev->stats.tx_bytes += bytes_compl;
	ag->dev->stats.tx_packets += sent;

	netdev_completed_queue(ag->dev, sent, bytes_compl);
	if ((ring->curr - ring->dirty) < (ring_size * 3) / 4)
		netif_wake_queue(ag->dev);

	if (!dma_stuck)
		cancel_delayed_work(&ag->restart_work);

	return sent;
}
static int ag71xx_rx_packets(struct ag71xx *ag, int limit)
{
	struct net_device *dev = ag->dev;
	struct ag71xx_ring *ring = &ag->rx_ring;
	int offset = ag71xx_buffer_offset(ag);
	unsigned int pktlen_mask = ag->desc_pktlen_mask;
	int ring_mask = BIT(ring->order) - 1;
	int ring_size = BIT(ring->order);
	struct sk_buff_head queue;
	struct sk_buff *skb;
	int done = 0;

	DBG("%s: rx packets, limit=%d, curr=%u, dirty=%u\n",
	    dev->name, limit, ring->curr, ring->dirty);

	skb_queue_head_init(&queue);

	while (done < limit) {
		unsigned int i = ring->curr & ring_mask;
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);
		int pktlen;
		int err = 0;

		if (ag71xx_desc_empty(desc))
			break;

		if ((ring->dirty + ring_size) == ring->curr) {
			ag71xx_assert(0);
			break;
		}

		ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_PR);

		pktlen = desc->ctrl & pktlen_mask;
		pktlen -= ETH_FCS_LEN;

		dma_unmap_single(&ag->pdev->dev, ring->buf[i].dma_addr,
				 ag->rx_buf_size, DMA_FROM_DEVICE);

		dev->stats.rx_packets++;
		dev->stats.rx_bytes += pktlen;

		skb = build_skb(ring->buf[i].rx_buf, ag71xx_buffer_size(ag));
		if (!skb) {
			skb_free_frag(ring->buf[i].rx_buf);
			goto next;
		}

		skb_reserve(skb, offset);
		skb_put(skb, pktlen);

		if (ag71xx_has_ar8216(ag))
			err = ag71xx_remove_ar8216_header(ag, skb, pktlen);

		if (err) {
			dev->stats.rx_dropped++;
			kfree_skb(skb);
		} else {
			skb->dev = dev;
			skb->ip_summed = CHECKSUM_NONE;
			__skb_queue_tail(&queue, skb);
		}

next:
		ring->buf[i].rx_buf = NULL;
		done++;

		ring->curr++;
	}

	ag71xx_ring_rx_refill(ag);

	while ((skb = __skb_dequeue(&queue)) != NULL) {
		skb->protocol = eth_type_trans(skb, dev);
		netif_receive_skb(skb);
	}

	DBG("%s: rx finish, curr=%u, dirty=%u, done=%d\n",
	    dev->name, ring->curr, ring->dirty, done);

	return done;
}
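/*
 * Completed skbs are parked on a local queue and only handed to
 * netif_receive_skb() after ag71xx_ring_rx_refill(), so the hardware gets
 * fresh RX descriptors before the stack starts consuming the packets.
 */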
static int ag71xx_poll(struct napi_struct *napi, int limit)
{
	struct ag71xx *ag = container_of(napi, struct ag71xx, napi);
	struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
	struct net_device *dev = ag->dev;
	struct ag71xx_ring *rx_ring = &ag->rx_ring;
	int rx_ring_size = BIT(rx_ring->order);
	unsigned long flags;
	u32 status;
	int tx_done;
	int rx_done;

	pdata->ddr_flush();
	tx_done = ag71xx_tx_packets(ag, false);

	DBG("%s: processing RX ring\n", dev->name);
	rx_done = ag71xx_rx_packets(ag, limit);

	ag71xx_debugfs_update_napi_stats(ag, rx_done, tx_done);

	if (rx_ring->buf[rx_ring->dirty % rx_ring_size].rx_buf == NULL)
		goto oom;

	status = ag71xx_rr(ag, AG71XX_REG_RX_STATUS);
	if (unlikely(status & RX_STATUS_OF)) {
		ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_OF);
		dev->stats.rx_fifo_errors++;

		/* restart RX */
		ag71xx_wr(ag, AG71XX_REG_RX_CTRL, RX_CTRL_RXE);
	}

	if (rx_done < limit) {
		if (status & RX_STATUS_PR)
			goto more;

		status = ag71xx_rr(ag, AG71XX_REG_TX_STATUS);
		if (status & TX_STATUS_PS)
			goto more;

		DBG("%s: disable polling mode, rx=%d, tx=%d,limit=%d\n",
		    dev->name, rx_done, tx_done, limit);

		napi_complete(napi);

		/* enable interrupts */
		spin_lock_irqsave(&ag->lock, flags);
		ag71xx_int_enable(ag, AG71XX_INT_POLL);
		spin_unlock_irqrestore(&ag->lock, flags);
		return rx_done;
	}

more:
	DBG("%s: stay in polling mode, rx=%d, tx=%d, limit=%d\n",
	    dev->name, rx_done, tx_done, limit);
	return limit;

oom:
	if (netif_msg_rx_err(ag))
		pr_info("%s: out of memory\n", dev->name);

	mod_timer(&ag->oom_timer, jiffies + AG71XX_OOM_REFILL);
	napi_complete(napi);
	return 0;
}
static irqreturn_t ag71xx_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct ag71xx *ag = netdev_priv(dev);
	u32 status;

	status = ag71xx_rr(ag, AG71XX_REG_INT_STATUS);
	ag71xx_dump_intr(ag, "raw", status);

	if (unlikely(!status))
		return IRQ_NONE;

	if (unlikely(status & AG71XX_INT_ERR)) {
		if (status & AG71XX_INT_TX_BE) {
			ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_BE);
			dev_err(&dev->dev, "TX BUS error\n");
		}
		if (status & AG71XX_INT_RX_BE) {
			ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_BE);
			dev_err(&dev->dev, "RX BUS error\n");
		}
	}

	if (likely(status & AG71XX_INT_POLL)) {
		ag71xx_int_disable(ag, AG71XX_INT_POLL);
		DBG("%s: enable polling mode\n", dev->name);
		napi_schedule(&ag->napi);
	}

	ag71xx_debugfs_update_int_stats(ag, status);

	return IRQ_HANDLED;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void ag71xx_netpoll(struct net_device *dev)
{
	disable_irq(dev->irq);
	ag71xx_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif
static int ag71xx_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ag71xx *ag = netdev_priv(dev);
	unsigned int max_frame_len;

	max_frame_len = ag71xx_max_frame_len(new_mtu);
	if (new_mtu < 68 || max_frame_len > ag->max_frame_len)
		return -EINVAL;

	if (netif_running(dev))
		return -EBUSY;

	dev->mtu = new_mtu;
	ag71xx_wr(ag, AG71XX_REG_MAC_MFL,
		  ag71xx_max_frame_len(dev->mtu));

	return 0;
}
static const struct net_device_ops ag71xx_netdev_ops = {
	.ndo_open		= ag71xx_open,
	.ndo_stop		= ag71xx_stop,
	.ndo_start_xmit		= ag71xx_hard_start_xmit,
	.ndo_do_ioctl		= ag71xx_do_ioctl,
	.ndo_tx_timeout		= ag71xx_tx_timeout,
	.ndo_change_mtu		= ag71xx_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ag71xx_netpoll,
#endif
};
static int ag71xx_probe(struct platform_device *pdev)
{
	struct net_device *dev;
	struct resource *res;
	struct ag71xx *ag;
	struct ag71xx_platform_data *pdata;
	int tx_size;
	int err;

	pdata = pdev->dev.platform_data;
	if (!pdata) {
		dev_err(&pdev->dev, "no platform data specified\n");
		return -ENXIO;
	}

	if (pdata->mii_bus_dev == NULL && pdata->phy_mask) {
		dev_err(&pdev->dev, "no MII bus device specified\n");
		return -EINVAL;
	}

	dev = devm_alloc_etherdev(&pdev->dev, sizeof(*ag));
	if (!dev)
		return -ENOMEM;

	if (!pdata->max_frame_len || !pdata->desc_pktlen_mask)
		return -EINVAL;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -EINVAL;

	SET_NETDEV_DEV(dev, &pdev->dev);

	ag = netdev_priv(dev);
	ag->pdev = pdev;
	ag->dev = dev;
	ag->msg_enable = netif_msg_init(ag71xx_msg_level,
					AG71XX_DEFAULT_MSG_ENABLE);
	spin_lock_init(&ag->lock);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mac_base");
	if (!res) {
		dev_err(&pdev->dev, "no mac_base resource found\n");
		return -ENXIO;
	}

	ag->mac_base = devm_ioremap_nocache(&pdev->dev, res->start,
					    res->end - res->start + 1);
	if (!ag->mac_base)
		return -ENOMEM;

	dev->irq = platform_get_irq(pdev, 0);
	err = devm_request_irq(&pdev->dev, dev->irq, ag71xx_interrupt,
			       0x0, dev_name(&pdev->dev), dev);
	if (err) {
		dev_err(&pdev->dev, "unable to request IRQ %d\n", dev->irq);
		return err;
	}

	dev->base_addr = (unsigned long)ag->mac_base;
	dev->netdev_ops = &ag71xx_netdev_ops;
	dev->ethtool_ops = &ag71xx_ethtool_ops;

	INIT_DELAYED_WORK(&ag->restart_work, ag71xx_restart_work_func);

	init_timer(&ag->oom_timer);
	ag->oom_timer.data = (unsigned long) dev;
	ag->oom_timer.function = ag71xx_oom_timer_handler;

	tx_size = AG71XX_TX_RING_SIZE_DEFAULT;
	ag->rx_ring.order = ag71xx_ring_size_order(AG71XX_RX_RING_SIZE_DEFAULT);

	ag->max_frame_len = pdata->max_frame_len;
	ag->desc_pktlen_mask = pdata->desc_pktlen_mask;

	if (!pdata->is_ar724x && !pdata->is_ar91xx) {
		ag->tx_ring.desc_split = AG71XX_TX_RING_SPLIT;
		tx_size *= AG71XX_TX_RING_DS_PER_PKT;
	}
	ag->tx_ring.order = ag71xx_ring_size_order(tx_size);

	ag->stop_desc = dmam_alloc_coherent(&pdev->dev,
					    sizeof(struct ag71xx_desc),
					    &ag->stop_desc_dma, GFP_KERNEL);
	if (!ag->stop_desc)
		return -ENOMEM;

	ag->stop_desc->data = 0;
	ag->stop_desc->ctrl = 0;
	ag->stop_desc->next = (u32) ag->stop_desc_dma;

	memcpy(dev->dev_addr, pdata->mac_addr, ETH_ALEN);

	netif_napi_add(dev, &ag->napi, ag71xx_poll, AG71XX_NAPI_WEIGHT);

	ag71xx_dump_regs(ag);

	ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, 0);

	ag71xx_hw_init(ag);

	ag71xx_dump_regs(ag);

	err = ag71xx_phy_connect(ag);
	if (err)
		return err;

	err = ag71xx_debugfs_init(ag);
	if (err)
		goto err_phy_disconnect;

	platform_set_drvdata(pdev, dev);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "unable to register net device\n");
		platform_set_drvdata(pdev, NULL);
		ag71xx_debugfs_exit(ag);
		goto err_phy_disconnect;
	}

	pr_info("%s: Atheros AG71xx at 0x%08lx, irq %d, mode: %s\n",
		dev->name, (unsigned long) ag->mac_base, dev->irq,
		phy_modes(pdata->phy_if_mode));

	return 0;

err_phy_disconnect:
	ag71xx_phy_disconnect(ag);
	return err;
}
static int ag71xx_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct ag71xx *ag;

	if (!dev)
		return 0;

	ag = netdev_priv(dev);
	ag71xx_debugfs_exit(ag);
	ag71xx_phy_disconnect(ag);
	unregister_netdev(dev);
	platform_set_drvdata(pdev, NULL);

	return 0;
}
static struct platform_driver ag71xx_driver = {
	.probe		= ag71xx_probe,
	.remove		= ag71xx_remove,
	.driver = {
		.name	= AG71XX_DRV_NAME,
	},
};
static int __init ag71xx_module_init(void)
{
	int ret;

	ret = ag71xx_debugfs_root_init();
	if (ret)
		goto err_out;

	ret = ag71xx_mdio_driver_init();
	if (ret)
		goto err_debugfs_exit;

	ret = platform_driver_register(&ag71xx_driver);
	if (ret)
		goto err_mdio_exit;

	return 0;

err_mdio_exit:
	ag71xx_mdio_driver_exit();
err_debugfs_exit:
	ag71xx_debugfs_root_exit();
err_out:
	return ret;
}
static void __exit ag71xx_module_exit(void)
{
	platform_driver_unregister(&ag71xx_driver);
	ag71xx_mdio_driver_exit();
	ag71xx_debugfs_root_exit();
}
module_init(ag71xx_module_init);
module_exit(ag71xx_module_exit);

MODULE_VERSION(AG71XX_DRV_VERSION);
MODULE_AUTHOR("Gabor Juhos <juhosg@openwrt.org>");
MODULE_AUTHOR("Imre Kaloz <kaloz@openwrt.org>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" AG71XX_DRV_NAME);