/*
 * Atheros AR71xx built-in ethernet mac driver
 *
 * Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
 * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
 *
 * Based on Atheros' AG7100 driver
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include "ag71xx.h"

#define AG71XX_DEFAULT_MSG_ENABLE	\
	(NETIF_MSG_DRV			\
	| NETIF_MSG_PROBE		\
	| NETIF_MSG_LINK		\
	| NETIF_MSG_TIMER		\
	| NETIF_MSG_IFDOWN		\
	| NETIF_MSG_IFUP		\
	| NETIF_MSG_RX_ERR		\
	| NETIF_MSG_TX_ERR)

static int ag71xx_msg_level = -1;

module_param_named(msg_level, ag71xx_msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");

#define ETH_SWITCH_HEADER_LEN	2

static int ag71xx_tx_packets(struct ag71xx *ag, bool flush);
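
/*
 * Worst-case frame on the wire for a given MTU: optional 2-byte switch
 * header, ethernet header, one VLAN tag, payload and trailing FCS.
 */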
static inline unsigned int ag71xx_max_frame_len(unsigned int mtu)
{
	return ETH_SWITCH_HEADER_LEN + ETH_HLEN + VLAN_HLEN + mtu + ETH_FCS_LEN;
}

static void ag71xx_dump_dma_regs(struct ag71xx *ag)
{
	DBG("%s: dma_tx_ctrl=%08x, dma_tx_desc=%08x, dma_tx_status=%08x\n",
	    ag->dev->name,
	    ag71xx_rr(ag, AG71XX_REG_TX_CTRL),
	    ag71xx_rr(ag, AG71XX_REG_TX_DESC),
	    ag71xx_rr(ag, AG71XX_REG_TX_STATUS));

	DBG("%s: dma_rx_ctrl=%08x, dma_rx_desc=%08x, dma_rx_status=%08x\n",
	    ag->dev->name,
	    ag71xx_rr(ag, AG71XX_REG_RX_CTRL),
	    ag71xx_rr(ag, AG71XX_REG_RX_DESC),
	    ag71xx_rr(ag, AG71XX_REG_RX_STATUS));
}

static void ag71xx_dump_regs(struct ag71xx *ag)
{
	DBG("%s: mac_cfg1=%08x, mac_cfg2=%08x, ipg=%08x, hdx=%08x, mfl=%08x\n",
	    ag->dev->name,
	    ag71xx_rr(ag, AG71XX_REG_MAC_CFG1),
	    ag71xx_rr(ag, AG71XX_REG_MAC_CFG2),
	    ag71xx_rr(ag, AG71XX_REG_MAC_IPG),
	    ag71xx_rr(ag, AG71XX_REG_MAC_HDX),
	    ag71xx_rr(ag, AG71XX_REG_MAC_MFL));
	DBG("%s: mac_ifctl=%08x, mac_addr1=%08x, mac_addr2=%08x\n",
	    ag->dev->name,
	    ag71xx_rr(ag, AG71XX_REG_MAC_IFCTL),
	    ag71xx_rr(ag, AG71XX_REG_MAC_ADDR1),
	    ag71xx_rr(ag, AG71XX_REG_MAC_ADDR2));
	DBG("%s: fifo_cfg0=%08x, fifo_cfg1=%08x, fifo_cfg2=%08x\n",
	    ag->dev->name,
	    ag71xx_rr(ag, AG71XX_REG_FIFO_CFG0),
	    ag71xx_rr(ag, AG71XX_REG_FIFO_CFG1),
	    ag71xx_rr(ag, AG71XX_REG_FIFO_CFG2));
	DBG("%s: fifo_cfg3=%08x, fifo_cfg4=%08x, fifo_cfg5=%08x\n",
	    ag->dev->name,
	    ag71xx_rr(ag, AG71XX_REG_FIFO_CFG3),
	    ag71xx_rr(ag, AG71XX_REG_FIFO_CFG4),
	    ag71xx_rr(ag, AG71XX_REG_FIFO_CFG5));
}

static inline void ag71xx_dump_intr(struct ag71xx *ag, char *label, u32 intr)
{
	DBG("%s: %s intr=%08x %s%s%s%s%s%s\n",
	    ag->dev->name, label, intr,
	    (intr & AG71XX_INT_TX_PS) ? "TXPS " : "",
	    (intr & AG71XX_INT_TX_UR) ? "TXUR " : "",
	    (intr & AG71XX_INT_TX_BE) ? "TXBE " : "",
	    (intr & AG71XX_INT_RX_PR) ? "RXPR " : "",
	    (intr & AG71XX_INT_RX_OF) ? "RXOF " : "",
	    (intr & AG71XX_INT_RX_BE) ? "RXBE " : "");
}

static void ag71xx_ring_free(struct ag71xx_ring *ring)
{
	kfree(ring->buf);

	if (ring->descs_cpu)
		dma_free_coherent(NULL, ring->size * ring->desc_size,
				  ring->descs_cpu, ring->descs_dma);
}

static int ag71xx_ring_alloc(struct ag71xx_ring *ring)
{
	int err;

	ring->desc_size = sizeof(struct ag71xx_desc);
	if (ring->desc_size % cache_line_size()) {
		DBG("ag71xx: ring %p, desc size %u rounded to %u\n",
		    ring, ring->desc_size,
		    roundup(ring->desc_size, cache_line_size()));
		ring->desc_size = roundup(ring->desc_size, cache_line_size());
	}

	ring->descs_cpu = dma_alloc_coherent(NULL, ring->size * ring->desc_size,
					     &ring->descs_dma, GFP_ATOMIC);
	if (!ring->descs_cpu) {
		err = -ENOMEM;
		goto err;
	}

	ring->buf = kzalloc(ring->size * sizeof(*ring->buf), GFP_KERNEL);
	if (!ring->buf) {
		err = -ENOMEM;
		goto err;
	}

	return 0;

err:
	return err;
}

static void ag71xx_ring_tx_clean(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->tx_ring;
	struct net_device *dev = ag->dev;
	u32 bytes_compl = 0, pkts_compl = 0;

	while (ring->curr != ring->dirty) {
		struct ag71xx_desc *desc;
		u32 i = ring->dirty % ring->size;

		desc = ag71xx_ring_desc(ring, i);
		if (!ag71xx_desc_empty(desc)) {
			desc->ctrl = 0;
			dev->stats.tx_errors++;
		}

		if (ring->buf[i].skb) {
			bytes_compl += ring->buf[i].len;
			pkts_compl++;
			dev_kfree_skb_any(ring->buf[i].skb);
		}
		ring->buf[i].skb = NULL;
		ring->dirty++;
	}

	/* flush descriptors */
	wmb();

	netdev_completed_queue(dev, pkts_compl, bytes_compl);
}

static void ag71xx_ring_tx_init(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->tx_ring;
	int i;

	for (i = 0; i < ring->size; i++) {
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);

		desc->next = (u32) (ring->descs_dma +
			ring->desc_size * ((i + 1) % ring->size));

		desc->ctrl = DESC_EMPTY;
		ring->buf[i].skb = NULL;
	}

	/* flush descriptors */
	wmb();

	ring->curr = 0;
	ring->dirty = 0;
	netdev_reset_queue(ag->dev);
}

static void ag71xx_ring_rx_clean(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->rx_ring;
	int i;

	if (!ring->buf)
		return;

	for (i = 0; i < ring->size; i++)
		if (ring->buf[i].rx_buf) {
			dma_unmap_single(&ag->dev->dev, ring->buf[i].dma_addr,
					 ag->rx_buf_size, DMA_FROM_DEVICE);
			kfree(ring->buf[i].rx_buf);
		}
}

static int ag71xx_buffer_offset(struct ag71xx *ag)
{
	int offset = NET_SKB_PAD;

	/*
	 * On AR71xx/AR91xx packets must be 4-byte aligned.
	 *
	 * When using builtin AR8216 support, hardware adds a 2-byte header,
	 * so we don't need any extra alignment in that case.
	 */
	if (!ag71xx_get_pdata(ag)->is_ar724x || ag71xx_has_ar8216(ag))
		return offset;

	return offset + NET_IP_ALIGN;
}

static bool ag71xx_fill_rx_buf(struct ag71xx *ag, struct ag71xx_buf *buf,
			       int offset)
{
	struct ag71xx_ring *ring = &ag->rx_ring;
	struct ag71xx_desc *desc = ag71xx_ring_desc(ring, buf - &ring->buf[0]);
	void *data;

	data = kmalloc(ag->rx_buf_size +
		       SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
		       GFP_ATOMIC);
	if (!data)
		return false;

	buf->rx_buf = data;
	buf->dma_addr = dma_map_single(&ag->dev->dev, data, ag->rx_buf_size,
				       DMA_FROM_DEVICE);
	desc->data = (u32) buf->dma_addr + offset;
	return true;
}

static int ag71xx_ring_rx_init(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->rx_ring;
	unsigned int i;
	int ret;
	int offset = ag71xx_buffer_offset(ag);

	ret = 0;
	for (i = 0; i < ring->size; i++) {
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);

		desc->next = (u32) (ring->descs_dma +
			ring->desc_size * ((i + 1) % ring->size));

		DBG("ag71xx: RX desc at %p, next is %08x\n",
		    desc, desc->next);
	}

	for (i = 0; i < ring->size; i++) {
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);

		if (!ag71xx_fill_rx_buf(ag, &ring->buf[i], offset)) {
			ret = -ENOMEM;
			break;
		}

		desc->ctrl = DESC_EMPTY;
	}

	/* flush descriptors */
	wmb();

	ring->curr = 0;
	ring->dirty = 0;

	return ret;
}

static int ag71xx_ring_rx_refill(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->rx_ring;
	unsigned int count;
	int offset = ag71xx_buffer_offset(ag);

	count = 0;
	for (; ring->curr - ring->dirty > 0; ring->dirty++) {
		struct ag71xx_desc *desc;
		unsigned int i;

		i = ring->dirty % ring->size;
		desc = ag71xx_ring_desc(ring, i);

		if (!ring->buf[i].rx_buf &&
		    !ag71xx_fill_rx_buf(ag, &ring->buf[i], offset))
			break;

		desc->ctrl = DESC_EMPTY;
		count++;
	}

	/* flush descriptors */
	wmb();

	DBG("%s: %u rx descriptors refilled\n", ag->dev->name, count);

	return count;
}

static int ag71xx_rings_init(struct ag71xx *ag)
{
	int ret;

	ret = ag71xx_ring_alloc(&ag->tx_ring);
	if (ret)
		return ret;

	ag71xx_ring_tx_init(ag);
	ret = ag71xx_ring_alloc(&ag->rx_ring);
	if (ret)
		return ret;

	ret = ag71xx_ring_rx_init(ag);
	return ret;
}

static void ag71xx_rings_cleanup(struct ag71xx *ag)
{
	ag71xx_ring_rx_clean(ag);
	ag71xx_ring_free(&ag->rx_ring);

	ag71xx_ring_tx_clean(ag);
	netdev_reset_queue(ag->dev);
	ag71xx_ring_free(&ag->tx_ring);
}

static unsigned char *ag71xx_speed_str(struct ag71xx *ag)
{
	switch (ag->speed) {
	case SPEED_1000:
		return "1000";
	case SPEED_100:
		return "100";
	case SPEED_10:
		return "10";
	}

	return "?";
}
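
/*
 * The hardware splits the station address across two registers; the
 * layout below follows from the shifts in ag71xx_hw_set_macaddr():
 *   MAC_ADDR1 = mac[5]:mac[4]:mac[3]:mac[2]
 *   MAC_ADDR2 = mac[1]:mac[0]:00:00
 */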
static void ag71xx_hw_set_macaddr(struct ag71xx *ag, unsigned char *mac)
{
	u32 t;

	t = (((u32) mac[5]) << 24) | (((u32) mac[4]) << 16)
	  | (((u32) mac[3]) << 8) | ((u32) mac[2]);

	ag71xx_wr(ag, AG71XX_REG_MAC_ADDR1, t);

	t = (((u32) mac[1]) << 24) | (((u32) mac[0]) << 16);
	ag71xx_wr(ag, AG71XX_REG_MAC_ADDR2, t);
}
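
/*
 * Stopping the DMA engine is timing sensitive: the engines are halted
 * first and the descriptor pointers are only rewritten afterwards,
 * since clearing them too early causes random memory corruption. The
 * repeated status writes below appear to be an empirical workaround
 * for interrupts that do not ack on the first write.
 */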
static void ag71xx_dma_reset(struct ag71xx *ag)
{
	u32 val;
	int i;

	ag71xx_dump_dma_regs(ag);

	/* stop RX and TX */
	ag71xx_wr(ag, AG71XX_REG_RX_CTRL, 0);
	ag71xx_wr(ag, AG71XX_REG_TX_CTRL, 0);

	/*
	 * give the hardware some time to really stop all rx/tx activity
	 * clearing the descriptors too early causes random memory corruption
	 */
	mdelay(1);

	/* clear descriptor addresses */
	ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->stop_desc_dma);
	ag71xx_wr(ag, AG71XX_REG_RX_DESC, ag->stop_desc_dma);

	/* clear pending RX/TX interrupts */
	for (i = 0; i < 256; i++) {
		ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_PR);
		ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_PS);
	}

	/* clear pending errors */
	ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_BE | RX_STATUS_OF);
	ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_BE | TX_STATUS_UR);

	val = ag71xx_rr(ag, AG71XX_REG_RX_STATUS);
	if (val)
		pr_alert("%s: unable to clear DMA Rx status: %08x\n",
			 ag->dev->name, val);

	val = ag71xx_rr(ag, AG71XX_REG_TX_STATUS);

	/* mask out reserved bits */
	val &= ~0xff000000;

	if (val)
		pr_alert("%s: unable to clear DMA Tx status: %08x\n",
			 ag->dev->name, val);

	ag71xx_dump_dma_regs(ag);
}

#define MAC_CFG1_INIT	(MAC_CFG1_RXE | MAC_CFG1_TXE | \
			 MAC_CFG1_SRX | MAC_CFG1_STX)

#define FIFO_CFG0_INIT	(FIFO_CFG0_ALL << FIFO_CFG0_ENABLE_SHIFT)

#define FIFO_CFG4_INIT	(FIFO_CFG4_DE | FIFO_CFG4_DV | FIFO_CFG4_FC | \
			 FIFO_CFG4_CE | FIFO_CFG4_CR | FIFO_CFG4_LM | \
			 FIFO_CFG4_LO | FIFO_CFG4_OK | FIFO_CFG4_MC | \
			 FIFO_CFG4_BC | FIFO_CFG4_DR | FIFO_CFG4_LE | \
			 FIFO_CFG4_CF | FIFO_CFG4_PF | FIFO_CFG4_UO | \
			 FIFO_CFG4_VT)

#define FIFO_CFG5_INIT	(FIFO_CFG5_DE | FIFO_CFG5_DV | FIFO_CFG5_FC | \
			 FIFO_CFG5_CE | FIFO_CFG5_LO | FIFO_CFG5_OK | \
			 FIFO_CFG5_MC | FIFO_CFG5_BC | FIFO_CFG5_DR | \
			 FIFO_CFG5_CF | FIFO_CFG5_PF | FIFO_CFG5_VT | \
			 FIFO_CFG5_LE | FIFO_CFG5_FT | FIFO_CFG5_16 | \
			 FIFO_CFG5_17 | FIFO_CFG5_SF)

static void ag71xx_hw_stop(struct ag71xx *ag)
{
	/* disable all interrupts and stop the rx/tx engine */
	ag71xx_wr(ag, AG71XX_REG_INT_ENABLE, 0);
	ag71xx_wr(ag, AG71XX_REG_RX_CTRL, 0);
	ag71xx_wr(ag, AG71XX_REG_TX_CTRL, 0);
}

static void ag71xx_hw_setup(struct ag71xx *ag)
{
	struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);

	/* setup MAC configuration registers */
	ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, MAC_CFG1_INIT);

	ag71xx_sb(ag, AG71XX_REG_MAC_CFG2,
		  MAC_CFG2_PAD_CRC_EN | MAC_CFG2_LEN_CHECK);

	/* setup max frame length to zero */
	ag71xx_wr(ag, AG71XX_REG_MAC_MFL, 0);

	/* setup FIFO configuration registers */
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG0, FIFO_CFG0_INIT);
	if (pdata->is_ar724x) {
		ag71xx_wr(ag, AG71XX_REG_FIFO_CFG1, pdata->fifo_cfg1);
		ag71xx_wr(ag, AG71XX_REG_FIFO_CFG2, pdata->fifo_cfg2);
	} else {
		ag71xx_wr(ag, AG71XX_REG_FIFO_CFG1, 0x0fff0000);
		ag71xx_wr(ag, AG71XX_REG_FIFO_CFG2, 0x00001fff);
	}
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG4, FIFO_CFG4_INIT);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG5, FIFO_CFG5_INIT);
}
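
/*
 * Full hardware init: soft-reset the MAC (on AR724x the builtin PHY is
 * pulled through reset first), then reprogram the static MAC and FIFO
 * configuration and park the DMA engine.
 */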
static void ag71xx_hw_init(struct ag71xx *ag)
{
	struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
	u32 reset_mask = pdata->reset_bit;

	ag71xx_hw_stop(ag);

	if (pdata->is_ar724x) {
		u32 reset_phy = reset_mask;

		reset_phy &= AR71XX_RESET_GE0_PHY | AR71XX_RESET_GE1_PHY;
		reset_mask &= ~(AR71XX_RESET_GE0_PHY | AR71XX_RESET_GE1_PHY);

		ath79_device_reset_set(reset_phy);
		msleep(50);
		ath79_device_reset_clear(reset_phy);
		msleep(200);
	}

	ag71xx_sb(ag, AG71XX_REG_MAC_CFG1, MAC_CFG1_SR);
	udelay(20);

	ath79_device_reset_set(reset_mask);
	msleep(100);
	ath79_device_reset_clear(reset_mask);
	msleep(200);

	ag71xx_hw_setup(ag);

	ag71xx_dma_reset(ag);
}
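
/*
 * "Fast" reset used on link changes on AR724x: MII_CFG and the current
 * RX/TX descriptor pointers are saved and restored around the MAC
 * reset so the rings do not have to be rebuilt.
 */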
static void ag71xx_fast_reset(struct ag71xx *ag)
{
	struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
	struct net_device *dev = ag->dev;
	u32 reset_mask = pdata->reset_bit;
	u32 rx_ds, tx_ds;
	u32 mii_reg;

	reset_mask &= AR71XX_RESET_GE0_MAC | AR71XX_RESET_GE1_MAC;

	mii_reg = ag71xx_rr(ag, AG71XX_REG_MII_CFG);
	rx_ds = ag71xx_rr(ag, AG71XX_REG_RX_DESC);
	tx_ds = ag71xx_rr(ag, AG71XX_REG_TX_DESC);

	ath79_device_reset_set(reset_mask);
	udelay(10);
	ath79_device_reset_clear(reset_mask);
	udelay(10);

	ag71xx_dma_reset(ag);
	ag71xx_hw_setup(ag);
	ag71xx_tx_packets(ag, true);

	/* setup max frame length */
	ag71xx_wr(ag, AG71XX_REG_MAC_MFL,
		  ag71xx_max_frame_len(ag->dev->mtu));

	ag71xx_wr(ag, AG71XX_REG_RX_DESC, rx_ds);
	ag71xx_wr(ag, AG71XX_REG_TX_DESC, tx_ds);
	ag71xx_wr(ag, AG71XX_REG_MII_CFG, mii_reg);

	ag71xx_hw_set_macaddr(ag, dev->dev_addr);
}

static void ag71xx_hw_start(struct ag71xx *ag)
{
	/* start RX engine */
	ag71xx_wr(ag, AG71XX_REG_RX_CTRL, RX_CTRL_RXE);

	/* enable interrupts */
	ag71xx_wr(ag, AG71XX_REG_INT_ENABLE, AG71XX_INT_INIT);

	netif_wake_queue(ag->dev);
}

static void
__ag71xx_link_adjust(struct ag71xx *ag, bool update)
{
	struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
	u32 cfg2;
	u32 ifctl;
	u32 fifo5;
	u32 fifo3;

	if (!ag->link && update) {
		ag71xx_hw_stop(ag);
		netif_carrier_off(ag->dev);
		if (netif_msg_link(ag))
			pr_info("%s: link down\n", ag->dev->name);
		return;
	}

	if (pdata->is_ar724x)
		ag71xx_fast_reset(ag);

	cfg2 = ag71xx_rr(ag, AG71XX_REG_MAC_CFG2);
	cfg2 &= ~(MAC_CFG2_IF_1000 | MAC_CFG2_IF_10_100 | MAC_CFG2_FDX);
	cfg2 |= (ag->duplex) ? MAC_CFG2_FDX : 0;

	ifctl = ag71xx_rr(ag, AG71XX_REG_MAC_IFCTL);
	ifctl &= ~(MAC_IFCTL_SPEED);

	fifo5 = ag71xx_rr(ag, AG71XX_REG_FIFO_CFG5);
	fifo5 &= ~FIFO_CFG5_BM;

	switch (ag->speed) {
	case SPEED_1000:
		cfg2 |= MAC_CFG2_IF_1000;
		fifo5 |= FIFO_CFG5_BM;
		break;
	case SPEED_100:
		cfg2 |= MAC_CFG2_IF_10_100;
		ifctl |= MAC_IFCTL_SPEED;
		break;
	case SPEED_10:
		cfg2 |= MAC_CFG2_IF_10_100;
		break;
	default:
		BUG();
		return;
	}

	if (pdata->is_ar91xx)
		fifo3 = 0x00780fff;
	else if (pdata->is_ar724x)
		fifo3 = pdata->fifo_cfg3;
	else
		fifo3 = 0x008001ff;

	if (ag->tx_ring.desc_split) {
		fifo3 &= 0xffff;
		fifo3 |= ((2048 - ag->tx_ring.desc_split) / 4) << 16;
	}

	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG3, fifo3);

	if (update && pdata->set_speed)
		pdata->set_speed(ag->speed);

	ag71xx_wr(ag, AG71XX_REG_MAC_CFG2, cfg2);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG5, fifo5);
	ag71xx_wr(ag, AG71XX_REG_MAC_IFCTL, ifctl);
	ag71xx_hw_start(ag);

	netif_carrier_on(ag->dev);
	if (update && netif_msg_link(ag))
		pr_info("%s: link up (%sMbps/%s duplex)\n",
			ag->dev->name,
			ag71xx_speed_str(ag),
			(DUPLEX_FULL == ag->duplex) ? "Full" : "Half");

	DBG("%s: fifo_cfg0=%#x, fifo_cfg1=%#x, fifo_cfg2=%#x\n",
	    ag->dev->name,
	    ag71xx_rr(ag, AG71XX_REG_FIFO_CFG0),
	    ag71xx_rr(ag, AG71XX_REG_FIFO_CFG1),
	    ag71xx_rr(ag, AG71XX_REG_FIFO_CFG2));

	DBG("%s: fifo_cfg3=%#x, fifo_cfg4=%#x, fifo_cfg5=%#x\n",
	    ag->dev->name,
	    ag71xx_rr(ag, AG71XX_REG_FIFO_CFG3),
	    ag71xx_rr(ag, AG71XX_REG_FIFO_CFG4),
	    ag71xx_rr(ag, AG71XX_REG_FIFO_CFG5));

	DBG("%s: mac_cfg2=%#x, mac_ifctl=%#x\n",
	    ag->dev->name,
	    ag71xx_rr(ag, AG71XX_REG_MAC_CFG2),
	    ag71xx_rr(ag, AG71XX_REG_MAC_IFCTL));
}

void ag71xx_link_adjust(struct ag71xx *ag)
{
	__ag71xx_link_adjust(ag, true);
}

static int ag71xx_hw_enable(struct ag71xx *ag)
{
	int ret;

	ret = ag71xx_rings_init(ag);
	if (ret)
		return ret;

	napi_enable(&ag->napi);
	ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->tx_ring.descs_dma);
	ag71xx_wr(ag, AG71XX_REG_RX_DESC, ag->rx_ring.descs_dma);
	netif_start_queue(ag->dev);

	return 0;
}

static void ag71xx_hw_disable(struct ag71xx *ag)
{
	unsigned long flags;

	spin_lock_irqsave(&ag->lock, flags);

	netif_stop_queue(ag->dev);

	ag71xx_hw_stop(ag);
	ag71xx_dma_reset(ag);

	napi_disable(&ag->napi);
	del_timer_sync(&ag->oom_timer);

	spin_unlock_irqrestore(&ag->lock, flags);

	ag71xx_rings_cleanup(ag);
}

static int ag71xx_open(struct net_device *dev)
{
	struct ag71xx *ag = netdev_priv(dev);
	unsigned int max_frame_len;
	int ret;

	netif_carrier_off(dev);
	max_frame_len = ag71xx_max_frame_len(dev->mtu);
	ag->rx_buf_size = max_frame_len + NET_SKB_PAD + NET_IP_ALIGN;

	/* setup max frame length */
	ag71xx_wr(ag, AG71XX_REG_MAC_MFL, max_frame_len);
	ag71xx_hw_set_macaddr(ag, dev->dev_addr);

	ret = ag71xx_hw_enable(ag);
	if (ret)
		goto err;

	ag71xx_phy_start(ag);

	return 0;

err:
	ag71xx_rings_cleanup(ag);
	return ret;
}

static int ag71xx_stop(struct net_device *dev)
{
	struct ag71xx *ag = netdev_priv(dev);

	netif_carrier_off(dev);
	ag71xx_phy_stop(ag);
	ag71xx_hw_disable(ag);

	return 0;
}
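
/*
 * Fill one or more TX descriptors for a linear buffer, chopping it
 * into ring->desc_split sized chunks when splitting is enabled.
 * Returns the number of descriptors used, or a negative value when
 * the ring has no room.
 */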
static int ag71xx_fill_dma_desc(struct ag71xx_ring *ring, u32 addr, int len)
{
	int i;
	struct ag71xx_desc *desc;
	int ndesc = 0;
	int split = ring->desc_split;

	if (!split)
		split = len;

	while (len > 0) {
		unsigned int cur_len = len;

		i = (ring->curr + ndesc) % ring->size;
		desc = ag71xx_ring_desc(ring, i);

		if (!ag71xx_desc_empty(desc))
			return -1;

		if (cur_len > split) {
			cur_len = split;

			/*
			 * TX will hang if DMA transfers <= 4 bytes,
			 * make sure next segment is more than 4 bytes long.
			 */
			if (len <= split + 4)
				cur_len -= 4;
		}

		desc->data = addr;
		addr += cur_len;
		len -= cur_len;

		if (len > 0)
			cur_len |= DESC_MORE;

		/* prevent early tx attempt of this descriptor */
		if (!ndesc)
			cur_len |= DESC_EMPTY;

		desc->ctrl = cur_len;
		ndesc++;
	}

	return ndesc;
}

static netdev_tx_t ag71xx_hard_start_xmit(struct sk_buff *skb,
					  struct net_device *dev)
{
	struct ag71xx *ag = netdev_priv(dev);
	struct ag71xx_ring *ring = &ag->tx_ring;
	struct ag71xx_desc *desc;
	dma_addr_t dma_addr;
	int i, n, ring_min;

	if (ag71xx_has_ar8216(ag))
		ag71xx_add_ar8216_header(ag, skb);

	if (skb->len <= 4) {
		DBG("%s: packet len is too small\n", ag->dev->name);
		goto err_drop;
	}

	dma_addr = dma_map_single(&dev->dev, skb->data, skb->len,
				  DMA_TO_DEVICE);

	i = ring->curr % ring->size;
	desc = ag71xx_ring_desc(ring, i);

	/* setup descriptor fields */
	n = ag71xx_fill_dma_desc(ring, (u32) dma_addr,
				 skb->len & ag->desc_pktlen_mask);
	if (n < 0)
		goto err_drop_unmap;

	i = (ring->curr + n - 1) % ring->size;
	ring->buf[i].len = skb->len;
	ring->buf[i].skb = skb;
	ring->buf[i].timestamp = jiffies;

	netdev_sent_queue(dev, skb->len);

	desc->ctrl &= ~DESC_EMPTY;
	ring->curr += n;

	/* flush descriptor */
	wmb();

	ring_min = 2;
	if (ring->desc_split)
		ring_min *= AG71XX_TX_RING_DS_PER_PKT;

	if (ring->curr - ring->dirty >= ring->size - ring_min) {
		DBG("%s: tx queue full\n", dev->name);
		netif_stop_queue(dev);
	}

	DBG("%s: packet injected into TX queue\n", ag->dev->name);

	/* enable TX engine */
	ag71xx_wr(ag, AG71XX_REG_TX_CTRL, TX_CTRL_TXE);

	return NETDEV_TX_OK;

err_drop_unmap:
	dma_unmap_single(&dev->dev, dma_addr, skb->len, DMA_TO_DEVICE);

err_drop:
	dev->stats.tx_dropped++;

	kfree_skb(skb);
	return NETDEV_TX_OK;
}

static int ag71xx_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct ag71xx *ag = netdev_priv(dev);
	int ret;

	switch (cmd) {
	case SIOCETHTOOL:
		if (ag->phy_dev == NULL)
			break;

		spin_lock_irq(&ag->lock);
		ret = phy_ethtool_ioctl(ag->phy_dev, (void *) ifr->ifr_data);
		spin_unlock_irq(&ag->lock);
		return ret;

	case SIOCSIFHWADDR:
		if (copy_from_user
			(dev->dev_addr, ifr->ifr_data, sizeof(dev->dev_addr)))
			return -EFAULT;
		return 0;

	case SIOCGIFHWADDR:
		if (copy_to_user
			(ifr->ifr_data, dev->dev_addr, sizeof(dev->dev_addr)))
			return -EFAULT;
		return 0;

	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		if (ag->phy_dev == NULL)
			break;

		return phy_mii_ioctl(ag->phy_dev, ifr, cmd);

	default:
		break;
	}

	return -EOPNOTSUPP;
}
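
/*
 * An earlier RX refill ran out of memory; the timer simply reschedules
 * NAPI so the poll handler can retry the refill.
 */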
static void ag71xx_oom_timer_handler(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct ag71xx *ag = netdev_priv(dev);

	napi_schedule(&ag->napi);
}

static void ag71xx_tx_timeout(struct net_device *dev)
{
	struct ag71xx *ag = netdev_priv(dev);

	if (netif_msg_tx_err(ag))
		pr_info("%s: tx timeout\n", ag->dev->name);

	schedule_work(&ag->restart_work);
}

static void ag71xx_restart_work_func(struct work_struct *work)
{
	struct ag71xx *ag = container_of(work, struct ag71xx, restart_work);

	ag71xx_hw_disable(ag);
	ag71xx_hw_enable(ag);

	__ag71xx_link_adjust(ag, false);
}
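
/*
 * Heuristic for the AR724x TX DMA hang: once a descriptor has been
 * pending for more than ~100ms while carrier is up, inspect the DMA
 * state machine and FIFO depth registers. The magic state values below
 * are presumably taken from vendor reference code.
 */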
static bool ag71xx_check_dma_stuck(struct ag71xx *ag, unsigned long timestamp)
{
	u32 rx_sm, tx_sm, rx_fd;

	if (likely(time_before(jiffies, timestamp + HZ/10)))
		return false;

	if (!netif_carrier_ok(ag->dev))
		return false;

	rx_sm = ag71xx_rr(ag, AG71XX_REG_RX_SM);
	if ((rx_sm & 0x7) == 0x3 && ((rx_sm >> 4) & 0x7) == 0x6)
		return true;

	tx_sm = ag71xx_rr(ag, AG71XX_REG_TX_SM);
	rx_fd = ag71xx_rr(ag, AG71XX_REG_FIFO_DEPTH);
	if (((tx_sm >> 4) & 0x7) == 0 && ((rx_sm & 0x7) == 0) &&
	    ((rx_sm >> 4) & 0x7) == 0 && rx_fd == 0)
		return true;

	return false;
}
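
/*
 * Reap completed TX descriptors, free their skbs and update BQL
 * accounting via netdev_completed_queue(). With flush set, descriptors
 * are reclaimed unconditionally (used by ag71xx_fast_reset()).
 */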
static int ag71xx_tx_packets(struct ag71xx *ag, bool flush)
{
	struct ag71xx_ring *ring = &ag->tx_ring;
	struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
	int sent = 0;
	int bytes_compl = 0;
	int n = 0;

	DBG("%s: processing TX ring\n", ag->dev->name);

	while (ring->dirty + n != ring->curr) {
		unsigned int i = (ring->dirty + n) % ring->size;
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);
		struct sk_buff *skb = ring->buf[i].skb;

		if (!flush && !ag71xx_desc_empty(desc)) {
			if (pdata->is_ar724x &&
			    ag71xx_check_dma_stuck(ag, ring->buf[i].timestamp))
				schedule_work(&ag->restart_work);
			break;
		}

		if (flush)
			desc->ctrl |= DESC_EMPTY;

		n++;
		if (!skb)
			continue;

		dev_kfree_skb_any(skb);
		ring->buf[i].skb = NULL;

		bytes_compl += ring->buf[i].len;
		sent++;

		ring->dirty += n;
		while (n > 0) {
			ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_PS);
			n--;
		}
	}

	DBG("%s: %d packets sent out\n", ag->dev->name, sent);

	ag->dev->stats.tx_bytes += bytes_compl;
	ag->dev->stats.tx_packets += sent;

	if (!sent)
		return 0;

	netdev_completed_queue(ag->dev, sent, bytes_compl);
	if ((ring->curr - ring->dirty) < (ring->size * 3) / 4)
		netif_wake_queue(ag->dev);

	return sent;
}
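
/*
 * RX path: buffers are handed to the stack zero-copy via build_skb();
 * the raw buffer is only kfree()d when skb construction fails. Consumed
 * descriptors are refilled afterwards in ag71xx_ring_rx_refill().
 */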
static int ag71xx_rx_packets(struct ag71xx *ag, int limit)
{
	struct net_device *dev = ag->dev;
	struct ag71xx_ring *ring = &ag->rx_ring;
	int offset = ag71xx_buffer_offset(ag);
	unsigned int pktlen_mask = ag->desc_pktlen_mask;
	int done = 0;

	DBG("%s: rx packets, limit=%d, curr=%u, dirty=%u\n",
	    dev->name, limit, ring->curr, ring->dirty);

	while (done < limit) {
		unsigned int i = ring->curr % ring->size;
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);
		struct sk_buff *skb;
		int pktlen;
		int err = 0;

		if (ag71xx_desc_empty(desc))
			break;

		if ((ring->dirty + ring->size) == ring->curr) {
			/* ring is full, should not happen */
			break;
		}

		ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_PR);

		pktlen = desc->ctrl & pktlen_mask;
		pktlen -= ETH_FCS_LEN;

		dma_unmap_single(&dev->dev, ring->buf[i].dma_addr,
				 ag->rx_buf_size, DMA_FROM_DEVICE);

		dev->stats.rx_packets++;
		dev->stats.rx_bytes += pktlen;

		skb = build_skb(ring->buf[i].rx_buf, 0);
		if (!skb) {
			kfree(ring->buf[i].rx_buf);
			goto next;
		}

		skb_reserve(skb, offset);
		skb_put(skb, pktlen);

		if (ag71xx_has_ar8216(ag))
			err = ag71xx_remove_ar8216_header(ag, skb, pktlen);

		if (err) {
			dev->stats.rx_dropped++;
			kfree_skb(skb);
		} else {
			skb->dev = dev;
			skb->ip_summed = CHECKSUM_NONE;
			skb->protocol = eth_type_trans(skb, dev);
			netif_receive_skb(skb);
		}

next:
		ring->buf[i].rx_buf = NULL;
		done++;

		ring->curr++;
	}

	ag71xx_ring_rx_refill(ag);

	DBG("%s: rx finish, curr=%u, dirty=%u, done=%d\n",
	    dev->name, ring->curr, ring->dirty, done);

	return done;
}
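
/*
 * NAPI poll: reap TX first, then RX up to the budget. Interrupts stay
 * disabled as long as there is pending work or an RX overflow; on OOM
 * the oom_timer takes over instead of re-enabling interrupts.
 */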
static int ag71xx_poll(struct napi_struct *napi, int limit)
{
	struct ag71xx *ag = container_of(napi, struct ag71xx, napi);
	struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
	struct net_device *dev = ag->dev;
	struct ag71xx_ring *rx_ring;
	unsigned long flags;
	u32 status;
	int tx_done;
	int rx_done;

	pdata->ddr_flush();
	tx_done = ag71xx_tx_packets(ag, false);

	DBG("%s: processing RX ring\n", dev->name);
	rx_done = ag71xx_rx_packets(ag, limit);

	ag71xx_debugfs_update_napi_stats(ag, rx_done, tx_done);

	rx_ring = &ag->rx_ring;
	if (rx_ring->buf[rx_ring->dirty % rx_ring->size].rx_buf == NULL)
		goto oom;

	status = ag71xx_rr(ag, AG71XX_REG_RX_STATUS);
	if (unlikely(status & RX_STATUS_OF)) {
		ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_OF);
		dev->stats.rx_fifo_errors++;

		/* restart RX */
		ag71xx_wr(ag, AG71XX_REG_RX_CTRL, RX_CTRL_RXE);
	}

	if (rx_done < limit) {
		if (status & RX_STATUS_PR)
			goto more;

		status = ag71xx_rr(ag, AG71XX_REG_TX_STATUS);
		if (status & TX_STATUS_PS)
			goto more;

		DBG("%s: disable polling mode, rx=%d, tx=%d, limit=%d\n",
		    dev->name, rx_done, tx_done, limit);

		napi_complete(napi);

		/* enable interrupts */
		spin_lock_irqsave(&ag->lock, flags);
		ag71xx_int_enable(ag, AG71XX_INT_POLL);
		spin_unlock_irqrestore(&ag->lock, flags);
		return rx_done;
	}

more:
	DBG("%s: stay in polling mode, rx=%d, tx=%d, limit=%d\n",
	    dev->name, rx_done, tx_done, limit);
	return limit;

oom:
	if (netif_msg_rx_err(ag))
		pr_info("%s: out of memory\n", dev->name);

	mod_timer(&ag->oom_timer, jiffies + AG71XX_OOM_REFILL);
	napi_complete(napi);
	return 0;
}

static irqreturn_t ag71xx_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct ag71xx *ag = netdev_priv(dev);
	u32 status;

	status = ag71xx_rr(ag, AG71XX_REG_INT_STATUS);
	ag71xx_dump_intr(ag, "raw", status);

	if (unlikely(!status))
		return IRQ_NONE;

	if (unlikely(status & AG71XX_INT_ERR)) {
		if (status & AG71XX_INT_TX_BE) {
			ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_BE);
			dev_err(&dev->dev, "TX BUS error\n");
		}
		if (status & AG71XX_INT_RX_BE) {
			ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_BE);
			dev_err(&dev->dev, "RX BUS error\n");
		}
	}

	if (likely(status & AG71XX_INT_POLL)) {
		ag71xx_int_disable(ag, AG71XX_INT_POLL);
		DBG("%s: enable polling mode\n", dev->name);
		napi_schedule(&ag->napi);
	}

	ag71xx_debugfs_update_int_stats(ag, status);

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void ag71xx_netpoll(struct net_device *dev)
{
	disable_irq(dev->irq);
	ag71xx_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

static int ag71xx_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ag71xx *ag = netdev_priv(dev);
	unsigned int max_frame_len;

	max_frame_len = ag71xx_max_frame_len(new_mtu);
	if (new_mtu < 68 || max_frame_len > ag->max_frame_len)
		return -EINVAL;

	if (netif_running(dev))
		return -EBUSY;

	dev->mtu = new_mtu;
	return 0;
}

static const struct net_device_ops ag71xx_netdev_ops = {
	.ndo_open		= ag71xx_open,
	.ndo_stop		= ag71xx_stop,
	.ndo_start_xmit		= ag71xx_hard_start_xmit,
	.ndo_do_ioctl		= ag71xx_do_ioctl,
	.ndo_tx_timeout		= ag71xx_tx_timeout,
	.ndo_change_mtu		= ag71xx_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ag71xx_netpoll,
#endif
};

static const char *ag71xx_get_phy_if_mode_name(phy_interface_t mode)
{
	switch (mode) {
	case PHY_INTERFACE_MODE_MII:
		return "MII";
	case PHY_INTERFACE_MODE_GMII:
		return "GMII";
	case PHY_INTERFACE_MODE_RMII:
		return "RMII";
	case PHY_INTERFACE_MODE_RGMII:
		return "RGMII";
	case PHY_INTERFACE_MODE_SGMII:
		return "SGMII";
	default:
		break;
	}

	return "unknown";
}

static int ag71xx_probe(struct platform_device *pdev)
{
	struct net_device *dev;
	struct resource *res;
	struct ag71xx *ag;
	struct ag71xx_platform_data *pdata;
	int err;

	pdata = pdev->dev.platform_data;
	if (!pdata) {
		dev_err(&pdev->dev, "no platform data specified\n");
		err = -ENXIO;
		goto err_out;
	}

	if (pdata->mii_bus_dev == NULL && pdata->phy_mask) {
		dev_err(&pdev->dev, "no MII bus device specified\n");
		err = -EINVAL;
		goto err_out;
	}

	dev = alloc_etherdev(sizeof(*ag));
	if (!dev) {
		dev_err(&pdev->dev, "alloc_etherdev failed\n");
		err = -ENOMEM;
		goto err_out;
	}

	if (!pdata->max_frame_len || !pdata->desc_pktlen_mask)
		return -EINVAL;

	SET_NETDEV_DEV(dev, &pdev->dev);

	ag = netdev_priv(dev);
	ag->pdev = pdev;
	ag->dev = dev;
	ag->msg_enable = netif_msg_init(ag71xx_msg_level,
					AG71XX_DEFAULT_MSG_ENABLE);
	spin_lock_init(&ag->lock);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mac_base");
	if (!res) {
		dev_err(&pdev->dev, "no mac_base resource found\n");
		err = -ENXIO;
		goto err_free_dev;
	}

	ag->mac_base = ioremap_nocache(res->start, res->end - res->start + 1);
	if (!ag->mac_base) {
		dev_err(&pdev->dev, "unable to ioremap mac_base\n");
		err = -ENOMEM;
		goto err_free_dev;
	}

	dev->irq = platform_get_irq(pdev, 0);
	err = request_irq(dev->irq, ag71xx_interrupt,
			  0x0,
			  dev->name, dev);
	if (err) {
		dev_err(&pdev->dev, "unable to request IRQ %d\n", dev->irq);
		goto err_unmap_base;
	}

	dev->base_addr = (unsigned long)ag->mac_base;
	dev->netdev_ops = &ag71xx_netdev_ops;
	dev->ethtool_ops = &ag71xx_ethtool_ops;

	INIT_WORK(&ag->restart_work, ag71xx_restart_work_func);

	init_timer(&ag->oom_timer);
	ag->oom_timer.data = (unsigned long) dev;
	ag->oom_timer.function = ag71xx_oom_timer_handler;

	ag->tx_ring.size = AG71XX_TX_RING_SIZE_DEFAULT;
	ag->rx_ring.size = AG71XX_RX_RING_SIZE_DEFAULT;

	ag->max_frame_len = pdata->max_frame_len;
	ag->desc_pktlen_mask = pdata->desc_pktlen_mask;

	if (!pdata->is_ar724x && !pdata->is_ar91xx) {
		ag->tx_ring.desc_split = AG71XX_TX_RING_SPLIT;
		ag->tx_ring.size *= AG71XX_TX_RING_DS_PER_PKT;
	}
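
	/*
	 * The stop descriptor links to itself, so a DMA engine parked on
	 * it (see ag71xx_dma_reset()) spins in place instead of walking
	 * into ring memory that may be getting torn down.
	 */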
	ag->stop_desc = dma_alloc_coherent(NULL,
		sizeof(struct ag71xx_desc), &ag->stop_desc_dma, GFP_KERNEL);
	if (!ag->stop_desc) {
		err = -ENOMEM;
		goto err_free_irq;
	}

	ag->stop_desc->data = 0;
	ag->stop_desc->ctrl = 0;
	ag->stop_desc->next = (u32) ag->stop_desc_dma;

	memcpy(dev->dev_addr, pdata->mac_addr, ETH_ALEN);

	netif_napi_add(dev, &ag->napi, ag71xx_poll, AG71XX_NAPI_WEIGHT);

	ag71xx_dump_regs(ag);

	ag71xx_hw_init(ag);

	ag71xx_dump_regs(ag);

	err = ag71xx_phy_connect(ag);
	if (err)
		goto err_free_desc;

	err = ag71xx_debugfs_init(ag);
	if (err)
		goto err_phy_disconnect;

	platform_set_drvdata(pdev, dev);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "unable to register net device\n");
		goto err_debugfs_exit;
	}

	pr_info("%s: Atheros AG71xx at 0x%08lx, irq %d, mode:%s\n",
		dev->name, dev->base_addr, dev->irq,
		ag71xx_get_phy_if_mode_name(pdata->phy_if_mode));

	return 0;

err_debugfs_exit:
	ag71xx_debugfs_exit(ag);
err_phy_disconnect:
	ag71xx_phy_disconnect(ag);
err_free_desc:
	dma_free_coherent(NULL, sizeof(struct ag71xx_desc), ag->stop_desc,
			  ag->stop_desc_dma);
err_free_irq:
	free_irq(dev->irq, dev);
err_unmap_base:
	iounmap(ag->mac_base);
err_free_dev:
	free_netdev(dev);
err_out:
	platform_set_drvdata(pdev, NULL);
	return err;
}

static int ag71xx_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);

	if (dev) {
		struct ag71xx *ag = netdev_priv(dev);

		ag71xx_debugfs_exit(ag);
		ag71xx_phy_disconnect(ag);
		unregister_netdev(dev);
		free_irq(dev->irq, dev);
		iounmap(ag->mac_base);
		free_netdev(dev);
	}

	platform_set_drvdata(pdev, NULL);

	return 0;
}

static struct platform_driver ag71xx_driver = {
	.probe		= ag71xx_probe,
	.remove		= ag71xx_remove,
	.driver = {
		.name	= AG71XX_DRV_NAME,
	},
};

static int __init ag71xx_module_init(void)
{
	int ret;

	ret = ag71xx_debugfs_root_init();
	if (ret)
		goto err_out;

	ret = ag71xx_mdio_driver_init();
	if (ret)
		goto err_debugfs_exit;

	ret = platform_driver_register(&ag71xx_driver);
	if (ret)
		goto err_mdio_exit;

	return 0;

err_mdio_exit:
	ag71xx_mdio_driver_exit();
err_debugfs_exit:
	ag71xx_debugfs_root_exit();
err_out:
	return ret;
}

static void __exit ag71xx_module_exit(void)
{
	platform_driver_unregister(&ag71xx_driver);
	ag71xx_mdio_driver_exit();
	ag71xx_debugfs_root_exit();
}

module_init(ag71xx_module_init);
module_exit(ag71xx_module_exit);

MODULE_VERSION(AG71XX_DRV_VERSION);
MODULE_AUTHOR("Gabor Juhos <juhosg@openwrt.org>");
MODULE_AUTHOR("Imre Kaloz <kaloz@openwrt.org>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" AG71XX_DRV_NAME);