/*
 * Atheros AR71xx built-in ethernet mac driver
 *
 * Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
 * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
 *
 * Based on Atheros' AG7100 driver
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include "ag71xx.h"

#define AG71XX_DEFAULT_MSG_ENABLE	\
	(NETIF_MSG_DRV			\
	| NETIF_MSG_PROBE		\
	| NETIF_MSG_LINK		\
	| NETIF_MSG_TIMER		\
	| NETIF_MSG_IFDOWN		\
	| NETIF_MSG_IFUP		\
	| NETIF_MSG_RX_ERR		\
	| NETIF_MSG_TX_ERR)
static int ag71xx_msg_level = -1;

module_param_named(msg_level, ag71xx_msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
#define ETH_SWITCH_HEADER_LEN	2

static inline unsigned int ag71xx_max_frame_len(unsigned int mtu)
{
	return ETH_SWITCH_HEADER_LEN + ETH_HLEN + VLAN_HLEN + mtu + ETH_FCS_LEN;
}
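/*
 * Example: with the standard 1500-byte MTU this evaluates to
 * 2 (switch header) + 14 (ETH_HLEN) + 4 (VLAN_HLEN) + 1500 + 4 (ETH_FCS_LEN)
 * = 1524 bytes, the largest frame the MAC has to accept.
 */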
static void ag71xx_dump_dma_regs(struct ag71xx *ag)
{
	DBG("%s: dma_tx_ctrl=%08x, dma_tx_desc=%08x, dma_tx_status=%08x\n",
	    ag->dev->name,
	    ag71xx_rr(ag, AG71XX_REG_TX_CTRL),
	    ag71xx_rr(ag, AG71XX_REG_TX_DESC),
	    ag71xx_rr(ag, AG71XX_REG_TX_STATUS));

	DBG("%s: dma_rx_ctrl=%08x, dma_rx_desc=%08x, dma_rx_status=%08x\n",
	    ag->dev->name,
	    ag71xx_rr(ag, AG71XX_REG_RX_CTRL),
	    ag71xx_rr(ag, AG71XX_REG_RX_DESC),
	    ag71xx_rr(ag, AG71XX_REG_RX_STATUS));
}
static void ag71xx_dump_regs(struct ag71xx *ag)
{
	DBG("%s: mac_cfg1=%08x, mac_cfg2=%08x, ipg=%08x, hdx=%08x, mfl=%08x\n",
	    ag->dev->name,
	    ag71xx_rr(ag, AG71XX_REG_MAC_CFG1),
	    ag71xx_rr(ag, AG71XX_REG_MAC_CFG2),
	    ag71xx_rr(ag, AG71XX_REG_MAC_IPG),
	    ag71xx_rr(ag, AG71XX_REG_MAC_HDX),
	    ag71xx_rr(ag, AG71XX_REG_MAC_MFL));
	DBG("%s: mac_ifctl=%08x, mac_addr1=%08x, mac_addr2=%08x\n",
	    ag->dev->name,
	    ag71xx_rr(ag, AG71XX_REG_MAC_IFCTL),
	    ag71xx_rr(ag, AG71XX_REG_MAC_ADDR1),
	    ag71xx_rr(ag, AG71XX_REG_MAC_ADDR2));
	DBG("%s: fifo_cfg0=%08x, fifo_cfg1=%08x, fifo_cfg2=%08x\n",
	    ag->dev->name,
	    ag71xx_rr(ag, AG71XX_REG_FIFO_CFG0),
	    ag71xx_rr(ag, AG71XX_REG_FIFO_CFG1),
	    ag71xx_rr(ag, AG71XX_REG_FIFO_CFG2));
	DBG("%s: fifo_cfg3=%08x, fifo_cfg4=%08x, fifo_cfg5=%08x\n",
	    ag->dev->name,
	    ag71xx_rr(ag, AG71XX_REG_FIFO_CFG3),
	    ag71xx_rr(ag, AG71XX_REG_FIFO_CFG4),
	    ag71xx_rr(ag, AG71XX_REG_FIFO_CFG5));
}
static inline void ag71xx_dump_intr(struct ag71xx *ag, char *label, u32 intr)
{
	DBG("%s: %s intr=%08x %s%s%s%s%s%s\n",
	    ag->dev->name, label, intr,
	    (intr & AG71XX_INT_TX_PS) ? "TXPS " : "",
	    (intr & AG71XX_INT_TX_UR) ? "TXUR " : "",
	    (intr & AG71XX_INT_TX_BE) ? "TXBE " : "",
	    (intr & AG71XX_INT_RX_PR) ? "RXPR " : "",
	    (intr & AG71XX_INT_RX_OF) ? "RXOF " : "",
	    (intr & AG71XX_INT_RX_BE) ? "RXBE " : "");
}
static void ag71xx_ring_free(struct ag71xx_ring *ring)
{
	kfree(ring->buf);

	if (ring->descs_cpu)
		dma_free_coherent(NULL, ring->size * ring->desc_size,
				  ring->descs_cpu, ring->descs_dma);
}
static int ag71xx_ring_alloc(struct ag71xx_ring *ring)
{
	int err;

	ring->desc_size = sizeof(struct ag71xx_desc);
	if (ring->desc_size % cache_line_size()) {
		DBG("ag71xx: ring %p, desc size %u rounded to %u\n",
		    ring, ring->desc_size,
		    roundup(ring->desc_size, cache_line_size()));
		ring->desc_size = roundup(ring->desc_size, cache_line_size());
	}

	ring->descs_cpu = dma_alloc_coherent(NULL, ring->size * ring->desc_size,
					     &ring->descs_dma, GFP_ATOMIC);
	if (!ring->descs_cpu) {
		err = -ENOMEM;
		goto err;
	}

	ring->buf = kzalloc(ring->size * sizeof(*ring->buf), GFP_KERNEL);
	if (!ring->buf) {
		err = -ENOMEM;
		goto err;
	}

	return 0;

err:
	return err;
}
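/*
 * Note: descriptors are fetched by the MAC over DMA, so desc_size is
 * padded up to a cache-line multiple above; the likely intent is to keep
 * two descriptors from ever sharing a cache line on these non-coherent
 * MIPS systems.
 */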
static void ag71xx_ring_tx_clean(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->tx_ring;
	struct net_device *dev = ag->dev;
	u32 bytes_compl = 0, pkts_compl = 0;

	while (ring->curr != ring->dirty) {
		struct ag71xx_desc *desc;
		u32 i = ring->dirty % ring->size;

		desc = ag71xx_ring_desc(ring, i);
		if (!ag71xx_desc_empty(desc)) {
			desc->ctrl = 0;
			dev->stats.tx_errors++;
		}

		if (ring->buf[i].skb) {
			bytes_compl += ring->buf[i].len;
			pkts_compl++;
			dev_kfree_skb_any(ring->buf[i].skb);
		}
		ring->buf[i].skb = NULL;
		ring->dirty++;
	}

	/* flush descriptors */
	wmb();

	netdev_completed_queue(dev, pkts_compl, bytes_compl);
}
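/*
 * Ring accounting used throughout this file: curr and dirty are free
 * running counters that only ever increase. curr marks the next slot the
 * driver will fill, dirty the oldest slot the hardware may still own.
 * The actual ring index is counter % ring->size, and curr - dirty is the
 * number of in-flight descriptors.
 */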
static void ag71xx_ring_tx_init(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->tx_ring;
	int i;

	for (i = 0; i < ring->size; i++) {
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);

		desc->next = (u32) (ring->descs_dma +
			ring->desc_size * ((i + 1) % ring->size));

		desc->ctrl = DESC_EMPTY;
		ring->buf[i].skb = NULL;
	}

	/* flush descriptors */
	wmb();

	ring->curr = 0;
	ring->dirty = 0;
	netdev_reset_queue(ag->dev);
}
static void ag71xx_ring_rx_clean(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->rx_ring;
	int i;

	if (!ring->buf)
		return;

	for (i = 0; i < ring->size; i++)
		if (ring->buf[i].rx_buf) {
			dma_unmap_single(&ag->dev->dev, ring->buf[i].dma_addr,
					 ag->rx_buf_size, DMA_FROM_DEVICE);
			kfree(ring->buf[i].rx_buf);
		}
}
static int ag71xx_buffer_offset(struct ag71xx *ag)
{
	int offset = NET_SKB_PAD;

	/*
	 * On AR71xx/AR91xx packets must be 4-byte aligned.
	 *
	 * When using builtin AR8216 support, hardware adds a 2-byte header,
	 * so we don't need any extra alignment in that case.
	 */
	if (!ag71xx_get_pdata(ag)->is_ar724x || ag71xx_has_ar8216(ag))
		return offset;

	return offset + NET_IP_ALIGN;
}
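/*
 * Example: on an AR724x without the AR8216 header, the DMA address is
 * advanced by NET_SKB_PAD + NET_IP_ALIGN (typically 32 + 2), so the
 * 14-byte Ethernet header ends on a 4-byte boundary and the IP header
 * that follows it is naturally aligned.
 */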
static bool ag71xx_fill_rx_buf(struct ag71xx *ag, struct ag71xx_buf *buf,
			       int offset)
{
	struct ag71xx_ring *ring = &ag->rx_ring;
	struct ag71xx_desc *desc = ag71xx_ring_desc(ring, buf - &ring->buf[0]);
	void *data;

	data = kmalloc(ag->rx_buf_size +
		       SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
		       GFP_ATOMIC);
	if (!data)
		return false;

	buf->rx_buf = data;
	buf->dma_addr = dma_map_single(&ag->dev->dev, data, ag->rx_buf_size,
				       DMA_FROM_DEVICE);
	desc->data = (u32) buf->dma_addr + offset;
	return true;
}
static int ag71xx_ring_rx_init(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->rx_ring;
	unsigned int i;
	int ret;
	int offset = ag71xx_buffer_offset(ag);

	ret = 0;
	for (i = 0; i < ring->size; i++) {
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);

		desc->next = (u32) (ring->descs_dma +
			ring->desc_size * ((i + 1) % ring->size));

		DBG("ag71xx: RX desc at %p, next is %08x\n",
		    desc, desc->next);
	}

	for (i = 0; i < ring->size; i++) {
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);

		if (!ag71xx_fill_rx_buf(ag, &ring->buf[i], offset)) {
			ret = -ENOMEM;
			break;
		}

		desc->ctrl = DESC_EMPTY;
	}

	/* flush descriptors */
	wmb();

	ring->curr = 0;
	ring->dirty = 0;

	return ret;
}
static int ag71xx_ring_rx_refill(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->rx_ring;
	unsigned int count;
	int offset = ag71xx_buffer_offset(ag);

	count = 0;
	for (; ring->curr - ring->dirty > 0; ring->dirty++) {
		struct ag71xx_desc *desc;
		unsigned int i;

		i = ring->dirty % ring->size;
		desc = ag71xx_ring_desc(ring, i);

		if (!ring->buf[i].rx_buf &&
		    !ag71xx_fill_rx_buf(ag, &ring->buf[i], offset))
			break;

		desc->ctrl = DESC_EMPTY;
		count++;
	}

	/* flush descriptors */
	wmb();

	DBG("%s: %u rx descriptors refilled\n", ag->dev->name, count);

	return count;
}
static int ag71xx_rings_init(struct ag71xx *ag)
{
	int ret;

	ret = ag71xx_ring_alloc(&ag->tx_ring);
	if (ret)
		return ret;

	ag71xx_ring_tx_init(ag);

	ret = ag71xx_ring_alloc(&ag->rx_ring);
	if (ret)
		return ret;

	ret = ag71xx_ring_rx_init(ag);
	return ret;
}
static void ag71xx_rings_cleanup(struct ag71xx *ag)
{
	ag71xx_ring_rx_clean(ag);
	ag71xx_ring_free(&ag->rx_ring);

	ag71xx_ring_tx_clean(ag);
	netdev_reset_queue(ag->dev);
	ag71xx_ring_free(&ag->tx_ring);
}
static unsigned char *ag71xx_speed_str(struct ag71xx *ag)
{
	switch (ag->speed) {
	case SPEED_1000:
		return "1000";
	case SPEED_100:
		return "100";
	case SPEED_10:
		return "10";
	}

	return "?";
}
static void ag71xx_hw_set_macaddr(struct ag71xx *ag, unsigned char *mac)
{
	u32 t;

	t = (((u32) mac[5]) << 24) | (((u32) mac[4]) << 16)
	  | (((u32) mac[3]) << 8) | ((u32) mac[2]);

	ag71xx_wr(ag, AG71XX_REG_MAC_ADDR1, t);

	t = (((u32) mac[1]) << 24) | (((u32) mac[0]) << 16);
	ag71xx_wr(ag, AG71XX_REG_MAC_ADDR2, t);
}
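/*
 * Example: for the address 00:11:22:33:44:55 the writes above are
 * MAC_ADDR1 = 0x55443322 (bytes 5..2) and MAC_ADDR2 = 0x11000000
 * (bytes 1..0 in the upper half; the low 16 bits stay zero).
 */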
static void ag71xx_dma_reset(struct ag71xx *ag)
{
	u32 val;
	int i;

	ag71xx_dump_dma_regs(ag);

	/* stop RX and TX */
	ag71xx_wr(ag, AG71XX_REG_RX_CTRL, 0);
	ag71xx_wr(ag, AG71XX_REG_TX_CTRL, 0);

	/*
	 * give the hardware some time to really stop all rx/tx activity
	 * clearing the descriptors too early causes random memory corruption
	 */
	mdelay(1);

	/* clear descriptor addresses */
	ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->stop_desc_dma);
	ag71xx_wr(ag, AG71XX_REG_RX_DESC, ag->stop_desc_dma);

	/* clear pending RX/TX interrupts */
	for (i = 0; i < 256; i++) {
		ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_PR);
		ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_PS);
	}

	/* clear pending errors */
	ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_BE | RX_STATUS_OF);
	ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_BE | TX_STATUS_UR);

	val = ag71xx_rr(ag, AG71XX_REG_RX_STATUS);
	if (val)
		pr_alert("%s: unable to clear DMA Rx status: %08x\n",
			 ag->dev->name, val);

	val = ag71xx_rr(ag, AG71XX_REG_TX_STATUS);

	/* mask out reserved bits */
	val &= ~0xff000000;

	if (val)
		pr_alert("%s: unable to clear DMA Tx status: %08x\n",
			 ag->dev->name, val);

	ag71xx_dump_dma_regs(ag);
}
#define MAC_CFG1_INIT	(MAC_CFG1_RXE | MAC_CFG1_TXE | \
			 MAC_CFG1_SRX | MAC_CFG1_STX)

#define FIFO_CFG0_INIT	(FIFO_CFG0_ALL << FIFO_CFG0_ENABLE_SHIFT)

#define FIFO_CFG4_INIT	(FIFO_CFG4_DE | FIFO_CFG4_DV | FIFO_CFG4_FC | \
			 FIFO_CFG4_CE | FIFO_CFG4_CR | FIFO_CFG4_LM | \
			 FIFO_CFG4_LO | FIFO_CFG4_OK | FIFO_CFG4_MC | \
			 FIFO_CFG4_BC | FIFO_CFG4_DR | FIFO_CFG4_LE | \
			 FIFO_CFG4_CF | FIFO_CFG4_PF | FIFO_CFG4_UO | \
			 FIFO_CFG4_VT)

#define FIFO_CFG5_INIT	(FIFO_CFG5_DE | FIFO_CFG5_DV | FIFO_CFG5_FC | \
			 FIFO_CFG5_CE | FIFO_CFG5_LO | FIFO_CFG5_OK | \
			 FIFO_CFG5_MC | FIFO_CFG5_BC | FIFO_CFG5_DR | \
			 FIFO_CFG5_CF | FIFO_CFG5_PF | FIFO_CFG5_VT | \
			 FIFO_CFG5_LE | FIFO_CFG5_FT | FIFO_CFG5_16 | \
			 FIFO_CFG5_17 | FIFO_CFG5_SF)
static void ag71xx_hw_stop(struct ag71xx *ag)
{
	/* disable all interrupts and stop the rx/tx engine */
	ag71xx_wr(ag, AG71XX_REG_INT_ENABLE, 0);
	ag71xx_wr(ag, AG71XX_REG_RX_CTRL, 0);
	ag71xx_wr(ag, AG71XX_REG_TX_CTRL, 0);
}
static void ag71xx_hw_setup(struct ag71xx *ag)
{
	struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);

	/* setup MAC configuration registers */
	ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, MAC_CFG1_INIT);

	ag71xx_sb(ag, AG71XX_REG_MAC_CFG2,
		  MAC_CFG2_PAD_CRC_EN | MAC_CFG2_LEN_CHECK);

	/* setup max frame length to zero */
	ag71xx_wr(ag, AG71XX_REG_MAC_MFL, 0);

	/* setup FIFO configuration registers */
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG0, FIFO_CFG0_INIT);
	if (pdata->is_ar724x) {
		ag71xx_wr(ag, AG71XX_REG_FIFO_CFG1, pdata->fifo_cfg1);
		ag71xx_wr(ag, AG71XX_REG_FIFO_CFG2, pdata->fifo_cfg2);
	} else {
		ag71xx_wr(ag, AG71XX_REG_FIFO_CFG1, 0x0fff0000);
		ag71xx_wr(ag, AG71XX_REG_FIFO_CFG2, 0x00001fff);
	}
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG4, FIFO_CFG4_INIT);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG5, FIFO_CFG5_INIT);
}
static void ag71xx_hw_init(struct ag71xx *ag)
{
	struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
	u32 reset_mask = pdata->reset_bit;

	ag71xx_hw_stop(ag);

	if (pdata->is_ar724x) {
		u32 reset_phy = reset_mask;

		reset_phy &= AR71XX_RESET_GE0_PHY | AR71XX_RESET_GE1_PHY;
		reset_mask &= ~(AR71XX_RESET_GE0_PHY | AR71XX_RESET_GE1_PHY);

		ath79_device_reset_set(reset_phy);
		mdelay(50);
		ath79_device_reset_clear(reset_phy);
		mdelay(200);
	}

	ag71xx_sb(ag, AG71XX_REG_MAC_CFG1, MAC_CFG1_SR);
	udelay(20);

	ath79_device_reset_set(reset_mask);
	mdelay(100);
	ath79_device_reset_clear(reset_mask);
	mdelay(200);

	ag71xx_hw_setup(ag);

	ag71xx_dma_reset(ag);
}
static void ag71xx_fast_reset(struct ag71xx *ag)
{
	struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
	struct net_device *dev = ag->dev;
	u32 reset_mask = pdata->reset_bit;
	u32 rx_ds, tx_ds;
	u32 mii_reg;

	reset_mask &= AR71XX_RESET_GE0_MAC | AR71XX_RESET_GE1_MAC;

	mii_reg = ag71xx_rr(ag, AG71XX_REG_MII_CFG);
	rx_ds = ag71xx_rr(ag, AG71XX_REG_RX_DESC);
	tx_ds = ag71xx_rr(ag, AG71XX_REG_TX_DESC);

	ath79_device_reset_set(reset_mask);
	udelay(10);
	ath79_device_reset_clear(reset_mask);
	udelay(10);

	ag71xx_dma_reset(ag);
	ag71xx_hw_setup(ag);

	/* setup max frame length */
	ag71xx_wr(ag, AG71XX_REG_MAC_MFL,
		  ag71xx_max_frame_len(ag->dev->mtu));

	ag71xx_wr(ag, AG71XX_REG_RX_DESC, rx_ds);
	ag71xx_wr(ag, AG71XX_REG_TX_DESC, tx_ds);
	ag71xx_wr(ag, AG71XX_REG_MII_CFG, mii_reg);

	ag71xx_hw_set_macaddr(ag, dev->dev_addr);
}
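/*
 * The point of the save/restore dance above: a fast reset clears the MAC
 * block, so the MII clock configuration and the current RX/TX descriptor
 * pointers are read out first and written back afterwards, letting the
 * DMA engine resume on the very rings it was using before the reset.
 */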
static void ag71xx_hw_start(struct ag71xx *ag)
{
	/* start RX engine */
	ag71xx_wr(ag, AG71XX_REG_RX_CTRL, RX_CTRL_RXE);

	/* enable interrupts */
	ag71xx_wr(ag, AG71XX_REG_INT_ENABLE, AG71XX_INT_INIT);
}
void ag71xx_link_adjust(struct ag71xx *ag)
{
	struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
	u32 cfg2;
	u32 ifctl;
	u32 fifo5;
	u32 fifo3;

	if (!ag->link) {
		ag71xx_hw_stop(ag);
		netif_carrier_off(ag->dev);
		if (netif_msg_link(ag))
			pr_info("%s: link down\n", ag->dev->name);
		return;
	}

	if (pdata->is_ar724x)
		ag71xx_fast_reset(ag);

	cfg2 = ag71xx_rr(ag, AG71XX_REG_MAC_CFG2);
	cfg2 &= ~(MAC_CFG2_IF_1000 | MAC_CFG2_IF_10_100 | MAC_CFG2_FDX);
	cfg2 |= (ag->duplex) ? MAC_CFG2_FDX : 0;

	ifctl = ag71xx_rr(ag, AG71XX_REG_MAC_IFCTL);
	ifctl &= ~(MAC_IFCTL_SPEED);

	fifo5 = ag71xx_rr(ag, AG71XX_REG_FIFO_CFG5);
	fifo5 &= ~FIFO_CFG5_BM;

	switch (ag->speed) {
	case SPEED_1000:
		cfg2 |= MAC_CFG2_IF_1000;
		fifo5 |= FIFO_CFG5_BM;
		break;
	case SPEED_100:
		cfg2 |= MAC_CFG2_IF_10_100;
		ifctl |= MAC_IFCTL_SPEED;
		break;
	case SPEED_10:
		cfg2 |= MAC_CFG2_IF_10_100;
		break;
	default:
		BUG();
		return;
	}

	if (pdata->is_ar91xx)
		fifo3 = 0x00780fff;
	else if (pdata->is_ar724x)
		fifo3 = pdata->fifo_cfg3;
	else
		fifo3 = 0x008001ff;

	if (ag->tx_ring.desc_split) {
		fifo3 &= 0xffff;
		fifo3 |= ((2048 - ag->tx_ring.desc_split) / 4) << 16;
	}

	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG3, fifo3);

	if (pdata->set_speed)
		pdata->set_speed(ag->speed);

	ag71xx_wr(ag, AG71XX_REG_MAC_CFG2, cfg2);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG5, fifo5);
	ag71xx_wr(ag, AG71XX_REG_MAC_IFCTL, ifctl);
	ag71xx_hw_start(ag);

	netif_carrier_on(ag->dev);
	if (netif_msg_link(ag))
		pr_info("%s: link up (%sMbps/%s duplex)\n",
			ag->dev->name,
			ag71xx_speed_str(ag),
			(DUPLEX_FULL == ag->duplex) ? "Full" : "Half");

	DBG("%s: fifo_cfg0=%#x, fifo_cfg1=%#x, fifo_cfg2=%#x\n",
	    ag->dev->name,
	    ag71xx_rr(ag, AG71XX_REG_FIFO_CFG0),
	    ag71xx_rr(ag, AG71XX_REG_FIFO_CFG1),
	    ag71xx_rr(ag, AG71XX_REG_FIFO_CFG2));

	DBG("%s: fifo_cfg3=%#x, fifo_cfg4=%#x, fifo_cfg5=%#x\n",
	    ag->dev->name,
	    ag71xx_rr(ag, AG71XX_REG_FIFO_CFG3),
	    ag71xx_rr(ag, AG71XX_REG_FIFO_CFG4),
	    ag71xx_rr(ag, AG71XX_REG_FIFO_CFG5));

	DBG("%s: mac_cfg2=%#x, mac_ifctl=%#x\n",
	    ag->dev->name,
	    ag71xx_rr(ag, AG71XX_REG_MAC_CFG2),
	    ag71xx_rr(ag, AG71XX_REG_MAC_IFCTL));
}
static int ag71xx_open(struct net_device *dev)
{
	struct ag71xx *ag = netdev_priv(dev);
	unsigned int max_frame_len;
	int ret;

	max_frame_len = ag71xx_max_frame_len(dev->mtu);
	ag->rx_buf_size = max_frame_len + NET_SKB_PAD + NET_IP_ALIGN;

	/* setup max frame length */
	ag71xx_wr(ag, AG71XX_REG_MAC_MFL, max_frame_len);

	ret = ag71xx_rings_init(ag);
	if (ret)
		goto err;

	napi_enable(&ag->napi);

	netif_carrier_off(dev);
	ag71xx_phy_start(ag);

	ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->tx_ring.descs_dma);
	ag71xx_wr(ag, AG71XX_REG_RX_DESC, ag->rx_ring.descs_dma);

	ag71xx_hw_set_macaddr(ag, dev->dev_addr);

	netif_start_queue(dev);

	return 0;

err:
	ag71xx_rings_cleanup(ag);
	return ret;
}
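/*
 * Note: rx_buf_size set in ag71xx_open() covers the largest frame plus
 * the NET_SKB_PAD + NET_IP_ALIGN headroom that ag71xx_buffer_offset()
 * may apply, so a maximum-length frame still fits behind the DMA offset.
 */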
static int ag71xx_stop(struct net_device *dev)
{
	struct ag71xx *ag = netdev_priv(dev);
	unsigned long flags;

	netif_carrier_off(dev);
	ag71xx_phy_stop(ag);

	spin_lock_irqsave(&ag->lock, flags);

	netif_stop_queue(dev);

	ag71xx_hw_stop(ag);
	ag71xx_dma_reset(ag);

	napi_disable(&ag->napi);
	del_timer_sync(&ag->oom_timer);

	spin_unlock_irqrestore(&ag->lock, flags);

	ag71xx_rings_cleanup(ag);

	return 0;
}
static int ag71xx_fill_dma_desc(struct ag71xx_ring *ring, u32 addr, int len)
{
	int i;
	struct ag71xx_desc *desc;
	int ndesc = 0;
	int split = ring->desc_split;

	if (!split)
		split = len;

	while (len > 0) {
		unsigned int cur_len = len;

		i = (ring->curr + ndesc) % ring->size;
		desc = ag71xx_ring_desc(ring, i);

		if (!ag71xx_desc_empty(desc))
			return -1;

		if (cur_len > split) {
			cur_len = split;

			/*
			 * TX will hang if DMA transfers <= 4 bytes,
			 * make sure next segment is more than 4 bytes long.
			 */
			if (len <= split + 4)
				cur_len -= 4;
		}

		desc->data = addr;
		addr += cur_len;
		len -= cur_len;

		if (len > 0)
			cur_len |= DESC_MORE;

		/* prevent early tx attempt of this descriptor */
		if (!ndesc)
			cur_len |= DESC_EMPTY;

		desc->ctrl = cur_len;
		ndesc++;
	}

	return ndesc;
}
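/*
 * Split example: with desc_split = 512, a 514-byte frame is not cut
 * 512 + 2, because the 2-byte tail would stall the DMA engine; since
 * len (514) <= split + 4, the first segment is shortened to 508 and
 * the remaining 6 bytes stay safely above the 4-byte minimum.
 */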
static netdev_tx_t ag71xx_hard_start_xmit(struct sk_buff *skb,
					  struct net_device *dev)
{
	struct ag71xx *ag = netdev_priv(dev);
	struct ag71xx_ring *ring = &ag->tx_ring;
	struct ag71xx_desc *desc;
	dma_addr_t dma_addr;
	int i, n, ring_min;

	if (ag71xx_has_ar8216(ag))
		ag71xx_add_ar8216_header(ag, skb);

	if (skb->len <= 4) {
		DBG("%s: packet len is too small\n", ag->dev->name);
		goto err_drop;
	}

	dma_addr = dma_map_single(&dev->dev, skb->data, skb->len,
				  DMA_TO_DEVICE);

	i = ring->curr % ring->size;
	desc = ag71xx_ring_desc(ring, i);

	/* setup descriptor fields */
	n = ag71xx_fill_dma_desc(ring, (u32) dma_addr,
				 skb->len & ag->desc_pktlen_mask);
	if (n < 0)
		goto err_drop_unmap;

	i = (ring->curr + n - 1) % ring->size;
	ring->buf[i].len = skb->len;
	ring->buf[i].skb = skb;
	ring->buf[i].timestamp = jiffies;

	netdev_sent_queue(dev, skb->len);

	desc->ctrl &= ~DESC_EMPTY;
	ring->curr += n;

	/* flush descriptor */
	wmb();

	ring_min = 2;
	if (ring->desc_split)
		ring_min *= AG71XX_TX_RING_DS_PER_PKT;

	if (ring->curr - ring->dirty >= ring->size - ring_min) {
		DBG("%s: tx queue full\n", dev->name);
		netif_stop_queue(dev);
	}

	DBG("%s: packet injected into TX queue\n", ag->dev->name);

	/* enable TX engine */
	ag71xx_wr(ag, AG71XX_REG_TX_CTRL, TX_CTRL_TXE);

	return NETDEV_TX_OK;

err_drop_unmap:
	dma_unmap_single(&dev->dev, dma_addr, skb->len, DMA_TO_DEVICE);

err_drop:
	dev->stats.tx_dropped++;

	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
static int ag71xx_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct ag71xx *ag = netdev_priv(dev);
	int ret;

	switch (cmd) {
	case SIOCETHTOOL:
		if (ag->phy_dev == NULL)
			break;

		spin_lock_irq(&ag->lock);
		ret = phy_ethtool_ioctl(ag->phy_dev, (void *) ifr->ifr_data);
		spin_unlock_irq(&ag->lock);
		return ret;

	case SIOCSIFHWADDR:
		if (copy_from_user
			(dev->dev_addr, ifr->ifr_data, sizeof(dev->dev_addr)))
			return -EFAULT;
		return 0;

	case SIOCGIFHWADDR:
		if (copy_to_user
			(ifr->ifr_data, dev->dev_addr, sizeof(dev->dev_addr)))
			return -EFAULT;
		return 0;

	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		if (ag->phy_dev == NULL)
			break;

		return phy_mii_ioctl(ag->phy_dev, ifr, cmd);

	default:
		break;
	}

	return -EOPNOTSUPP;
}
static void ag71xx_oom_timer_handler(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct ag71xx *ag = netdev_priv(dev);

	napi_schedule(&ag->napi);
}
static void ag71xx_tx_timeout(struct net_device *dev)
{
	struct ag71xx *ag = netdev_priv(dev);

	if (netif_msg_tx_err(ag))
		pr_info("%s: tx timeout\n", ag->dev->name);

	schedule_work(&ag->restart_work);
}
static void ag71xx_restart_work_func(struct work_struct *work)
{
	struct ag71xx *ag = container_of(work, struct ag71xx, restart_work);

	if (ag71xx_get_pdata(ag)->is_ar724x) {
		ag->link = 0;
		ag71xx_link_adjust(ag);
		return;
	}

	ag71xx_stop(ag->dev);
	ag71xx_open(ag->dev);
}
static bool ag71xx_check_dma_stuck(struct ag71xx *ag, unsigned long timestamp)
{
	u32 rx_sm, tx_sm, rx_fd;

	if (likely(time_before(jiffies, timestamp + HZ/10)))
		return false;

	if (!netif_carrier_ok(ag->dev))
		return false;

	rx_sm = ag71xx_rr(ag, AG71XX_REG_RX_SM);
	if ((rx_sm & 0x7) == 0x3 && ((rx_sm >> 4) & 0x7) == 0x6)
		return true;

	tx_sm = ag71xx_rr(ag, AG71XX_REG_TX_SM);
	rx_fd = ag71xx_rr(ag, AG71XX_REG_FIFO_DEPTH);
	if (((tx_sm >> 4) & 0x7) == 0 && ((rx_sm & 0x7) == 0) &&
	    ((rx_sm >> 4) & 0x7) == 0 && rx_fd == 0)
		return true;

	return false;
}
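/*
 * Heuristic notes: the check only fires for a descriptor pending longer
 * than HZ/10 (~100 ms) with carrier up. rx_sm, tx_sm and rx_fd expose
 * DMA state-machine and FIFO-depth fields; the two bit patterns tested
 * are the states this driver treats as a wedged engine, and the caller
 * (ag71xx_tx_packets) reacts by scheduling restart_work.
 */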
static int ag71xx_tx_packets(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->tx_ring;
	struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
	int sent = 0;
	int bytes_compl = 0;
	int n = 0;

	DBG("%s: processing TX ring\n", ag->dev->name);

	while (ring->dirty + n != ring->curr) {
		unsigned int i = (ring->dirty + n) % ring->size;
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);
		struct sk_buff *skb = ring->buf[i].skb;

		if (!ag71xx_desc_empty(desc)) {
			if (pdata->is_ar7240 &&
			    ag71xx_check_dma_stuck(ag, ring->buf[i].timestamp))
				schedule_work(&ag->restart_work);
			break;
		}

		n++;
		if (!skb)
			continue;

		dev_kfree_skb_any(skb);
		ring->buf[i].skb = NULL;

		bytes_compl += ring->buf[i].len;
		sent++;

		ring->dirty += n;
		while (n > 0) {
			ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_PS);
			n--;
		}
	}

	DBG("%s: %d packets sent out\n", ag->dev->name, sent);

	ag->dev->stats.tx_bytes += bytes_compl;
	ag->dev->stats.tx_packets += sent;

	if (!sent)
		return 0;

	netdev_completed_queue(ag->dev, sent, bytes_compl);
	if ((ring->curr - ring->dirty) < (ring->size * 3) / 4)
		netif_wake_queue(ag->dev);

	return sent;
}
static int ag71xx_rx_packets(struct ag71xx *ag, int limit)
{
	struct net_device *dev = ag->dev;
	struct ag71xx_ring *ring = &ag->rx_ring;
	int offset = ag71xx_buffer_offset(ag);
	unsigned int pktlen_mask = ag->desc_pktlen_mask;
	int done = 0;

	DBG("%s: rx packets, limit=%d, curr=%u, dirty=%u\n",
	    dev->name, limit, ring->curr, ring->dirty);

	while (done < limit) {
		unsigned int i = ring->curr % ring->size;
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);
		struct sk_buff *skb;
		int pktlen;
		int err = 0;

		if (ag71xx_desc_empty(desc))
			break;

		if ((ring->dirty + ring->size) == ring->curr) {
			/* should never happen: ring owners out of sync */
			break;
		}

		ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_PR);

		pktlen = desc->ctrl & pktlen_mask;
		pktlen -= ETH_FCS_LEN;

		dma_unmap_single(&dev->dev, ring->buf[i].dma_addr,
				 ag->rx_buf_size, DMA_FROM_DEVICE);

		dev->stats.rx_packets++;
		dev->stats.rx_bytes += pktlen;

		skb = build_skb(ring->buf[i].rx_buf, 0);
		if (!skb) {
			kfree(ring->buf[i].rx_buf);
			goto next;
		}

		skb_reserve(skb, offset);
		skb_put(skb, pktlen);

		if (ag71xx_has_ar8216(ag))
			err = ag71xx_remove_ar8216_header(ag, skb, pktlen);

		if (err) {
			dev->stats.rx_dropped++;
			kfree_skb(skb);
		} else {
			skb->dev = dev;
			skb->ip_summed = CHECKSUM_NONE;
			skb->protocol = eth_type_trans(skb, dev);
			netif_receive_skb(skb);
		}

next:
		ring->buf[i].rx_buf = NULL;
		done++;

		ring->curr++;
	}

	ag71xx_ring_rx_refill(ag);

	DBG("%s: rx finish, curr=%u, dirty=%u, done=%d\n",
	    dev->name, ring->curr, ring->dirty, done);

	return done;
}
static int ag71xx_poll(struct napi_struct *napi, int limit)
{
	struct ag71xx *ag = container_of(napi, struct ag71xx, napi);
	struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
	struct net_device *dev = ag->dev;
	struct ag71xx_ring *rx_ring;
	unsigned long flags;
	u32 status;
	int tx_done;
	int rx_done;

	pdata->ddr_flush();
	tx_done = ag71xx_tx_packets(ag);

	DBG("%s: processing RX ring\n", dev->name);
	rx_done = ag71xx_rx_packets(ag, limit);

	ag71xx_debugfs_update_napi_stats(ag, rx_done, tx_done);

	rx_ring = &ag->rx_ring;
	if (rx_ring->buf[rx_ring->dirty % rx_ring->size].rx_buf == NULL)
		goto oom;

	status = ag71xx_rr(ag, AG71XX_REG_RX_STATUS);
	if (unlikely(status & RX_STATUS_OF)) {
		ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_OF);
		dev->stats.rx_fifo_errors++;

		/* restart RX */
		ag71xx_wr(ag, AG71XX_REG_RX_CTRL, RX_CTRL_RXE);
	}

	if (rx_done < limit) {
		if (status & RX_STATUS_PR)
			goto more;

		status = ag71xx_rr(ag, AG71XX_REG_TX_STATUS);
		if (status & TX_STATUS_PS)
			goto more;

		DBG("%s: disable polling mode, rx=%d, tx=%d,limit=%d\n",
		    dev->name, rx_done, tx_done, limit);

		napi_complete(napi);

		/* enable interrupts */
		spin_lock_irqsave(&ag->lock, flags);
		ag71xx_int_enable(ag, AG71XX_INT_POLL);
		spin_unlock_irqrestore(&ag->lock, flags);
		return rx_done;
	}

more:
	DBG("%s: stay in polling mode, rx=%d, tx=%d, limit=%d\n",
	    dev->name, rx_done, tx_done, limit);
	return rx_done;

oom:
	if (netif_msg_rx_err(ag))
		pr_info("%s: out of memory\n", dev->name);

	mod_timer(&ag->oom_timer, jiffies + AG71XX_OOM_REFILL);
	napi_complete(napi);
	return 0;
}
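/*
 * NAPI flow in ag71xx_poll(): while work remains (the RX budget was
 * exhausted or RX/TX status bits are still pending) it returns without
 * completing and stays in polling mode; once drained it calls
 * napi_complete() and re-enables AG71XX_INT_POLL. On buffer-allocation
 * failure it completes without re-enabling the interrupt and arms the
 * OOM timer instead, so the refill is retried when the timer fires.
 */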
static irqreturn_t ag71xx_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct ag71xx *ag = netdev_priv(dev);
	u32 status;

	status = ag71xx_rr(ag, AG71XX_REG_INT_STATUS);
	ag71xx_dump_intr(ag, "raw", status);

	if (unlikely(!status))
		return IRQ_NONE;

	if (unlikely(status & AG71XX_INT_ERR)) {
		if (status & AG71XX_INT_TX_BE) {
			ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_BE);
			dev_err(&dev->dev, "TX BUS error\n");
		}
		if (status & AG71XX_INT_RX_BE) {
			ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_BE);
			dev_err(&dev->dev, "RX BUS error\n");
		}
	}

	if (likely(status & AG71XX_INT_POLL)) {
		ag71xx_int_disable(ag, AG71XX_INT_POLL);
		DBG("%s: enable polling mode\n", dev->name);
		napi_schedule(&ag->napi);
	}

	ag71xx_debugfs_update_int_stats(ag, status);

	return IRQ_HANDLED;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void ag71xx_netpoll(struct net_device *dev)
{
	disable_irq(dev->irq);
	ag71xx_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif
static int ag71xx_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ag71xx *ag = netdev_priv(dev);
	unsigned int max_frame_len;

	max_frame_len = ag71xx_max_frame_len(new_mtu);
	if (new_mtu < 68 || max_frame_len > ag->max_frame_len)
		return -EINVAL;

	if (netif_running(dev))
		return -EBUSY;

	dev->mtu = new_mtu;
	return 0;
}
static const struct net_device_ops ag71xx_netdev_ops = {
	.ndo_open		= ag71xx_open,
	.ndo_stop		= ag71xx_stop,
	.ndo_start_xmit		= ag71xx_hard_start_xmit,
	.ndo_do_ioctl		= ag71xx_do_ioctl,
	.ndo_tx_timeout		= ag71xx_tx_timeout,
	.ndo_change_mtu		= ag71xx_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ag71xx_netpoll,
#endif
};
static const char *ag71xx_get_phy_if_mode_name(phy_interface_t mode)
{
	switch (mode) {
	case PHY_INTERFACE_MODE_MII:
		return "MII";
	case PHY_INTERFACE_MODE_GMII:
		return "GMII";
	case PHY_INTERFACE_MODE_RMII:
		return "RMII";
	case PHY_INTERFACE_MODE_RGMII:
		return "RGMII";
	case PHY_INTERFACE_MODE_SGMII:
		return "SGMII";
	default:
		break;
	}

	return "unknown";
}
static int ag71xx_probe(struct platform_device *pdev)
{
	struct net_device *dev;
	struct resource *res;
	struct ag71xx *ag;
	struct ag71xx_platform_data *pdata;
	int err;

	pdata = pdev->dev.platform_data;
	if (!pdata) {
		dev_err(&pdev->dev, "no platform data specified\n");
		err = -ENXIO;
		goto err_out;
	}

	if (pdata->mii_bus_dev == NULL && pdata->phy_mask) {
		dev_err(&pdev->dev, "no MII bus device specified\n");
		err = -EINVAL;
		goto err_out;
	}

	dev = alloc_etherdev(sizeof(*ag));
	if (!dev) {
		dev_err(&pdev->dev, "alloc_etherdev failed\n");
		err = -ENOMEM;
		goto err_out;
	}

	if (!pdata->max_frame_len || !pdata->desc_pktlen_mask) {
		err = -EINVAL;
		goto err_free_dev;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

	ag = netdev_priv(dev);
	ag->pdev = pdev;
	ag->dev = dev;
	ag->msg_enable = netif_msg_init(ag71xx_msg_level,
					AG71XX_DEFAULT_MSG_ENABLE);
	spin_lock_init(&ag->lock);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mac_base");
	if (!res) {
		dev_err(&pdev->dev, "no mac_base resource found\n");
		err = -ENXIO;
		goto err_free_dev;
	}

	ag->mac_base = ioremap_nocache(res->start, res->end - res->start + 1);
	if (!ag->mac_base) {
		dev_err(&pdev->dev, "unable to ioremap mac_base\n");
		err = -ENOMEM;
		goto err_free_dev;
	}

	dev->irq = platform_get_irq(pdev, 0);
	err = request_irq(dev->irq, ag71xx_interrupt,
			  0,
			  dev->name, dev);
	if (err) {
		dev_err(&pdev->dev, "unable to request IRQ %d\n", dev->irq);
		goto err_unmap_base;
	}

	dev->base_addr = (unsigned long)ag->mac_base;
	dev->netdev_ops = &ag71xx_netdev_ops;
	dev->ethtool_ops = &ag71xx_ethtool_ops;

	INIT_WORK(&ag->restart_work, ag71xx_restart_work_func);

	init_timer(&ag->oom_timer);
	ag->oom_timer.data = (unsigned long) dev;
	ag->oom_timer.function = ag71xx_oom_timer_handler;

	ag->tx_ring.size = AG71XX_TX_RING_SIZE_DEFAULT;
	ag->rx_ring.size = AG71XX_RX_RING_SIZE_DEFAULT;

	ag->max_frame_len = pdata->max_frame_len;
	ag->desc_pktlen_mask = pdata->desc_pktlen_mask;

	if (!pdata->is_ar724x && !pdata->is_ar91xx) {
		ag->tx_ring.desc_split = AG71XX_TX_RING_SPLIT;
		ag->tx_ring.size *= AG71XX_TX_RING_DS_PER_PKT;
	}

	ag->stop_desc = dma_alloc_coherent(NULL,
		sizeof(struct ag71xx_desc), &ag->stop_desc_dma, GFP_KERNEL);
	if (!ag->stop_desc) {
		err = -ENOMEM;
		goto err_free_irq;
	}

	ag->stop_desc->data = 0;
	ag->stop_desc->ctrl = 0;
	ag->stop_desc->next = (u32) ag->stop_desc_dma;

	memcpy(dev->dev_addr, pdata->mac_addr, ETH_ALEN);

	netif_napi_add(dev, &ag->napi, ag71xx_poll, AG71XX_NAPI_WEIGHT);

	ag71xx_dump_regs(ag);

	ag71xx_hw_init(ag);

	ag71xx_dump_regs(ag);

	err = ag71xx_phy_connect(ag);
	if (err)
		goto err_free_desc;

	err = ag71xx_debugfs_init(ag);
	if (err)
		goto err_phy_disconnect;

	platform_set_drvdata(pdev, dev);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "unable to register net device\n");
		goto err_debugfs_exit;
	}

	pr_info("%s: Atheros AG71xx at 0x%08lx, irq %d, mode:%s\n",
		dev->name, dev->base_addr, dev->irq,
		ag71xx_get_phy_if_mode_name(pdata->phy_if_mode));

	return 0;

err_debugfs_exit:
	ag71xx_debugfs_exit(ag);
err_phy_disconnect:
	ag71xx_phy_disconnect(ag);
err_free_desc:
	dma_free_coherent(NULL, sizeof(struct ag71xx_desc), ag->stop_desc,
			  ag->stop_desc_dma);
err_free_irq:
	free_irq(dev->irq, dev);
err_unmap_base:
	iounmap(ag->mac_base);
err_free_dev:
	free_netdev(dev);
err_out:
	platform_set_drvdata(pdev, NULL);
	return err;
}
static int ag71xx_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);

	if (dev) {
		struct ag71xx *ag = netdev_priv(dev);

		ag71xx_debugfs_exit(ag);
		ag71xx_phy_disconnect(ag);
		unregister_netdev(dev);
		free_irq(dev->irq, dev);
		iounmap(ag->mac_base);
		free_netdev(dev);
		platform_set_drvdata(pdev, NULL);
	}

	return 0;
}
static struct platform_driver ag71xx_driver = {
	.probe		= ag71xx_probe,
	.remove		= ag71xx_remove,
	.driver = {
		.name	= AG71XX_DRV_NAME,
	},
};
static int __init ag71xx_module_init(void)
{
	int ret;

	ret = ag71xx_debugfs_root_init();
	if (ret)
		goto err_out;

	ret = ag71xx_mdio_driver_init();
	if (ret)
		goto err_debugfs_exit;

	ret = platform_driver_register(&ag71xx_driver);
	if (ret)
		goto err_mdio_exit;

	return 0;

err_mdio_exit:
	ag71xx_mdio_driver_exit();
err_debugfs_exit:
	ag71xx_debugfs_root_exit();
err_out:
	return ret;
}
static void __exit ag71xx_module_exit(void)
{
	platform_driver_unregister(&ag71xx_driver);
	ag71xx_mdio_driver_exit();
	ag71xx_debugfs_root_exit();
}
module_init(ag71xx_module_init);
module_exit(ag71xx_module_exit);

MODULE_VERSION(AG71XX_DRV_VERSION);
MODULE_AUTHOR("Gabor Juhos <juhosg@openwrt.org>");
MODULE_AUTHOR("Imre Kaloz <kaloz@openwrt.org>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" AG71XX_DRV_NAME);