/*
 * Atheros AR71xx built-in ethernet mac driver
 *
 * Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
 * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
 *
 * Based on Atheros' AG7100 driver
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */
#include <linux/sizes.h>
#include <linux/of_net.h>
#include <linux/of_address.h>

#include "ag71xx.h"
#define AG71XX_DEFAULT_MSG_ENABLE	\
	(NETIF_MSG_DRV			\
	 | NETIF_MSG_PROBE		\
	 | NETIF_MSG_LINK		\
	 | NETIF_MSG_TIMER		\
	 | NETIF_MSG_IFDOWN		\
	 | NETIF_MSG_IFUP		\
	 | NETIF_MSG_RX_ERR		\
	 | NETIF_MSG_TX_ERR)

static int ag71xx_msg_level = -1;

module_param_named(msg_level, ag71xx_msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
#define ETH_SWITCH_HEADER_LEN	2

static int ag71xx_tx_packets(struct ag71xx *ag, bool flush);

static inline unsigned int ag71xx_max_frame_len(unsigned int mtu)
{
	return ETH_SWITCH_HEADER_LEN + ETH_HLEN + VLAN_HLEN + mtu + ETH_FCS_LEN;
}
static void ag71xx_dump_dma_regs(struct ag71xx *ag)
{
	DBG("%s: dma_tx_ctrl=%08x, dma_tx_desc=%08x, dma_tx_status=%08x\n",
	    ag->dev->name,
	    ag71xx_rr(ag, AG71XX_REG_TX_CTRL),
	    ag71xx_rr(ag, AG71XX_REG_TX_DESC),
	    ag71xx_rr(ag, AG71XX_REG_TX_STATUS));

	DBG("%s: dma_rx_ctrl=%08x, dma_rx_desc=%08x, dma_rx_status=%08x\n",
	    ag->dev->name,
	    ag71xx_rr(ag, AG71XX_REG_RX_CTRL),
	    ag71xx_rr(ag, AG71XX_REG_RX_DESC),
	    ag71xx_rr(ag, AG71XX_REG_RX_STATUS));
}
static inline void ag71xx_dump_intr(struct ag71xx *ag, char *label, u32 intr)
{
	DBG("%s: %s intr=%08x %s%s%s%s%s%s\n",
	    ag->dev->name, label, intr,
	    (intr & AG71XX_INT_TX_PS) ? "TXPS " : "",
	    (intr & AG71XX_INT_TX_UR) ? "TXUR " : "",
	    (intr & AG71XX_INT_TX_BE) ? "TXBE " : "",
	    (intr & AG71XX_INT_RX_PR) ? "RXPR " : "",
	    (intr & AG71XX_INT_RX_OF) ? "RXOF " : "",
	    (intr & AG71XX_INT_RX_BE) ? "RXBE " : "");
}
static void ag71xx_ring_tx_clean(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->tx_ring;
	struct net_device *dev = ag->dev;
	int ring_mask = BIT(ring->order) - 1;
	u32 bytes_compl = 0, pkts_compl = 0;

	while (ring->curr != ring->dirty) {
		struct ag71xx_desc *desc;
		u32 i = ring->dirty & ring_mask;

		desc = ag71xx_ring_desc(ring, i);
		if (!ag71xx_desc_empty(desc)) {
			desc->ctrl = 0;
			dev->stats.tx_errors++;
		}

		if (ring->buf[i].skb) {
			bytes_compl += ring->buf[i].len;
			pkts_compl++;
			dev_kfree_skb_any(ring->buf[i].skb);
		}
		ring->buf[i].skb = NULL;
		ring->dirty++;
	}

	/* flush descriptors */
	wmb();

	netdev_completed_queue(dev, pkts_compl, bytes_compl);
}
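
/*
 * Descriptor rings are sized to a power of two (BIT(ring->order)); the
 * free-running 'curr' (producer) and 'dirty' (consumer) counters are only
 * masked on array access, so 'curr - dirty' is always the number of
 * in-flight descriptors. Each descriptor's 'next' pointer is pre-linked
 * below, so the ring never has to be re-chained at runtime.
 */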
static void ag71xx_ring_tx_init(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->tx_ring;
	int ring_size = BIT(ring->order);
	int ring_mask = ring_size - 1;
	int i;

	for (i = 0; i < ring_size; i++) {
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);

		desc->next = (u32) (ring->descs_dma +
			AG71XX_DESC_SIZE * ((i + 1) & ring_mask));

		desc->ctrl = DESC_EMPTY;
		ring->buf[i].skb = NULL;
	}

	/* flush descriptors */
	wmb();

	ring->curr = 0;
	ring->dirty = 0;
	netdev_reset_queue(ag->dev);
}
static void ag71xx_ring_rx_clean(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->rx_ring;
	int ring_size = BIT(ring->order);
	int i;

	if (!ring->buf)
		return;

	for (i = 0; i < ring_size; i++)
		if (ring->buf[i].rx_buf) {
			dma_unmap_single(&ag->dev->dev, ring->buf[i].dma_addr,
					 ag->rx_buf_size, DMA_FROM_DEVICE);
			skb_free_frag(ring->buf[i].rx_buf);
		}
}
static int ag71xx_buffer_size(struct ag71xx *ag)
{
	return ag->rx_buf_size +
	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}
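
/*
 * RX buffers are page fragments sized for build_skb(): the DMA area
 * (ag->rx_buf_size) is followed by room for struct skb_shared_info, so a
 * received frame can be turned into an skb without copying.
 */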
static bool ag71xx_fill_rx_buf(struct ag71xx *ag, struct ag71xx_buf *buf,
			       int offset,
			       void *(*alloc)(unsigned int size))
{
	struct ag71xx_ring *ring = &ag->rx_ring;
	struct ag71xx_desc *desc = ag71xx_ring_desc(ring, buf - &ring->buf[0]);
	void *data;

	data = alloc(ag71xx_buffer_size(ag));
	if (!data)
		return false;

	buf->rx_buf = data;
	buf->dma_addr = dma_map_single(&ag->dev->dev, data, ag->rx_buf_size,
				       DMA_FROM_DEVICE);
	desc->data = (u32) buf->dma_addr + offset;
	return true;
}
static int ag71xx_ring_rx_init(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->rx_ring;
	int ring_size = BIT(ring->order);
	int ring_mask = BIT(ring->order) - 1;
	unsigned int i;
	int ret;

	ret = 0;
	for (i = 0; i < ring_size; i++) {
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);

		desc->next = (u32) (ring->descs_dma +
			AG71XX_DESC_SIZE * ((i + 1) & ring_mask));

		DBG("ag71xx: RX desc at %p, next is %08x\n",
		    desc, desc->next);
	}

	for (i = 0; i < ring_size; i++) {
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);

		if (!ag71xx_fill_rx_buf(ag, &ring->buf[i], ag->rx_buf_offset,
					netdev_alloc_frag)) {
			ret = -ENOMEM;
			break;
		}

		desc->ctrl = DESC_EMPTY;
	}

	/* flush descriptors */
	wmb();

	ring->curr = 0;
	ring->dirty = 0;

	return ret;
}
static int ag71xx_ring_rx_refill(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->rx_ring;
	int ring_mask = BIT(ring->order) - 1;
	int offset = ag->rx_buf_offset;
	unsigned int count;

	count = 0;
	for (; ring->curr - ring->dirty > 0; ring->dirty++) {
		struct ag71xx_desc *desc;
		unsigned int i;

		i = ring->dirty & ring_mask;
		desc = ag71xx_ring_desc(ring, i);

		if (!ring->buf[i].rx_buf &&
		    !ag71xx_fill_rx_buf(ag, &ring->buf[i], offset,
					napi_alloc_frag))
			break;

		desc->ctrl = DESC_EMPTY;
		count++;
	}

	/* flush descriptors */
	wmb();

	DBG("%s: %u rx descriptors refilled\n", ag->dev->name, count);

	return count;
}
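
/*
 * TX and RX share one buf array and one coherent descriptor block: the RX
 * entries simply start after the BIT(tx->order) TX entries, which keeps
 * allocation and teardown to a single kzalloc()/dma_alloc_coherent() pair.
 */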
static int ag71xx_rings_init(struct ag71xx *ag)
{
	struct ag71xx_ring *tx = &ag->tx_ring;
	struct ag71xx_ring *rx = &ag->rx_ring;
	int ring_size = BIT(tx->order) + BIT(rx->order);
	int tx_size = BIT(tx->order);

	tx->buf = kzalloc(ring_size * sizeof(*tx->buf), GFP_KERNEL);
	if (!tx->buf)
		return -ENOMEM;

	tx->descs_cpu = dma_alloc_coherent(NULL, ring_size * AG71XX_DESC_SIZE,
					   &tx->descs_dma, GFP_ATOMIC);
	if (!tx->descs_cpu) {
		kfree(tx->buf);
		tx->buf = NULL;
		return -ENOMEM;
	}

	rx->buf = &tx->buf[BIT(tx->order)];
	rx->descs_cpu = ((void *)tx->descs_cpu) + tx_size * AG71XX_DESC_SIZE;
	rx->descs_dma = tx->descs_dma + tx_size * AG71XX_DESC_SIZE;

	ag71xx_ring_tx_init(ag);
	return ag71xx_ring_rx_init(ag);
}
static void ag71xx_rings_free(struct ag71xx *ag)
{
	struct ag71xx_ring *tx = &ag->tx_ring;
	struct ag71xx_ring *rx = &ag->rx_ring;
	int ring_size = BIT(tx->order) + BIT(rx->order);

	if (tx->descs_cpu)
		dma_free_coherent(NULL, ring_size * AG71XX_DESC_SIZE,
				  tx->descs_cpu, tx->descs_dma);

	kfree(tx->buf);

	tx->descs_cpu = NULL;
	rx->descs_cpu = NULL;
	tx->buf = NULL;
	rx->buf = NULL;
}
static void ag71xx_rings_cleanup(struct ag71xx *ag)
{
	ag71xx_ring_rx_clean(ag);
	ag71xx_ring_tx_clean(ag);
	ag71xx_rings_free(ag);

	netdev_reset_queue(ag->dev);
}
static unsigned char *ag71xx_speed_str(struct ag71xx *ag)
{
	switch (ag->speed) {
	case SPEED_1000:
		return "1000";
	case SPEED_100:
		return "100";
	case SPEED_10:
		return "10";
	}

	return "?";
}
static void ag71xx_hw_set_macaddr(struct ag71xx *ag, unsigned char *mac)
{
	u32 t;

	t = (((u32) mac[5]) << 24) | (((u32) mac[4]) << 16)
	  | (((u32) mac[3]) << 8) | ((u32) mac[2]);

	ag71xx_wr(ag, AG71XX_REG_MAC_ADDR1, t);

	t = (((u32) mac[1]) << 24) | (((u32) mac[0]) << 16);
	ag71xx_wr(ag, AG71XX_REG_MAC_ADDR2, t);
}
static void ag71xx_dma_reset(struct ag71xx *ag)
{
	u32 val;
	int i;

	ag71xx_dump_dma_regs(ag);

	/* stop RX and TX */
	ag71xx_wr(ag, AG71XX_REG_RX_CTRL, 0);
	ag71xx_wr(ag, AG71XX_REG_TX_CTRL, 0);

	/*
	 * give the hardware some time to really stop all rx/tx activity
	 * clearing the descriptors too early causes random memory corruption
	 */
	mdelay(1);

	/* clear descriptor addresses */
	ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->stop_desc_dma);
	ag71xx_wr(ag, AG71XX_REG_RX_DESC, ag->stop_desc_dma);

	/* clear pending RX/TX interrupts */
	for (i = 0; i < 256; i++) {
		ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_PR);
		ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_PS);
	}

	/* clear pending errors */
	ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_BE | RX_STATUS_OF);
	ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_BE | TX_STATUS_UR);

	val = ag71xx_rr(ag, AG71XX_REG_RX_STATUS);
	if (val)
		pr_alert("%s: unable to clear DMA Rx status: %08x\n",
			 ag->dev->name, val);

	val = ag71xx_rr(ag, AG71XX_REG_TX_STATUS);

	/* mask out reserved bits */
	val &= ~0xff000000;

	if (val)
		pr_alert("%s: unable to clear DMA Tx status: %08x\n",
			 ag->dev->name, val);

	ag71xx_dump_dma_regs(ag);
}
#define MAC_CFG1_INIT	(MAC_CFG1_RXE | MAC_CFG1_TXE | \
			 MAC_CFG1_SRX | MAC_CFG1_STX)

#define FIFO_CFG0_INIT	(FIFO_CFG0_ALL << FIFO_CFG0_ENABLE_SHIFT)

#define FIFO_CFG4_INIT	(FIFO_CFG4_DE | FIFO_CFG4_DV | FIFO_CFG4_FC | \
			 FIFO_CFG4_CE | FIFO_CFG4_CR | FIFO_CFG4_LM | \
			 FIFO_CFG4_LO | FIFO_CFG4_OK | FIFO_CFG4_MC | \
			 FIFO_CFG4_BC | FIFO_CFG4_DR | FIFO_CFG4_LE | \
			 FIFO_CFG4_CF | FIFO_CFG4_PF | FIFO_CFG4_UO | \
			 FIFO_CFG4_VT)

#define FIFO_CFG5_INIT	(FIFO_CFG5_DE | FIFO_CFG5_DV | FIFO_CFG5_FC | \
			 FIFO_CFG5_CE | FIFO_CFG5_LO | FIFO_CFG5_OK | \
			 FIFO_CFG5_MC | FIFO_CFG5_BC | FIFO_CFG5_DR | \
			 FIFO_CFG5_CF | FIFO_CFG5_PF | FIFO_CFG5_VT | \
			 FIFO_CFG5_LE | FIFO_CFG5_FT | FIFO_CFG5_16 | \
			 FIFO_CFG5_17 | FIFO_CFG5_SF)
static void ag71xx_hw_stop(struct ag71xx *ag)
{
	/* disable all interrupts and stop the rx/tx engine */
	ag71xx_wr(ag, AG71XX_REG_INT_ENABLE, 0);
	ag71xx_wr(ag, AG71XX_REG_RX_CTRL, 0);
	ag71xx_wr(ag, AG71XX_REG_TX_CTRL, 0);
}
static void ag71xx_hw_setup(struct ag71xx *ag)
{
	struct device_node *np = ag->pdev->dev.of_node;
	u32 init = MAC_CFG1_INIT;

	/* setup MAC configuration registers */
	if (of_property_read_bool(np, "flow-control"))
		init |= MAC_CFG1_TFC | MAC_CFG1_RFC;
	ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, init);

	ag71xx_sb(ag, AG71XX_REG_MAC_CFG2,
		  MAC_CFG2_PAD_CRC_EN | MAC_CFG2_LEN_CHECK);

	/* setup max frame length to zero */
	ag71xx_wr(ag, AG71XX_REG_MAC_MFL, 0);

	/* setup FIFO configuration registers */
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG0, FIFO_CFG0_INIT);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG1, ag->fifodata[0]);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG2, ag->fifodata[1]);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG4, FIFO_CFG4_INIT);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG5, FIFO_CFG5_INIT);
}
static void ag71xx_hw_init(struct ag71xx *ag)
{
	ag71xx_hw_stop(ag);

	if (ag->phy_reset) {
		reset_control_assert(ag->phy_reset);
		msleep(50);
		reset_control_deassert(ag->phy_reset);
		msleep(200);
	}

	ag71xx_sb(ag, AG71XX_REG_MAC_CFG1, MAC_CFG1_SR);
	udelay(20);

	reset_control_assert(ag->mac_reset);
	msleep(100);
	reset_control_deassert(ag->mac_reset);
	msleep(200);

	ag71xx_hw_setup(ag);

	ag71xx_dma_reset(ag);
}
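
/*
 * Fast reset: used on link changes instead of a full ag71xx_hw_init(). The
 * MII configuration and the current RX descriptor pointer are saved across
 * the MAC reset and restored afterwards, so only the TX ring state has to
 * be rebuilt.
 */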
static void ag71xx_fast_reset(struct ag71xx *ag)
{
	struct net_device *dev = ag->dev;
	u32 rx_ds;
	u32 mii_reg;

	ag71xx_hw_stop(ag);
	wmb();

	mii_reg = ag71xx_rr(ag, AG71XX_REG_MII_CFG);
	rx_ds = ag71xx_rr(ag, AG71XX_REG_RX_DESC);

	ag71xx_tx_packets(ag, true);

	reset_control_assert(ag->mac_reset);
	udelay(10);
	reset_control_deassert(ag->mac_reset);
	udelay(10);

	ag71xx_dma_reset(ag);
	ag71xx_hw_setup(ag);
	ag->tx_ring.curr = 0;
	ag->tx_ring.dirty = 0;
	netdev_reset_queue(ag->dev);

	/* setup max frame length */
	ag71xx_wr(ag, AG71XX_REG_MAC_MFL,
		  ag71xx_max_frame_len(ag->dev->mtu));

	ag71xx_wr(ag, AG71XX_REG_RX_DESC, rx_ds);
	ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->tx_ring.descs_dma);
	ag71xx_wr(ag, AG71XX_REG_MII_CFG, mii_reg);

	ag71xx_hw_set_macaddr(ag, dev->dev_addr);
}
static void ag71xx_hw_start(struct ag71xx *ag)
{
	/* start RX engine */
	ag71xx_wr(ag, AG71XX_REG_RX_CTRL, RX_CTRL_RXE);

	/* enable interrupts */
	ag71xx_wr(ag, AG71XX_REG_INT_ENABLE, AG71XX_INT_INIT);

	netif_wake_queue(ag->dev);
}
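
/*
 * The "pll-data" property supplies one PLL value per link speed:
 * plldata[0] for 1000 Mbps, plldata[1] for 100 Mbps and plldata[2] for
 * 10 Mbps. ath79_set_pll() wraps the write in the SoC's PLL override
 * sequence, driven through the "pll-reg" config register and shift.
 */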
static void ath79_set_pllval(struct ag71xx *ag)
{
	u32 pll_reg = ag->pllreg[1];
	u32 pll_val;

	if (!ag->pllregmap)
		return;

	switch (ag->speed) {
	case SPEED_10:
		pll_val = ag->plldata[2];
		break;
	case SPEED_100:
		pll_val = ag->plldata[1];
		break;
	case SPEED_1000:
		pll_val = ag->plldata[0];
		break;
	default:
		BUG();
	}

	if (pll_val)
		regmap_write(ag->pllregmap, pll_reg, pll_val);
}
static void ath79_set_pll(struct ag71xx *ag)
{
	u32 pll_cfg = ag->pllreg[0];
	u32 pll_shift = ag->pllreg[2];

	if (!ag->pllregmap)
		return;

	regmap_update_bits(ag->pllregmap, pll_cfg, 3 << pll_shift, 2 << pll_shift);
	udelay(100);

	ath79_set_pllval(ag);

	regmap_update_bits(ag->pllregmap, pll_cfg, 3 << pll_shift, 3 << pll_shift);
	udelay(100);

	regmap_update_bits(ag->pllregmap, pll_cfg, 3 << pll_shift, 0);
	udelay(100);
}
static void ath79_mii_ctrl_set_speed(struct ag71xx *ag)
{
	unsigned int mii_speed;
	u32 t;

	if (!ag->mii_base)
		return;

	switch (ag->speed) {
	case SPEED_10:
		mii_speed = AR71XX_MII_CTRL_SPEED_10;
		break;
	case SPEED_100:
		mii_speed = AR71XX_MII_CTRL_SPEED_100;
		break;
	case SPEED_1000:
		mii_speed = AR71XX_MII_CTRL_SPEED_1000;
		break;
	default:
		BUG();
	}

	t = __raw_readl(ag->mii_base);
	t &= ~(AR71XX_MII_CTRL_IF_MASK);
	t |= (mii_speed & AR71XX_MII_CTRL_IF_MASK);
	__raw_writel(t, ag->mii_base);
}
static void
__ag71xx_link_adjust(struct ag71xx *ag, bool update)
{
	struct device_node *np = ag->pdev->dev.of_node;
	u32 cfg2;
	u32 ifctl;
	u32 fifo5;

	if (!ag->link && update) {
		ag71xx_hw_stop(ag);
		netif_carrier_off(ag->dev);
		if (netif_msg_link(ag))
			pr_info("%s: link down\n", ag->dev->name);
		return;
	}

	if (!of_device_is_compatible(np, "qca,ar9130-eth") &&
	    !of_device_is_compatible(np, "qca,ar7100-eth"))
		ag71xx_fast_reset(ag);

	cfg2 = ag71xx_rr(ag, AG71XX_REG_MAC_CFG2);
	cfg2 &= ~(MAC_CFG2_IF_1000 | MAC_CFG2_IF_10_100 | MAC_CFG2_FDX);
	cfg2 |= (ag->duplex) ? MAC_CFG2_FDX : 0;

	ifctl = ag71xx_rr(ag, AG71XX_REG_MAC_IFCTL);
	ifctl &= ~(MAC_IFCTL_SPEED);

	fifo5 = ag71xx_rr(ag, AG71XX_REG_FIFO_CFG5);
	fifo5 &= ~FIFO_CFG5_BM;

	switch (ag->speed) {
	case SPEED_1000:
		cfg2 |= MAC_CFG2_IF_1000;
		fifo5 |= FIFO_CFG5_BM;
		break;
	case SPEED_100:
		cfg2 |= MAC_CFG2_IF_10_100;
		ifctl |= MAC_IFCTL_SPEED;
		break;
	case SPEED_10:
		cfg2 |= MAC_CFG2_IF_10_100;
		break;
	default:
		BUG();
		return;
	}

	if (ag->tx_ring.desc_split) {
		ag->fifodata[2] &= 0xffff;
		ag->fifodata[2] |= ((2048 - ag->tx_ring.desc_split) / 4) << 16;
	}

	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG3, ag->fifodata[2]);

	if (of_device_is_compatible(np, "qca,ar7100-eth")) {
		ath79_set_pll(ag);
		ath79_mii_ctrl_set_speed(ag);
	} else if (of_device_is_compatible(np, "qca,ar7242-eth")) {
		ath79_set_pll(ag);
	} else if (of_device_is_compatible(np, "qca,ar9130-eth")) {
		ath79_set_pll(ag);
		ath79_mii_ctrl_set_speed(ag);
	} else if (of_device_is_compatible(np, "qca,ar9340-eth")) {
		ath79_set_pll(ag);
	} else if (of_device_is_compatible(np, "qca,qca9550-eth")) {
		ath79_set_pll(ag);
	} else if (of_device_is_compatible(np, "qca,qca9560-eth")) {
		ath79_set_pll(ag);
	}

	ag71xx_wr(ag, AG71XX_REG_MAC_CFG2, cfg2);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG5, fifo5);
	ag71xx_wr(ag, AG71XX_REG_MAC_IFCTL, ifctl);

	if (of_device_is_compatible(np, "qca,qca9530-eth")) {
		/*
		 * The rx ring buffer can stall on small packets on QCA953x and
		 * QCA956x. Disabling the inline checksum engine fixes the stall.
		 * The wr, rr functions cannot be used since this hidden register
		 * is outside of the normal ag71xx register block.
		 */
		void __iomem *dam = ioremap_nocache(0xb90001bc, 0x4);

		if (dam) {
			__raw_writel(__raw_readl(dam) & ~BIT(27), dam);
			(void)__raw_readl(dam);
			iounmap(dam);
		}
	}

	ag71xx_hw_start(ag);

	netif_carrier_on(ag->dev);
	if (update && netif_msg_link(ag))
		pr_info("%s: link up (%sMbps/%s duplex)\n",
			ag->dev->name,
			ag71xx_speed_str(ag),
			(DUPLEX_FULL == ag->duplex) ? "Full" : "Half");

	DBG("%s: fifo_cfg0=%#x, fifo_cfg1=%#x, fifo_cfg2=%#x\n",
	    ag->dev->name,
	    ag71xx_rr(ag, AG71XX_REG_FIFO_CFG0),
	    ag71xx_rr(ag, AG71XX_REG_FIFO_CFG1),
	    ag71xx_rr(ag, AG71XX_REG_FIFO_CFG2));

	DBG("%s: fifo_cfg3=%#x, fifo_cfg4=%#x, fifo_cfg5=%#x\n",
	    ag->dev->name,
	    ag71xx_rr(ag, AG71XX_REG_FIFO_CFG3),
	    ag71xx_rr(ag, AG71XX_REG_FIFO_CFG4),
	    ag71xx_rr(ag, AG71XX_REG_FIFO_CFG5));

	DBG("%s: mac_cfg2=%#x, mac_ifctl=%#x\n",
	    ag->dev->name,
	    ag71xx_rr(ag, AG71XX_REG_MAC_CFG2),
	    ag71xx_rr(ag, AG71XX_REG_MAC_IFCTL));
}
void ag71xx_link_adjust(struct ag71xx *ag)
{
	__ag71xx_link_adjust(ag, true);
}
static int ag71xx_hw_enable(struct ag71xx *ag)
{
	int ret;

	ret = ag71xx_rings_init(ag);
	if (ret)
		return ret;

	napi_enable(&ag->napi);
	ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->tx_ring.descs_dma);
	ag71xx_wr(ag, AG71XX_REG_RX_DESC, ag->rx_ring.descs_dma);
	netif_start_queue(ag->dev);

	return 0;
}
static void ag71xx_hw_disable(struct ag71xx *ag)
{
	unsigned long flags;

	spin_lock_irqsave(&ag->lock, flags);

	netif_stop_queue(ag->dev);

	ag71xx_hw_stop(ag);
	ag71xx_dma_reset(ag);

	napi_disable(&ag->napi);
	del_timer_sync(&ag->oom_timer);

	spin_unlock_irqrestore(&ag->lock, flags);

	ag71xx_rings_cleanup(ag);
}
static int ag71xx_open(struct net_device *dev)
{
	struct ag71xx *ag = netdev_priv(dev);
	unsigned int max_frame_len;
	int ret;

	netif_carrier_off(dev);
	max_frame_len = ag71xx_max_frame_len(dev->mtu);
	ag->rx_buf_size = SKB_DATA_ALIGN(max_frame_len + NET_SKB_PAD + NET_IP_ALIGN);

	/* setup max frame length */
	ag71xx_wr(ag, AG71XX_REG_MAC_MFL, max_frame_len);
	ag71xx_hw_set_macaddr(ag, dev->dev_addr);

	ret = ag71xx_hw_enable(ag);
	if (ret)
		goto err;

	ag71xx_ar7240_start(ag);
	phy_start(ag->phy_dev);

	return 0;

err:
	ag71xx_rings_cleanup(ag);
	return ret;
}
static int ag71xx_stop(struct net_device *dev)
{
	struct ag71xx *ag = netdev_priv(dev);

	netif_carrier_off(dev);
	phy_stop(ag->phy_dev);
	ag71xx_hw_disable(ag);

	return 0;
}
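
/*
 * ag71xx_fill_dma_desc() maps one frame onto one or more descriptors. When
 * ring->desc_split is set (AR7100 workaround), the frame is chopped into
 * 'split'-byte segments, taking care that no segment ends up 4 bytes or
 * shorter, since such transfers hang the TX DMA engine. Returns the number
 * of descriptors used, or -1 if the ring is full.
 */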
static int ag71xx_fill_dma_desc(struct ag71xx_ring *ring, u32 addr, int len)
{
	struct ag71xx_desc *desc;
	int ring_mask = BIT(ring->order) - 1;
	int ndesc = 0;
	int split = ring->desc_split;
	int i;

	if (!split)
		split = len;

	while (len > 0) {
		unsigned int cur_len = len;

		i = (ring->curr + ndesc) & ring_mask;
		desc = ag71xx_ring_desc(ring, i);

		if (!ag71xx_desc_empty(desc))
			return -1;

		if (cur_len > split) {
			cur_len = split;

			/*
			 * TX will hang if DMA transfers <= 4 bytes,
			 * make sure next segment is more than 4 bytes long.
			 */
			if (len <= split + 4)
				cur_len -= 4;
		}

		desc->data = addr;
		addr += cur_len;
		len -= cur_len;

		if (len > 0)
			cur_len |= DESC_MORE;

		/* prevent early tx attempt of this descriptor */
		if (!ndesc)
			cur_len |= DESC_EMPTY;

		desc->ctrl = cur_len;
		ndesc++;
	}

	return ndesc;
}
static netdev_tx_t ag71xx_hard_start_xmit(struct sk_buff *skb,
					  struct net_device *dev)
{
	struct ag71xx *ag = netdev_priv(dev);
	struct ag71xx_ring *ring = &ag->tx_ring;
	int ring_mask = BIT(ring->order) - 1;
	int ring_size = BIT(ring->order);
	struct ag71xx_desc *desc;
	dma_addr_t dma_addr;
	int i, n, ring_min;

	if (skb->len <= 4) {
		DBG("%s: packet len is too small\n", ag->dev->name);
		goto err_drop;
	}

	dma_addr = dma_map_single(&dev->dev, skb->data, skb->len,
				  DMA_TO_DEVICE);

	i = ring->curr & ring_mask;
	desc = ag71xx_ring_desc(ring, i);

	/* setup descriptor fields */
	n = ag71xx_fill_dma_desc(ring, (u32) dma_addr,
				 skb->len & ag->desc_pktlen_mask);
	if (n < 0)
		goto err_drop_unmap;

	i = (ring->curr + n - 1) & ring_mask;
	ring->buf[i].len = skb->len;
	ring->buf[i].skb = skb;

	netdev_sent_queue(dev, skb->len);

	skb_tx_timestamp(skb);

	desc->ctrl &= ~DESC_EMPTY;
	ring->curr += n;

	/* flush descriptor */
	wmb();

	ring_min = 2;
	if (ring->desc_split)
		ring_min *= AG71XX_TX_RING_DS_PER_PKT;

	if (ring->curr - ring->dirty >= ring_size - ring_min) {
		DBG("%s: tx queue full\n", dev->name);
		netif_stop_queue(dev);
	}

	DBG("%s: packet injected into TX queue\n", ag->dev->name);

	/* enable TX engine */
	ag71xx_wr(ag, AG71XX_REG_TX_CTRL, TX_CTRL_TXE);

	return NETDEV_TX_OK;

err_drop_unmap:
	dma_unmap_single(&dev->dev, dma_addr, skb->len, DMA_TO_DEVICE);

err_drop:
	dev->stats.tx_dropped++;

	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
static int ag71xx_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct ag71xx *ag = netdev_priv(dev);
	int ret;

	switch (cmd) {
	case SIOCETHTOOL:
		if (ag->phy_dev == NULL)
			break;

		spin_lock_irq(&ag->lock);
		ret = phy_ethtool_ioctl(ag->phy_dev, (void *) ifr->ifr_data);
		spin_unlock_irq(&ag->lock);
		return ret;

	case SIOCSIFHWADDR:
		if (copy_from_user
			(dev->dev_addr, ifr->ifr_data, sizeof(dev->dev_addr)))
			return -EFAULT;
		return 0;

	case SIOCGIFHWADDR:
		if (copy_to_user
			(ifr->ifr_data, dev->dev_addr, sizeof(dev->dev_addr)))
			return -EFAULT;
		return 0;

	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		if (ag->phy_dev == NULL)
			break;

		return phy_mii_ioctl(ag->phy_dev, ifr, cmd);

	default:
		break;
	}

	return -EOPNOTSUPP;
}
static void ag71xx_oom_timer_handler(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct ag71xx *ag = netdev_priv(dev);

	napi_schedule(&ag->napi);
}
static void ag71xx_tx_timeout(struct net_device *dev)
{
	struct ag71xx *ag = netdev_priv(dev);

	if (netif_msg_tx_err(ag))
		pr_info("%s: tx timeout\n", ag->dev->name);

	schedule_delayed_work(&ag->restart_work, 1);
}
static void ag71xx_restart_work_func(struct work_struct *work)
{
	struct ag71xx *ag = container_of(work, struct ag71xx, restart_work.work);

	rtnl_lock();
	ag71xx_hw_disable(ag);
	ag71xx_hw_enable(ag);
	if (ag->link)
		__ag71xx_link_adjust(ag, false);
	rtnl_unlock();
}
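
/*
 * Heuristic for the ar724x TX hang: if nothing has been transmitted for
 * roughly 100ms while the carrier is up, inspect the DMA state machine
 * and FIFO depth registers for the known stuck patterns and, if they
 * match, let the caller schedule a restart.
 */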
static bool ag71xx_check_dma_stuck(struct ag71xx *ag)
{
	unsigned long timestamp;
	u32 rx_sm, tx_sm, rx_fd;

	timestamp = netdev_get_tx_queue(ag->dev, 0)->trans_start;
	if (likely(time_before(jiffies, timestamp + HZ/10)))
		return false;

	if (!netif_carrier_ok(ag->dev))
		return false;

	rx_sm = ag71xx_rr(ag, AG71XX_REG_RX_SM);
	if ((rx_sm & 0x7) == 0x3 && ((rx_sm >> 4) & 0x7) == 0x6)
		return true;

	tx_sm = ag71xx_rr(ag, AG71XX_REG_TX_SM);
	rx_fd = ag71xx_rr(ag, AG71XX_REG_FIFO_DEPTH);
	if (((tx_sm >> 4) & 0x7) == 0 && ((rx_sm & 0x7) == 0) &&
	    ((rx_sm >> 4) & 0x7) == 0 && rx_fd == 0)
		return true;

	return false;
}
static int ag71xx_tx_packets(struct ag71xx *ag, bool flush)
{
	struct ag71xx_ring *ring = &ag->tx_ring;
	bool dma_stuck = false;
	int ring_mask = BIT(ring->order) - 1;
	int ring_size = BIT(ring->order);
	int sent = 0;
	int bytes_compl = 0;
	int n = 0;

	DBG("%s: processing TX ring\n", ag->dev->name);

	while (ring->dirty + n != ring->curr) {
		unsigned int i = (ring->dirty + n) & ring_mask;
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);
		struct sk_buff *skb = ring->buf[i].skb;

		if (!flush && !ag71xx_desc_empty(desc)) {
			if (ag->tx_hang_workaround &&
			    ag71xx_check_dma_stuck(ag)) {
				schedule_delayed_work(&ag->restart_work, HZ / 2);
				dma_stuck = true;
			}
			break;
		}

		if (flush)
			desc->ctrl |= DESC_EMPTY;

		n++;
		if (!skb)
			continue;

		dev_kfree_skb_any(skb);
		ring->buf[i].skb = NULL;

		bytes_compl += ring->buf[i].len;
		sent++;

		ring->dirty += n;
		while (n > 0) {
			ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_PS);
			n--;
		}
	}

	DBG("%s: %d packets sent out\n", ag->dev->name, sent);

	if (!sent)
		return 0;

	ag->dev->stats.tx_bytes += bytes_compl;
	ag->dev->stats.tx_packets += sent;

	netdev_completed_queue(ag->dev, sent, bytes_compl);
	if ((ring->curr - ring->dirty) < (ring_size * 3) / 4)
		netif_wake_queue(ag->dev);

	if (!dma_stuck)
		cancel_delayed_work(&ag->restart_work);

	return sent;
}
static int ag71xx_rx_packets(struct ag71xx *ag, int limit)
{
	struct net_device *dev = ag->dev;
	struct ag71xx_ring *ring = &ag->rx_ring;
	unsigned int pktlen_mask = ag->desc_pktlen_mask;
	unsigned int offset = ag->rx_buf_offset;
	int ring_mask = BIT(ring->order) - 1;
	int ring_size = BIT(ring->order);
	struct sk_buff_head queue;
	struct sk_buff *skb;
	int done = 0;

	DBG("%s: rx packets, limit=%d, curr=%u, dirty=%u\n",
	    dev->name, limit, ring->curr, ring->dirty);

	skb_queue_head_init(&queue);

	while (done < limit) {
		unsigned int i = ring->curr & ring_mask;
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);
		int pktlen;
		int err = 0;

		if (ag71xx_desc_empty(desc))
			break;

		if ((ring->dirty + ring_size) == ring->curr) {
			ag71xx_assert(0);
			break;
		}

		ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_PR);

		pktlen = desc->ctrl & pktlen_mask;
		pktlen -= ETH_FCS_LEN;

		dma_unmap_single(&dev->dev, ring->buf[i].dma_addr,
				 ag->rx_buf_size, DMA_FROM_DEVICE);

		dev->stats.rx_packets++;
		dev->stats.rx_bytes += pktlen;

		skb = build_skb(ring->buf[i].rx_buf, ag71xx_buffer_size(ag));
		if (!skb) {
			skb_free_frag(ring->buf[i].rx_buf);
			goto next;
		}

		skb_reserve(skb, offset);
		skb_put(skb, pktlen);

		if (err) {
			dev->stats.rx_dropped++;
			kfree_skb(skb);
		} else {
			skb->dev = dev;
			skb->ip_summed = CHECKSUM_NONE;
			__skb_queue_tail(&queue, skb);
		}

next:
		ring->buf[i].rx_buf = NULL;
		done++;

		ring->curr++;
	}

	ag71xx_ring_rx_refill(ag);

	while ((skb = __skb_dequeue(&queue)) != NULL) {
		skb->protocol = eth_type_trans(skb, dev);
		netif_receive_skb(skb);
	}

	DBG("%s: rx finish, curr=%u, dirty=%u, done=%d\n",
	    dev->name, ring->curr, ring->dirty, done);

	return done;
}
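
/*
 * NAPI poll: reap completed TX descriptors first, then receive up to
 * 'limit' frames. Polling stops (and the AG71XX_INT_POLL sources are
 * re-enabled) only when both engines are idle; an RX overflow restarts
 * the RX engine, and a failed buffer refill falls back to the oom_timer.
 */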
static int ag71xx_poll(struct napi_struct *napi, int limit)
{
	struct ag71xx *ag = container_of(napi, struct ag71xx, napi);
	struct net_device *dev = ag->dev;
	struct ag71xx_ring *rx_ring = &ag->rx_ring;
	int rx_ring_size = BIT(rx_ring->order);
	unsigned long flags;
	u32 status;
	int tx_done;
	int rx_done;

	tx_done = ag71xx_tx_packets(ag, false);

	DBG("%s: processing RX ring\n", dev->name);
	rx_done = ag71xx_rx_packets(ag, limit);

	ag71xx_debugfs_update_napi_stats(ag, rx_done, tx_done);

	if (rx_ring->buf[rx_ring->dirty % rx_ring_size].rx_buf == NULL)
		goto oom;

	status = ag71xx_rr(ag, AG71XX_REG_RX_STATUS);
	if (unlikely(status & RX_STATUS_OF)) {
		ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_OF);
		dev->stats.rx_fifo_errors++;

		/* restart RX */
		ag71xx_wr(ag, AG71XX_REG_RX_CTRL, RX_CTRL_RXE);
	}

	if (rx_done < limit) {
		if (status & RX_STATUS_PR)
			goto more;

		status = ag71xx_rr(ag, AG71XX_REG_TX_STATUS);
		if (status & TX_STATUS_PS)
			goto more;

		DBG("%s: disable polling mode, rx=%d, tx=%d,limit=%d\n",
		    dev->name, rx_done, tx_done, limit);

		napi_complete(napi);

		/* enable interrupts */
		spin_lock_irqsave(&ag->lock, flags);
		ag71xx_int_enable(ag, AG71XX_INT_POLL);
		spin_unlock_irqrestore(&ag->lock, flags);
		return rx_done;
	}

more:
	DBG("%s: stay in polling mode, rx=%d, tx=%d, limit=%d\n",
	    dev->name, rx_done, tx_done, limit);
	return limit;

oom:
	if (netif_msg_rx_err(ag))
		pr_info("%s: out of memory\n", dev->name);

	mod_timer(&ag->oom_timer, jiffies + AG71XX_OOM_REFILL);
	napi_complete(napi);
	return 0;
}
static irqreturn_t ag71xx_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct ag71xx *ag = netdev_priv(dev);
	u32 status;

	status = ag71xx_rr(ag, AG71XX_REG_INT_STATUS);
	ag71xx_dump_intr(ag, "raw", status);

	if (unlikely(!status))
		return IRQ_NONE;

	if (unlikely(status & AG71XX_INT_ERR)) {
		if (status & AG71XX_INT_TX_BE) {
			ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_BE);
			dev_err(&dev->dev, "TX BUS error\n");
		}
		if (status & AG71XX_INT_RX_BE) {
			ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_BE);
			dev_err(&dev->dev, "RX BUS error\n");
		}
	}

	if (likely(status & AG71XX_INT_POLL)) {
		ag71xx_int_disable(ag, AG71XX_INT_POLL);
		DBG("%s: enable polling mode\n", dev->name);
		napi_schedule(&ag->napi);
	}

	ag71xx_debugfs_update_int_stats(ag, status);

	return IRQ_HANDLED;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void ag71xx_netpoll(struct net_device *dev)
{
	disable_irq(dev->irq);
	ag71xx_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif
static int ag71xx_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ag71xx *ag = netdev_priv(dev);

	dev->mtu = new_mtu;
	ag71xx_wr(ag, AG71XX_REG_MAC_MFL,
		  ag71xx_max_frame_len(dev->mtu));

	return 0;
}
static const struct net_device_ops ag71xx_netdev_ops = {
	.ndo_open		= ag71xx_open,
	.ndo_stop		= ag71xx_stop,
	.ndo_start_xmit		= ag71xx_hard_start_xmit,
	.ndo_do_ioctl		= ag71xx_do_ioctl,
	.ndo_tx_timeout		= ag71xx_tx_timeout,
	.ndo_change_mtu		= ag71xx_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ag71xx_netpoll,
#endif
};
static const char *ag71xx_get_phy_if_mode_name(phy_interface_t mode)
{
	switch (mode) {
	case PHY_INTERFACE_MODE_MII:
		return "MII";
	case PHY_INTERFACE_MODE_GMII:
		return "GMII";
	case PHY_INTERFACE_MODE_RMII:
		return "RMII";
	case PHY_INTERFACE_MODE_RGMII:
		return "RGMII";
	case PHY_INTERFACE_MODE_SGMII:
		return "SGMII";
	default:
		break;
	}

	return "unknown";
}
static void ag71xx_of_bit(struct device_node *np, const char *prop,
			  u32 *reg, u32 mask)
{
	u32 val;

	if (of_property_read_u32(np, prop, &val))
		return;

	if (val)
		*reg |= mask;
	else
		*reg &= ~mask;
}
static void ag71xx_setup_gmac_933x(struct device_node *np, void __iomem *base)
{
	u32 val = __raw_readl(base + AR933X_GMAC_REG_ETH_CFG);

	ag71xx_of_bit(np, "switch-phy-swap", &val, AR933X_ETH_CFG_SW_PHY_SWAP);
	ag71xx_of_bit(np, "switch-phy-addr-swap", &val,
		      AR933X_ETH_CFG_SW_PHY_ADDR_SWAP);

	__raw_writel(val, base + AR933X_GMAC_REG_ETH_CFG);
}
static int ag71xx_setup_gmac(struct device_node *np)
{
	struct device_node *np_dev;
	void __iomem *base;
	int err = 0;

	np = of_get_child_by_name(np, "gmac-config");
	if (!np)
		return 0;

	np_dev = of_parse_phandle(np, "device", 0);
	if (!np_dev)
		goto out;

	base = of_iomap(np_dev, 0);
	if (!base) {
		pr_err("%pOF: can't map GMAC registers\n", np_dev);
		err = -ENOMEM;
		goto err_iomap;
	}

	if (of_device_is_compatible(np_dev, "qca,ar9330-gmac"))
		ag71xx_setup_gmac_933x(np, base);

	iounmap(base);

err_iomap:
	of_node_put(np_dev);
out:
	of_node_put(np);
	return err;
}
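
/*
 * Illustrative device tree fragment (an example sketch, not a binding
 * reference; the unit address, register values and phandle labels are
 * placeholders) showing the properties this probe routine consumes:
 *
 *	eth0: ethernet@19000000 {
 *		compatible = "qca,ar7241-eth";
 *		reg = <0x19000000 0x200>;
 *		interrupts = <4>;
 *		resets = <&rst 9>, <&rst 8>;
 *		reset-names = "mac", "phy";
 *		phy-mode = "gmii";
 *		pll-data = <0x16000000 0x00000101 0x00001616>;
 *		pll-reg = <0x4 0x2c 17>;
 *		pll-handle = <&pll>;
 *		fifo-data = <0x0010ffff 0x015500aa 0x01f00140>;
 *		flow-control;
 *	};
 */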
static int ag71xx_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct net_device *dev;
	struct resource *res;
	struct ag71xx *ag;
	const void *mac_addr;
	u32 max_frame_len;
	int tx_size, err;

	if (!np)
		return -ENODEV;

	dev = alloc_etherdev(sizeof(*ag));
	if (!dev)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -EINVAL;

	err = ag71xx_setup_gmac(np);
	if (err)
		return err;

	SET_NETDEV_DEV(dev, &pdev->dev);

	ag = netdev_priv(dev);
	ag->pdev = pdev;
	ag->dev = dev;
	ag->msg_enable = netif_msg_init(ag71xx_msg_level,
					AG71XX_DEFAULT_MSG_ENABLE);
	spin_lock_init(&ag->lock);

	ag->mac_reset = devm_reset_control_get(&pdev->dev, "mac");
	if (IS_ERR(ag->mac_reset)) {
		dev_err(&pdev->dev, "missing mac reset\n");
		err = PTR_ERR(ag->mac_reset);
		goto err_free;
	}

	ag->phy_reset = devm_reset_control_get_optional(&pdev->dev, "phy");

	if (of_property_read_u32_array(np, "fifo-data", ag->fifodata, 3)) {
		if (of_device_is_compatible(np, "qca,ar9130-eth") ||
		    of_device_is_compatible(np, "qca,ar7100-eth")) {
			ag->fifodata[0] = 0x0fff0000;
			ag->fifodata[1] = 0x00001fff;
		} else {
			ag->fifodata[0] = 0x0010ffff;
			ag->fifodata[1] = 0x015500aa;
			ag->fifodata[2] = 0x01f00140;
		}
		if (of_device_is_compatible(np, "qca,ar9130-eth"))
			ag->fifodata[2] = 0x00780fff;
		else if (of_device_is_compatible(np, "qca,ar7100-eth"))
			ag->fifodata[2] = 0x008001ff;
	}

	if (of_property_read_u32_array(np, "pll-data", ag->plldata, 3))
		dev_dbg(&pdev->dev, "failed to read pll-data property\n");

	if (of_property_read_u32_array(np, "pll-reg", ag->pllreg, 3))
		dev_dbg(&pdev->dev, "failed to read pll-reg property\n");

	ag->pllregmap = syscon_regmap_lookup_by_phandle(np, "pll-handle");
	if (IS_ERR(ag->pllregmap)) {
		dev_dbg(&pdev->dev, "failed to read pll-handle property\n");
		ag->pllregmap = NULL;
	}

	ag->mac_base = devm_ioremap_nocache(&pdev->dev, res->start,
					    res->end - res->start + 1);
	if (!ag->mac_base) {
		err = -ENOMEM;
		goto err_free;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (res) {
		ag->mii_base = devm_ioremap_nocache(&pdev->dev, res->start,
						    res->end - res->start + 1);
		if (!ag->mii_base) {
			err = -ENOMEM;
			goto err_free;
		}
	}

	dev->irq = platform_get_irq(pdev, 0);
	err = devm_request_irq(&pdev->dev, dev->irq, ag71xx_interrupt,
			       0x0, dev_name(&pdev->dev), dev);
	if (err) {
		dev_err(&pdev->dev, "unable to request IRQ %d\n", dev->irq);
		goto err_free;
	}

	dev->netdev_ops = &ag71xx_netdev_ops;
	dev->ethtool_ops = &ag71xx_ethtool_ops;

	INIT_DELAYED_WORK(&ag->restart_work, ag71xx_restart_work_func);

	init_timer(&ag->oom_timer);
	ag->oom_timer.data = (unsigned long) dev;
	ag->oom_timer.function = ag71xx_oom_timer_handler;

	tx_size = AG71XX_TX_RING_SIZE_DEFAULT;
	ag->rx_ring.order = ag71xx_ring_size_order(AG71XX_RX_RING_SIZE_DEFAULT);

	if (of_device_is_compatible(np, "qca,ar9340-eth") ||
	    of_device_is_compatible(np, "qca,qca9530-eth") ||
	    of_device_is_compatible(np, "qca,qca9550-eth") ||
	    of_device_is_compatible(np, "qca,qca9560-eth"))
		ag->desc_pktlen_mask = SZ_16K - 1;
	else
		ag->desc_pktlen_mask = SZ_4K - 1;

	if (ag->desc_pktlen_mask == SZ_16K - 1 &&
	    !of_device_is_compatible(np, "qca,qca9550-eth") &&
	    !of_device_is_compatible(np, "qca,qca9560-eth"))
		max_frame_len = ag->desc_pktlen_mask;
	else
		max_frame_len = 1540;

	dev->min_mtu = 68;
	dev->max_mtu = max_frame_len - ag71xx_max_frame_len(0);

	if (of_device_is_compatible(np, "qca,ar7240-eth"))
		ag->tx_hang_workaround = 1;

	ag->rx_buf_offset = NET_SKB_PAD;
	if (!of_device_is_compatible(np, "qca,ar7100-eth") &&
	    !of_device_is_compatible(np, "qca,ar9130-eth"))
		ag->rx_buf_offset += NET_IP_ALIGN;

	if (of_device_is_compatible(np, "qca,ar7100-eth")) {
		ag->tx_ring.desc_split = AG71XX_TX_RING_SPLIT;
		tx_size *= AG71XX_TX_RING_DS_PER_PKT;
	}
	ag->tx_ring.order = ag71xx_ring_size_order(tx_size);

	ag->stop_desc = dmam_alloc_coherent(&pdev->dev,
					    sizeof(struct ag71xx_desc),
					    &ag->stop_desc_dma, GFP_KERNEL);
	if (!ag->stop_desc) {
		err = -ENOMEM;
		goto err_free;
	}

	ag->stop_desc->data = 0;
	ag->stop_desc->ctrl = 0;
	ag->stop_desc->next = (u32) ag->stop_desc_dma;

	mac_addr = of_get_mac_address(np);
	if (mac_addr)
		memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
	if (!mac_addr || !is_valid_ether_addr(dev->dev_addr)) {
		dev_err(&pdev->dev, "invalid MAC address, using random address\n");
		eth_random_addr(dev->dev_addr);
	}

	ag->phy_if_mode = of_get_phy_mode(np);
	if (ag->phy_if_mode < 0) {
		dev_err(&pdev->dev, "missing phy-mode property in DT\n");
		err = ag->phy_if_mode;
		goto err_free;
	}

	netif_napi_add(dev, &ag->napi, ag71xx_poll, AG71XX_NAPI_WEIGHT);

	ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, 0);
	ag71xx_hw_init(ag);
	ag71xx_mdio_init(ag);

	err = ag71xx_phy_connect(ag);
	if (err)
		goto err_mdio_cleanup;

	err = ag71xx_debugfs_init(ag);
	if (err)
		goto err_phy_disconnect;

	platform_set_drvdata(pdev, dev);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "unable to register net device\n");
		platform_set_drvdata(pdev, NULL);
		ag71xx_debugfs_exit(ag);
		goto err_phy_disconnect;
	}

	pr_info("%s: Atheros AG71xx at 0x%08lx, irq %d, mode:%s\n",
		dev->name, (unsigned long) ag->mac_base, dev->irq,
		ag71xx_get_phy_if_mode_name(ag->phy_if_mode));

	return 0;

err_phy_disconnect:
	ag71xx_phy_disconnect(ag);
err_mdio_cleanup:
	ag71xx_mdio_cleanup(ag);
err_free:
	free_netdev(dev);
	return err;
}
static int ag71xx_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct ag71xx *ag;

	if (!dev)
		return 0;

	ag = netdev_priv(dev);
	ag71xx_debugfs_exit(ag);
	ag71xx_phy_disconnect(ag);
	ag71xx_mdio_cleanup(ag);
	unregister_netdev(dev);
	/*
	 * dev->irq and ag->mac_base are devm-managed; explicit free_irq()
	 * and iounmap() here would release them a second time.
	 */
	free_netdev(dev);
	platform_set_drvdata(pdev, NULL);

	return 0;
}
static const struct of_device_id ag71xx_match[] = {
	{ .compatible = "qca,ar7100-eth" },
	{ .compatible = "qca,ar7240-eth" },
	{ .compatible = "qca,ar7241-eth" },
	{ .compatible = "qca,ar7242-eth" },
	{ .compatible = "qca,ar9130-eth" },
	{ .compatible = "qca,ar9330-eth" },
	{ .compatible = "qca,ar9340-eth" },
	{ .compatible = "qca,qca9530-eth" },
	{ .compatible = "qca,qca9550-eth" },
	{ .compatible = "qca,qca9560-eth" },
	{}
};
MODULE_DEVICE_TABLE(of, ag71xx_match);
static struct platform_driver ag71xx_driver = {
	.probe		= ag71xx_probe,
	.remove		= ag71xx_remove,
	.driver = {
		.name		= AG71XX_DRV_NAME,
		.of_match_table	= ag71xx_match,
	},
};
static int __init ag71xx_module_init(void)
{
	int ret;

	ret = ag71xx_debugfs_root_init();
	if (ret)
		goto err_out;

	ret = platform_driver_register(&ag71xx_driver);
	if (ret)
		goto err_debugfs_exit;

	return 0;

err_debugfs_exit:
	ag71xx_debugfs_root_exit();
err_out:
	return ret;
}

static void __exit ag71xx_module_exit(void)
{
	platform_driver_unregister(&ag71xx_driver);
	ag71xx_debugfs_root_exit();
}
module_init(ag71xx_module_init);
module_exit(ag71xx_module_exit);

MODULE_AUTHOR("Gabor Juhos <juhosg@openwrt.org>");
MODULE_AUTHOR("Imre Kaloz <kaloz@openwrt.org>");
MODULE_AUTHOR("Felix Fietkau <nbd@nbd.name>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" AG71XX_DRV_NAME);