/*
 * Atheros AR71xx built-in ethernet mac driver
 *
 * Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
 * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
 *
 * Based on Atheros' AG7100 driver
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include <linux/sizes.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>

#include "ag71xx.h"

#define AG71XX_DEFAULT_MSG_ENABLE	\
	(NETIF_MSG_DRV			\
	| NETIF_MSG_PROBE		\
	| NETIF_MSG_LINK		\
	| NETIF_MSG_TIMER		\
	| NETIF_MSG_IFDOWN		\
	| NETIF_MSG_IFUP		\
	| NETIF_MSG_RX_ERR		\
	| NETIF_MSG_TX_ERR)

static int ag71xx_msg_level = -1;

module_param_named(msg_level, ag71xx_msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");

#define ETH_SWITCH_HEADER_LEN	2

static int ag71xx_tx_packets(struct ag71xx *ag, bool flush);

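/*
 * Worst-case on-wire length for a given MTU: the optional 2-byte switch
 * header, the ethernet header, one VLAN tag and the trailing FCS all have
 * to fit below the MAC's maximum frame length register.
 */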
static inline unsigned int ag71xx_max_frame_len(unsigned int mtu)
{
	return ETH_SWITCH_HEADER_LEN + ETH_HLEN + VLAN_HLEN + mtu + ETH_FCS_LEN;
}

static void ag71xx_dump_dma_regs(struct ag71xx *ag)
{
	DBG("%s: dma_tx_ctrl=%08x, dma_tx_desc=%08x, dma_tx_status=%08x\n",
	    ag->dev->name,
	    ag71xx_rr(ag, AG71XX_REG_TX_CTRL),
	    ag71xx_rr(ag, AG71XX_REG_TX_DESC),
	    ag71xx_rr(ag, AG71XX_REG_TX_STATUS));

	DBG("%s: dma_rx_ctrl=%08x, dma_rx_desc=%08x, dma_rx_status=%08x\n",
	    ag->dev->name,
	    ag71xx_rr(ag, AG71XX_REG_RX_CTRL),
	    ag71xx_rr(ag, AG71XX_REG_RX_DESC),
	    ag71xx_rr(ag, AG71XX_REG_RX_STATUS));
}

static void ag71xx_dump_regs(struct ag71xx *ag)
{
	DBG("%s: mac_cfg1=%08x, mac_cfg2=%08x, ipg=%08x, hdx=%08x, mfl=%08x\n",
	    ag->dev->name,
	    ag71xx_rr(ag, AG71XX_REG_MAC_CFG1),
	    ag71xx_rr(ag, AG71XX_REG_MAC_CFG2),
	    ag71xx_rr(ag, AG71XX_REG_MAC_IPG),
	    ag71xx_rr(ag, AG71XX_REG_MAC_HDX),
	    ag71xx_rr(ag, AG71XX_REG_MAC_MFL));
	DBG("%s: mac_ifctl=%08x, mac_addr1=%08x, mac_addr2=%08x\n",
	    ag->dev->name,
	    ag71xx_rr(ag, AG71XX_REG_MAC_IFCTL),
	    ag71xx_rr(ag, AG71XX_REG_MAC_ADDR1),
	    ag71xx_rr(ag, AG71XX_REG_MAC_ADDR2));
	DBG("%s: fifo_cfg0=%08x, fifo_cfg1=%08x, fifo_cfg2=%08x\n",
	    ag->dev->name,
	    ag71xx_rr(ag, AG71XX_REG_FIFO_CFG0),
	    ag71xx_rr(ag, AG71XX_REG_FIFO_CFG1),
	    ag71xx_rr(ag, AG71XX_REG_FIFO_CFG2));
	DBG("%s: fifo_cfg3=%08x, fifo_cfg4=%08x, fifo_cfg5=%08x\n",
	    ag->dev->name,
	    ag71xx_rr(ag, AG71XX_REG_FIFO_CFG3),
	    ag71xx_rr(ag, AG71XX_REG_FIFO_CFG4),
	    ag71xx_rr(ag, AG71XX_REG_FIFO_CFG5));
}

static inline void ag71xx_dump_intr(struct ag71xx *ag, char *label, u32 intr)
{
	DBG("%s: %s intr=%08x %s%s%s%s%s%s\n",
	    ag->dev->name, label, intr,
	    (intr & AG71XX_INT_TX_PS) ? "TXPS " : "",
	    (intr & AG71XX_INT_TX_UR) ? "TXUR " : "",
	    (intr & AG71XX_INT_TX_BE) ? "TXBE " : "",
	    (intr & AG71XX_INT_RX_PR) ? "RXPR " : "",
	    (intr & AG71XX_INT_RX_OF) ? "RXOF " : "",
	    (intr & AG71XX_INT_RX_BE) ? "RXBE " : "");
}

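/*
 * Discard everything still pending in the TX ring. Descriptors the
 * hardware never completed are counted as tx_errors; freed packet and
 * byte totals are reported to byte queue limits so the queue state
 * stays consistent.
 */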
static void ag71xx_ring_tx_clean(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->tx_ring;
	struct net_device *dev = ag->dev;
	int ring_mask = BIT(ring->order) - 1;
	u32 bytes_compl = 0, pkts_compl = 0;

	while (ring->curr != ring->dirty) {
		struct ag71xx_desc *desc;
		u32 i = ring->dirty & ring_mask;

		desc = ag71xx_ring_desc(ring, i);
		if (!ag71xx_desc_empty(desc)) {
			desc->ctrl = 0;
			dev->stats.tx_errors++;
		}

		if (ring->buf[i].skb) {
			bytes_compl += ring->buf[i].len;
			pkts_compl++;
			dev_kfree_skb_any(ring->buf[i].skb);
		}

		ring->buf[i].skb = NULL;
		ring->dirty++;
	}

	/* flush descriptors */
	wmb();

	netdev_completed_queue(dev, pkts_compl, bytes_compl);
}

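/*
 * The descriptors form a hardware-followed circular list: each 'next'
 * field holds the DMA address of the following slot, and the ring mask
 * wraps the last slot back to the first.
 */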
static void ag71xx_ring_tx_init(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->tx_ring;
	int ring_size = BIT(ring->order);
	int ring_mask = ring_size - 1;
	int i;

	for (i = 0; i < ring_size; i++) {
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);

		desc->next = (u32) (ring->descs_dma +
			AG71XX_DESC_SIZE * ((i + 1) & ring_mask));

		desc->ctrl = DESC_EMPTY;
		ring->buf[i].skb = NULL;
	}

	/* flush descriptors */
	wmb();

	ring->curr = 0;
	ring->dirty = 0;
	netdev_reset_queue(ag->dev);
}

static void ag71xx_ring_rx_clean(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->rx_ring;
	int ring_size = BIT(ring->order);
	int i;

	if (!ring->buf)
		return;

	for (i = 0; i < ring_size; i++)
		if (ring->buf[i].rx_buf) {
			dma_unmap_single(&ag->dev->dev, ring->buf[i].dma_addr,
					 ag->rx_buf_size, DMA_FROM_DEVICE);
			skb_free_frag(ring->buf[i].rx_buf);
		}
}

static int ag71xx_buffer_size(struct ag71xx *ag)
{
	return ag->rx_buf_size +
	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}

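/*
 * RX buffers are page fragments rather than preallocated skbs, so a
 * received frame can later be wrapped with build_skb() without copying.
 * The allocator is passed in by the caller because the init and refill
 * paths run in different contexts.
 */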
static bool ag71xx_fill_rx_buf(struct ag71xx *ag, struct ag71xx_buf *buf,
			       int offset,
			       void *(*alloc)(unsigned int size))
{
	struct ag71xx_ring *ring = &ag->rx_ring;
	struct ag71xx_desc *desc = ag71xx_ring_desc(ring, buf - &ring->buf[0]);
	void *data;

	data = alloc(ag71xx_buffer_size(ag));
	if (!data)
		return false;

	buf->rx_buf = data;
	buf->dma_addr = dma_map_single(&ag->dev->dev, data, ag->rx_buf_size,
				       DMA_FROM_DEVICE);
	desc->data = (u32) buf->dma_addr + offset;
	return true;
}

static int ag71xx_ring_rx_init(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->rx_ring;
	int ring_size = BIT(ring->order);
	int ring_mask = BIT(ring->order) - 1;
	unsigned int i;
	int ret;

	ret = 0;
	for (i = 0; i < ring_size; i++) {
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);

		desc->next = (u32) (ring->descs_dma +
			AG71XX_DESC_SIZE * ((i + 1) & ring_mask));

		DBG("ag71xx: RX desc at %p, next is %08x\n",
		    desc, desc->next);
	}

	for (i = 0; i < ring_size; i++) {
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);

		if (!ag71xx_fill_rx_buf(ag, &ring->buf[i], ag->rx_buf_offset,
					netdev_alloc_frag)) {
			ret = -ENOMEM;
			break;
		}

		desc->ctrl = DESC_EMPTY;
	}

	/* flush descriptors */
	wmb();

	ring->curr = 0;
	ring->dirty = 0;

	return ret;
}

static int ag71xx_ring_rx_refill(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->rx_ring;
	int ring_mask = BIT(ring->order) - 1;
	unsigned int count;
	int offset = ag->rx_buf_offset;

	count = 0;
	for (; ring->curr - ring->dirty > 0; ring->dirty++) {
		struct ag71xx_desc *desc;
		unsigned int i;

		i = ring->dirty & ring_mask;
		desc = ag71xx_ring_desc(ring, i);

		if (!ring->buf[i].rx_buf &&
		    !ag71xx_fill_rx_buf(ag, &ring->buf[i], offset,
					napi_alloc_frag))
			break;

		desc->ctrl = DESC_EMPTY;
		count++;
	}

	/* flush descriptors */
	wmb();

	DBG("%s: %u rx descriptors refilled\n", ag->dev->name, count);

	return count;
}

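/*
 * TX and RX share one buffer array and one coherent descriptor block:
 * TX takes the first BIT(tx->order) entries, RX the rest. Teardown in
 * ag71xx_rings_free() therefore frees a single allocation.
 */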
static int ag71xx_rings_init(struct ag71xx *ag)
{
	struct ag71xx_ring *tx = &ag->tx_ring;
	struct ag71xx_ring *rx = &ag->rx_ring;
	int ring_size = BIT(tx->order) + BIT(rx->order);
	int tx_size = BIT(tx->order);

	tx->buf = kzalloc(ring_size * sizeof(*tx->buf), GFP_KERNEL);
	if (!tx->buf)
		return -ENOMEM;

	tx->descs_cpu = dma_alloc_coherent(NULL, ring_size * AG71XX_DESC_SIZE,
					   &tx->descs_dma, GFP_ATOMIC);
	if (!tx->descs_cpu) {
		kfree(tx->buf);
		tx->buf = NULL;
		return -ENOMEM;
	}

	rx->buf = &tx->buf[BIT(tx->order)];
	rx->descs_cpu = ((void *)tx->descs_cpu) + tx_size * AG71XX_DESC_SIZE;
	rx->descs_dma = tx->descs_dma + tx_size * AG71XX_DESC_SIZE;

	ag71xx_ring_tx_init(ag);
	return ag71xx_ring_rx_init(ag);
}

static void ag71xx_rings_free(struct ag71xx *ag)
{
	struct ag71xx_ring *tx = &ag->tx_ring;
	struct ag71xx_ring *rx = &ag->rx_ring;
	int ring_size = BIT(tx->order) + BIT(rx->order);

	if (tx->descs_cpu)
		dma_free_coherent(NULL, ring_size * AG71XX_DESC_SIZE,
				  tx->descs_cpu, tx->descs_dma);

	kfree(tx->buf);

	tx->descs_cpu = NULL;
	rx->descs_cpu = NULL;
	tx->buf = NULL;
	rx->buf = NULL;
}

static void ag71xx_rings_cleanup(struct ag71xx *ag)
{
	ag71xx_ring_rx_clean(ag);
	ag71xx_ring_tx_clean(ag);
	ag71xx_rings_free(ag);

	netdev_reset_queue(ag->dev);
}

static unsigned char *ag71xx_speed_str(struct ag71xx *ag)
{
	switch (ag->speed) {
	case SPEED_1000:
		return "1000";
	case SPEED_100:
		return "100";
	case SPEED_10:
		return "10";
	}

	return "?";
}

static void ag71xx_hw_set_macaddr(struct ag71xx *ag, unsigned char *mac)
{
	u32 t;

	t = (((u32) mac[5]) << 24) | (((u32) mac[4]) << 16)
	  | (((u32) mac[3]) << 8) | ((u32) mac[2]);

	ag71xx_wr(ag, AG71XX_REG_MAC_ADDR1, t);

	t = (((u32) mac[1]) << 24) | (((u32) mac[0]) << 16);
	ag71xx_wr(ag, AG71XX_REG_MAC_ADDR2, t);
}

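/*
 * The stop descriptor programmed below is an empty descriptor whose
 * 'next' pointer refers to itself; parking both DMA engines on it gives
 * the hardware somewhere harmless to sit while the real rings are torn
 * down and rebuilt.
 */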
static void ag71xx_dma_reset(struct ag71xx *ag)
{
	u32 val;
	int i;

	ag71xx_dump_dma_regs(ag);

	/* stop RX and TX */
	ag71xx_wr(ag, AG71XX_REG_RX_CTRL, 0);
	ag71xx_wr(ag, AG71XX_REG_TX_CTRL, 0);

	/*
	 * give the hardware some time to really stop all rx/tx activity
	 * clearing the descriptors too early causes random memory corruption
	 */
	mdelay(1);

	/* clear descriptor addresses */
	ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->stop_desc_dma);
	ag71xx_wr(ag, AG71XX_REG_RX_DESC, ag->stop_desc_dma);

	/* clear pending RX/TX interrupts */
	for (i = 0; i < 256; i++) {
		ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_PR);
		ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_PS);
	}

	/* clear pending errors */
	ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_BE | RX_STATUS_OF);
	ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_BE | TX_STATUS_UR);

	val = ag71xx_rr(ag, AG71XX_REG_RX_STATUS);
	if (val)
		pr_alert("%s: unable to clear DMA Rx status: %08x\n",
			 ag->dev->name, val);

	val = ag71xx_rr(ag, AG71XX_REG_TX_STATUS);

	/* mask out reserved bits */
	val &= ~0xff000000;

	if (val)
		pr_alert("%s: unable to clear DMA Tx status: %08x\n",
			 ag->dev->name, val);

	ag71xx_dump_dma_regs(ag);
}

#define MAC_CFG1_INIT	(MAC_CFG1_RXE | MAC_CFG1_TXE | \
			 MAC_CFG1_SRX | MAC_CFG1_STX)

#define FIFO_CFG0_INIT	(FIFO_CFG0_ALL << FIFO_CFG0_ENABLE_SHIFT)

#define FIFO_CFG4_INIT	(FIFO_CFG4_DE | FIFO_CFG4_DV | FIFO_CFG4_FC | \
			 FIFO_CFG4_CE | FIFO_CFG4_CR | FIFO_CFG4_LM | \
			 FIFO_CFG4_LO | FIFO_CFG4_OK | FIFO_CFG4_MC | \
			 FIFO_CFG4_BC | FIFO_CFG4_DR | FIFO_CFG4_LE | \
			 FIFO_CFG4_CF | FIFO_CFG4_PF | FIFO_CFG4_UO | \
			 FIFO_CFG4_VT)

#define FIFO_CFG5_INIT	(FIFO_CFG5_DE | FIFO_CFG5_DV | FIFO_CFG5_FC | \
			 FIFO_CFG5_CE | FIFO_CFG5_LO | FIFO_CFG5_OK | \
			 FIFO_CFG5_MC | FIFO_CFG5_BC | FIFO_CFG5_DR | \
			 FIFO_CFG5_CF | FIFO_CFG5_PF | FIFO_CFG5_VT | \
			 FIFO_CFG5_LE | FIFO_CFG5_FT | FIFO_CFG5_16 | \
			 FIFO_CFG5_17 | FIFO_CFG5_SF)

static void ag71xx_hw_stop(struct ag71xx *ag)
{
	/* disable all interrupts and stop the rx/tx engine */
	ag71xx_wr(ag, AG71XX_REG_INT_ENABLE, 0);
	ag71xx_wr(ag, AG71XX_REG_RX_CTRL, 0);
	ag71xx_wr(ag, AG71XX_REG_TX_CTRL, 0);
}

static void ag71xx_hw_setup(struct ag71xx *ag)
{
	struct device_node *np = ag->pdev->dev.of_node;
	u32 init = MAC_CFG1_INIT;

	/* setup MAC configuration registers */
	if (of_property_read_bool(np, "flow-control"))
		init |= MAC_CFG1_TFC | MAC_CFG1_RFC;
	ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, init);

	ag71xx_sb(ag, AG71XX_REG_MAC_CFG2,
		  MAC_CFG2_PAD_CRC_EN | MAC_CFG2_LEN_CHECK);

	/* setup max frame length to zero */
	ag71xx_wr(ag, AG71XX_REG_MAC_MFL, 0);

	/* setup FIFO configuration registers */
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG0, FIFO_CFG0_INIT);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG1, ag->fifodata[0]);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG2, ag->fifodata[1]);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG4, FIFO_CFG4_INIT);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG5, FIFO_CFG5_INIT);
}

static void ag71xx_hw_init(struct ag71xx *ag)
{
	ag71xx_hw_stop(ag);

	ag71xx_sb(ag, AG71XX_REG_MAC_CFG1, MAC_CFG1_SR);
	udelay(20);

	reset_control_assert(ag->mac_reset);
	msleep(100);
	reset_control_deassert(ag->mac_reset);
	msleep(200);

	ag71xx_hw_setup(ag);

	ag71xx_dma_reset(ag);
}

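/*
 * "Fast" reset: pulse the MAC reset block without tearing down the
 * rings. MII_CFG and the current RX descriptor pointer are saved and
 * restored around the reset, and pending TX is flushed so the byte
 * queue limit accounting stays balanced.
 */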
static void ag71xx_fast_reset(struct ag71xx *ag)
{
	struct net_device *dev = ag->dev;
	u32 rx_ds;
	u32 mii_reg;

	ag71xx_hw_stop(ag);
	wmb();

	mii_reg = ag71xx_rr(ag, AG71XX_REG_MII_CFG);
	rx_ds = ag71xx_rr(ag, AG71XX_REG_RX_DESC);

	ag71xx_tx_packets(ag, true);

	reset_control_assert(ag->mac_reset);
	udelay(10);
	reset_control_deassert(ag->mac_reset);
	udelay(10);

	ag71xx_dma_reset(ag);
	ag71xx_hw_setup(ag);
	ag->tx_ring.curr = 0;
	ag->tx_ring.dirty = 0;
	netdev_reset_queue(ag->dev);

	/* setup max frame length */
	ag71xx_wr(ag, AG71XX_REG_MAC_MFL,
		  ag71xx_max_frame_len(ag->dev->mtu));

	ag71xx_wr(ag, AG71XX_REG_RX_DESC, rx_ds);
	ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->tx_ring.descs_dma);
	ag71xx_wr(ag, AG71XX_REG_MII_CFG, mii_reg);

	ag71xx_hw_set_macaddr(ag, dev->dev_addr);
}

static void ag71xx_hw_start(struct ag71xx *ag)
{
	/* start RX engine */
	ag71xx_wr(ag, AG71XX_REG_RX_CTRL, RX_CTRL_RXE);

	/* enable interrupts */
	ag71xx_wr(ag, AG71XX_REG_INT_ENABLE, AG71XX_INT_INIT);

	netif_wake_queue(ag->dev);
}

static void ath79_set_pllval(struct ag71xx *ag)
{
	u32 pll_reg = ag->pllreg[1];
	u32 pll_val;

	if (!ag->pllregmap)
		return;

	switch (ag->speed) {
	case SPEED_10:
		pll_val = ag->plldata[2];
		break;
	case SPEED_100:
		pll_val = ag->plldata[1];
		break;
	case SPEED_1000:
		pll_val = ag->plldata[0];
		break;
	default:
		BUG();
	}

	if (pll_val)
		regmap_write(ag->pllregmap, pll_reg, pll_val);
}

static void ath79_set_pll(struct ag71xx *ag)
{
	u32 pll_cfg = ag->pllreg[0];
	u32 pll_shift = ag->pllreg[2];

	if (!ag->pllregmap)
		return;

	regmap_update_bits(ag->pllregmap, pll_cfg, 3 << pll_shift,
			   2 << pll_shift);
	udelay(100);

	ath79_set_pllval(ag);

	regmap_update_bits(ag->pllregmap, pll_cfg, 3 << pll_shift,
			   3 << pll_shift);
	udelay(100);

	regmap_update_bits(ag->pllregmap, pll_cfg, 3 << pll_shift, 0);
	udelay(100);
}

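/*
 * The sequence above (assert one bit of the 2-bit field, program the
 * value, assert both, then clear) is presumably a load/latch handshake
 * on the SoC's ethernet PLL configuration register; the field position
 * comes from the third cell of the "pll-reg" DT property.
 */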
static void ath79_mii_ctrl_set_if(struct ag71xx *ag, unsigned int mii_if)
{
	u32 t;

	t = __raw_readl(ag->mii_base);
	t &= ~(AR71XX_MII_CTRL_IF_MASK);
	t |= (mii_if & AR71XX_MII_CTRL_IF_MASK);
	__raw_writel(t, ag->mii_base);
}

static void ath79_mii0_ctrl_set_if(struct ag71xx *ag)
{
	unsigned int mii_if;

	switch (ag->phy_if_mode) {
	case PHY_INTERFACE_MODE_MII:
		mii_if = AR71XX_MII0_CTRL_IF_MII;
		break;
	case PHY_INTERFACE_MODE_GMII:
		mii_if = AR71XX_MII0_CTRL_IF_GMII;
		break;
	case PHY_INTERFACE_MODE_RGMII:
		mii_if = AR71XX_MII0_CTRL_IF_RGMII;
		break;
	case PHY_INTERFACE_MODE_RMII:
		mii_if = AR71XX_MII0_CTRL_IF_RMII;
		break;
	default:
		WARN(1, "Impossible PHY mode defined.\n");
		return;
	}

	ath79_mii_ctrl_set_if(ag, mii_if);
}

static void ath79_mii1_ctrl_set_if(struct ag71xx *ag)
{
	unsigned int mii_if;

	switch (ag->phy_if_mode) {
	case PHY_INTERFACE_MODE_RMII:
		mii_if = AR71XX_MII1_CTRL_IF_RMII;
		break;
	case PHY_INTERFACE_MODE_RGMII:
		mii_if = AR71XX_MII1_CTRL_IF_RGMII;
		break;
	default:
		WARN(1, "Impossible PHY mode defined.\n");
		return;
	}

	ath79_mii_ctrl_set_if(ag, mii_if);
}

static void ath79_mii_ctrl_set_speed(struct ag71xx *ag)
{
	unsigned int mii_speed;
	u32 t;

	if (!ag->mii_base)
		return;

	switch (ag->speed) {
	case SPEED_10:
		mii_speed = AR71XX_MII_CTRL_SPEED_10;
		break;
	case SPEED_100:
		mii_speed = AR71XX_MII_CTRL_SPEED_100;
		break;
	case SPEED_1000:
		mii_speed = AR71XX_MII_CTRL_SPEED_1000;
		break;
	default:
		BUG();
	}

	t = __raw_readl(ag->mii_base);
	t &= ~(AR71XX_MII_CTRL_SPEED_MASK << AR71XX_MII_CTRL_SPEED_SHIFT);
	t |= mii_speed << AR71XX_MII_CTRL_SPEED_SHIFT;
	__raw_writel(t, ag->mii_base);
}

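/*
 * Propagate the PHY-reported link state into the MAC: interface mode
 * bits in MAC_CFG2, the speed bit in IFCTL, FIFO burst mode, plus the
 * per-SoC PLL and MII clock setup. With update == false (the internal
 * restart path) the link-down shortcut and log messages are skipped.
 */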
static void
__ag71xx_link_adjust(struct ag71xx *ag, bool update)
{
	struct device_node *np = ag->pdev->dev.of_node;
	u32 cfg2;
	u32 ifctl;
	u32 fifo5;

	if (!ag->link && update) {
		ag71xx_hw_stop(ag);
		netif_carrier_off(ag->dev);
		if (netif_msg_link(ag))
			pr_info("%s: link down\n", ag->dev->name);
		return;
	}

	if (!of_device_is_compatible(np, "qca,ar9130-eth") &&
	    !of_device_is_compatible(np, "qca,ar7100-eth"))
		ag71xx_fast_reset(ag);

	cfg2 = ag71xx_rr(ag, AG71XX_REG_MAC_CFG2);
	cfg2 &= ~(MAC_CFG2_IF_1000 | MAC_CFG2_IF_10_100 | MAC_CFG2_FDX);
	cfg2 |= (ag->duplex) ? MAC_CFG2_FDX : 0;

	ifctl = ag71xx_rr(ag, AG71XX_REG_MAC_IFCTL);
	ifctl &= ~(MAC_IFCTL_SPEED);

	fifo5 = ag71xx_rr(ag, AG71XX_REG_FIFO_CFG5);
	fifo5 &= ~FIFO_CFG5_BM;

	switch (ag->speed) {
	case SPEED_1000:
		cfg2 |= MAC_CFG2_IF_1000;
		fifo5 |= FIFO_CFG5_BM;
		break;
	case SPEED_100:
		cfg2 |= MAC_CFG2_IF_10_100;
		ifctl |= MAC_IFCTL_SPEED;
		break;
	case SPEED_10:
		cfg2 |= MAC_CFG2_IF_10_100;
		break;
	default:
		BUG();
		return;
	}

	if (ag->tx_ring.desc_split) {
		ag->fifodata[2] &= 0xffff;
		ag->fifodata[2] |= ((2048 - ag->tx_ring.desc_split) / 4) << 16;
	}

	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG3, ag->fifodata[2]);

	if (of_device_is_compatible(np, "qca,ar7100-eth") ||
	    of_device_is_compatible(np, "qca,ar9130-eth")) {
		ath79_set_pll(ag);
		ath79_mii_ctrl_set_speed(ag);
	} else if (of_device_is_compatible(np, "qca,ar7242-eth") ||
		   of_device_is_compatible(np, "qca,ar9340-eth") ||
		   of_device_is_compatible(np, "qca,qca9550-eth") ||
		   of_device_is_compatible(np, "qca,qca9560-eth")) {
		ath79_set_pllval(ag);
	}

	ag71xx_wr(ag, AG71XX_REG_MAC_CFG2, cfg2);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG5, fifo5);
	ag71xx_wr(ag, AG71XX_REG_MAC_IFCTL, ifctl);

	if (of_device_is_compatible(np, "qca,qca9530-eth") ||
	    of_device_is_compatible(np, "qca,qca9560-eth")) {
		/*
		 * The rx ring buffer can stall on small packets on QCA953x and
		 * QCA956x. Disabling the inline checksum engine fixes the stall.
		 * The wr, rr functions cannot be used since this hidden register
		 * is outside of the normal ag71xx register block.
		 */
		void __iomem *dam = ioremap_nocache(0xb90001bc, 0x4);
		if (dam) {
			__raw_writel(__raw_readl(dam) & ~BIT(27), dam);
			(void)__raw_readl(dam);
			iounmap(dam);
		}
	}

	ag71xx_hw_start(ag);

	netif_carrier_on(ag->dev);
	if (update && netif_msg_link(ag))
		pr_info("%s: link up (%sMbps/%s duplex)\n",
			ag->dev->name,
			ag71xx_speed_str(ag),
			(DUPLEX_FULL == ag->duplex) ? "Full" : "Half");

	ag71xx_dump_regs(ag);
}

void ag71xx_link_adjust(struct ag71xx *ag)
{
	__ag71xx_link_adjust(ag, true);
}

static int ag71xx_hw_enable(struct ag71xx *ag)
{
	int ret;

	ret = ag71xx_rings_init(ag);
	if (ret)
		return ret;

	napi_enable(&ag->napi);
	ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->tx_ring.descs_dma);
	ag71xx_wr(ag, AG71XX_REG_RX_DESC, ag->rx_ring.descs_dma);
	netif_start_queue(ag->dev);

	return 0;
}

static void ag71xx_hw_disable(struct ag71xx *ag)
{
	unsigned long flags;

	spin_lock_irqsave(&ag->lock, flags);

	netif_stop_queue(ag->dev);

	ag71xx_hw_stop(ag);
	ag71xx_dma_reset(ag);

	napi_disable(&ag->napi);
	del_timer_sync(&ag->oom_timer);

	spin_unlock_irqrestore(&ag->lock, flags);

	ag71xx_rings_cleanup(ag);
}

static int ag71xx_open(struct net_device *dev)
{
	struct ag71xx *ag = netdev_priv(dev);
	unsigned int max_frame_len;
	int ret;

	netif_carrier_off(dev);
	max_frame_len = ag71xx_max_frame_len(dev->mtu);
	ag->rx_buf_size = SKB_DATA_ALIGN(max_frame_len + NET_SKB_PAD + NET_IP_ALIGN);

	/* setup max frame length */
	ag71xx_wr(ag, AG71XX_REG_MAC_MFL, max_frame_len);
	ag71xx_hw_set_macaddr(ag, dev->dev_addr);

	ret = ag71xx_hw_enable(ag);
	if (ret)
		goto err;

	phy_start(ag->phy_dev);

	return 0;

err:
	ag71xx_rings_cleanup(ag);
	return ret;
}

static int ag71xx_stop(struct net_device *dev)
{
	unsigned long flags;
	struct ag71xx *ag = netdev_priv(dev);

	netif_carrier_off(dev);
	phy_stop(ag->phy_dev);

	spin_lock_irqsave(&ag->lock, flags);
	if (ag->link) {
		ag->link = 0;
		ag71xx_link_adjust(ag);
	}
	spin_unlock_irqrestore(&ag->lock, flags);

	ag71xx_hw_disable(ag);

	return 0;
}

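/*
 * Map one packet onto one or more TX descriptors. On SoCs with the
 * desc_split quirk (AR7100) a frame longer than 'split' bytes is spread
 * over several chained descriptors marked DESC_MORE; the first
 * descriptor keeps DESC_EMPTY set until the caller clears it, so the
 * DMA engine cannot start on a half-built chain.
 */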
static int ag71xx_fill_dma_desc(struct ag71xx_ring *ring, u32 addr, int len)
{
	int i;
	struct ag71xx_desc *desc;
	int ring_mask = BIT(ring->order) - 1;
	int ndesc = 0;
	int split = ring->desc_split;

	if (!split)
		split = len;

	while (len > 0) {
		unsigned int cur_len = len;

		i = (ring->curr + ndesc) & ring_mask;
		desc = ag71xx_ring_desc(ring, i);

		if (!ag71xx_desc_empty(desc))
			return -1;

		if (cur_len > split) {
			cur_len = split;

			/*
			 * TX will hang if DMA transfers <= 4 bytes,
			 * make sure next segment is more than 4 bytes long.
			 */
			if (len <= split + 4)
				cur_len -= 4;
		}

		desc->data = addr;
		addr += cur_len;
		len -= cur_len;

		if (len > 0)
			cur_len |= DESC_MORE;

		/* prevent early tx attempt of this descriptor */
		if (!ndesc)
			cur_len |= DESC_EMPTY;

		desc->ctrl = cur_len;
		ndesc++;
	}

	return ndesc;
}

static netdev_tx_t ag71xx_hard_start_xmit(struct sk_buff *skb,
					  struct net_device *dev)
{
	struct ag71xx *ag = netdev_priv(dev);
	struct ag71xx_ring *ring = &ag->tx_ring;
	int ring_mask = BIT(ring->order) - 1;
	int ring_size = BIT(ring->order);
	struct ag71xx_desc *desc;
	dma_addr_t dma_addr;
	int i, n, ring_min;

	if (skb->len <= 4) {
		DBG("%s: packet len is too small\n", ag->dev->name);
		goto err_drop;
	}

	dma_addr = dma_map_single(&dev->dev, skb->data, skb->len,
				  DMA_TO_DEVICE);

	i = ring->curr & ring_mask;
	desc = ag71xx_ring_desc(ring, i);

	/* setup descriptor fields */
	n = ag71xx_fill_dma_desc(ring, (u32) dma_addr,
				 skb->len & ag->desc_pktlen_mask);
	if (n < 0)
		goto err_drop_unmap;

	i = (ring->curr + n - 1) & ring_mask;
	ring->buf[i].len = skb->len;
	ring->buf[i].skb = skb;

	netdev_sent_queue(dev, skb->len);

	skb_tx_timestamp(skb);

	desc->ctrl &= ~DESC_EMPTY;
	ring->curr += n;

	/* flush descriptor */
	wmb();

	ring_min = 2;
	if (ring->desc_split)
		ring_min *= AG71XX_TX_RING_DS_PER_PKT;

	if (ring->curr - ring->dirty >= ring_size - ring_min) {
		DBG("%s: tx queue full\n", dev->name);
		netif_stop_queue(dev);
	}

	DBG("%s: packet injected into TX queue\n", ag->dev->name);

	/* enable TX engine */
	ag71xx_wr(ag, AG71XX_REG_TX_CTRL, TX_CTRL_TXE);

	return NETDEV_TX_OK;

err_drop_unmap:
	dma_unmap_single(&dev->dev, dma_addr, skb->len, DMA_TO_DEVICE);

err_drop:
	dev->stats.tx_dropped++;

	kfree_skb(skb);
	return NETDEV_TX_OK;
}

static int ag71xx_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct ag71xx *ag = netdev_priv(dev);
	int ret;

	switch (cmd) {
	case SIOCETHTOOL:
		if (ag->phy_dev == NULL)
			break;

		spin_lock_irq(&ag->lock);
		ret = phy_ethtool_ioctl(ag->phy_dev, (void *) ifr->ifr_data);
		spin_unlock_irq(&ag->lock);
		return ret;

	case SIOCSIFHWADDR:
		if (copy_from_user
			(dev->dev_addr, ifr->ifr_data, sizeof(dev->dev_addr)))
			return -EFAULT;
		return 0;

	case SIOCGIFHWADDR:
		if (copy_to_user
			(ifr->ifr_data, dev->dev_addr, sizeof(dev->dev_addr)))
			return -EFAULT;
		return 0;

	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		if (ag->phy_dev == NULL)
			break;

		return phy_mii_ioctl(ag->phy_dev, ifr, cmd);

	default:
		break;
	}

	return -EOPNOTSUPP;
}

static void ag71xx_oom_timer_handler(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct ag71xx *ag = netdev_priv(dev);

	napi_schedule(&ag->napi);
}

static void ag71xx_tx_timeout(struct net_device *dev)
{
	struct ag71xx *ag = netdev_priv(dev);

	if (netif_msg_tx_err(ag))
		pr_info("%s: tx timeout\n", ag->dev->name);

	schedule_delayed_work(&ag->restart_work, 1);
}

static void ag71xx_restart_work_func(struct work_struct *work)
{
	struct ag71xx *ag = container_of(work, struct ag71xx, restart_work.work);

	rtnl_lock();
	ag71xx_hw_disable(ag);
	ag71xx_hw_enable(ag);
	if (ag->link)
		__ag71xx_link_adjust(ag, false);
	rtnl_unlock();
}

static bool ag71xx_check_dma_stuck(struct ag71xx *ag)
{
	unsigned long timestamp;
	u32 rx_sm, tx_sm, rx_fd;

	timestamp = netdev_get_tx_queue(ag->dev, 0)->trans_start;
	if (likely(time_before(jiffies, timestamp + HZ/10)))
		return false;

	if (!netif_carrier_ok(ag->dev))
		return false;

	rx_sm = ag71xx_rr(ag, AG71XX_REG_RX_SM);
	if ((rx_sm & 0x7) == 0x3 && ((rx_sm >> 4) & 0x7) == 0x6)
		return true;

	tx_sm = ag71xx_rr(ag, AG71XX_REG_TX_SM);
	rx_fd = ag71xx_rr(ag, AG71XX_REG_FIFO_DEPTH);
	if (((tx_sm >> 4) & 0x7) == 0 && ((rx_sm & 0x7) == 0) &&
	    ((rx_sm >> 4) & 0x7) == 0 && rx_fd == 0)
		return true;

	return false;
}

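/*
 * Reap completed TX descriptors. With flush == true every queued
 * descriptor is forcibly marked empty (used on the reset paths); in
 * normal operation the walk stops at the first busy descriptor, and on
 * AR7240 the DMA-stuck heuristic above may schedule a full restart.
 */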
static int ag71xx_tx_packets(struct ag71xx *ag, bool flush)
{
	struct ag71xx_ring *ring = &ag->tx_ring;
	bool dma_stuck = false;
	int ring_mask = BIT(ring->order) - 1;
	int ring_size = BIT(ring->order);
	int sent = 0;
	int bytes_compl = 0;
	int n = 0;

	DBG("%s: processing TX ring\n", ag->dev->name);

	while (ring->dirty + n != ring->curr) {
		unsigned int i = (ring->dirty + n) & ring_mask;
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);
		struct sk_buff *skb = ring->buf[i].skb;

		if (!flush && !ag71xx_desc_empty(desc)) {
			if (ag->tx_hang_workaround &&
			    ag71xx_check_dma_stuck(ag)) {
				schedule_delayed_work(&ag->restart_work, HZ / 2);
				dma_stuck = true;
			}
			break;
		}

		if (flush)
			desc->ctrl |= DESC_EMPTY;

		n++;
		if (!skb)
			continue;

		dev_kfree_skb_any(skb);
		ring->buf[i].skb = NULL;

		bytes_compl += ring->buf[i].len;
		sent++;
		ring->dirty += n;

		while (n > 0) {
			ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_PS);
			n--;
		}
	}

	DBG("%s: %d packets sent out\n", ag->dev->name, sent);

	if (!sent)
		return 0;

	ag->dev->stats.tx_bytes += bytes_compl;
	ag->dev->stats.tx_packets += sent;

	netdev_completed_queue(ag->dev, sent, bytes_compl);
	if ((ring->curr - ring->dirty) < (ring_size * 3) / 4)
		netif_wake_queue(ag->dev);

	if (!dma_stuck)
		cancel_delayed_work(&ag->restart_work);

	return sent;
}

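/*
 * RX path: each completed frame is unmapped, wrapped zero-copy with
 * build_skb() and batched on a local queue; the ring is refilled before
 * the batch is handed to the stack, so the hardware regains empty
 * descriptors as early as possible.
 */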
static int ag71xx_rx_packets(struct ag71xx *ag, int limit)
{
	struct net_device *dev = ag->dev;
	struct ag71xx_ring *ring = &ag->rx_ring;
	unsigned int pktlen_mask = ag->desc_pktlen_mask;
	unsigned int offset = ag->rx_buf_offset;
	int ring_mask = BIT(ring->order) - 1;
	int ring_size = BIT(ring->order);
	struct sk_buff_head queue;
	struct sk_buff *skb;
	int done = 0;

	DBG("%s: rx packets, limit=%d, curr=%u, dirty=%u\n",
	    dev->name, limit, ring->curr, ring->dirty);

	skb_queue_head_init(&queue);

	while (done < limit) {
		unsigned int i = ring->curr & ring_mask;
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);
		int pktlen;
		int err = 0;

		if (ag71xx_desc_empty(desc))
			break;

		if ((ring->dirty + ring_size) == ring->curr) {
			ag71xx_assert(0);
			break;
		}

		ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_PR);

		pktlen = desc->ctrl & pktlen_mask;
		pktlen -= ETH_FCS_LEN;

		dma_unmap_single(&dev->dev, ring->buf[i].dma_addr,
				 ag->rx_buf_size, DMA_FROM_DEVICE);

		dev->stats.rx_packets++;
		dev->stats.rx_bytes += pktlen;

		skb = build_skb(ring->buf[i].rx_buf, ag71xx_buffer_size(ag));
		if (!skb) {
			skb_free_frag(ring->buf[i].rx_buf);
			goto next;
		}

		skb_reserve(skb, offset);
		skb_put(skb, pktlen);

		if (err) {
			dev->stats.rx_dropped++;
			kfree_skb(skb);
		} else {
			skb->dev = dev;
			skb->ip_summed = CHECKSUM_NONE;
			__skb_queue_tail(&queue, skb);
		}

next:
		ring->buf[i].rx_buf = NULL;
		done++;

		ring->curr++;
	}

	ag71xx_ring_rx_refill(ag);

	while ((skb = __skb_dequeue(&queue)) != NULL) {
		skb->protocol = eth_type_trans(skb, dev);
		netif_receive_skb(skb);
	}

	DBG("%s: rx finish, curr=%u, dirty=%u, done=%d\n",
	    dev->name, ring->curr, ring->dirty, done);

	return done;
}

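/*
 * NAPI poll: complete TX first to free ring space, then receive up to
 * 'limit' frames. Interrupts stay masked while either engine still
 * reports work; if an RX buffer allocation failed, the oom timer
 * reschedules the poll instead of re-enabling interrupts.
 */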
static int ag71xx_poll(struct napi_struct *napi, int limit)
{
	struct ag71xx *ag = container_of(napi, struct ag71xx, napi);
	struct net_device *dev = ag->dev;
	struct ag71xx_ring *rx_ring = &ag->rx_ring;
	int rx_ring_size = BIT(rx_ring->order);
	unsigned long flags;
	u32 status;
	int tx_done;
	int rx_done;

	tx_done = ag71xx_tx_packets(ag, false);

	DBG("%s: processing RX ring\n", dev->name);
	rx_done = ag71xx_rx_packets(ag, limit);

	ag71xx_debugfs_update_napi_stats(ag, rx_done, tx_done);

	if (rx_ring->buf[rx_ring->dirty % rx_ring_size].rx_buf == NULL)
		goto oom;

	status = ag71xx_rr(ag, AG71XX_REG_RX_STATUS);
	if (unlikely(status & RX_STATUS_OF)) {
		ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_OF);
		dev->stats.rx_fifo_errors++;

		/* restart RX */
		ag71xx_wr(ag, AG71XX_REG_RX_CTRL, RX_CTRL_RXE);
	}

	if (rx_done < limit) {
		if (status & RX_STATUS_PR)
			goto more;

		status = ag71xx_rr(ag, AG71XX_REG_TX_STATUS);
		if (status & TX_STATUS_PS)
			goto more;

		DBG("%s: disable polling mode, rx=%d, tx=%d,limit=%d\n",
		    dev->name, rx_done, tx_done, limit);

		napi_complete(napi);

		/* enable interrupts */
		spin_lock_irqsave(&ag->lock, flags);
		ag71xx_int_enable(ag, AG71XX_INT_POLL);
		spin_unlock_irqrestore(&ag->lock, flags);
		return rx_done;
	}

more:
	DBG("%s: stay in polling mode, rx=%d, tx=%d, limit=%d\n",
	    dev->name, rx_done, tx_done, limit);
	return limit;

oom:
	if (netif_msg_rx_err(ag))
		pr_info("%s: out of memory\n", dev->name);

	mod_timer(&ag->oom_timer, jiffies + AG71XX_OOM_REFILL);
	napi_complete(napi);
	return 0;
}

static irqreturn_t ag71xx_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct ag71xx *ag = netdev_priv(dev);
	u32 status;

	status = ag71xx_rr(ag, AG71XX_REG_INT_STATUS);
	ag71xx_dump_intr(ag, "raw", status);

	if (unlikely(!status))
		return IRQ_NONE;

	if (unlikely(status & AG71XX_INT_ERR)) {
		if (status & AG71XX_INT_TX_BE) {
			ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_BE);
			dev_err(&dev->dev, "TX BUS error\n");
		}
		if (status & AG71XX_INT_RX_BE) {
			ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_BE);
			dev_err(&dev->dev, "RX BUS error\n");
		}
	}

	if (likely(status & AG71XX_INT_POLL)) {
		ag71xx_int_disable(ag, AG71XX_INT_POLL);
		DBG("%s: enable polling mode\n", dev->name);
		napi_schedule(&ag->napi);
	}

	ag71xx_debugfs_update_int_stats(ag, status);

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void ag71xx_netpoll(struct net_device *dev)
{
	disable_irq(dev->irq);
	ag71xx_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

static int ag71xx_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ag71xx *ag = netdev_priv(dev);

	dev->mtu = new_mtu;
	ag71xx_wr(ag, AG71XX_REG_MAC_MFL,
		  ag71xx_max_frame_len(dev->mtu));

	return 0;
}

static const struct net_device_ops ag71xx_netdev_ops = {
	.ndo_open		= ag71xx_open,
	.ndo_stop		= ag71xx_stop,
	.ndo_start_xmit		= ag71xx_hard_start_xmit,
	.ndo_do_ioctl		= ag71xx_do_ioctl,
	.ndo_tx_timeout		= ag71xx_tx_timeout,
	.ndo_change_mtu		= ag71xx_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ag71xx_netpoll,
#endif
};

static int ag71xx_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct device_node *mdio_node;
	struct net_device *dev;
	struct resource *res;
	struct ag71xx *ag;
	const void *mac_addr;
	u32 max_frame_len;
	int tx_size, err;

	if (!np)
		return -ENODEV;

	dev = devm_alloc_etherdev(&pdev->dev, sizeof(*ag));
	if (!dev)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -EINVAL;

	err = ag71xx_setup_gmac(np);
	if (err)
		return err;

	SET_NETDEV_DEV(dev, &pdev->dev);

	ag = netdev_priv(dev);
	ag->pdev = pdev;
	ag->dev = dev;
	ag->msg_enable = netif_msg_init(ag71xx_msg_level,
					AG71XX_DEFAULT_MSG_ENABLE);
	spin_lock_init(&ag->lock);

	ag->mac_reset = devm_reset_control_get(&pdev->dev, "mac");
	if (IS_ERR(ag->mac_reset)) {
		dev_err(&pdev->dev, "missing mac reset\n");
		return PTR_ERR(ag->mac_reset);
	}

	if (of_property_read_u32_array(np, "fifo-data", ag->fifodata, 3)) {
		if (of_device_is_compatible(np, "qca,ar9130-eth") ||
		    of_device_is_compatible(np, "qca,ar7100-eth")) {
			ag->fifodata[0] = 0x0fff0000;
			ag->fifodata[1] = 0x00001fff;
		} else {
			ag->fifodata[0] = 0x0010ffff;
			ag->fifodata[1] = 0x015500aa;
			ag->fifodata[2] = 0x01f00140;
		}
		if (of_device_is_compatible(np, "qca,ar9130-eth"))
			ag->fifodata[2] = 0x00780fff;
		else if (of_device_is_compatible(np, "qca,ar7100-eth"))
			ag->fifodata[2] = 0x008001ff;
	}

	if (of_property_read_u32_array(np, "pll-data", ag->plldata, 3))
		dev_dbg(&pdev->dev, "failed to read pll-data property\n");

	if (of_property_read_u32_array(np, "pll-reg", ag->pllreg, 3))
		dev_dbg(&pdev->dev, "failed to read pll-reg property\n");

	ag->pllregmap = syscon_regmap_lookup_by_phandle(np, "pll-handle");
	if (IS_ERR(ag->pllregmap)) {
		dev_dbg(&pdev->dev, "failed to read pll-handle property\n");
		ag->pllregmap = NULL;
	}

	ag->mac_base = devm_ioremap_nocache(&pdev->dev, res->start,
					    res->end - res->start + 1);
	if (!ag->mac_base)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (res) {
		ag->mii_base = devm_ioremap_nocache(&pdev->dev, res->start,
						    res->end - res->start + 1);
		if (!ag->mii_base)
			return -ENOMEM;
	}

	dev->irq = platform_get_irq(pdev, 0);
	err = devm_request_irq(&pdev->dev, dev->irq, ag71xx_interrupt,
			       0x0, dev_name(&pdev->dev), dev);
	if (err) {
		dev_err(&pdev->dev, "unable to request IRQ %d\n", dev->irq);
		return err;
	}

	dev->netdev_ops = &ag71xx_netdev_ops;
	dev->ethtool_ops = &ag71xx_ethtool_ops;

	INIT_DELAYED_WORK(&ag->restart_work, ag71xx_restart_work_func);

	init_timer(&ag->oom_timer);
	ag->oom_timer.data = (unsigned long) dev;
	ag->oom_timer.function = ag71xx_oom_timer_handler;

	tx_size = AG71XX_TX_RING_SIZE_DEFAULT;
	ag->rx_ring.order = ag71xx_ring_size_order(AG71XX_RX_RING_SIZE_DEFAULT);

	if (of_device_is_compatible(np, "qca,ar9340-eth") ||
	    of_device_is_compatible(np, "qca,qca9530-eth") ||
	    of_device_is_compatible(np, "qca,qca9550-eth") ||
	    of_device_is_compatible(np, "qca,qca9560-eth"))
		ag->desc_pktlen_mask = SZ_16K - 1;
	else
		ag->desc_pktlen_mask = SZ_4K - 1;

	if (ag->desc_pktlen_mask == SZ_16K - 1 &&
	    !of_device_is_compatible(np, "qca,qca9550-eth") &&
	    !of_device_is_compatible(np, "qca,qca9560-eth"))
		max_frame_len = ag->desc_pktlen_mask;
	else
		max_frame_len = 1540;

	dev->max_mtu = max_frame_len - ag71xx_max_frame_len(0);

	if (of_device_is_compatible(np, "qca,ar7240-eth"))
		ag->tx_hang_workaround = 1;

	ag->rx_buf_offset = NET_SKB_PAD;
	if (!of_device_is_compatible(np, "qca,ar7100-eth") &&
	    !of_device_is_compatible(np, "qca,ar9130-eth"))
		ag->rx_buf_offset += NET_IP_ALIGN;

	if (of_device_is_compatible(np, "qca,ar7100-eth")) {
		ag->tx_ring.desc_split = AG71XX_TX_RING_SPLIT;
		tx_size *= AG71XX_TX_RING_DS_PER_PKT;
	}
	ag->tx_ring.order = ag71xx_ring_size_order(tx_size);

	ag->stop_desc = dmam_alloc_coherent(&pdev->dev,
					    sizeof(struct ag71xx_desc),
					    &ag->stop_desc_dma, GFP_KERNEL);
	if (!ag->stop_desc)
		return -ENOMEM;

	ag->stop_desc->data = 0;
	ag->stop_desc->ctrl = 0;
	ag->stop_desc->next = (u32) ag->stop_desc_dma;

	mac_addr = of_get_mac_address(np);
	if (mac_addr)
		memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
	if (!mac_addr || !is_valid_ether_addr(dev->dev_addr)) {
		dev_err(&pdev->dev, "invalid MAC address, using random address\n");
		eth_random_addr(dev->dev_addr);
	}

	ag->phy_if_mode = of_get_phy_mode(np);
	if (ag->phy_if_mode < 0) {
		dev_err(&pdev->dev, "missing phy-mode property in DT\n");
		return ag->phy_if_mode;
	}

	if (of_property_read_u32(np, "qca,mac-idx", &ag->mac_idx))
		ag->mac_idx = -1;
	if (ag->mii_base)
		switch (ag->mac_idx) {
		case 0:
			ath79_mii0_ctrl_set_if(ag);
			break;
		case 1:
			ath79_mii1_ctrl_set_if(ag);
			break;
		default:
			break;
		}

	netif_napi_add(dev, &ag->napi, ag71xx_poll, AG71XX_NAPI_WEIGHT);

	ag71xx_dump_regs(ag);

	ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, 0);

	ag71xx_hw_init(ag);

	ag71xx_dump_regs(ag);

	if (!of_device_is_compatible(np, "simple-mfd")) {
		mdio_node = of_get_child_by_name(np, "mdio-bus");
		if (!IS_ERR(mdio_node))
			of_platform_device_create(mdio_node, NULL, NULL);
	}

	err = ag71xx_phy_connect(ag);
	if (err)
		return err;

	err = ag71xx_debugfs_init(ag);
	if (err)
		goto err_phy_disconnect;

	platform_set_drvdata(pdev, dev);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "unable to register net device\n");
		platform_set_drvdata(pdev, NULL);
		ag71xx_debugfs_exit(ag);
		goto err_phy_disconnect;
	}

	pr_info("%s: Atheros AG71xx at 0x%08lx, irq %d, mode: %s\n",
		dev->name, (unsigned long) ag->mac_base, dev->irq,
		phy_modes(ag->phy_if_mode));

	return 0;

err_phy_disconnect:
	ag71xx_phy_disconnect(ag);
	return err;
}

static int ag71xx_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct ag71xx *ag;

	if (!dev)
		return 0;

	ag = netdev_priv(dev);
	ag71xx_debugfs_exit(ag);
	ag71xx_phy_disconnect(ag);
	unregister_netdev(dev);
	platform_set_drvdata(pdev, NULL);
	return 0;
}

static const struct of_device_id ag71xx_match[] = {
	{ .compatible = "qca,ar7100-eth" },
	{ .compatible = "qca,ar7240-eth" },
	{ .compatible = "qca,ar7241-eth" },
	{ .compatible = "qca,ar7242-eth" },
	{ .compatible = "qca,ar9130-eth" },
	{ .compatible = "qca,ar9330-eth" },
	{ .compatible = "qca,ar9340-eth" },
	{ .compatible = "qca,qca9530-eth" },
	{ .compatible = "qca,qca9550-eth" },
	{ .compatible = "qca,qca9560-eth" },
	{}
};

static struct platform_driver ag71xx_driver = {
	.probe		= ag71xx_probe,
	.remove		= ag71xx_remove,
	.driver = {
		.name		= AG71XX_DRV_NAME,
		.of_match_table	= ag71xx_match,
	}
};

static int __init ag71xx_module_init(void)
{
	int ret;

	ret = ag71xx_debugfs_root_init();
	if (ret)
		goto err_out;

	ret = platform_driver_register(&ag71xx_driver);
	if (ret)
		goto err_debugfs_exit;

	return 0;

err_debugfs_exit:
	ag71xx_debugfs_root_exit();

err_out:
	return ret;
}

static void __exit ag71xx_module_exit(void)
{
	platform_driver_unregister(&ag71xx_driver);
	ag71xx_debugfs_root_exit();
}

module_init(ag71xx_module_init);
module_exit(ag71xx_module_exit);

MODULE_AUTHOR("Gabor Juhos <juhosg@openwrt.org>");
MODULE_AUTHOR("Imre Kaloz <kaloz@openwrt.org>");
MODULE_AUTHOR("Felix Fietkau <nbd@nbd.name>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" AG71XX_DRV_NAME);