/*
 * Atheros AR71xx built-in ethernet mac driver
 *
 * Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
 * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
 *
 * Based on Atheros' AG7100 driver
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include <linux/sizes.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>

#include "ag71xx.h"

#define AG71XX_DEFAULT_MSG_ENABLE	\
	(NETIF_MSG_DRV			\
	| NETIF_MSG_PROBE		\
	| NETIF_MSG_LINK		\
	| NETIF_MSG_TIMER		\
	| NETIF_MSG_IFDOWN		\
	| NETIF_MSG_IFUP		\
	| NETIF_MSG_RX_ERR		\
	| NETIF_MSG_TX_ERR)

static int ag71xx_msg_level = -1;

module_param_named(msg_level, ag71xx_msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");

#define ETH_SWITCH_HEADER_LEN	2

static int ag71xx_tx_packets(struct ag71xx *ag, bool flush, int budget);

static inline unsigned int ag71xx_max_frame_len(unsigned int mtu)
{
	return ETH_SWITCH_HEADER_LEN + ETH_HLEN + VLAN_HLEN + mtu + ETH_FCS_LEN;
}

static void ag71xx_dump_dma_regs(struct ag71xx *ag)
{
	DBG("%s: dma_tx_ctrl=%08x, dma_tx_desc=%08x, dma_tx_status=%08x\n",
		ag->dev->name,
		ag71xx_rr(ag, AG71XX_REG_TX_CTRL),
		ag71xx_rr(ag, AG71XX_REG_TX_DESC),
		ag71xx_rr(ag, AG71XX_REG_TX_STATUS));

	DBG("%s: dma_rx_ctrl=%08x, dma_rx_desc=%08x, dma_rx_status=%08x\n",
		ag->dev->name,
		ag71xx_rr(ag, AG71XX_REG_RX_CTRL),
		ag71xx_rr(ag, AG71XX_REG_RX_DESC),
		ag71xx_rr(ag, AG71XX_REG_RX_STATUS));
}

static void ag71xx_dump_regs(struct ag71xx *ag)
{
	DBG("%s: mac_cfg1=%08x, mac_cfg2=%08x, ipg=%08x, hdx=%08x, mfl=%08x\n",
		ag->dev->name,
		ag71xx_rr(ag, AG71XX_REG_MAC_CFG1),
		ag71xx_rr(ag, AG71XX_REG_MAC_CFG2),
		ag71xx_rr(ag, AG71XX_REG_MAC_IPG),
		ag71xx_rr(ag, AG71XX_REG_MAC_HDX),
		ag71xx_rr(ag, AG71XX_REG_MAC_MFL));
	DBG("%s: mac_ifctl=%08x, mac_addr1=%08x, mac_addr2=%08x\n",
		ag->dev->name,
		ag71xx_rr(ag, AG71XX_REG_MAC_IFCTL),
		ag71xx_rr(ag, AG71XX_REG_MAC_ADDR1),
		ag71xx_rr(ag, AG71XX_REG_MAC_ADDR2));
	DBG("%s: fifo_cfg0=%08x, fifo_cfg1=%08x, fifo_cfg2=%08x\n",
		ag->dev->name,
		ag71xx_rr(ag, AG71XX_REG_FIFO_CFG0),
		ag71xx_rr(ag, AG71XX_REG_FIFO_CFG1),
		ag71xx_rr(ag, AG71XX_REG_FIFO_CFG2));
	DBG("%s: fifo_cfg3=%08x, fifo_cfg4=%08x, fifo_cfg5=%08x\n",
		ag->dev->name,
		ag71xx_rr(ag, AG71XX_REG_FIFO_CFG3),
		ag71xx_rr(ag, AG71XX_REG_FIFO_CFG4),
		ag71xx_rr(ag, AG71XX_REG_FIFO_CFG5));
}

static inline void ag71xx_dump_intr(struct ag71xx *ag, char *label, u32 intr)
{
	DBG("%s: %s intr=%08x %s%s%s%s%s%s\n",
		ag->dev->name, label, intr,
		(intr & AG71XX_INT_TX_PS) ? "TXPS " : "",
		(intr & AG71XX_INT_TX_UR) ? "TXUR " : "",
		(intr & AG71XX_INT_TX_BE) ? "TXBE " : "",
		(intr & AG71XX_INT_RX_PR) ? "RXPR " : "",
		(intr & AG71XX_INT_RX_OF) ? "RXOF " : "",
		(intr & AG71XX_INT_RX_BE) ? "RXBE " : "");
}

static void ag71xx_ring_tx_clean(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->tx_ring;
	struct net_device *dev = ag->dev;
	int ring_mask = BIT(ring->order) - 1;
	u32 bytes_compl = 0, pkts_compl = 0;

	while (ring->curr != ring->dirty) {
		struct ag71xx_desc *desc;
		u32 i = ring->dirty & ring_mask;

		desc = ag71xx_ring_desc(ring, i);
		if (!ag71xx_desc_empty(desc)) {
			desc->ctrl = 0;
			dev->stats.tx_errors++;
		}

		if (ring->buf[i].skb) {
			bytes_compl += ring->buf[i].len;
			pkts_compl++;
			dev_kfree_skb_any(ring->buf[i].skb);
		}
		ring->buf[i].skb = NULL;
		ring->dirty++;
	}

	/* flush descriptors */
	wmb();

	netdev_completed_queue(dev, pkts_compl, bytes_compl);
}

static void ag71xx_ring_tx_init(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->tx_ring;
	int ring_size = BIT(ring->order);
	int ring_mask = BIT(ring->order) - 1;
	int i;

	for (i = 0; i < ring_size; i++) {
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);

		desc->next = (u32) (ring->descs_dma +
			AG71XX_DESC_SIZE * ((i + 1) & ring_mask));

		desc->ctrl = DESC_EMPTY;
		ring->buf[i].skb = NULL;
	}

	/* flush descriptors */
	wmb();

	ring->curr = 0;
	ring->dirty = 0;
	netdev_reset_queue(ag->dev);
}

static void ag71xx_ring_rx_clean(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->rx_ring;
	int ring_size = BIT(ring->order);
	int i;

	if (!ring->buf)
		return;

	for (i = 0; i < ring_size; i++)
		if (ring->buf[i].rx_buf) {
			dma_unmap_single(&ag->pdev->dev, ring->buf[i].dma_addr,
					 ag->rx_buf_size, DMA_FROM_DEVICE);
			skb_free_frag(ring->buf[i].rx_buf);
		}
}

static int ag71xx_buffer_size(struct ag71xx *ag)
{
	return ag->rx_buf_size +
	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}

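/*
 * Attach a freshly allocated page fragment to an RX descriptor. The
 * allocator is passed in so the init path can use netdev_alloc_frag()
 * while the NAPI refill path can use the per-CPU NAPI variant.
 */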
static bool ag71xx_fill_rx_buf(struct ag71xx *ag, struct ag71xx_buf *buf,
			       int offset,
			       void *(*alloc)(unsigned int size))
{
	struct ag71xx_ring *ring = &ag->rx_ring;
	struct ag71xx_desc *desc = ag71xx_ring_desc(ring, buf - &ring->buf[0]);
	void *data;

	data = alloc(ag71xx_buffer_size(ag));
	if (!data)
		return false;

	buf->rx_buf = data;
	buf->dma_addr = dma_map_single(&ag->pdev->dev, data, ag->rx_buf_size,
				       DMA_FROM_DEVICE);
	desc->data = (u32) buf->dma_addr + offset;
	return true;
}

static int ag71xx_ring_rx_init(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->rx_ring;
	int ring_size = BIT(ring->order);
	int ring_mask = BIT(ring->order) - 1;
	unsigned int i;
	int ret;

	ret = 0;
	for (i = 0; i < ring_size; i++) {
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);

		desc->next = (u32) (ring->descs_dma +
			AG71XX_DESC_SIZE * ((i + 1) & ring_mask));

		DBG("ag71xx: RX desc at %p, next is %08x\n",
			desc, desc->next);
	}

	for (i = 0; i < ring_size; i++) {
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);

		if (!ag71xx_fill_rx_buf(ag, &ring->buf[i], ag->rx_buf_offset,
					netdev_alloc_frag)) {
			ret = -ENOMEM;
			break;
		}

		desc->ctrl = DESC_EMPTY;
	}

	/* flush descriptors */
	wmb();

	ring->curr = 0;
	ring->dirty = 0;

	return ret;
}

static int ag71xx_ring_rx_refill(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->rx_ring;
	int ring_mask = BIT(ring->order) - 1;
	unsigned int count;
	int offset = ag->rx_buf_offset;

	count = 0;
	for (; ring->curr - ring->dirty > 0; ring->dirty++) {
		struct ag71xx_desc *desc;
		unsigned int i;

		i = ring->dirty & ring_mask;
		desc = ag71xx_ring_desc(ring, i);

		if (!ring->buf[i].rx_buf &&
		    !ag71xx_fill_rx_buf(ag, &ring->buf[i], offset,
					napi_alloc_frag))
			break;

		desc->ctrl = DESC_EMPTY;
		count++;
	}

	/* flush descriptors */
	wmb();

	DBG("%s: %u rx descriptors refilled\n", ag->dev->name, count);

	return count;
}

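/*
 * TX and RX rings share one buffer array and one coherent DMA block;
 * the RX halves simply start tx_size entries (and descriptors) in.
 */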
static int ag71xx_rings_init(struct ag71xx *ag)
{
	struct ag71xx_ring *tx = &ag->tx_ring;
	struct ag71xx_ring *rx = &ag->rx_ring;
	int ring_size = BIT(tx->order) + BIT(rx->order);
	int tx_size = BIT(tx->order);

	tx->buf = kzalloc(ring_size * sizeof(*tx->buf), GFP_KERNEL);
	if (!tx->buf)
		return -ENOMEM;

	tx->descs_cpu = dma_alloc_coherent(&ag->pdev->dev, ring_size * AG71XX_DESC_SIZE,
					   &tx->descs_dma, GFP_KERNEL);
	if (!tx->descs_cpu) {
		kfree(tx->buf);
		tx->buf = NULL;
		return -ENOMEM;
	}

	rx->buf = &tx->buf[tx_size];
	rx->descs_cpu = ((void *)tx->descs_cpu) + tx_size * AG71XX_DESC_SIZE;
	rx->descs_dma = tx->descs_dma + tx_size * AG71XX_DESC_SIZE;

	ag71xx_ring_tx_init(ag);
	return ag71xx_ring_rx_init(ag);
}

static void ag71xx_rings_free(struct ag71xx *ag)
{
	struct ag71xx_ring *tx = &ag->tx_ring;
	struct ag71xx_ring *rx = &ag->rx_ring;
	int ring_size = BIT(tx->order) + BIT(rx->order);

	if (tx->descs_cpu)
		dma_free_coherent(&ag->pdev->dev, ring_size * AG71XX_DESC_SIZE,
				  tx->descs_cpu, tx->descs_dma);

	kfree(tx->buf);

	tx->descs_cpu = NULL;
	rx->descs_cpu = NULL;
	tx->buf = NULL;
	rx->buf = NULL;
}

static void ag71xx_rings_cleanup(struct ag71xx *ag)
{
	ag71xx_ring_rx_clean(ag);
	ag71xx_ring_tx_clean(ag);
	ag71xx_rings_free(ag);

	netdev_reset_queue(ag->dev);
}

static unsigned char *ag71xx_speed_str(struct ag71xx *ag)
{
	switch (ag->speed) {
	case SPEED_1000:
		return "1000";
	case SPEED_100:
		return "100";
	case SPEED_10:
		return "10";
	}

	return "?";
}

static void ag71xx_hw_set_macaddr(struct ag71xx *ag, const unsigned char *mac)
{
	u32 t;

	t = (((u32) mac[5]) << 24) | (((u32) mac[4]) << 16)
	  | (((u32) mac[3]) << 8) | ((u32) mac[2]);

	ag71xx_wr(ag, AG71XX_REG_MAC_ADDR1, t);

	t = (((u32) mac[1]) << 24) | (((u32) mac[0]) << 16);
	ag71xx_wr(ag, AG71XX_REG_MAC_ADDR2, t);
}

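/*
 * Bring the DMA engine to a known idle state. The descriptor registers
 * are parked on a dedicated "stop" descriptor so the hardware never
 * walks freed ring memory while the rings are being torn down.
 */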
static void ag71xx_dma_reset(struct ag71xx *ag)
{
	u32 val;
	int i;

	ag71xx_dump_dma_regs(ag);

	/* stop RX and TX */
	ag71xx_wr(ag, AG71XX_REG_RX_CTRL, 0);
	ag71xx_wr(ag, AG71XX_REG_TX_CTRL, 0);

	/*
	 * give the hardware some time to really stop all rx/tx activity
	 * clearing the descriptors too early causes random memory corruption
	 */
	mdelay(1);

	/* clear descriptor addresses */
	ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->stop_desc_dma);
	ag71xx_wr(ag, AG71XX_REG_RX_DESC, ag->stop_desc_dma);

	/* clear pending RX/TX interrupts */
	for (i = 0; i < 256; i++) {
		ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_PR);
		ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_PS);
	}

	/* clear pending errors */
	ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_BE | RX_STATUS_OF);
	ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_BE | TX_STATUS_UR);

	val = ag71xx_rr(ag, AG71XX_REG_RX_STATUS);
	if (val)
		pr_alert("%s: unable to clear DMA Rx status: %08x\n",
			 ag->dev->name, val);

	val = ag71xx_rr(ag, AG71XX_REG_TX_STATUS);

	/* mask out reserved bits */
	val &= ~0xff000000;

	if (val)
		pr_alert("%s: unable to clear DMA Tx status: %08x\n",
			 ag->dev->name, val);

	ag71xx_dump_dma_regs(ag);
}

#define MAC_CFG1_INIT	(MAC_CFG1_RXE | MAC_CFG1_TXE | \
			 MAC_CFG1_SRX | MAC_CFG1_STX)

#define FIFO_CFG0_INIT	(FIFO_CFG0_ALL << FIFO_CFG0_ENABLE_SHIFT)

#define FIFO_CFG4_INIT	(FIFO_CFG4_DE | FIFO_CFG4_DV | FIFO_CFG4_FC | \
			 FIFO_CFG4_CE | FIFO_CFG4_CR | FIFO_CFG4_LM | \
			 FIFO_CFG4_LO | FIFO_CFG4_OK | FIFO_CFG4_MC | \
			 FIFO_CFG4_BC | FIFO_CFG4_DR | FIFO_CFG4_LE | \
			 FIFO_CFG4_CF | FIFO_CFG4_PF | FIFO_CFG4_UO | \
			 FIFO_CFG4_VT)

#define FIFO_CFG5_INIT	(FIFO_CFG5_DE | FIFO_CFG5_DV | FIFO_CFG5_FC | \
			 FIFO_CFG5_CE | FIFO_CFG5_LM | FIFO_CFG5_LO | \
			 FIFO_CFG5_OK | FIFO_CFG5_MC | FIFO_CFG5_BC | \
			 FIFO_CFG5_DR | FIFO_CFG5_CF | FIFO_CFG5_UO | \
			 FIFO_CFG5_VT | FIFO_CFG5_LE | FIFO_CFG5_FT | \
			 FIFO_CFG5_UC | FIFO_CFG5_SF)

static void ag71xx_hw_stop(struct ag71xx *ag)
{
	/* disable all interrupts and stop the rx/tx engine */
	ag71xx_wr(ag, AG71XX_REG_INT_ENABLE, 0);
	ag71xx_wr(ag, AG71XX_REG_RX_CTRL, 0);
	ag71xx_wr(ag, AG71XX_REG_TX_CTRL, 0);
}

static void ag71xx_hw_setup(struct ag71xx *ag)
{
	struct device_node *np = ag->pdev->dev.of_node;
	u32 init = MAC_CFG1_INIT;

	/* setup MAC configuration registers */
	if (of_property_read_bool(np, "flow-control"))
		init |= MAC_CFG1_TFC | MAC_CFG1_RFC;
	ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, init);

	ag71xx_sb(ag, AG71XX_REG_MAC_CFG2,
		  MAC_CFG2_PAD_CRC_EN | MAC_CFG2_LEN_CHECK);

	/* setup max frame length to zero */
	ag71xx_wr(ag, AG71XX_REG_MAC_MFL, 0);

	/* setup FIFO configuration registers */
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG0, FIFO_CFG0_INIT);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG1, ag->fifodata[0]);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG2, ag->fifodata[1]);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG4, FIFO_CFG4_INIT);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG5, FIFO_CFG5_INIT);
}

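/* full MAC reset: MAC soft-reset bit first, then the external reset lines */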
static void ag71xx_hw_init(struct ag71xx *ag)
{
	ag71xx_hw_stop(ag);

	ag71xx_sb(ag, AG71XX_REG_MAC_CFG1, MAC_CFG1_SR);
	udelay(20);

	reset_control_assert(ag->mac_reset);
	if (ag->mdio_reset)
		reset_control_assert(ag->mdio_reset);
	msleep(100);
	reset_control_deassert(ag->mac_reset);
	if (ag->mdio_reset)
		reset_control_deassert(ag->mdio_reset);
	msleep(200);

	ag71xx_hw_setup(ag);

	ag71xx_dma_reset(ag);
}

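/*
 * Lighter-weight reset used on link changes: preserves the MII config
 * and the current RX descriptor pointer across the MAC reset instead
 * of rebuilding the rings from scratch.
 */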
static void ag71xx_fast_reset(struct ag71xx *ag)
{
	struct net_device *dev = ag->dev;
	u32 rx_ds;
	u32 mii_reg;

	ag71xx_hw_stop(ag);
	wmb();

	mii_reg = ag71xx_rr(ag, AG71XX_REG_MII_CFG);
	rx_ds = ag71xx_rr(ag, AG71XX_REG_RX_DESC);

	ag71xx_tx_packets(ag, true, 0);

	reset_control_assert(ag->mac_reset);
	udelay(10);
	reset_control_deassert(ag->mac_reset);
	udelay(10);

	ag71xx_dma_reset(ag);
	ag71xx_hw_setup(ag);
	ag->tx_ring.curr = 0;
	ag->tx_ring.dirty = 0;
	netdev_reset_queue(ag->dev);

	/* setup max frame length */
	ag71xx_wr(ag, AG71XX_REG_MAC_MFL,
		  ag71xx_max_frame_len(ag->dev->mtu));

	ag71xx_wr(ag, AG71XX_REG_RX_DESC, rx_ds);
	ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->tx_ring.descs_dma);
	ag71xx_wr(ag, AG71XX_REG_MII_CFG, mii_reg);

	ag71xx_hw_set_macaddr(ag, dev->dev_addr);
}

static void ag71xx_hw_start(struct ag71xx *ag)
{
	/* start RX engine */
	ag71xx_wr(ag, AG71XX_REG_RX_CTRL, RX_CTRL_RXE);

	/* enable interrupts */
	ag71xx_wr(ag, AG71XX_REG_INT_ENABLE, AG71XX_INT_INIT);

	netif_wake_queue(ag->dev);
}

static void ath79_set_pllval(struct ag71xx *ag)
{
	u32 pll_reg = ag->pllreg[1];
	u32 pll_val;

	if (!ag->pllregmap)
		return;

	switch (ag->speed) {
	case SPEED_10:
		pll_val = ag->plldata[2];
		break;
	case SPEED_100:
		pll_val = ag->plldata[1];
		break;
	case SPEED_1000:
		pll_val = ag->plldata[0];
		break;
	default:
		BUG();
	}

	if (pll_val)
		regmap_write(ag->pllregmap, pll_reg, pll_val);
}

static void ath79_set_pll(struct ag71xx *ag)
{
	u32 pll_cfg = ag->pllreg[0];
	u32 pll_shift = ag->pllreg[2];

	if (!ag->pllregmap)
		return;

	regmap_update_bits(ag->pllregmap, pll_cfg, 3 << pll_shift, 2 << pll_shift);
	udelay(100);

	ath79_set_pllval(ag);

	regmap_update_bits(ag->pllregmap, pll_cfg, 3 << pll_shift, 3 << pll_shift);
	udelay(100);

	regmap_update_bits(ag->pllregmap, pll_cfg, 3 << pll_shift, 0);
	udelay(100);
}

static void ag71xx_bit_set(void __iomem *reg, u32 bit)
{
	u32 val;

	val = __raw_readl(reg) | bit;
	__raw_writel(val, reg);
	__raw_readl(reg);
}

static void ag71xx_bit_clear(void __iomem *reg, u32 bit)
{
	u32 val;

	val = __raw_readl(reg) & ~bit;
	__raw_writel(val, reg);
	__raw_readl(reg);
}

static void ag71xx_sgmii_serdes_init_qca956x(struct device_node *np)
{
	struct device_node *np_dev;
	void __iomem *gmac_base;
	u32 serdes_cal;
	u32 t;

	np = of_get_child_by_name(np, "gmac-config");
	if (!np)
		return;

	if (of_property_read_u32(np, "serdes-cal", &serdes_cal))
		/* By default, use middle value for resistor calibration */
		serdes_cal = 0x7;

	np_dev = of_parse_phandle(np, "device", 0);
	if (!np_dev)
		goto out;

	gmac_base = of_iomap(np_dev, 0);
	if (!gmac_base) {
		pr_err("%pOF: can't map GMAC registers\n", np_dev);
		goto err_iomap;
	}

	t = __raw_readl(gmac_base + QCA956X_GMAC_REG_SGMII_CONFIG);
	t &= ~(QCA956X_SGMII_CONFIG_MODE_CTRL_MASK << QCA956X_SGMII_CONFIG_MODE_CTRL_SHIFT);
	t |= QCA956X_SGMII_CONFIG_MODE_CTRL_SGMII_MAC;
	__raw_writel(t, gmac_base + QCA956X_GMAC_REG_SGMII_CONFIG);

	pr_debug("%pOF: fixup SERDES calibration to value %i\n",
		 np_dev, serdes_cal);
	t = __raw_readl(gmac_base + QCA956X_GMAC_REG_SGMII_SERDES);
	t &= ~(QCA956X_SGMII_SERDES_RES_CALIBRATION_MASK
	       << QCA956X_SGMII_SERDES_RES_CALIBRATION_SHIFT);
	t |= (serdes_cal & QCA956X_SGMII_SERDES_RES_CALIBRATION_MASK)
	     << QCA956X_SGMII_SERDES_RES_CALIBRATION_SHIFT;
	__raw_writel(t, gmac_base + QCA956X_GMAC_REG_SGMII_SERDES);

	ath79_pll_wr(QCA956X_PLL_ETH_SGMII_SERDES_REG,
		     QCA956X_PLL_ETH_SGMII_SERDES_LOCK_DETECT
		     | QCA956X_PLL_ETH_SGMII_SERDES_EN_PLL);

	t = __raw_readl(gmac_base + QCA956X_GMAC_REG_SGMII_SERDES);

	/* missing in QCA u-boot code, clear before setting */
	t &= ~(QCA956X_SGMII_SERDES_CDR_BW_MASK
	       << QCA956X_SGMII_SERDES_CDR_BW_SHIFT |
	       QCA956X_SGMII_SERDES_TX_DR_CTRL_MASK
	       << QCA956X_SGMII_SERDES_TX_DR_CTRL_SHIFT |
	       QCA956X_SGMII_SERDES_VCO_REG_MASK
	       << QCA956X_SGMII_SERDES_VCO_REG_SHIFT);

	t |= (3 << QCA956X_SGMII_SERDES_CDR_BW_SHIFT) |
	     (1 << QCA956X_SGMII_SERDES_TX_DR_CTRL_SHIFT) |
	     QCA956X_SGMII_SERDES_PLL_BW |
	     QCA956X_SGMII_SERDES_EN_SIGNAL_DETECT |
	     QCA956X_SGMII_SERDES_FIBER_SDO |
	     (3 << QCA956X_SGMII_SERDES_VCO_REG_SHIFT);

	__raw_writel(t, gmac_base + QCA956X_GMAC_REG_SGMII_SERDES);

	ath79_device_reset_clear(QCA956X_RESET_SGMII_ANALOG);
	ath79_device_reset_clear(QCA956X_RESET_SGMII);

	while (!(__raw_readl(gmac_base + QCA956X_GMAC_REG_SGMII_SERDES)
		& QCA956X_SGMII_SERDES_LOCK_DETECT_STATUS))
		;

	iounmap(gmac_base);
err_iomap:
	of_node_put(np_dev);
out:
	of_node_put(np);
}

static void ag71xx_sgmii_init_qca955x(struct device_node *np)
{
	struct device_node *np_dev;
	void __iomem *gmac_base;
	u32 mr_an_status;
	u32 sgmii_status;
	u8 tries = 0;

	np = of_get_child_by_name(np, "gmac-config");
	if (!np)
		return;

	np_dev = of_parse_phandle(np, "device", 0);
	if (!np_dev)
		goto out;

	gmac_base = of_iomap(np_dev, 0);
	if (!gmac_base) {
		pr_err("%pOF: can't map GMAC registers\n", np_dev);
		goto err_iomap;
	}

	mr_an_status = __raw_readl(gmac_base + QCA955X_GMAC_REG_MR_AN_STATUS);
	if (!(mr_an_status & QCA955X_MR_AN_STATUS_AN_ABILITY))
		goto sgmii_out;

	/* SGMII reset sequence */
	__raw_writel(QCA955X_SGMII_RESET_RX_CLK_N_RESET,
		     gmac_base + QCA955X_GMAC_REG_SGMII_RESET);
	__raw_readl(gmac_base + QCA955X_GMAC_REG_SGMII_RESET);
	udelay(10);

	ag71xx_bit_set(gmac_base + QCA955X_GMAC_REG_SGMII_RESET,
		       QCA955X_SGMII_RESET_HW_RX_125M_N);
	udelay(10);

	ag71xx_bit_set(gmac_base + QCA955X_GMAC_REG_SGMII_RESET,
		       QCA955X_SGMII_RESET_RX_125M_N);
	udelay(10);

	ag71xx_bit_set(gmac_base + QCA955X_GMAC_REG_SGMII_RESET,
		       QCA955X_SGMII_RESET_TX_125M_N);
	udelay(10);

	ag71xx_bit_set(gmac_base + QCA955X_GMAC_REG_SGMII_RESET,
		       QCA955X_SGMII_RESET_RX_CLK_N);
	udelay(10);

	ag71xx_bit_set(gmac_base + QCA955X_GMAC_REG_SGMII_RESET,
		       QCA955X_SGMII_RESET_TX_CLK_N);
	udelay(10);

	/*
	 * The following is what QCA has to say about what happens here:
	 *
	 * Across resets SGMII link status goes to weird state.
	 * If SGMII_DEBUG register reads other than 0x1f or 0x10,
	 * we are for sure in a bad state.
	 *
	 * Issue a PHY reset in MR_AN_CONTROL to keep going.
	 */
	do {
		ag71xx_bit_set(gmac_base + QCA955X_GMAC_REG_MR_AN_CONTROL,
			       QCA955X_MR_AN_CONTROL_PHY_RESET |
			       QCA955X_MR_AN_CONTROL_AN_ENABLE);
		udelay(200);
		ag71xx_bit_clear(gmac_base + QCA955X_GMAC_REG_MR_AN_CONTROL,
				 QCA955X_MR_AN_CONTROL_PHY_RESET);
		mdelay(300);
		sgmii_status = __raw_readl(gmac_base + QCA955X_GMAC_REG_SGMII_DEBUG) &
			       QCA955X_SGMII_DEBUG_TX_STATE_MASK;

		if (tries++ >= 20) {
			pr_err("ag71xx: max retries for SGMII fixup exceeded\n");
			break;
		}
	} while (!(sgmii_status == 0xf || sgmii_status == 0x10));

sgmii_out:
	iounmap(gmac_base);
err_iomap:
	of_node_put(np_dev);
out:
	of_node_put(np);
}

static void ag71xx_mux_select_sgmii_qca956x(struct device_node *np)
{
	struct device_node *np_dev;
	void __iomem *gmac_base;
	u32 t;

	np = of_get_child_by_name(np, "gmac-config");
	if (!np)
		return;

	np_dev = of_parse_phandle(np, "device", 0);
	if (!np_dev)
		goto out;

	gmac_base = of_iomap(np_dev, 0);
	if (!gmac_base) {
		pr_err("%pOF: can't map GMAC registers\n", np_dev);
		goto err_iomap;
	}

	t = __raw_readl(gmac_base + QCA956X_GMAC_REG_ETH_CFG);
	t |= QCA956X_ETH_CFG_GE0_SGMII;
	__raw_writel(t, gmac_base + QCA956X_GMAC_REG_ETH_CFG);

	iounmap(gmac_base);
err_iomap:
	of_node_put(np_dev);
out:
	of_node_put(np);
}

static void ath79_mii_ctrl_set_if(struct ag71xx *ag, unsigned int mii_if)
{
	u32 t;

	t = __raw_readl(ag->mii_base);
	t &= ~(AR71XX_MII_CTRL_IF_MASK);
	t |= (mii_if & AR71XX_MII_CTRL_IF_MASK);
	__raw_writel(t, ag->mii_base);
}

static void ath79_mii0_ctrl_set_if(struct ag71xx *ag)
{
	unsigned int mii_if;

	switch (ag->phy_if_mode) {
	case PHY_INTERFACE_MODE_MII:
		mii_if = AR71XX_MII0_CTRL_IF_MII;
		break;
	case PHY_INTERFACE_MODE_GMII:
		mii_if = AR71XX_MII0_CTRL_IF_GMII;
		break;
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		mii_if = AR71XX_MII0_CTRL_IF_RGMII;
		break;
	case PHY_INTERFACE_MODE_RMII:
		mii_if = AR71XX_MII0_CTRL_IF_RMII;
		break;
	default:
		WARN(1, "Impossible PHY mode defined.\n");
		return;
	}

	ath79_mii_ctrl_set_if(ag, mii_if);
}

static void ath79_mii1_ctrl_set_if(struct ag71xx *ag)
{
	unsigned int mii_if;

	switch (ag->phy_if_mode) {
	case PHY_INTERFACE_MODE_RMII:
		mii_if = AR71XX_MII1_CTRL_IF_RMII;
		break;
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		mii_if = AR71XX_MII1_CTRL_IF_RGMII;
		break;
	default:
		WARN(1, "Impossible PHY mode defined.\n");
		return;
	}

	ath79_mii_ctrl_set_if(ag, mii_if);
}

static void ath79_mii_ctrl_set_speed(struct ag71xx *ag)
{
	unsigned int mii_speed;
	u32 t;

	if (!ag->mii_base)
		return;

	switch (ag->speed) {
	case SPEED_10:
		mii_speed = AR71XX_MII_CTRL_SPEED_10;
		break;
	case SPEED_100:
		mii_speed = AR71XX_MII_CTRL_SPEED_100;
		break;
	case SPEED_1000:
		mii_speed = AR71XX_MII_CTRL_SPEED_1000;
		break;
	default:
		BUG();
	}

	t = __raw_readl(ag->mii_base);
	t &= ~(AR71XX_MII_CTRL_SPEED_MASK << AR71XX_MII_CTRL_SPEED_SHIFT);
	t |= mii_speed << AR71XX_MII_CTRL_SPEED_SHIFT;
	__raw_writel(t, ag->mii_base);
}

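/*
 * Apply the negotiated link parameters (speed/duplex) to the MAC:
 * MAC_CFG2 interface width, IFCTL speed bit, FIFO burst mode and,
 * on SoCs that need it, the ethernet PLL and SGMII fixups.
 */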
static void
__ag71xx_link_adjust(struct ag71xx *ag, bool update)
{
	struct device_node *np = ag->pdev->dev.of_node;
	u32 cfg2;
	u32 ifctl;
	u32 fifo5;

	if (!ag->link && update) {
		ag71xx_hw_stop(ag);
		netif_carrier_off(ag->dev);
		if (netif_msg_link(ag))
			pr_info("%s: link down\n", ag->dev->name);
		return;
	}

	if (!of_device_is_compatible(np, "qca,ar9130-eth") &&
	    !of_device_is_compatible(np, "qca,ar7100-eth"))
		ag71xx_fast_reset(ag);

	cfg2 = ag71xx_rr(ag, AG71XX_REG_MAC_CFG2);
	cfg2 &= ~(MAC_CFG2_IF_1000 | MAC_CFG2_IF_10_100 | MAC_CFG2_FDX);
	cfg2 |= (ag->duplex) ? MAC_CFG2_FDX : 0;

	ifctl = ag71xx_rr(ag, AG71XX_REG_MAC_IFCTL);
	ifctl &= ~(MAC_IFCTL_SPEED);

	fifo5 = ag71xx_rr(ag, AG71XX_REG_FIFO_CFG5);
	fifo5 &= ~FIFO_CFG5_BM;

	switch (ag->speed) {
	case SPEED_1000:
		cfg2 |= MAC_CFG2_IF_1000;
		fifo5 |= FIFO_CFG5_BM;
		break;
	case SPEED_100:
		cfg2 |= MAC_CFG2_IF_10_100;
		ifctl |= MAC_IFCTL_SPEED;
		break;
	case SPEED_10:
		cfg2 |= MAC_CFG2_IF_10_100;
		break;
	default:
		BUG();
		return;
	}

	if (ag->tx_ring.desc_split) {
		ag->fifodata[2] &= 0xffff;
		ag->fifodata[2] |= ((2048 - ag->tx_ring.desc_split) / 4) << 16;
	}

	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG3, ag->fifodata[2]);

	if (of_device_is_compatible(np, "qca,ar7100-eth") ||
	    of_device_is_compatible(np, "qca,ar9130-eth")) {
		ath79_set_pll(ag);
		ath79_mii_ctrl_set_speed(ag);
	} else if (of_device_is_compatible(np, "qca,ar7242-eth") ||
		   of_device_is_compatible(np, "qca,ar9340-eth") ||
		   of_device_is_compatible(np, "qca,qca9550-eth") ||
		   of_device_is_compatible(np, "qca,qca9560-eth")) {
		ath79_set_pllval(ag);
		if (of_property_read_bool(np, "qca955x-sgmii-fixup"))
			ag71xx_sgmii_init_qca955x(np);
	}

	ag71xx_wr(ag, AG71XX_REG_MAC_CFG2, cfg2);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG5, fifo5);
	ag71xx_wr(ag, AG71XX_REG_MAC_IFCTL, ifctl);

	if (of_device_is_compatible(np, "qca,qca9530-eth") ||
	    of_device_is_compatible(np, "qca,qca9560-eth")) {
		/*
		 * The rx ring buffer can stall on small packets on QCA953x and
		 * QCA956x. Disabling the inline checksum engine fixes the stall.
		 * The wr, rr functions cannot be used since this hidden register
		 * is outside of the normal ag71xx register block.
		 */
		void __iomem *dam = ioremap(0xb90001bc, 0x4);

		if (dam) {
			__raw_writel(__raw_readl(dam) & ~BIT(27), dam);
			(void)__raw_readl(dam);
			iounmap(dam);
		}
	}

	ag71xx_hw_start(ag);

	netif_carrier_on(ag->dev);
	if (update && netif_msg_link(ag))
		pr_info("%s: link up (%sMbps/%s duplex)\n",
			ag->dev->name,
			ag71xx_speed_str(ag),
			(DUPLEX_FULL == ag->duplex) ? "Full" : "Half");

	ag71xx_dump_regs(ag);
}

void ag71xx_link_adjust(struct ag71xx *ag)
{
	__ag71xx_link_adjust(ag, true);
}

static int ag71xx_hw_enable(struct ag71xx *ag)
{
	int ret;

	ret = ag71xx_rings_init(ag);
	if (ret)
		return ret;

	napi_enable(&ag->napi);
	ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->tx_ring.descs_dma);
	ag71xx_wr(ag, AG71XX_REG_RX_DESC, ag->rx_ring.descs_dma);
	netif_start_queue(ag->dev);

	return 0;
}

static void ag71xx_hw_disable(struct ag71xx *ag)
{
	netif_stop_queue(ag->dev);

	ag71xx_hw_stop(ag);
	ag71xx_dma_reset(ag);

	napi_disable(&ag->napi);
	del_timer_sync(&ag->oom_timer);

	ag71xx_rings_cleanup(ag);
}

static int ag71xx_open(struct net_device *dev)
{
	struct ag71xx *ag = netdev_priv(dev);
	unsigned int max_frame_len;
	int ret;

	netif_carrier_off(dev);
	max_frame_len = ag71xx_max_frame_len(dev->mtu);
	ag->rx_buf_size = SKB_DATA_ALIGN(max_frame_len + NET_SKB_PAD + NET_IP_ALIGN);

	/* setup max frame length */
	ag71xx_wr(ag, AG71XX_REG_MAC_MFL, max_frame_len);
	ag71xx_hw_set_macaddr(ag, dev->dev_addr);

	ret = ag71xx_hw_enable(ag);
	if (ret)
		goto err;

	phy_start(ag->phy_dev);

	return 0;

err:
	ag71xx_rings_cleanup(ag);
	return ret;
}

static int ag71xx_stop(struct net_device *dev)
{
	unsigned long flags;
	struct ag71xx *ag = netdev_priv(dev);

	netif_carrier_off(dev);
	phy_stop(ag->phy_dev);

	spin_lock_irqsave(&ag->lock, flags);
	if (ag->link) {
		ag->link = 0;
		ag71xx_link_adjust(ag);
	}
	spin_unlock_irqrestore(&ag->lock, flags);

	ag71xx_hw_disable(ag);

	return 0;
}

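/*
 * Build one or more hardware descriptors for a linear TX buffer. On
 * AR7100 the buffer is split into ring->desc_split sized chunks chained
 * with DESC_MORE; returns the number of descriptors used, or -1 if the
 * ring is full.
 */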
static int ag71xx_fill_dma_desc(struct ag71xx_ring *ring, u32 addr, int len)
{
	int i;
	struct ag71xx_desc *desc;
	int ring_mask = BIT(ring->order) - 1;
	int ndesc = 0;
	int split = ring->desc_split;

	if (!split)
		split = len;

	while (len > 0) {
		unsigned int cur_len = len;

		i = (ring->curr + ndesc) & ring_mask;
		desc = ag71xx_ring_desc(ring, i);

		if (!ag71xx_desc_empty(desc))
			return -1;

		if (cur_len > split) {
			cur_len = split;

			/*
			 * TX will hang if DMA transfers <= 4 bytes,
			 * make sure next segment is more than 4 bytes long.
			 */
			if (len <= split + 4)
				cur_len -= 4;
		}

		desc->data = addr;
		addr += cur_len;
		len -= cur_len;

		if (len > 0)
			cur_len |= DESC_MORE;

		/* prevent early tx attempt of this descriptor */
		if (!ndesc)
			cur_len |= DESC_EMPTY;

		desc->ctrl = cur_len;
		ndesc++;
	}

	return ndesc;
}

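/*
 * The first descriptor of a packet is initially marked DESC_EMPTY by
 * ag71xx_fill_dma_desc() and only armed in the xmit path below, once
 * the whole chain is set up, so the hardware cannot start transmitting
 * a half-built packet.
 */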
static netdev_tx_t ag71xx_hard_start_xmit(struct sk_buff *skb,
					  struct net_device *dev)
{
	struct ag71xx *ag = netdev_priv(dev);
	struct ag71xx_ring *ring = &ag->tx_ring;
	int ring_mask = BIT(ring->order) - 1;
	int ring_size = BIT(ring->order);
	struct ag71xx_desc *desc;
	dma_addr_t dma_addr;
	int i, n, ring_min;

	if (skb->len <= 4) {
		DBG("%s: packet len is too small\n", ag->dev->name);
		goto err_drop;
	}

	dma_addr = dma_map_single(&ag->pdev->dev, skb->data, skb->len,
				  DMA_TO_DEVICE);

	i = ring->curr & ring_mask;
	desc = ag71xx_ring_desc(ring, i);

	/* setup descriptor fields */
	n = ag71xx_fill_dma_desc(ring, (u32) dma_addr, skb->len & ag->desc_pktlen_mask);
	if (n < 0)
		goto err_drop_unmap;

	i = (ring->curr + n - 1) & ring_mask;
	ring->buf[i].len = skb->len;
	ring->buf[i].skb = skb;

	netdev_sent_queue(dev, skb->len);

	skb_tx_timestamp(skb);

	desc->ctrl &= ~DESC_EMPTY;
	ring->curr += n;

	/* flush descriptor */
	wmb();

	ring_min = 2;
	if (ring->desc_split)
		ring_min *= AG71XX_TX_RING_DS_PER_PKT;

	if (ring->curr - ring->dirty >= ring_size - ring_min) {
		DBG("%s: tx queue full\n", dev->name);
		netif_stop_queue(dev);
	}

	DBG("%s: packet injected into TX queue\n", ag->dev->name);

	/* enable TX engine */
	ag71xx_wr(ag, AG71XX_REG_TX_CTRL, TX_CTRL_TXE);

	return NETDEV_TX_OK;

err_drop_unmap:
	dma_unmap_single(&ag->pdev->dev, dma_addr, skb->len, DMA_TO_DEVICE);

err_drop:
	dev->stats.tx_dropped++;

	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

static int ag71xx_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct ag71xx *ag = netdev_priv(dev);

	switch (cmd) {
	case SIOCSIFHWADDR:
		if (copy_from_user
			((void *)dev->dev_addr, ifr->ifr_data, sizeof(dev->dev_addr)))
			return -EFAULT;
		return 0;

	case SIOCGIFHWADDR:
		if (copy_to_user
			(ifr->ifr_data, dev->dev_addr, sizeof(dev->dev_addr)))
			return -EFAULT;
		return 0;

	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		if (ag->phy_dev == NULL)
			break;

		return phy_mii_ioctl(ag->phy_dev, ifr, cmd);

	default:
		break;
	}

	return -EOPNOTSUPP;
}

static void ag71xx_oom_timer_handler(struct timer_list *t)
{
	struct ag71xx *ag = from_timer(ag, t, oom_timer);

	napi_schedule(&ag->napi);
}

static void ag71xx_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct ag71xx *ag = netdev_priv(dev);

	if (netif_msg_tx_err(ag))
		pr_info("%s: tx timeout\n", ag->dev->name);

	schedule_delayed_work(&ag->restart_work, 1);
}

static void ag71xx_restart_work_func(struct work_struct *work)
{
	struct ag71xx *ag = container_of(work, struct ag71xx, restart_work.work);

	rtnl_lock();
	ag71xx_hw_disable(ag);
	ag71xx_hw_enable(ag);
	if (ag->link)
		__ag71xx_link_adjust(ag, false);
	rtnl_unlock();
}

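/*
 * Heuristic for the silicon TX hang: if the queue has been idle for a
 * while with carrier up, inspect the DMA state machine and FIFO depth
 * registers for the known stuck patterns.
 */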
static bool ag71xx_check_dma_stuck(struct ag71xx *ag)
{
	unsigned long timestamp;
	u32 rx_sm, tx_sm, rx_fd;

	timestamp = netdev_get_tx_queue(ag->dev, 0)->trans_start;
	if (likely(time_before(jiffies, timestamp + HZ/10)))
		return false;

	if (!netif_carrier_ok(ag->dev))
		return false;

	rx_sm = ag71xx_rr(ag, AG71XX_REG_RX_SM);
	if ((rx_sm & 0x7) == 0x3 && ((rx_sm >> 4) & 0x7) == 0x6)
		return true;

	tx_sm = ag71xx_rr(ag, AG71XX_REG_TX_SM);
	rx_fd = ag71xx_rr(ag, AG71XX_REG_FIFO_DEPTH);
	if (((tx_sm >> 4) & 0x7) == 0 && ((rx_sm & 0x7) == 0) &&
	    ((rx_sm >> 4) & 0x7) == 0 && rx_fd == 0)
		return true;

	return false;
}

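/*
 * Reap completed TX descriptors. With flush set, descriptors are
 * reclaimed unconditionally (used by the reset paths); otherwise we
 * stop at the first descriptor still owned by the hardware.
 */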
static int ag71xx_tx_packets(struct ag71xx *ag, bool flush, int budget)
{
	struct ag71xx_ring *ring = &ag->tx_ring;
	bool dma_stuck = false;
	int ring_mask = BIT(ring->order) - 1;
	int ring_size = BIT(ring->order);
	int sent = 0;
	int bytes_compl = 0;
	int n = 0;

	DBG("%s: processing TX ring\n", ag->dev->name);

	while (ring->dirty + n != ring->curr) {
		unsigned int i = (ring->dirty + n) & ring_mask;
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);
		struct sk_buff *skb = ring->buf[i].skb;

		if (!flush && !ag71xx_desc_empty(desc)) {
			if (ag->tx_hang_workaround &&
			    ag71xx_check_dma_stuck(ag)) {
				schedule_delayed_work(&ag->restart_work, HZ / 2);
				dma_stuck = true;
			}
			break;
		}

		if (flush)
			desc->ctrl |= DESC_EMPTY;

		n++;
		if (!skb)
			continue;

		napi_consume_skb(skb, budget);
		ring->buf[i].skb = NULL;

		bytes_compl += ring->buf[i].len;

		sent++;
		ring->dirty += n;

		while (n > 0) {
			ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_PS);
			n--;
		}
	}

	DBG("%s: %d packets sent out\n", ag->dev->name, sent);

	if (!sent)
		return 0;

	ag->dev->stats.tx_bytes += bytes_compl;
	ag->dev->stats.tx_packets += sent;

	netdev_completed_queue(ag->dev, sent, bytes_compl);
	if ((ring->curr - ring->dirty) < (ring_size * 3) / 4)
		netif_wake_queue(ag->dev);

	if (!dma_stuck)
		cancel_delayed_work(&ag->restart_work);

	return sent;
}

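/*
 * RX fast path: detach the page fragment from the descriptor, wrap it
 * in an skb and batch delivery through netif_receive_skb_list(); the
 * descriptors are replenished afterwards by ag71xx_ring_rx_refill().
 */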
static int ag71xx_rx_packets(struct ag71xx *ag, int limit)
{
	struct net_device *dev = ag->dev;
	struct ag71xx_ring *ring = &ag->rx_ring;
	unsigned int pktlen_mask = ag->desc_pktlen_mask;
	unsigned int offset = ag->rx_buf_offset;
	int ring_mask = BIT(ring->order) - 1;
	int ring_size = BIT(ring->order);
	struct list_head rx_list;
	struct sk_buff *skb;
	int done = 0;

	DBG("%s: rx packets, limit=%d, curr=%u, dirty=%u\n",
		dev->name, limit, ring->curr, ring->dirty);

	INIT_LIST_HEAD(&rx_list);

	while (done < limit) {
		unsigned int i = ring->curr & ring_mask;
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);
		int pktlen;
		int err = 0;

		if (ag71xx_desc_empty(desc))
			break;

		if ((ring->dirty + ring_size) == ring->curr) {
			ag71xx_assert(0);
			break;
		}

		ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_PR);

		pktlen = desc->ctrl & pktlen_mask;
		pktlen -= ETH_FCS_LEN;

		dma_unmap_single(&ag->pdev->dev, ring->buf[i].dma_addr,
				 ag->rx_buf_size, DMA_FROM_DEVICE);

		dev->stats.rx_packets++;
		dev->stats.rx_bytes += pktlen;

		skb = napi_build_skb(ring->buf[i].rx_buf, ag71xx_buffer_size(ag));
		if (!skb) {
			skb_free_frag(ring->buf[i].rx_buf);
			goto next;
		}

		skb_reserve(skb, offset);
		skb_put(skb, pktlen);

		if (err) {
			dev->stats.rx_dropped++;
			kfree_skb(skb);
		} else {
			skb->dev = dev;
			skb->ip_summed = CHECKSUM_NONE;
			list_add_tail(&skb->list, &rx_list);
		}

next:
		ring->buf[i].rx_buf = NULL;
		done++;

		ring->curr++;
	}

	ag71xx_ring_rx_refill(ag);

	list_for_each_entry(skb, &rx_list, list)
		skb->protocol = eth_type_trans(skb, dev);
	netif_receive_skb_list(&rx_list);

	DBG("%s: rx finish, curr=%u, dirty=%u, done=%d\n",
		dev->name, ring->curr, ring->dirty, done);

	return done;
}

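/*
 * NAPI handler: interrupts stay disabled while packets are pending;
 * polling mode is left (and AG71XX_INT_POLL re-enabled) only once both
 * the RX and TX status registers report no further work.
 */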
static int ag71xx_poll(struct napi_struct *napi, int limit)
{
	struct ag71xx *ag = container_of(napi, struct ag71xx, napi);
	struct net_device *dev = ag->dev;
	struct ag71xx_ring *rx_ring = &ag->rx_ring;
	int rx_ring_size = BIT(rx_ring->order);
	unsigned long flags;
	u32 status;
	int tx_done;
	int rx_done;

	tx_done = ag71xx_tx_packets(ag, false, limit);

	DBG("%s: processing RX ring\n", dev->name);
	rx_done = ag71xx_rx_packets(ag, limit);

	ag71xx_debugfs_update_napi_stats(ag, rx_done, tx_done);

	if (rx_ring->buf[rx_ring->dirty % rx_ring_size].rx_buf == NULL)
		goto oom;

	status = ag71xx_rr(ag, AG71XX_REG_RX_STATUS);
	if (unlikely(status & RX_STATUS_OF)) {
		ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_OF);
		dev->stats.rx_fifo_errors++;

		/* restart RX */
		ag71xx_wr(ag, AG71XX_REG_RX_CTRL, RX_CTRL_RXE);
	}

	if (rx_done < limit) {
		if (status & RX_STATUS_PR)
			goto more;

		status = ag71xx_rr(ag, AG71XX_REG_TX_STATUS);
		if (status & TX_STATUS_PS)
			goto more;

		DBG("%s: disable polling mode, rx=%d, tx=%d,limit=%d\n",
			dev->name, rx_done, tx_done, limit);

		napi_complete(napi);

		/* enable interrupts */
		spin_lock_irqsave(&ag->lock, flags);
		ag71xx_int_enable(ag, AG71XX_INT_POLL);
		spin_unlock_irqrestore(&ag->lock, flags);
		return rx_done;
	}

more:
	DBG("%s: stay in polling mode, rx=%d, tx=%d, limit=%d\n",
		dev->name, rx_done, tx_done, limit);
	return limit;

oom:
	if (netif_msg_rx_err(ag))
		pr_info("%s: out of memory\n", dev->name);

	mod_timer(&ag->oom_timer, jiffies + AG71XX_OOM_REFILL);
	napi_complete(napi);
	return 0;
}

static irqreturn_t ag71xx_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct ag71xx *ag = netdev_priv(dev);
	u32 status;

	status = ag71xx_rr(ag, AG71XX_REG_INT_STATUS);
	ag71xx_dump_intr(ag, "raw", status);

	if (unlikely(!status))
		return IRQ_NONE;

	if (unlikely(status & AG71XX_INT_ERR)) {
		if (status & AG71XX_INT_TX_BE) {
			ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_BE);
			dev_err(&dev->dev, "TX BUS error\n");
		}
		if (status & AG71XX_INT_RX_BE) {
			ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_BE);
			dev_err(&dev->dev, "RX BUS error\n");
		}
	}

	if (likely(status & AG71XX_INT_POLL)) {
		ag71xx_int_disable(ag, AG71XX_INT_POLL);
		DBG("%s: enable polling mode\n", dev->name);
		napi_schedule(&ag->napi);
	}

	ag71xx_debugfs_update_int_stats(ag, status);

	return IRQ_HANDLED;
}

static int ag71xx_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ag71xx *ag = netdev_priv(dev);

	dev->mtu = new_mtu;
	ag71xx_wr(ag, AG71XX_REG_MAC_MFL,
		  ag71xx_max_frame_len(dev->mtu));

	return 0;
}

static const struct net_device_ops ag71xx_netdev_ops = {
	.ndo_open		= ag71xx_open,
	.ndo_stop		= ag71xx_stop,
	.ndo_start_xmit		= ag71xx_hard_start_xmit,
	.ndo_do_ioctl		= ag71xx_do_ioctl,
	.ndo_tx_timeout		= ag71xx_tx_timeout,
	.ndo_change_mtu		= ag71xx_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

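/*
 * All per-SoC differences (FIFO defaults, PLL registers, descriptor
 * length mask, TX hang workaround, ring split) are keyed off the
 * compatible string and optional DT properties below.
 */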
static int ag71xx_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct net_device *dev;
	struct resource *res;
	struct ag71xx *ag;
	u32 max_frame_len;
	int tx_size;
	int err;

	if (!np)
		return -ENODEV;

	dev = devm_alloc_etherdev(&pdev->dev, sizeof(*ag));
	if (!dev)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -EINVAL;

	if (of_property_read_bool(np, "qca956x-serdes-fixup")) {
		ag71xx_sgmii_serdes_init_qca956x(np);
		ag71xx_sgmii_init_qca955x(np);
	}

	err = ag71xx_setup_gmac(np);
	if (err)
		return err;

	SET_NETDEV_DEV(dev, &pdev->dev);

	ag = netdev_priv(dev);
	ag->pdev = pdev;
	ag->dev = dev;
	ag->msg_enable = netif_msg_init(ag71xx_msg_level,
					AG71XX_DEFAULT_MSG_ENABLE);
	spin_lock_init(&ag->lock);

	ag->mac_reset = devm_reset_control_get_exclusive(&pdev->dev, "mac");
	if (IS_ERR(ag->mac_reset)) {
		dev_err(&pdev->dev, "missing mac reset\n");
		return PTR_ERR(ag->mac_reset);
	}

	ag->mdio_reset = devm_reset_control_get_optional_exclusive(&pdev->dev, "mdio");

	if (of_property_read_u32_array(np, "fifo-data", ag->fifodata, 3)) {
		if (of_device_is_compatible(np, "qca,ar9130-eth") ||
		    of_device_is_compatible(np, "qca,ar7100-eth")) {
			ag->fifodata[0] = 0x0fff0000;
			ag->fifodata[1] = 0x00001fff;
		} else {
			ag->fifodata[0] = 0x0010ffff;
			ag->fifodata[1] = 0x015500aa;
			ag->fifodata[2] = 0x01f00140;
		}
		if (of_device_is_compatible(np, "qca,ar9130-eth"))
			ag->fifodata[2] = 0x00780fff;
		else if (of_device_is_compatible(np, "qca,ar7100-eth"))
			ag->fifodata[2] = 0x008001ff;
	}

	if (of_property_read_u32_array(np, "pll-data", ag->plldata, 3))
		dev_dbg(&pdev->dev, "failed to read pll-data property\n");

	if (of_property_read_u32_array(np, "pll-reg", ag->pllreg, 3))
		dev_dbg(&pdev->dev, "failed to read pll-reg property\n");

	ag->pllregmap = syscon_regmap_lookup_by_phandle(np, "pll-handle");
	if (IS_ERR(ag->pllregmap)) {
		dev_dbg(&pdev->dev, "failed to read pll-handle property\n");
		ag->pllregmap = NULL;
	}

	ag->mac_base = devm_ioremap(&pdev->dev, res->start,
				    res->end - res->start + 1);
	if (!ag->mac_base)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (res) {
		ag->mii_base = devm_ioremap(&pdev->dev, res->start,
					    res->end - res->start + 1);
		if (!ag->mii_base)
			return -ENOMEM;
	}

	/* ensure that HW is in manual polling mode before interrupts are
	 * activated. Otherwise ag71xx_interrupt might call napi_schedule
	 * before it is initialized by netif_napi_add.
	 */
	ag71xx_int_disable(ag, AG71XX_INT_POLL);

	dev->irq = platform_get_irq(pdev, 0);
	err = devm_request_irq(&pdev->dev, dev->irq, ag71xx_interrupt,
			       0x0, dev_name(&pdev->dev), dev);
	if (err) {
		dev_err(&pdev->dev, "unable to request IRQ %d\n", dev->irq);
		return err;
	}

	dev->netdev_ops = &ag71xx_netdev_ops;
	dev->ethtool_ops = &ag71xx_ethtool_ops;

	INIT_DELAYED_WORK(&ag->restart_work, ag71xx_restart_work_func);

	timer_setup(&ag->oom_timer, ag71xx_oom_timer_handler, 0);

	tx_size = AG71XX_TX_RING_SIZE_DEFAULT;
	ag->rx_ring.order = ag71xx_ring_size_order(AG71XX_RX_RING_SIZE_DEFAULT);

	if (of_device_is_compatible(np, "qca,ar9340-eth") ||
	    of_device_is_compatible(np, "qca,qca9530-eth") ||
	    of_device_is_compatible(np, "qca,qca9550-eth") ||
	    of_device_is_compatible(np, "qca,qca9560-eth"))
		ag->desc_pktlen_mask = SZ_16K - 1;
	else
		ag->desc_pktlen_mask = SZ_4K - 1;

	if (ag->desc_pktlen_mask == SZ_16K - 1 &&
	    !of_device_is_compatible(np, "qca,qca9550-eth") &&
	    !of_device_is_compatible(np, "qca,qca9560-eth"))
		max_frame_len = ag->desc_pktlen_mask;
	else
		max_frame_len = 1540;

	dev->min_mtu = 68;
	dev->max_mtu = max_frame_len - ag71xx_max_frame_len(0);

	if (of_device_is_compatible(np, "qca,ar7240-eth") ||
	    of_device_is_compatible(np, "qca,ar7241-eth") ||
	    of_device_is_compatible(np, "qca,ar7242-eth") ||
	    of_device_is_compatible(np, "qca,ar9330-eth") ||
	    of_device_is_compatible(np, "qca,ar9340-eth") ||
	    of_device_is_compatible(np, "qca,qca9530-eth") ||
	    of_device_is_compatible(np, "qca,qca9550-eth") ||
	    of_device_is_compatible(np, "qca,qca9560-eth"))
		ag->tx_hang_workaround = 1;

	ag->rx_buf_offset = NET_SKB_PAD;
	if (!of_device_is_compatible(np, "qca,ar7100-eth") &&
	    !of_device_is_compatible(np, "qca,ar9130-eth"))
		ag->rx_buf_offset += NET_IP_ALIGN;

	if (of_device_is_compatible(np, "qca,ar7100-eth")) {
		ag->tx_ring.desc_split = AG71XX_TX_RING_SPLIT;
		tx_size *= AG71XX_TX_RING_DS_PER_PKT;
	}
	ag->tx_ring.order = ag71xx_ring_size_order(tx_size);

	ag->stop_desc = dmam_alloc_coherent(&pdev->dev,
					    sizeof(struct ag71xx_desc),
					    &ag->stop_desc_dma, GFP_KERNEL);
	if (!ag->stop_desc)
		return -ENOMEM;

	ag->stop_desc->data = 0;
	ag->stop_desc->ctrl = 0;
	ag->stop_desc->next = (u32) ag->stop_desc_dma;

	if (of_get_ethdev_address(np, dev)) {
		dev_err(&pdev->dev, "invalid MAC address, using random address\n");
		eth_hw_addr_random(dev);
	}

	err = of_get_phy_mode(np, &ag->phy_if_mode);
	if (err) {
		dev_err(&pdev->dev, "missing phy-mode property in DT\n");
		return err;
	}

	if (of_device_is_compatible(np, "qca,qca9560-eth") &&
	    ag->phy_if_mode == PHY_INTERFACE_MODE_SGMII)
		ag71xx_mux_select_sgmii_qca956x(np);

	if (of_property_read_u32(np, "qca,mac-idx", &ag->mac_idx))
		ag->mac_idx = -1;

	switch (ag->mac_idx) {
	case 0:
		ath79_mii0_ctrl_set_if(ag);
		break;
	case 1:
		ath79_mii1_ctrl_set_if(ag);
		break;
	default:
		break;
	}

	netif_napi_add_weight(dev, &ag->napi, ag71xx_poll, AG71XX_NAPI_WEIGHT);

	ag71xx_dump_regs(ag);

	ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, 0);

	ag71xx_hw_init(ag);

	ag71xx_dump_regs(ag);

	/*
	 * populate current node to register mdio-bus as a subdevice.
	 * the mdio bus works independently on ar7241 and later chips
	 * and we need to load mdio1 before gmac0, which can be done
	 * by adding a "simple-mfd" compatible to gmac node. The
	 * following code checks OF_POPULATED_BUS flag before populating
	 * to avoid duplicated population.
	 */
	if (!of_node_check_flag(np, OF_POPULATED_BUS)) {
		err = of_platform_populate(np, NULL, NULL, &pdev->dev);
		if (err)
			return err;
	}

	err = ag71xx_phy_connect(ag);
	if (err)
		return err;

	err = ag71xx_debugfs_init(ag);
	if (err)
		goto err_phy_disconnect;

	platform_set_drvdata(pdev, dev);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "unable to register net device\n");
		platform_set_drvdata(pdev, NULL);
		ag71xx_debugfs_exit(ag);
		goto err_phy_disconnect;
	}

	pr_info("%s: Atheros AG71xx at 0x%08lx, irq %d, mode: %s\n",
		dev->name, (unsigned long) ag->mac_base, dev->irq,
		phy_modes(ag->phy_if_mode));

	return 0;

err_phy_disconnect:
	ag71xx_phy_disconnect(ag);
	return err;
}

static int ag71xx_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct ag71xx *ag;

	if (!dev)
		return 0;

	ag = netdev_priv(dev);
	ag71xx_debugfs_exit(ag);
	ag71xx_phy_disconnect(ag);
	unregister_netdev(dev);
	platform_set_drvdata(pdev, NULL);

	return 0;
}

static const struct of_device_id ag71xx_match[] = {
	{ .compatible = "qca,ar7100-eth" },
	{ .compatible = "qca,ar7240-eth" },
	{ .compatible = "qca,ar7241-eth" },
	{ .compatible = "qca,ar7242-eth" },
	{ .compatible = "qca,ar9130-eth" },
	{ .compatible = "qca,ar9330-eth" },
	{ .compatible = "qca,ar9340-eth" },
	{ .compatible = "qca,qca9530-eth" },
	{ .compatible = "qca,qca9550-eth" },
	{ .compatible = "qca,qca9560-eth" },
	{}
};

static struct platform_driver ag71xx_driver = {
	.probe		= ag71xx_probe,
	.remove		= ag71xx_remove,
	.driver = {
		.name		= AG71XX_DRV_NAME,
		.of_match_table	= ag71xx_match,
	},
};

static int __init ag71xx_module_init(void)
{
	int ret;

	ret = ag71xx_debugfs_root_init();
	if (ret)
		goto err_out;

	ret = platform_driver_register(&ag71xx_driver);
	if (ret)
		goto err_debugfs_exit;

	return 0;

err_debugfs_exit:
	ag71xx_debugfs_root_exit();
err_out:
	return ret;
}

static void __exit ag71xx_module_exit(void)
{
	platform_driver_unregister(&ag71xx_driver);
	ag71xx_debugfs_root_exit();
}

1815 ag71xx_debugfs_root_exit();
1818 module_init(ag71xx_module_init
);
1819 module_exit(ag71xx_module_exit
);
1821 MODULE_AUTHOR("Gabor Juhos <juhosg@openwrt.org>");
1822 MODULE_AUTHOR("Imre Kaloz <kaloz@openwrt.org>");
1823 MODULE_AUTHOR("Felix Fietkau <nbd@nbd.name>");
1824 MODULE_LICENSE("GPL v2");
1825 MODULE_ALIAS("platform:" AG71XX_DRV_NAME
);