/*
 * Atheros AR71xx built-in ethernet mac driver
 *
 * Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
 * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
 *
 * Based on Atheros' AG7100 driver
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include "ag71xx.h"
#define AG71XX_DEFAULT_MSG_ENABLE	\
	(NETIF_MSG_DRV			\
	| NETIF_MSG_PROBE		\
	| NETIF_MSG_LINK		\
	| NETIF_MSG_TIMER		\
	| NETIF_MSG_IFDOWN		\
	| NETIF_MSG_IFUP		\
	| NETIF_MSG_RX_ERR		\
	| NETIF_MSG_TX_ERR)

static int ag71xx_msg_level = -1;

module_param_named(msg_level, ag71xx_msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
#define ETH_SWITCH_HEADER_LEN	2

static int ag71xx_tx_packets(struct ag71xx *ag, bool flush);
static inline unsigned int ag71xx_max_frame_len(unsigned int mtu)
{
	return ETH_SWITCH_HEADER_LEN + ETH_HLEN + VLAN_HLEN + mtu + ETH_FCS_LEN;
}
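/*
 * Worked example (header sizes from if_ether.h/if_vlan.h): for the default
 * 1500-byte MTU the MAC must accept up to 2 (switch header) + 14 (Ethernet
 * header) + 4 (VLAN tag) + 1500 (payload) + 4 (FCS) = 1524 bytes per frame.
 */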
static void ag71xx_dump_dma_regs(struct ag71xx *ag)
{
	DBG("%s: dma_tx_ctrl=%08x, dma_tx_desc=%08x, dma_tx_status=%08x\n",
	    ag->dev->name,
	    ag71xx_rr(ag, AG71XX_REG_TX_CTRL),
	    ag71xx_rr(ag, AG71XX_REG_TX_DESC),
	    ag71xx_rr(ag, AG71XX_REG_TX_STATUS));

	DBG("%s: dma_rx_ctrl=%08x, dma_rx_desc=%08x, dma_rx_status=%08x\n",
	    ag->dev->name,
	    ag71xx_rr(ag, AG71XX_REG_RX_CTRL),
	    ag71xx_rr(ag, AG71XX_REG_RX_DESC),
	    ag71xx_rr(ag, AG71XX_REG_RX_STATUS));
}
static void ag71xx_dump_regs(struct ag71xx *ag)
{
	DBG("%s: mac_cfg1=%08x, mac_cfg2=%08x, ipg=%08x, hdx=%08x, mfl=%08x\n",
	    ag->dev->name,
	    ag71xx_rr(ag, AG71XX_REG_MAC_CFG1),
	    ag71xx_rr(ag, AG71XX_REG_MAC_CFG2),
	    ag71xx_rr(ag, AG71XX_REG_MAC_IPG),
	    ag71xx_rr(ag, AG71XX_REG_MAC_HDX),
	    ag71xx_rr(ag, AG71XX_REG_MAC_MFL));
	DBG("%s: mac_ifctl=%08x, mac_addr1=%08x, mac_addr2=%08x\n",
	    ag->dev->name,
	    ag71xx_rr(ag, AG71XX_REG_MAC_IFCTL),
	    ag71xx_rr(ag, AG71XX_REG_MAC_ADDR1),
	    ag71xx_rr(ag, AG71XX_REG_MAC_ADDR2));
	DBG("%s: fifo_cfg0=%08x, fifo_cfg1=%08x, fifo_cfg2=%08x\n",
	    ag->dev->name,
	    ag71xx_rr(ag, AG71XX_REG_FIFO_CFG0),
	    ag71xx_rr(ag, AG71XX_REG_FIFO_CFG1),
	    ag71xx_rr(ag, AG71XX_REG_FIFO_CFG2));
	DBG("%s: fifo_cfg3=%08x, fifo_cfg4=%08x, fifo_cfg5=%08x\n",
	    ag->dev->name,
	    ag71xx_rr(ag, AG71XX_REG_FIFO_CFG3),
	    ag71xx_rr(ag, AG71XX_REG_FIFO_CFG4),
	    ag71xx_rr(ag, AG71XX_REG_FIFO_CFG5));
}
static inline void ag71xx_dump_intr(struct ag71xx *ag, char *label, u32 intr)
{
	DBG("%s: %s intr=%08x %s%s%s%s%s%s\n",
	    ag->dev->name, label, intr,
	    (intr & AG71XX_INT_TX_PS) ? "TXPS " : "",
	    (intr & AG71XX_INT_TX_UR) ? "TXUR " : "",
	    (intr & AG71XX_INT_TX_BE) ? "TXBE " : "",
	    (intr & AG71XX_INT_RX_PR) ? "RXPR " : "",
	    (intr & AG71XX_INT_RX_OF) ? "RXOF " : "",
	    (intr & AG71XX_INT_RX_BE) ? "RXBE " : "");
}
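/*
 * Ring bookkeeping convention used by all of the helpers below: each ring
 * holds BIT(order) descriptors, 'curr' and 'dirty' are free-running
 * producer/consumer counters, and a slot index is obtained by masking with
 * BIT(order) - 1. The ring is empty when curr == dirty and holds
 * curr - dirty in-flight descriptors; because the ring size is a power of
 * two, this stays correct even across counter wrap-around.
 */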
static void ag71xx_ring_tx_clean(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->tx_ring;
	struct net_device *dev = ag->dev;
	int ring_mask = BIT(ring->order) - 1;
	u32 bytes_compl = 0, pkts_compl = 0;

	while (ring->curr != ring->dirty) {
		struct ag71xx_desc *desc;
		u32 i = ring->dirty & ring_mask;

		desc = ag71xx_ring_desc(ring, i);
		if (!ag71xx_desc_empty(desc)) {
			desc->ctrl = 0;
			dev->stats.tx_errors++;
		}

		if (ring->buf[i].skb) {
			bytes_compl += ring->buf[i].len;
			pkts_compl++;
			dev_kfree_skb_any(ring->buf[i].skb);
		}
		ring->buf[i].skb = NULL;
		ring->dirty++;
	}

	/* flush descriptors */
	wmb();

	netdev_completed_queue(dev, pkts_compl, bytes_compl);
}
static void ag71xx_ring_tx_init(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->tx_ring;
	int ring_size = BIT(ring->order);
	int ring_mask = ring_size - 1;
	int i;

	for (i = 0; i < ring_size; i++) {
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);

		desc->next = (u32) (ring->descs_dma +
			AG71XX_DESC_SIZE * ((i + 1) & ring_mask));

		desc->ctrl = DESC_EMPTY;
		ring->buf[i].skb = NULL;
	}

	/* flush descriptors */
	wmb();

	ring->curr = 0;
	ring->dirty = 0;
	netdev_reset_queue(ag->dev);
}
static void ag71xx_ring_rx_clean(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->rx_ring;
	int ring_size = BIT(ring->order);
	int i;

	if (!ring->buf)
		return;

	for (i = 0; i < ring_size; i++)
		if (ring->buf[i].rx_buf) {
			dma_unmap_single(&ag->dev->dev, ring->buf[i].dma_addr,
					 ag->rx_buf_size, DMA_FROM_DEVICE);
			skb_free_frag(ring->buf[i].rx_buf);
		}
}
static int ag71xx_buffer_offset(struct ag71xx *ag)
{
	int offset = NET_SKB_PAD;

	/*
	 * On AR71xx/AR91xx packets must be 4-byte aligned.
	 *
	 * When using builtin AR8216 support, hardware adds a 2-byte header,
	 * so we don't need any extra alignment in that case.
	 */
	if (!ag71xx_get_pdata(ag)->is_ar724x || ag71xx_has_ar8216(ag))
		return offset;

	return offset + NET_IP_ALIGN;
}
static int ag71xx_buffer_size(struct ag71xx *ag)
{
	return ag->rx_buf_size +
	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}
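/*
 * Layout of an rx buffer as mapped for DMA and later handed to build_skb():
 *
 *   [ headroom (offset) | frame data (rx_buf_size) | skb_shared_info ]
 *
 * The headroom keeps the IP header aligned (see ag71xx_buffer_offset), and
 * the trailing skb_shared_info is why ag71xx_buffer_size() adds
 * SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) on top of rx_buf_size.
 */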
static bool ag71xx_fill_rx_buf(struct ag71xx *ag, struct ag71xx_buf *buf,
			       int offset,
			       void *(*alloc)(unsigned int size))
{
	struct ag71xx_ring *ring = &ag->rx_ring;
	struct ag71xx_desc *desc = ag71xx_ring_desc(ring, buf - &ring->buf[0]);
	void *data;

	data = alloc(ag71xx_buffer_size(ag));
	if (!data)
		return false;

	buf->rx_buf = data;
	buf->dma_addr = dma_map_single(&ag->dev->dev, data, ag->rx_buf_size,
				       DMA_FROM_DEVICE);
	desc->data = (u32) buf->dma_addr + offset;
	return true;
}
static int ag71xx_ring_rx_init(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->rx_ring;
	int ring_size = BIT(ring->order);
	int ring_mask = BIT(ring->order) - 1;
	unsigned int i;
	int ret;
	int offset = ag71xx_buffer_offset(ag);

	ret = 0;
	for (i = 0; i < ring_size; i++) {
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);

		desc->next = (u32) (ring->descs_dma +
			AG71XX_DESC_SIZE * ((i + 1) & ring_mask));

		DBG("ag71xx: RX desc at %p, next is %08x\n",
		    desc, desc->next);
	}

	for (i = 0; i < ring_size; i++) {
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);

		if (!ag71xx_fill_rx_buf(ag, &ring->buf[i], offset,
					netdev_alloc_frag)) {
			ret = -ENOMEM;
			break;
		}

		desc->ctrl = DESC_EMPTY;
	}

	/* flush descriptors */
	wmb();

	ring->curr = 0;
	ring->dirty = 0;

	return ret;
}
static int ag71xx_ring_rx_refill(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->rx_ring;
	int ring_mask = BIT(ring->order) - 1;
	unsigned int count;
	int offset = ag71xx_buffer_offset(ag);

	count = 0;
	for (; ring->curr - ring->dirty > 0; ring->dirty++) {
		struct ag71xx_desc *desc;
		unsigned int i;

		i = ring->dirty & ring_mask;
		desc = ag71xx_ring_desc(ring, i);

		if (!ring->buf[i].rx_buf &&
		    !ag71xx_fill_rx_buf(ag, &ring->buf[i], offset,
					napi_alloc_frag))
			break;

		desc->ctrl = DESC_EMPTY;
		count++;
	}

	/* flush descriptors */
	wmb();

	DBG("%s: %u rx descriptors refilled\n", ag->dev->name, count);

	return count;
}
static int ag71xx_rings_init(struct ag71xx *ag)
{
	struct ag71xx_ring *tx = &ag->tx_ring;
	struct ag71xx_ring *rx = &ag->rx_ring;
	int ring_size = BIT(tx->order) + BIT(rx->order);
	int tx_size = BIT(tx->order);

	tx->buf = kzalloc(ring_size * sizeof(*tx->buf), GFP_KERNEL);
	if (!tx->buf)
		return -ENOMEM;

	tx->descs_cpu = dma_alloc_coherent(NULL, ring_size * AG71XX_DESC_SIZE,
					   &tx->descs_dma, GFP_ATOMIC);
	if (!tx->descs_cpu) {
		kfree(tx->buf);
		tx->buf = NULL;
		return -ENOMEM;
	}

	rx->buf = &tx->buf[BIT(tx->order)];
	rx->descs_cpu = ((void *)tx->descs_cpu) + tx_size * AG71XX_DESC_SIZE;
	rx->descs_dma = tx->descs_dma + tx_size * AG71XX_DESC_SIZE;

	ag71xx_ring_tx_init(ag);
	return ag71xx_ring_rx_init(ag);
}
static void ag71xx_rings_free(struct ag71xx *ag)
{
	struct ag71xx_ring *tx = &ag->tx_ring;
	struct ag71xx_ring *rx = &ag->rx_ring;
	int ring_size = BIT(tx->order) + BIT(rx->order);

	if (tx->descs_cpu)
		dma_free_coherent(NULL, ring_size * AG71XX_DESC_SIZE,
				  tx->descs_cpu, tx->descs_dma);

	kfree(tx->buf);

	tx->descs_cpu = NULL;
	rx->descs_cpu = NULL;
	tx->buf = NULL;
	rx->buf = NULL;
}
static void ag71xx_rings_cleanup(struct ag71xx *ag)
{
	ag71xx_ring_rx_clean(ag);
	ag71xx_ring_tx_clean(ag);
	ag71xx_rings_free(ag);

	netdev_reset_queue(ag->dev);
}
static unsigned char *ag71xx_speed_str(struct ag71xx *ag)
{
	switch (ag->speed) {
	case SPEED_1000:
		return "1000";
	case SPEED_100:
		return "100";
	case SPEED_10:
		return "10";
	}

	return "?";
}
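/*
 * The hardware stores the station address across two registers: MAC_ADDR1
 * holds bytes 2-5 and MAC_ADDR2 bytes 0-1 (in its upper half). E.g. for
 * 00:11:22:33:44:55, MAC_ADDR1 = 0x55443322 and MAC_ADDR2 = 0x11000000.
 */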
static void ag71xx_hw_set_macaddr(struct ag71xx *ag, unsigned char *mac)
{
	u32 t;

	t = (((u32) mac[5]) << 24) | (((u32) mac[4]) << 16)
	  | (((u32) mac[3]) << 8) | ((u32) mac[2]);

	ag71xx_wr(ag, AG71XX_REG_MAC_ADDR1, t);

	t = (((u32) mac[1]) << 24) | (((u32) mac[0]) << 16);
	ag71xx_wr(ag, AG71XX_REG_MAC_ADDR2, t);
}
static void ag71xx_dma_reset(struct ag71xx *ag)
{
	u32 val;
	int i;

	ag71xx_dump_dma_regs(ag);

	/* stop RX and TX */
	ag71xx_wr(ag, AG71XX_REG_RX_CTRL, 0);
	ag71xx_wr(ag, AG71XX_REG_TX_CTRL, 0);

	/*
	 * give the hardware some time to really stop all rx/tx activity
	 * clearing the descriptors too early causes random memory corruption
	 */
	mdelay(1);

	/* clear descriptor addresses */
	ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->stop_desc_dma);
	ag71xx_wr(ag, AG71XX_REG_RX_DESC, ag->stop_desc_dma);

	/* clear pending RX/TX interrupts */
	for (i = 0; i < 256; i++) {
		ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_PR);
		ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_PS);
	}

	/* clear pending errors */
	ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_BE | RX_STATUS_OF);
	ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_BE | TX_STATUS_UR);

	val = ag71xx_rr(ag, AG71XX_REG_RX_STATUS);
	if (val)
		pr_alert("%s: unable to clear DMA Rx status: %08x\n",
			 ag->dev->name, val);

	val = ag71xx_rr(ag, AG71XX_REG_TX_STATUS);

	/* mask out reserved bits */
	val &= ~0xff000000;

	if (val)
		pr_alert("%s: unable to clear DMA Tx status: %08x\n",
			 ag->dev->name, val);

	ag71xx_dump_dma_regs(ag);
}
#define MAC_CFG1_INIT	(MAC_CFG1_RXE | MAC_CFG1_TXE | \
			 MAC_CFG1_SRX | MAC_CFG1_STX)

#define FIFO_CFG0_INIT	(FIFO_CFG0_ALL << FIFO_CFG0_ENABLE_SHIFT)

#define FIFO_CFG4_INIT	(FIFO_CFG4_DE | FIFO_CFG4_DV | FIFO_CFG4_FC | \
			 FIFO_CFG4_CE | FIFO_CFG4_CR | FIFO_CFG4_LM | \
			 FIFO_CFG4_LO | FIFO_CFG4_OK | FIFO_CFG4_MC | \
			 FIFO_CFG4_BC | FIFO_CFG4_DR | FIFO_CFG4_LE | \
			 FIFO_CFG4_CF | FIFO_CFG4_PF | FIFO_CFG4_UO | \
			 FIFO_CFG4_VT)

#define FIFO_CFG5_INIT	(FIFO_CFG5_DE | FIFO_CFG5_DV | FIFO_CFG5_FC | \
			 FIFO_CFG5_CE | FIFO_CFG5_LO | FIFO_CFG5_OK | \
			 FIFO_CFG5_MC | FIFO_CFG5_BC | FIFO_CFG5_DR | \
			 FIFO_CFG5_CF | FIFO_CFG5_PF | FIFO_CFG5_VT | \
			 FIFO_CFG5_LE | FIFO_CFG5_FT | FIFO_CFG5_16 | \
			 FIFO_CFG5_17 | FIFO_CFG5_SF)
static void ag71xx_hw_stop(struct ag71xx *ag)
{
	/* disable all interrupts and stop the rx/tx engine */
	ag71xx_wr(ag, AG71XX_REG_INT_ENABLE, 0);
	ag71xx_wr(ag, AG71XX_REG_RX_CTRL, 0);
	ag71xx_wr(ag, AG71XX_REG_TX_CTRL, 0);
}
static void ag71xx_hw_setup(struct ag71xx *ag)
{
	struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
	u32 init = MAC_CFG1_INIT;

	/* setup MAC configuration registers */
	if (pdata->use_flow_control)
		init |= MAC_CFG1_TFC | MAC_CFG1_RFC;
	ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, init);

	ag71xx_sb(ag, AG71XX_REG_MAC_CFG2,
		  MAC_CFG2_PAD_CRC_EN | MAC_CFG2_LEN_CHECK);

	/* setup max frame length to zero */
	ag71xx_wr(ag, AG71XX_REG_MAC_MFL, 0);

	/* setup FIFO configuration registers */
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG0, FIFO_CFG0_INIT);
	if (pdata->is_ar724x) {
		ag71xx_wr(ag, AG71XX_REG_FIFO_CFG1, pdata->fifo_cfg1);
		ag71xx_wr(ag, AG71XX_REG_FIFO_CFG2, pdata->fifo_cfg2);
	} else {
		ag71xx_wr(ag, AG71XX_REG_FIFO_CFG1, 0x0fff0000);
		ag71xx_wr(ag, AG71XX_REG_FIFO_CFG2, 0x00001fff);
	}
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG4, FIFO_CFG4_INIT);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG5, FIFO_CFG5_INIT);
}
static void ag71xx_hw_init(struct ag71xx *ag)
{
	struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
	u32 reset_mask = pdata->reset_bit;

	ag71xx_hw_stop(ag);

	if (pdata->is_ar724x) {
		u32 reset_phy = reset_mask;

		reset_phy &= AR71XX_RESET_GE0_PHY | AR71XX_RESET_GE1_PHY;
		reset_mask &= ~(AR71XX_RESET_GE0_PHY | AR71XX_RESET_GE1_PHY);

		ath79_device_reset_set(reset_phy);
		msleep(50);
		ath79_device_reset_clear(reset_phy);
		msleep(200);
	}

	ag71xx_sb(ag, AG71XX_REG_MAC_CFG1, MAC_CFG1_SR);
	udelay(20);

	ath79_device_reset_set(reset_mask);
	msleep(100);
	ath79_device_reset_clear(reset_mask);
	msleep(200);

	ag71xx_hw_setup(ag);

	ag71xx_dma_reset(ag);
}
static void ag71xx_fast_reset(struct ag71xx *ag)
{
	struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
	struct net_device *dev = ag->dev;
	u32 reset_mask = pdata->reset_bit;
	u32 rx_ds;
	u32 mii_reg;

	reset_mask &= AR71XX_RESET_GE0_MAC | AR71XX_RESET_GE1_MAC;

	ag71xx_hw_stop(ag);
	wmb();

	mii_reg = ag71xx_rr(ag, AG71XX_REG_MII_CFG);
	rx_ds = ag71xx_rr(ag, AG71XX_REG_RX_DESC);

	ag71xx_tx_packets(ag, true);

	ath79_device_reset_set(reset_mask);
	udelay(10);
	ath79_device_reset_clear(reset_mask);
	udelay(10);

	ag71xx_dma_reset(ag);
	ag71xx_hw_setup(ag);
	ag->tx_ring.curr = 0;
	ag->tx_ring.dirty = 0;
	netdev_reset_queue(ag->dev);

	/* setup max frame length */
	ag71xx_wr(ag, AG71XX_REG_MAC_MFL,
		  ag71xx_max_frame_len(ag->dev->mtu));

	ag71xx_wr(ag, AG71XX_REG_RX_DESC, rx_ds);
	ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->tx_ring.descs_dma);
	ag71xx_wr(ag, AG71XX_REG_MII_CFG, mii_reg);

	ag71xx_hw_set_macaddr(ag, dev->dev_addr);
}
static void ag71xx_hw_start(struct ag71xx *ag)
{
	/* start RX engine */
	ag71xx_wr(ag, AG71XX_REG_RX_CTRL, RX_CTRL_RXE);

	/* enable interrupts */
	ag71xx_wr(ag, AG71XX_REG_INT_ENABLE, AG71XX_INT_INIT);

	netif_wake_queue(ag->dev);
}
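/*
 * Reprogram the MAC for the (re)negotiated link parameters: MAC_CFG2
 * selects the interface mode and duplex, MAC_IFCTL carries the 10/100
 * speed bit, and FIFO_CFG5 byte mode is needed for gigabit. On AR724x
 * the MAC is fast-reset before these registers are rewritten.
 */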
static void
__ag71xx_link_adjust(struct ag71xx *ag, bool update)
{
	struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
	u32 cfg2;
	u32 ifctl;
	u32 fifo5;
	u32 fifo3;

	if (!ag->link && update) {
		ag71xx_hw_stop(ag);
		netif_carrier_off(ag->dev);
		if (netif_msg_link(ag))
			pr_info("%s: link down\n", ag->dev->name);
		return;
	}

	if (pdata->is_ar724x)
		ag71xx_fast_reset(ag);

	cfg2 = ag71xx_rr(ag, AG71XX_REG_MAC_CFG2);
	cfg2 &= ~(MAC_CFG2_IF_1000 | MAC_CFG2_IF_10_100 | MAC_CFG2_FDX);
	cfg2 |= (ag->duplex) ? MAC_CFG2_FDX : 0;

	ifctl = ag71xx_rr(ag, AG71XX_REG_MAC_IFCTL);
	ifctl &= ~(MAC_IFCTL_SPEED);

	fifo5 = ag71xx_rr(ag, AG71XX_REG_FIFO_CFG5);
	fifo5 &= ~FIFO_CFG5_BM;

	switch (ag->speed) {
	case SPEED_1000:
		cfg2 |= MAC_CFG2_IF_1000;
		fifo5 |= FIFO_CFG5_BM;
		break;
	case SPEED_100:
		cfg2 |= MAC_CFG2_IF_10_100;
		ifctl |= MAC_IFCTL_SPEED;
		break;
	case SPEED_10:
		cfg2 |= MAC_CFG2_IF_10_100;
		break;
	default:
		BUG();
		return;
	}

	if (pdata->is_ar91xx)
		fifo3 = 0x00780fff;
	else if (pdata->is_ar724x)
		fifo3 = pdata->fifo_cfg3;
	else
		fifo3 = 0x008001ff;

	if (ag->tx_ring.desc_split) {
		fifo3 &= 0xffff;
		fifo3 |= ((2048 - ag->tx_ring.desc_split) / 4) << 16;
	}

	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG3, fifo3);

	if (update && pdata->set_speed)
		pdata->set_speed(ag->speed);

	ag71xx_wr(ag, AG71XX_REG_MAC_CFG2, cfg2);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG5, fifo5);
	ag71xx_wr(ag, AG71XX_REG_MAC_IFCTL, ifctl);

	if (pdata->disable_inline_checksum_engine) {
		/*
		 * The rx ring buffer can stall on small packets on QCA953x and
		 * QCA956x. Disabling the inline checksum engine fixes the stall.
		 * The wr, rr functions cannot be used since this hidden register
		 * is outside of the normal ag71xx register block.
		 */
		void __iomem *dam = ioremap_nocache(0xb90001bc, 0x4);

		if (dam) {
			__raw_writel(__raw_readl(dam) & ~BIT(27), dam);
			(void)__raw_readl(dam);
			iounmap(dam);
		}
	}

	ag71xx_hw_start(ag);

	netif_carrier_on(ag->dev);
	if (update && netif_msg_link(ag))
		pr_info("%s: link up (%sMbps/%s duplex)\n",
			ag->dev->name,
			ag71xx_speed_str(ag),
			(DUPLEX_FULL == ag->duplex) ? "Full" : "Half");

	DBG("%s: fifo_cfg0=%#x, fifo_cfg1=%#x, fifo_cfg2=%#x\n",
	    ag->dev->name,
	    ag71xx_rr(ag, AG71XX_REG_FIFO_CFG0),
	    ag71xx_rr(ag, AG71XX_REG_FIFO_CFG1),
	    ag71xx_rr(ag, AG71XX_REG_FIFO_CFG2));

	DBG("%s: fifo_cfg3=%#x, fifo_cfg4=%#x, fifo_cfg5=%#x\n",
	    ag->dev->name,
	    ag71xx_rr(ag, AG71XX_REG_FIFO_CFG3),
	    ag71xx_rr(ag, AG71XX_REG_FIFO_CFG4),
	    ag71xx_rr(ag, AG71XX_REG_FIFO_CFG5));

	DBG("%s: mac_cfg2=%#x, mac_ifctl=%#x\n",
	    ag->dev->name,
	    ag71xx_rr(ag, AG71XX_REG_MAC_CFG2),
	    ag71xx_rr(ag, AG71XX_REG_MAC_IFCTL));
}
void ag71xx_link_adjust(struct ag71xx *ag)
{
	__ag71xx_link_adjust(ag, true);
}
static int ag71xx_hw_enable(struct ag71xx *ag)
{
	int ret;

	ret = ag71xx_rings_init(ag);
	if (ret)
		return ret;

	napi_enable(&ag->napi);
	ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->tx_ring.descs_dma);
	ag71xx_wr(ag, AG71XX_REG_RX_DESC, ag->rx_ring.descs_dma);
	netif_start_queue(ag->dev);

	return 0;
}
static void ag71xx_hw_disable(struct ag71xx *ag)
{
	unsigned long flags;

	spin_lock_irqsave(&ag->lock, flags);

	netif_stop_queue(ag->dev);

	ag71xx_hw_stop(ag);
	ag71xx_dma_reset(ag);

	napi_disable(&ag->napi);
	del_timer_sync(&ag->oom_timer);

	spin_unlock_irqrestore(&ag->lock, flags);

	ag71xx_rings_cleanup(ag);
}
static int ag71xx_open(struct net_device *dev)
{
	struct ag71xx *ag = netdev_priv(dev);
	unsigned int max_frame_len;
	int ret;

	netif_carrier_off(dev);
	max_frame_len = ag71xx_max_frame_len(dev->mtu);
	ag->rx_buf_size = SKB_DATA_ALIGN(max_frame_len + NET_SKB_PAD + NET_IP_ALIGN);

	/* setup max frame length */
	ag71xx_wr(ag, AG71XX_REG_MAC_MFL, max_frame_len);
	ag71xx_hw_set_macaddr(ag, dev->dev_addr);

	ret = ag71xx_hw_enable(ag);
	if (ret)
		goto err;

	ag71xx_phy_start(ag);

	return 0;

err:
	ag71xx_rings_cleanup(ag);
	return ret;
}
static int ag71xx_stop(struct net_device *dev)
{
	struct ag71xx *ag = netdev_priv(dev);

	netif_carrier_off(dev);
	ag71xx_phy_stop(ag);
	ag71xx_hw_disable(ag);

	return 0;
}
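/*
 * Worked example of descriptor splitting, assuming a 512-byte split limit
 * (the AG71XX_TX_RING_SPLIT value used for illustration here): a 1300-byte
 * frame is spread over three descriptors of 512, 512 and 276 bytes, the
 * first two marked DESC_MORE. A 1540-byte frame becomes 512 + 512 + 508 + 8,
 * the third segment shortened by 4 bytes so that the final DMA transfer
 * stays above the 4-byte minimum noted in the comment below.
 */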
static int ag71xx_fill_dma_desc(struct ag71xx_ring *ring, u32 addr, int len)
{
	int i;
	struct ag71xx_desc *desc;
	int ring_mask = BIT(ring->order) - 1;
	int ndesc = 0;
	int split = ring->desc_split;

	if (!split)
		split = len;

	while (len > 0) {
		unsigned int cur_len = len;

		i = (ring->curr + ndesc) & ring_mask;
		desc = ag71xx_ring_desc(ring, i);

		if (!ag71xx_desc_empty(desc))
			return -1;

		if (cur_len > split) {
			cur_len = split;

			/*
			 * TX will hang if DMA transfers <= 4 bytes,
			 * make sure next segment is more than 4 bytes long.
			 */
			if (len <= split + 4)
				cur_len -= 4;
		}

		desc->data = addr;
		addr += cur_len;
		len -= cur_len;

		if (len > 0)
			cur_len |= DESC_MORE;

		/* prevent early tx attempt of this descriptor */
		if (!ndesc)
			cur_len |= DESC_EMPTY;

		desc->ctrl = cur_len;
		ndesc++;
	}

	return ndesc;
}
static netdev_tx_t ag71xx_hard_start_xmit(struct sk_buff *skb,
					  struct net_device *dev)
{
	struct ag71xx *ag = netdev_priv(dev);
	struct ag71xx_ring *ring = &ag->tx_ring;
	int ring_mask = BIT(ring->order) - 1;
	int ring_size = BIT(ring->order);
	struct ag71xx_desc *desc;
	dma_addr_t dma_addr;
	int i, n, ring_min;

	if (ag71xx_has_ar8216(ag))
		ag71xx_add_ar8216_header(ag, skb);

	if (skb->len <= 4) {
		DBG("%s: packet len is too small\n", ag->dev->name);
		goto err_drop;
	}

	dma_addr = dma_map_single(&dev->dev, skb->data, skb->len,
				  DMA_TO_DEVICE);

	i = ring->curr & ring_mask;
	desc = ag71xx_ring_desc(ring, i);

	/* setup descriptor fields */
	n = ag71xx_fill_dma_desc(ring, (u32) dma_addr, skb->len & ag->desc_pktlen_mask);
	if (n < 0)
		goto err_drop_unmap;

	i = (ring->curr + n - 1) & ring_mask;
	ring->buf[i].len = skb->len;
	ring->buf[i].skb = skb;
	ring->buf[i].timestamp = jiffies;

	netdev_sent_queue(dev, skb->len);

	skb_tx_timestamp(skb);

	desc->ctrl &= ~DESC_EMPTY;
	ring->curr += n;

	/* flush descriptor */
	wmb();

	ring_min = 2;
	if (ring->desc_split)
		ring_min *= AG71XX_TX_RING_DS_PER_PKT;

	if (ring->curr - ring->dirty >= ring_size - ring_min) {
		DBG("%s: tx queue full\n", dev->name);
		netif_stop_queue(dev);
	}

	DBG("%s: packet injected into TX queue\n", ag->dev->name);

	/* enable TX engine */
	ag71xx_wr(ag, AG71XX_REG_TX_CTRL, TX_CTRL_TXE);

	return NETDEV_TX_OK;

err_drop_unmap:
	dma_unmap_single(&dev->dev, dma_addr, skb->len, DMA_TO_DEVICE);

err_drop:
	dev->stats.tx_dropped++;

	kfree_skb(skb);
	return NETDEV_TX_OK;
}
static int ag71xx_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct ag71xx *ag = netdev_priv(dev);
	int ret;

	switch (cmd) {
	case SIOCETHTOOL:
		if (ag->phy_dev == NULL)
			break;

		spin_lock_irq(&ag->lock);
		ret = phy_ethtool_ioctl(ag->phy_dev, (void *) ifr->ifr_data);
		spin_unlock_irq(&ag->lock);
		return ret;

	case SIOCSIFHWADDR:
		if (copy_from_user
			(dev->dev_addr, ifr->ifr_data, sizeof(dev->dev_addr)))
			return -EFAULT;
		return 0;

	case SIOCGIFHWADDR:
		if (copy_to_user
			(ifr->ifr_data, dev->dev_addr, sizeof(dev->dev_addr)))
			return -EFAULT;
		return 0;

	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		if (ag->phy_dev == NULL)
			break;

		return phy_mii_ioctl(ag->phy_dev, ifr, cmd);

	default:
		break;
	}

	return -EOPNOTSUPP;
}
static void ag71xx_oom_timer_handler(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct ag71xx *ag = netdev_priv(dev);

	napi_schedule(&ag->napi);
}
static void ag71xx_tx_timeout(struct net_device *dev)
{
	struct ag71xx *ag = netdev_priv(dev);

	if (netif_msg_tx_err(ag))
		pr_info("%s: tx timeout\n", ag->dev->name);

	schedule_delayed_work(&ag->restart_work, 1);
}
static void ag71xx_restart_work_func(struct work_struct *work)
{
	struct ag71xx *ag = container_of(work, struct ag71xx, restart_work.work);

	rtnl_lock();
	ag71xx_hw_disable(ag);
	ag71xx_hw_enable(ag);
	if (ag->link)
		__ag71xx_link_adjust(ag, false);
	rtnl_unlock();
}
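/*
 * Heuristic for a wedged DMA engine on AR724x: once a descriptor has been
 * pending for more than HZ/10 jiffies with carrier up, the RX/TX
 * state-machine and FIFO depth registers are compared against the two
 * register patterns that identify a stuck engine, and a restart is
 * scheduled by the caller if either matches.
 */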
static bool ag71xx_check_dma_stuck(struct ag71xx *ag, unsigned long timestamp)
{
	u32 rx_sm, tx_sm, rx_fd;

	if (likely(time_before(jiffies, timestamp + HZ/10)))
		return false;

	if (!netif_carrier_ok(ag->dev))
		return false;

	rx_sm = ag71xx_rr(ag, AG71XX_REG_RX_SM);
	if ((rx_sm & 0x7) == 0x3 && ((rx_sm >> 4) & 0x7) == 0x6)
		return true;

	tx_sm = ag71xx_rr(ag, AG71XX_REG_TX_SM);
	rx_fd = ag71xx_rr(ag, AG71XX_REG_FIFO_DEPTH);
	if (((tx_sm >> 4) & 0x7) == 0 && ((rx_sm & 0x7) == 0) &&
	    ((rx_sm >> 4) & 0x7) == 0 && rx_fd == 0)
		return true;

	return false;
}
static int ag71xx_tx_packets(struct ag71xx *ag, bool flush)
{
	struct ag71xx_ring *ring = &ag->tx_ring;
	struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
	bool dma_stuck = false;
	int ring_mask = BIT(ring->order) - 1;
	int ring_size = BIT(ring->order);
	int sent = 0;
	int bytes_compl = 0;
	int n = 0;

	DBG("%s: processing TX ring\n", ag->dev->name);

	while (ring->dirty + n != ring->curr) {
		unsigned int i = (ring->dirty + n) & ring_mask;
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);
		struct sk_buff *skb = ring->buf[i].skb;

		if (!flush && !ag71xx_desc_empty(desc)) {
			if (pdata->is_ar724x &&
			    ag71xx_check_dma_stuck(ag, ring->buf[i].timestamp)) {
				schedule_delayed_work(&ag->restart_work, HZ / 2);
				dma_stuck = true;
			}
			break;
		}

		if (flush)
			desc->ctrl |= DESC_EMPTY;

		n++;
		if (!skb)
			continue;

		dev_kfree_skb_any(skb);
		ring->buf[i].skb = NULL;

		bytes_compl += ring->buf[i].len;
		sent++;
		ring->dirty += n;

		while (n > 0) {
			ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_PS);
			n--;
		}
	}

	DBG("%s: %d packets sent out\n", ag->dev->name, sent);

	ag->dev->stats.tx_bytes += bytes_compl;
	ag->dev->stats.tx_packets += sent;

	if (!sent)
		return 0;

	netdev_completed_queue(ag->dev, sent, bytes_compl);
	if ((ring->curr - ring->dirty) < (ring_size * 3) / 4)
		netif_wake_queue(ag->dev);

	if (!dma_stuck)
		cancel_delayed_work(&ag->restart_work);

	return sent;
}
static int ag71xx_rx_packets(struct ag71xx *ag, int limit)
{
	struct net_device *dev = ag->dev;
	struct ag71xx_ring *ring = &ag->rx_ring;
	int offset = ag71xx_buffer_offset(ag);
	unsigned int pktlen_mask = ag->desc_pktlen_mask;
	int ring_mask = BIT(ring->order) - 1;
	int ring_size = BIT(ring->order);
	struct sk_buff_head queue;
	struct sk_buff *skb;
	int done = 0;

	DBG("%s: rx packets, limit=%d, curr=%u, dirty=%u\n",
	    dev->name, limit, ring->curr, ring->dirty);

	skb_queue_head_init(&queue);

	while (done < limit) {
		unsigned int i = ring->curr & ring_mask;
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);
		int pktlen;
		int err = 0;

		if (ag71xx_desc_empty(desc))
			break;

		if ((ring->dirty + ring_size) == ring->curr) {
			ag71xx_assert(0);
			break;
		}

		ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_PR);

		pktlen = desc->ctrl & pktlen_mask;
		pktlen -= ETH_FCS_LEN;

		dma_unmap_single(&dev->dev, ring->buf[i].dma_addr,
				 ag->rx_buf_size, DMA_FROM_DEVICE);

		dev->stats.rx_packets++;
		dev->stats.rx_bytes += pktlen;

		skb = build_skb(ring->buf[i].rx_buf, ag71xx_buffer_size(ag));
		if (!skb) {
			skb_free_frag(ring->buf[i].rx_buf);
			goto next;
		}

		skb_reserve(skb, offset);
		skb_put(skb, pktlen);

		if (ag71xx_has_ar8216(ag))
			err = ag71xx_remove_ar8216_header(ag, skb, pktlen);

		if (err) {
			dev->stats.rx_dropped++;
			kfree_skb(skb);
		} else {
			skb->dev = dev;
			skb->ip_summed = CHECKSUM_NONE;
			__skb_queue_tail(&queue, skb);
		}

next:
		ring->buf[i].rx_buf = NULL;
		done++;

		ring->curr++;
	}

	ag71xx_ring_rx_refill(ag);

	while ((skb = __skb_dequeue(&queue)) != NULL) {
		skb->protocol = eth_type_trans(skb, dev);
		netif_receive_skb(skb);
	}

	DBG("%s: rx finish, curr=%u, dirty=%u, done=%d\n",
	    dev->name, ring->curr, ring->dirty, done);

	return done;
}
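/*
 * NAPI poll handler. Interrupt and poll mode hand off to each other:
 * ag71xx_interrupt() masks AG71XX_INT_POLL and schedules this handler;
 * once a poll round completes under budget with no further RX/TX status
 * pending, interrupts are re-enabled and polling stops. If the rx ring
 * could not be refilled, the oom timer reschedules NAPI later instead.
 */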
static int ag71xx_poll(struct napi_struct *napi, int limit)
{
	struct ag71xx *ag = container_of(napi, struct ag71xx, napi);
	struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
	struct net_device *dev = ag->dev;
	struct ag71xx_ring *rx_ring = &ag->rx_ring;
	int rx_ring_size = BIT(rx_ring->order);
	unsigned long flags;
	u32 status;
	int tx_done;
	int rx_done;

	pdata->ddr_flush();
	tx_done = ag71xx_tx_packets(ag, false);

	DBG("%s: processing RX ring\n", dev->name);
	rx_done = ag71xx_rx_packets(ag, limit);

	ag71xx_debugfs_update_napi_stats(ag, rx_done, tx_done);

	if (rx_ring->buf[rx_ring->dirty % rx_ring_size].rx_buf == NULL)
		goto oom;

	status = ag71xx_rr(ag, AG71XX_REG_RX_STATUS);
	if (unlikely(status & RX_STATUS_OF)) {
		ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_OF);
		dev->stats.rx_fifo_errors++;

		/* restart RX */
		ag71xx_wr(ag, AG71XX_REG_RX_CTRL, RX_CTRL_RXE);
	}

	if (rx_done < limit) {
		if (status & RX_STATUS_PR)
			goto more;

		status = ag71xx_rr(ag, AG71XX_REG_TX_STATUS);
		if (status & TX_STATUS_PS)
			goto more;

		DBG("%s: disable polling mode, rx=%d, tx=%d,limit=%d\n",
		    dev->name, rx_done, tx_done, limit);

		napi_complete(napi);

		/* enable interrupts */
		spin_lock_irqsave(&ag->lock, flags);
		ag71xx_int_enable(ag, AG71XX_INT_POLL);
		spin_unlock_irqrestore(&ag->lock, flags);
		return rx_done;
	}

more:
	DBG("%s: stay in polling mode, rx=%d, tx=%d, limit=%d\n",
	    dev->name, rx_done, tx_done, limit);
	return limit;

oom:
	if (netif_msg_rx_err(ag))
		pr_info("%s: out of memory\n", dev->name);

	mod_timer(&ag->oom_timer, jiffies + AG71XX_OOM_REFILL);
	napi_complete(napi);
	return 0;
}
static irqreturn_t ag71xx_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct ag71xx *ag = netdev_priv(dev);
	u32 status;

	status = ag71xx_rr(ag, AG71XX_REG_INT_STATUS);
	ag71xx_dump_intr(ag, "raw", status);

	if (unlikely(!status))
		return IRQ_NONE;

	if (unlikely(status & AG71XX_INT_ERR)) {
		if (status & AG71XX_INT_TX_BE) {
			ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_BE);
			dev_err(&dev->dev, "TX BUS error\n");
		}
		if (status & AG71XX_INT_RX_BE) {
			ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_BE);
			dev_err(&dev->dev, "RX BUS error\n");
		}
	}

	if (likely(status & AG71XX_INT_POLL)) {
		ag71xx_int_disable(ag, AG71XX_INT_POLL);
		DBG("%s: enable polling mode\n", dev->name);
		napi_schedule(&ag->napi);
	}

	ag71xx_debugfs_update_int_stats(ag, status);

	return IRQ_HANDLED;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void ag71xx_netpoll(struct net_device *dev)
{
	disable_irq(dev->irq);
	ag71xx_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif
static int ag71xx_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ag71xx *ag = netdev_priv(dev);
	unsigned int max_frame_len;

	max_frame_len = ag71xx_max_frame_len(new_mtu);
	if (new_mtu < 68 || max_frame_len > ag->max_frame_len)
		return -EINVAL;

	if (netif_running(dev))
		return -EBUSY;

	dev->mtu = new_mtu;
	return 0;
}
static const struct net_device_ops ag71xx_netdev_ops = {
	.ndo_open		= ag71xx_open,
	.ndo_stop		= ag71xx_stop,
	.ndo_start_xmit		= ag71xx_hard_start_xmit,
	.ndo_do_ioctl		= ag71xx_do_ioctl,
	.ndo_tx_timeout		= ag71xx_tx_timeout,
	.ndo_change_mtu		= ag71xx_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ag71xx_netpoll,
#endif
};
static const char *ag71xx_get_phy_if_mode_name(phy_interface_t mode)
{
	switch (mode) {
	case PHY_INTERFACE_MODE_MII:
		return "MII";
	case PHY_INTERFACE_MODE_GMII:
		return "GMII";
	case PHY_INTERFACE_MODE_RMII:
		return "RMII";
	case PHY_INTERFACE_MODE_RGMII:
		return "RGMII";
	case PHY_INTERFACE_MODE_SGMII:
		return "SGMII";
	default:
		break;
	}

	return "unknown";
}
static int ag71xx_probe(struct platform_device *pdev)
{
	struct net_device *dev;
	struct resource *res;
	struct ag71xx *ag;
	struct ag71xx_platform_data *pdata;
	int tx_size;
	int err;

	pdata = pdev->dev.platform_data;
	if (!pdata) {
		dev_err(&pdev->dev, "no platform data specified\n");
		err = -ENXIO;
		goto err_out;
	}

	if (pdata->mii_bus_dev == NULL && pdata->phy_mask) {
		dev_err(&pdev->dev, "no MII bus device specified\n");
		err = -EINVAL;
		goto err_out;
	}

	dev = alloc_etherdev(sizeof(*ag));
	if (!dev) {
		dev_err(&pdev->dev, "alloc_etherdev failed\n");
		err = -ENOMEM;
		goto err_out;
	}

	if (!pdata->max_frame_len || !pdata->desc_pktlen_mask) {
		err = -EINVAL;
		goto err_free_dev;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

	ag = netdev_priv(dev);
	ag->pdev = pdev;
	ag->dev = dev;
	ag->msg_enable = netif_msg_init(ag71xx_msg_level,
					AG71XX_DEFAULT_MSG_ENABLE);
	spin_lock_init(&ag->lock);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mac_base");
	if (!res) {
		dev_err(&pdev->dev, "no mac_base resource found\n");
		err = -ENXIO;
		goto err_free_dev;
	}

	ag->mac_base = ioremap_nocache(res->start, res->end - res->start + 1);
	if (!ag->mac_base) {
		dev_err(&pdev->dev, "unable to ioremap mac_base\n");
		err = -ENOMEM;
		goto err_free_dev;
	}

	dev->irq = platform_get_irq(pdev, 0);
	err = request_irq(dev->irq, ag71xx_interrupt, 0, dev->name, dev);
	if (err) {
		dev_err(&pdev->dev, "unable to request IRQ %d\n", dev->irq);
		goto err_unmap_base;
	}

	dev->base_addr = (unsigned long)ag->mac_base;
	dev->netdev_ops = &ag71xx_netdev_ops;
	dev->ethtool_ops = &ag71xx_ethtool_ops;

	INIT_DELAYED_WORK(&ag->restart_work, ag71xx_restart_work_func);

	init_timer(&ag->oom_timer);
	ag->oom_timer.data = (unsigned long) dev;
	ag->oom_timer.function = ag71xx_oom_timer_handler;

	tx_size = AG71XX_TX_RING_SIZE_DEFAULT;
	ag->rx_ring.order = ag71xx_ring_size_order(AG71XX_RX_RING_SIZE_DEFAULT);

	ag->max_frame_len = pdata->max_frame_len;
	ag->desc_pktlen_mask = pdata->desc_pktlen_mask;

	if (!pdata->is_ar724x && !pdata->is_ar91xx) {
		ag->tx_ring.desc_split = AG71XX_TX_RING_SPLIT;
		tx_size *= AG71XX_TX_RING_DS_PER_PKT;
	}
	ag->tx_ring.order = ag71xx_ring_size_order(tx_size);

	ag->stop_desc = dma_alloc_coherent(NULL,
					   sizeof(struct ag71xx_desc),
					   &ag->stop_desc_dma, GFP_KERNEL);
	if (!ag->stop_desc) {
		err = -ENOMEM;
		goto err_free_irq;
	}

	ag->stop_desc->data = 0;
	ag->stop_desc->ctrl = 0;
	ag->stop_desc->next = (u32) ag->stop_desc_dma;

	memcpy(dev->dev_addr, pdata->mac_addr, ETH_ALEN);

	netif_napi_add(dev, &ag->napi, ag71xx_poll, AG71XX_NAPI_WEIGHT);

	ag71xx_dump_regs(ag);

	ag71xx_hw_init(ag);

	ag71xx_dump_regs(ag);

	err = ag71xx_phy_connect(ag);
	if (err)
		goto err_free_desc;

	err = ag71xx_debugfs_init(ag);
	if (err)
		goto err_phy_disconnect;

	platform_set_drvdata(pdev, dev);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "unable to register net device\n");
		goto err_debugfs_exit;
	}

	pr_info("%s: Atheros AG71xx at 0x%08lx, irq %d, mode:%s\n",
		dev->name, dev->base_addr, dev->irq,
		ag71xx_get_phy_if_mode_name(pdata->phy_if_mode));

	return 0;

err_debugfs_exit:
	ag71xx_debugfs_exit(ag);
err_phy_disconnect:
	ag71xx_phy_disconnect(ag);
err_free_desc:
	dma_free_coherent(NULL, sizeof(struct ag71xx_desc), ag->stop_desc,
			  ag->stop_desc_dma);
err_free_irq:
	free_irq(dev->irq, dev);
err_unmap_base:
	iounmap(ag->mac_base);
err_free_dev:
	free_netdev(dev);
err_out:
	platform_set_drvdata(pdev, NULL);
	return err;
}
static int ag71xx_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);

	if (dev) {
		struct ag71xx *ag = netdev_priv(dev);

		ag71xx_debugfs_exit(ag);
		ag71xx_phy_disconnect(ag);
		unregister_netdev(dev);
		free_irq(dev->irq, dev);
		iounmap(ag->mac_base);
		free_netdev(dev);
		platform_set_drvdata(pdev, NULL);
	}

	return 0;
}
static struct platform_driver ag71xx_driver = {
	.probe		= ag71xx_probe,
	.remove		= ag71xx_remove,
	.driver = {
		.name	= AG71XX_DRV_NAME,
	},
};
static int __init ag71xx_module_init(void)
{
	int ret;

	ret = ag71xx_debugfs_root_init();
	if (ret)
		goto err_out;

	ret = ag71xx_mdio_driver_init();
	if (ret)
		goto err_debugfs_exit;

	ret = platform_driver_register(&ag71xx_driver);
	if (ret)
		goto err_mdio_exit;

	return 0;

err_mdio_exit:
	ag71xx_mdio_driver_exit();
err_debugfs_exit:
	ag71xx_debugfs_root_exit();
err_out:
	return ret;
}
static void __exit ag71xx_module_exit(void)
{
	platform_driver_unregister(&ag71xx_driver);
	ag71xx_mdio_driver_exit();
	ag71xx_debugfs_root_exit();
}
module_init(ag71xx_module_init);
module_exit(ag71xx_module_exit);

MODULE_VERSION(AG71XX_DRV_VERSION);
MODULE_AUTHOR("Gabor Juhos <juhosg@openwrt.org>");
MODULE_AUTHOR("Imre Kaloz <kaloz@openwrt.org>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" AG71XX_DRV_NAME);