2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License as published by
4 * the Free Software Foundation; version 2 of the License
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
15 * Copyright (C) 2009 John Crispin <blogic@openwrt.org>
18 #include <linux/module.h>
19 #include <linux/kernel.h>
20 #include <linux/types.h>
21 #include <linux/dma-mapping.h>
22 #include <linux/init.h>
23 #include <linux/skbuff.h>
24 #include <linux/etherdevice.h>
25 #include <linux/ethtool.h>
26 #include <linux/platform_device.h>
28 #include <ramips_eth_platform.h>
29 #include "ramips_eth.h"
31 #define TX_TIMEOUT (20 * HZ / 100)
32 #define MAX_RX_LENGTH 1600
34 #ifdef CONFIG_RALINK_RT305X
35 #include "ramips_esw.c"
37 static inline int rt305x_esw_init(void) { return 0; }
38 static inline void rt305x_esw_exit(void) { }
41 #define phys_to_bus(a) (a & 0x1FFFFFFF)
43 #ifdef CONFIG_RAMIPS_ETH_DEBUG
44 #define RADEBUG(fmt, args...) printk(KERN_DEBUG fmt, ## args)
46 #define RADEBUG(fmt, args...) do {} while (0)
49 static struct net_device
* ramips_dev
;
50 static void __iomem
*ramips_fe_base
= 0;
53 ramips_fe_wr(u32 val
, unsigned reg
)
55 __raw_writel(val
, ramips_fe_base
+ reg
);
59 ramips_fe_rr(unsigned reg
)
61 return __raw_readl(ramips_fe_base
+ reg
);
65 ramips_fe_int_disable(u32 mask
)
67 ramips_fe_wr(ramips_fe_rr(RAMIPS_FE_INT_ENABLE
) & ~mask
,
68 RAMIPS_FE_INT_ENABLE
);
70 ramips_fe_rr(RAMIPS_FE_INT_ENABLE
);
74 ramips_fe_int_enable(u32 mask
)
76 ramips_fe_wr(ramips_fe_rr(RAMIPS_FE_INT_ENABLE
) | mask
,
77 RAMIPS_FE_INT_ENABLE
);
79 ramips_fe_rr(RAMIPS_FE_INT_ENABLE
);
83 ramips_hw_set_macaddr(unsigned char *mac
)
85 ramips_fe_wr((mac
[0] << 8) | mac
[1], RAMIPS_GDMA1_MAC_ADRH
);
86 ramips_fe_wr((mac
[2] << 24) | (mac
[3] << 16) | (mac
[4] << 8) | mac
[5],
87 RAMIPS_GDMA1_MAC_ADRL
);
#ifdef CONFIG_RALINK_RT288X
/*
 * Program the RT288x MDIO configuration register from the link parameters
 * (speed / duplex / flow control) stored in the private data.
 *
 * FIX: the original OR'd RAMIPS_MDIO_CFG_TX_CLK_SKEW_200 with itself; the
 * second term was clearly meant to be the RX clock skew constant.
 */
static void
ramips_setup_mdio_cfg(struct raeth_priv *re)
{
	unsigned int mdio_cfg;

	mdio_cfg = RAMIPS_MDIO_CFG_TX_CLK_SKEW_200 |
		   RAMIPS_MDIO_CFG_RX_CLK_SKEW_200 |
		   RAMIPS_MDIO_CFG_GP1_FRC_EN;

	if (re->duplex == DUPLEX_FULL)
		mdio_cfg |= RAMIPS_MDIO_CFG_GP1_DUPLEX;

	/* NOTE(review): the tx_fc/rx_fc conditions and the speed switch were
	 * elided in the extracted source; reconstructed from the visible
	 * flag assignments — confirm against the original. */
	if (re->tx_fc)
		mdio_cfg |= RAMIPS_MDIO_CFG_GP1_FC_TX;

	if (re->rx_fc)
		mdio_cfg |= RAMIPS_MDIO_CFG_GP1_FC_RX;

	switch (re->speed) {
	case SPEED_10:
		mdio_cfg |= RAMIPS_MDIO_CFG_GP1_SPEED_10;
		break;
	case SPEED_100:
		mdio_cfg |= RAMIPS_MDIO_CFG_GP1_SPEED_100;
		break;
	case SPEED_1000:
		mdio_cfg |= RAMIPS_MDIO_CFG_GP1_SPEED_1000;
		break;
	default:
		BUG();
	}

	ramips_fe_wr(mdio_cfg, RAMIPS_MDIO_CFG);
}
#else
static inline void ramips_setup_mdio_cfg(struct raeth_priv *re)
{
}
#endif /* CONFIG_RALINK_RT288X */
132 ramips_cleanup_dma(struct raeth_priv
*re
)
136 for (i
= 0; i
< NUM_RX_DESC
; i
++)
138 dma_unmap_single(&re
->netdev
->dev
, re
->rx_dma
[i
],
139 MAX_RX_LENGTH
, DMA_FROM_DEVICE
);
140 dev_kfree_skb_any(re
->rx_skb
[i
]);
144 dma_free_coherent(&re
->netdev
->dev
,
145 NUM_RX_DESC
* sizeof(struct ramips_rx_dma
),
146 re
->rx
, re
->rx_desc_dma
);
149 dma_free_coherent(&re
->netdev
->dev
,
150 NUM_TX_DESC
* sizeof(struct ramips_tx_dma
),
151 re
->tx
, re
->tx_desc_dma
);
155 ramips_alloc_dma(struct raeth_priv
*re
)
160 re
->skb_free_idx
= 0;
163 re
->tx
= dma_alloc_coherent(&re
->netdev
->dev
,
164 NUM_TX_DESC
* sizeof(struct ramips_tx_dma
),
165 &re
->tx_desc_dma
, GFP_ATOMIC
);
169 memset(re
->tx
, 0, NUM_TX_DESC
* sizeof(struct ramips_tx_dma
));
170 for (i
= 0; i
< NUM_TX_DESC
; i
++) {
171 re
->tx
[i
].txd2
= TX_DMA_LSO
| TX_DMA_DONE
;
172 re
->tx
[i
].txd4
= TX_DMA_QN(3) | TX_DMA_PN(1);
176 re
->rx
= dma_alloc_coherent(&re
->netdev
->dev
,
177 NUM_RX_DESC
* sizeof(struct ramips_rx_dma
),
178 &re
->rx_desc_dma
, GFP_ATOMIC
);
182 memset(re
->rx
, 0, sizeof(struct ramips_rx_dma
) * NUM_RX_DESC
);
183 for (i
= 0; i
< NUM_RX_DESC
; i
++) {
185 struct sk_buff
*new_skb
= dev_alloc_skb(MAX_RX_LENGTH
+
191 skb_reserve(new_skb
, NET_IP_ALIGN
);
193 dma_addr
= dma_map_single(&re
->netdev
->dev
, new_skb
->data
,
194 MAX_RX_LENGTH
, DMA_FROM_DEVICE
);
195 re
->rx_dma
[i
] = dma_addr
;
196 re
->rx
[i
].rxd1
= (unsigned int) re
->rx_dma
[i
];
197 re
->rx
[i
].rxd2
|= RX_DMA_LSO
;
198 re
->rx_skb
[i
] = new_skb
;
204 ramips_cleanup_dma(re
);
209 ramips_setup_dma(struct raeth_priv
*re
)
211 ramips_fe_wr(re
->tx_desc_dma
, RAMIPS_TX_BASE_PTR0
);
212 ramips_fe_wr(NUM_TX_DESC
, RAMIPS_TX_MAX_CNT0
);
213 ramips_fe_wr(0, RAMIPS_TX_CTX_IDX0
);
214 ramips_fe_wr(RAMIPS_PST_DTX_IDX0
, RAMIPS_PDMA_RST_CFG
);
216 ramips_fe_wr(re
->rx_desc_dma
, RAMIPS_RX_BASE_PTR0
);
217 ramips_fe_wr(NUM_RX_DESC
, RAMIPS_RX_MAX_CNT0
);
218 ramips_fe_wr((NUM_RX_DESC
- 1), RAMIPS_RX_CALC_IDX0
);
219 ramips_fe_wr(RAMIPS_PST_DRX_IDX0
, RAMIPS_PDMA_RST_CFG
);
223 ramips_eth_hard_start_xmit(struct sk_buff
*skb
, struct net_device
*dev
)
225 struct raeth_priv
*priv
= netdev_priv(dev
);
227 unsigned int tx_next
;
228 dma_addr_t mapped_addr
;
230 if (priv
->plat
->min_pkt_len
) {
231 if (skb
->len
< priv
->plat
->min_pkt_len
) {
232 if (skb_padto(skb
, priv
->plat
->min_pkt_len
)) {
234 "ramips_eth: skb_padto failed\n");
238 skb_put(skb
, priv
->plat
->min_pkt_len
- skb
->len
);
242 dev
->trans_start
= jiffies
;
243 mapped_addr
= dma_map_single(&priv
->netdev
->dev
, skb
->data
, skb
->len
,
246 spin_lock(&priv
->page_lock
);
247 tx
= ramips_fe_rr(RAMIPS_TX_CTX_IDX0
);
248 tx_next
= (tx
+ 1) % NUM_TX_DESC
;
250 if ((priv
->tx_skb
[tx
]) || (priv
->tx_skb
[tx_next
]) ||
251 !(priv
->tx
[tx
].txd2
& TX_DMA_DONE
) ||
252 !(priv
->tx
[tx_next
].txd2
& TX_DMA_DONE
))
255 priv
->tx
[tx
].txd1
= (unsigned int) mapped_addr
;
256 priv
->tx
[tx
].txd2
&= ~(TX_DMA_PLEN0_MASK
| TX_DMA_DONE
);
257 priv
->tx
[tx
].txd2
|= TX_DMA_PLEN0(skb
->len
);
258 dev
->stats
.tx_packets
++;
259 dev
->stats
.tx_bytes
+= skb
->len
;
260 priv
->tx_skb
[tx
] = skb
;
262 ramips_fe_wr(tx_next
, RAMIPS_TX_CTX_IDX0
);
263 spin_unlock(&priv
->page_lock
);
267 spin_unlock(&priv
->page_lock
);
268 dev
->stats
.tx_dropped
++;
274 ramips_eth_rx_hw(unsigned long ptr
)
276 struct net_device
*dev
= (struct net_device
*) ptr
;
277 struct raeth_priv
*priv
= netdev_priv(dev
);
282 struct sk_buff
*rx_skb
, *new_skb
;
285 rx
= (ramips_fe_rr(RAMIPS_RX_CALC_IDX0
) + 1) % NUM_RX_DESC
;
286 if (!(priv
->rx
[rx
].rxd2
& RX_DMA_DONE
))
290 rx_skb
= priv
->rx_skb
[rx
];
291 pktlen
= RX_DMA_PLEN0(priv
->rx
[rx
].rxd2
);
293 new_skb
= netdev_alloc_skb(dev
, MAX_RX_LENGTH
+ NET_IP_ALIGN
);
294 /* Reuse the buffer on allocation failures */
298 dma_unmap_single(&priv
->netdev
->dev
, priv
->rx_dma
[rx
],
299 MAX_RX_LENGTH
, DMA_FROM_DEVICE
);
301 skb_put(rx_skb
, pktlen
);
303 rx_skb
->protocol
= eth_type_trans(rx_skb
, dev
);
304 rx_skb
->ip_summed
= CHECKSUM_NONE
;
305 dev
->stats
.rx_packets
++;
306 dev
->stats
.rx_bytes
+= pktlen
;
309 priv
->rx_skb
[rx
] = new_skb
;
310 skb_reserve(new_skb
, NET_IP_ALIGN
);
312 dma_addr
= dma_map_single(&priv
->netdev
->dev
,
316 priv
->rx_dma
[rx
] = dma_addr
;
317 priv
->rx
[rx
].rxd1
= (unsigned int) dma_addr
;
319 dev
->stats
.rx_dropped
++;
322 priv
->rx
[rx
].rxd2
&= ~RX_DMA_DONE
;
324 ramips_fe_wr(rx
, RAMIPS_RX_CALC_IDX0
);
328 tasklet_schedule(&priv
->rx_tasklet
);
330 ramips_fe_int_enable(RAMIPS_RX_DLY_INT
);
334 ramips_eth_tx_housekeeping(unsigned long ptr
)
336 struct net_device
*dev
= (struct net_device
*)ptr
;
337 struct raeth_priv
*priv
= netdev_priv(dev
);
339 spin_lock(&priv
->page_lock
);
340 while ((priv
->tx
[priv
->skb_free_idx
].txd2
& TX_DMA_DONE
) &&
341 (priv
->tx_skb
[priv
->skb_free_idx
])) {
342 dev_kfree_skb_irq(priv
->tx_skb
[priv
->skb_free_idx
]);
343 priv
->tx_skb
[priv
->skb_free_idx
] = 0;
344 priv
->skb_free_idx
++;
345 if (priv
->skb_free_idx
>= NUM_TX_DESC
)
346 priv
->skb_free_idx
= 0;
348 spin_unlock(&priv
->page_lock
);
350 ramips_fe_int_enable(RAMIPS_TX_DLY_INT
);
354 ramips_eth_timeout(struct net_device
*dev
)
356 struct raeth_priv
*priv
= netdev_priv(dev
);
358 tasklet_schedule(&priv
->tx_housekeeping_tasklet
);
362 ramips_eth_irq(int irq
, void *dev
)
364 struct raeth_priv
*priv
= netdev_priv(dev
);
365 unsigned long fe_int
= ramips_fe_rr(RAMIPS_FE_INT_STATUS
);
367 ramips_fe_wr(0xFFFFFFFF, RAMIPS_FE_INT_STATUS
);
369 if (fe_int
& RAMIPS_RX_DLY_INT
) {
370 ramips_fe_int_disable(RAMIPS_RX_DLY_INT
);
371 tasklet_schedule(&priv
->rx_tasklet
);
374 if (fe_int
& RAMIPS_TX_DLY_INT
) {
375 ramips_fe_int_disable(RAMIPS_TX_DLY_INT
);
376 tasklet_schedule(&priv
->tx_housekeeping_tasklet
);
383 ramips_eth_open(struct net_device
*dev
)
385 struct raeth_priv
*priv
= netdev_priv(dev
);
388 err
= request_irq(dev
->irq
, ramips_eth_irq
, IRQF_DISABLED
,
393 err
= ramips_alloc_dma(priv
);
397 ramips_hw_set_macaddr(dev
->dev_addr
);
399 ramips_setup_dma(priv
);
400 ramips_fe_wr((ramips_fe_rr(RAMIPS_PDMA_GLO_CFG
) & 0xff) |
401 (RAMIPS_TX_WB_DDONE
| RAMIPS_RX_DMA_EN
|
402 RAMIPS_TX_DMA_EN
| RAMIPS_PDMA_SIZE_4DWORDS
),
403 RAMIPS_PDMA_GLO_CFG
);
404 ramips_fe_wr((ramips_fe_rr(RAMIPS_FE_GLO_CFG
) &
405 ~(RAMIPS_US_CYC_CNT_MASK
<< RAMIPS_US_CYC_CNT_SHIFT
)) |
406 ((priv
->plat
->sys_freq
/ RAMIPS_US_CYC_CNT_DIVISOR
) << RAMIPS_US_CYC_CNT_SHIFT
),
409 tasklet_init(&priv
->tx_housekeeping_tasklet
, ramips_eth_tx_housekeeping
,
411 tasklet_init(&priv
->rx_tasklet
, ramips_eth_rx_hw
, (unsigned long)dev
);
413 ramips_setup_mdio_cfg(priv
);
415 ramips_fe_wr(RAMIPS_DELAY_INIT
, RAMIPS_DLY_INT_CFG
);
416 ramips_fe_wr(RAMIPS_TX_DLY_INT
| RAMIPS_RX_DLY_INT
, RAMIPS_FE_INT_ENABLE
);
417 ramips_fe_wr(ramips_fe_rr(RAMIPS_GDMA1_FWD_CFG
) &
418 ~(RAMIPS_GDM1_ICS_EN
| RAMIPS_GDM1_TCS_EN
| RAMIPS_GDM1_UCS_EN
| 0xffff),
419 RAMIPS_GDMA1_FWD_CFG
);
420 ramips_fe_wr(ramips_fe_rr(RAMIPS_CDMA_CSG_CFG
) &
421 ~(RAMIPS_ICS_GEN_EN
| RAMIPS_TCS_GEN_EN
| RAMIPS_UCS_GEN_EN
),
422 RAMIPS_CDMA_CSG_CFG
);
423 ramips_fe_wr(RAMIPS_PSE_FQFC_CFG_INIT
, RAMIPS_PSE_FQ_CFG
);
424 ramips_fe_wr(1, RAMIPS_FE_RST_GL
);
425 ramips_fe_wr(0, RAMIPS_FE_RST_GL
);
427 netif_start_queue(dev
);
431 free_irq(dev
->irq
, dev
);
436 ramips_eth_stop(struct net_device
*dev
)
438 struct raeth_priv
*priv
= netdev_priv(dev
);
440 ramips_fe_wr(ramips_fe_rr(RAMIPS_PDMA_GLO_CFG
) &
441 ~(RAMIPS_TX_WB_DDONE
| RAMIPS_RX_DMA_EN
| RAMIPS_TX_DMA_EN
),
442 RAMIPS_PDMA_GLO_CFG
);
444 /* disable all interrupts in the hw */
445 ramips_fe_wr(0, RAMIPS_FE_INT_ENABLE
);
447 free_irq(dev
->irq
, dev
);
448 netif_stop_queue(dev
);
449 tasklet_kill(&priv
->tx_housekeeping_tasklet
);
450 tasklet_kill(&priv
->rx_tasklet
);
451 ramips_cleanup_dma(priv
);
452 RADEBUG("ramips_eth: stopped\n");
457 ramips_eth_probe(struct net_device
*dev
)
459 struct raeth_priv
*priv
= netdev_priv(dev
);
461 BUG_ON(!priv
->plat
->reset_fe
);
462 priv
->plat
->reset_fe();
463 net_srandom(jiffies
);
464 memcpy(dev
->dev_addr
, priv
->plat
->mac
, ETH_ALEN
);
468 dev
->watchdog_timeo
= TX_TIMEOUT
;
469 spin_lock_init(&priv
->page_lock
);
474 static const struct net_device_ops ramips_eth_netdev_ops
= {
475 .ndo_init
= ramips_eth_probe
,
476 .ndo_open
= ramips_eth_open
,
477 .ndo_stop
= ramips_eth_stop
,
478 .ndo_start_xmit
= ramips_eth_hard_start_xmit
,
479 .ndo_tx_timeout
= ramips_eth_timeout
,
480 .ndo_change_mtu
= eth_change_mtu
,
481 .ndo_set_mac_address
= eth_mac_addr
,
482 .ndo_validate_addr
= eth_validate_addr
,
486 ramips_eth_plat_probe(struct platform_device
*plat
)
488 struct raeth_priv
*priv
;
489 struct ramips_eth_platform_data
*data
= plat
->dev
.platform_data
;
490 struct resource
*res
;
494 dev_err(&plat
->dev
, "no platform data specified\n");
498 res
= platform_get_resource(plat
, IORESOURCE_MEM
, 0);
500 dev_err(&plat
->dev
, "no memory resource found\n");
504 ramips_fe_base
= ioremap_nocache(res
->start
, res
->end
- res
->start
+ 1);
508 ramips_dev
= alloc_etherdev(sizeof(struct raeth_priv
));
510 dev_err(&plat
->dev
, "alloc_etherdev failed\n");
515 strcpy(ramips_dev
->name
, "eth%d");
516 ramips_dev
->irq
= platform_get_irq(plat
, 0);
517 if (ramips_dev
->irq
< 0) {
518 dev_err(&plat
->dev
, "no IRQ resource found\n");
522 ramips_dev
->addr_len
= ETH_ALEN
;
523 ramips_dev
->base_addr
= (unsigned long)ramips_fe_base
;
524 ramips_dev
->netdev_ops
= &ramips_eth_netdev_ops
;
526 priv
= netdev_priv(ramips_dev
);
528 priv
->netdev
= ramips_dev
;
529 priv
->speed
= data
->speed
;
530 priv
->duplex
= data
->duplex
;
531 priv
->rx_fc
= data
->rx_fc
;
532 priv
->tx_fc
= data
->tx_fc
;
535 err
= register_netdev(ramips_dev
);
537 dev_err(&plat
->dev
, "error bringing up device\n");
541 RADEBUG("ramips_eth: loaded\n");
547 iounmap(ramips_fe_base
);
552 ramips_eth_plat_remove(struct platform_device
*plat
)
554 unregister_netdev(ramips_dev
);
555 free_netdev(ramips_dev
);
556 RADEBUG("ramips_eth: unloaded\n");
560 static struct platform_driver ramips_eth_driver
= {
561 .probe
= ramips_eth_plat_probe
,
562 .remove
= ramips_eth_plat_remove
,
564 .name
= "ramips_eth",
565 .owner
= THIS_MODULE
,
570 ramips_eth_init(void)
574 ret
= rt305x_esw_init();
578 ret
= platform_driver_register(&ramips_eth_driver
);
581 "ramips_eth: Error registering platfom driver!\n");
593 ramips_eth_cleanup(void)
595 platform_driver_unregister(&ramips_eth_driver
);
599 module_init(ramips_eth_init
);
600 module_exit(ramips_eth_cleanup
);
602 MODULE_LICENSE("GPL");
603 MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
604 MODULE_DESCRIPTION("ethernet driver for ramips boards");