target/linux/ramips/files/drivers/net/ramips.c
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2009 John Crispin <blogic@openwrt.org>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/platform_device.h>

#include <ramips_eth_platform.h>
#include "ramips_eth.h"

#define TX_TIMEOUT	(20 * HZ / 100)
#define MAX_RX_LENGTH	1600

#ifdef CONFIG_RALINK_RT305X
#include "ramips_esw.c"
#else
static inline int rt305x_esw_init(void) { return 0; }
static inline void rt305x_esw_exit(void) { }
#endif

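/*
 * The PDMA engine wants bus/physical addresses. On these MIPS SoCs the
 * KSEG0/KSEG1 kernel addresses map to the physical address by masking
 * off the top three bits, which is all phys_to_bus() needs to do.
 */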
#define phys_to_bus(a)	(a & 0x1FFFFFFF)

static struct net_device *ramips_dev;
static void __iomem *ramips_fe_base;

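/* Simple accessors for the frame engine register window mapped at probe time. */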
static inline void
ramips_fe_wr(u32 val, unsigned reg)
{
	__raw_writel(val, ramips_fe_base + reg);
}

static inline u32
ramips_fe_rr(unsigned reg)
{
	return __raw_readl(ramips_fe_base + reg);
}

static inline void
ramips_fe_int_disable(u32 mask)
{
	ramips_fe_wr(ramips_fe_rr(RAMIPS_FE_INT_ENABLE) & ~mask,
		     RAMIPS_FE_INT_ENABLE);
	/* flush write */
	ramips_fe_rr(RAMIPS_FE_INT_ENABLE);
}

static inline void
ramips_fe_int_enable(u32 mask)
{
	ramips_fe_wr(ramips_fe_rr(RAMIPS_FE_INT_ENABLE) | mask,
		     RAMIPS_FE_INT_ENABLE);
	/* flush write */
	ramips_fe_rr(RAMIPS_FE_INT_ENABLE);
}

static inline void
ramips_hw_set_macaddr(unsigned char *mac)
{
	ramips_fe_wr((mac[0] << 8) | mac[1], RAMIPS_GDMA1_MAC_ADRH);
	ramips_fe_wr((mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5],
		     RAMIPS_GDMA1_MAC_ADRL);
}

#ifdef CONFIG_RALINK_RT288X
static void
ramips_setup_mdio_cfg(struct raeth_priv *re)
{
	unsigned int mdio_cfg;

	/* Note: the TX clock skew flag is OR'ed in twice here; the second
	 * occurrence was presumably meant to be the RX clock skew. Kept
	 * as-is since OR-ing the same bit twice is harmless. */
	mdio_cfg = RAMIPS_MDIO_CFG_TX_CLK_SKEW_200 |
		   RAMIPS_MDIO_CFG_TX_CLK_SKEW_200 |
		   RAMIPS_MDIO_CFG_GP1_FRC_EN;

	if (re->duplex == DUPLEX_FULL)
		mdio_cfg |= RAMIPS_MDIO_CFG_GP1_DUPLEX;

	if (re->tx_fc)
		mdio_cfg |= RAMIPS_MDIO_CFG_GP1_FC_TX;

	if (re->rx_fc)
		mdio_cfg |= RAMIPS_MDIO_CFG_GP1_FC_RX;

	switch (re->speed) {
	case SPEED_10:
		mdio_cfg |= RAMIPS_MDIO_CFG_GP1_SPEED_10;
		break;
	case SPEED_100:
		mdio_cfg |= RAMIPS_MDIO_CFG_GP1_SPEED_100;
		break;
	case SPEED_1000:
		mdio_cfg |= RAMIPS_MDIO_CFG_GP1_SPEED_1000;
		break;
	default:
		BUG();
	}

	ramips_fe_wr(mdio_cfg, RAMIPS_MDIO_CFG);
}
#else
static inline void ramips_setup_mdio_cfg(struct raeth_priv *re)
{
}
#endif /* CONFIG_RALINK_RT288X */

static void
ramips_cleanup_dma(struct raeth_priv *re)
{
	int i;

	for (i = 0; i < NUM_RX_DESC; i++)
		if (re->rx_skb[i]) {
			dma_unmap_single(NULL, re->rx_dma[i], MAX_RX_LENGTH,
					 DMA_FROM_DEVICE);
			dev_kfree_skb_any(re->rx_skb[i]);
		}

	if (re->rx)
		dma_free_coherent(NULL,
				  NUM_RX_DESC * sizeof(struct ramips_rx_dma),
				  re->rx, re->rx_desc_dma);

	if (re->tx)
		dma_free_coherent(NULL,
				  NUM_TX_DESC * sizeof(struct ramips_tx_dma),
				  re->tx, re->tx_desc_dma);
}

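/*
 * Allocate the TX/RX descriptor rings in coherent DMA memory and
 * pre-fill every RX slot with a mapped skb of MAX_RX_LENGTH bytes.
 * On any failure the partially built state is torn down again via
 * ramips_cleanup_dma().
 */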
static int
ramips_alloc_dma(struct raeth_priv *re)
{
	int err = -ENOMEM;
	int i;

	re->skb_free_idx = 0;

	/* setup tx ring */
	re->tx = dma_alloc_coherent(NULL,
				    NUM_TX_DESC * sizeof(struct ramips_tx_dma),
				    &re->tx_desc_dma, GFP_ATOMIC);
	if (!re->tx)
		goto err_cleanup;

	memset(re->tx, 0, NUM_TX_DESC * sizeof(struct ramips_tx_dma));
	for (i = 0; i < NUM_TX_DESC; i++) {
		re->tx[i].txd2 = TX_DMA_LSO | TX_DMA_DONE;
		re->tx[i].txd4 = TX_DMA_QN(3) | TX_DMA_PN(1);
	}

	/* setup rx ring */
	re->rx = dma_alloc_coherent(NULL,
				    NUM_RX_DESC * sizeof(struct ramips_rx_dma),
				    &re->rx_desc_dma, GFP_ATOMIC);
	if (!re->rx)
		goto err_cleanup;

	memset(re->rx, 0, sizeof(struct ramips_rx_dma) * NUM_RX_DESC);
	for (i = 0; i < NUM_RX_DESC; i++) {
		dma_addr_t dma_addr;
		struct sk_buff *new_skb = dev_alloc_skb(MAX_RX_LENGTH +
							NET_IP_ALIGN);

		if (!new_skb)
			goto err_cleanup;

		skb_reserve(new_skb, NET_IP_ALIGN);

		dma_addr = dma_map_single(NULL, new_skb->data,
					  MAX_RX_LENGTH, DMA_FROM_DEVICE);
		re->rx_dma[i] = dma_addr;
		re->rx[i].rxd1 = (unsigned int) re->rx_dma[i];
		re->rx[i].rxd2 |= RX_DMA_LSO;
		re->rx_skb[i] = new_skb;
	}

	return 0;

err_cleanup:
	ramips_cleanup_dma(re);
	return err;
}

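/*
 * Program the ring base addresses and sizes into the PDMA block and
 * reset the hardware TX/RX index registers.
 */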
static void
ramips_setup_dma(struct raeth_priv *re)
{
	ramips_fe_wr(re->tx_desc_dma, RAMIPS_TX_BASE_PTR0);
	ramips_fe_wr(NUM_TX_DESC, RAMIPS_TX_MAX_CNT0);
	ramips_fe_wr(0, RAMIPS_TX_CTX_IDX0);
	ramips_fe_wr(RAMIPS_PST_DTX_IDX0, RAMIPS_PDMA_RST_CFG);

	ramips_fe_wr(re->rx_desc_dma, RAMIPS_RX_BASE_PTR0);
	ramips_fe_wr(NUM_RX_DESC, RAMIPS_RX_MAX_CNT0);
	ramips_fe_wr((NUM_RX_DESC - 1), RAMIPS_RX_CALC_IDX0);
	ramips_fe_wr(RAMIPS_PST_DRX_IDX0, RAMIPS_PDMA_RST_CFG);
}

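/*
 * Transmit path: the current TX index is read back from the hardware
 * rather than tracked in software. A packet is only queued if both the
 * current and the next descriptor are free, otherwise it is dropped.
 */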
static int
ramips_eth_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct raeth_priv *priv = netdev_priv(dev);
	unsigned long tx;
	unsigned int tx_next;
	unsigned int mapped_addr;

	if (priv->plat->min_pkt_len) {
		if (skb->len < priv->plat->min_pkt_len) {
			if (skb_padto(skb, priv->plat->min_pkt_len)) {
				printk(KERN_ERR
				       "ramips_eth: skb_padto failed\n");
				/* skb_padto() frees the skb on failure */
				return NETDEV_TX_OK;
			}
			skb_put(skb, priv->plat->min_pkt_len - skb->len);
		}
	}

	dev->trans_start = jiffies;
	mapped_addr = (unsigned int) dma_map_single(NULL, skb->data, skb->len,
						    DMA_TO_DEVICE);
	dma_sync_single_for_device(NULL, mapped_addr, skb->len, DMA_TO_DEVICE);
	spin_lock(&priv->page_lock);
	tx = ramips_fe_rr(RAMIPS_TX_CTX_IDX0);
	tx_next = (tx + 1) % NUM_TX_DESC;

	if ((priv->tx_skb[tx]) || (priv->tx_skb[tx_next]) ||
	    !(priv->tx[tx].txd2 & TX_DMA_DONE) ||
	    !(priv->tx[tx_next].txd2 & TX_DMA_DONE))
		goto out;

	priv->tx[tx].txd1 = mapped_addr;
	priv->tx[tx].txd2 &= ~(TX_DMA_PLEN0_MASK | TX_DMA_DONE);
	priv->tx[tx].txd2 |= TX_DMA_PLEN0(skb->len);
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;
	priv->tx_skb[tx] = skb;
	wmb();
	ramips_fe_wr(tx_next, RAMIPS_TX_CTX_IDX0);
	spin_unlock(&priv->page_lock);
	return NETDEV_TX_OK;

out:
	spin_unlock(&priv->page_lock);
	dev->stats.tx_dropped++;
	kfree_skb(skb);
	return NETDEV_TX_OK;
}

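/*
 * RX is handled from a tasklet with a budget of 16 packets per run.
 * Each received buffer is replaced by a freshly allocated skb; if the
 * allocation fails the old buffer is reused and the packet is dropped.
 */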
static void
ramips_eth_rx_hw(unsigned long ptr)
{
	struct net_device *dev = (struct net_device *) ptr;
	struct raeth_priv *priv = netdev_priv(dev);
	int rx;
	int max_rx = 16;

	while (max_rx) {
		struct sk_buff *rx_skb, *new_skb;
		int pktlen;

		rx = (ramips_fe_rr(RAMIPS_RX_CALC_IDX0) + 1) % NUM_RX_DESC;
		if (!(priv->rx[rx].rxd2 & RX_DMA_DONE))
			break;
		max_rx--;

		rx_skb = priv->rx_skb[rx];
		pktlen = RX_DMA_PLEN0(priv->rx[rx].rxd2);

		new_skb = netdev_alloc_skb(dev, MAX_RX_LENGTH + NET_IP_ALIGN);
		/* Reuse the buffer on allocation failures */
		if (new_skb) {
			dma_addr_t dma_addr;

			dma_unmap_single(NULL, priv->rx_dma[rx], MAX_RX_LENGTH,
					 DMA_FROM_DEVICE);

			skb_put(rx_skb, pktlen);
			rx_skb->dev = dev;
			rx_skb->protocol = eth_type_trans(rx_skb, dev);
			rx_skb->ip_summed = CHECKSUM_NONE;
			dev->stats.rx_packets++;
			dev->stats.rx_bytes += pktlen;
			netif_rx(rx_skb);

			priv->rx_skb[rx] = new_skb;
			skb_reserve(new_skb, NET_IP_ALIGN);

			dma_addr = dma_map_single(NULL,
						  new_skb->data,
						  MAX_RX_LENGTH,
						  DMA_FROM_DEVICE);
			priv->rx_dma[rx] = dma_addr;
			priv->rx[rx].rxd1 = (unsigned int) dma_addr;
		} else {
			dev->stats.rx_dropped++;
		}

		priv->rx[rx].rxd2 &= ~RX_DMA_DONE;
		wmb();
		ramips_fe_wr(rx, RAMIPS_RX_CALC_IDX0);
	}

	if (max_rx == 0)
		tasklet_schedule(&priv->rx_tasklet);
	else
		ramips_fe_int_enable(RAMIPS_RX_DLY_INT);
}

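/*
 * Reclaim completed TX descriptors, free the associated skbs and then
 * re-enable the TX delay interrupt.
 */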
static void
ramips_eth_tx_housekeeping(unsigned long ptr)
{
	struct net_device *dev = (struct net_device *) ptr;
	struct raeth_priv *priv = netdev_priv(dev);

	spin_lock(&priv->page_lock);
	while ((priv->tx[priv->skb_free_idx].txd2 & TX_DMA_DONE) &&
	       (priv->tx_skb[priv->skb_free_idx])) {
		dev_kfree_skb_irq(priv->tx_skb[priv->skb_free_idx]);
		priv->tx_skb[priv->skb_free_idx] = NULL;
		priv->skb_free_idx++;
		if (priv->skb_free_idx >= NUM_TX_DESC)
			priv->skb_free_idx = 0;
	}
	spin_unlock(&priv->page_lock);

	ramips_fe_int_enable(RAMIPS_TX_DLY_INT);
}

static void
ramips_eth_timeout(struct net_device *dev)
{
	struct raeth_priv *priv = netdev_priv(dev);

	tasklet_schedule(&priv->tx_housekeeping_tasklet);
}

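/*
 * Interrupt handler: acknowledge everything, then mask the RX/TX delay
 * interrupts and defer the actual work to the tasklets.
 */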
static irqreturn_t
ramips_eth_irq(int irq, void *dev)
{
	struct raeth_priv *priv = netdev_priv(dev);
	unsigned long fe_int = ramips_fe_rr(RAMIPS_FE_INT_STATUS);

	ramips_fe_wr(0xFFFFFFFF, RAMIPS_FE_INT_STATUS);

	if (fe_int & RAMIPS_RX_DLY_INT) {
		ramips_fe_int_disable(RAMIPS_RX_DLY_INT);
		tasklet_schedule(&priv->rx_tasklet);
	}

	if (fe_int & RAMIPS_TX_DLY_INT) {
		ramips_fe_int_disable(RAMIPS_TX_DLY_INT);
		tasklet_schedule(&priv->tx_housekeeping_tasklet);
	}

	return IRQ_HANDLED;
}

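/*
 * Bring-up sequence: request the IRQ, allocate and program the DMA
 * rings, enable the PDMA engine, configure delayed interrupts and
 * reset the frame engine before starting the TX queue.
 */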
static int
ramips_eth_open(struct net_device *dev)
{
	struct raeth_priv *priv = netdev_priv(dev);
	int err;

	err = request_irq(dev->irq, ramips_eth_irq, IRQF_DISABLED,
			  dev->name, dev);
	if (err)
		return err;

	err = ramips_alloc_dma(priv);
	if (err)
		goto err_free_irq;

	ramips_hw_set_macaddr(dev->dev_addr);

	ramips_setup_dma(priv);
	ramips_fe_wr((ramips_fe_rr(RAMIPS_PDMA_GLO_CFG) & 0xff) |
		     (RAMIPS_TX_WB_DDONE | RAMIPS_RX_DMA_EN |
		      RAMIPS_TX_DMA_EN | RAMIPS_PDMA_SIZE_4DWORDS),
		     RAMIPS_PDMA_GLO_CFG);
	ramips_fe_wr((ramips_fe_rr(RAMIPS_FE_GLO_CFG) &
		      ~(RAMIPS_US_CYC_CNT_MASK << RAMIPS_US_CYC_CNT_SHIFT)) |
		     ((priv->plat->sys_freq / RAMIPS_US_CYC_CNT_DIVISOR) << RAMIPS_US_CYC_CNT_SHIFT),
		     RAMIPS_FE_GLO_CFG);

	tasklet_init(&priv->tx_housekeeping_tasklet, ramips_eth_tx_housekeeping,
		     (unsigned long) dev);
	tasklet_init(&priv->rx_tasklet, ramips_eth_rx_hw, (unsigned long) dev);

	ramips_setup_mdio_cfg(priv);

	ramips_fe_wr(RAMIPS_DELAY_INIT, RAMIPS_DLY_INT_CFG);
	ramips_fe_wr(RAMIPS_TX_DLY_INT | RAMIPS_RX_DLY_INT, RAMIPS_FE_INT_ENABLE);
	ramips_fe_wr(ramips_fe_rr(RAMIPS_GDMA1_FWD_CFG) &
		     ~(RAMIPS_GDM1_ICS_EN | RAMIPS_GDM1_TCS_EN | RAMIPS_GDM1_UCS_EN | 0xffff),
		     RAMIPS_GDMA1_FWD_CFG);
	ramips_fe_wr(ramips_fe_rr(RAMIPS_CDMA_CSG_CFG) &
		     ~(RAMIPS_ICS_GEN_EN | RAMIPS_TCS_GEN_EN | RAMIPS_UCS_GEN_EN),
		     RAMIPS_CDMA_CSG_CFG);
	ramips_fe_wr(RAMIPS_PSE_FQFC_CFG_INIT, RAMIPS_PSE_FQ_CFG);
	ramips_fe_wr(1, RAMIPS_FE_RST_GL);
	ramips_fe_wr(0, RAMIPS_FE_RST_GL);

	netif_start_queue(dev);
	return 0;

err_free_irq:
	free_irq(dev->irq, dev);
	return err;
}

static int
ramips_eth_stop(struct net_device *dev)
{
	struct raeth_priv *priv = netdev_priv(dev);

	ramips_fe_wr(ramips_fe_rr(RAMIPS_PDMA_GLO_CFG) &
		     ~(RAMIPS_TX_WB_DDONE | RAMIPS_RX_DMA_EN | RAMIPS_TX_DMA_EN),
		     RAMIPS_PDMA_GLO_CFG);

	/* disable all interrupts in the hw */
	ramips_fe_wr(0, RAMIPS_FE_INT_ENABLE);

	free_irq(dev->irq, dev);
	netif_stop_queue(dev);
	tasklet_kill(&priv->tx_housekeeping_tasklet);
	tasklet_kill(&priv->rx_tasklet);
	ramips_cleanup_dma(priv);
	printk(KERN_DEBUG "ramips_eth: stopped\n");
	return 0;
}

static int __init
ramips_eth_probe(struct net_device *dev)
{
	struct raeth_priv *priv = netdev_priv(dev);

	BUG_ON(!priv->plat->reset_fe);
	priv->plat->reset_fe();
	net_srandom(jiffies);
	memcpy(dev->dev_addr, priv->plat->mac, ETH_ALEN);

	ether_setup(dev);
	dev->mtu = 1500;
	dev->watchdog_timeo = TX_TIMEOUT;
	spin_lock_init(&priv->page_lock);

	return 0;
}

static const struct net_device_ops ramips_eth_netdev_ops = {
	.ndo_init		= ramips_eth_probe,
	.ndo_open		= ramips_eth_open,
	.ndo_stop		= ramips_eth_stop,
	.ndo_start_xmit		= ramips_eth_hard_start_xmit,
	.ndo_tx_timeout		= ramips_eth_timeout,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

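/*
 * Platform driver glue: map the frame engine registers, allocate the
 * net_device, copy link parameters from the platform data and register
 * the device.
 */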
static int
ramips_eth_plat_probe(struct platform_device *plat)
{
	struct raeth_priv *priv;
	struct ramips_eth_platform_data *data = plat->dev.platform_data;
	struct resource *res;
	int err;

	if (!data) {
		dev_err(&plat->dev, "no platform data specified\n");
		return -EINVAL;
	}

	res = platform_get_resource(plat, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&plat->dev, "no memory resource found\n");
		return -ENXIO;
	}

	ramips_fe_base = ioremap_nocache(res->start, res->end - res->start + 1);
	if (!ramips_fe_base)
		return -ENOMEM;

	ramips_dev = alloc_etherdev(sizeof(struct raeth_priv));
	if (!ramips_dev) {
		dev_err(&plat->dev, "alloc_etherdev failed\n");
		err = -ENOMEM;
		goto err_unmap;
	}

	strcpy(ramips_dev->name, "eth%d");
	ramips_dev->irq = platform_get_irq(plat, 0);
	if (ramips_dev->irq < 0) {
		dev_err(&plat->dev, "no IRQ resource found\n");
		err = -ENXIO;
		goto err_free_dev;
	}
	ramips_dev->addr_len = ETH_ALEN;
	ramips_dev->base_addr = (unsigned long) ramips_fe_base;
	ramips_dev->netdev_ops = &ramips_eth_netdev_ops;

	priv = netdev_priv(ramips_dev);

	priv->speed = data->speed;
	priv->duplex = data->duplex;
	priv->rx_fc = data->rx_fc;
	priv->tx_fc = data->tx_fc;
	priv->plat = data;

	err = register_netdev(ramips_dev);
	if (err) {
		dev_err(&plat->dev, "error bringing up device\n");
		goto err_free_dev;
	}

	printk(KERN_DEBUG "ramips_eth: loaded\n");
	return 0;

err_free_dev:
	/* devices from alloc_etherdev() must be released with free_netdev() */
	free_netdev(ramips_dev);
err_unmap:
	iounmap(ramips_fe_base);
	return err;
}

static int
ramips_eth_plat_remove(struct platform_device *plat)
{
	unregister_netdev(ramips_dev);
	free_netdev(ramips_dev);
	/* release the register window mapped in the probe routine */
	iounmap(ramips_fe_base);
	printk(KERN_DEBUG "ramips_eth: unloaded\n");
	return 0;
}

static struct platform_driver ramips_eth_driver = {
	.probe = ramips_eth_plat_probe,
	.remove = ramips_eth_plat_remove,
	.driver = {
		.name = "ramips_eth",
		.owner = THIS_MODULE,
	},
};

static int __init
ramips_eth_init(void)
{
	int ret;

	ret = rt305x_esw_init();
	if (ret)
		return ret;

	ret = platform_driver_register(&ramips_eth_driver);
	if (ret) {
		printk(KERN_ERR
		       "ramips_eth: Error registering platform driver!\n");
		goto esw_cleanup;
	}

	return 0;

esw_cleanup:
	rt305x_esw_exit();
	return ret;
}

static void __exit
ramips_eth_cleanup(void)
{
	platform_driver_unregister(&ramips_eth_driver);
	rt305x_esw_exit();
}

module_init(ramips_eth_init);
module_exit(ramips_eth_cleanup);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
MODULE_DESCRIPTION("Ethernet driver for ramips boards");