ramips_eth: coding style cleanup
[openwrt/svn-archive/archive.git] target/linux/ramips/files/drivers/net/ramips.c
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2009 John Crispin <blogic@openwrt.org>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>

#include <ramips_eth_platform.h>
#include "ramips_eth.h"

#define TX_TIMEOUT	(20 * HZ / 100)
#define MAX_RX_LENGTH	1600

#ifdef CONFIG_RALINK_RT305X
#include "ramips_esw.c"
#endif

#define phys_to_bus(a)	((a) & 0x1FFFFFFF)

static struct net_device *ramips_dev;
static void __iomem *ramips_fe_base = NULL;

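/*
 * Helper accessors for the frame engine (FE) register window that
 * ramips_eth_plat_probe() ioremaps into ramips_fe_base.
 */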
static inline void
ramips_fe_wr(u32 val, unsigned reg)
{
        __raw_writel(val, ramips_fe_base + reg);
}

static inline u32
ramips_fe_rr(unsigned reg)
{
        return __raw_readl(ramips_fe_base + reg);
}

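/*
 * Free the receive skbs and the coherent descriptor rings allocated by
 * ramips_alloc_dma(). Safe to call on a partially initialized device.
 */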
static void
ramips_cleanup_dma(struct net_device *dev)
{
        struct raeth_priv *priv = netdev_priv(dev);
        int i;

        for (i = 0; i < NUM_RX_DESC; i++)
                if (priv->rx_skb[i])
                        dev_kfree_skb_any(priv->rx_skb[i]);

        if (priv->rx)
                dma_free_coherent(NULL,
                                  NUM_RX_DESC * sizeof(struct ramips_rx_dma),
                                  priv->rx, priv->phy_rx);

        if (priv->tx)
                dma_free_coherent(NULL,
                                  NUM_TX_DESC * sizeof(struct ramips_tx_dma),
                                  priv->tx, priv->phy_tx);
}

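/*
 * Allocate the TX and RX descriptor rings as coherent DMA memory and
 * pre-fill every RX descriptor with a freshly mapped receive skb.
 * On any failure the partially built state is torn down again.
 */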
static int
ramips_alloc_dma(struct net_device *dev)
{
        struct raeth_priv *priv = netdev_priv(dev);
        int err = -ENOMEM;
        int i;

        priv->skb_free_idx = 0;

        /* setup tx ring */
        priv->tx = dma_alloc_coherent(NULL,
                                      NUM_TX_DESC * sizeof(struct ramips_tx_dma),
                                      &priv->phy_tx, GFP_ATOMIC);
        if (!priv->tx)
                goto err_cleanup;

        for (i = 0; i < NUM_TX_DESC; i++) {
                memset(&priv->tx[i], 0, sizeof(struct ramips_tx_dma));
                priv->tx[i].txd2 |= TX_DMA_LSO | TX_DMA_DONE;
                priv->tx[i].txd4 &= (TX_DMA_QN_MASK | TX_DMA_PN_MASK);
                priv->tx[i].txd4 |= TX_DMA_QN(3) | TX_DMA_PN(1);
        }

        /* setup rx ring */
        priv->rx = dma_alloc_coherent(NULL,
                                      NUM_RX_DESC * sizeof(struct ramips_rx_dma),
                                      &priv->phy_rx, GFP_ATOMIC);
        if (!priv->rx)
                goto err_cleanup;

        memset(priv->rx, 0, sizeof(struct ramips_rx_dma) * NUM_RX_DESC);
        for (i = 0; i < NUM_RX_DESC; i++) {
                struct sk_buff *new_skb = dev_alloc_skb(MAX_RX_LENGTH + 2);

                if (!new_skb)
                        goto err_cleanup;

                skb_reserve(new_skb, 2);
                priv->rx[i].rxd1 = dma_map_single(NULL,
                                                  skb_put(new_skb, 2),
                                                  MAX_RX_LENGTH + 2,
                                                  DMA_FROM_DEVICE);
                priv->rx[i].rxd2 |= RX_DMA_LSO;
                priv->rx_skb[i] = new_skb;
        }

        return 0;

err_cleanup:
        ramips_cleanup_dma(dev);
        return err;
}

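/*
 * Program the descriptor ring base addresses and sizes into the PDMA
 * engine and reset its TX/RX index registers.
 */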
static void
ramips_setup_dma(struct net_device *dev)
{
        struct raeth_priv *priv = netdev_priv(dev);

        ramips_fe_wr(phys_to_bus(priv->phy_tx), RAMIPS_TX_BASE_PTR0);
        ramips_fe_wr(NUM_TX_DESC, RAMIPS_TX_MAX_CNT0);
        ramips_fe_wr(0, RAMIPS_TX_CTX_IDX0);
        ramips_fe_wr(RAMIPS_PST_DTX_IDX0, RAMIPS_PDMA_RST_CFG);

        ramips_fe_wr(phys_to_bus(priv->phy_rx), RAMIPS_RX_BASE_PTR0);
        ramips_fe_wr(NUM_RX_DESC, RAMIPS_RX_MAX_CNT0);
        ramips_fe_wr((NUM_RX_DESC - 1), RAMIPS_RX_CALC_IDX0);
        ramips_fe_wr(RAMIPS_PST_DRX_IDX0, RAMIPS_PDMA_RST_CFG);
}

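/*
 * Transmit path: pad short frames if the platform requires it, map the
 * skb for DMA, claim the next free TX descriptor under page_lock and
 * hand the frame to the DMA engine. Frames are dropped (not requeued)
 * when the ring is full.
 */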
static int
ramips_eth_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct raeth_priv *priv = netdev_priv(dev);
        unsigned long tx;
        unsigned int tx_next;
        unsigned int mapped_addr;
        unsigned long flags;

        if (priv->plat->min_pkt_len) {
                if (skb->len < priv->plat->min_pkt_len) {
                        if (skb_padto(skb, priv->plat->min_pkt_len)) {
                                printk(KERN_ERR
                                       "ramips_eth: skb_padto failed\n");
                                kfree_skb(skb);
                                return NETDEV_TX_OK;
                        }
                        skb_put(skb, priv->plat->min_pkt_len - skb->len);
                }
        }

        dev->trans_start = jiffies;
        mapped_addr = (unsigned int) dma_map_single(NULL, skb->data, skb->len,
                                                    DMA_TO_DEVICE);
        dma_sync_single_for_device(NULL, mapped_addr, skb->len, DMA_TO_DEVICE);
        spin_lock_irqsave(&priv->page_lock, flags);
        tx = ramips_fe_rr(RAMIPS_TX_CTX_IDX0);
        if (tx == NUM_TX_DESC - 1)
                tx_next = 0;
        else
                tx_next = tx + 1;

        if ((priv->tx_skb[tx]) || (priv->tx_skb[tx_next]) ||
            !(priv->tx[tx].txd2 & TX_DMA_DONE) ||
            !(priv->tx[tx_next].txd2 & TX_DMA_DONE))
                goto out;

        priv->tx[tx].txd1 = mapped_addr;
        priv->tx[tx].txd2 &= ~(TX_DMA_PLEN0_MASK | TX_DMA_DONE);
        priv->tx[tx].txd2 |= TX_DMA_PLEN0(skb->len);
        dev->stats.tx_packets++;
        dev->stats.tx_bytes += skb->len;
        priv->tx_skb[tx] = skb;
        wmb();
        ramips_fe_wr((tx + 1) % NUM_TX_DESC, RAMIPS_TX_CTX_IDX0);
        spin_unlock_irqrestore(&priv->page_lock, flags);
        return NETDEV_TX_OK;

out:
        spin_unlock_irqrestore(&priv->page_lock, flags);
        dev->stats.tx_dropped++;
        kfree_skb(skb);
        return NETDEV_TX_OK;
}

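/*
 * RX tasklet: pull up to 16 completed descriptors per run, push the
 * packets into the stack with netif_rx(), refill each slot with a new
 * skb and re-enable the RX delay interrupt, or reschedule itself if
 * the budget was exhausted.
 */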
static void
ramips_eth_rx_hw(unsigned long ptr)
{
        struct net_device *dev = (struct net_device *) ptr;
        struct raeth_priv *priv = netdev_priv(dev);
        int rx;
        int max_rx = 16;

        while (max_rx) {
                struct sk_buff *rx_skb, *new_skb;

                rx = (ramips_fe_rr(RAMIPS_RX_CALC_IDX0) + 1) % NUM_RX_DESC;
                if (!(priv->rx[rx].rxd2 & RX_DMA_DONE))
                        break;
                max_rx--;

                rx_skb = priv->rx_skb[rx];
                rx_skb->len = RX_DMA_PLEN0(priv->rx[rx].rxd2);
                rx_skb->dev = dev;
                rx_skb->protocol = eth_type_trans(rx_skb, dev);
                rx_skb->ip_summed = CHECKSUM_NONE;
                dev->stats.rx_packets++;
                dev->stats.rx_bytes += rx_skb->len;
                netif_rx(rx_skb);

                new_skb = netdev_alloc_skb(dev, MAX_RX_LENGTH + 2);
                BUG_ON(!new_skb);
                priv->rx_skb[rx] = new_skb;
                skb_reserve(new_skb, 2);
                priv->rx[rx].rxd1 = dma_map_single(NULL,
                                                   new_skb->data,
                                                   MAX_RX_LENGTH + 2,
                                                   DMA_FROM_DEVICE);
                priv->rx[rx].rxd2 &= ~RX_DMA_DONE;
                wmb();
                ramips_fe_wr(rx, RAMIPS_RX_CALC_IDX0);
        }

        if (max_rx == 0)
                tasklet_schedule(&priv->rx_tasklet);
        else
                ramips_fe_wr(ramips_fe_rr(RAMIPS_FE_INT_ENABLE) | RAMIPS_RX_DLY_INT,
                             RAMIPS_FE_INT_ENABLE);
}

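/*
 * TX housekeeping: walk the ring from skb_free_idx, free every skb
 * whose descriptor the hardware has marked done, then re-enable the
 * TX delay interrupt.
 */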
static void
ramips_eth_tx_housekeeping(unsigned long ptr)
{
        struct net_device *dev = (struct net_device *) ptr;
        struct raeth_priv *priv = netdev_priv(dev);

        while ((priv->tx[priv->skb_free_idx].txd2 & TX_DMA_DONE) &&
               (priv->tx_skb[priv->skb_free_idx])) {
                dev_kfree_skb_irq((struct sk_buff *) priv->tx_skb[priv->skb_free_idx]);
                priv->tx_skb[priv->skb_free_idx] = NULL;
                priv->skb_free_idx++;
                if (priv->skb_free_idx >= NUM_TX_DESC)
                        priv->skb_free_idx = 0;
        }

        ramips_fe_wr(ramips_fe_rr(RAMIPS_FE_INT_ENABLE) | RAMIPS_TX_DLY_INT,
                     RAMIPS_FE_INT_ENABLE);
}

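/*
 * Update the software MAC address and mirror it into the GDMA1
 * address registers. Only allowed while the interface is down.
 */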
static int
ramips_eth_set_mac_addr(struct net_device *dev, void *addr)
{
        struct sockaddr *sa = (struct sockaddr *) addr;
        unsigned char *mac = sa->sa_data;

        if (netif_running(dev))
                return -EBUSY;

        memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);
        ramips_fe_wr((mac[0] << 8) | mac[1], RAMIPS_GDMA1_MAC_ADRH);
        ramips_fe_wr((mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5],
                     RAMIPS_GDMA1_MAC_ADRL);
        return 0;
}

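/* TX watchdog timeout: kick the housekeeping tasklet to reap stale descriptors. */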
static void
ramips_eth_timeout(struct net_device *dev)
{
        struct raeth_priv *priv = netdev_priv(dev);

        tasklet_schedule(&priv->tx_housekeeping_tasklet);
}

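/*
 * Frame engine interrupt: acknowledge all pending sources, defer RX
 * processing to the RX tasklet (with the RX delay interrupt masked
 * until the tasklet re-enables it) and reap finished TX descriptors.
 */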
static irqreturn_t
ramips_eth_irq(int irq, void *dev)
{
        struct raeth_priv *priv = netdev_priv(dev);
        unsigned long fe_int = ramips_fe_rr(RAMIPS_FE_INT_STATUS);

        ramips_fe_wr(0xFFFFFFFF, RAMIPS_FE_INT_STATUS);

        if (fe_int & RAMIPS_RX_DLY_INT) {
                ramips_fe_wr(ramips_fe_rr(RAMIPS_FE_INT_ENABLE) & ~(RAMIPS_RX_DLY_INT),
                             RAMIPS_FE_INT_ENABLE);
                tasklet_schedule(&priv->rx_tasklet);
        }

        if (fe_int & RAMIPS_TX_DLY_INT)
                ramips_eth_tx_housekeeping((unsigned long) dev);

        return IRQ_HANDLED;
}

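/*
 * Bring the interface up: request the FE interrupt, allocate and
 * program the DMA rings, enable the PDMA engine, configure delayed
 * interrupts, clear the GDMA/CDMA checksum generation bits and reset
 * the frame engine before starting the TX queue.
 */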
static int
ramips_eth_open(struct net_device *dev)
{
        struct raeth_priv *priv = netdev_priv(dev);
        int err;

        err = request_irq(dev->irq, ramips_eth_irq, IRQF_DISABLED,
                          dev->name, dev);
        if (err)
                return err;

        err = ramips_alloc_dma(dev);
        if (err)
                goto err_free_irq;

        ramips_setup_dma(dev);
        ramips_fe_wr((ramips_fe_rr(RAMIPS_PDMA_GLO_CFG) & 0xff) |
                     (RAMIPS_TX_WB_DDONE | RAMIPS_RX_DMA_EN |
                      RAMIPS_TX_DMA_EN | RAMIPS_PDMA_SIZE_4DWORDS),
                     RAMIPS_PDMA_GLO_CFG);
        ramips_fe_wr((ramips_fe_rr(RAMIPS_FE_GLO_CFG) &
                      ~(RAMIPS_US_CYC_CNT_MASK << RAMIPS_US_CYC_CNT_SHIFT)) |
                     ((rt305x_sys_freq / RAMIPS_US_CYC_CNT_DIVISOR) << RAMIPS_US_CYC_CNT_SHIFT),
                     RAMIPS_FE_GLO_CFG);

        tasklet_init(&priv->tx_housekeeping_tasklet, ramips_eth_tx_housekeeping,
                     (unsigned long) dev);
        tasklet_init(&priv->rx_tasklet, ramips_eth_rx_hw, (unsigned long) dev);

        ramips_fe_wr(RAMIPS_DELAY_INIT, RAMIPS_DLY_INT_CFG);
        ramips_fe_wr(RAMIPS_TX_DLY_INT | RAMIPS_RX_DLY_INT, RAMIPS_FE_INT_ENABLE);
        ramips_fe_wr(ramips_fe_rr(RAMIPS_GDMA1_FWD_CFG) &
                     ~(RAMIPS_GDM1_ICS_EN | RAMIPS_GDM1_TCS_EN | RAMIPS_GDM1_UCS_EN | 0xffff),
                     RAMIPS_GDMA1_FWD_CFG);
        ramips_fe_wr(ramips_fe_rr(RAMIPS_CDMA_CSG_CFG) &
                     ~(RAMIPS_ICS_GEN_EN | RAMIPS_TCS_GEN_EN | RAMIPS_UCS_GEN_EN),
                     RAMIPS_CDMA_CSG_CFG);
        ramips_fe_wr(RAMIPS_PSE_FQFC_CFG_INIT, RAMIPS_PSE_FQ_CFG);
        ramips_fe_wr(1, RAMIPS_FE_RST_GL);
        ramips_fe_wr(0, RAMIPS_FE_RST_GL);

        netif_start_queue(dev);
        return 0;

err_free_irq:
        free_irq(dev->irq, dev);
        return err;
}

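/*
 * Bring the interface down: stop the PDMA engine, release the IRQ,
 * kill the tasklets and free the DMA rings.
 */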
static int
ramips_eth_stop(struct net_device *dev)
{
        struct raeth_priv *priv = netdev_priv(dev);

        ramips_fe_wr(ramips_fe_rr(RAMIPS_PDMA_GLO_CFG) &
                     ~(RAMIPS_TX_WB_DDONE | RAMIPS_RX_DMA_EN | RAMIPS_TX_DMA_EN),
                     RAMIPS_PDMA_GLO_CFG);
        free_irq(dev->irq, dev);
        netif_stop_queue(dev);
        tasklet_kill(&priv->tx_housekeeping_tasklet);
        tasklet_kill(&priv->rx_tasklet);
        ramips_cleanup_dma(dev);
        printk(KERN_DEBUG "ramips_eth: stopped\n");
        return 0;
}

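/*
 * Late device initialization (dev->init): reset the frame engine via
 * the platform hook, program the MAC address from platform data and
 * fill in the net_device callbacks.
 */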
static int __init
ramips_eth_probe(struct net_device *dev)
{
        struct raeth_priv *priv = netdev_priv(dev);
        struct sockaddr addr;

        BUG_ON(!priv->plat->reset_fe);
        priv->plat->reset_fe();
        net_srandom(jiffies);
        memcpy(addr.sa_data, priv->plat->mac, 6);
        ramips_eth_set_mac_addr(dev, &addr);

        ether_setup(dev);
        dev->open = ramips_eth_open;
        dev->stop = ramips_eth_stop;
        dev->hard_start_xmit = ramips_eth_hard_start_xmit;
        dev->set_mac_address = ramips_eth_set_mac_addr;
        dev->mtu = 1500;
        dev->tx_timeout = ramips_eth_timeout;
        dev->watchdog_timeo = TX_TIMEOUT;
        spin_lock_init(&priv->page_lock);

        return 0;
}

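/*
 * Platform driver probe: map the frame engine registers, allocate and
 * register the ethernet device and, on RT305x, initialize the embedded
 * switch.
 */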
static int
ramips_eth_plat_probe(struct platform_device *plat)
{
        struct raeth_priv *priv;
        struct ramips_eth_platform_data *data = plat->dev.platform_data;
        struct resource *res;
        int err;

        if (!data) {
                dev_err(&plat->dev, "no platform data specified\n");
                return -EINVAL;
        }

        res = platform_get_resource(plat, IORESOURCE_MEM, 0);
        if (!res) {
                dev_err(&plat->dev, "no memory resource found\n");
                return -ENXIO;
        }

        ramips_fe_base = ioremap_nocache(res->start, res->end - res->start + 1);
        if (!ramips_fe_base)
                return -ENOMEM;

        ramips_dev = alloc_etherdev(sizeof(struct raeth_priv));
        if (!ramips_dev) {
                dev_err(&plat->dev, "alloc_etherdev failed\n");
                err = -ENOMEM;
                goto err_unmap;
        }

        strcpy(ramips_dev->name, "eth%d");
        ramips_dev->irq = platform_get_irq(plat, 0);
        if (ramips_dev->irq < 0) {
                dev_err(&plat->dev, "no IRQ resource found\n");
                err = -ENXIO;
                goto err_free_dev;
        }
        ramips_dev->addr_len = ETH_ALEN;
        ramips_dev->base_addr = (unsigned long) ramips_fe_base;
        ramips_dev->init = ramips_eth_probe;

        priv = netdev_priv(ramips_dev);
        priv->plat = data;

        err = register_netdev(ramips_dev);
        if (err) {
                dev_err(&plat->dev, "error bringing up device\n");
                goto err_free_dev;
        }

#ifdef CONFIG_RALINK_RT305X
        rt305x_esw_init();
#endif
        printk(KERN_DEBUG "ramips_eth: loaded\n");
        return 0;

err_free_dev:
        free_netdev(ramips_dev);
err_unmap:
        iounmap(ramips_fe_base);
        return err;
}

static int
ramips_eth_plat_remove(struct platform_device *plat)
{
        unregister_netdev(ramips_dev);
        free_netdev(ramips_dev);
        printk(KERN_DEBUG "ramips_eth: unloaded\n");
        return 0;
}

static struct platform_driver ramips_eth_driver = {
        .probe = ramips_eth_plat_probe,
        .remove = ramips_eth_plat_remove,
        .driver = {
                .name = "ramips_eth",
                .owner = THIS_MODULE,
        },
};

static int __init
ramips_eth_init(void)
{
        int ret = platform_driver_register(&ramips_eth_driver);
        if (ret)
                printk(KERN_ERR
                       "ramips_eth: Error registering platform driver!\n");
        return ret;
}

static void __exit
ramips_eth_cleanup(void)
{
        platform_driver_unregister(&ramips_eth_driver);
}

module_init(ramips_eth_init);
module_exit(ramips_eth_cleanup);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
MODULE_DESCRIPTION("Ethernet driver for ramips boards");