ramips_eth: pass 'raeth_priv' struct directly to dma specific functions
[openwrt/svn-archive/archive.git] / target / linux / ramips / files / drivers / net / ramips.c
1 /*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License as published by
4 * the Free Software Foundation; version 2 of the License
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
14 *
15 * Copyright (C) 2009 John Crispin <blogic@openwrt.org>
16 */
17
18 #include <linux/module.h>
19 #include <linux/kernel.h>
20 #include <linux/types.h>
21 #include <linux/dma-mapping.h>
22 #include <linux/init.h>
23 #include <linux/skbuff.h>
24 #include <linux/etherdevice.h>
25 #include <linux/platform_device.h>
26
27 #include <ramips_eth_platform.h>
28 #include "ramips_eth.h"
29
30 #define TX_TIMEOUT (20 * HZ / 100)
31 #define MAX_RX_LENGTH 1600
32
33 #ifdef CONFIG_RALINK_RT305X
34 #include "ramips_esw.c"
35 #endif
36
37 #define phys_to_bus(a) (a & 0x1FFFFFFF)
38
39 static struct net_device * ramips_dev;
40 static void __iomem *ramips_fe_base = 0;
41
42 static inline void
43 ramips_fe_wr(u32 val, unsigned reg)
44 {
45 __raw_writel(val, ramips_fe_base + reg);
46 }
47
48 static inline u32
49 ramips_fe_rr(unsigned reg)
50 {
51 return __raw_readl(ramips_fe_base + reg);
52 }
53
54 static void
55 ramips_cleanup_dma(struct raeth_priv *re)
56 {
57 int i;
58
59 for (i = 0; i < NUM_RX_DESC; i++)
60 if (re->rx_skb[i])
61 dev_kfree_skb_any(re->rx_skb[i]);
62
63 if (re->rx)
64 dma_free_coherent(NULL,
65 NUM_RX_DESC * sizeof(struct ramips_rx_dma),
66 re->rx, re->phy_rx);
67
68 if (re->tx)
69 dma_free_coherent(NULL,
70 NUM_TX_DESC * sizeof(struct ramips_tx_dma),
71 re->tx, re->phy_tx);
72 }
73
74 static int
75 ramips_alloc_dma(struct raeth_priv *re)
76 {
77 int err = -ENOMEM;
78 int i;
79
80 re->skb_free_idx = 0;
81
82 /* setup tx ring */
83 re->tx = dma_alloc_coherent(NULL,
84 NUM_TX_DESC * sizeof(struct ramips_tx_dma),
85 &re->phy_tx, GFP_ATOMIC);
86 if (!re->tx)
87 goto err_cleanup;
88
89 memset(re->tx, 0, NUM_TX_DESC * sizeof(struct ramips_tx_dma));
90 for (i = 0; i < NUM_TX_DESC; i++) {
91 re->tx[i].txd2 |= TX_DMA_LSO | TX_DMA_DONE;
92 re->tx[i].txd4 &= (TX_DMA_QN_MASK | TX_DMA_PN_MASK);
93 re->tx[i].txd4 |= TX_DMA_QN(3) | TX_DMA_PN(1);
94 }
95
96 /* setup rx ring */
97 re->rx = dma_alloc_coherent(NULL,
98 NUM_RX_DESC * sizeof(struct ramips_rx_dma),
99 &re->phy_rx, GFP_ATOMIC);
100 if (!re->rx)
101 goto err_cleanup;
102
103 memset(re->rx, 0, sizeof(struct ramips_rx_dma) * NUM_RX_DESC);
104 for (i = 0; i < NUM_RX_DESC; i++) {
105 struct sk_buff *new_skb = dev_alloc_skb(MAX_RX_LENGTH + 2);
106
107 if (!new_skb)
108 goto err_cleanup;
109
110 skb_reserve(new_skb, 2);
111 re->rx[i].rxd1 = dma_map_single(NULL,
112 skb_put(new_skb, 2),
113 MAX_RX_LENGTH + 2,
114 DMA_FROM_DEVICE);
115 re->rx[i].rxd2 |= RX_DMA_LSO;
116 re->rx_skb[i] = new_skb;
117 }
118
119 return 0;
120
121 err_cleanup:
122 ramips_cleanup_dma(re);
123 return err;
124 }
125
/*
 * Program the PDMA with the physical addresses and sizes of the two
 * descriptor rings, then reset the ring index logic.  The register
 * write order matters: base/count/index first, reset strobe last.
 */
static void
ramips_setup_dma(struct raeth_priv *re)
{
	/* TX ring 0: base, length, start index, then reset the TX index */
	ramips_fe_wr(phys_to_bus(re->phy_tx), RAMIPS_TX_BASE_PTR0);
	ramips_fe_wr(NUM_TX_DESC, RAMIPS_TX_MAX_CNT0);
	ramips_fe_wr(0, RAMIPS_TX_CTX_IDX0);
	ramips_fe_wr(RAMIPS_PST_DTX_IDX0, RAMIPS_PDMA_RST_CFG);

	/* RX ring 0: CALC_IDX points at the last descriptor the CPU owns */
	ramips_fe_wr(phys_to_bus(re->phy_rx), RAMIPS_RX_BASE_PTR0);
	ramips_fe_wr(NUM_RX_DESC, RAMIPS_RX_MAX_CNT0);
	ramips_fe_wr((NUM_RX_DESC - 1), RAMIPS_RX_CALC_IDX0);
	ramips_fe_wr(RAMIPS_PST_DRX_IDX0, RAMIPS_PDMA_RST_CFG);
}
139
140 static int
141 ramips_eth_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
142 {
143 struct raeth_priv *priv = netdev_priv(dev);
144 unsigned long tx;
145 unsigned int tx_next;
146 unsigned int mapped_addr;
147 unsigned long flags;
148
149 if (priv->plat->min_pkt_len) {
150 if (skb->len < priv->plat->min_pkt_len) {
151 if (skb_padto(skb, priv->plat->min_pkt_len)) {
152 printk(KERN_ERR
153 "ramips_eth: skb_padto failed\n");
154 kfree_skb(skb);
155 return 0;
156 }
157 skb_put(skb, priv->plat->min_pkt_len - skb->len);
158 }
159 }
160
161 dev->trans_start = jiffies;
162 mapped_addr = (unsigned int) dma_map_single(NULL, skb->data, skb->len,
163 DMA_TO_DEVICE);
164 dma_sync_single_for_device(NULL, mapped_addr, skb->len, DMA_TO_DEVICE);
165 spin_lock_irqsave(&priv->page_lock, flags);
166 tx = ramips_fe_rr(RAMIPS_TX_CTX_IDX0);
167 if (tx == NUM_TX_DESC - 1)
168 tx_next = 0;
169 else
170 tx_next = tx + 1;
171
172 if ((priv->tx_skb[tx]) || (priv->tx_skb[tx_next]) ||
173 !(priv->tx[tx].txd2 & TX_DMA_DONE) ||
174 !(priv->tx[tx_next].txd2 & TX_DMA_DONE))
175 goto out;
176
177 priv->tx[tx].txd1 = mapped_addr;
178 priv->tx[tx].txd2 &= ~(TX_DMA_PLEN0_MASK | TX_DMA_DONE);
179 priv->tx[tx].txd2 |= TX_DMA_PLEN0(skb->len);
180 dev->stats.tx_packets++;
181 dev->stats.tx_bytes += skb->len;
182 priv->tx_skb[tx] = skb;
183 wmb();
184 ramips_fe_wr((tx + 1) % NUM_TX_DESC, RAMIPS_TX_CTX_IDX0);
185 spin_unlock_irqrestore(&priv->page_lock, flags);
186 return NETDEV_TX_OK;
187
188 out:
189 spin_unlock_irqrestore(&priv->page_lock, flags);
190 dev->stats.tx_dropped++;
191 kfree_skb(skb);
192 return NETDEV_TX_OK;
193 }
194
195 static void
196 ramips_eth_rx_hw(unsigned long ptr)
197 {
198 struct net_device *dev = (struct net_device *) ptr;
199 struct raeth_priv *priv = netdev_priv(dev);
200 int rx;
201 int max_rx = 16;
202
203 while (max_rx) {
204 struct sk_buff *rx_skb, *new_skb;
205
206 rx = (ramips_fe_rr(RAMIPS_RX_CALC_IDX0) + 1) % NUM_RX_DESC;
207 if (!(priv->rx[rx].rxd2 & RX_DMA_DONE))
208 break;
209 max_rx--;
210
211 rx_skb = priv->rx_skb[rx];
212 rx_skb->len = RX_DMA_PLEN0(priv->rx[rx].rxd2);
213 rx_skb->dev = dev;
214 rx_skb->protocol = eth_type_trans(rx_skb, dev);
215 rx_skb->ip_summed = CHECKSUM_NONE;
216 dev->stats.rx_packets++;
217 dev->stats.rx_bytes += rx_skb->len;
218 netif_rx(rx_skb);
219
220 new_skb = netdev_alloc_skb(dev, MAX_RX_LENGTH + 2);
221 priv->rx_skb[rx] = new_skb;
222 BUG_ON(!new_skb);
223 skb_reserve(new_skb, 2);
224 priv->rx[rx].rxd1 = dma_map_single(NULL,
225 new_skb->data,
226 MAX_RX_LENGTH + 2,
227 DMA_FROM_DEVICE);
228 priv->rx[rx].rxd2 &= ~RX_DMA_DONE;
229 wmb();
230 ramips_fe_wr(rx, RAMIPS_RX_CALC_IDX0);
231 }
232
233 if (max_rx == 0)
234 tasklet_schedule(&priv->rx_tasklet);
235 else
236 ramips_fe_wr(ramips_fe_rr(RAMIPS_FE_INT_ENABLE) | RAMIPS_RX_DLY_INT,
237 RAMIPS_FE_INT_ENABLE);
238 }
239
240 static void
241 ramips_eth_tx_housekeeping(unsigned long ptr)
242 {
243 struct net_device *dev = (struct net_device*)ptr;
244 struct raeth_priv *priv = netdev_priv(dev);
245
246 while ((priv->tx[priv->skb_free_idx].txd2 & TX_DMA_DONE) &&
247 (priv->tx_skb[priv->skb_free_idx])) {
248 dev_kfree_skb_irq((struct sk_buff *) priv->tx_skb[priv->skb_free_idx]);
249 priv->tx_skb[priv->skb_free_idx] = 0;
250 priv->skb_free_idx++;
251 if (priv->skb_free_idx >= NUM_TX_DESC)
252 priv->skb_free_idx = 0;
253 }
254
255 ramips_fe_wr(ramips_fe_rr(RAMIPS_FE_INT_ENABLE) | RAMIPS_TX_DLY_INT,
256 RAMIPS_FE_INT_ENABLE);
257 }
258
259 static int
260 ramips_eth_set_mac_addr(struct net_device *dev, void *priv)
261 {
262 unsigned char *mac = (unsigned char *) priv;
263
264 if (netif_running(dev))
265 return -EBUSY;
266
267 memcpy(dev->dev_addr, ((struct sockaddr*)priv)->sa_data, dev->addr_len);
268 ramips_fe_wr((mac[0] << 8) | mac[1], RAMIPS_GDMA1_MAC_ADRH);
269 ramips_fe_wr((mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5],
270 RAMIPS_GDMA1_MAC_ADRL);
271 return 0;
272 }
273
274 static void
275 ramips_eth_timeout(struct net_device *dev)
276 {
277 struct raeth_priv *priv = netdev_priv(dev);
278
279 tasklet_schedule(&priv->tx_housekeeping_tasklet);
280 }
281
282 static irqreturn_t
283 ramips_eth_irq(int irq, void *dev)
284 {
285 struct raeth_priv *priv = netdev_priv(dev);
286 unsigned long fe_int = ramips_fe_rr(RAMIPS_FE_INT_STATUS);
287
288 ramips_fe_wr(0xFFFFFFFF, RAMIPS_FE_INT_STATUS);
289
290 if (fe_int & RAMIPS_RX_DLY_INT) {
291 ramips_fe_wr(ramips_fe_rr(RAMIPS_FE_INT_ENABLE) & ~(RAMIPS_RX_DLY_INT),
292 RAMIPS_FE_INT_ENABLE);
293 tasklet_schedule(&priv->rx_tasklet);
294 }
295
296 if (fe_int & RAMIPS_TX_DLY_INT)
297 ramips_eth_tx_housekeeping((unsigned long)dev);
298
299 return IRQ_HANDLED;
300 }
301
/*
 * ndo_open: bring the interface up.
 *
 * Sequence: grab the IRQ, allocate and program the DMA rings, enable
 * the PDMA engines, set the microsecond cycle counter from the system
 * clock, init the tasklets, configure delayed interrupts, disable all
 * hardware checksum offloads, init the PSE free queue, and finally
 * pulse the FE reset before starting the queue.  The register write
 * order follows the hardware bring-up sequence — do not reorder.
 *
 * Returns 0 on success or a negative errno (IRQ or DMA setup failure).
 */
static int
ramips_eth_open(struct net_device *dev)
{
	struct raeth_priv *priv = netdev_priv(dev);
	int err;

	err = request_irq(dev->irq, ramips_eth_irq, IRQF_DISABLED,
			  dev->name, dev);
	if (err)
		return err;

	err = ramips_alloc_dma(priv);
	if (err)
		goto err_free_irq;

	ramips_setup_dma(priv);
	/* enable TX/RX DMA, write-back of DONE bits, 4-dword descriptors */
	ramips_fe_wr((ramips_fe_rr(RAMIPS_PDMA_GLO_CFG) & 0xff) |
		     (RAMIPS_TX_WB_DDONE | RAMIPS_RX_DMA_EN |
		      RAMIPS_TX_DMA_EN | RAMIPS_PDMA_SIZE_4DWORDS),
		     RAMIPS_PDMA_GLO_CFG);
	/* program the 1us tick used by the delayed-interrupt timers */
	ramips_fe_wr((ramips_fe_rr(RAMIPS_FE_GLO_CFG) &
		      ~(RAMIPS_US_CYC_CNT_MASK << RAMIPS_US_CYC_CNT_SHIFT)) |
		     ((rt305x_sys_freq / RAMIPS_US_CYC_CNT_DIVISOR) << RAMIPS_US_CYC_CNT_SHIFT),
		     RAMIPS_FE_GLO_CFG);

	tasklet_init(&priv->tx_housekeeping_tasklet, ramips_eth_tx_housekeeping,
		     (unsigned long)dev);
	tasklet_init(&priv->rx_tasklet, ramips_eth_rx_hw, (unsigned long)dev);

	/* delayed interrupts, then unmask TX/RX delay interrupts */
	ramips_fe_wr(RAMIPS_DELAY_INIT, RAMIPS_DLY_INT_CFG);
	ramips_fe_wr(RAMIPS_TX_DLY_INT | RAMIPS_RX_DLY_INT, RAMIPS_FE_INT_ENABLE);
	/* no hardware checksum offload in either direction */
	ramips_fe_wr(ramips_fe_rr(RAMIPS_GDMA1_FWD_CFG) &
		     ~(RAMIPS_GDM1_ICS_EN | RAMIPS_GDM1_TCS_EN | RAMIPS_GDM1_UCS_EN | 0xffff),
		     RAMIPS_GDMA1_FWD_CFG);
	ramips_fe_wr(ramips_fe_rr(RAMIPS_CDMA_CSG_CFG) &
		     ~(RAMIPS_ICS_GEN_EN | RAMIPS_TCS_GEN_EN | RAMIPS_UCS_GEN_EN),
		     RAMIPS_CDMA_CSG_CFG);
	ramips_fe_wr(RAMIPS_PSE_FQFC_CFG_INIT, RAMIPS_PSE_FQ_CFG);
	/* pulse the frame engine soft reset */
	ramips_fe_wr(1, RAMIPS_FE_RST_GL);
	ramips_fe_wr(0, RAMIPS_FE_RST_GL);

	netif_start_queue(dev);
	return 0;

err_free_irq:
	free_irq(dev->irq, dev);
	return err;
}
350
/*
 * ndo_stop: bring the interface down.  Disables the PDMA engines
 * first so the hardware stops touching the rings, then releases the
 * IRQ, stops the queue, kills both tasklets and frees the DMA state.
 */
static int
ramips_eth_stop(struct net_device *dev)
{
	struct raeth_priv *priv = netdev_priv(dev);

	ramips_fe_wr(ramips_fe_rr(RAMIPS_PDMA_GLO_CFG) &
		     ~(RAMIPS_TX_WB_DDONE | RAMIPS_RX_DMA_EN | RAMIPS_TX_DMA_EN),
		     RAMIPS_PDMA_GLO_CFG);
	free_irq(dev->irq, dev);
	netif_stop_queue(dev);
	tasklet_kill(&priv->tx_housekeeping_tasklet);
	tasklet_kill(&priv->rx_tasklet);
	ramips_cleanup_dma(priv);
	printk(KERN_DEBUG "ramips_eth: stopped\n");
	return 0;
}
367
/*
 * ndo_init: one-time device initialization called by register_netdev().
 * Resets the frame engine via the platform hook, programs the MAC
 * address supplied by platform data, and fills in the generic ethernet
 * defaults.  Always returns 0.
 */
static int __init
ramips_eth_probe(struct net_device *dev)
{
	struct raeth_priv *priv = netdev_priv(dev);
	struct sockaddr addr;

	/* the platform must provide a reset hook — there is no fallback */
	BUG_ON(!priv->plat->reset_fe);
	priv->plat->reset_fe();
	net_srandom(jiffies);
	memcpy(addr.sa_data, priv->plat->mac, 6);
	ramips_eth_set_mac_addr(dev, &addr);

	ether_setup(dev);
	dev->mtu = 1500;
	dev->watchdog_timeo = TX_TIMEOUT;
	spin_lock_init(&priv->page_lock);

	return 0;
}
387
/* net_device callbacks; everything not listed uses the ethernet defaults */
static const struct net_device_ops ramips_eth_netdev_ops = {
	.ndo_init		= ramips_eth_probe,
	.ndo_open		= ramips_eth_open,
	.ndo_stop		= ramips_eth_stop,
	.ndo_start_xmit		= ramips_eth_hard_start_xmit,
	.ndo_tx_timeout		= ramips_eth_timeout,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address	= ramips_eth_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};
398
399 static int
400 ramips_eth_plat_probe(struct platform_device *plat)
401 {
402 struct raeth_priv *priv;
403 struct ramips_eth_platform_data *data = plat->dev.platform_data;
404 struct resource *res;
405 int err;
406
407 if (!data) {
408 dev_err(&plat->dev, "no platform data specified\n");
409 return -EINVAL;
410 }
411
412 res = platform_get_resource(plat, IORESOURCE_MEM, 0);
413 if (!res) {
414 dev_err(&plat->dev, "no memory resource found\n");
415 return -ENXIO;
416 }
417
418 ramips_fe_base = ioremap_nocache(res->start, res->end - res->start + 1);
419 if (!ramips_fe_base)
420 return -ENOMEM;
421
422 ramips_dev = alloc_etherdev(sizeof(struct raeth_priv));
423 if (!ramips_dev) {
424 dev_err(&plat->dev, "alloc_etherdev failed\n");
425 err = -ENOMEM;
426 goto err_unmap;
427 }
428
429 strcpy(ramips_dev->name, "eth%d");
430 ramips_dev->irq = platform_get_irq(plat, 0);
431 if (ramips_dev->irq < 0) {
432 dev_err(&plat->dev, "no IRQ resource found\n");
433 err = -ENXIO;
434 goto err_free_dev;
435 }
436 ramips_dev->addr_len = ETH_ALEN;
437 ramips_dev->base_addr = (unsigned long)ramips_fe_base;
438 ramips_dev->netdev_ops = &ramips_eth_netdev_ops;
439
440 priv = netdev_priv(ramips_dev);
441 priv->plat = data;
442
443 err = register_netdev(ramips_dev);
444 if (err) {
445 dev_err(&plat->dev, "error bringing up device\n");
446 goto err_free_dev;
447 }
448
449 #ifdef CONFIG_RALINK_RT305X
450 rt305x_esw_init();
451 #endif
452 printk(KERN_DEBUG "ramips_eth: loaded\n");
453 return 0;
454
455 err_free_dev:
456 kfree(ramips_dev);
457 err_unmap:
458 iounmap(ramips_fe_base);
459 return err;
460 }
461
462 static int
463 ramips_eth_plat_remove(struct platform_device *plat)
464 {
465 unregister_netdev(ramips_dev);
466 free_netdev(ramips_dev);
467 printk(KERN_DEBUG "ramips_eth: unloaded\n");
468 return 0;
469 }
470
/* binds against the "ramips_eth" platform device registered by board code */
static struct platform_driver ramips_eth_driver = {
	.probe = ramips_eth_plat_probe,
	.remove = ramips_eth_plat_remove,
	.driver = {
		.name = "ramips_eth",
		.owner = THIS_MODULE,
	},
};
479
480 static int __init
481 ramips_eth_init(void)
482 {
483 int ret = platform_driver_register(&ramips_eth_driver);
484 if (ret)
485 printk(KERN_ERR
486 "ramips_eth: Error registering platfom driver!\n");
487 return ret;
488 }
489
/* Module exit point: unregister the driver (remove() tears down the device). */
static void __exit
ramips_eth_cleanup(void)
{
	platform_driver_unregister(&ramips_eth_driver);
}
495
/* module entry/exit hookup and metadata */
module_init(ramips_eth_init);
module_exit(ramips_eth_cleanup);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
MODULE_DESCRIPTION("ethernet driver for ramips boards");