openwrt/staging/wigyori.git: target/linux/mcs814x/files-3.3/drivers/net/ethernet/mcs8140/nuport_mac.c
1 /*
2 * Moschip MCS8140 Ethernet MAC driver
3 *
4 * Copyright (C) 2003, Moschip Semiconductors
5 * Copyright (C) 2012, Florian Fainelli <florian@openwrt.org>
6 *
7 * Licensed under GPLv2
8 */
9 #include <linux/module.h>
10 #include <linux/slab.h>
11 #include <linux/kernel.h>
12 #include <linux/init.h>
13 #include <linux/netdevice.h>
14 #include <linux/etherdevice.h>
15 #include <linux/delay.h>
16 #include <linux/ethtool.h>
17 #include <linux/mii.h>
18 #include <linux/io.h>
19 #include <linux/interrupt.h>
20 #include <linux/platform_device.h>
21 #include <linux/of.h>
22 #include <linux/irq.h>
23 #include <linux/err.h>
24 #include <linux/phy.h>
25 #include <linux/clk.h>
26 #include <linux/dma-mapping.h>
27
28 #include <asm/unaligned.h>
29 #include <asm/sizes.h>
30
31 /* Hardware registers */
32 #define MAC_BASE_ADDR ((priv->mac_base))
33
34 #define CTRL_REG (MAC_BASE_ADDR)
35 #define MII_BUSY 0x00000001
36 #define MII_WRITE 0x00000002
37 #define MAC_ADDR_HIGH_REG (MAC_BASE_ADDR + 0x04)
38 #define MAC_ADDR_LOW_REG (MAC_BASE_ADDR + 0x08)
39 #define MII_ADDR_REG (MAC_BASE_ADDR + 0x14)
40 #define MII_DATA_REG (MAC_BASE_ADDR + 0x18)
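/*
 * MII_ADDR_REG layout, as used by nuport_mac_mii_read()/_write() below:
 * bits 15:11 carry the PHY address, bits 10:6 the register number,
 * bit 1 (MII_WRITE) selects a write cycle and bit 0 (MII_BUSY) starts the
 * transaction and reads back as the busy flag.
 */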
41 /* Link interrupt registers */
42 #define LINK_INT_CSR (MAC_BASE_ADDR + 0xD0)
43 #define LINK_INT_POLL_TIME (MAC_BASE_ADDR + 0xD4)
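/*
 * LINK_INT_CSR, as programmed in nuport_mac_open() and decoded in
 * nuport_mac_link_interrupt(): bits 4:1 hold the PHY address being
 * monitored and bit 16 reflects the current link state.  The remaining
 * bits of the 0x1041 value written at open time are undocumented here.
 */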
44
45 #define DMA_CHAN_WIDTH 32
46 #define DMA_RX_CHAN 0
47 #define DMA_TX_CHAN 2
48
49 /* Receive DMA registers */
50 #define RX_DMA_BASE ((priv->dma_base) + \
51 (DMA_CHAN_WIDTH * DMA_RX_CHAN))
52 #define RX_BUFFER_ADDR (RX_DMA_BASE + 0x00)
53 #define RX_MAX_BYTES (RX_DMA_BASE + 0x04)
54 #define RX_ACT_BYTES (RX_DMA_BASE + 0x08)
55 #define RX_START_DMA (RX_DMA_BASE + 0x0C)
56 #define RX_DMA_ENH (RX_DMA_BASE + 0x14)
57
58 /* Transmit DMA registers */
59 #define TX_DMA_BASE ((priv->dma_base) + \
60 (DMA_CHAN_WIDTH * DMA_TX_CHAN))
61 #define TX_BUFFER_ADDR (TX_DMA_BASE + 0x00)
62 #define TX_PKT_BYTES (TX_DMA_BASE + 0x04)
63 #define TX_BYTES_SENT (TX_DMA_BASE + 0x08)
64 #define TX_START_DMA (TX_DMA_BASE + 0x0C)
65 #define TX_DMA_STATUS (TX_DMA_BASE + 0x10)
66 #define TX_DMA_ENH (TX_DMA_BASE + 0x14)
67
68 #define RX_ALLOC_SIZE SZ_2K
69 #define MAX_ETH_FRAME_SIZE 1536
70 #define RX_SKB_TAILROOM 128
71 #define RX_SKB_HEADROOM (RX_ALLOC_SIZE - \
72 (MAX_ETH_FRAME_SIZE + RX_SKB_TAILROOM) + 0)
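/*
 * Each 2 KiB RX buffer is laid out as 384 bytes of headroom (reserved via
 * skb_reserve()), up to 1536 bytes of frame data and 128 bytes of tailroom;
 * the per-packet status word read in nuport_mac_rx() sits right after the
 * received frame.
 */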
73
/*                         WDT      Late COL   Length COL  Type */
75 #define ERROR_FILTER_MASK ((1<<14) | (1<<15) | (1<<16) | (1<<17) | (0<<18) | \
76 /* MII Dribbling CRC Len/type Control */\
77 (1<<19) | (1<<20) | (1<<21) | (0<<24) | (1<<25) | \
78 /* Unsup Missed */\
79 (1<<26) | (0<<31))
80 #define TX_RING_SIZE 30
81 #define RX_RING_SIZE 30
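/*
 * Software ring bookkeeping: cur_tx is the slot the stack fills next and
 * dma_tx the slot currently (or next) handed to the DMA engine, with
 * valid_txskb[] flagging slots that still hold a pending skb.  On the
 * receive side dma_rx is where the hardware delivers the next frame,
 * cur_rx is the NAPI drain pointer and irq_rxskb[] marks slots whose
 * buffer is ready to be handed back to the hardware.
 */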
82
83 static inline u32 nuport_mac_readl(void __iomem *reg)
84 {
85 return __raw_readl(reg);
86 }
87
88 static inline u8 nuport_mac_readb(void __iomem *reg)
89 {
90 return __raw_readb(reg);
91 }
92
93 static inline void nuport_mac_writel(u32 value, void __iomem *reg)
94 {
95 __raw_writel(value, reg);
96 }
97
static inline void nuport_mac_writeb(u8 value, void __iomem *reg)
{
	__raw_writeb(value, reg);
}
102
103 /* MAC private data */
104 struct nuport_mac_priv {
105 spinlock_t lock;
106
107 void __iomem *mac_base;
108 void __iomem *dma_base;
109
110 int rx_irq;
111 int tx_irq;
112 int link_irq;
113 struct clk *emac_clk;
114 struct clk *ephy_clk;
115
116 /* Transmit buffers */
117 struct sk_buff *tx_skb[TX_RING_SIZE];
118 unsigned int valid_txskb[TX_RING_SIZE];
119 unsigned int cur_tx;
120 unsigned int dma_tx;
121 unsigned int tx_full;
122
123 /* Receive buffers */
124 struct sk_buff *rx_skb[RX_RING_SIZE];
125 unsigned int irq_rxskb[RX_RING_SIZE];
126 int pkt_len[RX_RING_SIZE];
127 unsigned int cur_rx;
128 unsigned int dma_rx;
129 unsigned int rx_full;
130
131 unsigned int first_pkt;
132
133 /* Private data */
134 struct napi_struct napi;
135 struct net_device *dev;
136 struct platform_device *pdev;
137 struct mii_bus *mii_bus;
138 struct phy_device *phydev;
139 int old_link;
140 int old_duplex;
141 u32 msg_level;
142 unsigned int buffer_shifting_len;
143 };
144
145 static inline int nuport_mac_mii_busy_wait(struct nuport_mac_priv *priv)
146 {
147 unsigned long curr;
148 unsigned long finish = jiffies + 3 * HZ;
149
150 do {
151 curr = jiffies;
152 if (!(nuport_mac_readl(MII_ADDR_REG) & MII_BUSY))
153 return 0;
154 cpu_relax();
155 } while (!time_after_eq(curr, finish));
156
157 return -EBUSY;
158 }
159
160 /* Read from PHY registers */
161 static int nuport_mac_mii_read(struct mii_bus *bus,
162 int mii_id, int regnum)
163 {
164 struct net_device *dev = bus->priv;
165 struct nuport_mac_priv *priv = netdev_priv(dev);
166 int ret;
167 u32 val = 0;
168
169 ret = nuport_mac_mii_busy_wait(priv);
170 if (ret)
171 return ret;
172
173 val |= (mii_id << 11) | (regnum << 6) | MII_BUSY;
174 nuport_mac_writel(val, MII_ADDR_REG);
175 ret = nuport_mac_mii_busy_wait(priv);
176 if (ret)
177 return ret;
178
179 return nuport_mac_readl(MII_DATA_REG);
180 }
181
182 static int nuport_mac_mii_write(struct mii_bus *bus, int mii_id,
183 int regnum, u16 value)
184 {
185 struct net_device *dev = bus->priv;
186 struct nuport_mac_priv *priv = netdev_priv(dev);
187 int ret;
188 u32 val = 0;
189
190 ret = nuport_mac_mii_busy_wait(priv);
191 if (ret)
192 return ret;
193
194 val |= (mii_id << 11) | (regnum << 6) | MII_BUSY | MII_WRITE;
195 nuport_mac_writel(value, MII_DATA_REG);
196 nuport_mac_writel(val, MII_ADDR_REG);
197
198 return nuport_mac_mii_busy_wait(priv);
199 }
200
201 static int nuport_mac_mii_reset(struct mii_bus *bus)
202 {
203 return 0;
204 }
205
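/*
 * Kick a single-buffer TX DMA transfer: poll bit 0 of TX_START_DMA until
 * the previous transfer has completed, program the buffer address and the
 * length minus one, then write 0x0D to start.  The meaning of the
 * individual start bits is not documented in this driver.
 */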
206 static int nuport_mac_start_tx_dma(struct nuport_mac_priv *priv,
207 struct sk_buff *skb)
208 {
209 dma_addr_t p;
210 u32 reg;
211 unsigned int timeout = 2048;
212
213 while (timeout--) {
214 reg = nuport_mac_readl(TX_START_DMA);
215 if (!(reg & 0x01)) {
216 netdev_dbg(priv->dev, "dma ready\n");
217 break;
218 }
219 cpu_relax();
220 }
221
222 if (!timeout)
223 return -EBUSY;
224
225 p = dma_map_single(&priv->pdev->dev, skb->data,
226 skb->len, DMA_TO_DEVICE);
227
228 /* enable enhanced mode */
229 nuport_mac_writel(0x01, TX_DMA_ENH);
230 nuport_mac_writel(p, TX_BUFFER_ADDR);
231 nuport_mac_writel((skb->len) - 1, TX_PKT_BYTES);
232 wmb();
233 nuport_mac_writel(0x0D, TX_START_DMA);
234
235 return 0;
236 }
237
238 static void nuport_mac_reset_tx_dma(struct nuport_mac_priv *priv)
239 {
240 u32 reg;
241
242 reg = nuport_mac_readl(TX_START_DMA);
243 reg |= (1 << 24);
244 nuport_mac_writel(reg, TX_START_DMA);
245 }
246
247 static int nuport_mac_start_rx_dma(struct nuport_mac_priv *priv,
248 struct sk_buff *skb)
249 {
250 dma_addr_t p;
251 u32 reg;
252 unsigned int timeout = 2048;
253
254 while (timeout--) {
255 reg = nuport_mac_readl(RX_START_DMA);
256 if (!(reg & 0x01)) {
257 netdev_dbg(priv->dev, "dma ready\n");
258 break;
259 }
260 cpu_relax();
261 }
262
263 if (!timeout)
264 return -EBUSY;
265
266 p = dma_map_single(&priv->pdev->dev, skb->data,
267 RX_ALLOC_SIZE, DMA_FROM_DEVICE);
268
269 nuport_mac_writel(p, RX_BUFFER_ADDR);
270 wmb();
271 nuport_mac_writel(0x01, RX_START_DMA);
272
273 return 0;
274 }
275
276 static void nuport_mac_reset_rx_dma(struct nuport_mac_priv *priv)
277 {
278 u32 reg;
279
280 reg = nuport_mac_readl(RX_START_DMA);
281 reg |= (1 << 1);
282 nuport_mac_writel(reg, RX_START_DMA);
283 }
284
/*
 * Clearing bit 1 of RX_DMA_ENH appears to mask further RX DMA interrupts;
 * the RX interrupt handler uses this before scheduling NAPI, and
 * nuport_mac_poll() sets the bit again once polling is done.
 */
286 static void nuport_mac_disable_rx_dma(struct nuport_mac_priv *priv)
287 {
288 u32 reg;
289
290 reg = nuport_mac_readl(RX_DMA_ENH);
291 reg &= ~(1 << 1);
292 nuport_mac_writel(reg, RX_DMA_ENH);
293 }
294
295 static void nuport_mac_enable_rx_dma(struct nuport_mac_priv *priv)
296 {
297 u32 reg;
298
299 reg = nuport_mac_readl(RX_DMA_ENH);
300 reg |= (1 << 1);
301 nuport_mac_writel(reg, RX_DMA_ENH);
302 }
303
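/*
 * The MAC transmits one buffer at a time: the first packet after the
 * queue goes idle is started directly from ndo_start_xmit() (first_pkt),
 * and each TX completion interrupt starts the DMA for the next queued
 * slot, if any.
 */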
304 /* Add packets to the transmit queue */
305 static int nuport_mac_start_xmit(struct sk_buff *skb, struct net_device *dev)
306 {
307 unsigned long flags;
308 struct nuport_mac_priv *priv = netdev_priv(dev);
309 int ret;
310
311 if (netif_queue_stopped(dev)) {
312 netdev_warn(dev, "netif queue was stopped, restarting\n");
313 netif_start_queue(dev);
314 }
315
316 spin_lock_irqsave(&priv->lock, flags);
317 if (priv->first_pkt) {
318 ret = nuport_mac_start_tx_dma(priv, skb);
319 if (ret) {
320 netif_stop_queue(dev);
321 spin_unlock_irqrestore(&priv->lock, flags);
322 netdev_err(dev, "transmit path busy\n");
323 return NETDEV_TX_BUSY;
324 }
325 priv->first_pkt = 0;
326 }
327
328 priv->tx_skb[priv->cur_tx] = skb;
329 dev->stats.tx_bytes += skb->len;
330 dev->stats.tx_packets++;
331 priv->valid_txskb[priv->cur_tx] = 1;
332 priv->cur_tx++;
333 dev->trans_start = jiffies;
334
335 if (priv->cur_tx >= TX_RING_SIZE)
336 priv->cur_tx = 0;
337
338 spin_unlock_irqrestore(&priv->lock, flags);
339
340 if (priv->valid_txskb[priv->cur_tx]) {
341 priv->tx_full = 1;
342 netdev_err(dev, "stopping queue\n");
343 netif_stop_queue(dev);
344 }
345
346 return NETDEV_TX_OK;
347 }
348
349 static void nuport_mac_adjust_link(struct net_device *dev)
350 {
351 struct nuport_mac_priv *priv = netdev_priv(dev);
352 struct phy_device *phydev = priv->phydev;
353 unsigned int status_changed = 0;
354 u32 reg;
355
356 BUG_ON(!phydev);
357
358 if (priv->old_link != phydev->link) {
359 status_changed = 1;
360 priv->old_link = phydev->link;
361 }
362
	if (phydev->link && (priv->old_duplex != phydev->duplex)) {
364 reg = nuport_mac_readl(CTRL_REG);
365 if (phydev->duplex == DUPLEX_FULL)
366 reg |= (1 << 20);
367 else
368 reg &= ~(1 << 20);
369 nuport_mac_writel(reg, CTRL_REG);
370
371 status_changed = 1;
372 priv->old_duplex = phydev->duplex;
373 }
374
375 if (!status_changed)
376 return;
377
378 pr_info("%s: link %s", dev->name, phydev->link ?
379 "UP" : "DOWN");
380 if (phydev->link) {
381 pr_cont(" - %d/%s", phydev->speed,
382 phydev->duplex == DUPLEX_FULL ? "full" : "half");
383 }
384 pr_cont("\n");
385 }
386
387 static irqreturn_t nuport_mac_link_interrupt(int irq, void *dev_id)
388 {
389 struct net_device *dev = dev_id;
390 struct nuport_mac_priv *priv = netdev_priv(dev);
391 u32 reg;
392 u8 phy_addr;
393
394 reg = nuport_mac_readl(LINK_INT_CSR);
395 phy_addr = (reg >> 1) & 0x0f;
396
397 if (phy_addr != priv->phydev->addr) {
398 netdev_err(dev, "spurious PHY irq (phy: %d)\n", phy_addr);
399 return IRQ_NONE;
400 }
401
	priv->phydev->link = !!(reg & (1 << 16));
403 nuport_mac_adjust_link(dev);
404
405 return IRQ_HANDLED;
406 }
407
408 static irqreturn_t nuport_mac_tx_interrupt(int irq, void *dev_id)
409 {
410 struct net_device *dev = (struct net_device *)dev_id;
411 struct nuport_mac_priv *priv = netdev_priv(dev);
412 struct sk_buff *skb;
413 unsigned long flags;
414 int ret;
415 u32 reg;
416
417 spin_lock_irqsave(&priv->lock, flags);
418 /* clear status word available if ready */
419 reg = nuport_mac_readl(TX_START_DMA);
420 if (reg & (1 << 18)) {
421 nuport_mac_writel(reg, TX_START_DMA);
422 reg = nuport_mac_readl(TX_DMA_STATUS);
423
424 if (reg & 1)
425 dev->stats.tx_errors++;
426 } else
427 netdev_dbg(dev, "no status word: %08x\n", reg);
428
429 skb = priv->tx_skb[priv->dma_tx];
430 priv->tx_skb[priv->dma_tx] = NULL;
431 priv->valid_txskb[priv->dma_tx] = 0;
432 dev_kfree_skb_irq(skb);
433
434 priv->dma_tx++;
435 if (priv->dma_tx >= TX_RING_SIZE)
436 priv->dma_tx = 0;
437
438 if (!priv->valid_txskb[priv->dma_tx])
439 priv->first_pkt = 1;
440 else {
441 ret = nuport_mac_start_tx_dma(priv, priv->tx_skb[priv->dma_tx]);
442 if (ret)
443 netdev_err(dev, "failed to restart TX dma\n");
444 }
445
446 if (priv->tx_full) {
447 netdev_dbg(dev, "restarting transmit queue\n");
448 netif_wake_queue(dev);
449 priv->tx_full = 0;
450 }
451
452 spin_unlock_irqrestore(&priv->lock, flags);
453
454 return IRQ_HANDLED;
455 }
456
457 static unsigned int nuport_mac_has_work(struct nuport_mac_priv *priv)
458 {
459 unsigned int i;
460
461 for (i = 0; i < RX_RING_SIZE; i++)
462 if (priv->rx_skb[i])
463 return 1;
464
465 return 0;
466 }
467
468 static irqreturn_t nuport_mac_rx_interrupt(int irq, void *dev_id)
469 {
470 struct net_device *dev = (struct net_device *)dev_id;
471 struct nuport_mac_priv *priv = netdev_priv(dev);
472 unsigned long flags;
473 int ret;
474
475 spin_lock_irqsave(&priv->lock, flags);
476 if (!priv->rx_full) {
477 priv->pkt_len[priv->dma_rx] = nuport_mac_readl(RX_ACT_BYTES) - 4;
478 priv->irq_rxskb[priv->dma_rx] = 0;
479 priv->dma_rx++;
480
481 if (priv->dma_rx >= RX_RING_SIZE)
482 priv->dma_rx = 0;
483 } else
484 priv->rx_full = 0;
485
486 if (priv->irq_rxskb[priv->dma_rx] == 1) {
487 ret = nuport_mac_start_rx_dma(priv, priv->rx_skb[priv->dma_rx]);
488 if (ret)
489 netdev_err(dev, "failed to start rx dma\n");
490 } else {
491 priv->rx_full = 1;
492 netdev_dbg(dev, "RX ring full\n");
493 }
494
495 if (likely(nuport_mac_has_work(priv))) {
496 /* find a way to disable DMA rx irq */
497 nuport_mac_disable_rx_dma(priv);
498 napi_schedule(&priv->napi);
499 }
500 spin_unlock_irqrestore(&priv->lock, flags);
501
502 return IRQ_HANDLED;
503 }
504
505 /* Process received packets in tasklet */
506 static int nuport_mac_rx(struct net_device *dev, int limit)
507 {
508 struct nuport_mac_priv *priv = netdev_priv(dev);
509 struct sk_buff *skb;
510 int len, status;
511 int count = 0;
512
513 while (count < limit && !priv->irq_rxskb[priv->cur_rx]) {
514 skb = priv->rx_skb[priv->cur_rx];
515 len = priv->pkt_len[priv->cur_rx];
516
517 /* Remove 2 bytes added by RX buffer shifting */
518 len = len - priv->buffer_shifting_len;
519 skb->data = skb->data + priv->buffer_shifting_len;
520
521 /* Get packet status */
522 status = get_unaligned((u32 *) (skb->data + len));
523 skb->dev = dev;
524
525 /* packet filter failed */
526 if (!(status & (1 << 30))) {
527 dev_kfree_skb_irq(skb);
528 goto exit;
529 }
530
531 /* missed frame */
532 if (status & (1 << 31)) {
533 dev->stats.rx_missed_errors++;
534 dev_kfree_skb_irq(skb);
535 goto exit;
536 }
537
538 /* Not ethernet type */
539 if ((!(status & (1 << 18))) || (status & ERROR_FILTER_MASK))
540 dev->stats.rx_errors++;
541
542 if (len > MAX_ETH_FRAME_SIZE) {
543 dev_kfree_skb_irq(skb);
544 goto exit;
545 } else
546 skb_put(skb, len);
547
548 skb->protocol = eth_type_trans(skb, dev);
549 dev->stats.rx_packets++;
550
551 if (status & (1 << 29))
552 skb->pkt_type = PACKET_OTHERHOST;
553 if (status & (1 << 27))
554 skb->pkt_type = PACKET_MULTICAST;
555 if (status & (1 << 28))
556 skb->pkt_type = PACKET_BROADCAST;
557
558 skb->ip_summed = CHECKSUM_UNNECESSARY;
559
560 /* Pass the received packet to network layer */
		if (netif_receive_skb(skb) != NET_RX_DROP)
			dev->stats.rx_bytes += len - 4;	/* Without CRC */
		else
			dev->stats.rx_dropped++;
567
568 dev->last_rx = jiffies;
569
570 exit:
571 skb = netdev_alloc_skb(dev, RX_ALLOC_SIZE);
572 skb_reserve(skb, RX_SKB_HEADROOM);
573 priv->rx_skb[priv->cur_rx] = skb;
574 priv->irq_rxskb[priv->cur_rx] = 1;
575 priv->cur_rx++;
576
577 if (priv->cur_rx >= RX_RING_SIZE)
578 priv->cur_rx = 0;
579 count++;
580 }
581
582 return count;
583 }
584
585 static int nuport_mac_poll(struct napi_struct *napi, int budget)
586 {
587 struct nuport_mac_priv *priv =
588 container_of(napi, struct nuport_mac_priv, napi);
589 struct net_device *dev = priv->dev;
590 int work_done;
591
592 work_done = nuport_mac_rx(dev, budget);
593
594 if (work_done < budget) {
595 napi_complete(napi);
596 nuport_mac_enable_rx_dma(priv);
597 }
598
599 return work_done;
600 }
601
602 static void nuport_mac_init_tx_ring(struct nuport_mac_priv *priv)
603 {
604 int i;
605
606 priv->cur_tx = priv->dma_tx = priv->tx_full = 0;
607 for (i = 0; i < TX_RING_SIZE; i++) {
608 priv->tx_skb[i] = NULL;
609 priv->valid_txskb[i] = 0;
610 }
611 priv->first_pkt = 1;
612 }
613
614 static int nuport_mac_init_rx_ring(struct net_device *dev)
615 {
616 struct nuport_mac_priv *priv = netdev_priv(dev);
617 struct sk_buff *skb;
618 int i;
619
620 priv->cur_rx = priv->dma_rx = priv->rx_full = 0;
621
622 for (i = 0; i < RX_RING_SIZE; i++) {
623 skb = netdev_alloc_skb(dev, RX_ALLOC_SIZE);
624 if (!skb)
625 return -ENOMEM;
626 skb_reserve(skb, RX_SKB_HEADROOM);
627 priv->rx_skb[i] = skb;
628 priv->irq_rxskb[i] = 1;
629 }
630
631 return 0;
632 }
633
634 static void nuport_mac_free_rx_ring(struct nuport_mac_priv *priv)
635 {
636 int i;
637
638 for (i = 0; i < RX_RING_SIZE; i++) {
639 if (!priv->rx_skb[i])
640 continue;
641
642 dev_kfree_skb(priv->rx_skb[i]);
643 priv->rx_skb[i] = NULL;
644 }
645 }
646
647 static void nuport_mac_read_mac_address(struct net_device *dev)
648 {
649 struct nuport_mac_priv *priv = netdev_priv(dev);
650 int i;
651
652 for (i = 0; i < 4; i++)
653 dev->dev_addr[i] = nuport_mac_readb(MAC_ADDR_LOW_REG + i);
654 dev->dev_addr[4] = nuport_mac_readb(MAC_ADDR_HIGH_REG);
655 dev->dev_addr[5] = nuport_mac_readb(MAC_ADDR_HIGH_REG + 1);
656
657 if (!is_valid_ether_addr(dev->dev_addr)) {
658 dev_info(&priv->pdev->dev, "using random address\n");
659 random_ether_addr(dev->dev_addr);
660 }
661 }
662
663 static int nuport_mac_change_mac_address(struct net_device *dev, void *mac_addr)
664 {
665 struct sockaddr *addr = mac_addr;
666 struct nuport_mac_priv *priv = netdev_priv(dev);
667 unsigned long *temp = (unsigned long *)dev->dev_addr;
668 u32 high, low;
669
670 if (netif_running(dev))
671 return -EBUSY;
672
673 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
674
675 spin_lock_irq(&priv->lock);
676
677 nuport_mac_writel(*temp, MAC_ADDR_LOW_REG);
678 temp = (unsigned long *)(dev->dev_addr + 4);
679 nuport_mac_writel(*temp, MAC_ADDR_HIGH_REG);
680
681 low = nuport_mac_readl(MAC_ADDR_LOW_REG);
682 high = nuport_mac_readl(MAC_ADDR_HIGH_REG);
683
684 spin_unlock_irq(&priv->lock);
685
686 return 0;
687 }
688
689 static int nuport_mac_open(struct net_device *dev)
690 {
691 int ret;
692 struct nuport_mac_priv *priv = netdev_priv(dev);
693 unsigned long flags;
694
695 ret = clk_enable(priv->emac_clk);
696 if (ret) {
697 netdev_err(dev, "failed to enable EMAC clock\n");
698 return ret;
699 }
700
701 /* Set MAC into full duplex mode by default */
702 nuport_mac_writel(0x1010052C, CTRL_REG);
703
704 /* set mac address in hardware in case it was not already */
705 nuport_mac_change_mac_address(dev, dev->dev_addr);
706
707 ret = request_irq(priv->link_irq, &nuport_mac_link_interrupt,
708 0, dev->name, dev);
709 if (ret) {
710 netdev_err(dev, "unable to request link interrupt\n");
711 goto out_emac_clk;
712 }
713
714 phy_start(priv->phydev);
715
716 /* Enable link interrupt monitoring */
717 spin_lock_irqsave(&priv->lock, flags);
718 nuport_mac_writel(0x1041 | (priv->phydev->addr << 1), LINK_INT_CSR);
719 nuport_mac_writel(0xFFFFF, LINK_INT_POLL_TIME);
720 spin_unlock_irqrestore(&priv->lock, flags);
721
722 ret = request_irq(priv->tx_irq, &nuport_mac_tx_interrupt,
723 0, dev->name, dev);
724 if (ret) {
		netdev_err(dev, "unable to request tx interrupt\n");
726 goto out_link_irq;
727 }
728
729 napi_enable(&priv->napi);
730
731 ret = request_irq(priv->rx_irq, &nuport_mac_rx_interrupt,
732 0, dev->name, dev);
733 if (ret) {
		netdev_err(dev, "unable to request rx interrupt\n");
735 goto out_tx_irq;
736 }
737
738 netif_start_queue(dev);
739
740 nuport_mac_init_tx_ring(priv);
741
742 ret = nuport_mac_init_rx_ring(dev);
743 if (ret) {
744 netdev_err(dev, "rx ring init failed\n");
745 goto out_rx_skb;
746 }
747
748 nuport_mac_reset_tx_dma(priv);
749 nuport_mac_reset_rx_dma(priv);
750
751 /* Start RX DMA */
752 return nuport_mac_start_rx_dma(priv, priv->rx_skb[0]);
753
754 out_rx_skb:
755 nuport_mac_free_rx_ring(priv);
756 free_irq(priv->rx_irq, dev);
757 out_tx_irq:
758 free_irq(priv->tx_irq, dev);
759 out_link_irq:
760 free_irq(priv->link_irq, dev);
761 out_emac_clk:
762 clk_disable(priv->emac_clk);
763 return ret;
764 }
765
766 static int nuport_mac_close(struct net_device *dev)
767 {
768 struct nuport_mac_priv *priv = netdev_priv(dev);
769
	/* napi_disable(), phy_stop() and free_irq() may sleep and must not
	 * be called with priv->lock held */
	napi_disable(&priv->napi);
	netif_stop_queue(dev);

	/* Disable link interrupt monitoring */
	spin_lock_irq(&priv->lock);
	nuport_mac_writel(0x00, LINK_INT_CSR);
	nuport_mac_writel(0x00, LINK_INT_POLL_TIME);
	spin_unlock_irq(&priv->lock);

	phy_stop(priv->phydev);

	free_irq(priv->link_irq, dev);
	free_irq(priv->tx_irq, dev);
	free_irq(priv->rx_irq, dev);
782
783 nuport_mac_free_rx_ring(priv);
784
785 clk_disable(priv->emac_clk);
786
787 return 0;
788 }
789
790 static void nuport_mac_tx_timeout(struct net_device *dev)
791 {
792 struct nuport_mac_priv *priv = netdev_priv(dev);
793 unsigned int i;
794
795 netdev_warn(dev, "transmit timeout, attempting recovery\n");
796
797 netdev_info(dev, "TX DMA regs\n");
798 for (i = 0; i < DMA_CHAN_WIDTH; i += 4)
799 netdev_info(dev, "[%02x]: 0x%08x\n", i, nuport_mac_readl(TX_DMA_BASE + i));
800 netdev_info(dev, "RX DMA regs\n");
801 for (i = 0; i < DMA_CHAN_WIDTH; i += 4)
802 netdev_info(dev, "[%02x]: 0x%08x\n", i, nuport_mac_readl(RX_DMA_BASE + i));
803
804 nuport_mac_init_tx_ring(priv);
805 nuport_mac_reset_tx_dma(priv);
806
807 netif_wake_queue(dev);
808 }
809
810 static int nuport_mac_mii_probe(struct net_device *dev)
811 {
812 struct nuport_mac_priv *priv = netdev_priv(dev);
813 struct phy_device *phydev = NULL;
814 int ret;
815
816 ret = clk_enable(priv->ephy_clk);
817 if (ret) {
818 netdev_err(dev, "unable to enable ePHY clk\n");
819 return ret;
820 }
821
822 phydev = phy_find_first(priv->mii_bus);
823 if (!phydev) {
824 netdev_err(dev, "no PHYs found\n");
825 ret = -ENODEV;
826 goto out;
827 }
828
829 phydev = phy_connect(dev, dev_name(&phydev->dev),
830 nuport_mac_adjust_link, 0,
831 PHY_INTERFACE_MODE_MII);
832 if (IS_ERR(phydev)) {
833 netdev_err(dev, "could not attach PHY\n");
834 ret = PTR_ERR(phydev);
835 goto out;
836 }
837
838 phydev->supported &= PHY_BASIC_FEATURES;
839 phydev->advertising = phydev->supported;
840 priv->phydev = phydev;
841 priv->old_link = 0;
842 priv->old_duplex = -1;
843
844 dev_info(&priv->pdev->dev, "attached PHY driver [%s] "
845 "(mii_bus:phy_addr=%d)\n",
846 phydev->drv->name, phydev->addr);
847
848 return 0;
849
850 out:
851 /* disable the Ethernet PHY clock for the moment */
852 clk_disable(priv->ephy_clk);
853
854 return ret;
855 }
856
857 static void nuport_mac_ethtool_drvinfo(struct net_device *dev,
858 struct ethtool_drvinfo *info)
859 {
860 strncpy(info->driver, "nuport-mac", sizeof(info->driver));
861 strncpy(info->version, "0.1", sizeof(info->version));
862 strncpy(info->fw_version, "N/A", sizeof(info->fw_version));
863 strncpy(info->bus_info, "internal", sizeof(info->bus_info));
864 info->n_stats = 0;
865 info->testinfo_len = 0;
866 info->regdump_len = 0;
867 info->eedump_len = 0;
868 }
869
870 static int nuport_mac_ethtool_get_settings(struct net_device *dev,
871 struct ethtool_cmd *cmd)
872 {
873 struct nuport_mac_priv *priv = netdev_priv(dev);
874
875 if (priv->phydev)
876 return phy_ethtool_gset(priv->phydev, cmd);
877
878 return -EINVAL;
879 }
880
881 static int nuport_mac_ethtool_set_settings(struct net_device *dev,
882 struct ethtool_cmd *cmd)
883 {
884 struct nuport_mac_priv *priv = netdev_priv(dev);
885
886 if (priv->phydev)
887 return phy_ethtool_sset(priv->phydev, cmd);
888
889 return -EINVAL;
890 }
891
892 static void nuport_mac_set_msglevel(struct net_device *dev, u32 msg_level)
893 {
894 struct nuport_mac_priv *priv = netdev_priv(dev);
895
896 priv->msg_level = msg_level;
897 }
898
899 static u32 nuport_mac_get_msglevel(struct net_device *dev)
900 {
901 struct nuport_mac_priv *priv = netdev_priv(dev);
902
903 return priv->msg_level;
904 }
905
906 static const struct ethtool_ops nuport_mac_ethtool_ops = {
907 .get_drvinfo = nuport_mac_ethtool_drvinfo,
908 .get_link = ethtool_op_get_link,
909 .get_settings = nuport_mac_ethtool_get_settings,
910 .set_settings = nuport_mac_ethtool_set_settings,
911 .set_msglevel = nuport_mac_set_msglevel,
912 .get_msglevel = nuport_mac_get_msglevel,
913 };
914
915 static const struct net_device_ops nuport_mac_ops = {
916 .ndo_open = nuport_mac_open,
917 .ndo_stop = nuport_mac_close,
918 .ndo_start_xmit = nuport_mac_start_xmit,
919 .ndo_change_mtu = eth_change_mtu,
920 .ndo_validate_addr = eth_validate_addr,
921 .ndo_set_mac_address = nuport_mac_change_mac_address,
922 .ndo_tx_timeout = nuport_mac_tx_timeout,
923 };
924
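/*
 * The platform device is expected to provide two memory resources (MAC
 * registers, then DMA registers), three interrupts in RX/TX/link order,
 * "emac" and "ephy" clocks, and optionally the device tree property
 * "nuport-mac,buffer-shifting" to enable the 2-byte RX buffer shift.
 */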
static int __devinit nuport_mac_probe(struct platform_device *pdev)
926 {
927 struct net_device *dev;
928 struct nuport_mac_priv *priv = NULL;
929 struct resource *regs, *dma;
930 int ret = 0;
931 int rx_irq, tx_irq, link_irq;
932 int i;
933 const unsigned int *intspec;
934
935 dev = alloc_etherdev(sizeof(struct nuport_mac_priv));
936 if (!dev) {
937 dev_err(&pdev->dev, "no memory for net_device\n");
938 return -ENOMEM;
939 }
940
941 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
942 dma = platform_get_resource(pdev, IORESOURCE_MEM, 1);
943 if (!regs || !dma) {
944 dev_err(&pdev->dev, "failed to get regs resources\n");
945 ret = -ENODEV;
946 goto out;
947 }
948
949 rx_irq = platform_get_irq(pdev, 0);
950 tx_irq = platform_get_irq(pdev, 1);
951 link_irq = platform_get_irq(pdev, 2);
952 if (rx_irq < 0 || tx_irq < 0 || link_irq < 0) {
953 ret = -ENODEV;
954 goto out;
955 }
956
957 platform_set_drvdata(pdev, dev);
958 SET_NETDEV_DEV(dev, &pdev->dev);
959 priv = netdev_priv(dev);
960 priv->pdev = pdev;
961 priv->dev = dev;
962 spin_lock_init(&priv->lock);
963
964 intspec = of_get_property(pdev->dev.of_node,
965 "nuport-mac,buffer-shifting", NULL);
966 if (!intspec)
967 priv->buffer_shifting_len = 0;
968 else
969 priv->buffer_shifting_len = 2;
970
971 priv->mac_base = devm_ioremap(&pdev->dev,
972 regs->start, resource_size(regs));
973 if (!priv->mac_base) {
974 dev_err(&pdev->dev, "failed to remap regs\n");
975 ret = -ENOMEM;
976 goto out_platform;
977 }
978
979 priv->dma_base = devm_ioremap(&pdev->dev,
980 dma->start, resource_size(dma));
981 if (!priv->dma_base) {
982 dev_err(&pdev->dev, "failed to remap dma-regs\n");
983 ret = -ENOMEM;
984 goto out_platform;
985 }
986
987 priv->emac_clk = clk_get(&pdev->dev, "emac");
988 if (IS_ERR_OR_NULL(priv->emac_clk)) {
989 dev_err(&pdev->dev, "failed to get emac clk\n");
990 ret = PTR_ERR(priv->emac_clk);
991 goto out_platform;
992 }
993
994 priv->ephy_clk = clk_get(&pdev->dev, "ephy");
995 if (IS_ERR_OR_NULL(priv->ephy_clk)) {
996 dev_err(&pdev->dev, "failed to get ephy clk\n");
997 ret = PTR_ERR(priv->ephy_clk);
998 goto out_platform;
999 }
1000
1001 priv->link_irq = link_irq;
1002 priv->rx_irq = rx_irq;
1003 priv->tx_irq = tx_irq;
1004 priv->msg_level = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK;
1005 dev->netdev_ops = &nuport_mac_ops;
1006 dev->ethtool_ops = &nuport_mac_ethtool_ops;
1007 dev->watchdog_timeo = HZ;
1008 dev->flags = IFF_BROADCAST; /* Supports Broadcast */
1009 dev->tx_queue_len = TX_RING_SIZE / 2;
1010
1011 netif_napi_add(dev, &priv->napi, nuport_mac_poll, 64);
1012
1013 priv->mii_bus = mdiobus_alloc();
	if (!priv->mii_bus) {
		dev_err(&pdev->dev, "mii bus allocation failed\n");
		ret = -ENOMEM;
		goto out;
	}
1018
1019 priv->mii_bus->priv = dev;
1020 priv->mii_bus->read = nuport_mac_mii_read;
1021 priv->mii_bus->write = nuport_mac_mii_write;
1022 priv->mii_bus->reset = nuport_mac_mii_reset;
1023 priv->mii_bus->name = "nuport-mac-mii";
1024 priv->mii_bus->phy_mask = (1 << 0);
1025 snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s", pdev->name);
1026 priv->mii_bus->irq = kzalloc(PHY_MAX_ADDR * sizeof(int), GFP_KERNEL);
1027 if (!priv->mii_bus->irq) {
1028 dev_err(&pdev->dev, "failed to allocate mii_bus irqs\n");
1029 ret = -ENOMEM;
1030 goto out_mdio;
1031 }
1032
1033 /* We support PHY interrupts routed back to the MAC */
1034 for (i = 0; i < PHY_MAX_ADDR; i++)
1035 priv->mii_bus->irq[i] = PHY_IGNORE_INTERRUPT;
1036
1037 ret = mdiobus_register(priv->mii_bus);
1038 if (ret) {
1039 dev_err(&pdev->dev, "failed to register mii_bus\n");
1040 goto out_mdio_irq;
1041 }
1042
1043 ret = nuport_mac_mii_probe(dev);
1044 if (ret) {
1045 dev_err(&pdev->dev, "failed to probe MII bus\n");
1046 goto out_mdio_unregister;
1047 }
1048
1049 ret = register_netdev(dev);
1050 if (ret) {
1051 dev_err(&pdev->dev, "failed to register net_device\n");
1052 goto out_mdio_probe;
1053 }
1054
1055 /* read existing mac address */
1056 nuport_mac_read_mac_address(dev);
1057
1058 dev_info(&pdev->dev, "registered (MAC: %pM)\n", dev->dev_addr);
1059
1060 return ret;
1061
1062 out_mdio_probe:
1063 phy_disconnect(priv->phydev);
1064 out_mdio_unregister:
1065 mdiobus_unregister(priv->mii_bus);
1066 out_mdio_irq:
1067 kfree(priv->mii_bus->irq);
1068 out_mdio:
1069 mdiobus_free(priv->mii_bus);
1070 out_platform:
1071 platform_set_drvdata(pdev, NULL);
out:
	/* priv is NULL when the resource/irq lookups fail, and the clocks
	 * may still be unacquired or hold an ERR_PTR value here */
	if (priv) {
		if (!IS_ERR_OR_NULL(priv->ephy_clk))
			clk_put(priv->ephy_clk);
		if (!IS_ERR_OR_NULL(priv->emac_clk))
			clk_put(priv->emac_clk);
	}
	free_netdev(dev);
	platform_set_drvdata(pdev, NULL);
	return ret;
1078 }
1079
1080 static int nuport_mac_remove(struct platform_device *pdev)
1081 {
1082 struct net_device *dev = platform_get_drvdata(pdev);
1083 struct nuport_mac_priv *priv = netdev_priv(dev);
1084
1085 unregister_netdev(dev);
1086 phy_disconnect(priv->phydev);
1087 mdiobus_unregister(priv->mii_bus);
1088 kfree(priv->mii_bus->irq);
1089 mdiobus_free(priv->mii_bus);
1090 clk_put(priv->ephy_clk);
1091 clk_put(priv->emac_clk);
1092 free_netdev(dev);
1093
1094 platform_set_drvdata(pdev, NULL);
1095
1096 return 0;
1097 }
1098
static const struct of_device_id nuport_eth_ids[] = {
1100 {.compatible = "moschip,nuport-mac",},
1101 { /* sentinel */ },
1102 };
1103
1104 static struct platform_driver nuport_eth_driver = {
1105 .driver = {
1106 .name = "nuport-mac",
1107 .owner = THIS_MODULE,
1108 .of_match_table = nuport_eth_ids,
1109 },
1110 .probe = nuport_mac_probe,
1111 .remove = __devexit_p(nuport_mac_remove),
1112 };
1113
1114 module_platform_driver(nuport_eth_driver);
1115
1116 MODULE_AUTHOR("Moschip Semiconductors Ltd.");
1117 MODULE_DESCRIPTION("Moschip MCS8140 Ethernet MAC driver");
1118 MODULE_LICENSE("GPL");