fc5a5a15a255958184b9981e0333bcf1247840ab
[openwrt/svn-archive/archive.git] / target / linux / mcs814x / files-3.3 / drivers / net / ethernet / mcs8140 / nuport_mac.c
1 /*
2 * Moschip MCS8140 Ethernet MAC driver
3 *
4 * Copyright (C) 2003, Moschip Semiconductors
5 * Copyright (C) 2012, Florian Fainelli <florian@openwrt.org>
6 *
7 * Licensed under GPLv2
8 */
9 #include <linux/module.h>
10 #include <linux/slab.h>
11 #include <linux/kernel.h>
12 #include <linux/init.h>
13 #include <linux/netdevice.h>
14 #include <linux/etherdevice.h>
15 #include <linux/delay.h>
16 #include <linux/ethtool.h>
17 #include <linux/mii.h>
18 #include <linux/io.h>
19 #include <linux/interrupt.h>
20 #include <linux/platform_device.h>
21 #include <linux/of.h>
22 #include <linux/irq.h>
23 #include <linux/err.h>
24 #include <linux/phy.h>
25 #include <linux/clk.h>
26 #include <linux/dma-mapping.h>
27
28 #include <asm/unaligned.h>
29 #include <asm/sizes.h>
30
31 /* Hardware registers */
32 #define MAC_BASE_ADDR ((priv->mac_base))
33
34 #define CTRL_REG (MAC_BASE_ADDR)
35 #define MII_BUSY 0x00000001
36 #define MII_WRITE 0x00000002
37 #define MAC_ADDR_HIGH_REG (MAC_BASE_ADDR + 0x04)
38 #define MAC_ADDR_LOW_REG (MAC_BASE_ADDR + 0x08)
39 #define MII_ADDR_REG (MAC_BASE_ADDR + 0x14)
40 #define MII_DATA_REG (MAC_BASE_ADDR + 0x18)
41 /* Link interrupt registers */
42 #define LINK_INT_CSR (MAC_BASE_ADDR + 0xD0)
43 #define LINK_INT_POLL_TIME (MAC_BASE_ADDR + 0xD4)
44
45 #define DMA_CHAN_WIDTH 32
46 #define DMA_RX_CHAN 0
47 #define DMA_TX_CHAN 2
48
49 /* Receive DMA registers */
50 #define RX_DMA_BASE ((priv->dma_base) + \
51 (DMA_CHAN_WIDTH * DMA_RX_CHAN))
52 #define RX_BUFFER_ADDR (RX_DMA_BASE + 0x00)
53 #define RX_MAX_BYTES (RX_DMA_BASE + 0x04)
54 #define RX_ACT_BYTES (RX_DMA_BASE + 0x08)
55 #define RX_START_DMA (RX_DMA_BASE + 0x0C)
56 #define RX_DMA_ENH (RX_DMA_BASE + 0x14)
57
58 /* Transmit DMA registers */
59 #define TX_DMA_BASE ((priv->dma_base) + \
60 (DMA_CHAN_WIDTH * DMA_TX_CHAN))
61 #define TX_BUFFER_ADDR (TX_DMA_BASE + 0x00)
62 #define TX_PKT_BYTES (TX_DMA_BASE + 0x04)
63 #define TX_BYTES_SENT (TX_DMA_BASE + 0x08)
64 #define TX_START_DMA (TX_DMA_BASE + 0x0C)
65 #define TX_DMA_STATUS (TX_DMA_BASE + 0x10)
66 #define TX_DMA_ENH (TX_DMA_BASE + 0x14)
67
68 #define RX_ALLOC_SIZE SZ_2K
69 #define MAX_ETH_FRAME_SIZE 1536
70 #define RX_SKB_TAILROOM 128
71 #define RX_SKB_HEADROOM (RX_ALLOC_SIZE - \
72 (MAX_ETH_FRAME_SIZE + RX_SKB_TAILROOM) + 0)
73
74 /* WDT Late COL Lenght COL Type */
75 #define ERROR_FILTER_MASK ((1<<14) | (1<<15) | (1<<16) | (1<<17) | (0<<18) | \
76 /* MII Dribbling CRC Len/type Control */\
77 (1<<19) | (1<<20) | (1<<21) | (0<<24) | (1<<25) | \
78 /* Unsup Missed */\
79 (1<<26) | (0<<31))
80 #define TX_RING_SIZE 30
81 #define RX_RING_SIZE 30
82
83 static inline u32 nuport_mac_readl(void __iomem *reg)
84 {
85 return __raw_readl(reg);
86 }
87
88 static inline u8 nuport_mac_readb(void __iomem *reg)
89 {
90 return __raw_readb(reg);
91 }
92
93 static inline void nuport_mac_writel(u32 value, void __iomem *reg)
94 {
95 __raw_writel(value, reg);
96 }
97
98 static inline void nuport_mac_writeb(u8 value, void __iomem *reg)
99 {
100 __raw_writel(value, reg);
101 }
102
/* MAC private data */
struct nuport_mac_priv {
	spinlock_t lock;	/* protects ring state and register access */

	void __iomem *mac_base;	/* MAC register window */
	void __iomem *dma_base;	/* DMA channel register window */

	int rx_irq;
	int tx_irq;
	int link_irq;
	struct clk *emac_clk;	/* MAC core clock */
	struct clk *ephy_clk;	/* internal Ethernet PHY clock */

	/* Transmit buffers */
	struct sk_buff *tx_skb[TX_RING_SIZE];	/* queued skbs awaiting DMA */
	unsigned int valid_txskb[TX_RING_SIZE];	/* 1 = slot holds a pending skb */
	unsigned int cur_tx;	/* next slot start_xmit will fill */
	unsigned int dma_tx;	/* slot currently owned by the DMA engine */
	unsigned int tx_full;	/* set when start_xmit stopped the queue */

	/* Receive buffers */
	struct sk_buff *rx_skb[RX_RING_SIZE];
	unsigned int irq_rxskb[RX_RING_SIZE];	/* 1 = free for DMA, 0 = holds data */
	int pkt_len[RX_RING_SIZE];	/* length latched from RX_ACT_BYTES */
	unsigned int cur_rx;	/* next slot the NAPI poller consumes */
	unsigned int dma_rx;	/* next slot the DMA engine fills */
	unsigned int rx_full;	/* all RX slots hold unprocessed data */

	unsigned int first_pkt;	/* no TX in flight; xmit must kick the DMA */

	/* Private data */
	struct napi_struct napi;
	struct net_device *dev;
	struct platform_device *pdev;
	struct mii_bus *mii_bus;
	struct phy_device *phydev;
	int old_link;		/* last link state seen by adjust_link */
	int old_duplex;		/* last duplex programmed (-1 = unknown) */
	u32 msg_level;
	unsigned int buffer_shifting_len;	/* 0 or 2, from DT property */
};
144
145 static inline int nuport_mac_mii_busy_wait(struct nuport_mac_priv *priv)
146 {
147 unsigned long curr;
148 unsigned long finish = jiffies + 3 * HZ;
149
150 do {
151 curr = jiffies;
152 if (!(nuport_mac_readl(MII_ADDR_REG) & MII_BUSY))
153 return 0;
154 cpu_relax();
155 } while (!time_after_eq(curr, finish));
156
157 return -EBUSY;
158 }
159
160 /* Read from PHY registers */
161 static int nuport_mac_mii_read(struct mii_bus *bus,
162 int mii_id, int regnum)
163 {
164 struct net_device *dev = bus->priv;
165 struct nuport_mac_priv *priv = netdev_priv(dev);
166 int ret;
167 u32 val = 0;
168
169 ret = nuport_mac_mii_busy_wait(priv);
170 if (ret)
171 return ret;
172
173 val |= (mii_id << 11) | (regnum << 6) | MII_BUSY;
174 nuport_mac_writel(val, MII_ADDR_REG);
175 ret = nuport_mac_mii_busy_wait(priv);
176 if (ret)
177 return ret;
178
179 return nuport_mac_readl(MII_DATA_REG);
180 }
181
182 static int nuport_mac_mii_write(struct mii_bus *bus, int mii_id,
183 int regnum, u16 value)
184 {
185 struct net_device *dev = bus->priv;
186 struct nuport_mac_priv *priv = netdev_priv(dev);
187 int ret;
188 u32 val = 0;
189
190 ret = nuport_mac_mii_busy_wait(priv);
191 if (ret)
192 return ret;
193
194 val |= (mii_id << 11) | (regnum << 6) | MII_BUSY | MII_WRITE;
195 nuport_mac_writel(value, MII_DATA_REG);
196 nuport_mac_writel(val, MII_ADDR_REG);
197
198 return nuport_mac_mii_busy_wait(priv);
199 }
200
/* The MDIO block exposes no software reset; nothing to do here. */
static int nuport_mac_mii_reset(struct mii_bus *bus)
{
	return 0;
}
205
206 static int nuport_mac_start_tx_dma(struct nuport_mac_priv *priv,
207 struct sk_buff *skb)
208 {
209 dma_addr_t p;
210 u32 reg;
211 unsigned int timeout = 2048;
212
213 while (timeout--) {
214 reg = nuport_mac_readl(TX_START_DMA);
215 if (!(reg & 0x01)) {
216 netdev_dbg(priv->dev, "dma ready\n");
217 break;
218 }
219 cpu_relax();
220 }
221
222 if (!timeout)
223 return -EBUSY;
224
225 p = dma_map_single(&priv->pdev->dev, skb->data,
226 skb->len, DMA_TO_DEVICE);
227
228 /* enable enhanced mode */
229 nuport_mac_writel(0x01, TX_DMA_ENH);
230 nuport_mac_writel(p, TX_BUFFER_ADDR);
231 nuport_mac_writel((skb->len) - 1, TX_PKT_BYTES);
232 wmb();
233 nuport_mac_writel(0x0D, TX_START_DMA);
234
235 return 0;
236 }
237
238 static void nuport_mac_reset_tx_dma(struct nuport_mac_priv *priv)
239 {
240 u32 reg;
241
242 reg = nuport_mac_readl(TX_START_DMA);
243 reg |= (1 << 24);
244 nuport_mac_writel(reg, TX_START_DMA);
245 }
246
247 static int nuport_mac_start_rx_dma(struct nuport_mac_priv *priv,
248 struct sk_buff *skb)
249 {
250 dma_addr_t p;
251 u32 reg;
252 unsigned int timeout = 2048;
253
254 while (timeout--) {
255 reg = nuport_mac_readl(RX_START_DMA);
256 if (!(reg & 0x01)) {
257 netdev_dbg(priv->dev, "dma ready\n");
258 break;
259 }
260 cpu_relax();
261 }
262
263 if (!timeout)
264 return -EBUSY;
265
266 p = dma_map_single(&priv->pdev->dev, skb->data,
267 RX_ALLOC_SIZE, DMA_FROM_DEVICE);
268
269 nuport_mac_writel(p, RX_BUFFER_ADDR);
270 wmb();
271 nuport_mac_writel(0x01, RX_START_DMA);
272
273 return 0;
274 }
275
276 static void nuport_mac_reset_rx_dma(struct nuport_mac_priv *priv)
277 {
278 u32 reg;
279
280 reg = nuport_mac_readl(RX_START_DMA);
281 reg |= (1 << 1);
282 nuport_mac_writel(reg, RX_START_DMA);
283 }
284
285 /* I suppose this might do something, but I am not sure actually */
286 static void nuport_mac_disable_rx_dma(struct nuport_mac_priv *priv)
287 {
288 u32 reg;
289
290 reg = nuport_mac_readl(RX_DMA_ENH);
291 reg &= ~(1 << 1);
292 nuport_mac_writel(reg, RX_DMA_ENH);
293 }
294
295 static void nuport_mac_enable_rx_dma(struct nuport_mac_priv *priv)
296 {
297 u32 reg;
298
299 reg = nuport_mac_readl(RX_DMA_ENH);
300 reg |= (1 << 1);
301 nuport_mac_writel(reg, RX_DMA_ENH);
302 }
303
304 /* Add packets to the transmit queue */
305 static int nuport_mac_start_xmit(struct sk_buff *skb, struct net_device *dev)
306 {
307 unsigned long flags;
308 struct nuport_mac_priv *priv = netdev_priv(dev);
309 int ret;
310
311 if (netif_queue_stopped(dev)) {
312 netdev_warn(dev, "netif queue was stopped, restarting\n");
313 netif_start_queue(dev);
314 }
315
316 spin_lock_irqsave(&priv->lock, flags);
317 if (priv->first_pkt) {
318 ret = nuport_mac_start_tx_dma(priv, skb);
319 if (ret) {
320 netif_stop_queue(dev);
321 spin_unlock_irqrestore(&priv->lock, flags);
322 netdev_err(dev, "transmit path busy\n");
323 return NETDEV_TX_BUSY;
324 }
325 priv->first_pkt = 0;
326 }
327
328 priv->tx_skb[priv->cur_tx] = skb;
329 dev->stats.tx_bytes += skb->len;
330 dev->stats.tx_packets++;
331 priv->valid_txskb[priv->cur_tx] = 1;
332 priv->cur_tx++;
333 dev->trans_start = jiffies;
334
335 if (priv->cur_tx >= TX_RING_SIZE)
336 priv->cur_tx = 0;
337
338 spin_unlock_irqrestore(&priv->lock, flags);
339
340 if (priv->valid_txskb[priv->cur_tx]) {
341 priv->tx_full = 1;
342 netdev_err(dev, "stopping queue\n");
343 netif_stop_queue(dev);
344 }
345
346 return NETDEV_TX_OK;
347 }
348
349 static void nuport_mac_adjust_link(struct net_device *dev)
350 {
351 struct nuport_mac_priv *priv = netdev_priv(dev);
352 struct phy_device *phydev = priv->phydev;
353 unsigned int status_changed = 0;
354 u32 reg;
355
356 BUG_ON(!phydev);
357
358 if (priv->old_link != phydev->link) {
359 status_changed = 1;
360 priv->old_link = phydev->link;
361 }
362
363 if (phydev->link & (priv->old_duplex != phydev->duplex)) {
364 reg = nuport_mac_readl(CTRL_REG);
365 if (phydev->duplex == DUPLEX_FULL)
366 reg |= (1 << 20);
367 else
368 reg &= ~(1 << 20);
369 nuport_mac_writel(reg, CTRL_REG);
370
371 status_changed = 1;
372 priv->old_duplex = phydev->duplex;
373 }
374
375 if (!status_changed)
376 return;
377
378 pr_info("%s: link %s", dev->name, phydev->link ?
379 "UP" : "DOWN");
380 if (phydev->link) {
381 pr_cont(" - %d/%s", phydev->speed,
382 phydev->duplex == DUPLEX_FULL ? "full" : "half");
383 }
384 pr_cont("\n");
385 }
386
387 static irqreturn_t nuport_mac_link_interrupt(int irq, void *dev_id)
388 {
389 struct net_device *dev = dev_id;
390 struct nuport_mac_priv *priv = netdev_priv(dev);
391 u32 reg;
392 u8 phy_addr;
393
394 reg = nuport_mac_readl(LINK_INT_CSR);
395 phy_addr = (reg >> 1) & 0x0f;
396
397 if (phy_addr != priv->phydev->addr) {
398 netdev_err(dev, "spurious PHY irq (phy: %d)\n", phy_addr);
399 return IRQ_NONE;
400 }
401
402 priv->phydev->link = (reg & (1 << 16));
403 nuport_mac_adjust_link(dev);
404
405 return IRQ_HANDLED;
406 }
407
/*
 * TX completion interrupt: reclaim the skb that just finished, account
 * errors, and chain the next queued packet into the DMA engine.
 */
static irqreturn_t nuport_mac_tx_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct nuport_mac_priv *priv = netdev_priv(dev);
	struct sk_buff *skb;
	unsigned long flags;
	int ret;
	u32 reg;

	spin_lock_irqsave(&priv->lock, flags);
	/* clear status word available if ready */
	reg = nuport_mac_readl(TX_START_DMA);
	if (reg & (1 << 18)) {
		/* writing the register back acknowledges the status word */
		nuport_mac_writel(reg, TX_START_DMA);
		reg = nuport_mac_readl(TX_DMA_STATUS);

		/* bit 0 presumably flags a transmit error -- TODO confirm */
		if (reg & 1)
			dev->stats.tx_errors++;
	} else
		netdev_dbg(dev, "no status word: %08x\n", reg);

	/* NOTE(review): tx_skb[dma_tx] is not NULL-checked; a spurious
	 * interrupt with an empty ring would free a NULL/stale pointer */
	skb = priv->tx_skb[priv->dma_tx];
	priv->tx_skb[priv->dma_tx] = NULL;
	priv->valid_txskb[priv->dma_tx] = 0;
	dev_kfree_skb_irq(skb);

	/* advance the reclaim index around the ring */
	priv->dma_tx++;
	if (priv->dma_tx >= TX_RING_SIZE)
		priv->dma_tx = 0;

	/* ring drained: the next start_xmit must kick the DMA itself */
	if (!priv->valid_txskb[priv->dma_tx])
		priv->first_pkt = 1;
	else {
		ret = nuport_mac_start_tx_dma(priv, priv->tx_skb[priv->dma_tx]);
		if (ret)
			netdev_err(dev, "failed to restart TX dma\n");
	}

	/* re-wake the queue if start_xmit stopped it on a full ring */
	if (priv->tx_full) {
		netdev_dbg(dev, "restarting transmit queue\n");
		netif_wake_queue(dev);
		priv->tx_full = 0;
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	return IRQ_HANDLED;
}
456
457 static unsigned int nuport_mac_has_work(struct nuport_mac_priv *priv)
458 {
459 unsigned int i;
460
461 for (i = 0; i < RX_RING_SIZE; i++)
462 if (priv->rx_skb[i])
463 return 1;
464
465 return 0;
466 }
467
/*
 * RX DMA completion interrupt: latch the length of the buffer that just
 * filled, hand the next free buffer to the DMA engine if one exists,
 * and schedule NAPI to process the received data.
 */
static irqreturn_t nuport_mac_rx_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct nuport_mac_priv *priv = netdev_priv(dev);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&priv->lock, flags);
	if (!priv->rx_full) {
		/* length minus 4, presumably stripping a trailing CRC or
		 * status word -- TODO confirm against the datasheet */
		priv->pkt_len[priv->dma_rx] = nuport_mac_readl(RX_ACT_BYTES) - 4;
		/* mark the slot as holding data for the NAPI poller */
		priv->irq_rxskb[priv->dma_rx] = 0;
		priv->dma_rx++;

		if (priv->dma_rx >= RX_RING_SIZE)
			priv->dma_rx = 0;
	} else
		priv->rx_full = 0;

	if (priv->irq_rxskb[priv->dma_rx] == 1) {
		/* next slot is free: restart the DMA into it right away */
		ret = nuport_mac_start_rx_dma(priv, priv->rx_skb[priv->dma_rx]);
		if (ret)
			netdev_err(dev, "failed to start rx dma\n");
	} else {
		/* every slot holds unprocessed data; DMA restarts once
		 * NAPI has drained the ring */
		priv->rx_full = 1;
		netdev_dbg(dev, "RX ring full\n");
	}

	if (likely(nuport_mac_has_work(priv))) {
		/* find a way to disable DMA rx irq */
		nuport_mac_disable_rx_dma(priv);
		napi_schedule(&priv->napi);
	}
	spin_unlock_irqrestore(&priv->lock, flags);

	return IRQ_HANDLED;
}
504
505 /* Process received packets in tasklet */
506 static int nuport_mac_rx(struct net_device *dev, int limit)
507 {
508 struct nuport_mac_priv *priv = netdev_priv(dev);
509 struct sk_buff *skb;
510 int len, status;
511 int count = 0;
512
513 while (count < limit && !priv->irq_rxskb[priv->cur_rx]) {
514 skb = priv->rx_skb[priv->cur_rx];
515 len = priv->pkt_len[priv->cur_rx];
516
517 /* Remove 2 bytes added by RX buffer shifting */
518 len = len - priv->buffer_shifting_len;
519 skb->data = skb->data + priv->buffer_shifting_len;
520
521 /* Get packet status */
522 status = get_unaligned((u32 *) (skb->data + len));
523 skb->dev = dev;
524
525 /* packet filter failed */
526 if (!(status & (1 << 30))) {
527 dev_kfree_skb_irq(skb);
528 goto exit;
529 }
530
531 /* missed frame */
532 if (status & (1 << 31)) {
533 dev->stats.rx_missed_errors++;
534 dev_kfree_skb_irq(skb);
535 goto exit;
536 }
537
538 /* Not ethernet type */
539 if ((!(status & (1 << 18))) || (status & ERROR_FILTER_MASK))
540 dev->stats.rx_errors++;
541
542 if (len > MAX_ETH_FRAME_SIZE) {
543 dev_kfree_skb_irq(skb);
544 goto exit;
545 } else
546 skb_put(skb, len);
547
548 skb->protocol = eth_type_trans(skb, dev);
549 dev->stats.rx_packets++;
550
551 if (status & (1 << 29))
552 skb->pkt_type = PACKET_OTHERHOST;
553 if (status & (1 << 27))
554 skb->pkt_type = PACKET_MULTICAST;
555 if (status & (1 << 28))
556 skb->pkt_type = PACKET_BROADCAST;
557
558 skb->ip_summed = CHECKSUM_UNNECESSARY;
559
560 /* Pass the received packet to network layer */
561 status = netif_receive_skb(skb);
562 if (status != NET_RX_DROP)
563 dev->stats.rx_bytes += len - 4; /* Without CRC */
564 else
565 dev->stats.rx_dropped++;
566
567 dev->last_rx = jiffies;
568
569 exit:
570 skb = netdev_alloc_skb(dev, RX_ALLOC_SIZE);
571 skb_reserve(skb, RX_SKB_HEADROOM);
572 priv->rx_skb[priv->cur_rx] = skb;
573 priv->irq_rxskb[priv->cur_rx] = 1;
574 priv->cur_rx++;
575
576 if (priv->cur_rx >= RX_RING_SIZE)
577 priv->cur_rx = 0;
578 count++;
579 }
580
581 return count;
582 }
583
584 static int nuport_mac_poll(struct napi_struct *napi, int budget)
585 {
586 struct nuport_mac_priv *priv =
587 container_of(napi, struct nuport_mac_priv, napi);
588 struct net_device *dev = priv->dev;
589 int work_done;
590
591 work_done = nuport_mac_rx(dev, budget);
592
593 if (work_done < budget) {
594 napi_complete(napi);
595 nuport_mac_enable_rx_dma(priv);
596 }
597
598 return work_done;
599 }
600
601 static void nuport_mac_init_tx_ring(struct nuport_mac_priv *priv)
602 {
603 int i;
604
605 priv->cur_tx = priv->dma_tx = priv->tx_full = 0;
606 for (i = 0; i < TX_RING_SIZE; i++) {
607 priv->tx_skb[i] = NULL;
608 priv->valid_txskb[i] = 0;
609 }
610 priv->first_pkt = 1;
611 }
612
613 static int nuport_mac_init_rx_ring(struct net_device *dev)
614 {
615 struct nuport_mac_priv *priv = netdev_priv(dev);
616 struct sk_buff *skb;
617 int i;
618
619 priv->cur_rx = priv->dma_rx = priv->rx_full = 0;
620
621 for (i = 0; i < RX_RING_SIZE; i++) {
622 skb = netdev_alloc_skb(dev, RX_ALLOC_SIZE);
623 if (!skb)
624 return -ENOMEM;
625 skb_reserve(skb, RX_SKB_HEADROOM);
626 priv->rx_skb[i] = skb;
627 priv->irq_rxskb[i] = 1;
628 }
629
630 return 0;
631 }
632
633 static void nuport_mac_free_rx_ring(struct nuport_mac_priv *priv)
634 {
635 int i;
636
637 for (i = 0; i < RX_RING_SIZE; i++) {
638 if (!priv->rx_skb[i])
639 continue;
640
641 dev_kfree_skb(priv->rx_skb[i]);
642 priv->rx_skb[i] = NULL;
643 }
644 }
645
646 static void nuport_mac_read_mac_address(struct net_device *dev)
647 {
648 struct nuport_mac_priv *priv = netdev_priv(dev);
649 int i;
650
651 for (i = 0; i < 4; i++)
652 dev->dev_addr[i] = nuport_mac_readb(MAC_ADDR_LOW_REG + i);
653 dev->dev_addr[4] = nuport_mac_readb(MAC_ADDR_HIGH_REG);
654 dev->dev_addr[5] = nuport_mac_readb(MAC_ADDR_HIGH_REG + 1);
655
656 if (!is_valid_ether_addr(dev->dev_addr)) {
657 dev_info(&priv->pdev->dev, "using random address\n");
658 random_ether_addr(dev->dev_addr);
659 }
660 }
661
/*
 * ndo_set_mac_address: program a new MAC address into the hardware
 * address registers.  Rejected with -EBUSY while the interface is up.
 */
static int nuport_mac_change_mac_address(struct net_device *dev, void *mac_addr)
{
	struct sockaddr *addr = mac_addr;
	struct nuport_mac_priv *priv = netdev_priv(dev);
	unsigned long *temp = (unsigned long *)dev->dev_addr;
	u32 high, low;

	if (netif_running(dev))
		return -EBUSY;

	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);

	spin_lock_irq(&priv->lock);

	/* first four bytes go to the LOW register, last two to HIGH;
	 * NOTE(review): the second load reads 4 bytes starting at
	 * dev_addr[4], i.e. 2 bytes past the address -- the extra bytes
	 * are presumably ignored by the hardware, verify */
	nuport_mac_writel(*temp, MAC_ADDR_LOW_REG);
	temp = (unsigned long *)(dev->dev_addr + 4);
	nuport_mac_writel(*temp, MAC_ADDR_HIGH_REG);

	/* read back; values unused -- presumably posts the writes */
	low = nuport_mac_readl(MAC_ADDR_LOW_REG);
	high = nuport_mac_readl(MAC_ADDR_HIGH_REG);

	spin_unlock_irq(&priv->lock);

	return 0;
}
687
688 static int nuport_mac_open(struct net_device *dev)
689 {
690 int ret;
691 struct nuport_mac_priv *priv = netdev_priv(dev);
692 unsigned long flags;
693
694 ret = clk_enable(priv->emac_clk);
695 if (ret) {
696 netdev_err(dev, "failed to enable EMAC clock\n");
697 return ret;
698 }
699
700 /* Set MAC into full duplex mode by default */
701 nuport_mac_writel(0x1010052C, CTRL_REG);
702
703 /* set mac address in hardware in case it was not already */
704 nuport_mac_change_mac_address(dev, dev->dev_addr);
705
706 ret = request_irq(priv->link_irq, &nuport_mac_link_interrupt,
707 0, dev->name, dev);
708 if (ret) {
709 netdev_err(dev, "unable to request link interrupt\n");
710 goto out_emac_clk;
711 }
712
713 phy_start(priv->phydev);
714
715 /* Enable link interrupt monitoring */
716 spin_lock_irqsave(&priv->lock, flags);
717 nuport_mac_writel(0x1041 | (priv->phydev->addr << 1), LINK_INT_CSR);
718 nuport_mac_writel(0xFFFFF, LINK_INT_POLL_TIME);
719 spin_unlock_irqrestore(&priv->lock, flags);
720
721 ret = request_irq(priv->tx_irq, &nuport_mac_tx_interrupt,
722 0, dev->name, dev);
723 if (ret) {
724 netdev_err(dev, "unable to request rx interrupt\n");
725 goto out_link_irq;
726 }
727
728 napi_enable(&priv->napi);
729
730 ret = request_irq(priv->rx_irq, &nuport_mac_rx_interrupt,
731 0, dev->name, dev);
732 if (ret) {
733 netdev_err(dev, "unable to request tx interrupt\n");
734 goto out_tx_irq;
735 }
736
737 netif_start_queue(dev);
738
739 nuport_mac_init_tx_ring(priv);
740
741 ret = nuport_mac_init_rx_ring(dev);
742 if (ret) {
743 netdev_err(dev, "rx ring init failed\n");
744 goto out_rx_skb;
745 }
746
747 nuport_mac_reset_tx_dma(priv);
748 nuport_mac_reset_rx_dma(priv);
749
750 /* Start RX DMA */
751 return nuport_mac_start_rx_dma(priv, priv->rx_skb[0]);
752
753 out_rx_skb:
754 nuport_mac_free_rx_ring(priv);
755 free_irq(priv->rx_irq, dev);
756 out_tx_irq:
757 free_irq(priv->tx_irq, dev);
758 out_link_irq:
759 free_irq(priv->link_irq, dev);
760 out_emac_clk:
761 clk_disable(priv->emac_clk);
762 return ret;
763 }
764
765 static int nuport_mac_close(struct net_device *dev)
766 {
767 struct nuport_mac_priv *priv = netdev_priv(dev);
768
769 spin_lock_irq(&priv->lock);
770 napi_disable(&priv->napi);
771 netif_stop_queue(dev);
772
773 free_irq(priv->link_irq, dev);
774 nuport_mac_writel(0x00, LINK_INT_CSR);
775 nuport_mac_writel(0x00, LINK_INT_POLL_TIME);
776 phy_stop(priv->phydev);
777
778 free_irq(priv->tx_irq, dev);
779 free_irq(priv->rx_irq, dev);
780 spin_unlock_irq(&priv->lock);
781
782 nuport_mac_free_rx_ring(priv);
783
784 clk_disable(priv->emac_clk);
785
786 return 0;
787 }
788
789 static void nuport_mac_tx_timeout(struct net_device *dev)
790 {
791 struct nuport_mac_priv *priv = netdev_priv(dev);
792 unsigned int i;
793
794 netdev_warn(dev, "transmit timeout, attempting recovery\n");
795
796 netdev_info(dev, "TX DMA regs\n");
797 for (i = 0; i < DMA_CHAN_WIDTH; i += 4)
798 netdev_info(dev, "[%02x]: 0x%08x\n", i, nuport_mac_readl(TX_DMA_BASE + i));
799 netdev_info(dev, "RX DMA regs\n");
800 for (i = 0; i < DMA_CHAN_WIDTH; i += 4)
801 netdev_info(dev, "[%02x]: 0x%08x\n", i, nuport_mac_readl(RX_DMA_BASE + i));
802
803 nuport_mac_init_tx_ring(priv);
804 nuport_mac_reset_tx_dma(priv);
805
806 netif_wake_queue(dev);
807 }
808
809 static int nuport_mac_mii_probe(struct net_device *dev)
810 {
811 struct nuport_mac_priv *priv = netdev_priv(dev);
812 struct phy_device *phydev = NULL;
813 int ret;
814
815 ret = clk_enable(priv->ephy_clk);
816 if (ret) {
817 netdev_err(dev, "unable to enable ePHY clk\n");
818 return ret;
819 }
820
821 phydev = phy_find_first(priv->mii_bus);
822 if (!phydev) {
823 netdev_err(dev, "no PHYs found\n");
824 ret = -ENODEV;
825 goto out;
826 }
827
828 phydev = phy_connect(dev, dev_name(&phydev->dev),
829 nuport_mac_adjust_link, 0,
830 PHY_INTERFACE_MODE_MII);
831 if (IS_ERR(phydev)) {
832 netdev_err(dev, "could not attach PHY\n");
833 ret = PTR_ERR(phydev);
834 goto out;
835 }
836
837 phydev->supported &= PHY_BASIC_FEATURES;
838 phydev->advertising = phydev->supported;
839 priv->phydev = phydev;
840 priv->old_link = 0;
841 priv->old_duplex = -1;
842
843 dev_info(&priv->pdev->dev, "attached PHY driver [%s] "
844 "(mii_bus:phy_addr=%d)\n",
845 phydev->drv->name, phydev->addr);
846
847 return 0;
848
849 out:
850 /* disable the Ethernet PHY clock for the moment */
851 clk_disable(priv->ephy_clk);
852
853 return ret;
854 }
855
856 static void nuport_mac_ethtool_drvinfo(struct net_device *dev,
857 struct ethtool_drvinfo *info)
858 {
859 strncpy(info->driver, "nuport-mac", sizeof(info->driver));
860 strncpy(info->version, "0.1", sizeof(info->version));
861 strncpy(info->fw_version, "N/A", sizeof(info->fw_version));
862 strncpy(info->bus_info, "internal", sizeof(info->bus_info));
863 info->n_stats = 0;
864 info->testinfo_len = 0;
865 info->regdump_len = 0;
866 info->eedump_len = 0;
867 }
868
869 static int nuport_mac_ethtool_get_settings(struct net_device *dev,
870 struct ethtool_cmd *cmd)
871 {
872 struct nuport_mac_priv *priv = netdev_priv(dev);
873
874 if (priv->phydev)
875 return phy_ethtool_gset(priv->phydev, cmd);
876
877 return -EINVAL;
878 }
879
880 static int nuport_mac_ethtool_set_settings(struct net_device *dev,
881 struct ethtool_cmd *cmd)
882 {
883 struct nuport_mac_priv *priv = netdev_priv(dev);
884
885 if (priv->phydev)
886 return phy_ethtool_sset(priv->phydev, cmd);
887
888 return -EINVAL;
889 }
890
891 static void nuport_mac_set_msglevel(struct net_device *dev, u32 msg_level)
892 {
893 struct nuport_mac_priv *priv = netdev_priv(dev);
894
895 priv->msg_level = msg_level;
896 }
897
898 static u32 nuport_mac_get_msglevel(struct net_device *dev)
899 {
900 struct nuport_mac_priv *priv = netdev_priv(dev);
901
902 return priv->msg_level;
903 }
904
/* ethtool operations exposed by this driver */
static const struct ethtool_ops nuport_mac_ethtool_ops = {
	.get_drvinfo		= nuport_mac_ethtool_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_settings		= nuport_mac_ethtool_get_settings,
	.set_settings		= nuport_mac_ethtool_set_settings,
	.set_msglevel		= nuport_mac_set_msglevel,
	.get_msglevel		= nuport_mac_get_msglevel,
};
913
/* net_device operations; MTU/address helpers are the generic ethernet ones */
static const struct net_device_ops nuport_mac_ops = {
	.ndo_open		= nuport_mac_open,
	.ndo_stop		= nuport_mac_close,
	.ndo_start_xmit		= nuport_mac_start_xmit,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= nuport_mac_change_mac_address,
	.ndo_tx_timeout		= nuport_mac_tx_timeout,
};
923
924 static int __init nuport_mac_probe(struct platform_device *pdev)
925 {
926 struct net_device *dev;
927 struct nuport_mac_priv *priv = NULL;
928 struct resource *regs, *dma;
929 int ret = 0;
930 int rx_irq, tx_irq, link_irq;
931 int i;
932 const unsigned int *intspec;
933
934 dev = alloc_etherdev(sizeof(struct nuport_mac_priv));
935 if (!dev) {
936 dev_err(&pdev->dev, "no memory for net_device\n");
937 return -ENOMEM;
938 }
939
940 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
941 dma = platform_get_resource(pdev, IORESOURCE_MEM, 1);
942 if (!regs || !dma) {
943 dev_err(&pdev->dev, "failed to get regs resources\n");
944 ret = -ENODEV;
945 goto out;
946 }
947
948 rx_irq = platform_get_irq(pdev, 0);
949 tx_irq = platform_get_irq(pdev, 1);
950 link_irq = platform_get_irq(pdev, 2);
951 if (rx_irq < 0 || tx_irq < 0 || link_irq < 0) {
952 ret = -ENODEV;
953 goto out;
954 }
955
956 platform_set_drvdata(pdev, dev);
957 SET_NETDEV_DEV(dev, &pdev->dev);
958 priv = netdev_priv(dev);
959 priv->pdev = pdev;
960 priv->dev = dev;
961 spin_lock_init(&priv->lock);
962
963 intspec = of_get_property(pdev->dev.of_node,
964 "nuport-mac,buffer-shifting", NULL);
965 if (!intspec)
966 priv->buffer_shifting_len = 0;
967 else
968 priv->buffer_shifting_len = 2;
969
970 priv->mac_base = devm_ioremap(&pdev->dev,
971 regs->start, resource_size(regs));
972 if (!priv->mac_base) {
973 dev_err(&pdev->dev, "failed to remap regs\n");
974 ret = -ENOMEM;
975 goto out_platform;
976 }
977
978 priv->dma_base = devm_ioremap(&pdev->dev,
979 dma->start, resource_size(dma));
980 if (!priv->dma_base) {
981 dev_err(&pdev->dev, "failed to remap dma-regs\n");
982 ret = -ENOMEM;
983 goto out_platform;
984 }
985
986 priv->emac_clk = clk_get(&pdev->dev, "emac");
987 if (IS_ERR_OR_NULL(priv->emac_clk)) {
988 dev_err(&pdev->dev, "failed to get emac clk\n");
989 ret = PTR_ERR(priv->emac_clk);
990 goto out_platform;
991 }
992
993 priv->ephy_clk = clk_get(&pdev->dev, "ephy");
994 if (IS_ERR_OR_NULL(priv->ephy_clk)) {
995 dev_err(&pdev->dev, "failed to get ephy clk\n");
996 ret = PTR_ERR(priv->ephy_clk);
997 goto out_platform;
998 }
999
1000 priv->link_irq = link_irq;
1001 priv->rx_irq = rx_irq;
1002 priv->tx_irq = tx_irq;
1003 priv->msg_level = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK;
1004 dev->netdev_ops = &nuport_mac_ops;
1005 dev->ethtool_ops = &nuport_mac_ethtool_ops;
1006 dev->watchdog_timeo = HZ;
1007 dev->flags = IFF_BROADCAST; /* Supports Broadcast */
1008 dev->tx_queue_len = TX_RING_SIZE / 2;
1009
1010 netif_napi_add(dev, &priv->napi, nuport_mac_poll, 64);
1011
1012 priv->mii_bus = mdiobus_alloc();
1013 if (!priv->mii_bus) {
1014 dev_err(&pdev->dev, "mii bus allocation failed\n");
1015 goto out;
1016 }
1017
1018 priv->mii_bus->priv = dev;
1019 priv->mii_bus->read = nuport_mac_mii_read;
1020 priv->mii_bus->write = nuport_mac_mii_write;
1021 priv->mii_bus->reset = nuport_mac_mii_reset;
1022 priv->mii_bus->name = "nuport-mac-mii";
1023 priv->mii_bus->phy_mask = (1 << 0);
1024 snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s", pdev->name);
1025 priv->mii_bus->irq = kzalloc(PHY_MAX_ADDR * sizeof(int), GFP_KERNEL);
1026 if (!priv->mii_bus->irq) {
1027 dev_err(&pdev->dev, "failed to allocate mii_bus irqs\n");
1028 ret = -ENOMEM;
1029 goto out_mdio;
1030 }
1031
1032 /* We support PHY interrupts routed back to the MAC */
1033 for (i = 0; i < PHY_MAX_ADDR; i++)
1034 priv->mii_bus->irq[i] = PHY_IGNORE_INTERRUPT;
1035
1036 ret = mdiobus_register(priv->mii_bus);
1037 if (ret) {
1038 dev_err(&pdev->dev, "failed to register mii_bus\n");
1039 goto out_mdio_irq;
1040 }
1041
1042 ret = nuport_mac_mii_probe(dev);
1043 if (ret) {
1044 dev_err(&pdev->dev, "failed to probe MII bus\n");
1045 goto out_mdio_unregister;
1046 }
1047
1048 ret = register_netdev(dev);
1049 if (ret) {
1050 dev_err(&pdev->dev, "failed to register net_device\n");
1051 goto out_mdio_probe;
1052 }
1053
1054 /* read existing mac address */
1055 nuport_mac_read_mac_address(dev);
1056
1057 dev_info(&pdev->dev, "registered (MAC: %pM)\n", dev->dev_addr);
1058
1059 return ret;
1060
1061 out_mdio_probe:
1062 phy_disconnect(priv->phydev);
1063 out_mdio_unregister:
1064 mdiobus_unregister(priv->mii_bus);
1065 out_mdio_irq:
1066 kfree(priv->mii_bus->irq);
1067 out_mdio:
1068 mdiobus_free(priv->mii_bus);
1069 out_platform:
1070 platform_set_drvdata(pdev, NULL);
1071 out:
1072 clk_put(priv->ephy_clk);
1073 clk_put(priv->emac_clk);
1074 free_netdev(dev);
1075 platform_set_drvdata(pdev, NULL);
1076 return ret;
1077 }
1078
1079 static int nuport_mac_remove(struct platform_device *pdev)
1080 {
1081 struct net_device *dev = platform_get_drvdata(pdev);
1082 struct nuport_mac_priv *priv = netdev_priv(dev);
1083
1084 unregister_netdev(dev);
1085 phy_disconnect(priv->phydev);
1086 mdiobus_unregister(priv->mii_bus);
1087 kfree(priv->mii_bus->irq);
1088 mdiobus_free(priv->mii_bus);
1089 clk_put(priv->ephy_clk);
1090 clk_put(priv->emac_clk);
1091 free_netdev(dev);
1092
1093 platform_set_drvdata(pdev, NULL);
1094
1095 return 0;
1096 }
1097
1098 static struct of_device_id nuport_eth_ids[] __initdata = {
1099 {.compatible = "moschip,nuport-mac",},
1100 { /* sentinel */ },
1101 };
1102
/* Platform driver glue; binds via the OF compatible string above */
static struct platform_driver nuport_eth_driver = {
	.driver = {
		.name = "nuport-mac",
		.owner = THIS_MODULE,
		.of_match_table = nuport_eth_ids,
	},
	.probe	= nuport_mac_probe,
	.remove	= __devexit_p(nuport_mac_remove),
};
1112
1113 module_platform_driver(nuport_eth_driver);
1114
1115 MODULE_AUTHOR("Moschip Semiconductors Ltd.");
1116 MODULE_DESCRIPTION("Moschip MCS8140 Ethernet MAC driver");
1117 MODULE_LICENSE("GPL");