[mcs814x] assume the link is up and full-duplex by default
[openwrt/svn-archive/archive.git] target/linux/mcs814x/files-3.3/drivers/net/ethernet/mcs8140/nuport_mac.c
1 /*
2 * Moschip MCS8140 Ethernet MAC driver
3 *
4 * Copyright (C) 2003, Moschip Semiconductors
5 * Copyright (C) 2012, Florian Fainelli <florian@openwrt.org>
6 *
7 * Licensed under GPLv2
8 */
9 #include <linux/module.h>
10 #include <linux/slab.h>
11 #include <linux/kernel.h>
12 #include <linux/init.h>
13 #include <linux/netdevice.h>
14 #include <linux/etherdevice.h>
15 #include <linux/delay.h>
16 #include <linux/ethtool.h>
17 #include <linux/mii.h>
18 #include <linux/io.h>
19 #include <linux/interrupt.h>
20 #include <linux/platform_device.h>
21 #include <linux/of.h>
22 #include <linux/irq.h>
23 #include <linux/err.h>
24 #include <linux/phy.h>
25 #include <linux/clk.h>
26 #include <linux/dma-mapping.h>
27
28 #include <asm/unaligned.h>
29 #include <asm/sizes.h>
30
31 /* Hardware registers */
32 #define MAC_BASE_ADDR ((priv->mac_base))
33
34 #define CTRL_REG (MAC_BASE_ADDR)
35 #define MII_BUSY (1 << 0)
36 #define MII_WRITE (1 << 1)
37 #define RX_ENABLE (1 << 2)
38 #define TX_ENABLE (1 << 3)
39 #define DEFER_CHECK (1 << 5)
40 #define STRIP_PAD (1 << 8)
41 #define DRTRY_DISABLE (1 << 10)
42 #define FULL_DUPLEX (1 << 20)
43 #define HBD_DISABLE (1 << 28)
44 #define MAC_ADDR_HIGH_REG (MAC_BASE_ADDR + 0x04)
45 #define MAC_ADDR_LOW_REG (MAC_BASE_ADDR + 0x08)
46 #define MII_ADDR_REG (MAC_BASE_ADDR + 0x14)
47 #define MII_ADDR_SHIFT (11)
48 #define MII_REG_SHIFT (6)
49 #define MII_DATA_REG (MAC_BASE_ADDR + 0x18)
50 /* Link interrupt registers */
51 #define LINK_INT_CSR (MAC_BASE_ADDR + 0xD0)
52 #define LINK_INT_EN (1 << 0)
53 #define LINK_PHY_ADDR_SHIFT (1)
54 #define LINK_PHY_REG_SHIFT (6)
55 #define LINK_BIT_UP_SHIFT (11)
56 #define LINK_UP (1 << 16)
57 #define LINK_INT_POLL_TIME (MAC_BASE_ADDR + 0xD4)
58 #define LINK_POLL_MASK ((1 << 20) - 1)
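/* The MAC hardware appears to poll a single PHY register at the interval
 * programmed in LINK_INT_POLL_TIME and raises the link interrupt when the
 * bit selected via LINK_BIT_UP_SHIFT changes; LINK_INT_CSR selects the PHY
 * address and register to watch (see nuport_mac_open()). */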
59
60 #define DMA_CHAN_WIDTH 32
61 #define DMA_RX_CHAN 0
62 #define DMA_TX_CHAN 2
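/* Each DMA channel exposes a 32-byte register window; the RX and TX register
 * blocks below are simply offset by channel number * DMA_CHAN_WIDTH. */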
63
64 /* Receive DMA registers */
65 #define RX_DMA_BASE ((priv->dma_base) + \
66 (DMA_CHAN_WIDTH * DMA_RX_CHAN))
67 #define RX_BUFFER_ADDR (RX_DMA_BASE + 0x00)
68 #define RX_MAX_BYTES (RX_DMA_BASE + 0x04)
69 #define RX_ACT_BYTES (RX_DMA_BASE + 0x08)
70 #define RX_START_DMA (RX_DMA_BASE + 0x0C)
71 #define RX_DMA_ENABLE (1 << 0)
72 #define RX_DMA_RESET (1 << 1)
73 #define RX_DMA_STATUS_FIFO (1 << 12)
74 #define RX_DMA_ENH (RX_DMA_BASE + 0x14)
75 #define RX_DMA_INT_ENABLE (1 << 1)
76
77 /* Transmit DMA registers */
78 #define TX_DMA_BASE ((priv->dma_base) + \
79 (DMA_CHAN_WIDTH * DMA_TX_CHAN))
80 #define TX_BUFFER_ADDR (TX_DMA_BASE + 0x00)
81 #define TX_PKT_BYTES (TX_DMA_BASE + 0x04)
82 #define TX_BYTES_SENT (TX_DMA_BASE + 0x08)
83 #define TX_START_DMA (TX_DMA_BASE + 0x0C)
84 #define TX_DMA_ENABLE (1 << 0)
85 #define TX_DMA_START_FRAME (1 << 2)
86 #define TX_DMA_END_FRAME (1 << 3)
87 #define TX_DMA_PAD_DISABLE (1 << 8)
88 #define TX_DMA_CRC_DISABLE (1 << 9)
89 #define TX_DMA_FIFO_FULL (1 << 16)
90 #define TX_DMA_FIFO_EMPTY (1 << 17)
91 #define TX_DMA_STATUS_AVAIL (1 << 18)
92 #define TX_DMA_RESET (1 << 24)
93 #define TX_DMA_STATUS (TX_DMA_BASE + 0x10)
94 #define TX_DMA_ENH (TX_DMA_BASE + 0x14)
95 #define TX_DMA_ENH_ENABLE (1 << 0)
96 #define TX_DMA_INT_FIFO (1 << 1)
97
98 #define RX_ALLOC_SIZE SZ_2K
99 #define MAX_ETH_FRAME_SIZE 1536
100 #define RX_SKB_TAILROOM 128
101 #define RX_SKB_HEADROOM (RX_ALLOC_SIZE - \
102 (MAX_ETH_FRAME_SIZE + RX_SKB_TAILROOM) + 0)
103
104 /* WDT Late COL Length COL Type */
105 #define ERROR_FILTER_MASK ((1<<14) | (1<<15) | (1<<16) | (1<<17) | (0<<18) | \
106 /* MII Dribbling CRC Len/type Control */\
107 (1<<19) | (1<<20) | (1<<21) | (0<<24) | (1<<25) | \
108 /* Unsup Missed */\
109 (1<<26) | (0<<31))
110 #define TX_RING_SIZE 30
111 #define RX_RING_SIZE 30
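/* The DMA engine is fed a single buffer per channel at a time, so these
 * "rings" are software-only queues of skbs that the interrupt handlers
 * hand to the hardware one buffer after another. */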
112
113 static inline u32 nuport_mac_readl(void __iomem *reg)
114 {
115 return __raw_readl(reg);
116 }
117
118 static inline u8 nuport_mac_readb(void __iomem *reg)
119 {
120 return __raw_readb(reg);
121 }
122
123 static inline void nuport_mac_writel(u32 value, void __iomem *reg)
124 {
125 __raw_writel(value, reg);
126 }
127
128 static inline void nuport_mac_writeb(u8 value, void __iomem *reg)
129 {
130 __raw_writeb(value, reg);
131 }
132
133 /* MAC private data */
134 struct nuport_mac_priv {
135 spinlock_t lock;
136
137 void __iomem *mac_base;
138 void __iomem *dma_base;
139
140 int rx_irq;
141 int tx_irq;
142 int link_irq;
143 struct clk *emac_clk;
144 struct clk *ephy_clk;
145
146 /* Transmit buffers */
147 struct sk_buff *tx_skb[TX_RING_SIZE];
148 dma_addr_t tx_addr;
149 unsigned int valid_txskb[TX_RING_SIZE];
150 unsigned int cur_tx;
151 unsigned int dma_tx;
152 unsigned int tx_full;
153
154 /* Receive buffers */
155 struct sk_buff *rx_skb[RX_RING_SIZE];
156 dma_addr_t rx_addr;
157 unsigned int irq_rxskb[RX_RING_SIZE];
158 int pkt_len[RX_RING_SIZE];
159 unsigned int cur_rx;
160 unsigned int dma_rx;
161 unsigned int rx_full;
162
163 unsigned int first_pkt;
164
165 /* Private data */
166 struct napi_struct napi;
167 struct net_device *dev;
168 struct platform_device *pdev;
169 struct mii_bus *mii_bus;
170 struct phy_device *phydev;
171 int old_link;
172 int old_duplex;
173 u32 msg_level;
174 unsigned int buffer_shifting_len;
175 };
176
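/* Wait up to three seconds for the MII management interface to go idle */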
177 static inline int nuport_mac_mii_busy_wait(struct nuport_mac_priv *priv)
178 {
179 unsigned long curr;
180 unsigned long finish = jiffies + 3 * HZ;
181
182 do {
183 curr = jiffies;
184 if (!(nuport_mac_readl(MII_ADDR_REG) & MII_BUSY))
185 return 0;
186 cpu_relax();
187 } while (!time_after_eq(curr, finish));
188
189 return -EBUSY;
190 }
191
192 /* Read from PHY registers */
193 static int nuport_mac_mii_read(struct mii_bus *bus,
194 int mii_id, int regnum)
195 {
196 struct net_device *dev = bus->priv;
197 struct nuport_mac_priv *priv = netdev_priv(dev);
198 int ret;
199 u32 val = 0;
200
201 ret = nuport_mac_mii_busy_wait(priv);
202 if (ret)
203 return ret;
204
205 val |= (mii_id << MII_ADDR_SHIFT) | (regnum << MII_REG_SHIFT) | MII_BUSY;
206 nuport_mac_writel(val, MII_ADDR_REG);
207 ret = nuport_mac_mii_busy_wait(priv);
208 if (ret)
209 return ret;
210
211 return nuport_mac_readl(MII_DATA_REG);
212 }
213
214 static int nuport_mac_mii_write(struct mii_bus *bus, int mii_id,
215 int regnum, u16 value)
216 {
217 struct net_device *dev = bus->priv;
218 struct nuport_mac_priv *priv = netdev_priv(dev);
219 int ret;
220 u32 val = 0;
221
222 ret = nuport_mac_mii_busy_wait(priv);
223 if (ret)
224 return ret;
225
226 val |= (mii_id << MII_ADDR_SHIFT) | (regnum << MII_REG_SHIFT);
227 val |= MII_BUSY | MII_WRITE;
228 nuport_mac_writel(value, MII_DATA_REG);
229 nuport_mac_writel(val, MII_ADDR_REG);
230
231 return nuport_mac_mii_busy_wait(priv);
232 }
233
234 static int nuport_mac_mii_reset(struct mii_bus *bus)
235 {
236 return 0;
237 }
238
239 static int nuport_mac_start_tx_dma(struct nuport_mac_priv *priv,
240 struct sk_buff *skb)
241 {
242 u32 reg;
243 int timeout = 2048;
244
245 while (timeout--) {
246 reg = nuport_mac_readl(TX_START_DMA);
247 if (!(reg & TX_DMA_ENABLE)) {
248 netdev_dbg(priv->dev, "dma ready\n");
249 break;
250 }
251 cpu_relax();
252 }
253
254 if (timeout < 0)
255 return -EBUSY;
256
257 priv->tx_addr = dma_map_single(&priv->pdev->dev, skb->data,
258 skb->len, DMA_TO_DEVICE);
259
260 /* enable enhanced mode */
261 nuport_mac_writel(TX_DMA_ENH_ENABLE, TX_DMA_ENH);
262 nuport_mac_writel(priv->tx_addr, TX_BUFFER_ADDR);
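/* the packet length register appears to expect the byte count minus one */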
263 nuport_mac_writel((skb->len) - 1, TX_PKT_BYTES);
264 wmb();
265 reg = TX_DMA_ENABLE | TX_DMA_START_FRAME | TX_DMA_END_FRAME;
266 nuport_mac_writel(reg, TX_START_DMA);
267
268 return 0;
269 }
270
271 static void nuport_mac_reset_tx_dma(struct nuport_mac_priv *priv)
272 {
273 u32 reg;
274
275 reg = nuport_mac_readl(TX_START_DMA);
276 reg |= TX_DMA_RESET;
277 nuport_mac_writel(reg, TX_START_DMA);
278 }
279
280 static int nuport_mac_start_rx_dma(struct nuport_mac_priv *priv,
281 struct sk_buff *skb)
282 {
283 u32 reg;
284 int timeout = 2048;
285
286 while (timeout--) {
287 reg = nuport_mac_readl(RX_START_DMA);
288 if (!(reg & RX_DMA_ENABLE)) {
289 netdev_dbg(priv->dev, "dma ready\n");
290 break;
291 }
292 cpu_relax();
293 }
294
295 if (timeout < 0)
296 return -EBUSY;
297
298 priv->rx_addr = dma_map_single(&priv->pdev->dev, skb->data,
299 RX_ALLOC_SIZE, DMA_FROM_DEVICE);
300
301 nuport_mac_writel(priv->rx_addr, RX_BUFFER_ADDR);
302 wmb();
303 nuport_mac_writel(RX_DMA_ENABLE, RX_START_DMA);
304
305 return 0;
306 }
307
308 static void nuport_mac_reset_rx_dma(struct nuport_mac_priv *priv)
309 {
310 u32 reg;
311
312 reg = nuport_mac_readl(RX_START_DMA);
313 reg |= RX_DMA_RESET;
314 nuport_mac_writel(reg, RX_START_DMA);
315 }
316
317 /* Mask the RX DMA interrupt enable bit (used while NAPI is polling) */
318 static void nuport_mac_disable_rx_dma(struct nuport_mac_priv *priv)
319 {
320 u32 reg;
321
322 reg = nuport_mac_readl(RX_DMA_ENH);
323 reg &= ~RX_DMA_INT_ENABLE;
324 nuport_mac_writel(reg, RX_DMA_ENH);
325 }
326
327 static void nuport_mac_enable_rx_dma(struct nuport_mac_priv *priv)
328 {
329 u32 reg;
330
331 reg = nuport_mac_readl(RX_DMA_ENH);
332 reg |= RX_DMA_INT_ENABLE;
333 nuport_mac_writel(reg, RX_DMA_ENH);
334 }
335
336 /* Add packets to the transmit queue */
337 static int nuport_mac_start_xmit(struct sk_buff *skb, struct net_device *dev)
338 {
339 unsigned long flags;
340 struct nuport_mac_priv *priv = netdev_priv(dev);
341 int ret;
342
343 if (netif_queue_stopped(dev)) {
344 netdev_warn(dev, "netif queue was stopped, restarting\n");
345 netif_start_queue(dev);
346 }
347
348 spin_lock_irqsave(&priv->lock, flags);
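/* The first packet after open (or after the ring has drained) starts the
 * TX DMA directly; later packets sit in the software ring and are chained
 * from the TX completion interrupt. */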
349 if (priv->first_pkt) {
350 ret = nuport_mac_start_tx_dma(priv, skb);
351 if (ret) {
352 netif_stop_queue(dev);
353 spin_unlock_irqrestore(&priv->lock, flags);
354 netdev_err(dev, "transmit path busy\n");
355 return NETDEV_TX_BUSY;
356 }
357 priv->first_pkt = 0;
358 }
359
360 priv->tx_skb[priv->cur_tx] = skb;
361 dev->stats.tx_bytes += skb->len;
362 dev->stats.tx_packets++;
363 priv->valid_txskb[priv->cur_tx] = 1;
364 priv->cur_tx++;
365 dev->trans_start = jiffies;
366
367 if (priv->cur_tx >= TX_RING_SIZE)
368 priv->cur_tx = 0;
369
370 spin_unlock_irqrestore(&priv->lock, flags);
371
372 if (priv->valid_txskb[priv->cur_tx]) {
373 priv->tx_full = 1;
374 netdev_err(dev, "stopping queue\n");
375 netif_stop_queue(dev);
376 }
377
378 return NETDEV_TX_OK;
379 }
380
381 static void nuport_mac_adjust_link(struct net_device *dev)
382 {
383 struct nuport_mac_priv *priv = netdev_priv(dev);
384 struct phy_device *phydev = priv->phydev;
385 unsigned int status_changed = 0;
386 u32 reg;
387
388 BUG_ON(!phydev);
389
390 if (priv->old_link != phydev->link) {
391 status_changed = 1;
392 priv->old_link = phydev->link;
393 }
394
395 if (phydev->link && (priv->old_duplex != phydev->duplex)) {
396 reg = nuport_mac_readl(CTRL_REG);
397 if (phydev->duplex == DUPLEX_FULL)
398 reg |= FULL_DUPLEX;
399 else
400 reg &= ~FULL_DUPLEX;
401 nuport_mac_writel(reg, CTRL_REG);
402
403 status_changed = 1;
404 priv->old_duplex = phydev->duplex;
405 }
406
407 if (!status_changed)
408 return;
409
410 pr_info("%s: link %s", dev->name, phydev->link ?
411 "UP" : "DOWN");
412 if (phydev->link) {
413 pr_cont(" - %d/%s", phydev->speed,
414 phydev->duplex == DUPLEX_FULL ? "full" : "half");
415 }
416 pr_cont("\n");
417 }
418
419 static irqreturn_t nuport_mac_link_interrupt(int irq, void *dev_id)
420 {
421 struct net_device *dev = dev_id;
422 struct nuport_mac_priv *priv = netdev_priv(dev);
423 u32 reg;
424 u8 phy_addr;
425
426 reg = nuport_mac_readl(LINK_INT_CSR);
427 phy_addr = (reg >> LINK_PHY_ADDR_SHIFT) & (PHY_MAX_ADDR - 1);
428
429 if (phy_addr != priv->phydev->addr) {
430 netdev_err(dev, "spurious PHY irq (phy: %d)\n", phy_addr);
431 return IRQ_NONE;
432 }
433
434 priv->phydev->link = !!(reg & LINK_UP);
435 nuport_mac_adjust_link(dev);
436
437 return IRQ_HANDLED;
438 }
439
440 static irqreturn_t nuport_mac_tx_interrupt(int irq, void *dev_id)
441 {
442 struct net_device *dev = (struct net_device *)dev_id;
443 struct nuport_mac_priv *priv = netdev_priv(dev);
444 struct sk_buff *skb;
445 unsigned long flags;
446 int ret;
447 u32 reg;
448
449 spin_lock_irqsave(&priv->lock, flags);
450 /* clear status word available if ready */
451 reg = nuport_mac_readl(TX_START_DMA);
452 if (reg & TX_DMA_STATUS_AVAIL) {
453 nuport_mac_writel(reg, TX_START_DMA);
454 reg = nuport_mac_readl(TX_DMA_STATUS);
455
456 if (reg & 1)
457 dev->stats.tx_errors++;
458 } else
459 netdev_dbg(dev, "no status word: %08x\n", reg);
460
461 skb = priv->tx_skb[priv->dma_tx];
462 priv->tx_skb[priv->dma_tx] = NULL;
463 priv->valid_txskb[priv->dma_tx] = 0;
464 dma_unmap_single(&priv->pdev->dev, priv->tx_addr, skb->len,
465 DMA_TO_DEVICE);
466 dev_kfree_skb_irq(skb);
467
468 priv->dma_tx++;
469 if (priv->dma_tx >= TX_RING_SIZE)
470 priv->dma_tx = 0;
471
472 if (!priv->valid_txskb[priv->dma_tx])
473 priv->first_pkt = 1;
474 else {
475 ret = nuport_mac_start_tx_dma(priv, priv->tx_skb[priv->dma_tx]);
476 if (ret)
477 netdev_err(dev, "failed to restart TX dma\n");
478 }
479
480 if (priv->tx_full) {
481 netdev_dbg(dev, "restarting transmit queue\n");
482 netif_wake_queue(dev);
483 priv->tx_full = 0;
484 }
485
486 spin_unlock_irqrestore(&priv->lock, flags);
487
488 return IRQ_HANDLED;
489 }
490
491 static unsigned int nuport_mac_has_work(struct nuport_mac_priv *priv)
492 {
493 unsigned int i;
494
495 for (i = 0; i < RX_RING_SIZE; i++)
496 if (priv->rx_skb[i])
497 return 1;
498
499 return 0;
500 }
501
502 static irqreturn_t nuport_mac_rx_interrupt(int irq, void *dev_id)
503 {
504 struct net_device *dev = (struct net_device *)dev_id;
505 struct nuport_mac_priv *priv = netdev_priv(dev);
506 unsigned long flags;
507 int ret;
508
509 spin_lock_irqsave(&priv->lock, flags);
510 if (!priv->rx_full) {
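/* RX_ACT_BYTES includes 4 trailing bytes, presumably the FCS, which are
 * dropped here */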
511 priv->pkt_len[priv->dma_rx] = nuport_mac_readl(RX_ACT_BYTES) - 4;
512 priv->irq_rxskb[priv->dma_rx] = 0;
513 priv->dma_rx++;
514
515 if (priv->dma_rx >= RX_RING_SIZE)
516 priv->dma_rx = 0;
517 } else
518 priv->rx_full = 0;
519
520 if (priv->irq_rxskb[priv->dma_rx] == 1) {
521 ret = nuport_mac_start_rx_dma(priv, priv->rx_skb[priv->dma_rx]);
522 if (ret)
523 netdev_err(dev, "failed to start rx dma\n");
524 } else {
525 priv->rx_full = 1;
526 netdev_dbg(dev, "RX ring full\n");
527 }
528
529 if (likely(nuport_mac_has_work(priv))) {
530 /* mask the RX DMA interrupt until the NAPI poll has drained the ring */
531 nuport_mac_disable_rx_dma(priv);
532 napi_schedule(&priv->napi);
533 }
534 spin_unlock_irqrestore(&priv->lock, flags);
535
536 return IRQ_HANDLED;
537 }
538
539 /* Process received packets, called from NAPI poll context */
540 static int nuport_mac_rx(struct net_device *dev, int limit)
541 {
542 struct nuport_mac_priv *priv = netdev_priv(dev);
543 struct sk_buff *skb;
544 int len, status;
545 int count = 0;
546
547 while (count < limit && !priv->irq_rxskb[priv->cur_rx]) {
548 skb = priv->rx_skb[priv->cur_rx];
549 len = priv->pkt_len[priv->cur_rx];
550
551 /* Remove 2 bytes added by RX buffer shifting */
552 len = len - priv->buffer_shifting_len;
553 skb->data = skb->data + priv->buffer_shifting_len;
554
555 /* Get packet status */
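/* The DMA engine appears to deposit a 32-bit status word just past the
 * received data; the bit tests below decode it (bit 30: filter pass,
 * bit 31: missed frame, bits 27-29: multicast/broadcast/other host). */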
556 status = get_unaligned((u32 *) (skb->data + len));
557 skb->dev = dev;
558
559 dma_unmap_single(&priv->pdev->dev, priv->rx_addr, RX_ALLOC_SIZE,
560 DMA_FROM_DEVICE);
561
562 /* packet filter failed */
563 if (!(status & (1 << 30))) {
564 dev_kfree_skb_irq(skb);
565 goto exit;
566 }
567
568 /* missed frame */
569 if (status & (1 << 31)) {
570 dev->stats.rx_missed_errors++;
571 dev_kfree_skb_irq(skb);
572 goto exit;
573 }
574
575 /* Not ethernet type */
576 if ((!(status & (1 << 18))) || (status & ERROR_FILTER_MASK))
577 dev->stats.rx_errors++;
578
579 if (len > MAX_ETH_FRAME_SIZE) {
580 dev_kfree_skb_irq(skb);
581 goto exit;
582 } else
583 skb_put(skb, len);
584
585 skb->protocol = eth_type_trans(skb, dev);
586 dev->stats.rx_packets++;
587
588 if (status & (1 << 29))
589 skb->pkt_type = PACKET_OTHERHOST;
590 if (status & (1 << 27))
591 skb->pkt_type = PACKET_MULTICAST;
592 if (status & (1 << 28))
593 skb->pkt_type = PACKET_BROADCAST;
594
595 skb->ip_summed = CHECKSUM_UNNECESSARY;
596
597 /* Pass the received packet to network layer */
598 status = netif_receive_skb(skb);
599 if (status != NET_RX_DROP)
600 dev->stats.rx_bytes += len - 4; /* Without CRC */
601 else
602 dev->stats.rx_dropped++;
603
604 dev->last_rx = jiffies;
605
606 exit:
607 skb = netdev_alloc_skb(dev, RX_ALLOC_SIZE);
608 skb_reserve(skb, RX_SKB_HEADROOM);
609 priv->rx_skb[priv->cur_rx] = skb;
610 priv->irq_rxskb[priv->cur_rx] = 1;
611 priv->cur_rx++;
612
613 if (priv->cur_rx >= RX_RING_SIZE)
614 priv->cur_rx = 0;
615 count++;
616 }
617
618 return count;
619 }
620
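/* NAPI poll handler: drain received packets and re-enable the RX DMA
 * interrupt once less than the full budget was consumed */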
621 static int nuport_mac_poll(struct napi_struct *napi, int budget)
622 {
623 struct nuport_mac_priv *priv =
624 container_of(napi, struct nuport_mac_priv, napi);
625 struct net_device *dev = priv->dev;
626 int work_done;
627
628 work_done = nuport_mac_rx(dev, budget);
629
630 if (work_done < budget) {
631 napi_complete(napi);
632 nuport_mac_enable_rx_dma(priv);
633 }
634
635 return work_done;
636 }
637
638 static void nuport_mac_init_tx_ring(struct nuport_mac_priv *priv)
639 {
640 int i;
641
642 priv->cur_tx = priv->dma_tx = priv->tx_full = 0;
643 for (i = 0; i < TX_RING_SIZE; i++) {
644 priv->tx_skb[i] = NULL;
645 priv->valid_txskb[i] = 0;
646 }
647 priv->first_pkt = 1;
648 }
649
650 static int nuport_mac_init_rx_ring(struct net_device *dev)
651 {
652 struct nuport_mac_priv *priv = netdev_priv(dev);
653 struct sk_buff *skb;
654 int i;
655
656 priv->cur_rx = priv->dma_rx = priv->rx_full = 0;
657
658 for (i = 0; i < RX_RING_SIZE; i++) {
659 skb = netdev_alloc_skb(dev, RX_ALLOC_SIZE);
660 if (!skb)
661 return -ENOMEM;
662 skb_reserve(skb, RX_SKB_HEADROOM);
663 priv->rx_skb[i] = skb;
664 priv->irq_rxskb[i] = 1;
665 }
666
667 return 0;
668 }
669
670 static void nuport_mac_free_rx_ring(struct nuport_mac_priv *priv)
671 {
672 int i;
673
674 for (i = 0; i < RX_RING_SIZE; i++) {
675 if (!priv->rx_skb[i])
676 continue;
677
678 dev_kfree_skb(priv->rx_skb[i]);
679 priv->rx_skb[i] = NULL;
680 }
681 }
682
683 static void nuport_mac_read_mac_address(struct net_device *dev)
684 {
685 struct nuport_mac_priv *priv = netdev_priv(dev);
686 int i;
687
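/* The first four address bytes live in MAC_ADDR_LOW_REG, the last two in
 * MAC_ADDR_HIGH_REG */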
688 for (i = 0; i < 4; i++)
689 dev->dev_addr[i] = nuport_mac_readb(MAC_ADDR_LOW_REG + i);
690 dev->dev_addr[4] = nuport_mac_readb(MAC_ADDR_HIGH_REG);
691 dev->dev_addr[5] = nuport_mac_readb(MAC_ADDR_HIGH_REG + 1);
692
693 if (!is_valid_ether_addr(dev->dev_addr)) {
694 dev_info(&priv->pdev->dev, "using random address\n");
695 random_ether_addr(dev->dev_addr);
696 }
697 }
698
699 static int nuport_mac_change_mac_address(struct net_device *dev, void *mac_addr)
700 {
701 struct sockaddr *addr = mac_addr;
702 struct nuport_mac_priv *priv = netdev_priv(dev);
703 unsigned long *temp = (unsigned long *)dev->dev_addr;
704 u32 high, low;
705
706 if (netif_running(dev))
707 return -EBUSY;
708
709 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
710
711 spin_lock_irq(&priv->lock);
712
713 nuport_mac_writel(*temp, MAC_ADDR_LOW_REG);
714 temp = (unsigned long *)(dev->dev_addr + 4);
715 nuport_mac_writel(*temp, MAC_ADDR_HIGH_REG);
716
717 low = nuport_mac_readl(MAC_ADDR_LOW_REG);
718 high = nuport_mac_readl(MAC_ADDR_HIGH_REG);
719
720 spin_unlock_irq(&priv->lock);
721
722 return 0;
723 }
724
725 static int nuport_mac_open(struct net_device *dev)
726 {
727 int ret;
728 struct nuport_mac_priv *priv = netdev_priv(dev);
729 unsigned long flags;
730 u32 reg = 0;
731
732 ret = clk_enable(priv->emac_clk);
733 if (ret) {
734 netdev_err(dev, "failed to enable EMAC clock\n");
735 return ret;
736 }
737
738 /* Enable RX/TX and assume a full-duplex link by default */
739 reg |= RX_ENABLE | TX_ENABLE;
740 reg |= DEFER_CHECK | STRIP_PAD | DRTRY_DISABLE;
741 reg |= FULL_DUPLEX | HBD_DISABLE;
742 nuport_mac_writel(reg, CTRL_REG);
743
744 /* program the current MAC address into the hardware; the ndo callback
 * cannot be reused here because the device already counts as running
 * and it expects a struct sockaddr rather than a raw address */
745 nuport_mac_writel(*(unsigned long *)dev->dev_addr, MAC_ADDR_LOW_REG);
nuport_mac_writel(*(unsigned long *)(dev->dev_addr + 4), MAC_ADDR_HIGH_REG);
746
747 ret = request_irq(priv->link_irq, &nuport_mac_link_interrupt,
748 0, dev->name, dev);
749 if (ret) {
750 netdev_err(dev, "unable to request link interrupt\n");
751 goto out_emac_clk;
752 }
753
754 phy_start(priv->phydev);
755
756 /* Enable link interrupt monitoring for our PHY address */
757 reg = LINK_INT_EN | (priv->phydev->addr << LINK_PHY_ADDR_SHIFT);
758 /* MII_BMSR register to be watched */
759 reg |= (1 << LINK_PHY_REG_SHIFT);
760 /* Link status (BMSR_LSTATUS, bit 2) to be watched in particular */
761 reg |= (2 << LINK_BIT_UP_SHIFT);
762
763 spin_lock_irqsave(&priv->lock, flags);
764 nuport_mac_writel(reg, LINK_INT_CSR);
765 nuport_mac_writel(LINK_POLL_MASK, LINK_INT_POLL_TIME);
766 spin_unlock_irqrestore(&priv->lock, flags);
767
768 ret = request_irq(priv->tx_irq, &nuport_mac_tx_interrupt,
769 0, dev->name, dev);
770 if (ret) {
771 netdev_err(dev, "unable to request tx interrupt\n");
772 goto out_link_irq;
773 }
774
775 napi_enable(&priv->napi);
776
777 ret = request_irq(priv->rx_irq, &nuport_mac_rx_interrupt,
778 0, dev->name, dev);
779 if (ret) {
780 netdev_err(dev, "unable to request rx interrupt\n");
781 goto out_tx_irq;
782 }
783
784 netif_start_queue(dev);
785
786 nuport_mac_init_tx_ring(priv);
787
788 ret = nuport_mac_init_rx_ring(dev);
789 if (ret) {
790 netdev_err(dev, "rx ring init failed\n");
791 goto out_rx_skb;
792 }
793
794 nuport_mac_reset_tx_dma(priv);
795 nuport_mac_reset_rx_dma(priv);
796
797 /* Start RX DMA */
798 return nuport_mac_start_rx_dma(priv, priv->rx_skb[0]);
799
800 out_rx_skb:
801 nuport_mac_free_rx_ring(priv);
802 free_irq(priv->rx_irq, dev);
803 out_tx_irq:
804 free_irq(priv->tx_irq, dev);
805 out_link_irq:
806 free_irq(priv->link_irq, dev);
807 out_emac_clk:
808 clk_disable(priv->emac_clk);
809 return ret;
810 }
811
812 static int nuport_mac_close(struct net_device *dev)
813 {
814 struct nuport_mac_priv *priv = netdev_priv(dev);
815
816 napi_disable(&priv->napi);
817 netif_stop_queue(dev);
818 
819 free_irq(priv->link_irq, dev);
820 /* disable PHY polling */
821 spin_lock_irq(&priv->lock);
822 nuport_mac_writel(0, LINK_INT_CSR);
823 nuport_mac_writel(0, LINK_INT_POLL_TIME);
824 spin_unlock_irq(&priv->lock);
825 phy_stop(priv->phydev);
826 
827 free_irq(priv->tx_irq, dev);
828 free_irq(priv->rx_irq, dev);
829
830 nuport_mac_free_rx_ring(priv);
831
832 clk_disable(priv->emac_clk);
833
834 return 0;
835 }
836
837 static void nuport_mac_tx_timeout(struct net_device *dev)
838 {
839 struct nuport_mac_priv *priv = netdev_priv(dev);
840 unsigned int i;
841
842 netdev_warn(dev, "transmit timeout, attempting recovery\n");
843
844 netdev_info(dev, "TX DMA regs\n");
845 for (i = 0; i < DMA_CHAN_WIDTH; i += 4)
846 netdev_info(dev, "[%02x]: 0x%08x\n", i, nuport_mac_readl(TX_DMA_BASE + i));
847 netdev_info(dev, "RX DMA regs\n");
848 for (i = 0; i < DMA_CHAN_WIDTH; i += 4)
849 netdev_info(dev, "[%02x]: 0x%08x\n", i, nuport_mac_readl(RX_DMA_BASE + i));
850
851 nuport_mac_init_tx_ring(priv);
852 nuport_mac_reset_tx_dma(priv);
853
854 netif_wake_queue(dev);
855 }
856
857 static int nuport_mac_mii_probe(struct net_device *dev)
858 {
859 struct nuport_mac_priv *priv = netdev_priv(dev);
860 struct phy_device *phydev = NULL;
861 int ret;
862
863 ret = clk_enable(priv->ephy_clk);
864 if (ret) {
865 netdev_err(dev, "unable to enable ePHY clk\n");
866 return ret;
867 }
868
869 phydev = phy_find_first(priv->mii_bus);
870 if (!phydev) {
871 netdev_err(dev, "no PHYs found\n");
872 ret = -ENODEV;
873 goto out;
874 }
875
876 phydev = phy_connect(dev, dev_name(&phydev->dev),
877 nuport_mac_adjust_link, 0,
878 PHY_INTERFACE_MODE_MII);
879 if (IS_ERR(phydev)) {
880 netdev_err(dev, "could not attach PHY\n");
881 ret = PTR_ERR(phydev);
882 goto out;
883 }
884
885 phydev->supported &= PHY_BASIC_FEATURES;
886 phydev->advertising = phydev->supported;
887 priv->phydev = phydev;
888 priv->old_link = 1;
889 priv->old_duplex = DUPLEX_FULL;
890
891 dev_info(&priv->pdev->dev, "attached PHY driver [%s] "
892 "(mii_bus:phy_addr=%d)\n",
893 phydev->drv->name, phydev->addr);
894
895 return 0;
896
897 out:
898 /* disable the Ethernet PHY clock for the moment */
899 clk_disable(priv->ephy_clk);
900
901 return ret;
902 }
903
904 static void nuport_mac_ethtool_drvinfo(struct net_device *dev,
905 struct ethtool_drvinfo *info)
906 {
907 strncpy(info->driver, "nuport-mac", sizeof(info->driver));
908 strncpy(info->version, "0.1", sizeof(info->version));
909 strncpy(info->fw_version, "N/A", sizeof(info->fw_version));
910 strncpy(info->bus_info, "internal", sizeof(info->bus_info));
911 info->n_stats = 0;
912 info->testinfo_len = 0;
913 info->regdump_len = 0;
914 info->eedump_len = 0;
915 }
916
917 static int nuport_mac_ethtool_get_settings(struct net_device *dev,
918 struct ethtool_cmd *cmd)
919 {
920 struct nuport_mac_priv *priv = netdev_priv(dev);
921
922 if (priv->phydev)
923 return phy_ethtool_gset(priv->phydev, cmd);
924
925 return -EINVAL;
926 }
927
928 static int nuport_mac_ethtool_set_settings(struct net_device *dev,
929 struct ethtool_cmd *cmd)
930 {
931 struct nuport_mac_priv *priv = netdev_priv(dev);
932
933 if (priv->phydev)
934 return phy_ethtool_sset(priv->phydev, cmd);
935
936 return -EINVAL;
937 }
938
939 static void nuport_mac_set_msglevel(struct net_device *dev, u32 msg_level)
940 {
941 struct nuport_mac_priv *priv = netdev_priv(dev);
942
943 priv->msg_level = msg_level;
944 }
945
946 static u32 nuport_mac_get_msglevel(struct net_device *dev)
947 {
948 struct nuport_mac_priv *priv = netdev_priv(dev);
949
950 return priv->msg_level;
951 }
952
953 static const struct ethtool_ops nuport_mac_ethtool_ops = {
954 .get_drvinfo = nuport_mac_ethtool_drvinfo,
955 .get_link = ethtool_op_get_link,
956 .get_settings = nuport_mac_ethtool_get_settings,
957 .set_settings = nuport_mac_ethtool_set_settings,
958 .set_msglevel = nuport_mac_set_msglevel,
959 .get_msglevel = nuport_mac_get_msglevel,
960 };
961
962 static const struct net_device_ops nuport_mac_ops = {
963 .ndo_open = nuport_mac_open,
964 .ndo_stop = nuport_mac_close,
965 .ndo_start_xmit = nuport_mac_start_xmit,
966 .ndo_change_mtu = eth_change_mtu,
967 .ndo_validate_addr = eth_validate_addr,
968 .ndo_set_mac_address = nuport_mac_change_mac_address,
969 .ndo_tx_timeout = nuport_mac_tx_timeout,
970 };
971
972 static int __devinit nuport_mac_probe(struct platform_device *pdev)
973 {
974 struct net_device *dev;
975 struct nuport_mac_priv *priv = NULL;
976 struct resource *regs, *dma;
977 int ret = 0;
978 int rx_irq, tx_irq, link_irq;
979 int i;
980 const unsigned int *intspec;
981
982 dev = alloc_etherdev(sizeof(struct nuport_mac_priv));
983 if (!dev) {
984 dev_err(&pdev->dev, "no memory for net_device\n");
985 return -ENOMEM;
986 }
987
988 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
989 dma = platform_get_resource(pdev, IORESOURCE_MEM, 1);
990 if (!regs || !dma) {
991 dev_err(&pdev->dev, "failed to get regs resources\n");
992 ret = -ENODEV;
993 goto out;
994 }
995
996 rx_irq = platform_get_irq(pdev, 0);
997 tx_irq = platform_get_irq(pdev, 1);
998 link_irq = platform_get_irq(pdev, 2);
999 if (rx_irq < 0 || tx_irq < 0 || link_irq < 0) {
1000 ret = -ENODEV;
1001 goto out;
1002 }
1003
1004 platform_set_drvdata(pdev, dev);
1005 SET_NETDEV_DEV(dev, &pdev->dev);
1006 priv = netdev_priv(dev);
1007 priv->pdev = pdev;
1008 priv->dev = dev;
1009 spin_lock_init(&priv->lock);
1010
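/* Optional 2-byte RX buffer shifting, presumably so that the IP header of
 * received frames ends up 32-bit aligned */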
1011 intspec = of_get_property(pdev->dev.of_node,
1012 "nuport-mac,buffer-shifting", NULL);
1013 if (!intspec)
1014 priv->buffer_shifting_len = 0;
1015 else
1016 priv->buffer_shifting_len = 2;
1017
1018 priv->mac_base = devm_ioremap(&pdev->dev,
1019 regs->start, resource_size(regs));
1020 if (!priv->mac_base) {
1021 dev_err(&pdev->dev, "failed to remap regs\n");
1022 ret = -ENOMEM;
1023 goto out_platform;
1024 }
1025
1026 priv->dma_base = devm_ioremap(&pdev->dev,
1027 dma->start, resource_size(dma));
1028 if (!priv->dma_base) {
1029 dev_err(&pdev->dev, "failed to remap dma-regs\n");
1030 ret = -ENOMEM;
1031 goto out_platform;
1032 }
1033
1034 priv->emac_clk = clk_get(&pdev->dev, "emac");
1035 if (IS_ERR_OR_NULL(priv->emac_clk)) {
1036 dev_err(&pdev->dev, "failed to get emac clk\n");
1037 ret = PTR_ERR(priv->emac_clk);
1038 goto out_platform;
1039 }
1040
1041 priv->ephy_clk = clk_get(&pdev->dev, "ephy");
1042 if (IS_ERR_OR_NULL(priv->ephy_clk)) {
1043 dev_err(&pdev->dev, "failed to get ephy clk\n");
1044 ret = PTR_ERR(priv->ephy_clk);
1045 goto out_emac_clk;
1046 }
1047
1048 priv->link_irq = link_irq;
1049 priv->rx_irq = rx_irq;
1050 priv->tx_irq = tx_irq;
1051 priv->msg_level = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK;
1052 dev->netdev_ops = &nuport_mac_ops;
1053 dev->ethtool_ops = &nuport_mac_ethtool_ops;
1054 dev->watchdog_timeo = HZ;
1055 dev->flags = IFF_BROADCAST; /* Supports Broadcast */
1056 dev->tx_queue_len = TX_RING_SIZE / 2;
1057
1058 netif_napi_add(dev, &priv->napi, nuport_mac_poll, 64);
1059
1060 priv->mii_bus = mdiobus_alloc();
1061 if (!priv->mii_bus) {
1062 dev_err(&pdev->dev, "mii bus allocation failed\n");
1063 ret = -ENOMEM;
goto out_ephy_clk;
1064 }
1065
1066 priv->mii_bus->priv = dev;
1067 priv->mii_bus->read = nuport_mac_mii_read;
1068 priv->mii_bus->write = nuport_mac_mii_write;
1069 priv->mii_bus->reset = nuport_mac_mii_reset;
1070 priv->mii_bus->name = "nuport-mac-mii";
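/* PHY address 0 is masked out and will not be probed by mdiobus_register() */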
1071 priv->mii_bus->phy_mask = (1 << 0);
1072 snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s", pdev->name);
1073 priv->mii_bus->irq = kzalloc(PHY_MAX_ADDR * sizeof(int), GFP_KERNEL);
1074 if (!priv->mii_bus->irq) {
1075 dev_err(&pdev->dev, "failed to allocate mii_bus irqs\n");
1076 ret = -ENOMEM;
1077 goto out_mdio;
1078 }
1079
1080 /* We support PHY interrupts routed back to the MAC */
1081 for (i = 0; i < PHY_MAX_ADDR; i++)
1082 priv->mii_bus->irq[i] = PHY_IGNORE_INTERRUPT;
1083
1084 ret = mdiobus_register(priv->mii_bus);
1085 if (ret) {
1086 dev_err(&pdev->dev, "failed to register mii_bus\n");
1087 goto out_mdio_irq;
1088 }
1089
1090 ret = nuport_mac_mii_probe(dev);
1091 if (ret) {
1092 dev_err(&pdev->dev, "failed to probe MII bus\n");
1093 goto out_mdio_unregister;
1094 }
1095
1096 ret = register_netdev(dev);
1097 if (ret) {
1098 dev_err(&pdev->dev, "failed to register net_device\n");
1099 goto out_mdio_probe;
1100 }
1101
1102 /* read existing mac address */
1103 nuport_mac_read_mac_address(dev);
1104
1105 dev_info(&pdev->dev, "registered (MAC: %pM)\n", dev->dev_addr);
1106
1107 return ret;
1108
1109 out_mdio_probe:
1110 phy_disconnect(priv->phydev);
1111 out_mdio_unregister:
1112 mdiobus_unregister(priv->mii_bus);
1113 out_mdio_irq:
1114 kfree(priv->mii_bus->irq);
1115 out_mdio:
1116 mdiobus_free(priv->mii_bus);
1117 out_ephy_clk:
1118 clk_put(priv->ephy_clk);
1119 out_emac_clk:
1120 clk_put(priv->emac_clk);
1121 out_platform:
1122 platform_set_drvdata(pdev, NULL);
1123 out:
1124 free_netdev(dev);
return ret;
1125 }
1126
1127 static int __devexit nuport_mac_remove(struct platform_device *pdev)
1128 {
1129 struct net_device *dev = platform_get_drvdata(pdev);
1130 struct nuport_mac_priv *priv = netdev_priv(dev);
1131
1132 unregister_netdev(dev);
1133 phy_disconnect(priv->phydev);
1134 mdiobus_unregister(priv->mii_bus);
1135 kfree(priv->mii_bus->irq);
1136 mdiobus_free(priv->mii_bus);
1137 clk_put(priv->ephy_clk);
1138 clk_put(priv->emac_clk);
1139 free_netdev(dev);
1140
1141 platform_set_drvdata(pdev, NULL);
1142
1143 return 0;
1144 }
1145
1146 static const struct of_device_id nuport_eth_ids[] = {
1147 {.compatible = "moschip,nuport-mac",},
1148 { /* sentinel */ },
1149 };
1150
1151 static struct platform_driver nuport_eth_driver = {
1152 .driver = {
1153 .name = "nuport-mac",
1154 .owner = THIS_MODULE,
1155 .of_match_table = nuport_eth_ids,
1156 },
1157 .probe = nuport_mac_probe,
1158 .remove = __devexit_p(nuport_mac_remove),
1159 };
1160
1161 module_platform_driver(nuport_eth_driver);
1162
1163 MODULE_AUTHOR("Moschip Semiconductors Ltd.");
1164 MODULE_DESCRIPTION("Moschip MCS8140 Ethernet MAC driver");
1165 MODULE_LICENSE("GPL");