target/linux/mcs814x/files-3.3/drivers/net/ethernet/mcs8140/nuport_mac.c
1 /*
2 * Moschip MCS8140 Ethernet MAC driver
3 *
4 * Copyright (C) 2003, Moschip Semiconductors
5 * Copyright (C) 2012, Florian Fainelli <florian@openwrt.org>
6 *
7 * Licensed under GPLv2
8 */
9 #include <linux/module.h>
10 #include <linux/slab.h>
11 #include <linux/kernel.h>
12 #include <linux/init.h>
13 #include <linux/netdevice.h>
14 #include <linux/etherdevice.h>
15 #include <linux/delay.h>
16 #include <linux/ethtool.h>
17 #include <linux/mii.h>
18 #include <linux/io.h>
19 #include <linux/interrupt.h>
20 #include <linux/platform_device.h>
21 #include <linux/of.h>
22 #include <linux/irq.h>
23 #include <linux/err.h>
24 #include <linux/phy.h>
25 #include <linux/clk.h>
26 #include <linux/dma-mapping.h>
27
28 #include <asm/unaligned.h>
29 #include <asm/sizes.h>
30
31 /* Hardware registers */
32 #define MAC_BASE_ADDR ((priv->mac_base))
33
34 #define CTRL_REG (MAC_BASE_ADDR)
35 #define MII_BUSY (1 << 0)
36 #define MII_WRITE (1 << 1)
37 #define RX_ENABLE (1 << 2)
38 #define TX_ENABLE (1 << 3)
39 #define DEFER_CHECK (1 << 5)
40 #define STRIP_PAD (1 << 8)
41 #define DRTRY_DISABLE (1 << 10)
42 #define FULL_DUPLEX (1 << 20)
43 #define HBD_DISABLE (1 << 28)
44 #define MAC_ADDR_HIGH_REG (MAC_BASE_ADDR + 0x04)
45 #define MAC_ADDR_LOW_REG (MAC_BASE_ADDR + 0x08)
46 #define MII_ADDR_REG (MAC_BASE_ADDR + 0x14)
47 #define MII_ADDR_SHIFT (11)
48 #define MII_REG_SHIFT (6)
49 #define MII_DATA_REG (MAC_BASE_ADDR + 0x18)
50 /* Link interrupt registers */
51 #define LINK_INT_CSR (MAC_BASE_ADDR + 0xD0)
52 #define LINK_INT_EN (1 << 0)
53 #define LINK_PHY_ADDR_SHIFT (1)
54 #define LINK_PHY_REG_SHIFT (6)
55 #define LINK_BIT_UP_SHIFT (11)
56 #define LINK_UP (1 << 16)
57 #define LINK_INT_POLL_TIME (MAC_BASE_ADDR + 0xD4)
58 #define LINK_POLL_MASK ((1 << 20) - 1)
59
60 #define DMA_CHAN_WIDTH 32
61 #define DMA_RX_CHAN 0
62 #define DMA_TX_CHAN 2
63
64 /* Receive DMA registers */
65 #define RX_DMA_BASE ((priv->dma_base) + \
66 (DMA_CHAN_WIDTH * DMA_RX_CHAN))
67 #define RX_BUFFER_ADDR (RX_DMA_BASE + 0x00)
68 #define RX_MAX_BYTES (RX_DMA_BASE + 0x04)
69 #define RX_ACT_BYTES (RX_DMA_BASE + 0x08)
70 #define RX_START_DMA (RX_DMA_BASE + 0x0C)
71 #define RX_DMA_ENABLE (1 << 0)
72 #define RX_DMA_RESET (1 << 1)
73 #define RX_DMA_STATUS_FIFO (1 << 12)
74 #define RX_DMA_ENH (RX_DMA_BASE + 0x14)
75 #define RX_DMA_INT_ENABLE (1 << 1)
76
77 /* Transmit DMA registers */
78 #define TX_DMA_BASE ((priv->dma_base) + \
79 (DMA_CHAN_WIDTH * DMA_TX_CHAN))
80 #define TX_BUFFER_ADDR (TX_DMA_BASE + 0x00)
81 #define TX_PKT_BYTES (TX_DMA_BASE + 0x04)
82 #define TX_BYTES_SENT (TX_DMA_BASE + 0x08)
83 #define TX_START_DMA (TX_DMA_BASE + 0x0C)
84 #define TX_DMA_ENABLE (1 << 0)
85 #define TX_DMA_START_FRAME (1 << 2)
86 #define TX_DMA_END_FRAME (1 << 3)
87 #define TX_DMA_PAD_DISABLE (1 << 8)
88 #define TX_DMA_CRC_DISABLE (1 << 9)
89 #define TX_DMA_FIFO_FULL (1 << 16)
90 #define TX_DMA_FIFO_EMPTY (1 << 17)
91 #define TX_DMA_STATUS_AVAIL (1 << 18)
92 #define TX_DMA_RESET (1 << 24)
93 #define TX_DMA_STATUS (TX_DMA_BASE + 0x10)
94 #define TX_DMA_ENH (TX_DMA_BASE + 0x14)
95 #define TX_DMA_ENH_ENABLE (1 << 0)
96 #define TX_DMA_INT_FIFO (1 << 1)
97
98 #define RX_ALLOC_SIZE SZ_2K
99 #define MAX_ETH_FRAME_SIZE 1536
100 #define RX_SKB_TAILROOM 128
101 #define RX_SKB_HEADROOM (RX_ALLOC_SIZE - \
102 (MAX_ETH_FRAME_SIZE + RX_SKB_TAILROOM) + 0)
103
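/*
 * The MAC appears to append a 32-bit status word to every received
 * frame; nuport_mac_rx() fetches it with get_unaligned() right after
 * the payload and tests it against the error bits collected below
 * (bit names taken from the per-bit comments, exact hardware meaning
 * assumed from this driver only).
 */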
/*                         WDT       Late COL   Length     COL        Type */
#define ERROR_FILTER_MASK ((1<<14) | (1<<15) | (1<<16) | (1<<17) | (0<<18) | \
/*                         MII       Dribbling  CRC        Len/type   Control */\
			   (1<<19) | (1<<20) | (1<<21) | (0<<24) | (1<<25) | \
/*                         Unsup     Missed */\
			   (1<<26) | (0<<31))
110 #define TX_RING_SIZE 30
111 #define RX_RING_SIZE 30
112
113 static inline u32 nuport_mac_readl(void __iomem *reg)
114 {
115 return __raw_readl(reg);
116 }
117
118 static inline u8 nuport_mac_readb(void __iomem *reg)
119 {
120 return __raw_readb(reg);
121 }
122
123 static inline void nuport_mac_writel(u32 value, void __iomem *reg)
124 {
125 __raw_writel(value, reg);
126 }
127
static inline void nuport_mac_writeb(u8 value, void __iomem *reg)
{
	__raw_writeb(value, reg);
}
132
133 /* MAC private data */
134 struct nuport_mac_priv {
135 spinlock_t lock;
136
137 void __iomem *mac_base;
138 void __iomem *dma_base;
139
140 int rx_irq;
141 int tx_irq;
142 int link_irq;
143 struct clk *emac_clk;
144 struct clk *ephy_clk;
145
146 /* Transmit buffers */
147 struct sk_buff *tx_skb[TX_RING_SIZE];
148 dma_addr_t tx_addr;
149 unsigned int valid_txskb[TX_RING_SIZE];
150 unsigned int cur_tx;
151 unsigned int dma_tx;
152 unsigned int tx_full;
153
154 /* Receive buffers */
155 struct sk_buff *rx_skb[RX_RING_SIZE];
156 dma_addr_t rx_addr;
157 unsigned int irq_rxskb[RX_RING_SIZE];
158 int pkt_len[RX_RING_SIZE];
159 unsigned int cur_rx;
160 unsigned int dma_rx;
161 unsigned int rx_full;
162
163 unsigned int first_pkt;
164
165 /* Private data */
166 struct napi_struct napi;
167 struct net_device *dev;
168 struct platform_device *pdev;
169 struct mii_bus *mii_bus;
170 struct phy_device *phydev;
171 int old_link;
172 int old_duplex;
173 u32 msg_level;
174 unsigned int buffer_shifting_len;
175 };
176
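/* Poll the MII management interface until the BUSY bit clears, giving
 * up after roughly three seconds.
 */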
177 static inline int nuport_mac_mii_busy_wait(struct nuport_mac_priv *priv)
178 {
179 unsigned long curr;
180 unsigned long finish = jiffies + 3 * HZ;
181
182 do {
183 curr = jiffies;
184 if (!(nuport_mac_readl(MII_ADDR_REG) & MII_BUSY))
185 return 0;
186 cpu_relax();
187 } while (!time_after_eq(curr, finish));
188
189 return -EBUSY;
190 }
191
192 /* Read from PHY registers */
193 static int nuport_mac_mii_read(struct mii_bus *bus,
194 int mii_id, int regnum)
195 {
196 struct net_device *dev = bus->priv;
197 struct nuport_mac_priv *priv = netdev_priv(dev);
198 int ret;
199 u32 val = 0;
200
201 ret = nuport_mac_mii_busy_wait(priv);
202 if (ret)
203 return ret;
204
205 val |= (mii_id << MII_ADDR_SHIFT) | (regnum << MII_REG_SHIFT) | MII_BUSY;
206 nuport_mac_writel(val, MII_ADDR_REG);
207 ret = nuport_mac_mii_busy_wait(priv);
208 if (ret)
209 return ret;
210
211 return nuport_mac_readl(MII_DATA_REG);
212 }
213
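/* Write to PHY registers: load the data register first, then kick the
 * transaction by setting the BUSY and WRITE bits in the address register.
 */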
214 static int nuport_mac_mii_write(struct mii_bus *bus, int mii_id,
215 int regnum, u16 value)
216 {
217 struct net_device *dev = bus->priv;
218 struct nuport_mac_priv *priv = netdev_priv(dev);
219 int ret;
220 u32 val = 0;
221
222 ret = nuport_mac_mii_busy_wait(priv);
223 if (ret)
224 return ret;
225
226 val |= (mii_id << MII_ADDR_SHIFT) | (regnum << MII_REG_SHIFT);
227 val |= MII_BUSY | MII_WRITE;
228 nuport_mac_writel(value, MII_DATA_REG);
229 nuport_mac_writel(val, MII_ADDR_REG);
230
231 return nuport_mac_mii_busy_wait(priv);
232 }
233
234 static int nuport_mac_mii_reset(struct mii_bus *bus)
235 {
236 return 0;
237 }
238
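/* Start a single-buffer transmit: wait for the TX DMA channel to go
 * idle, map the skb for device access, then program the buffer address,
 * length and start/end-of-frame bits. Called with priv->lock held.
 */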
239 static int nuport_mac_start_tx_dma(struct nuport_mac_priv *priv,
240 struct sk_buff *skb)
241 {
242 u32 reg;
243 unsigned int timeout = 2048;
244
245 while (timeout--) {
246 reg = nuport_mac_readl(TX_START_DMA);
247 if (!(reg & TX_DMA_ENABLE)) {
248 netdev_dbg(priv->dev, "dma ready\n");
249 break;
250 }
251 cpu_relax();
252 }
253
254 if (!timeout)
255 return -EBUSY;
256
257 priv->tx_addr = dma_map_single(&priv->pdev->dev, skb->data,
258 skb->len, DMA_TO_DEVICE);
259 if (dma_mapping_error(&priv->pdev->dev, priv->tx_addr))
260 return -ENOMEM;
261
262 /* enable enhanced mode */
263 nuport_mac_writel(TX_DMA_ENH_ENABLE, TX_DMA_ENH);
264 nuport_mac_writel(priv->tx_addr, TX_BUFFER_ADDR);
265 nuport_mac_writel((skb->len) - 1, TX_PKT_BYTES);
266 wmb();
267 reg = TX_DMA_ENABLE | TX_DMA_START_FRAME | TX_DMA_END_FRAME;
268 nuport_mac_writel(reg, TX_START_DMA);
269
270 return 0;
271 }
272
273 static void nuport_mac_reset_tx_dma(struct nuport_mac_priv *priv)
274 {
275 u32 reg;
276
277 reg = nuport_mac_readl(TX_START_DMA);
278 reg |= TX_DMA_RESET;
279 nuport_mac_writel(reg, TX_START_DMA);
280 }
281
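/* Arm the RX DMA channel with one receive buffer: wait for the channel
 * to go idle, map the skb data area and start the transfer. Called with
 * priv->lock held.
 */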
282 static int nuport_mac_start_rx_dma(struct nuport_mac_priv *priv,
283 struct sk_buff *skb)
284 {
285 u32 reg;
286 unsigned int timeout = 2048;
287
288 while (timeout--) {
289 reg = nuport_mac_readl(RX_START_DMA);
290 if (!(reg & RX_DMA_ENABLE)) {
291 netdev_dbg(priv->dev, "dma ready\n");
292 break;
293 }
294 cpu_relax();
295 }
296
297 if (!timeout)
298 return -EBUSY;
299
300 priv->rx_addr = dma_map_single(&priv->pdev->dev, skb->data,
301 RX_ALLOC_SIZE, DMA_FROM_DEVICE);
302 if (dma_mapping_error(&priv->pdev->dev, priv->rx_addr))
303 return -ENOMEM;
304
305 nuport_mac_writel(priv->rx_addr, RX_BUFFER_ADDR);
306 wmb();
307 nuport_mac_writel(RX_DMA_ENABLE, RX_START_DMA);
308
309 return 0;
310 }
311
312 static void nuport_mac_reset_rx_dma(struct nuport_mac_priv *priv)
313 {
314 u32 reg;
315
316 reg = nuport_mac_readl(RX_START_DMA);
317 reg |= RX_DMA_RESET;
318 nuport_mac_writel(reg, RX_START_DMA);
319 }
320
/* Mask the RX DMA completion interrupt (re-enabled from the NAPI poll handler) */
322 static void nuport_mac_disable_rx_dma(struct nuport_mac_priv *priv)
323 {
324 u32 reg;
325
326 reg = nuport_mac_readl(RX_DMA_ENH);
327 reg &= ~RX_DMA_INT_ENABLE;
328 nuport_mac_writel(reg, RX_DMA_ENH);
329 }
330
331 static void nuport_mac_enable_rx_dma(struct nuport_mac_priv *priv)
332 {
333 u32 reg;
334
335 reg = nuport_mac_readl(RX_DMA_ENH);
336 reg |= RX_DMA_INT_ENABLE;
337 nuport_mac_writel(reg, RX_DMA_ENH);
338 }
339
340 /* Add packets to the transmit queue */
341 static int nuport_mac_start_xmit(struct sk_buff *skb, struct net_device *dev)
342 {
343 unsigned long flags;
344 struct nuport_mac_priv *priv = netdev_priv(dev);
345 int ret;
346
347 if (netif_queue_stopped(dev)) {
348 netdev_warn(dev, "netif queue was stopped, restarting\n");
349 netif_start_queue(dev);
350 }
351
352 spin_lock_irqsave(&priv->lock, flags);
353 if (priv->first_pkt) {
354 ret = nuport_mac_start_tx_dma(priv, skb);
355 if (ret) {
356 netif_stop_queue(dev);
357 spin_unlock_irqrestore(&priv->lock, flags);
358 netdev_err(dev, "transmit path busy\n");
359 return NETDEV_TX_BUSY;
360 }
361 priv->first_pkt = 0;
362 }
363
364 priv->tx_skb[priv->cur_tx] = skb;
365 dev->stats.tx_bytes += skb->len;
366 dev->stats.tx_packets++;
367 priv->valid_txskb[priv->cur_tx] = 1;
368 priv->cur_tx++;
369 dev->trans_start = jiffies;
370
371 if (priv->cur_tx >= TX_RING_SIZE)
372 priv->cur_tx = 0;
373
374 spin_unlock_irqrestore(&priv->lock, flags);
375
376 if (priv->valid_txskb[priv->cur_tx]) {
377 priv->tx_full = 1;
378 netdev_err(dev, "stopping queue\n");
379 netif_stop_queue(dev);
380 }
381
382 return NETDEV_TX_OK;
383 }
384
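/* phylib link change callback: mirror the negotiated duplex mode into
 * the MAC control register and log link transitions.
 */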
385 static void nuport_mac_adjust_link(struct net_device *dev)
386 {
387 struct nuport_mac_priv *priv = netdev_priv(dev);
388 struct phy_device *phydev = priv->phydev;
389 unsigned int status_changed = 0;
390 u32 reg;
391
392 BUG_ON(!phydev);
393
394 if (priv->old_link != phydev->link) {
395 status_changed = 1;
396 priv->old_link = phydev->link;
397 }
398
	if (phydev->link && (priv->old_duplex != phydev->duplex)) {
		reg = nuport_mac_readl(CTRL_REG);
		if (phydev->duplex == DUPLEX_FULL)
			reg |= FULL_DUPLEX;
		else
			reg &= ~FULL_DUPLEX;
		nuport_mac_writel(reg, CTRL_REG);
406
407 status_changed = 1;
408 priv->old_duplex = phydev->duplex;
409 }
410
411 if (!status_changed)
412 return;
413
414 pr_info("%s: link %s", dev->name, phydev->link ?
415 "UP" : "DOWN");
416 if (phydev->link) {
417 pr_cont(" - %d/%s", phydev->speed,
418 phydev->duplex == DUPLEX_FULL ? "full" : "half");
419 }
420 pr_cont("\n");
421 }
422
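/* Link status interrupt: the MAC polls the PHY status register on our
 * behalf (see the LINK_INT_CSR setup in nuport_mac_open()) and raises
 * this interrupt on changes; propagate the new link state through
 * nuport_mac_adjust_link().
 */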
423 static irqreturn_t nuport_mac_link_interrupt(int irq, void *dev_id)
424 {
425 struct net_device *dev = dev_id;
426 struct nuport_mac_priv *priv = netdev_priv(dev);
427 u32 reg;
428 u8 phy_addr;
429 unsigned long flags;
430 irqreturn_t ret = IRQ_HANDLED;
431
432 spin_lock_irqsave(&priv->lock, flags);
433 reg = nuport_mac_readl(LINK_INT_CSR);
434 phy_addr = (reg >> LINK_PHY_ADDR_SHIFT) & (PHY_MAX_ADDR - 1);
435
436 if (phy_addr != priv->phydev->addr) {
437 netdev_err(dev, "spurious PHY irq (phy: %d)\n", phy_addr);
438 ret = IRQ_NONE;
439 goto out;
440 }
441
	priv->phydev->link = !!(reg & LINK_UP);
443 nuport_mac_adjust_link(dev);
444
445 out:
446 spin_unlock_irqrestore(&priv->lock, flags);
447 return ret;
448 }
449
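/* Transmit completion interrupt: reclaim the skb that was just sent,
 * advance the dma_tx index and restart the DMA on the next queued
 * buffer, if any.
 */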
450 static irqreturn_t nuport_mac_tx_interrupt(int irq, void *dev_id)
451 {
452 struct net_device *dev = (struct net_device *)dev_id;
453 struct nuport_mac_priv *priv = netdev_priv(dev);
454 struct sk_buff *skb;
455 unsigned long flags;
456 int ret;
457 u32 reg;
458
459 spin_lock_irqsave(&priv->lock, flags);
460 /* clear status word available if ready */
461 reg = nuport_mac_readl(TX_START_DMA);
462 if (reg & TX_DMA_STATUS_AVAIL) {
463 nuport_mac_writel(reg, TX_START_DMA);
464 reg = nuport_mac_readl(TX_DMA_STATUS);
465
466 if (reg & 1)
467 dev->stats.tx_errors++;
468 } else
469 netdev_dbg(dev, "no status word: %08x\n", reg);
470
471 skb = priv->tx_skb[priv->dma_tx];
472 priv->tx_skb[priv->dma_tx] = NULL;
473 priv->valid_txskb[priv->dma_tx] = 0;
	dma_unmap_single(&priv->pdev->dev, priv->tx_addr, skb->len,
			 DMA_TO_DEVICE);
476 dev_kfree_skb_irq(skb);
477
478 priv->dma_tx++;
479 if (priv->dma_tx >= TX_RING_SIZE)
480 priv->dma_tx = 0;
481
482 if (!priv->valid_txskb[priv->dma_tx])
483 priv->first_pkt = 1;
484 else {
485 ret = nuport_mac_start_tx_dma(priv, priv->tx_skb[priv->dma_tx]);
486 if (ret)
487 netdev_err(dev, "failed to restart TX dma\n");
488 }
489
490 if (priv->tx_full) {
491 netdev_dbg(dev, "restarting transmit queue\n");
492 netif_wake_queue(dev);
493 priv->tx_full = 0;
494 }
495
496 spin_unlock_irqrestore(&priv->lock, flags);
497
498 return IRQ_HANDLED;
499 }
500
501 static unsigned int nuport_mac_has_work(struct nuport_mac_priv *priv)
502 {
503 unsigned int i;
504
505 for (i = 0; i < RX_RING_SIZE; i++)
506 if (priv->rx_skb[i])
507 return 1;
508
509 return 0;
510 }
511
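/* Receive completion interrupt: record the received length, advance the
 * dma_rx index, re-arm the DMA with the next free buffer and defer the
 * actual packet processing to NAPI.
 */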
512 static irqreturn_t nuport_mac_rx_interrupt(int irq, void *dev_id)
513 {
514 struct net_device *dev = (struct net_device *)dev_id;
515 struct nuport_mac_priv *priv = netdev_priv(dev);
516 unsigned long flags;
517 int ret;
518
519 spin_lock_irqsave(&priv->lock, flags);
520 if (!priv->rx_full) {
521 priv->pkt_len[priv->dma_rx] = nuport_mac_readl(RX_ACT_BYTES) - 4;
522 priv->irq_rxskb[priv->dma_rx] = 0;
523 priv->dma_rx++;
524
525 if (priv->dma_rx >= RX_RING_SIZE)
526 priv->dma_rx = 0;
527 } else
528 priv->rx_full = 0;
529
530 if (priv->irq_rxskb[priv->dma_rx] == 1) {
531 ret = nuport_mac_start_rx_dma(priv, priv->rx_skb[priv->dma_rx]);
532 if (ret)
533 netdev_err(dev, "failed to start rx dma\n");
534 } else {
535 priv->rx_full = 1;
536 netdev_dbg(dev, "RX ring full\n");
537 }
538
539 if (likely(nuport_mac_has_work(priv))) {
		/* mask the RX DMA interrupt until NAPI has drained the backlog */
541 nuport_mac_disable_rx_dma(priv);
542 napi_schedule(&priv->napi);
543 }
544 spin_unlock_irqrestore(&priv->lock, flags);
545
546 return IRQ_HANDLED;
547 }
548
/* Process received packets in NAPI poll context */
550 static int nuport_mac_rx(struct net_device *dev, int limit)
551 {
552 struct nuport_mac_priv *priv = netdev_priv(dev);
553 struct sk_buff *skb;
554 int len, status;
555 int count = 0;
556
557 while (count < limit && !priv->irq_rxskb[priv->cur_rx]) {
558 skb = priv->rx_skb[priv->cur_rx];
559 len = priv->pkt_len[priv->cur_rx];
560
		/* Remove the bytes added by RX buffer shifting */
		len = len - priv->buffer_shifting_len;
		skb_reserve(skb, priv->buffer_shifting_len);
564
565 /* Get packet status */
566 status = get_unaligned((u32 *) (skb->data + len));
567 skb->dev = dev;
568
		dma_unmap_single(&priv->pdev->dev, priv->rx_addr, RX_ALLOC_SIZE,
				 DMA_FROM_DEVICE);
571
572 /* packet filter failed */
573 if (!(status & (1 << 30))) {
574 dev_kfree_skb_irq(skb);
575 goto exit;
576 }
577
578 /* missed frame */
579 if (status & (1 << 31)) {
580 dev->stats.rx_missed_errors++;
581 dev_kfree_skb_irq(skb);
582 goto exit;
583 }
584
585 /* Not ethernet type */
586 if ((!(status & (1 << 18))) || (status & ERROR_FILTER_MASK))
587 dev->stats.rx_errors++;
588
589 if (len > MAX_ETH_FRAME_SIZE) {
590 dev_kfree_skb_irq(skb);
591 goto exit;
592 } else
593 skb_put(skb, len);
594
595 skb->protocol = eth_type_trans(skb, dev);
596 dev->stats.rx_packets++;
597
598 if (status & (1 << 29))
599 skb->pkt_type = PACKET_OTHERHOST;
600 if (status & (1 << 27))
601 skb->pkt_type = PACKET_MULTICAST;
602 if (status & (1 << 28))
603 skb->pkt_type = PACKET_BROADCAST;
604
605 skb->ip_summed = CHECKSUM_UNNECESSARY;
606
607 /* Pass the received packet to network layer */
608 status = netif_receive_skb(skb);
609 if (status != NET_RX_DROP)
610 dev->stats.rx_bytes += len - 4; /* Without CRC */
611 else
612 dev->stats.rx_dropped++;
613
614 dev->last_rx = jiffies;
615
616 exit:
617 skb = netdev_alloc_skb(dev, RX_ALLOC_SIZE);
618 skb_reserve(skb, RX_SKB_HEADROOM);
619 priv->rx_skb[priv->cur_rx] = skb;
620 priv->irq_rxskb[priv->cur_rx] = 1;
621 priv->cur_rx++;
622
623 if (priv->cur_rx >= RX_RING_SIZE)
624 priv->cur_rx = 0;
625 count++;
626 }
627
628 return count;
629 }
630
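/* NAPI poll callback: process up to budget received packets and
 * re-enable the RX DMA interrupt once the backlog is drained.
 */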
631 static int nuport_mac_poll(struct napi_struct *napi, int budget)
632 {
633 struct nuport_mac_priv *priv =
634 container_of(napi, struct nuport_mac_priv, napi);
635 struct net_device *dev = priv->dev;
636 int work_done;
637
638 work_done = nuport_mac_rx(dev, budget);
639
640 if (work_done < budget) {
641 napi_complete(napi);
642 nuport_mac_enable_rx_dma(priv);
643 }
644
645 return work_done;
646 }
647
648 static void nuport_mac_init_tx_ring(struct nuport_mac_priv *priv)
649 {
650 int i;
651
652 priv->cur_tx = priv->dma_tx = priv->tx_full = 0;
653 for (i = 0; i < TX_RING_SIZE; i++) {
654 priv->tx_skb[i] = NULL;
655 priv->valid_txskb[i] = 0;
656 }
657 priv->first_pkt = 1;
658 }
659
660 static int nuport_mac_init_rx_ring(struct net_device *dev)
661 {
662 struct nuport_mac_priv *priv = netdev_priv(dev);
663 struct sk_buff *skb;
664 int i;
665
666 priv->cur_rx = priv->dma_rx = priv->rx_full = 0;
667
668 for (i = 0; i < RX_RING_SIZE; i++) {
669 skb = netdev_alloc_skb(dev, RX_ALLOC_SIZE);
670 if (!skb)
671 return -ENOMEM;
672 skb_reserve(skb, RX_SKB_HEADROOM);
673 priv->rx_skb[i] = skb;
674 priv->irq_rxskb[i] = 1;
675 }
676
677 return 0;
678 }
679
680 static void nuport_mac_free_rx_ring(struct nuport_mac_priv *priv)
681 {
682 int i;
683
684 for (i = 0; i < RX_RING_SIZE; i++) {
685 if (!priv->rx_skb[i])
686 continue;
687
688 dev_kfree_skb(priv->rx_skb[i]);
689 priv->rx_skb[i] = NULL;
690 }
691
692 if (priv->rx_addr)
		dma_unmap_single(&priv->pdev->dev, priv->rx_addr, RX_ALLOC_SIZE,
				 DMA_FROM_DEVICE);
695 }
696
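/* Read back the MAC address currently programmed into the address
 * registers (presumably left there by the bootloader) and fall back to
 * a random address if it is not valid.
 */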
697 static void nuport_mac_read_mac_address(struct net_device *dev)
698 {
699 struct nuport_mac_priv *priv = netdev_priv(dev);
700 int i;
701
702 for (i = 0; i < 4; i++)
703 dev->dev_addr[i] = nuport_mac_readb(MAC_ADDR_LOW_REG + i);
704 dev->dev_addr[4] = nuport_mac_readb(MAC_ADDR_HIGH_REG);
705 dev->dev_addr[5] = nuport_mac_readb(MAC_ADDR_HIGH_REG + 1);
706
707 if (!is_valid_ether_addr(dev->dev_addr)) {
708 dev_info(&priv->pdev->dev, "using random address\n");
709 random_ether_addr(dev->dev_addr);
710 }
711 }
712
713 static int nuport_mac_change_mac_address(struct net_device *dev, void *mac_addr)
714 {
715 struct sockaddr *addr = mac_addr;
716 struct nuport_mac_priv *priv = netdev_priv(dev);
717 unsigned long *temp = (unsigned long *)dev->dev_addr;
718 u32 high, low;
719
720 if (netif_running(dev))
721 return -EBUSY;
722
723 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
724
725 spin_lock_irq(&priv->lock);
726
727 nuport_mac_writel(*temp, MAC_ADDR_LOW_REG);
728 temp = (unsigned long *)(dev->dev_addr + 4);
729 nuport_mac_writel(*temp, MAC_ADDR_HIGH_REG);
730
731 low = nuport_mac_readl(MAC_ADDR_LOW_REG);
732 high = nuport_mac_readl(MAC_ADDR_HIGH_REG);
733
734 spin_unlock_irq(&priv->lock);
735
736 return 0;
737 }
738
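/* ndo_open: enable the MAC, program the station address, request the
 * three interrupts, initialize the TX/RX rings and arm the first
 * receive DMA transfer.
 */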
739 static int nuport_mac_open(struct net_device *dev)
740 {
741 int ret;
742 struct nuport_mac_priv *priv = netdev_priv(dev);
743 unsigned long flags;
744 u32 reg = 0;
745
746 ret = clk_enable(priv->emac_clk);
747 if (ret) {
748 netdev_err(dev, "failed to enable EMAC clock\n");
749 return ret;
750 }
751
752 /* Set MAC into full duplex mode by default */
753 reg |= RX_ENABLE | TX_ENABLE;
754 reg |= DEFER_CHECK | STRIP_PAD | DRTRY_DISABLE;
755 reg |= FULL_DUPLEX | HBD_DISABLE;
756 nuport_mac_writel(reg, CTRL_REG);
757
	/* program the station address into the MAC; the ndo_set_mac_address
	 * callback cannot be used here because the interface is already
	 * flagged as running while ndo_open executes
	 */
	nuport_mac_writel(get_unaligned((u32 *)dev->dev_addr), MAC_ADDR_LOW_REG);
	nuport_mac_writel(get_unaligned((u16 *)(dev->dev_addr + 4)),
			  MAC_ADDR_HIGH_REG);
760
761 ret = request_irq(priv->link_irq, &nuport_mac_link_interrupt,
762 0, dev->name, dev);
763 if (ret) {
764 netdev_err(dev, "unable to request link interrupt\n");
765 goto out_emac_clk;
766 }
767
768 ret = request_irq(priv->tx_irq, &nuport_mac_tx_interrupt,
769 0, dev->name, dev);
770 if (ret) {
		netdev_err(dev, "unable to request tx interrupt\n");
772 goto out_link_irq;
773 }
774
775 /* Enable link interrupt monitoring for our PHY address */
776 reg = LINK_INT_EN | (priv->phydev->addr << LINK_PHY_ADDR_SHIFT);
777 /* MII_BMSR register to be watched */
778 reg |= (1 << LINK_PHY_REG_SHIFT);
	/* BMSR_LSTATUS (bit 2) to be watched in particular */
780 reg |= (2 << LINK_BIT_UP_SHIFT);
781
782 spin_lock_irqsave(&priv->lock, flags);
783 nuport_mac_writel(reg, LINK_INT_CSR);
784 nuport_mac_writel(LINK_POLL_MASK, LINK_INT_POLL_TIME);
785 spin_unlock_irqrestore(&priv->lock, flags);
786
787 phy_start(priv->phydev);
788
789 ret = request_irq(priv->rx_irq, &nuport_mac_rx_interrupt,
790 0, dev->name, dev);
791 if (ret) {
		netdev_err(dev, "unable to request rx interrupt\n");
793 goto out_tx_irq;
794 }
795
796 netif_start_queue(dev);
797
798 nuport_mac_init_tx_ring(priv);
799
800 ret = nuport_mac_init_rx_ring(dev);
801 if (ret) {
802 netdev_err(dev, "rx ring init failed\n");
803 goto out_rx_skb;
804 }
805
806 nuport_mac_reset_tx_dma(priv);
807 nuport_mac_reset_rx_dma(priv);
808
809 /* Start RX DMA */
810 spin_lock_irqsave(&priv->lock, flags);
811 ret = nuport_mac_start_rx_dma(priv, priv->rx_skb[0]);
812 spin_unlock_irqrestore(&priv->lock, flags);
813
814 napi_enable(&priv->napi);
815
816 return ret;
817
818 out_rx_skb:
819 nuport_mac_free_rx_ring(priv);
820 free_irq(priv->rx_irq, dev);
821 out_tx_irq:
822 free_irq(priv->tx_irq, dev);
823 out_link_irq:
824 free_irq(priv->link_irq, dev);
825 out_emac_clk:
826 clk_disable(priv->emac_clk);
827 return ret;
828 }
829
830 static int nuport_mac_close(struct net_device *dev)
831 {
832 u32 reg;
833 struct nuport_mac_priv *priv = netdev_priv(dev);
834
	napi_disable(&priv->napi);
	netif_stop_queue(dev);

	spin_lock_irq(&priv->lock);
	reg = nuport_mac_readl(CTRL_REG);
	reg &= ~(RX_ENABLE | TX_ENABLE);
	nuport_mac_writel(reg, CTRL_REG);
	/* disable PHY polling */
	nuport_mac_writel(0, LINK_INT_CSR);
	nuport_mac_writel(0, LINK_INT_POLL_TIME);
	spin_unlock_irq(&priv->lock);

	phy_stop(priv->phydev);

	free_irq(priv->link_irq, dev);
	free_irq(priv->tx_irq, dev);
	free_irq(priv->rx_irq, dev);
852
853 nuport_mac_free_rx_ring(priv);
854
855 clk_disable(priv->emac_clk);
856
857 return 0;
858 }
859
860 static void nuport_mac_tx_timeout(struct net_device *dev)
861 {
862 struct nuport_mac_priv *priv = netdev_priv(dev);
863 unsigned int i;
864
865 netdev_warn(dev, "transmit timeout, attempting recovery\n");
866
867 netdev_info(dev, "TX DMA regs\n");
868 for (i = 0; i < DMA_CHAN_WIDTH; i += 4)
869 netdev_info(dev, "[%02x]: 0x%08x\n", i, nuport_mac_readl(TX_DMA_BASE + i));
870 netdev_info(dev, "RX DMA regs\n");
871 for (i = 0; i < DMA_CHAN_WIDTH; i += 4)
872 netdev_info(dev, "[%02x]: 0x%08x\n", i, nuport_mac_readl(RX_DMA_BASE + i));
873
874 nuport_mac_init_tx_ring(priv);
875 nuport_mac_reset_tx_dma(priv);
876
877 netif_wake_queue(dev);
878 }
879
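/* Attach to the first PHY found on the internal MDIO bus, restricting
 * the advertised modes to 10/100.
 */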
880 static int nuport_mac_mii_probe(struct net_device *dev)
881 {
882 struct nuport_mac_priv *priv = netdev_priv(dev);
883 struct phy_device *phydev = NULL;
884 int ret;
885
886 ret = clk_enable(priv->ephy_clk);
887 if (ret) {
888 netdev_err(dev, "unable to enable ePHY clk\n");
889 return ret;
890 }
891
892 phydev = phy_find_first(priv->mii_bus);
893 if (!phydev) {
894 netdev_err(dev, "no PHYs found\n");
895 ret = -ENODEV;
896 goto out;
897 }
898
899 phydev = phy_connect(dev, dev_name(&phydev->dev),
900 nuport_mac_adjust_link, 0,
901 PHY_INTERFACE_MODE_MII);
902 if (IS_ERR(phydev)) {
903 netdev_err(dev, "could not attach PHY\n");
904 ret = PTR_ERR(phydev);
905 goto out;
906 }
907
908 phydev->supported &= PHY_BASIC_FEATURES;
909 phydev->advertising = phydev->supported;
910 priv->phydev = phydev;
911 priv->old_link = 1;
912 priv->old_duplex = DUPLEX_FULL;
913
914 dev_info(&priv->pdev->dev, "attached PHY driver [%s] "
915 "(mii_bus:phy_addr=%d)\n",
916 phydev->drv->name, phydev->addr);
917
918 return 0;
919
920 out:
921 /* disable the Ethernet PHY clock for the moment */
922 clk_disable(priv->ephy_clk);
923
924 return ret;
925 }
926
927 static void nuport_mac_ethtool_drvinfo(struct net_device *dev,
928 struct ethtool_drvinfo *info)
929 {
930 strncpy(info->driver, "nuport-mac", sizeof(info->driver));
931 strncpy(info->version, "0.1", sizeof(info->version));
932 strncpy(info->fw_version, "N/A", sizeof(info->fw_version));
933 strncpy(info->bus_info, "internal", sizeof(info->bus_info));
934 info->n_stats = 0;
935 info->testinfo_len = 0;
936 info->regdump_len = 0;
937 info->eedump_len = 0;
938 }
939
940 static int nuport_mac_ethtool_get_settings(struct net_device *dev,
941 struct ethtool_cmd *cmd)
942 {
943 struct nuport_mac_priv *priv = netdev_priv(dev);
944
945 if (priv->phydev)
946 return phy_ethtool_gset(priv->phydev, cmd);
947
948 return -EINVAL;
949 }
950
951 static int nuport_mac_ethtool_set_settings(struct net_device *dev,
952 struct ethtool_cmd *cmd)
953 {
954 struct nuport_mac_priv *priv = netdev_priv(dev);
955
956 if (priv->phydev)
957 return phy_ethtool_sset(priv->phydev, cmd);
958
959 return -EINVAL;
960 }
961
962 static void nuport_mac_set_msglevel(struct net_device *dev, u32 msg_level)
963 {
964 struct nuport_mac_priv *priv = netdev_priv(dev);
965
966 priv->msg_level = msg_level;
967 }
968
969 static u32 nuport_mac_get_msglevel(struct net_device *dev)
970 {
971 struct nuport_mac_priv *priv = netdev_priv(dev);
972
973 return priv->msg_level;
974 }
975
976 static const struct ethtool_ops nuport_mac_ethtool_ops = {
977 .get_drvinfo = nuport_mac_ethtool_drvinfo,
978 .get_link = ethtool_op_get_link,
979 .get_settings = nuport_mac_ethtool_get_settings,
980 .set_settings = nuport_mac_ethtool_set_settings,
981 .set_msglevel = nuport_mac_set_msglevel,
982 .get_msglevel = nuport_mac_get_msglevel,
983 };
984
985 static const struct net_device_ops nuport_mac_ops = {
986 .ndo_open = nuport_mac_open,
987 .ndo_stop = nuport_mac_close,
988 .ndo_start_xmit = nuport_mac_start_xmit,
989 .ndo_change_mtu = eth_change_mtu,
990 .ndo_validate_addr = eth_validate_addr,
991 .ndo_set_mac_address = nuport_mac_change_mac_address,
992 .ndo_tx_timeout = nuport_mac_tx_timeout,
993 };
994
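/* Platform probe: map the MAC and DMA register windows, look up clocks
 * and interrupts from the platform resources, register the MDIO bus and
 * finally the net_device itself.
 */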
static int __devinit nuport_mac_probe(struct platform_device *pdev)
996 {
997 struct net_device *dev;
998 struct nuport_mac_priv *priv = NULL;
999 struct resource *regs, *dma;
1000 int ret = 0;
1001 int rx_irq, tx_irq, link_irq;
1002 int i;
1003 const unsigned int *intspec;
1004
1005 dev = alloc_etherdev(sizeof(struct nuport_mac_priv));
1006 if (!dev) {
1007 dev_err(&pdev->dev, "no memory for net_device\n");
1008 return -ENOMEM;
1009 }
1010
1011 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1012 dma = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1013 if (!regs || !dma) {
1014 dev_err(&pdev->dev, "failed to get regs resources\n");
1015 ret = -ENODEV;
1016 goto out;
1017 }
1018
1019 rx_irq = platform_get_irq(pdev, 0);
1020 tx_irq = platform_get_irq(pdev, 1);
1021 link_irq = platform_get_irq(pdev, 2);
1022 if (rx_irq < 0 || tx_irq < 0 || link_irq < 0) {
1023 ret = -ENODEV;
1024 goto out;
1025 }
1026
1027 platform_set_drvdata(pdev, dev);
1028 SET_NETDEV_DEV(dev, &pdev->dev);
1029 priv = netdev_priv(dev);
1030 priv->pdev = pdev;
1031 priv->dev = dev;
1032 spin_lock_init(&priv->lock);
1033
1034 intspec = of_get_property(pdev->dev.of_node,
1035 "nuport-mac,buffer-shifting", NULL);
1036 if (!intspec)
1037 priv->buffer_shifting_len = 0;
1038 else
1039 priv->buffer_shifting_len = 2;
1040
1041 priv->mac_base = devm_ioremap(&pdev->dev,
1042 regs->start, resource_size(regs));
1043 if (!priv->mac_base) {
1044 dev_err(&pdev->dev, "failed to remap regs\n");
1045 ret = -ENOMEM;
1046 goto out_platform;
1047 }
1048
1049 priv->dma_base = devm_ioremap(&pdev->dev,
1050 dma->start, resource_size(dma));
1051 if (!priv->dma_base) {
1052 dev_err(&pdev->dev, "failed to remap dma-regs\n");
1053 ret = -ENOMEM;
1054 goto out_platform;
1055 }
1056
1057 priv->emac_clk = clk_get(&pdev->dev, "emac");
1058 if (IS_ERR_OR_NULL(priv->emac_clk)) {
1059 dev_err(&pdev->dev, "failed to get emac clk\n");
1060 ret = PTR_ERR(priv->emac_clk);
1061 goto out_platform;
1062 }
1063
1064 priv->ephy_clk = clk_get(&pdev->dev, "ephy");
1065 if (IS_ERR_OR_NULL(priv->ephy_clk)) {
1066 dev_err(&pdev->dev, "failed to get ephy clk\n");
1067 ret = PTR_ERR(priv->ephy_clk);
1068 goto out_platform;
1069 }
1070
1071 priv->link_irq = link_irq;
1072 priv->rx_irq = rx_irq;
1073 priv->tx_irq = tx_irq;
1074 priv->msg_level = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK;
1075 dev->netdev_ops = &nuport_mac_ops;
1076 dev->ethtool_ops = &nuport_mac_ethtool_ops;
1077 dev->watchdog_timeo = HZ;
1078 dev->flags = IFF_BROADCAST; /* Supports Broadcast */
1079 dev->tx_queue_len = TX_RING_SIZE / 2;
1080
1081 netif_napi_add(dev, &priv->napi, nuport_mac_poll, 64);
1082
1083 priv->mii_bus = mdiobus_alloc();
	if (!priv->mii_bus) {
		dev_err(&pdev->dev, "mii bus allocation failed\n");
		ret = -ENOMEM;
		goto out;
	}
1088
1089 priv->mii_bus->priv = dev;
1090 priv->mii_bus->read = nuport_mac_mii_read;
1091 priv->mii_bus->write = nuport_mac_mii_write;
1092 priv->mii_bus->reset = nuport_mac_mii_reset;
1093 priv->mii_bus->name = "nuport-mac-mii";
1094 priv->mii_bus->phy_mask = (1 << 0);
1095 snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s", pdev->name);
1096 priv->mii_bus->irq = kzalloc(PHY_MAX_ADDR * sizeof(int), GFP_KERNEL);
1097 if (!priv->mii_bus->irq) {
1098 dev_err(&pdev->dev, "failed to allocate mii_bus irqs\n");
1099 ret = -ENOMEM;
1100 goto out_mdio;
1101 }
1102
1103 /* We support PHY interrupts routed back to the MAC */
1104 for (i = 0; i < PHY_MAX_ADDR; i++)
1105 priv->mii_bus->irq[i] = PHY_IGNORE_INTERRUPT;
1106
1107 ret = mdiobus_register(priv->mii_bus);
1108 if (ret) {
1109 dev_err(&pdev->dev, "failed to register mii_bus\n");
1110 goto out_mdio_irq;
1111 }
1112
1113 ret = nuport_mac_mii_probe(dev);
1114 if (ret) {
1115 dev_err(&pdev->dev, "failed to probe MII bus\n");
1116 goto out_mdio_unregister;
1117 }
1118
1119 ret = register_netdev(dev);
1120 if (ret) {
1121 dev_err(&pdev->dev, "failed to register net_device\n");
1122 goto out_mdio_probe;
1123 }
1124
1125 /* read existing mac address */
1126 nuport_mac_read_mac_address(dev);
1127
1128 dev_info(&pdev->dev, "registered (MAC: %pM)\n", dev->dev_addr);
1129
1130 return ret;
1131
1132 out_mdio_probe:
1133 phy_disconnect(priv->phydev);
1134 out_mdio_unregister:
1135 mdiobus_unregister(priv->mii_bus);
1136 out_mdio_irq:
1137 kfree(priv->mii_bus->irq);
1138 out_mdio:
1139 mdiobus_free(priv->mii_bus);
1140 out_platform:
1141 platform_set_drvdata(pdev, NULL);
out:
	if (priv) {
		if (!IS_ERR_OR_NULL(priv->ephy_clk))
			clk_put(priv->ephy_clk);
		if (!IS_ERR_OR_NULL(priv->emac_clk))
			clk_put(priv->emac_clk);
	}
	free_netdev(dev);
	platform_set_drvdata(pdev, NULL);
	return ret;
1148 }
1149
static int __devexit nuport_mac_remove(struct platform_device *pdev)
1151 {
1152 struct net_device *dev = platform_get_drvdata(pdev);
1153 struct nuport_mac_priv *priv = netdev_priv(dev);
1154
1155 unregister_netdev(dev);
1156 phy_disconnect(priv->phydev);
1157 mdiobus_unregister(priv->mii_bus);
1158 kfree(priv->mii_bus->irq);
1159 mdiobus_free(priv->mii_bus);
1160 clk_put(priv->ephy_clk);
1161 clk_put(priv->emac_clk);
1162 free_netdev(dev);
1163
1164 platform_set_drvdata(pdev, NULL);
1165
1166 return 0;
1167 }
1168
static const struct of_device_id nuport_eth_ids[] = {
1170 {.compatible = "moschip,nuport-mac",},
1171 { /* sentinel */ },
1172 };
1173
1174 static struct platform_driver nuport_eth_driver = {
1175 .driver = {
1176 .name = "nuport-mac",
1177 .owner = THIS_MODULE,
1178 .of_match_table = nuport_eth_ids,
1179 },
1180 .probe = nuport_mac_probe,
1181 .remove = __devexit_p(nuport_mac_remove),
1182 };
1183
1184 module_platform_driver(nuport_eth_driver);
1185
1186 MODULE_AUTHOR("Moschip Semiconductors Ltd.");
1187 MODULE_DESCRIPTION("Moschip MCS8140 Ethernet MAC driver");
1188 MODULE_LICENSE("GPL");