[mcs814x] nuport-mac: use dma_mapping_error() instead of checking pointers
[openwrt/svn-archive/archive.git] target/linux/mcs814x/files-3.3/drivers/net/ethernet/mcs8140/nuport_mac.c
1 /*
2 * Moschip MCS8140 Ethernet MAC driver
3 *
4 * Copyright (C) 2003, Moschip Semiconductors
5 * Copyright (C) 2012, Florian Fainelli <florian@openwrt.org>
6 *
7 * Licensed under GPLv2
8 */
9 #include <linux/module.h>
10 #include <linux/slab.h>
11 #include <linux/kernel.h>
12 #include <linux/init.h>
13 #include <linux/netdevice.h>
14 #include <linux/etherdevice.h>
15 #include <linux/delay.h>
16 #include <linux/ethtool.h>
17 #include <linux/mii.h>
18 #include <linux/io.h>
19 #include <linux/interrupt.h>
20 #include <linux/platform_device.h>
21 #include <linux/of.h>
22 #include <linux/irq.h>
23 #include <linux/err.h>
24 #include <linux/phy.h>
25 #include <linux/clk.h>
26 #include <linux/dma-mapping.h>
27
28 #include <asm/unaligned.h>
29 #include <asm/sizes.h>
30
31 /* Hardware registers */
32 #define MAC_BASE_ADDR ((priv->mac_base))
33
34 #define CTRL_REG (MAC_BASE_ADDR)
35 #define MII_BUSY (1 << 0)
36 #define MII_WRITE (1 << 1)
37 #define RX_ENABLE (1 << 2)
38 #define TX_ENABLE (1 << 3)
39 #define DEFER_CHECK (1 << 5)
40 #define STRIP_PAD (1 << 8)
41 #define DRTRY_DISABLE (1 << 10)
42 #define FULL_DUPLEX (1 << 20)
43 #define HBD_DISABLE (1 << 28)
44 #define MAC_ADDR_HIGH_REG (MAC_BASE_ADDR + 0x04)
45 #define MAC_ADDR_LOW_REG (MAC_BASE_ADDR + 0x08)
46 #define MII_ADDR_REG (MAC_BASE_ADDR + 0x14)
47 #define MII_ADDR_SHIFT (11)
48 #define MII_REG_SHIFT (6)
49 #define MII_DATA_REG (MAC_BASE_ADDR + 0x18)
50 /* Link interrupt registers */
51 #define LINK_INT_CSR (MAC_BASE_ADDR + 0xD0)
52 #define LINK_INT_EN (1 << 0)
53 #define LINK_PHY_ADDR_SHIFT (1)
54 #define LINK_PHY_REG_SHIFT (6)
55 #define LINK_BIT_UP_SHIFT (11)
56 #define LINK_UP (1 << 16)
57 #define LINK_INT_POLL_TIME (MAC_BASE_ADDR + 0xD4)
58 #define LINK_POLL_MASK ((1 << 20) - 1)
59
60 #define DMA_CHAN_WIDTH 32
61 #define DMA_RX_CHAN 0
62 #define DMA_TX_CHAN 2
63
64 /* Receive DMA registers */
65 #define RX_DMA_BASE ((priv->dma_base) + \
66 (DMA_CHAN_WIDTH * DMA_RX_CHAN))
67 #define RX_BUFFER_ADDR (RX_DMA_BASE + 0x00)
68 #define RX_MAX_BYTES (RX_DMA_BASE + 0x04)
69 #define RX_ACT_BYTES (RX_DMA_BASE + 0x08)
70 #define RX_START_DMA (RX_DMA_BASE + 0x0C)
71 #define RX_DMA_ENABLE (1 << 0)
72 #define RX_DMA_RESET (1 << 1)
73 #define RX_DMA_STATUS_FIFO (1 << 12)
74 #define RX_DMA_ENH (RX_DMA_BASE + 0x14)
75 #define RX_DMA_INT_ENABLE (1 << 1)
76
77 /* Transmit DMA registers */
78 #define TX_DMA_BASE ((priv->dma_base) + \
79 (DMA_CHAN_WIDTH * DMA_TX_CHAN))
80 #define TX_BUFFER_ADDR (TX_DMA_BASE + 0x00)
81 #define TX_PKT_BYTES (TX_DMA_BASE + 0x04)
82 #define TX_BYTES_SENT (TX_DMA_BASE + 0x08)
83 #define TX_START_DMA (TX_DMA_BASE + 0x0C)
84 #define TX_DMA_ENABLE (1 << 0)
85 #define TX_DMA_START_FRAME (1 << 2)
86 #define TX_DMA_END_FRAME (1 << 3)
87 #define TX_DMA_PAD_DISABLE (1 << 8)
88 #define TX_DMA_CRC_DISABLE (1 << 9)
89 #define TX_DMA_FIFO_FULL (1 << 16)
90 #define TX_DMA_FIFO_EMPTY (1 << 17)
91 #define TX_DMA_STATUS_AVAIL (1 << 18)
92 #define TX_DMA_RESET (1 << 24)
93 #define TX_DMA_STATUS (TX_DMA_BASE + 0x10)
94 #define TX_DMA_ENH (TX_DMA_BASE + 0x14)
95 #define TX_DMA_ENH_ENABLE (1 << 0)
96 #define TX_DMA_INT_FIFO (1 << 1)
97
98 #define RX_ALLOC_SIZE SZ_2K
99 #define MAX_ETH_FRAME_SIZE 1536
100 #define RX_SKB_TAILROOM 128
101 #define RX_SKB_HEADROOM (RX_ALLOC_SIZE - \
102 (MAX_ETH_FRAME_SIZE + RX_SKB_TAILROOM) + 0)
103
104 /* WDT Late COL Length COL Type */
105 #define ERROR_FILTER_MASK ((1<<14) | (1<<15) | (1<<16) | (1<<17) | (0<<18) | \
106 /* MII Dribbling CRC Len/type Control */\
107 (1<<19) | (1<<20) | (1<<21) | (0<<24) | (1<<25) | \
108 /* Unsup Missed */\
109 (1<<26) | (0<<31))
110 #define TX_RING_SIZE 30
111 #define RX_RING_SIZE 30
112
113 static inline u32 nuport_mac_readl(void __iomem *reg)
114 {
115 return __raw_readl(reg);
116 }
117
118 static inline u8 nuport_mac_readb(void __iomem *reg)
119 {
120 return __raw_readb(reg);
121 }
122
123 static inline void nuport_mac_writel(u32 value, void __iomem *reg)
124 {
125 __raw_writel(value, reg);
126 }
127
128 static inline void nuport_mac_writeb(u8 value, void __iomem *reg)
129 {
130 __raw_writeb(value, reg);
131 }
132
133 /* MAC private data */
134 struct nuport_mac_priv {
135 spinlock_t lock;
136
137 void __iomem *mac_base;
138 void __iomem *dma_base;
139
140 int rx_irq;
141 int tx_irq;
142 int link_irq;
143 struct clk *emac_clk;
144 struct clk *ephy_clk;
145
146 /* Transmit buffers */
147 struct sk_buff *tx_skb[TX_RING_SIZE];
148 dma_addr_t tx_addr;
149 unsigned int valid_txskb[TX_RING_SIZE];
150 unsigned int cur_tx;
151 unsigned int dma_tx;
152 unsigned int tx_full;
153
154 /* Receive buffers */
155 struct sk_buff *rx_skb[RX_RING_SIZE];
156 dma_addr_t rx_addr;
157 unsigned int irq_rxskb[RX_RING_SIZE];
158 int pkt_len[RX_RING_SIZE];
159 unsigned int cur_rx;
160 unsigned int dma_rx;
161 unsigned int rx_full;
162
163 unsigned int first_pkt;
164
165 /* Private data */
166 struct napi_struct napi;
167 struct net_device *dev;
168 struct platform_device *pdev;
169 struct mii_bus *mii_bus;
170 struct phy_device *phydev;
171 int old_link;
172 int old_duplex;
173 u32 msg_level;
174 unsigned int buffer_shifting_len;
175 };
176
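/* Poll the MII_BUSY bit for up to three seconds before giving up */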
177 static inline int nuport_mac_mii_busy_wait(struct nuport_mac_priv *priv)
178 {
179 unsigned long curr;
180 unsigned long finish = jiffies + 3 * HZ;
181
182 do {
183 curr = jiffies;
184 if (!(nuport_mac_readl(MII_ADDR_REG) & MII_BUSY))
185 return 0;
186 cpu_relax();
187 } while (!time_after_eq(curr, finish));
188
189 return -EBUSY;
190 }
191
192 /* Read from PHY registers */
193 static int nuport_mac_mii_read(struct mii_bus *bus,
194 int mii_id, int regnum)
195 {
196 struct net_device *dev = bus->priv;
197 struct nuport_mac_priv *priv = netdev_priv(dev);
198 int ret;
199 u32 val = 0;
200
201 ret = nuport_mac_mii_busy_wait(priv);
202 if (ret)
203 return ret;
204
205 val |= (mii_id << MII_ADDR_SHIFT) | (regnum << MII_REG_SHIFT) | MII_BUSY;
206 nuport_mac_writel(val, MII_ADDR_REG);
207 ret = nuport_mac_mii_busy_wait(priv);
208 if (ret)
209 return ret;
210
211 return nuport_mac_readl(MII_DATA_REG);
212 }
213
214 static int nuport_mac_mii_write(struct mii_bus *bus, int mii_id,
215 int regnum, u16 value)
216 {
217 struct net_device *dev = bus->priv;
218 struct nuport_mac_priv *priv = netdev_priv(dev);
219 int ret;
220 u32 val = 0;
221
222 ret = nuport_mac_mii_busy_wait(priv);
223 if (ret)
224 return ret;
225
226 val |= (mii_id << MII_ADDR_SHIFT) | (regnum << MII_REG_SHIFT);
227 val |= MII_BUSY | MII_WRITE;
228 nuport_mac_writel(value, MII_DATA_REG);
229 nuport_mac_writel(val, MII_ADDR_REG);
230
231 return nuport_mac_mii_busy_wait(priv);
232 }
233
234 static int nuport_mac_mii_reset(struct mii_bus *bus)
235 {
236 return 0;
237 }
238
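/*
 * Program the single TX DMA channel with one skb: wait for the channel
 * to go idle, map the buffer, write its address and length, then set the
 * enable/start-of-frame/end-of-frame bits to kick off the transfer.
 */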
239 static int nuport_mac_start_tx_dma(struct nuport_mac_priv *priv,
240 struct sk_buff *skb)
241 {
242 u32 reg;
243 unsigned int timeout = 2048;
244
245 while (timeout--) {
246 reg = nuport_mac_readl(TX_START_DMA);
247 if (!(reg & TX_DMA_ENABLE)) {
248 netdev_dbg(priv->dev, "dma ready\n");
249 break;
250 }
251 cpu_relax();
252 }
253
254 if (!timeout)
255 return -EBUSY;
256
257 priv->tx_addr = dma_map_single(&priv->pdev->dev, skb->data,
258 skb->len, DMA_TO_DEVICE);
259 if (dma_mapping_error(&priv->pdev->dev, priv->tx_addr))
260 return -ENOMEM;
261
262 /* enable enhanced mode */
263 nuport_mac_writel(TX_DMA_ENH_ENABLE, TX_DMA_ENH);
264 nuport_mac_writel(priv->tx_addr, TX_BUFFER_ADDR);
265 nuport_mac_writel((skb->len) - 1, TX_PKT_BYTES);
266 wmb();
267 reg = TX_DMA_ENABLE | TX_DMA_START_FRAME | TX_DMA_END_FRAME;
268 nuport_mac_writel(reg, TX_START_DMA);
269
270 return 0;
271 }
272
273 static void nuport_mac_reset_tx_dma(struct nuport_mac_priv *priv)
274 {
275 u32 reg;
276
277 reg = nuport_mac_readl(TX_START_DMA);
278 reg |= TX_DMA_RESET;
279 nuport_mac_writel(reg, TX_START_DMA);
280 }
281
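/*
 * Hand a receive buffer to the RX DMA channel: wait for the channel to
 * go idle, map the skb data area (RX_ALLOC_SIZE bytes) and start DMA.
 */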
282 static int nuport_mac_start_rx_dma(struct nuport_mac_priv *priv,
283 struct sk_buff *skb)
284 {
285 u32 reg;
286 unsigned int timeout = 2048;
287
288 while (timeout--) {
289 reg = nuport_mac_readl(RX_START_DMA);
290 if (!(reg & RX_DMA_ENABLE)) {
291 netdev_dbg(priv->dev, "dma ready\n");
292 break;
293 }
294 cpu_relax();
295 }
296
297 if (!timeout)
298 return -EBUSY;
299
300 priv->rx_addr = dma_map_single(&priv->pdev->dev, skb->data,
301 RX_ALLOC_SIZE, DMA_FROM_DEVICE);
302 if (dma_mapping_error(&priv->pdev->dev, priv->rx_addr))
303 return -ENOMEM;
304
305 nuport_mac_writel(priv->rx_addr, RX_BUFFER_ADDR);
306 wmb();
307 nuport_mac_writel(RX_DMA_ENABLE, RX_START_DMA);
308
309 return 0;
310 }
311
312 static void nuport_mac_reset_rx_dma(struct nuport_mac_priv *priv)
313 {
314 u32 reg;
315
316 reg = nuport_mac_readl(RX_START_DMA);
317 reg |= RX_DMA_RESET;
318 nuport_mac_writel(reg, RX_START_DMA);
319 }
320
321 /* Mask the RX DMA completion interrupt (used while NAPI is polling) */
322 static void nuport_mac_disable_rx_dma(struct nuport_mac_priv *priv)
323 {
324 u32 reg;
325
326 reg = nuport_mac_readl(RX_DMA_ENH);
327 reg &= ~RX_DMA_INT_ENABLE;
328 nuport_mac_writel(reg, RX_DMA_ENH);
329 }
330
331 static void nuport_mac_enable_rx_dma(struct nuport_mac_priv *priv)
332 {
333 u32 reg;
334
335 reg = nuport_mac_readl(RX_DMA_ENH);
336 reg |= RX_DMA_INT_ENABLE;
337 nuport_mac_writel(reg, RX_DMA_ENH);
338 }
339
340 /* Add packets to the transmit queue */
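/*
 * The hardware exposes a single TX DMA descriptor, so the driver keeps a
 * software ring of TX_RING_SIZE skbs: start_xmit queues the skb (and
 * starts DMA directly for the first packet), while the TX completion
 * interrupt reaps the finished buffer and feeds the next queued one to
 * the DMA engine.
 */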
341 static int nuport_mac_start_xmit(struct sk_buff *skb, struct net_device *dev)
342 {
343 unsigned long flags;
344 struct nuport_mac_priv *priv = netdev_priv(dev);
345 int ret;
346
347 if (netif_queue_stopped(dev)) {
348 netdev_warn(dev, "netif queue was stopped, restarting\n");
349 netif_start_queue(dev);
350 }
351
352 spin_lock_irqsave(&priv->lock, flags);
353 if (priv->first_pkt) {
354 ret = nuport_mac_start_tx_dma(priv, skb);
355 if (ret) {
356 netif_stop_queue(dev);
357 spin_unlock_irqrestore(&priv->lock, flags);
358 netdev_err(dev, "transmit path busy\n");
359 return NETDEV_TX_BUSY;
360 }
361 priv->first_pkt = 0;
362 }
363
364 priv->tx_skb[priv->cur_tx] = skb;
365 dev->stats.tx_bytes += skb->len;
366 dev->stats.tx_packets++;
367 priv->valid_txskb[priv->cur_tx] = 1;
368 priv->cur_tx++;
369 dev->trans_start = jiffies;
370
371 if (priv->cur_tx >= TX_RING_SIZE)
372 priv->cur_tx = 0;
373
374 spin_unlock_irqrestore(&priv->lock, flags);
375
376 if (priv->valid_txskb[priv->cur_tx]) {
377 priv->tx_full = 1;
378 netdev_err(dev, "stopping queue\n");
379 netif_stop_queue(dev);
380 }
381
382 return NETDEV_TX_OK;
383 }
384
385 static void nuport_mac_adjust_link(struct net_device *dev)
386 {
387 struct nuport_mac_priv *priv = netdev_priv(dev);
388 struct phy_device *phydev = priv->phydev;
389 unsigned int status_changed = 0;
390 u32 reg;
391
392 BUG_ON(!phydev);
393
394 if (priv->old_link != phydev->link) {
395 status_changed = 1;
396 priv->old_link = phydev->link;
397 }
398
399 if (phydev->link && (priv->old_duplex != phydev->duplex)) {
400 reg = nuport_mac_readl(CTRL_REG);
401 if (phydev->duplex == DUPLEX_FULL)
402 reg |= FULL_DUPLEX;
403 else
404 reg &= ~FULL_DUPLEX;
405 nuport_mac_writel(reg, CTRL_REG);
406
407 status_changed = 1;
408 priv->old_duplex = phydev->duplex;
409 }
410
411 if (!status_changed)
412 return;
413
414 pr_info("%s: link %s", dev->name, phydev->link ?
415 "UP" : "DOWN");
416 if (phydev->link) {
417 pr_cont(" - %d/%s", phydev->speed,
418 phydev->duplex == DUPLEX_FULL ? "full" : "half");
419 }
420 pr_cont("\n");
421 }
422
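/*
 * Link interrupt: the MAC polls the PHY's BMSR register in hardware (see
 * the LINK_INT_CSR setup in nuport_mac_open) and raises this interrupt
 * on link changes; propagate the new state through adjust_link.
 */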
423 static irqreturn_t nuport_mac_link_interrupt(int irq, void *dev_id)
424 {
425 struct net_device *dev = dev_id;
426 struct nuport_mac_priv *priv = netdev_priv(dev);
427 u32 reg;
428 u8 phy_addr;
429 unsigned long flags;
430 irqreturn_t ret = IRQ_HANDLED;
431
432 spin_lock_irqsave(&priv->lock, flags);
433 reg = nuport_mac_readl(LINK_INT_CSR);
434 phy_addr = (reg >> LINK_PHY_ADDR_SHIFT) & (PHY_MAX_ADDR - 1);
435
436 if (phy_addr != priv->phydev->addr) {
437 netdev_err(dev, "spurious PHY irq (phy: %d)\n", phy_addr);
438 ret = IRQ_NONE;
439 goto out;
440 }
441
442 priv->phydev->link = !!(reg & LINK_UP);
443 nuport_mac_adjust_link(dev);
444
445 out:
446 spin_unlock_irqrestore(&priv->lock, flags);
447 return ret;
448 }
449
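/*
 * TX completion interrupt: check the status word for errors if one is
 * available, unmap and free the transmitted skb, start DMA on the next
 * queued skb (or mark the path idle), and wake the queue if it was full.
 */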
450 static irqreturn_t nuport_mac_tx_interrupt(int irq, void *dev_id)
451 {
452 struct net_device *dev = (struct net_device *)dev_id;
453 struct nuport_mac_priv *priv = netdev_priv(dev);
454 struct sk_buff *skb;
455 unsigned long flags;
456 int ret;
457 u32 reg;
458
459 spin_lock_irqsave(&priv->lock, flags);
460 /* clear status word available if ready */
461 reg = nuport_mac_readl(TX_START_DMA);
462 if (reg & TX_DMA_STATUS_AVAIL) {
463 nuport_mac_writel(reg, TX_START_DMA);
464 reg = nuport_mac_readl(TX_DMA_STATUS);
465
466 if (reg & 1)
467 dev->stats.tx_errors++;
468 } else
469 netdev_dbg(dev, "no status word: %08x\n", reg);
470
471 skb = priv->tx_skb[priv->dma_tx];
472 priv->tx_skb[priv->dma_tx] = NULL;
473 priv->valid_txskb[priv->dma_tx] = 0;
474 dma_unmap_single(&priv->pdev->dev, priv->tx_addr, skb->len,
475 DMA_TO_DEVICE);
476 dev_kfree_skb_irq(skb);
477
478 priv->dma_tx++;
479 if (priv->dma_tx >= TX_RING_SIZE)
480 priv->dma_tx = 0;
481
482 if (!priv->valid_txskb[priv->dma_tx])
483 priv->first_pkt = 1;
484 else {
485 ret = nuport_mac_start_tx_dma(priv, priv->tx_skb[priv->dma_tx]);
486 if (ret)
487 netdev_err(dev, "failed to restart TX dma\n");
488 }
489
490 if (priv->tx_full) {
491 netdev_dbg(dev, "restarting transmit queue\n");
492 netif_wake_queue(dev);
493 priv->tx_full = 0;
494 }
495
496 spin_unlock_irqrestore(&priv->lock, flags);
497
498 return IRQ_HANDLED;
499 }
500
501 static unsigned int nuport_mac_has_work(struct nuport_mac_priv *priv)
502 {
503 unsigned int i;
504
505 for (i = 0; i < RX_RING_SIZE; i++)
506 if (priv->rx_skb[i])
507 return 1;
508
509 return 0;
510 }
511
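/*
 * RX completion interrupt: record the received length for the current
 * buffer, hand the next ring buffer to the DMA engine (or mark the ring
 * full), then mask the RX DMA interrupt and let NAPI do the processing.
 */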
512 static irqreturn_t nuport_mac_rx_interrupt(int irq, void *dev_id)
513 {
514 struct net_device *dev = (struct net_device *)dev_id;
515 struct nuport_mac_priv *priv = netdev_priv(dev);
516 unsigned long flags;
517 int ret;
518
519 spin_lock_irqsave(&priv->lock, flags);
520 if (!priv->rx_full) {
521 priv->pkt_len[priv->dma_rx] = nuport_mac_readl(RX_ACT_BYTES) - 4;
522 priv->irq_rxskb[priv->dma_rx] = 0;
523 priv->dma_rx++;
524
525 if (priv->dma_rx >= RX_RING_SIZE)
526 priv->dma_rx = 0;
527 } else
528 priv->rx_full = 0;
529
530 if (priv->irq_rxskb[priv->dma_rx] == 1) {
531 ret = nuport_mac_start_rx_dma(priv, priv->rx_skb[priv->dma_rx]);
532 if (ret)
533 netdev_err(dev, "failed to start rx dma\n");
534 } else {
535 priv->rx_full = 1;
536 netdev_dbg(dev, "RX ring full\n");
537 }
538
539 if (likely(nuport_mac_has_work(priv))) {
540 /* find a way to disable DMA rx irq */
541 nuport_mac_disable_rx_dma(priv);
542 napi_schedule(&priv->napi);
543 }
544 spin_unlock_irqrestore(&priv->lock, flags);
545
546 return IRQ_HANDLED;
547 }
548
549 /* Process received packets in NAPI context */
550 static int nuport_mac_rx(struct net_device *dev, int limit)
551 {
552 struct nuport_mac_priv *priv = netdev_priv(dev);
553 struct sk_buff *skb;
554 int len, status;
555 int count = 0;
556
557 while (count < limit && !priv->irq_rxskb[priv->cur_rx]) {
558 skb = priv->rx_skb[priv->cur_rx];
559 len = priv->pkt_len[priv->cur_rx];
560
561 /* Remove 2 bytes added by RX buffer shifting */
562 len = len - priv->buffer_shifting_len;
563 skb->data = skb->data + priv->buffer_shifting_len;
564
565 /* Get packet status */
566 status = get_unaligned((u32 *) (skb->data + len));
567 skb->dev = dev;
568
569 dma_unmap_single(&priv->pdev->dev, priv->rx_addr, RX_ALLOC_SIZE,
570 DMA_FROM_DEVICE);
571
572 /* packet filter failed */
573 if (!(status & (1 << 30))) {
574 dev_kfree_skb_irq(skb);
575 goto exit;
576 }
577
578 /* missed frame */
579 if (status & (1 << 31)) {
580 dev->stats.rx_missed_errors++;
581 dev_kfree_skb_irq(skb);
582 goto exit;
583 }
584
585 /* Not ethernet type */
586 if ((!(status & (1 << 18))) || (status & ERROR_FILTER_MASK))
587 dev->stats.rx_errors++;
588
589 if (len > MAX_ETH_FRAME_SIZE) {
590 dev_kfree_skb_irq(skb);
591 goto exit;
592 } else
593 skb_put(skb, len);
594
595 skb->protocol = eth_type_trans(skb, dev);
596 dev->stats.rx_packets++;
597
598 if (status & (1 << 29))
599 skb->pkt_type = PACKET_OTHERHOST;
600 if (status & (1 << 27))
601 skb->pkt_type = PACKET_MULTICAST;
602 if (status & (1 << 28))
603 skb->pkt_type = PACKET_BROADCAST;
604
605 skb->ip_summed = CHECKSUM_UNNECESSARY;
606
607 /* Pass the received packet to network layer */
608 status = netif_receive_skb(skb);
609 if (status != NET_RX_DROP)
610 dev->stats.rx_bytes += len - 4; /* Without CRC */
611 else
612 dev->stats.rx_dropped++;
613
614 dev->last_rx = jiffies;
615
616 exit:
617 skb = netdev_alloc_skb(dev, RX_ALLOC_SIZE);
618 skb_reserve(skb, RX_SKB_HEADROOM);
619 priv->rx_skb[priv->cur_rx] = skb;
620 priv->irq_rxskb[priv->cur_rx] = 1;
621 priv->cur_rx++;
622
623 if (priv->cur_rx >= RX_RING_SIZE)
624 priv->cur_rx = 0;
625 count++;
626 }
627
628 return count;
629 }
630
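/*
 * NAPI poll: process up to 'budget' received packets, then re-enable the
 * RX DMA interrupt once the ring has been drained.
 */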
631 static int nuport_mac_poll(struct napi_struct *napi, int budget)
632 {
633 struct nuport_mac_priv *priv =
634 container_of(napi, struct nuport_mac_priv, napi);
635 struct net_device *dev = priv->dev;
636 int work_done;
637
638 work_done = nuport_mac_rx(dev, budget);
639
640 if (work_done < budget) {
641 napi_complete(napi);
642 nuport_mac_enable_rx_dma(priv);
643 }
644
645 return work_done;
646 }
647
648 static void nuport_mac_init_tx_ring(struct nuport_mac_priv *priv)
649 {
650 int i;
651
652 priv->cur_tx = priv->dma_tx = priv->tx_full = 0;
653 for (i = 0; i < TX_RING_SIZE; i++) {
654 priv->tx_skb[i] = NULL;
655 priv->valid_txskb[i] = 0;
656 }
657 priv->first_pkt = 1;
658 }
659
660 static int nuport_mac_init_rx_ring(struct net_device *dev)
661 {
662 struct nuport_mac_priv *priv = netdev_priv(dev);
663 struct sk_buff *skb;
664 int i;
665
666 priv->cur_rx = priv->dma_rx = priv->rx_full = 0;
667
668 for (i = 0; i < RX_RING_SIZE; i++) {
669 skb = netdev_alloc_skb(dev, RX_ALLOC_SIZE);
670 if (!skb)
671 return -ENOMEM;
672 skb_reserve(skb, RX_SKB_HEADROOM);
673 priv->rx_skb[i] = skb;
674 priv->irq_rxskb[i] = 1;
675 }
676
677 return 0;
678 }
679
680 static void nuport_mac_free_rx_ring(struct nuport_mac_priv *priv)
681 {
682 int i;
683
684 for (i = 0; i < RX_RING_SIZE; i++) {
685 if (!priv->rx_skb[i])
686 continue;
687
688 dev_kfree_skb(priv->rx_skb[i]);
689 priv->rx_skb[i] = NULL;
690 }
691
692 if (priv->rx_addr)
693 dma_unmap_single(&priv->pdev->dev, priv->rx_addr, RX_ALLOC_SIZE,
694 DMA_FROM_DEVICE);
695 }
696
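/*
 * The MAC address is spread across two registers: the first four bytes
 * in MAC_ADDR_LOW_REG, the last two in MAC_ADDR_HIGH_REG (typically
 * programmed by the bootloader). Fall back to a random address if the
 * stored one is invalid.
 */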
697 static void nuport_mac_read_mac_address(struct net_device *dev)
698 {
699 struct nuport_mac_priv *priv = netdev_priv(dev);
700 int i;
701
702 for (i = 0; i < 4; i++)
703 dev->dev_addr[i] = nuport_mac_readb(MAC_ADDR_LOW_REG + i);
704 dev->dev_addr[4] = nuport_mac_readb(MAC_ADDR_HIGH_REG);
705 dev->dev_addr[5] = nuport_mac_readb(MAC_ADDR_HIGH_REG + 1);
706
707 if (!is_valid_ether_addr(dev->dev_addr)) {
708 dev_info(&priv->pdev->dev, "using random address\n");
709 random_ether_addr(dev->dev_addr);
710 }
711 }
712
713 static int nuport_mac_change_mac_address(struct net_device *dev, void *mac_addr)
714 {
715 struct sockaddr *addr = mac_addr;
716 struct nuport_mac_priv *priv = netdev_priv(dev);
717 unsigned long *temp = (unsigned long *)dev->dev_addr;
718 u32 high, low;
719
720 if (netif_running(dev))
721 return -EBUSY;
722
723 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
724
725 spin_lock_irq(&priv->lock);
726
727 nuport_mac_writel(*temp, MAC_ADDR_LOW_REG);
728 temp = (unsigned long *)(dev->dev_addr + 4);
729 nuport_mac_writel(*temp, MAC_ADDR_HIGH_REG);
730
731 low = nuport_mac_readl(MAC_ADDR_LOW_REG);
732 high = nuport_mac_readl(MAC_ADDR_HIGH_REG);
733
734 spin_unlock_irq(&priv->lock);
735
736 return 0;
737 }
738
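/*
 * Bring the interface up: enable the MAC clock, program the control
 * register and MAC address, request the link/TX/RX interrupts, start the
 * PHY and NAPI, set up both rings and kick off the first RX DMA.
 */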
739 static int nuport_mac_open(struct net_device *dev)
740 {
741 int ret;
742 struct nuport_mac_priv *priv = netdev_priv(dev);
743 unsigned long flags;
744 u32 reg = 0;
745
746 ret = clk_enable(priv->emac_clk);
747 if (ret) {
748 netdev_err(dev, "failed to enable EMAC clock\n");
749 return ret;
750 }
751
752 /* Set MAC into full duplex mode by default */
753 reg |= RX_ENABLE | TX_ENABLE;
754 reg |= DEFER_CHECK | STRIP_PAD | DRTRY_DISABLE;
755 reg |= FULL_DUPLEX | HBD_DISABLE;
756 nuport_mac_writel(reg, CTRL_REG);
757
758 /* set mac address in hardware in case it was not already */
759 nuport_mac_change_mac_address(dev, dev->dev_addr);
760
761 ret = request_irq(priv->link_irq, &nuport_mac_link_interrupt,
762 0, dev->name, dev);
763 if (ret) {
764 netdev_err(dev, "unable to request link interrupt\n");
765 goto out_emac_clk;
766 }
767
768 ret = request_irq(priv->tx_irq, &nuport_mac_tx_interrupt,
769 0, dev->name, dev);
770 if (ret) {
771 netdev_err(dev, "unable to request tx interrupt\n");
772 goto out_link_irq;
773 }
774
775 /* Enable link interrupt monitoring for our PHY address */
776 reg = LINK_INT_EN | (priv->phydev->addr << LINK_PHY_ADDR_SHIFT);
777 /* MII_BMSR register to be watched */
778 reg |= (1 << LINK_PHY_REG_SHIFT);
779 /* BMSR_LSTATUS (link status) bit to be watched in particular */
780 reg |= (2 << LINK_BIT_UP_SHIFT);
781
782 spin_lock_irqsave(&priv->lock, flags);
783 nuport_mac_writel(reg, LINK_INT_CSR);
784 nuport_mac_writel(LINK_POLL_MASK, LINK_INT_POLL_TIME);
785 spin_unlock_irqrestore(&priv->lock, flags);
786
787 phy_start(priv->phydev);
788
789 napi_enable(&priv->napi);
790
791 ret = request_irq(priv->rx_irq, &nuport_mac_rx_interrupt,
792 0, dev->name, dev);
793 if (ret) {
794 netdev_err(dev, "unable to request rx interrupt\n");
795 goto out_tx_irq;
796 }
797
798 netif_start_queue(dev);
799
800 nuport_mac_init_tx_ring(priv);
801
802 ret = nuport_mac_init_rx_ring(dev);
803 if (ret) {
804 netdev_err(dev, "rx ring init failed\n");
805 goto out_rx_skb;
806 }
807
808 nuport_mac_reset_tx_dma(priv);
809 nuport_mac_reset_rx_dma(priv);
810
811 /* Start RX DMA */
812 spin_lock_irqsave(&priv->lock, flags);
813 ret = nuport_mac_start_rx_dma(priv, priv->rx_skb[0]);
814 spin_unlock_irqrestore(&priv->lock, flags);
815
816 return ret;
817
818 out_rx_skb:
819 nuport_mac_free_rx_ring(priv);
820 free_irq(priv->rx_irq, dev);
821 out_tx_irq:
822 free_irq(priv->tx_irq, dev);
823 out_link_irq:
824 free_irq(priv->link_irq, dev);
825 out_emac_clk:
826 clk_disable(priv->emac_clk);
827 return ret;
828 }
829
830 static int nuport_mac_close(struct net_device *dev)
831 {
832 struct nuport_mac_priv *priv = netdev_priv(dev);
833
834 napi_disable(&priv->napi);
835 netif_stop_queue(dev);
836
837 /* disable PHY polling and the link interrupt */
838 spin_lock_irq(&priv->lock);
839 nuport_mac_writel(0, LINK_INT_CSR);
840 nuport_mac_writel(0, LINK_INT_POLL_TIME);
841 spin_unlock_irq(&priv->lock);
842 phy_stop(priv->phydev);
843
844 free_irq(priv->link_irq, dev);
845 free_irq(priv->tx_irq, dev);
846 free_irq(priv->rx_irq, dev);
847
848 nuport_mac_free_rx_ring(priv);
849
850 clk_disable(priv->emac_clk);
851
852 return 0;
853 }
854
855 static void nuport_mac_tx_timeout(struct net_device *dev)
856 {
857 struct nuport_mac_priv *priv = netdev_priv(dev);
858 unsigned int i;
859
860 netdev_warn(dev, "transmit timeout, attempting recovery\n");
861
862 netdev_info(dev, "TX DMA regs\n");
863 for (i = 0; i < DMA_CHAN_WIDTH; i += 4)
864 netdev_info(dev, "[%02x]: 0x%08x\n", i, nuport_mac_readl(TX_DMA_BASE + i));
865 netdev_info(dev, "RX DMA regs\n");
866 for (i = 0; i < DMA_CHAN_WIDTH; i += 4)
867 netdev_info(dev, "[%02x]: 0x%08x\n", i, nuport_mac_readl(RX_DMA_BASE + i));
868
869 nuport_mac_init_tx_ring(priv);
870 nuport_mac_reset_tx_dma(priv);
871
872 netif_wake_queue(dev);
873 }
874
875 static int nuport_mac_mii_probe(struct net_device *dev)
876 {
877 struct nuport_mac_priv *priv = netdev_priv(dev);
878 struct phy_device *phydev = NULL;
879 int ret;
880
881 ret = clk_enable(priv->ephy_clk);
882 if (ret) {
883 netdev_err(dev, "unable to enable ePHY clk\n");
884 return ret;
885 }
886
887 phydev = phy_find_first(priv->mii_bus);
888 if (!phydev) {
889 netdev_err(dev, "no PHYs found\n");
890 ret = -ENODEV;
891 goto out;
892 }
893
894 phydev = phy_connect(dev, dev_name(&phydev->dev),
895 nuport_mac_adjust_link, 0,
896 PHY_INTERFACE_MODE_MII);
897 if (IS_ERR(phydev)) {
898 netdev_err(dev, "could not attach PHY\n");
899 ret = PTR_ERR(phydev);
900 goto out;
901 }
902
903 phydev->supported &= PHY_BASIC_FEATURES;
904 phydev->advertising = phydev->supported;
905 priv->phydev = phydev;
906 priv->old_link = 1;
907 priv->old_duplex = DUPLEX_FULL;
908
909 dev_info(&priv->pdev->dev, "attached PHY driver [%s] "
910 "(mii_bus:phy_addr=%d)\n",
911 phydev->drv->name, phydev->addr);
912
913 return 0;
914
915 out:
916 /* disable the Ethernet PHY clock for the moment */
917 clk_disable(priv->ephy_clk);
918
919 return ret;
920 }
921
922 static void nuport_mac_ethtool_drvinfo(struct net_device *dev,
923 struct ethtool_drvinfo *info)
924 {
925 strncpy(info->driver, "nuport-mac", sizeof(info->driver));
926 strncpy(info->version, "0.1", sizeof(info->version));
927 strncpy(info->fw_version, "N/A", sizeof(info->fw_version));
928 strncpy(info->bus_info, "internal", sizeof(info->bus_info));
929 info->n_stats = 0;
930 info->testinfo_len = 0;
931 info->regdump_len = 0;
932 info->eedump_len = 0;
933 }
934
935 static int nuport_mac_ethtool_get_settings(struct net_device *dev,
936 struct ethtool_cmd *cmd)
937 {
938 struct nuport_mac_priv *priv = netdev_priv(dev);
939
940 if (priv->phydev)
941 return phy_ethtool_gset(priv->phydev, cmd);
942
943 return -EINVAL;
944 }
945
946 static int nuport_mac_ethtool_set_settings(struct net_device *dev,
947 struct ethtool_cmd *cmd)
948 {
949 struct nuport_mac_priv *priv = netdev_priv(dev);
950
951 if (priv->phydev)
952 return phy_ethtool_sset(priv->phydev, cmd);
953
954 return -EINVAL;
955 }
956
957 static void nuport_mac_set_msglevel(struct net_device *dev, u32 msg_level)
958 {
959 struct nuport_mac_priv *priv = netdev_priv(dev);
960
961 priv->msg_level = msg_level;
962 }
963
964 static u32 nuport_mac_get_msglevel(struct net_device *dev)
965 {
966 struct nuport_mac_priv *priv = netdev_priv(dev);
967
968 return priv->msg_level;
969 }
970
971 static const struct ethtool_ops nuport_mac_ethtool_ops = {
972 .get_drvinfo = nuport_mac_ethtool_drvinfo,
973 .get_link = ethtool_op_get_link,
974 .get_settings = nuport_mac_ethtool_get_settings,
975 .set_settings = nuport_mac_ethtool_set_settings,
976 .set_msglevel = nuport_mac_set_msglevel,
977 .get_msglevel = nuport_mac_get_msglevel,
978 };
979
980 static const struct net_device_ops nuport_mac_ops = {
981 .ndo_open = nuport_mac_open,
982 .ndo_stop = nuport_mac_close,
983 .ndo_start_xmit = nuport_mac_start_xmit,
984 .ndo_change_mtu = eth_change_mtu,
985 .ndo_validate_addr = eth_validate_addr,
986 .ndo_set_mac_address = nuport_mac_change_mac_address,
987 .ndo_tx_timeout = nuport_mac_tx_timeout,
988 };
989
990 static int __devinit nuport_mac_probe(struct platform_device *pdev)
991 {
992 struct net_device *dev;
993 struct nuport_mac_priv *priv = NULL;
994 struct resource *regs, *dma;
995 int ret = 0;
996 int rx_irq, tx_irq, link_irq;
997 int i;
998 const unsigned int *intspec;
999
1000 dev = alloc_etherdev(sizeof(struct nuport_mac_priv));
1001 if (!dev) {
1002 dev_err(&pdev->dev, "no memory for net_device\n");
1003 return -ENOMEM;
1004 }
1005
1006 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1007 dma = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1008 if (!regs || !dma) {
1009 dev_err(&pdev->dev, "failed to get regs resources\n");
1010 free_netdev(dev);
1011 return -ENODEV;
1012 }
1013
1014 rx_irq = platform_get_irq(pdev, 0);
1015 tx_irq = platform_get_irq(pdev, 1);
1016 link_irq = platform_get_irq(pdev, 2);
1017 if (rx_irq < 0 || tx_irq < 0 || link_irq < 0) {
1018 free_netdev(dev);
1019 return -ENODEV;
1020 }
1021
1022 platform_set_drvdata(pdev, dev);
1023 SET_NETDEV_DEV(dev, &pdev->dev);
1024 priv = netdev_priv(dev);
1025 priv->pdev = pdev;
1026 priv->dev = dev;
1027 spin_lock_init(&priv->lock);
1028
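/*
 * Optional "nuport-mac,buffer-shifting" property: when present, received
 * frames carry two extra leading bytes (RX buffer shifting, presumably so
 * the IP header ends up 32-bit aligned); nuport_mac_rx() strips them again.
 */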
1029 intspec = of_get_property(pdev->dev.of_node,
1030 "nuport-mac,buffer-shifting", NULL);
1031 if (!intspec)
1032 priv->buffer_shifting_len = 0;
1033 else
1034 priv->buffer_shifting_len = 2;
1035
1036 priv->mac_base = devm_ioremap(&pdev->dev,
1037 regs->start, resource_size(regs));
1038 if (!priv->mac_base) {
1039 dev_err(&pdev->dev, "failed to remap regs\n");
1040 ret = -ENOMEM;
1041 goto out_platform;
1042 }
1043
1044 priv->dma_base = devm_ioremap(&pdev->dev,
1045 dma->start, resource_size(dma));
1046 if (!priv->dma_base) {
1047 dev_err(&pdev->dev, "failed to remap dma-regs\n");
1048 ret = -ENOMEM;
1049 goto out_platform;
1050 }
1051
1052 priv->emac_clk = clk_get(&pdev->dev, "emac");
1053 if (IS_ERR_OR_NULL(priv->emac_clk)) {
1054 dev_err(&pdev->dev, "failed to get emac clk\n");
1055 ret = PTR_ERR(priv->emac_clk);
1056 goto out_platform;
1057 }
1058
1059 priv->ephy_clk = clk_get(&pdev->dev, "ephy");
1060 if (IS_ERR_OR_NULL(priv->ephy_clk)) {
1061 dev_err(&pdev->dev, "failed to get ephy clk\n");
1062 ret = PTR_ERR(priv->ephy_clk);
1063 goto out_platform;
1064 }
1065
1066 priv->link_irq = link_irq;
1067 priv->rx_irq = rx_irq;
1068 priv->tx_irq = tx_irq;
1069 priv->msg_level = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK;
1070 dev->netdev_ops = &nuport_mac_ops;
1071 dev->ethtool_ops = &nuport_mac_ethtool_ops;
1072 dev->watchdog_timeo = HZ;
1073 dev->flags = IFF_BROADCAST; /* Supports Broadcast */
1074 dev->tx_queue_len = TX_RING_SIZE / 2;
1075
1076 netif_napi_add(dev, &priv->napi, nuport_mac_poll, 64);
1077
1078 priv->mii_bus = mdiobus_alloc();
1079 if (!priv->mii_bus) {
1080 dev_err(&pdev->dev, "mii bus allocation failed\n");
ret = -ENOMEM;
1081 goto out;
1082 }
1083
1084 priv->mii_bus->priv = dev;
1085 priv->mii_bus->read = nuport_mac_mii_read;
1086 priv->mii_bus->write = nuport_mac_mii_write;
1087 priv->mii_bus->reset = nuport_mac_mii_reset;
1088 priv->mii_bus->name = "nuport-mac-mii";
1089 priv->mii_bus->phy_mask = (1 << 0);
1090 snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s", pdev->name);
1091 priv->mii_bus->irq = kzalloc(PHY_MAX_ADDR * sizeof(int), GFP_KERNEL);
1092 if (!priv->mii_bus->irq) {
1093 dev_err(&pdev->dev, "failed to allocate mii_bus irqs\n");
1094 ret = -ENOMEM;
1095 goto out_mdio;
1096 }
1097
1098 /* We support PHY interrupts routed back to the MAC */
1099 for (i = 0; i < PHY_MAX_ADDR; i++)
1100 priv->mii_bus->irq[i] = PHY_IGNORE_INTERRUPT;
1101
1102 ret = mdiobus_register(priv->mii_bus);
1103 if (ret) {
1104 dev_err(&pdev->dev, "failed to register mii_bus\n");
1105 goto out_mdio_irq;
1106 }
1107
1108 ret = nuport_mac_mii_probe(dev);
1109 if (ret) {
1110 dev_err(&pdev->dev, "failed to probe MII bus\n");
1111 goto out_mdio_unregister;
1112 }
1113
1114 ret = register_netdev(dev);
1115 if (ret) {
1116 dev_err(&pdev->dev, "failed to register net_device\n");
1117 goto out_mdio_probe;
1118 }
1119
1120 /* read existing mac address */
1121 nuport_mac_read_mac_address(dev);
1122
1123 dev_info(&pdev->dev, "registered (MAC: %pM)\n", dev->dev_addr);
1124
1125 return ret;
1126
1127 out_mdio_probe:
1128 phy_disconnect(priv->phydev);
1129 out_mdio_unregister:
1130 mdiobus_unregister(priv->mii_bus);
1131 out_mdio_irq:
1132 kfree(priv->mii_bus->irq);
1133 out_mdio:
1134 mdiobus_free(priv->mii_bus);
1135 out_platform:
1136 platform_set_drvdata(pdev, NULL);
1137 out:
1138 if (!IS_ERR_OR_NULL(priv->ephy_clk))
clk_put(priv->ephy_clk);
1139 if (!IS_ERR_OR_NULL(priv->emac_clk))
clk_put(priv->emac_clk);
1140 free_netdev(dev);
1141 platform_set_drvdata(pdev, NULL);
1142 return ret;
1143 }
1144
1145 static int nuport_mac_remove(struct platform_device *pdev)
1146 {
1147 struct net_device *dev = platform_get_drvdata(pdev);
1148 struct nuport_mac_priv *priv = netdev_priv(dev);
1149
1150 unregister_netdev(dev);
1151 phy_disconnect(priv->phydev);
1152 mdiobus_unregister(priv->mii_bus);
1153 kfree(priv->mii_bus->irq);
1154 mdiobus_free(priv->mii_bus);
1155 clk_put(priv->ephy_clk);
1156 clk_put(priv->emac_clk);
1157 free_netdev(dev);
1158
1159 platform_set_drvdata(pdev, NULL);
1160
1161 return 0;
1162 }
1163
1164 static const struct of_device_id nuport_eth_ids[] = {
1165 {.compatible = "moschip,nuport-mac",},
1166 { /* sentinel */ },
1167 };
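/*
 * Rough binding sketch for reference -- only the compatible string, the
 * resource/interrupt ordering and the optional property are taken from
 * this driver; addresses, sizes and interrupt numbers below are made-up
 * placeholders:
 *
 *	ethernet@40084000 {
 *		compatible = "moschip,nuport-mac";
 *		reg = <0x40084000 0x80>,	// MAC registers (hypothetical)
 *		      <0x40080000 0x80>;	// DMA registers (hypothetical)
 *		interrupts = <4>, <5>, <6>;	// rx, tx, link order, as in probe()
 *		nuport-mac,buffer-shifting;	// optional, see probe()
 *	};
 *
 * The "emac" and "ephy" clocks are looked up with clk_get() against the
 * platform device, so no clock properties appear in this sketch.
 */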
1168
1169 static struct platform_driver nuport_eth_driver = {
1170 .driver = {
1171 .name = "nuport-mac",
1172 .owner = THIS_MODULE,
1173 .of_match_table = nuport_eth_ids,
1174 },
1175 .probe = nuport_mac_probe,
1176 .remove = __devexit_p(nuport_mac_remove),
1177 };
1178
1179 module_platform_driver(nuport_eth_driver);
1180
1181 MODULE_AUTHOR("Moschip Semiconductors Ltd.");
1182 MODULE_DESCRIPTION("Moschip MCS8140 Ethernet MAC driver");
1183 MODULE_LICENSE("GPL");