cns3xxx: use kmalloc instead of kzalloc for ethernet rx buffers
openwrt/openwrt.git: target/linux/cns3xxx/files/drivers/net/ethernet/cavium/cns3xxx_eth.c
1 /*
2 * Cavium CNS3xxx Gigabit driver for Linux
3 *
4 * Copyright 2011 Gateworks Corporation
5 * Chris Lang <clang@gateworks.com>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of version 2 of the GNU General Public License
9 * as published by the Free Software Foundation.
10 *
11 */
12
13 #include <linux/delay.h>
14 #include <linux/module.h>
15 #include <linux/dma-mapping.h>
16 #include <linux/dmapool.h>
17 #include <linux/etherdevice.h>
18 #include <linux/interrupt.h>
19 #include <linux/io.h>
20 #include <linux/kernel.h>
21 #include <linux/phy.h>
22 #include <linux/platform_device.h>
23 #include <linux/skbuff.h>
24 #include <mach/irqs.h>
25 #include <mach/platform.h>
26
27 #define DRV_NAME "cns3xxx_eth"
28
29 #define RX_DESCS 128
30 #define TX_DESCS 128
31 #define TX_DESC_RESERVE 20
32
33 #define RX_POOL_ALLOC_SIZE (sizeof(struct rx_desc) * RX_DESCS)
34 #define TX_POOL_ALLOC_SIZE (sizeof(struct tx_desc) * TX_DESCS)
35 #define REGS_SIZE 336
36
37 #define RX_BUFFER_ALIGN 64
38 #define RX_BUFFER_ALIGN_MASK (~(RX_BUFFER_ALIGN - 1))
39
40 #define SKB_HEAD_ALIGN (((PAGE_SIZE - NET_SKB_PAD) % RX_BUFFER_ALIGN) + NET_SKB_PAD + NET_IP_ALIGN)
41 #define RX_SEGMENT_ALLOC_SIZE 2048
42 #define RX_SEGMENT_BUFSIZE (SKB_WITH_OVERHEAD(RX_SEGMENT_ALLOC_SIZE))
43 #define RX_SEGMENT_MRU (((RX_SEGMENT_BUFSIZE - SKB_HEAD_ALIGN) & RX_BUFFER_ALIGN_MASK) - NET_IP_ALIGN)
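/*
 * Worked example of the sizing math above (the exact values depend on
 * the kernel configuration; assuming PAGE_SIZE = 4096, NET_SKB_PAD = 64
 * and NET_IP_ALIGN = 2): SKB_HEAD_ALIGN = ((4096 - 64) % 64) + 64 + 2 =
 * 66 bytes of headroom, RX_SEGMENT_BUFSIZE = 2048 -
 * SKB_DATA_ALIGN(sizeof(struct skb_shared_info)), and RX_SEGMENT_MRU
 * rounds what is left down to a multiple of RX_BUFFER_ALIGN (64) minus
 * NET_IP_ALIGN, so every receive DMA target stays 64-byte aligned.
 */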
44 #define MAX_MTU 9500
45
46 #define NAPI_WEIGHT 64
47
48 /* MDIO Defines */
49 #define MDIO_CMD_COMPLETE 0x00008000
50 #define MDIO_WRITE_COMMAND 0x00002000
51 #define MDIO_READ_COMMAND 0x00004000
52 #define MDIO_REG_OFFSET 8
53 #define MDIO_VALUE_OFFSET 16
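/*
 * phy_control register layout, as used by cns3xxx_mdio_cmd() below:
 * bits 4:0 PHY address, bits 12:8 register number, bit 13 write
 * command, bit 14 read command, bit 15 command-complete (write 1 to
 * clear), bits 31:16 data value.
 */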
54
55 /* Descriptor Defines */
56 #define END_OF_RING 0x40000000
57 #define FIRST_SEGMENT 0x20000000
58 #define LAST_SEGMENT 0x10000000
59 #define FORCE_ROUTE 0x04000000
60 #define IP_CHECKSUM 0x00040000
61 #define UDP_CHECKSUM 0x00020000
62 #define TCP_CHECKSUM 0x00010000
63
64 /* Port Config Defines */
65 #define PORT_BP_ENABLE 0x00020000
66 #define PORT_DISABLE 0x00040000
67 #define PORT_LEARN_DIS 0x00080000
68 #define PORT_BLOCK_STATE 0x00100000
69 #define PORT_BLOCK_MODE 0x00200000
70
71 #define PROMISC_OFFSET 29
72
73 /* Global Config Defines */
74 #define UNKNOWN_VLAN_TO_CPU 0x02000000
75 #define ACCEPT_CRC_PACKET 0x00200000
76 #define CRC_STRIPPING 0x00100000
77
78 /* VLAN Config Defines */
79 #define NIC_MODE 0x00008000
80 #define VLAN_UNAWARE 0x00000001
81
82 /* DMA AUTO Poll Defines */
83 #define TS_POLL_EN 0x00000020
84 #define TS_SUSPEND 0x00000010
85 #define FS_POLL_EN 0x00000002
86 #define FS_SUSPEND 0x00000001
87
88 /* DMA Ring Control Defines */
89 #define QUEUE_THRESHOLD 0x000000f0
90 #define CLR_FS_STATE 0x80000000
91
92 /* Interrupt Status Defines */
93 #define MAC0_STATUS_CHANGE 0x00004000
94 #define MAC1_STATUS_CHANGE 0x00008000
95 #define MAC2_STATUS_CHANGE 0x00010000
96 #define MAC0_RX_ERROR 0x00100000
97 #define MAC1_RX_ERROR 0x00200000
98 #define MAC2_RX_ERROR 0x00400000
99
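/*
 * Both descriptor formats below carry a "cown" (CPU ownership) bit:
 * the switch sets it when handing a descriptor back to the CPU, and
 * the driver clears it (by rewriting config0) when it gives the
 * descriptor to the hardware.
 */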
100 struct tx_desc
101 {
102 u32 sdp; /* segment data pointer */
103
104 union {
105 struct {
106 u32 sdl:16; /* segment data length */
107 u32 tco:1;
108 u32 uco:1;
109 u32 ico:1;
110 u32 rsv_1:3; /* reserved */
111 u32 pri:3;
112 u32 fp:1; /* force priority */
113 u32 fr:1;
114 u32 interrupt:1;
115 u32 lsd:1;
116 u32 fsd:1;
117 u32 eor:1;
118 u32 cown:1;
119 };
120 u32 config0;
121 };
122
123 union {
124 struct {
125 u32 ctv:1;
126 u32 stv:1;
127 u32 sid:4;
128 u32 inss:1;
129 u32 dels:1;
130 u32 rsv_2:9;
131 u32 pmap:5;
132 u32 mark:3;
133 u32 ewan:1;
134 u32 fewan:1;
135 u32 rsv_3:5;
136 };
137 u32 config1;
138 };
139
140 union {
141 struct {
142 u32 c_vid:12;
143 u32 c_cfs:1;
144 u32 c_pri:3;
145 u32 s_vid:12;
146 u32 s_dei:1;
147 u32 s_pri:3;
148 };
149 u32 config2;
150 };
151
152 u8 alignment[16]; /* for 32 byte alignment */
153 };
154
155 struct rx_desc
156 {
157 u32 sdp; /* segment data pointer */
158
159 union {
160 struct {
161 u32 sdl:16; /* segment data length */
162 u32 l4f:1;
163 u32 ipf:1;
164 u32 prot:4;
165 u32 hr:6;
166 u32 lsd:1;
167 u32 fsd:1;
168 u32 eor:1;
169 u32 cown:1;
170 };
171 u32 config0;
172 };
173
174 union {
175 struct {
176 u32 ctv:1;
177 u32 stv:1;
178 u32 unv:1;
179 u32 iwan:1;
180 u32 exdv:1;
181 u32 e_wan:1;
182 u32 rsv_1:2;
183 u32 sp:3;
184 u32 crc_err:1;
185 u32 un_eth:1;
186 u32 tc:2;
187 u32 rsv_2:1;
188 u32 ip_offset:5;
189 u32 rsv_3:11;
190 };
191 u32 config1;
192 };
193
194 union {
195 struct {
196 u32 c_vid:12;
197 u32 c_cfs:1;
198 u32 c_pri:3;
199 u32 s_vid:12;
200 u32 s_dei:1;
201 u32 s_pri:3;
202 };
203 u32 config2;
204 };
205
206 u8 alignment[16]; /* for 32 byte alignment */
207 };
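/* Both descriptors are padded to 32 bytes; the DMA pools for the rings
   are created with a matching 32-byte alignment in init_rings(). */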
208
209
210 struct switch_regs {
211 u32 phy_control;
212 u32 phy_auto_addr;
213 u32 mac_glob_cfg;
214 u32 mac_cfg[4];
215 u32 mac_pri_ctrl[5], __res;
216 u32 etype[2];
217 u32 udp_range[4];
218 u32 prio_etype_udp;
219 u32 prio_ipdscp[8];
220 u32 tc_ctrl;
221 u32 rate_ctrl;
222 u32 fc_glob_thrs;
223 u32 fc_port_thrs;
224 u32 mc_fc_glob_thrs;
225 u32 dc_glob_thrs;
226 u32 arl_vlan_cmd;
227 u32 arl_ctrl[3];
228 u32 vlan_cfg;
229 u32 pvid[2];
230 u32 vlan_ctrl[3];
231 u32 session_id[8];
232 u32 intr_stat;
233 u32 intr_mask;
234 u32 sram_test;
235 u32 mem_queue;
236 u32 farl_ctrl;
237 u32 fc_input_thrs, __res1[2];
238 u32 clk_skew_ctrl;
239 u32 mac_glob_cfg_ext, __res2[2];
240 u32 dma_ring_ctrl;
241 u32 dma_auto_poll_cfg;
242 u32 delay_intr_cfg, __res3;
243 u32 ts_dma_ctrl0;
244 u32 ts_desc_ptr0;
245 u32 ts_desc_base_addr0, __res4;
246 u32 fs_dma_ctrl0;
247 u32 fs_desc_ptr0;
248 u32 fs_desc_base_addr0, __res5;
249 u32 ts_dma_ctrl1;
250 u32 ts_desc_ptr1;
251 u32 ts_desc_base_addr1, __res6;
252 u32 fs_dma_ctrl1;
253 u32 fs_desc_ptr1;
254 u32 fs_desc_base_addr1;
255 u32 __res7[109];
256 u32 mac_counter0[13];
257 };
258
259 struct _tx_ring {
260 struct tx_desc *desc;
261 dma_addr_t phys_addr;
262 struct tx_desc *cur_addr;
263 struct sk_buff *buff_tab[TX_DESCS];
264 unsigned int phys_tab[TX_DESCS];
265 u32 free_index;
266 u32 count_index;
267 u32 cur_index;
268 int num_used;
269 int num_count;
270 bool stopped;
271 };
272
273 struct _rx_ring {
274 struct rx_desc *desc;
275 dma_addr_t phys_addr;
276 struct rx_desc *cur_addr;
277 void *buff_tab[RX_DESCS];
278 unsigned int phys_tab[RX_DESCS];
279 u32 cur_index;
280 u32 alloc_index;
281 int alloc_count;
282 };
283
284 struct sw {
285 struct resource *mem_res;
286 struct switch_regs __iomem *regs;
287 struct napi_struct napi;
288 struct cns3xxx_plat_info *plat;
289 struct _tx_ring *tx_ring;
290 struct _rx_ring *rx_ring;
291 struct sk_buff *frag_first;
292 struct sk_buff *frag_last;
293 };
294
295 struct port {
296 struct net_device *netdev;
297 struct phy_device *phydev;
298 struct sw *sw;
299 int id; /* logical port ID */
300 int speed, duplex;
301 };
302
303 static spinlock_t mdio_lock;
304 static DEFINE_SPINLOCK(tx_lock);
305 static struct switch_regs __iomem *mdio_regs; /* mdio command and status only */
306 struct mii_bus *mdio_bus;
307 static int ports_open;
308 static struct port *switch_port_tab[4];
309 static struct dma_pool *rx_dma_pool;
310 static struct dma_pool *tx_dma_pool;
311 struct net_device *napi_dev;
312
313 static int cns3xxx_mdio_cmd(struct mii_bus *bus, int phy_id, int location,
314 int write, u16 cmd)
315 {
316 int cycles = 0;
317 u32 temp = 0;
318
319 temp = __raw_readl(&mdio_regs->phy_control);
320 temp |= MDIO_CMD_COMPLETE;
321 __raw_writel(temp, &mdio_regs->phy_control);
322 udelay(10);
323
324 if (write) {
325 temp = (cmd << MDIO_VALUE_OFFSET);
326 temp |= MDIO_WRITE_COMMAND;
327 } else {
328 temp = MDIO_READ_COMMAND;
329 }
330 temp |= ((location & 0x1f) << MDIO_REG_OFFSET);
331 temp |= (phy_id & 0x1f);
332
333 __raw_writel(temp, &mdio_regs->phy_control);
334
335 while (((__raw_readl(&mdio_regs->phy_control) & MDIO_CMD_COMPLETE) == 0)
336 && cycles < 5000) {
337 udelay(1);
338 cycles++;
339 }
340
341 if (cycles == 5000) {
342 printk(KERN_ERR "%s #%i: MII transaction failed\n", bus->name,
343 phy_id);
344 return -1;
345 }
346
347 temp = __raw_readl(&mdio_regs->phy_control);
348 temp |= MDIO_CMD_COMPLETE;
349 __raw_writel(temp, &mdio_regs->phy_control);
350
351 if (write)
352 return 0;
353
354 return ((temp >> MDIO_VALUE_OFFSET) & 0xFFFF);
355 }
356
357 static int cns3xxx_mdio_read(struct mii_bus *bus, int phy_id, int location)
358 {
359 unsigned long flags;
360 int ret;
361
362 spin_lock_irqsave(&mdio_lock, flags);
363 ret = cns3xxx_mdio_cmd(bus, phy_id, location, 0, 0);
364 spin_unlock_irqrestore(&mdio_lock, flags);
365 return ret;
366 }
367
368 static int cns3xxx_mdio_write(struct mii_bus *bus, int phy_id, int location,
369 u16 val)
370 {
371 unsigned long flags;
372 int ret;
373
374 spin_lock_irqsave(&mdio_lock, flags);
375 ret = cns3xxx_mdio_cmd(bus, phy_id, location, 1, val);
376 spin_unlock_irqrestore(&mdio_lock, flags);
377 return ret;
378 }
379
380 static int cns3xxx_mdio_register(void)
381 {
382 int err;
383
384 if (!(mdio_bus = mdiobus_alloc()))
385 return -ENOMEM;
386
387 mdio_regs = (struct switch_regs __iomem *)CNS3XXX_SWITCH_BASE_VIRT;
388
389 spin_lock_init(&mdio_lock);
390 mdio_bus->name = "CNS3xxx MII Bus";
391 mdio_bus->read = &cns3xxx_mdio_read;
392 mdio_bus->write = &cns3xxx_mdio_write;
393 strcpy(mdio_bus->id, "0");
394
395 if ((err = mdiobus_register(mdio_bus)))
396 mdiobus_free(mdio_bus);
397 return err;
398 }
399
400 static void cns3xxx_mdio_remove(void)
401 {
402 mdiobus_unregister(mdio_bus);
403 mdiobus_free(mdio_bus);
404 }
405
406 static void enable_tx_dma(struct sw *sw)
407 {
408 __raw_writel(0x1, &sw->regs->ts_dma_ctrl0);
409 }
410
411 static void enable_rx_dma(struct sw *sw)
412 {
413 __raw_writel(0x1, &sw->regs->fs_dma_ctrl0);
414 }
415
416 static void cns3xxx_adjust_link(struct net_device *dev)
417 {
418 struct port *port = netdev_priv(dev);
419 struct phy_device *phydev = port->phydev;
420
421 if (!phydev->link) {
422 if (port->speed) {
423 port->speed = 0;
424 printk(KERN_INFO "%s: link down\n", dev->name);
425 }
426 return;
427 }
428
429 if (port->speed == phydev->speed && port->duplex == phydev->duplex)
430 return;
431
432 port->speed = phydev->speed;
433 port->duplex = phydev->duplex;
434
435 printk(KERN_INFO "%s: link up, speed %u Mb/s, %s duplex\n",
436 dev->name, port->speed, port->duplex ? "full" : "half");
437 }
438
439 irqreturn_t eth_rx_irq(int irq, void *pdev)
440 {
441 struct net_device *dev = pdev;
442 struct sw *sw = netdev_priv(dev);
443 if (likely(napi_schedule_prep(&sw->napi))) {
444 disable_irq_nosync(IRQ_CNS3XXX_SW_R0RXC);
445 __napi_schedule(&sw->napi);
446 }
447 return (IRQ_HANDLED);
448 }
449
450 irqreturn_t eth_stat_irq(int irq, void *pdev)
451 {
452 struct net_device *dev = pdev;
453 struct sw *sw = netdev_priv(dev);
454 u32 cfg;
455 u32 stat = __raw_readl(&sw->regs->intr_stat);
456 __raw_writel(0xffffffff, &sw->regs->intr_stat);
457
458 if (stat & MAC2_RX_ERROR)
459 switch_port_tab[3]->netdev->stats.rx_dropped++;
460 if (stat & MAC1_RX_ERROR)
461 switch_port_tab[1]->netdev->stats.rx_dropped++;
462 if (stat & MAC0_RX_ERROR)
463 switch_port_tab[0]->netdev->stats.rx_dropped++;
464
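/* mac_cfg status decoding (inferred from the bit tests below): bit 0 =
   link up, bits 3:2 = speed (2 = 1000M, 1 = 100M, 0 = 10M), bit 4 = duplex */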
465 if (stat & MAC0_STATUS_CHANGE) {
466 cfg = __raw_readl(&sw->regs->mac_cfg[0]);
467 switch_port_tab[0]->phydev->link = (cfg & 0x1);
468 switch_port_tab[0]->phydev->duplex = ((cfg >> 4) & 0x1);
469 if (((cfg >> 2) & 0x3) == 2)
470 switch_port_tab[0]->phydev->speed = 1000;
471 else if (((cfg >> 2) & 0x3) == 1)
472 switch_port_tab[0]->phydev->speed = 100;
473 else
474 switch_port_tab[0]->phydev->speed = 10;
475 cns3xxx_adjust_link(switch_port_tab[0]->netdev);
476 }
477
478 if (stat & MAC1_STATUS_CHANGE) {
479 cfg = __raw_readl(&sw->regs->mac_cfg[1]);
480 switch_port_tab[1]->phydev->link = (cfg & 0x1);
481 switch_port_tab[1]->phydev->duplex = ((cfg >> 4) & 0x1);
482 if (((cfg >> 2) & 0x3) == 2)
483 switch_port_tab[1]->phydev->speed = 1000;
484 else if (((cfg >> 2) & 0x3) == 1)
485 switch_port_tab[1]->phydev->speed = 100;
486 else
487 switch_port_tab[1]->phydev->speed = 10;
488 cns3xxx_adjust_link(switch_port_tab[1]->netdev);
489 }
490
491 if (stat & MAC2_STATUS_CHANGE) {
492 cfg = __raw_readl(&sw->regs->mac_cfg[3]);
493 switch_port_tab[3]->phydev->link = (cfg & 0x1);
494 switch_port_tab[3]->phydev->duplex = ((cfg >> 4) & 0x1);
495 if (((cfg >> 2) & 0x3) == 2)
496 switch_port_tab[3]->phydev->speed = 1000;
497 else if (((cfg >> 2) & 0x3) == 1)
498 switch_port_tab[3]->phydev->speed = 100;
499 else
500 switch_port_tab[3]->phydev->speed = 10;
501 cns3xxx_adjust_link(switch_port_tab[3]->netdev);
502 }
503
504 return (IRQ_HANDLED);
505 }
506
507
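/*
 * Replenish RX ring buffers. Plain kmalloc() is used rather than
 * kzalloc(): zeroing is wasted work here, since the switch DMA engine
 * overwrites the payload area and build_skb() initialises the skb
 * fields. The first SKB_HEAD_ALIGN bytes are left untouched as skb
 * headroom, which is why the mapping starts at buf + SKB_HEAD_ALIGN.
 */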
508 static void cns3xxx_alloc_rx_buf(struct sw *sw, int received)
509 {
510 struct _rx_ring *rx_ring = sw->rx_ring;
511 unsigned int i = rx_ring->alloc_index;
512 struct rx_desc *desc = &(rx_ring)->desc[i];
513 void *buf;
514 unsigned int phys;
515
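/* alloc_count remembers buffers that could not be allocated on a
   previous pass, so retry those in addition to the newly consumed ones */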
516 for (received += rx_ring->alloc_count; received > 0; received--) {
517 buf = kmalloc(RX_SEGMENT_ALLOC_SIZE, GFP_ATOMIC);
518 if (!buf)
519 break;
520
521 phys = dma_map_single(NULL, buf + SKB_HEAD_ALIGN,
522 RX_SEGMENT_MRU, DMA_FROM_DEVICE);
523 if (dma_mapping_error(NULL, phys)) {
524 kfree(buf);
525 break;
526 }
527
528 desc->sdl = RX_SEGMENT_MRU;
529 desc->sdp = phys;
530
531 wmb();
532
533 /* put the new buffer on RX-free queue */
534 rx_ring->buff_tab[i] = buf;
535 rx_ring->phys_tab[i] = phys;
536 if (i == RX_DESCS - 1) {
537 i = 0;
538 desc->config0 = END_OF_RING | FIRST_SEGMENT |
539 LAST_SEGMENT | RX_SEGMENT_MRU;
540 desc = &(rx_ring)->desc[i];
541 } else {
542 desc->config0 = FIRST_SEGMENT | LAST_SEGMENT |
543 RX_SEGMENT_MRU;
544 i++;
545 desc++;
546 }
547 }
548
549 rx_ring->alloc_count = received;
550 rx_ring->alloc_index = i;
551 }
552
553 static void eth_check_num_used(struct _tx_ring *tx_ring)
554 {
555 bool stop = false;
556 int i;
557
558 if (tx_ring->num_used >= TX_DESCS - TX_DESC_RESERVE)
559 stop = true;
560
561 if (tx_ring->stopped == stop)
562 return;
563
564 tx_ring->stopped = stop;
565 for (i = 0; i < 4; i++) {
566 struct port *port = switch_port_tab[i];
567 struct net_device *dev;
568
569 if (!port)
570 continue;
571
572 dev = port->netdev;
573 if (stop)
574 netif_stop_queue(dev);
575 else
576 netif_wake_queue(dev);
577 }
578 }
579
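/*
 * Reclaim TX descriptors the switch has completed (cown set): unmap
 * the data, free the skb, and advance free_index. Called with tx_lock
 * held by both eth_poll() and eth_xmit().
 */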
580 static void eth_complete_tx(struct sw *sw)
581 {
582 struct _tx_ring *tx_ring = sw->tx_ring;
583 struct tx_desc *desc;
584 int i;
585 int index;
586 int num_used = tx_ring->num_used;
587 struct sk_buff *skb;
588
589 index = tx_ring->free_index;
590 desc = &(tx_ring)->desc[index];
591 for (i = 0; i < num_used; i++) {
592 if (desc->cown) {
593 skb = tx_ring->buff_tab[index];
594 tx_ring->buff_tab[index] = 0;
595 if (skb)
596 dev_kfree_skb_any(skb);
597 dma_unmap_single(NULL, tx_ring->phys_tab[index],
598 desc->sdl, DMA_TO_DEVICE);
599 if (++index == TX_DESCS) {
600 index = 0;
601 desc = &(tx_ring)->desc[index];
602 } else {
603 desc++;
604 }
605 } else {
606 break;
607 }
608 }
609 tx_ring->free_index = index;
610 tx_ring->num_used -= i;
611 eth_check_num_used(tx_ring);
612 }
613
614 static int eth_poll(struct napi_struct *napi, int budget)
615 {
616 struct sw *sw = container_of(napi, struct sw, napi);
617 struct _rx_ring *rx_ring = sw->rx_ring;
618 int received = 0;
619 unsigned int length;
620 unsigned int i = rx_ring->cur_index;
621 struct rx_desc *desc = &(rx_ring)->desc[i];
622
623 while (desc->cown) {
624 struct sk_buff *skb;
625 int reserve = SKB_HEAD_ALIGN;
626
627 if (received >= budget)
628 break;
629
630 /* process received frame */
631 dma_unmap_single(NULL, rx_ring->phys_tab[i],
632 RX_SEGMENT_MRU, DMA_FROM_DEVICE);
633
634 skb = build_skb(rx_ring->buff_tab[i], 0);
635 if (!skb)
636 break;
637
638 skb->dev = switch_port_tab[desc->sp]->netdev;
639
640 length = desc->sdl;
641 if (desc->fsd && !desc->lsd)
642 length = RX_SEGMENT_MRU;
643
644 if (!desc->fsd) {
645 reserve -= NET_IP_ALIGN;
646 if (!desc->lsd)
647 length += NET_IP_ALIGN;
648 }
649
650 skb_reserve(skb, reserve);
651 skb_put(skb, length);
652
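/* A frame larger than one segment arrives as a run of fsd ... lsd
   pieces; chain them onto frag_first's frag list and only hand the
   assembled skb up the stack once the last segment (lsd) is seen. */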
653 if (!sw->frag_first)
654 sw->frag_first = skb;
655 else {
656 if (sw->frag_first == sw->frag_last)
657 skb_frag_add_head(sw->frag_first, skb);
658 else
659 sw->frag_last->next = skb;
660 sw->frag_first->len += skb->len;
661 sw->frag_first->data_len += skb->len;
662 sw->frag_first->truesize += skb->truesize;
663 }
664 sw->frag_last = skb;
665
666 if (desc->lsd) {
667 struct net_device *dev;
668
669 skb = sw->frag_first;
670 dev = skb->dev;
671 skb->protocol = eth_type_trans(skb, dev);
672
673 dev->stats.rx_packets++;
674 dev->stats.rx_bytes += skb->len;
675
676 /* RX Hardware checksum offload */
677 skb->ip_summed = CHECKSUM_NONE;
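/* desc->prot appears to encode the protocol the hardware parsed; the
   values accepted below look like the TCP/UDP over IPv4/IPv6 cases,
   and l4f is set when the hardware L4 checksum check failed */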
678 switch (desc->prot) {
679 case 1:
680 case 2:
681 case 5:
682 case 6:
683 case 13:
684 case 14:
685 if (!desc->l4f) {
686 skb->ip_summed = CHECKSUM_UNNECESSARY;
687 napi_gro_receive(napi, skb);
688 break;
689 }
690 /* fall through */
691 default:
692 netif_receive_skb(skb);
693 break;
694 }
695
696 sw->frag_first = NULL;
697 sw->frag_last = NULL;
698 }
699
700 received++;
701 if (++i == RX_DESCS) {
702 i = 0;
703 desc = &(rx_ring)->desc[i];
704 } else {
705 desc++;
706 }
707 }
708
709 if (!received) {
710 napi_complete(napi);
711 enable_irq(IRQ_CNS3XXX_SW_R0RXC);
712 }
713
714 cns3xxx_alloc_rx_buf(sw, received);
715
716 rx_ring->cur_index = i;
717
718 wmb();
719 enable_rx_dma(sw);
720
721 spin_lock_bh(&tx_lock);
722 eth_complete_tx(sw);
723 spin_unlock_bh(&tx_lock);
724
725 return received;
726 }
727
728 static void eth_set_desc(struct _tx_ring *tx_ring, int index, int index_last,
729 void *data, int len, u32 config0, u32 pmap)
730 {
731 struct tx_desc *tx_desc = &(tx_ring)->desc[index];
732 unsigned int phys;
733
734 phys = dma_map_single(NULL, data, len, DMA_TO_DEVICE);
735 tx_desc->sdp = phys;
736 tx_desc->pmap = pmap;
737 tx_ring->phys_tab[index] = phys;
738
739 config0 |= len;
740 if (index == TX_DESCS - 1)
741 config0 |= END_OF_RING;
742 if (index == index_last)
743 config0 |= LAST_SEGMENT;
744
745 wmb();
746 tx_desc->config0 = config0;
747 }
748
749 static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
750 {
751 struct port *port = netdev_priv(dev);
752 struct sw *sw = port->sw;
753 struct _tx_ring *tx_ring = sw->tx_ring;
754 struct sk_buff *skb1;
755 char pmap = (1 << port->id);
756 int nr_frags = skb_shinfo(skb)->nr_frags;
757 int nr_desc = nr_frags;
758 int index0, index, index_last;
759 int len0;
760 unsigned int i;
761 u32 config0;
762
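/* pmap is the physical destination port mask; logical port 3 (MAC2)
   evidently occupies bit 4 of the descriptor pmap field, hence the
   remap of 1 << 3 below */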
763 if (pmap == 8)
764 pmap = (1 << 4);
765
766 skb_walk_frags(skb, skb1)
767 nr_desc++;
768
769 spin_lock_bh(&tx_lock);
770
771 eth_complete_tx(sw);
772 if ((tx_ring->num_used + nr_desc + 1) >= TX_DESCS) {
773 spin_unlock_bh(&tx_lock);
774 return NETDEV_TX_BUSY;
775 }
776
777 index = index0 = tx_ring->cur_index;
778 index_last = (index0 + nr_desc) % TX_DESCS;
779 tx_ring->cur_index = (index_last + 1) % TX_DESCS;
780
781 spin_unlock_bh(&tx_lock);
782
783 config0 = FORCE_ROUTE;
784 if (skb->ip_summed == CHECKSUM_PARTIAL)
785 config0 |= UDP_CHECKSUM | TCP_CHECKSUM;
786
787 len0 = skb->len;
788
789 /* fragments */
790 for (i = 0; i < nr_frags; i++) {
791 struct skb_frag_struct *frag;
792 void *addr;
793
794 index = (index + 1) % TX_DESCS;
795
796 frag = &skb_shinfo(skb)->frags[i];
797 addr = page_address(skb_frag_page(frag)) + frag->page_offset;
798
799 eth_set_desc(tx_ring, index, index_last, addr, frag->size,
800 config0, pmap);
801 }
802
803 if (nr_frags)
804 len0 = skb->len - skb->data_len;
805
806 skb_walk_frags(skb, skb1) {
807 index = (index + 1) % TX_DESCS;
808 len0 -= skb1->len;
809
810 eth_set_desc(tx_ring, index, index_last, skb1->data, skb1->len,
811 config0, pmap);
812 }
813
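/* Fill the head descriptor last: eth_set_desc() publishes config0 only
   after a wmb(), so the hardware sees FIRST_SEGMENT once the rest of
   the chain is already in place. */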
814 tx_ring->buff_tab[index0] = skb;
815 eth_set_desc(tx_ring, index0, index_last, skb->data, len0,
816 config0 | FIRST_SEGMENT, pmap);
817
818 wmb();
819
820 spin_lock(&tx_lock);
821 tx_ring->num_used += nr_desc + 1;
822 spin_unlock(&tx_lock);
823
824 dev->stats.tx_packets++;
825 dev->stats.tx_bytes += skb->len;
826
827 enable_tx_dma(sw);
828
829 return NETDEV_TX_OK;
830 }
831
832 static int eth_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
833 {
834 struct port *port = netdev_priv(dev);
835
836 if (!netif_running(dev))
837 return -EINVAL;
838 return phy_mii_ioctl(port->phydev, req, cmd);
839 }
840
841 /* ethtool support */
842
843 static void cns3xxx_get_drvinfo(struct net_device *dev,
844 struct ethtool_drvinfo *info)
845 {
846 strcpy(info->driver, DRV_NAME);
847 strcpy(info->bus_info, "internal");
848 }
849
850 static int cns3xxx_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
851 {
852 struct port *port = netdev_priv(dev);
853 return phy_ethtool_gset(port->phydev, cmd);
854 }
855
856 static int cns3xxx_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
857 {
858 struct port *port = netdev_priv(dev);
859 return phy_ethtool_sset(port->phydev, cmd);
860 }
861
862 static int cns3xxx_nway_reset(struct net_device *dev)
863 {
864 struct port *port = netdev_priv(dev);
865 return phy_start_aneg(port->phydev);
866 }
867
868 static struct ethtool_ops cns3xxx_ethtool_ops = {
869 .get_drvinfo = cns3xxx_get_drvinfo,
870 .get_settings = cns3xxx_get_settings,
871 .set_settings = cns3xxx_set_settings,
872 .nway_reset = cns3xxx_nway_reset,
873 .get_link = ethtool_op_get_link,
874 };
875
876
877 static int init_rings(struct sw *sw)
878 {
879 int i;
880 struct _rx_ring *rx_ring = sw->rx_ring;
881 struct _tx_ring *tx_ring = sw->tx_ring;
882
883 __raw_writel(0, &sw->regs->fs_dma_ctrl0);
884 __raw_writel(TS_SUSPEND | FS_SUSPEND, &sw->regs->dma_auto_poll_cfg);
885 __raw_writel(QUEUE_THRESHOLD, &sw->regs->dma_ring_ctrl);
886 __raw_writel(CLR_FS_STATE | QUEUE_THRESHOLD, &sw->regs->dma_ring_ctrl);
887
888 __raw_writel(QUEUE_THRESHOLD, &sw->regs->dma_ring_ctrl);
889
890 if (!(rx_dma_pool = dma_pool_create(DRV_NAME, NULL,
891 RX_POOL_ALLOC_SIZE, 32, 0)))
892 return -ENOMEM;
893
894 if (!(rx_ring->desc = dma_pool_alloc(rx_dma_pool, GFP_KERNEL,
895 &rx_ring->phys_addr)))
896 return -ENOMEM;
897 memset(rx_ring->desc, 0, RX_POOL_ALLOC_SIZE);
898
899 /* Setup RX buffers */
900 for (i = 0; i < RX_DESCS; i++) {
901 struct rx_desc *desc = &(rx_ring)->desc[i];
902 void *buf;
903
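/* one-time init path, so the zeroing cost of kzalloc() is acceptable
   here; the hot refill path in cns3xxx_alloc_rx_buf() uses kmalloc() */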
904 buf = kzalloc(RX_SEGMENT_ALLOC_SIZE, GFP_KERNEL);
905 if (!buf)
906 return -ENOMEM;
907
908 desc->sdl = RX_SEGMENT_MRU;
909 if (i == (RX_DESCS - 1))
910 desc->eor = 1;
911 desc->fsd = 1;
912 desc->lsd = 1;
913
914 desc->sdp = dma_map_single(NULL, buf + SKB_HEAD_ALIGN,
915 RX_SEGMENT_MRU, DMA_FROM_DEVICE);
916 if (dma_mapping_error(NULL, desc->sdp))
917 return -EIO;
918
919 rx_ring->buff_tab[i] = buf;
920 rx_ring->phys_tab[i] = desc->sdp;
921 desc->cown = 0;
922 }
923 __raw_writel(rx_ring->phys_addr, &sw->regs->fs_desc_ptr0);
924 __raw_writel(rx_ring->phys_addr, &sw->regs->fs_desc_base_addr0);
925
926 if (!(tx_dma_pool = dma_pool_create(DRV_NAME, NULL,
927 TX_POOL_ALLOC_SIZE, 32, 0)))
928 return -ENOMEM;
929
930 if (!(tx_ring->desc = dma_pool_alloc(tx_dma_pool, GFP_KERNEL,
931 &tx_ring->phys_addr)))
932 return -ENOMEM;
933 memset(tx_ring->desc, 0, TX_POOL_ALLOC_SIZE);
934
935 /* Setup TX buffers */
936 for (i = 0; i < TX_DESCS; i++) {
937 struct tx_desc *desc = &(tx_ring)->desc[i];
938 tx_ring->buff_tab[i] = 0;
939
940 if (i == (TX_DESCS - 1))
941 desc->eor = 1;
942 desc->cown = 1;
943 }
944 __raw_writel(tx_ring->phys_addr, &sw->regs->ts_desc_ptr0);
945 __raw_writel(tx_ring->phys_addr, &sw->regs->ts_desc_base_addr0);
946
947 return 0;
948 }
949
950 static void destroy_rings(struct sw *sw)
951 {
952 int i;
953 if (sw->rx_ring->desc) {
954 for (i = 0; i < RX_DESCS; i++) {
955 struct _rx_ring *rx_ring = sw->rx_ring;
956 struct rx_desc *desc = &(rx_ring)->desc[i];
957 struct sk_buff *skb = sw->rx_ring->buff_tab[i];
958
959 if (!skb)
960 continue;
961
962 dma_unmap_single(NULL, desc->sdp, RX_SEGMENT_MRU,
963 DMA_FROM_DEVICE);
964 dev_kfree_skb(skb);
965 }
966 dma_pool_free(rx_dma_pool, sw->rx_ring->desc, sw->rx_ring->phys_addr);
967 dma_pool_destroy(rx_dma_pool);
968 rx_dma_pool = 0;
969 sw->rx_ring->desc = 0;
970 }
971 if (sw->tx_ring->desc) {
972 for (i = 0; i < TX_DESCS; i++) {
973 struct _tx_ring *tx_ring = sw->tx_ring;
974 struct tx_desc *desc = &(tx_ring)->desc[i];
975 struct sk_buff *skb = sw->tx_ring->buff_tab[i];
976 if (skb) {
977 dma_unmap_single(NULL, desc->sdp,
978 skb->len, DMA_TO_DEVICE);
979 dev_kfree_skb(skb);
980 }
981 }
982 dma_pool_free(tx_dma_pool, sw->tx_ring->desc, sw->tx_ring->phys_addr);
983 dma_pool_destroy(tx_dma_pool);
984 tx_dma_pool = 0;
985 sw->tx_ring->desc = 0;
986 }
987 }
988
989 static int eth_open(struct net_device *dev)
990 {
991 struct port *port = netdev_priv(dev);
992 struct sw *sw = port->sw;
993 u32 temp;
994
995 port->speed = 0; /* force "link up" message */
996 phy_start(port->phydev);
997
998 netif_start_queue(dev);
999
1000 if (!ports_open) {
1001 request_irq(IRQ_CNS3XXX_SW_R0RXC, eth_rx_irq, IRQF_SHARED, "gig_switch", napi_dev);
1002 request_irq(IRQ_CNS3XXX_SW_STATUS, eth_stat_irq, IRQF_SHARED, "gig_stat", napi_dev);
1003 napi_enable(&sw->napi);
1004 netif_start_queue(napi_dev);
1005
1006 __raw_writel(~(MAC0_STATUS_CHANGE | MAC1_STATUS_CHANGE | MAC2_STATUS_CHANGE |
1007 MAC0_RX_ERROR | MAC1_RX_ERROR | MAC2_RX_ERROR), &sw->regs->intr_mask);
1008
1009 temp = __raw_readl(&sw->regs->mac_cfg[2]);
1010 temp &= ~(PORT_DISABLE);
1011 __raw_writel(temp, &sw->regs->mac_cfg[2]);
1012
1013 temp = __raw_readl(&sw->regs->dma_auto_poll_cfg);
1014 temp &= ~(TS_SUSPEND | FS_SUSPEND);
1015 __raw_writel(temp, &sw->regs->dma_auto_poll_cfg);
1016
1017 enable_rx_dma(sw);
1018 }
1019 temp = __raw_readl(&sw->regs->mac_cfg[port->id]);
1020 temp &= ~(PORT_DISABLE);
1021 __raw_writel(temp, &sw->regs->mac_cfg[port->id]);
1022
1023 ports_open++;
1024 netif_carrier_on(dev);
1025
1026 return 0;
1027 }
1028
1029 static int eth_close(struct net_device *dev)
1030 {
1031 struct port *port = netdev_priv(dev);
1032 struct sw *sw = port->sw;
1033 u32 temp;
1034
1035 ports_open--;
1036
1037 temp = __raw_readl(&sw->regs->mac_cfg[port->id]);
1038 temp |= (PORT_DISABLE);
1039 __raw_writel(temp, &sw->regs->mac_cfg[port->id]);
1040
1041 netif_stop_queue(dev);
1042
1043 phy_stop(port->phydev);
1044
1045 if (!ports_open) {
1046 disable_irq(IRQ_CNS3XXX_SW_R0RXC);
1047 free_irq(IRQ_CNS3XXX_SW_R0RXC, napi_dev);
1048 disable_irq(IRQ_CNS3XXX_SW_STATUS);
1049 free_irq(IRQ_CNS3XXX_SW_STATUS, napi_dev);
1050 napi_disable(&sw->napi);
1051 netif_stop_queue(napi_dev);
1052 temp = __raw_readl(&sw->regs->mac_cfg[2]);
1053 temp |= (PORT_DISABLE);
1054 __raw_writel(temp, &sw->regs->mac_cfg[2]);
1055
1056 __raw_writel(TS_SUSPEND | FS_SUSPEND,
1057 &sw->regs->dma_auto_poll_cfg);
1058 }
1059
1060 netif_carrier_off(dev);
1061 return 0;
1062 }
1063
1064 static void eth_rx_mode(struct net_device *dev)
1065 {
1066 struct port *port = netdev_priv(dev);
1067 struct sw *sw = port->sw;
1068 u32 temp;
1069
1070 temp = __raw_readl(&sw->regs->mac_glob_cfg);
1071
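/* the promiscuous-mode bits in mac_glob_cfg are indexed by physical
   MAC (MAC2 at bit PROMISC_OFFSET + 2), not by logical port id */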
1072 if (dev->flags & IFF_PROMISC) {
1073 if (port->id == 3)
1074 temp |= ((1 << 2) << PROMISC_OFFSET);
1075 else
1076 temp |= ((1 << port->id) << PROMISC_OFFSET);
1077 } else {
1078 if (port->id == 3)
1079 temp &= ~((1 << 2) << PROMISC_OFFSET);
1080 else
1081 temp &= ~((1 << port->id) << PROMISC_OFFSET);
1082 }
1083 __raw_writel(temp, &sw->regs->mac_glob_cfg);
1084 }
1085
1086 static int eth_set_mac(struct net_device *netdev, void *p)
1087 {
1088 struct port *port = netdev_priv(netdev);
1089 struct sw *sw = port->sw;
1090 struct sockaddr *addr = p;
1091 u32 cycles = 0;
1092
1093 if (!is_valid_ether_addr(addr->sa_data))
1094 return -EADDRNOTAVAIL;
1095
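/*
 * ARL table update sequence (inferred from the bit usage below):
 * arl_ctrl[0] selects the port, arl_ctrl[1] and arl_ctrl[2] hold the
 * 48-bit MAC address plus flags, writing bit 19 of arl_vlan_cmd starts
 * the command, and bit 21 signals completion.
 */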
1096 /* Invalidate old ARL Entry */
1097 if (port->id == 3)
1098 __raw_writel((port->id << 16) | (0x4 << 9), &sw->regs->arl_ctrl[0]);
1099 else
1100 __raw_writel(((port->id + 1) << 16) | (0x4 << 9), &sw->regs->arl_ctrl[0]);
1101 __raw_writel( ((netdev->dev_addr[0] << 24) | (netdev->dev_addr[1] << 16) |
1102 (netdev->dev_addr[2] << 8) | (netdev->dev_addr[3])),
1103 &sw->regs->arl_ctrl[1]);
1104
1105 __raw_writel( ((netdev->dev_addr[4] << 24) | (netdev->dev_addr[5] << 16) |
1106 (1 << 1)),
1107 &sw->regs->arl_ctrl[2]);
1108 __raw_writel((1 << 19), &sw->regs->arl_vlan_cmd);
1109
1110 while (((__raw_readl(&sw->regs->arl_vlan_cmd) & (1 << 21)) == 0)
1111 && cycles < 5000) {
1112 udelay(1);
1113 cycles++;
1114 }
1115
1116 cycles = 0;
1117 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
1118
1119 if (port->id == 3)
1120 __raw_writel((port->id << 16) | (0x4 << 9), &sw->regs->arl_ctrl[0]);
1121 else
1122 __raw_writel(((port->id + 1) << 16) | (0x4 << 9), &sw->regs->arl_ctrl[0]);
1123 __raw_writel( ((addr->sa_data[0] << 24) | (addr->sa_data[1] << 16) |
1124 (addr->sa_data[2] << 8) | (addr->sa_data[3])),
1125 &sw->regs->arl_ctrl[1]);
1126
1127 __raw_writel( ((addr->sa_data[4] << 24) | (addr->sa_data[5] << 16) |
1128 (7 << 4) | (1 << 1)), &sw->regs->arl_ctrl[2]);
1129 __raw_writel((1 << 19), &sw->regs->arl_vlan_cmd);
1130
1131 while (((__raw_readl(&sw->regs->arl_vlan_cmd) & (1 << 21)) == 0)
1132 && cycles < 5000) {
1133 udelay(1);
1134 cycles++;
1135 }
1136 return 0;
1137 }
1138
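/* the switch accepts frames up to 9600 bytes (see the phy_auto_addr
   setup in eth_init_one()), so a 9500-byte MTU leaves room for the
   Ethernet header, VLAN tags and FCS */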
1139 static int cns3xxx_change_mtu(struct net_device *dev, int new_mtu)
1140 {
1141 if (new_mtu > MAX_MTU)
1142 return -EINVAL;
1143
1144 dev->mtu = new_mtu;
1145 return 0;
1146 }
1147
1148 static const struct net_device_ops cns3xxx_netdev_ops = {
1149 .ndo_open = eth_open,
1150 .ndo_stop = eth_close,
1151 .ndo_start_xmit = eth_xmit,
1152 .ndo_set_rx_mode = eth_rx_mode,
1153 .ndo_do_ioctl = eth_ioctl,
1154 .ndo_change_mtu = cns3xxx_change_mtu,
1155 .ndo_set_mac_address = eth_set_mac,
1156 .ndo_validate_addr = eth_validate_addr,
1157 };
1158
1159 static int eth_init_one(struct platform_device *pdev)
1160 {
1161 int i;
1162 struct port *port;
1163 struct sw *sw;
1164 struct net_device *dev;
1165 struct cns3xxx_plat_info *plat = pdev->dev.platform_data;
1166 u32 regs_phys;
1167 char phy_id[MII_BUS_ID_SIZE + 3];
1168 int err;
1169 u32 temp;
1170
1171 if (!(napi_dev = alloc_etherdev(sizeof(struct sw))))
1172 return -ENOMEM;
1173 strcpy(napi_dev->name, "switch%d");
1174 napi_dev->features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_FRAGLIST;
1175
1176 SET_NETDEV_DEV(napi_dev, &pdev->dev);
1177 sw = netdev_priv(napi_dev);
1178 memset(sw, 0, sizeof(struct sw));
1179 sw->regs = (struct switch_regs __iomem *)CNS3XXX_SWITCH_BASE_VIRT;
1180 regs_phys = CNS3XXX_SWITCH_BASE;
1181 sw->mem_res = request_mem_region(regs_phys, REGS_SIZE, napi_dev->name);
1182 if (!sw->mem_res) {
1183 err = -EBUSY;
1184 goto err_free;
1185 }
1186
1187 temp = __raw_readl(&sw->regs->phy_auto_addr);
1188 temp |= (3 << 30); /* maximum frame length: 9600 bytes */
1189 __raw_writel(temp, &sw->regs->phy_auto_addr);
1190
1191 for (i = 0; i < 4; i++) {
1192 temp = __raw_readl(&sw->regs->mac_cfg[i]);
1193 temp |= (PORT_DISABLE);
1194 __raw_writel(temp, &sw->regs->mac_cfg[i]);
1195 }
1196
1197 temp = PORT_DISABLE;
1198 __raw_writel(temp, &sw->regs->mac_cfg[2]);
1199
1200 temp = __raw_readl(&sw->regs->vlan_cfg);
1201 temp |= NIC_MODE | VLAN_UNAWARE;
1202 __raw_writel(temp, &sw->regs->vlan_cfg);
1203
1204 __raw_writel(UNKNOWN_VLAN_TO_CPU |
1205 CRC_STRIPPING, &sw->regs->mac_glob_cfg);
1206
1207 if (!(sw->rx_ring = kmalloc(sizeof(struct _rx_ring), GFP_KERNEL))) {
1208 err = -ENOMEM;
1209 goto err_free;
1210 }
1211 memset(sw->rx_ring, 0, sizeof(struct _rx_ring));
1212
1213 if (!(sw->tx_ring = kmalloc(sizeof(struct _tx_ring), GFP_KERNEL))) {
1214 err = -ENOMEM;
1215 goto err_free_rx;
1216 }
1217 memset(sw->tx_ring, 0, sizeof(struct _tx_ring));
1218
1219 if ((err = init_rings(sw)) != 0) {
1220 destroy_rings(sw);
1221 err = -ENOMEM;
1222 goto err_free_rings;
1223 }
1224 platform_set_drvdata(pdev, napi_dev);
1225
1226 netif_napi_add(napi_dev, &sw->napi, eth_poll, NAPI_WEIGHT);
1227
1228 for (i = 0; i < 3; i++) {
1229 if (!(plat->ports & (1 << i))) {
1230 continue;
1231 }
1232
1233 if (!(dev = alloc_etherdev(sizeof(struct port)))) {
1234 goto free_ports;
1235 }
1236
1237 port = netdev_priv(dev);
1238 port->netdev = dev;
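/* mac_cfg[2] is the CPU-facing port (managed in eth_open()/eth_close()),
   so the third external MAC is addressed as switch port 3 */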
1239 if (i == 2)
1240 port->id = 3;
1241 else
1242 port->id = i;
1243 port->sw = sw;
1244
1245 temp = __raw_readl(&sw->regs->mac_cfg[port->id]);
1246 temp |= (PORT_DISABLE | PORT_BLOCK_STATE | PORT_LEARN_DIS);
1247 __raw_writel(temp, &sw->regs->mac_cfg[port->id]);
1248
1249 dev->netdev_ops = &cns3xxx_netdev_ops;
1250 dev->ethtool_ops = &cns3xxx_ethtool_ops;
1251 dev->tx_queue_len = 1000;
1252 dev->features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_FRAGLIST;
1253
1254 switch_port_tab[port->id] = port;
1255 memcpy(dev->dev_addr, &plat->hwaddr[i], ETH_ALEN);
1256
1257 snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, "0", plat->phy[i]);
1258 port->phydev = phy_connect(dev, phy_id, &cns3xxx_adjust_link, 0,
1259 PHY_INTERFACE_MODE_RGMII);
1260 if (IS_ERR(port->phydev)) {
1261 switch_port_tab[port->id] = 0;
1262 free_netdev(dev);
1263 goto free_ports;
1264 }
1265
1266 port->phydev->irq = PHY_IGNORE_INTERRUPT;
1267
1268 if ((err = register_netdev(dev))) {
1269 phy_disconnect(port->phydev);
1270 switch_port_tab[port->id] = 0;
1271 free_netdev(dev);
1272 goto free_ports;
1273 }
1274
1275 printk(KERN_INFO "%s: RGMII PHY %i on cns3xxx Switch\n", dev->name, plat->phy[i]);
1276 netif_carrier_off(dev);
1277 dev = 0;
1278 }
1279
1280 return 0;
1281
1282 free_ports:
1283 err = -ENOMEM;
1284 for (--i; i >= 0; i--) {
1285 if (switch_port_tab[i]) {
1286 port = switch_port_tab[i];
1287 dev = port->netdev;
1288 unregister_netdev(dev);
1289 phy_disconnect(port->phydev);
1290 switch_port_tab[i] = 0;
1291 free_netdev(dev);
1292 }
1293 }
1294 err_free_rings:
1295 kfree(sw->tx_ring);
1296 err_free_rx:
1297 kfree(sw->rx_ring);
1298 err_free:
1299 free_netdev(napi_dev);
1300 return err;
1301 }
1302
1303 static int eth_remove_one(struct platform_device *pdev)
1304 {
1305 struct net_device *dev = platform_get_drvdata(pdev);
1306 struct sw *sw = netdev_priv(dev);
1307 int i;
1308 destroy_rings(sw);
1309
1310 for (i = 3; i >= 0; i--) {
1311 if (switch_port_tab[i]) {
1312 struct port *port = switch_port_tab[i];
1313 struct net_device *dev = port->netdev;
1314 unregister_netdev(dev);
1315 phy_disconnect(port->phydev);
1316 switch_port_tab[i] = 0;
1317 free_netdev(dev);
1318 }
1319 }
1320
1321 release_resource(sw->mem_res);
1322 free_netdev(napi_dev);
1323 return 0;
1324 }
1325
1326 static struct platform_driver cns3xxx_eth_driver = {
1327 .driver.name = DRV_NAME,
1328 .probe = eth_init_one,
1329 .remove = eth_remove_one,
1330 };
1331
1332 static int __init eth_init_module(void)
1333 {
1334 int err;
1335 if ((err = cns3xxx_mdio_register()))
1336 return err;
1337 return platform_driver_register(&cns3xxx_eth_driver);
1338 }
1339
1340 static void __exit eth_cleanup_module(void)
1341 {
1342 platform_driver_unregister(&cns3xxx_eth_driver);
1343 cns3xxx_mdio_remove();
1344 }
1345
1346 module_init(eth_init_module);
1347 module_exit(eth_cleanup_module);
1348
1349 MODULE_AUTHOR("Chris Lang");
1350 MODULE_DESCRIPTION("Cavium CNS3xxx Ethernet driver");
1351 MODULE_LICENSE("GPL v2");
1352 MODULE_ALIAS("platform:cns3xxx_eth");