1 From 51e66f289f280a33bb17047717d2e6539a2917e1 Mon Sep 17 00:00:00 2001
2 From: Alison Wang <b18965@freescale.com>
3 Date: Thu, 4 Aug 2011 09:59:44 +0800
4 Subject: [PATCH 21/52] Add ethernet switch driver for MCF54418
6 Add ethernet switch driver support for MCF54418.
8 Signed-off-by: Alison Wang <b18965@freescale.com>
10 arch/m68k/coldfire/m5441x/l2switch.c | 284 +++
11 arch/m68k/include/asm/mcfswitch.h | 324 +++
12 drivers/net/Kconfig | 8 +
13 drivers/net/Makefile | 1 +
14 drivers/net/modelo_switch.c | 4293 ++++++++++++++++++++++++++++++++++
15 drivers/net/modelo_switch.h | 1141 +++++++++
16 include/linux/fsl_devices.h | 17 +
18 8 files changed, 6076 insertions(+), 0 deletions(-)
19 create mode 100644 arch/m68k/coldfire/m5441x/l2switch.c
20 create mode 100644 arch/m68k/include/asm/mcfswitch.h
21 create mode 100644 drivers/net/modelo_switch.c
22 create mode 100644 drivers/net/modelo_switch.h
25 +++ b/arch/m68k/coldfire/m5441x/l2switch.c
30 + * Sub-architecture dependent initialization code for the Freescale
31 + * 5441X L2 Switch module.
33 + * Copyright (C) 2010-2011 Freescale Semiconductor, Inc. All Rights Reserved.
34 + * ShrekWu B16972@freescale.com
37 + * This program is free software; you can redistribute it and/or modify it
38 + * under the terms of the GNU General Public License as published by the
39 + * Free Software Foundation; either version 2 of the License, or (at your
40 + * option) any later version.
42 + * This program is distributed in the hope that it will be useful,
43 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
44 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
45 + * GNU General Public License for more details.
47 + * You should have received a copy of the GNU General Public License
48 + * along with this program; if not, write to the Free Software
49 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
51 +#include <linux/kernel.h>
52 +#include <linux/sched.h>
53 +#include <linux/param.h>
54 +#include <linux/init.h>
55 +#include <linux/interrupt.h>
56 +#include <linux/device.h>
57 +#include <linux/platform_device.h>
58 +#include <linux/fsl_devices.h>
60 +#include <asm/traps.h>
61 +#include <asm/machdep.h>
62 +#include <asm/coldfire.h>
63 +#include <asm/mcfswitch.h>
64 +#include <asm/mcfsim.h>
66 +static unsigned char switch_mac_default[] = {
67 + 0x00, 0x04, 0x9F, 0x00, 0xB3, 0x49,
70 +static unsigned char switch_mac_addr[6];
72 +static void switch_request_intrs(struct net_device *dev,
73 + irqreturn_t switch_net_irq_handler(int irq, void *private),
74 + void *irq_privatedata)
76 + struct switch_enet_private *fep;
78 + static const struct idesc {
82 + /*{ "esw_isr(EBERR)", 38 },*/
83 + { "esw_isr(RxBuffer)", 39 },
84 + { "esw_isr(RxFrame)", 40 },
85 + { "esw_isr(TxBuffer)", 41 },
86 + { "esw_isr(TxFrame)", 42 },
87 + { "esw_isr(QM)", 43 },
88 + { "esw_isr(P0OutputDiscard)", 44 },
89 + { "esw_isr(P1OutputDiscard)", 45 },
90 + { "esw_isr(P2OutputDiscard)", 46 },
91 + { "esw_isr(LearningRecord)", 47 },
95 + fep = netdev_priv(dev);
96 + /*interruption L2 ethernet SWITCH */
99 + /* Setup interrupt handlers. */
100 + for (idp = id; idp->name; idp++) {
101 + if (request_irq(b+idp->irq,
102 + switch_net_irq_handler, IRQF_DISABLED,
103 + idp->name, irq_privatedata) != 0)
104 + printk(KERN_ERR "FEC: Could not alloc %s IRQ(%d)!\n",
105 + idp->name, b+idp->irq);
108 + /* Configure RMII */
109 + MCF_GPIO_PAR_FEC = (MCF_GPIO_PAR_FEC &
110 + MCF_GPIO_PAR_FEC_FEC_MASK) |
111 + MCF_GPIO_PAR_FEC_FEC_RMII0FUL_1FUL;
114 + (MCF_GPIO_PAR_FEC &
115 + MCF_GPIO_PAR_FEC_FEC_MASK) |
116 + MCF_GPIO_PAR_FEC_FEC_RMII0FUL_1FUL;
118 + MCF_GPIO_SRCR_FEC = 0x0F;
120 + MCF_GPIO_PAR_SIMP0H =
121 + (MCF_GPIO_PAR_SIMP0H &
122 + MCF_GPIO_PAR_SIMP0H_DAT_MASK) |
123 + MCF_GPIO_PAR_SIMP0H_DAT_GPIO;
127 + MCF_GPIO_PDDR_G4_MASK) |
128 + MCF_GPIO_PDDR_G4_OUTPUT;
132 + MCF_GPIO_PODR_G4_MASK);
135 +static void switch_set_mii(struct net_device *dev)
137 + struct switch_enet_private *fep = netdev_priv(dev);
138 + volatile switch_t *fecp;
142 + MCF_FEC_RCR0 = (MCF_FEC_RCR_PROM | MCF_FEC_RCR_RMII_MODE |
143 + MCF_FEC_RCR_MAX_FL(1522) | MCF_FEC_RCR_CRC_FWD);
144 + MCF_FEC_RCR1 = (MCF_FEC_RCR_PROM | MCF_FEC_RCR_RMII_MODE |
145 + MCF_FEC_RCR_MAX_FL(1522) | MCF_FEC_RCR_CRC_FWD);
147 + MCF_FEC_TCR0 = MCF_FEC_TCR_FDEN;
148 + MCF_FEC_TCR1 = MCF_FEC_TCR_FDEN;
150 +#ifdef MODELO_ENHANCE_BUFFER
151 + MCF_FEC_ECR0 = MCF_FEC_ECR_ETHER_EN | MCF_FEC_ECR_ENA_1588;
152 + MCF_FEC_ECR1 = MCF_FEC_ECR_ETHER_EN | MCF_FEC_ECR_ENA_1588;
153 +#else /*legacy buffer*/
154 + MCF_FEC_ECR0 = MCF_FEC_ECR_ETHER_EN;
155 + MCF_FEC_ECR1 = MCF_FEC_ECR_ETHER_EN;
158 + * Set MII speed to 2.5 MHz
160 + MCF_FEC_MSCR0 = ((((MCF_CLK / 2) / (2500000 / 10)) + 5) / 10) * 2;
161 + MCF_FEC_MSCR1 = ((((MCF_CLK / 2) / (2500000 / 10)) + 5) / 10) * 2;
165 +static void switch_get_mac(struct net_device *dev)
167 + struct switch_enet_private *fep = netdev_priv(dev);
168 + volatile switch_t *fecp;
169 + unsigned char *iap;
173 + if (FEC_FLASHMAC) {
175 + * Get MAC address from FLASH.
176 + * If it is all 1's or 0's, use the default.
178 + iap = FEC_FLASHMAC;
179 + if ((iap[0] == 0) && (iap[1] == 0) && (iap[2] == 0) &&
180 + (iap[3] == 0) && (iap[4] == 0) && (iap[5] == 0))
181 + iap = switch_mac_default;
182 + if ((iap[0] == 0xff) && (iap[1] == 0xff) &&
183 + (iap[2] == 0xff) && (iap[3] == 0xff) &&
184 + (iap[4] == 0xff) && (iap[5] == 0xff))
185 + iap = switch_mac_default;
188 + iap = &switch_mac_addr[0];
190 + if ((iap[0] == 0) && (iap[1] == 0) && (iap[2] == 0) &&
191 + (iap[3] == 0) && (iap[4] == 0) && (iap[5] == 0))
192 + iap = switch_mac_default;
193 + if ((iap[0] == 0xff) && (iap[1] == 0xff) &&
194 + (iap[2] == 0xff) && (iap[3] == 0xff) &&
195 + (iap[4] == 0xff) && (iap[5] == 0xff))
196 + iap = switch_mac_default;
199 + memcpy(dev->dev_addr, iap, ETH_ALEN);
200 + /* Adjust MAC if using default MAC address */
201 + if (iap == switch_mac_default)
202 + dev->dev_addr[ETH_ALEN-1] = switch_mac_default[ETH_ALEN-1] +
206 +static void switch_enable_phy_intr(void)
210 +static void switch_disable_phy_intr(void)
214 +static void switch_phy_ack_intr(void)
218 +static void switch_localhw_setup(void)
222 +static void switch_uncache(unsigned long addr)
226 +static void switch_platform_flush_cache(void)
231 + * Define the fixed address of the FEC hardware.
233 +static unsigned int switch_platform_hw[] = {
238 +static struct coldfire_switch_platform_data mcf5441x_switch_data = {
240 + .switch_hw = switch_platform_hw,
241 + .request_intrs = switch_request_intrs,
242 + .set_mii = switch_set_mii,
243 + .get_mac = switch_get_mac,
244 + .enable_phy_intr = switch_enable_phy_intr,
245 + .disable_phy_intr = switch_disable_phy_intr,
246 + .phy_ack_intr = switch_phy_ack_intr,
247 + .localhw_setup = switch_localhw_setup,
248 + .uncache = switch_uncache,
249 + .platform_flush_cache = switch_platform_flush_cache,
252 +static struct resource l2switch_coldfire_resources[] = {
254 + .start = 0xFC0DC000,
256 + .flags = IORESOURCE_MEM,
259 + .start = (64 + 64 + 64 + 38),
260 + .end = (64 + 64 + 64 + 48),
261 + .flags = IORESOURCE_IRQ,
264 + .start = 0xFC0E0000,
266 + .flags = IORESOURCE_MEM,
270 +static struct platform_device l2switch_coldfire_device = {
271 + .name = "coldfire-switch",
273 + .resource = l2switch_coldfire_resources,
274 + .num_resources = ARRAY_SIZE(l2switch_coldfire_resources),
276 + .platform_data = &mcf5441x_switch_data,
277 + .coherent_dma_mask = ~0, /* $$$ REVISIT */
282 +static int __init mcf5441x_switch_dev_init(void)
286 + retval = platform_device_register(&l2switch_coldfire_device);
289 + printk(KERN_ERR "MCF5441x L2Switch: platform_device_register"
290 + " failed with code=%d\n", retval);
296 +static int __init param_switch_addr_setup(char *str)
301 + for (i = 0; i < 6; i++) {
302 + switch_mac_addr[i] = str ? simple_strtoul(str, &end, 16) : 0;
304 + str = (*end) ? end + 1 : end;
308 +__setup("switchaddr=", param_switch_addr_setup);
310 +arch_initcall(mcf5441x_switch_dev_init);
312 +++ b/arch/m68k/include/asm/mcfswitch.h
314 +/****************************************************************************/
317 + * mcfswitch -- L2 SWITCH Controller for Motorola ColdFire SoC
320 + * Copyright (C) 2010-2011 Freescale Semiconductor, Inc. All Rights Reserved.
322 + * This program is free software; you can redistribute it and/or modify it
323 + * under the terms of the GNU General Public License as published by the
324 + * Free Software Foundation; either version 2 of the License, or (at your
325 + * option) any later version.
327 + * This program is distributed in the hope that it will be useful,
328 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
329 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
330 + * GNU General Public License for more details.
332 + * You should have received a copy of the GNU General Public License
333 + * along with this program; if not, write to the Free Software
334 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
337 +/****************************************************************************/
340 +/****************************************************************************/
341 +#include <linux/netdevice.h>
342 +#include <linux/etherdevice.h>
343 +#include <linux/skbuff.h>
344 +#include <linux/spinlock.h>
345 +#include <linux/workqueue.h>
346 +#include <linux/platform_device.h>
347 +#include <asm/pgtable.h>
349 +#define FEC_FLASHMAC 0
350 +#define SWITCH_EPORT_NUMBER 2
352 +#ifdef CONFIG_SWITCH_DMA_USE_SRAM
353 +#define TX_RING_SIZE 8 /* Must be power of two */
354 +#define TX_RING_MOD_MASK 7 /* for this to work */
356 +#define TX_RING_SIZE 16 /* Must be power of two */
357 +#define TX_RING_MOD_MASK 15 /* for this to work */
360 +typedef struct l2switch_port_statistics_status {
361 + /*outgoing frames discarded due to transmit queue congestion*/
362 + unsigned long MCF_ESW_POQC;
363 + /*incoming frames discarded due to VLAN domain mismatch*/
364 + unsigned long MCF_ESW_PMVID;
365 + /*incoming frames discarded due to untagged discard*/
366 + unsigned long MCF_ESW_PMVTAG;
367 + /*incoming frames discarded due port is in blocking state*/
368 + unsigned long MCF_ESW_PBL;
369 +} esw_port_statistics_status;
371 +typedef struct l2switch {
372 + unsigned long ESW_REVISION;
373 + unsigned long ESW_SCRATCH;
374 + unsigned long ESW_PER;
375 + unsigned long reserved0[1];
376 + unsigned long ESW_VLANV;
377 + unsigned long ESW_DBCR;
378 + unsigned long ESW_DMCR;
379 + unsigned long ESW_BKLR;
380 + unsigned long ESW_BMPC;
381 + unsigned long ESW_MODE;
382 + unsigned long ESW_VIMSEL;
383 + unsigned long ESW_VOMSEL;
384 + unsigned long ESW_VIMEN;
385 + unsigned long ESW_VID;/*0x34*/
387 + unsigned long esw_reserved0[2];
388 + unsigned long ESW_MCR;/*0x40*/
389 + unsigned long ESW_EGMAP;
390 + unsigned long ESW_INGMAP;
391 + unsigned long ESW_INGSAL;
392 + unsigned long ESW_INGSAH;
393 + unsigned long ESW_INGDAL;
394 + unsigned long ESW_INGDAH;
395 + unsigned long ESW_ENGSAL;
396 + unsigned long ESW_ENGSAH;
397 + unsigned long ESW_ENGDAL;
398 + unsigned long ESW_ENGDAH;
399 + unsigned long ESW_MCVAL;/*0x6C*/
400 + /*from 0x70--0x7C*/
401 + unsigned long esw_reserved1[4];
402 + unsigned long ESW_MMSR;/*0x80*/
403 + unsigned long ESW_LMT;
404 + unsigned long ESW_LFC;
405 + unsigned long ESW_PCSR;
406 + unsigned long ESW_IOSR;
407 + unsigned long ESW_QWT;/*0x94*/
408 + unsigned long esw_reserved2[1];/*0x98*/
409 + unsigned long ESW_P0BCT;/*0x9C*/
411 + unsigned long esw_reserved3[7];
412 + unsigned long ESW_P0FFEN;/*0xBC*/
413 + unsigned long ESW_PSNP[8];
414 + unsigned long ESW_IPSNP[8];
415 + unsigned long ESW_PVRES[3];
416 + /*from 0x10C-0x13C*/
417 + unsigned long esw_reserved4[13];
418 + unsigned long ESW_IPRES;/*0x140*/
419 + /*from 0x144-0x17C*/
420 + unsigned long esw_reserved5[15];
422 + /*port0-port2 Priority Configuration 0xFC0D_C180-C188*/
423 + unsigned long ESW_PRES[3];
424 + /*from 0x18C-0x1FC*/
425 + unsigned long esw_reserved6[29];
427 + /*port0-port2 VLAN ID 0xFC0D_C200-C208*/
428 + unsigned long ESW_PID[3];
429 + /*from 0x20C-0x27C*/
430 + unsigned long esw_reserved7[29];
432 + /*port0-port2 VLAN domain resolution entry 0xFC0D_C280-C2FC*/
433 + unsigned long ESW_VRES[32];
435 + unsigned long ESW_DISCN;/*0x300*/
436 + unsigned long ESW_DISCB;
437 + unsigned long ESW_NDISCN;
438 + unsigned long ESW_NDISCB;/*0xFC0DC30C*/
439 + /*per port statistics 0xFC0DC310_C33C*/
440 + esw_port_statistics_status port_statistics_status[3];
441 + /*from 0x340-0x400*/
442 + unsigned long esw_reserved8[48];
444 + /*0xFC0DC400---0xFC0DC418*/
445 + /*unsigned long MCF_ESW_ISR;*/
446 + unsigned long switch_ievent; /* Interrupt event reg */
447 + /*unsigned long MCF_ESW_IMR;*/
448 + unsigned long switch_imask; /* Interrupt mask reg */
449 + /*unsigned long MCF_ESW_RDSR;*/
450 + unsigned long fec_r_des_start; /* Receive descriptor ring */
451 + /*unsigned long MCF_ESW_TDSR;*/
452 + unsigned long fec_x_des_start; /* Transmit descriptor ring */
453 + /*unsigned long MCF_ESW_MRBR;*/
454 + unsigned long fec_r_buff_size; /* Maximum receive buff size */
455 + /*unsigned long MCF_ESW_RDAR;*/
456 + unsigned long fec_r_des_active; /* Receive descriptor reg */
457 + /*unsigned long MCF_ESW_TDAR;*/
458 + unsigned long fec_x_des_active; /* Transmit descriptor reg */
459 + /*from 0x420-0x4FC*/
460 + unsigned long esw_reserved9[57];
462 + /*0xFC0DC500---0xFC0DC508*/
463 + unsigned long ESW_LREC0;
464 + unsigned long ESW_LREC1;
465 + unsigned long ESW_LSR;
468 +typedef struct _64bTableEntry {
469 + unsigned int lo; /* lower 32 bits */
470 + unsigned int hi; /* upper 32 bits */
471 +} AddrTable64bEntry;
473 +typedef struct l2switchaddrtable {
474 + AddrTable64bEntry eswTable64bEntry[2048];
477 +#define MCF_FEC_MSCR0 (*(volatile unsigned long *)(0xFC0D4044))
478 +#define MCF_FEC_MSCR1 (*(volatile unsigned long *)(0xFC0D8044))
479 +#define MCF_FEC_RCR0 (*(volatile unsigned long *)(0xFC0D4084))
480 +#define MCF_FEC_RCR1 (*(volatile unsigned long *)(0xFC0D8084))
481 +#define MCF_FEC_TCR0 (*(volatile unsigned long *)(0xFC0D40C4))
482 +#define MCF_FEC_TCR1 (*(volatile unsigned long *)(0xFC0D80C4))
483 +#define MCF_FEC_ECR0 (*(volatile unsigned long *)(0xFC0D4024))
484 +#define MCF_FEC_ECR1 (*(volatile unsigned long *)(0xFC0D8024))
486 +#define MCF_FEC_RCR_PROM (0x00000008)
487 +#define MCF_FEC_RCR_RMII_MODE (0x00000100)
488 +#define MCF_FEC_RCR_MAX_FL(x) (((x)&0x00003FFF)<<16)
489 +#define MCF_FEC_RCR_CRC_FWD (0x00004000)
491 +#define MCF_FEC_TCR_FDEN (0x00000004)
493 +#define MCF_FEC_ECR_ETHER_EN (0x00000002)
494 +#define MCF_FEC_ECR_ENA_1588 (0x00000010)
497 +typedef struct bufdesc {
498 + unsigned short cbd_sc; /* Control and status info */
499 + unsigned short cbd_datlen; /* Data length */
500 + unsigned long cbd_bufaddr; /* Buffer address */
501 +#ifdef MODELO_BUFFER
502 + unsigned long ebd_status;
503 + unsigned short length_proto_type;
504 + unsigned short payload_checksum;
506 + unsigned long timestamp;
507 + unsigned long reserverd_word1;
508 + unsigned long reserverd_word2;
512 +/* Forward declarations of some structures to support different PHYs
516 + void (*funct)(uint mii_reg, struct net_device *dev);
523 + const phy_cmd_t *config;
524 + const phy_cmd_t *startup;
525 + const phy_cmd_t *ack_int;
526 + const phy_cmd_t *shutdown;
529 +/* The switch buffer descriptors track the ring buffers. The rx_bd_base and
530 + * tx_bd_base always point to the base of the buffer descriptors. The
531 + * cur_rx and cur_tx point to the currently available buffer.
532 + * The dirty_tx tracks the current buffer that is being sent by the
533 + * controller. The cur_tx and dirty_tx are equal under both completely
534 + * empty and completely full conditions. The empty/ready indicator in
535 + * the buffer descriptor determines the actual condition.
537 +struct switch_enet_private {
538 + /* Hardware registers of the switch device */
539 + volatile switch_t *hwp;
540 + volatile eswAddrTable_t *hwentry;
542 + struct net_device *netdev;
543 + struct platform_device *pdev;
544 + /* The saved address of a sent-in-place packet/buffer, for skfree(). */
545 + unsigned char *tx_bounce[TX_RING_SIZE];
546 + struct sk_buff *tx_skbuff[TX_RING_SIZE];
550 + /* CPM dual port RAM relative addresses.
552 + cbd_t *rx_bd_base; /* Address of Rx and Tx buffers. */
554 + cbd_t *cur_rx, *cur_tx; /* The next free ring entry */
555 + cbd_t *dirty_tx; /* The ring entries to be free()ed. */
557 + /* hold while accessing the HW like ringbuffer for tx/rx but not MAC */
558 + spinlock_t hw_lock;
560 + /* hold while accessing the mii_list_t() elements */
561 + spinlock_t mii_lock;
562 + struct mii_bus *mdio_bus;
563 + struct phy_device *phydev[SWITCH_EPORT_NUMBER];
569 + phy_info_t const *phy;
570 + struct work_struct phy_task;
571 + volatile switch_t *phy_hwp;
573 + uint sequence_done;
574 + uint mii_phy_task_queued;
591 + /* --------------Statistics--------------------------- */
592 + /* when a new element deleted a element with in
593 + * a block due to lack of space */
594 + int atBlockOverflows;
595 + /* Peak number of valid entries in the address table */
597 + /* current number of valid entries in the address table */
599 + /* maximum entries within a block found
600 + * (updated within ageing)*/
601 + int atMaxEntriesPerBlock;
603 + /* -------------------ageing function------------------ */
604 + /* maximum age allowed for an entry */
606 + /* last LUT entry to block that was
607 + * inspected by the Ageing task*/
609 + /* last element within block inspected by the Ageing task */
610 + int ageBlockElemIdx;
611 + /* complete table has been processed by ageing process */
613 + /* delay setting */
615 + /* current delay Counter */
618 + /* ----------------timer related---------------------------- */
619 + /* current time (for timestamping) */
621 + /* flag set by timer when currTime changed
622 + * and cleared by serving function*/
625 + /* Timer for Aging */
626 + struct timer_list timer_aging;
627 + int learning_irqhandle_enable;
630 +struct switch_platform_private {
631 + struct platform_device *pdev;
633 + unsigned long quirks;
634 + int num_slots; /* Slots on controller */
635 + struct switch_enet_private *fep_host[0]; /* Pointers to hosts */
638 --- a/drivers/net/Kconfig
639 +++ b/drivers/net/Kconfig
640 @@ -1950,6 +1950,14 @@ config FEC
641 Say Y here if you want to use the built-in 10/100 Fast ethernet
642 controller on some Motorola ColdFire and Freescale i.MX processors.
644 +config MODELO_SWITCH
645 + bool "ethernet switch controller (on some ColdFire CPUs)"
646 + depends on !FEC && M5441X
648 + Say Y here if you want to use the built-in ethernet switch
649 + controller on some ColdFire processors.
650 + The Integrated Ethernet switch engine is compatible with
651 + 10/100 MAC-NET core.
654 bool "Second FEC ethernet controller (on some ColdFire CPUs)"
655 --- a/drivers/net/Makefile
656 +++ b/drivers/net/Makefile
657 @@ -127,6 +127,7 @@ ifeq ($(CONFIG_FEC_1588), y)
658 obj-$(CONFIG_FEC) += fec_1588.o
660 obj-$(CONFIG_FEC_548x) += fec_m547x.o
661 +obj-$(CONFIG_MODELO_SWITCH) += modelo_switch.o
662 obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx.o
663 ifeq ($(CONFIG_FEC_MPC52xx_MDIO),y)
664 obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx_phy.o
666 +++ b/drivers/net/modelo_switch.c
669 + * L2 switch Controller (Ethernet switch) driver for MCF5441x.
671 + * Copyright (C) 2010-2011 Freescale Semiconductor, Inc. All Rights Reserved.
672 + * Shrek Wu (B16972@freescale.com)
673 + * Alison Wang (b18965@freescale.com)
674 + * Jason Jin (Jason.jin@freescale.com)
676 + * This program is free software; you can redistribute it and/or modify it
677 + * under the terms of the GNU General Public License as published by the
678 + * Free Software Foundation; either version 2 of the License, or (at your
679 + * option) any later version.
682 +#include <linux/module.h>
683 +#include <linux/kernel.h>
684 +#include <linux/string.h>
685 +#include <linux/ptrace.h>
686 +#include <linux/errno.h>
687 +#include <linux/ioport.h>
688 +#include <linux/slab.h>
689 +#include <linux/interrupt.h>
690 +#include <linux/pci.h>
691 +#include <linux/init.h>
692 +#include <linux/delay.h>
693 +#include <linux/netdevice.h>
694 +#include <linux/etherdevice.h>
695 +#include <linux/skbuff.h>
696 +#include <linux/spinlock.h>
697 +#include <linux/workqueue.h>
698 +#include <linux/bitops.h>
699 +#include <linux/platform_device.h>
700 +#include <linux/fsl_devices.h>
701 +#include <linux/phy.h>
702 +#include <linux/kthread.h>
703 +#include <linux/syscalls.h>
704 +#include <linux/uaccess.h>
705 +#include <linux/io.h>
706 +#include <linux/signal.h>
708 +#include <asm/irq.h>
709 +#include <asm/pgtable.h>
710 +#include <asm/cacheflush.h>
711 +#include <asm/coldfire.h>
712 +#include <asm/mcfsim.h>
713 +#include "modelo_switch.h"
715 +#define SWITCH_MAX_PORTS 1
716 +#define CONFIG_FEC_SHARED_PHY
718 +/* Interrupt events/masks.
720 +#define FEC_ENET_HBERR ((uint)0x80000000) /* Heartbeat error */
721 +#define FEC_ENET_BABR ((uint)0x40000000) /* Babbling receiver */
722 +#define FEC_ENET_BABT ((uint)0x20000000) /* Babbling transmitter */
723 +#define FEC_ENET_GRA ((uint)0x10000000) /* Graceful stop complete */
724 +#define FEC_ENET_TXF ((uint)0x08000000) /* Full frame transmitted */
725 +#define FEC_ENET_TXB ((uint)0x04000000) /* A buffer was transmitted */
726 +#define FEC_ENET_RXF ((uint)0x02000000) /* Full frame received */
727 +#define FEC_ENET_RXB ((uint)0x01000000) /* A buffer was received */
728 +#define FEC_ENET_MII ((uint)0x00800000) /* MII interrupt */
729 +#define FEC_ENET_EBERR ((uint)0x00400000) /* SDMA bus error */
731 +static int switch_enet_open(struct net_device *dev);
732 +static int switch_enet_start_xmit(struct sk_buff *skb, struct net_device *dev);
733 +static irqreturn_t switch_enet_interrupt(int irq, void *dev_id);
734 +static void switch_enet_tx(struct net_device *dev);
735 +static void switch_enet_rx(struct net_device *dev);
736 +static int switch_enet_close(struct net_device *dev);
737 +static void set_multicast_list(struct net_device *dev);
738 +static void switch_restart(struct net_device *dev, int duplex);
739 +static void switch_stop(struct net_device *dev);
740 +static void switch_set_mac_address(struct net_device *dev);
744 +/* Make MII read/write commands for the FEC.
746 +#define mk_mii_read(REG) (0x60020000 | ((REG & 0x1f) << 18))
747 +#define mk_mii_write(REG, VAL) (0x50020000 | ((REG & 0x1f) << 18) | \
750 +/* Transmitter timeout.
752 +#define TX_TIMEOUT (2*HZ)
754 +/*last read entry from learning interface*/
756 +/* switch ports status */
757 +struct port_status ports_link_status;
759 +/* the user space pid, used to send the link change to user space */
762 +/* ----------------------------------------------------------------*/
764 + * Calculate Galois Field Arithmetic CRC for Polynom x^8+x^2+x+1.
765 + * It omits the final shift in of 8 zeroes a "normal" CRC would do
766 + * (getting the remainder).
768 + * Examples (hexadecimal values):<br>
769 + * 10-11-12-13-14-15 => CRC=0xc2
770 + * 10-11-cc-dd-ee-00 => CRC=0xe6
772 + * param: pmacaddress
773 + * A 6-byte array with the MAC address.
774 + * The first byte is the first byte transmitted
775 + * return The 8-bit CRC in bits 7:0
777 +int crc8_calc(unsigned char *pmacaddress)
787 + for (byt = 0; byt < 6; byt++) {
788 + inval = (((int)pmacaddress[byt]) & 0xff);
790 + * shift bit 0 to bit 8 so all our bits
791 + * travel through bit 8
792 + * (simplifies below calc)
796 + for (bit = 0; bit < 8; bit++) {
797 + /* next input bit comes into d7 after shift */
798 + crc |= inval & 0x100;
808 + /* upper bits are clean as we shifted in zeroes! */
812 +void read_atable(struct switch_enet_private *fep,
813 + int index, unsigned long *read_lo, unsigned long *read_hi)
815 + unsigned long atable_base = 0xFC0E0000;
817 + *read_lo = *((volatile unsigned long *)(atable_base + (index<<3)));
818 + *read_hi = *((volatile unsigned long *)(atable_base + (index<<3) + 4));
821 +void write_atable(struct switch_enet_private *fep,
822 + int index, unsigned long write_lo, unsigned long write_hi)
824 + unsigned long atable_base = 0xFC0E0000;
826 + *((volatile unsigned long *)(atable_base + (index<<3))) = write_lo;
827 + *((volatile unsigned long *)(atable_base + (index<<3) + 4)) = write_hi;
830 +/* Check if the Port Info FIFO has data available
831 + * for reading. 1 valid, 0 invalid*/
832 +int esw_portinfofifo_status(struct switch_enet_private *fep)
834 + volatile switch_t *fecp;
836 + return fecp->ESW_LSR;
839 +/* Initialize the Port Info FIFO. */
840 +void esw_portinfofifo_initialize(struct switch_enet_private *fep)
842 + volatile switch_t *fecp;
846 + /*disable all learn*/
847 + fecp->switch_imask &= (~MCF_ESW_IMR_LRN);
848 + /* remove all entries from FIFO */
849 + while (esw_portinfofifo_status(fep)) {
850 + /* read one data word */
851 + tmp = fecp->ESW_LREC0;
852 + tmp = fecp->ESW_LREC1;
857 +/* Read one element from the HW receive FIFO (Queue)
858 + * if available and return it.
859 + * return ms_HwPortInfo or null if no data is available
861 +eswPortInfo *esw_portinfofifo_read(struct switch_enet_private *fep)
863 + volatile switch_t *fecp;
867 + /* check learning record valid */
868 + if (fecp->ESW_LSR == 0)
871 + /*read word from FIFO*/
872 + g_info.maclo = fecp->ESW_LREC0;
874 + /*but verify that we actually did so
875 + * (0=no data available)*/
876 + if (g_info.maclo == 0)
879 + /* read 2nd word from FIFO */
880 + tmp = fecp->ESW_LREC1;
881 + g_info.machi = tmp & 0xffff;
882 + g_info.hash = (tmp >> 16) & 0xff;
883 + g_info.port = (tmp >> 24) & 0xf;
889 + * Clear complete MAC Look Up Table
891 +void esw_clear_atable(struct switch_enet_private *fep)
894 + for (index = 0; index < 2048; index++)
895 + write_atable(fep, index, 0, 0);
898 +void esw_dump_atable(struct switch_enet_private *fep)
901 + unsigned long read_lo, read_hi;
902 + for (index = 0; index < 2048; index++)
903 + read_atable(fep, index, &read_lo, &read_hi);
907 + * Updates MAC address lookup table with a static entry
908 + * Searches if the MAC address is already there in the block and replaces
909 + * the older entry with new one. If MAC address is not there then puts a
910 + * new entry in the first empty slot available in the block
912 + * mac_addr Pointer to the array containing MAC address to
913 + * be put as static entry
914 + * port Port bitmask numbers to be added in static entry,
915 + * valid values are 1-7
916 + * priority Priority for the static entry in table
918 + * return 0 for a successful update else -1 when no slot available
920 +int esw_update_atable_static(unsigned char *mac_addr,
921 + unsigned int port, unsigned int priority,
922 + struct switch_enet_private *fep)
924 + unsigned long block_index, entry, index_end;
925 + unsigned long read_lo, read_hi;
926 + unsigned long write_lo, write_hi;
928 + write_lo = (unsigned long)((mac_addr[3] << 24) |
929 + (mac_addr[2] << 16) |
930 + (mac_addr[1] << 8) |
932 + write_hi = (unsigned long)(0 |
933 + (port << AT_SENTRY_PORTMASK_shift) |
934 + (priority << AT_SENTRY_PRIO_shift) |
935 + (AT_ENTRY_TYPE_STATIC << AT_ENTRY_TYPE_shift) |
936 + (AT_ENTRY_RECORD_VALID << AT_ENTRY_VALID_shift) |
937 + (mac_addr[5] << 8) | (mac_addr[4]));
939 + block_index = GET_BLOCK_PTR(crc8_calc(mac_addr));
940 + index_end = block_index + ATABLE_ENTRY_PER_SLOT;
941 + /* Now search all the entries in the selected block */
942 + for (entry = block_index; entry < index_end; entry++) {
943 + read_atable(fep, entry, &read_lo, &read_hi);
945 + * MAC address matched, so update the
947 + * even if its a dynamic one
949 + if ((read_lo == write_lo) && ((read_hi & 0x0000ffff) ==
950 + (write_hi & 0x0000ffff))) {
951 + write_atable(fep, entry, write_lo, write_hi);
953 + } else if (!(read_hi & (1 << 16))) {
955 + * Fill this empty slot (valid bit zero),
956 + * assuming no holes in the block
958 + write_atable(fep, entry, write_lo, write_hi);
959 + fep->atCurrEntries++;
964 + /* No space available for this static entry */
968 +/* lookup entry in given Address Table slot and
969 + * insert (learn) it if it is not found.
970 + * return 0 if entry was found and updated.
971 + * 1 if entry was not found and has been inserted (learned).
973 +int esw_update_atable_dynamic(unsigned char *mac_addr, unsigned int port,
974 + unsigned int currTime, struct switch_enet_private *fep)
976 + unsigned long block_index, entry, index_end;
977 + unsigned long read_lo, read_hi;
978 + unsigned long write_lo, write_hi;
980 + int time, timeold, indexold;
982 + /* prepare update port and timestamp */
983 + write_hi = (mac_addr[5] << 8) | (mac_addr[4]);
984 + write_lo = (unsigned long)((mac_addr[3] << 24) |
985 + (mac_addr[2] << 16) |
986 + (mac_addr[1] << 8) |
988 + tmp = AT_ENTRY_RECORD_VALID << AT_ENTRY_VALID_shift;
989 + tmp |= AT_ENTRY_TYPE_DYNAMIC << AT_ENTRY_TYPE_shift;
990 + tmp |= currTime << AT_DENTRY_TIME_shift;
991 + tmp |= port << AT_DENTRY_PORT_shift;
995 + * linear search through all slot
996 + * entries and update if found
998 + block_index = GET_BLOCK_PTR(crc8_calc(mac_addr));
999 + index_end = block_index + ATABLE_ENTRY_PER_SLOT;
1000 + /* Now search all the entries in the selected block */
1001 + for (entry = block_index; entry < index_end; entry++) {
1002 + read_atable(fep, entry, &read_lo, &read_hi);
1004 + if ((read_lo == write_lo) &&
1005 + ((read_hi & 0x0000ffff) ==
1006 + (write_hi & 0x0000ffff))) {
1007 + /* found correct address,
1008 + * update timestamp. */
1009 + write_atable(fep, entry, write_lo, tmp);
1011 + } else if (!(read_hi & (1 << 16))) {
1012 + /* slot is empty, then use it
1014 + * Note: There are no holes,
1015 + * therefore cannot be any
1016 + * more that need to be compared.
1018 + write_atable(fep, entry, write_lo, tmp);
1019 + /* statistics (we do it between writing
1020 + * .hi an .lo due to
1021 + * hardware limitation...
1023 + fep->atCurrEntries++;
1024 + /* newly inserted */
1030 + * no more entry available in block ...
1031 + * overwrite oldest
1035 + for (entry = block_index; entry < index_end; entry++) {
1036 + read_atable(fep, entry, &read_lo, &read_hi);
1037 + time = AT_EXTRACT_TIMESTAMP(read_hi);
1038 + time = TIMEDELTA(currTime, time);
1039 + if (time > timeold) {
1040 + /* is it older ?*/
1046 + write_atable(fep, indexold, write_lo, tmp);
1047 + /* Statistics (do it inbetween
1048 + * writing to .lo and .hi*/
1049 + fep->atBlockOverflows++;
1050 + /* newly inserted */
1054 +int esw_update_atable_dynamic1(unsigned long write_lo, unsigned long write_hi,
1055 + int block_index, unsigned int port, unsigned int currTime,
1056 + struct switch_enet_private *fep)
1058 + unsigned long entry, index_end;
1059 + unsigned long read_lo, read_hi;
1060 + unsigned long tmp;
1061 + int time, timeold, indexold;
1063 + /* prepare update port and timestamp */
1064 + tmp = AT_ENTRY_RECORD_VALID << AT_ENTRY_VALID_shift;
1065 + tmp |= AT_ENTRY_TYPE_DYNAMIC << AT_ENTRY_TYPE_shift;
1066 + tmp |= currTime << AT_DENTRY_TIME_shift;
1067 + tmp |= port << AT_DENTRY_PORT_shift;
1071 + * linear search through all slot
1072 + * entries and update if found
1074 + index_end = block_index + ATABLE_ENTRY_PER_SLOT;
1075 + /* Now search all the entries in the selected block */
1076 + for (entry = block_index; entry < index_end; entry++) {
1077 + read_atable(fep, entry, &read_lo, &read_hi);
1078 + if ((read_lo == write_lo) &&
1079 + ((read_hi & 0x0000ffff) ==
1080 + (write_hi & 0x0000ffff))) {
1081 + /* found correct address,
1082 + * update timestamp. */
1083 + write_atable(fep, entry, write_lo, tmp);
1085 + } else if (!(read_hi & (1 << 16))) {
1086 + /* slot is empty, then use it
1088 + * Note: There are no holes,
1089 + * therefore cannot be any
1090 + * more that need to be compared.
1092 + write_atable(fep, entry, write_lo, tmp);
1093 + /* statistics (we do it between writing
1094 + * .hi an .lo due to
1095 + * hardware limitation...
1097 + fep->atCurrEntries++;
1098 + /* newly inserted */
1104 + * no more entry available in block ...
1105 + * overwrite oldest
1109 + for (entry = block_index; entry < index_end; entry++) {
1110 + read_atable(fep, entry, &read_lo, &read_hi);
1111 + time = AT_EXTRACT_TIMESTAMP(read_hi);
1112 + time = TIMEDELTA(currTime, time);
1113 + if (time > timeold) {
1114 + /* is it older ?*/
1120 + write_atable(fep, indexold, write_lo, tmp);
1121 + /* Statistics (do it in between
1122 + * writing to .lo and .hi */
1123 + fep->atBlockOverflows++;
1124 + /* newly inserted */
1129 + * Delete one dynamic entry within the given block
1130 + * of 64-bit entries.
1131 + * return number of valid entries in the block after deletion.
1133 +int esw_del_atable_dynamic(struct switch_enet_private *fep,
1134 + int blockidx, int entryidx)
1136 + unsigned long index_start, index_end;
1138 + unsigned long read_lo, read_hi;
1140 + /* the entry to delete */
1141 + index_start = blockidx + entryidx;
1142 + /* one after last */
1143 + index_end = blockidx + ATABLE_ENTRY_PER_SLOT;
1145 + fep->atCurrEntries--;
1147 + if (entryidx == (ATABLE_ENTRY_PER_SLOT - 1)) {
1148 + /* if it is the very last entry,
1149 + * just delete it without further effort */
1150 + write_atable(fep, index_start, 0, 0);
1151 + /*number of entries left*/
1152 + i = ATABLE_ENTRY_PER_SLOT - 1;
1155 + /*not the last in the block, then
1156 + * shift all that follow the one
1157 + * that is deleted to avoid "holes".
1159 + for (i = index_start; i < (index_end - 1); i++) {
1160 + read_atable(fep, i + 1, &read_lo, &read_hi);
1161 + /* move it down */
1162 + write_atable(fep, i, read_lo, read_hi);
1163 + if (!(read_hi & (1 << 16))) {
1164 + /* stop if we just copied the last */
1165 + return i - blockidx;
1169 + /* moved all entries up to the last;
1170 + * then set the invalid flag in the last entry */
1171 + write_atable(fep, index_end - 1, 0, 0);
1172 + /* number of valid entries left */
1173 + return i - blockidx;
1177 +void esw_atable_dynamicms_del_entries_for_port(
1178 + struct switch_enet_private *fep, int port_index)
1180 + unsigned long read_lo, read_hi;
1181 + unsigned int port_idx;
1184 + for (i = 0; i < ESW_ATABLE_MEM_NUM_ENTRIES; i++) {
1185 + read_atable(fep, i, &read_lo, &read_hi);
1186 + if (read_hi & (1 << 16)) {
1187 + port_idx = AT_EXTRACT_PORT(read_hi);
1189 + if (port_idx == port_index)
1190 + write_atable(fep, i, 0, 0);
1195 +void esw_atable_dynamicms_del_entries_for_other_port(
1196 + struct switch_enet_private *fep,
1199 + unsigned long read_lo, read_hi;
1200 + unsigned int port_idx;
1203 + for (i = 0; i < ESW_ATABLE_MEM_NUM_ENTRIES; i++) {
1204 + read_atable(fep, i, &read_lo, &read_hi);
1205 + if (read_hi & (1 << 16)) {
1206 + port_idx = AT_EXTRACT_PORT(read_hi);
1208 + if (port_idx != port_index)
1209 + write_atable(fep, i, 0, 0);
1215 + * Scan one complete block (Slot) for outdated entries and delete them.
1216 + * blockidx index of block of entries that should be analyzed.
1217 + * return number of deleted entries, 0 if nothing was modified.
1219 +int esw_atable_dynamicms_check_block_age(
1220 + struct switch_enet_private *fep, int blockidx) {
1222 + int i, tm, tdelta;
1223 + int deleted = 0, entries = 0;
1224 + unsigned long read_lo, read_hi;
1225 + /* Scan all entries from last down to
1226 + * have faster deletion speed if necessary*/
1227 + for (i = (blockidx + ATABLE_ENTRY_PER_SLOT - 1);
1228 + i >= blockidx; i--) {
1229 + read_atable(fep, i, &read_lo, &read_hi);
1231 + if (read_hi & (1 << 16)) {
1232 + /* the entry is valid */
1233 + tm = AT_EXTRACT_TIMESTAMP(read_hi);
1234 + tdelta = TIMEDELTA(fep->currTime, tm);
1235 + if (tdelta > fep->ageMax) {
1236 + esw_del_atable_dynamic(fep,
1237 + blockidx, i-blockidx);
1246 + /*update statistics*/
1247 + if (fep->atMaxEntriesPerBlock < entries)
1248 + fep->atMaxEntriesPerBlock = entries;
1253 +/* scan the complete address table and find the most current entry.
1254 + * The time of the most current entry then is used as current time
1255 + * for the context structure.
1256 + * In addition the atCurrEntries value is updated as well.
1257 + * return time that has been set in the context.
1259 +int esw_atable_dynamicms_find_set_latesttime(
1260 + struct switch_enet_private *fep) {
1262 + int tm_min, tm_max, tm;
1263 + int delta, current, i;
1264 + unsigned long read_lo, read_hi;
1266 + tm_min = (1 << AT_DENTRY_TIMESTAMP_WIDTH) - 1;
1270 + for (i = 0; i < ESW_ATABLE_MEM_NUM_ENTRIES; i++) {
1271 + read_atable(fep, i, &read_lo, &read_hi);
1272 + if (read_hi & (1 << 16)) {
1273 + /*the entry is valid*/
1274 + tm = AT_EXTRACT_TIMESTAMP(read_hi);
1283 + delta = TIMEDELTA(tm_max, tm_min);
1284 + if (delta < fep->ageMax) {
1285 + /*Difference must be in range*/
1286 + fep->currTime = tm_max;
1288 + fep->currTime = tm_min;
1291 + fep->atCurrEntries = current;
1292 + return fep->currTime;
1295 +int esw_atable_dynamicms_get_port(
1296 + struct switch_enet_private *fep,
1297 + unsigned long write_lo,
1298 + unsigned long write_hi,
1302 + unsigned long read_lo, read_hi, port;
1304 + index_end = block_index + ATABLE_ENTRY_PER_SLOT;
1305 + /* Now search all the entries in the selected block */
1306 + for (i = block_index; i < index_end; i++) {
1307 + read_atable(fep, i, &read_lo, &read_hi);
1309 + if ((read_lo == write_lo) &&
1310 + ((read_hi & 0x0000ffff) ==
1311 + (write_hi & 0x0000ffff))) {
1312 + /* found correct address,*/
1313 + if (read_hi & (1 << 16)) {
1314 + /*extract the port index from the valid entry*/
1315 + port = AT_EXTRACT_PORT(read_hi);
1324 +/* Get the port index from the source MAC address
1325 + * of the received frame
1326 + * @return port index
1328 +int esw_atable_dynamicms_get_portindex_from_mac(
1329 + struct switch_enet_private *fep,
1330 + unsigned char *mac_addr,
1331 + unsigned long write_lo,
1332 + unsigned long write_hi)
1336 + /*compute the block index*/
1337 + blockIdx = GET_BLOCK_PTR(crc8_calc(mac_addr));
1338 + /* Get the ingress port index of the received BPDU */
1339 + rc = esw_atable_dynamicms_get_port(fep,
1340 + write_lo, write_hi, blockIdx);
1345 +/* dynamicms MAC address table learn and migration*/
1346 +int esw_atable_dynamicms_learn_migration(
1347 + struct switch_enet_private *fep,
1350 + eswPortInfo *pESWPortInfo;
1354 + pESWPortInfo = esw_portinfofifo_read(fep);
1355 + /* Anything to learn */
1356 + if (pESWPortInfo != 0) {
1357 + /*get block index from lookup table*/
1358 + index = GET_BLOCK_PTR(pESWPortInfo->hash);
1359 + inserted = esw_update_atable_dynamic1(
1360 + pESWPortInfo->maclo,
1361 + pESWPortInfo->machi, index,
1362 + pESWPortInfo->port, currTime, fep);
1367 +/* -----------------------------------------------------------------*/
1369 + * esw_forced_forward
1370 + * The frame is forwarded to the forced destination ports.
1371 + * It only replaces the MAC lookup function;
1372 + * all other filtering (e.g. VLAN verification) acts as normal
1374 +int esw_forced_forward(struct switch_enet_private *fep,
1375 + int port1, int port2, int enable)
1377 + unsigned long tmp = 0;
1378 + volatile switch_t *fecp;
1382 + /* Enable Forced forwarding for port num */
1383 + if ((port1 == 1) && (port2 == 1))
1384 + tmp |= MCF_ESW_P0FFEN_FD(3);
1385 + else if (port1 == 1)
1386 + /*Enable Forced forwarding for port 1 only*/
1387 + tmp |= MCF_ESW_P0FFEN_FD(1);
1388 + else if (port2 == 1)
1389 + /*Enable Forced forwarding for port 2 only*/
1390 + tmp |= MCF_ESW_P0FFEN_FD(2);
1392 + printk(KERN_ERR "%s:do not support "
1393 + "the forced forward mode"
1394 + "port1 %x port2 %x\n",
1395 + __func__, port1, port2);
1400 + tmp |= MCF_ESW_P0FFEN_FEN;
1401 + else if (enable == 0)
1402 + tmp &= ~MCF_ESW_P0FFEN_FEN;
1404 + printk(KERN_ERR "%s: the enable %x is error\n",
1405 + __func__, enable);
1409 + fecp->ESW_P0FFEN = tmp;
1413 +void esw_get_forced_forward(
1414 + struct switch_enet_private *fep,
1415 + unsigned long *ulForceForward)
1417 + volatile switch_t *fecp;
1420 + *ulForceForward = fecp->ESW_P0FFEN;
1423 +void esw_get_port_enable(
1424 + struct switch_enet_private *fep,
1425 + unsigned long *ulPortEnable)
1427 + volatile switch_t *fecp;
1430 + *ulPortEnable = fecp->ESW_PER;
1433 + * enable or disable port n tx or rx
1434 + * tx_en 0 disable port n tx
1435 + * tx_en 1 enable port n tx
1436 + * rx_en 0 disable port n rx
1437 + * rx_en 1 enable port n rx
1439 +int esw_port_enable_config(struct switch_enet_private *fep,
1440 + int port, int tx_en, int rx_en)
1442 + unsigned long tmp = 0;
1443 + volatile switch_t *fecp;
1446 + tmp = fecp->ESW_PER;
1449 + tmp |= MCF_ESW_PER_TE0;
1450 + else if (port == 1)
1451 + tmp |= MCF_ESW_PER_TE1;
1452 + else if (port == 2)
1453 + tmp |= MCF_ESW_PER_TE2;
1455 + printk(KERN_ERR "%s:do not support the"
1456 + " port %x tx enable\n",
1460 + } else if (tx_en == 0) {
1462 + tmp &= (~MCF_ESW_PER_TE0);
1463 + else if (port == 1)
1464 + tmp &= (~MCF_ESW_PER_TE1);
1465 + else if (port == 2)
1466 + tmp &= (~MCF_ESW_PER_TE2);
1468 + printk(KERN_ERR "%s:do not support "
1469 + "the port %x tx disable\n",
1474 + printk(KERN_ERR "%s:do not support the port %x"
1475 + " tx op value %x\n",
1476 + __func__, port, tx_en);
1482 + tmp |= MCF_ESW_PER_RE0;
1483 + else if (port == 1)
1484 + tmp |= MCF_ESW_PER_RE1;
1485 + else if (port == 2)
1486 + tmp |= MCF_ESW_PER_RE2;
1488 + printk(KERN_ERR "%s:do not support the "
1489 + "port %x rx enable\n",
1493 + } else if (rx_en == 0) {
1495 + tmp &= (~MCF_ESW_PER_RE0);
1496 + else if (port == 1)
1497 + tmp &= (~MCF_ESW_PER_RE1);
1498 + else if (port == 2)
1499 + tmp &= (~MCF_ESW_PER_RE2);
1501 + printk(KERN_ERR "%s:do not support the "
1502 + "port %x rx disable\n",
1507 + printk(KERN_ERR "%s:do not support the port %x"
1508 + " rx op value %x\n",
1509 + __func__, port, tx_en);
1513 + fecp->ESW_PER = tmp;
1518 +void esw_get_port_broadcast(struct switch_enet_private *fep,
1519 + unsigned long *ulPortBroadcast)
1521 + volatile switch_t *fecp;
1524 + *ulPortBroadcast = fecp->ESW_DBCR;
1527 +int esw_port_broadcast_config(struct switch_enet_private *fep,
1528 + int port, int enable)
1530 + unsigned long tmp = 0;
1531 + volatile switch_t *fecp;
1535 + if ((port > 2) || (port < 0)) {
1536 + printk(KERN_ERR "%s:do not support the port %x"
1537 + " default broadcast\n",
1542 + tmp = fecp->ESW_DBCR;
1543 + if (enable == 1) {
1545 + tmp |= MCF_ESW_DBCR_P0;
1546 + else if (port == 1)
1547 + tmp |= MCF_ESW_DBCR_P1;
1548 + else if (port == 2)
1549 + tmp |= MCF_ESW_DBCR_P2;
1550 + } else if (enable == 0) {
1552 + tmp &= ~MCF_ESW_DBCR_P0;
1553 + else if (port == 1)
1554 + tmp &= ~MCF_ESW_DBCR_P1;
1555 + else if (port == 2)
1556 + tmp &= ~MCF_ESW_DBCR_P2;
1559 + fecp->ESW_DBCR = tmp;
1564 +void esw_get_port_multicast(struct switch_enet_private *fep,
1565 + unsigned long *ulPortMulticast)
1567 + volatile switch_t *fecp;
1570 + *ulPortMulticast = fecp->ESW_DMCR;
1573 +int esw_port_multicast_config(struct switch_enet_private *fep,
1574 + int port, int enable)
1576 + unsigned long tmp = 0;
1577 + volatile switch_t *fecp;
1581 + if ((port > 2) || (port < 0)) {
1582 + printk(KERN_ERR "%s:do not support the port %x"
1583 + " default broadcast\n",
1588 + tmp = fecp->ESW_DMCR;
1589 + if (enable == 1) {
1591 + tmp |= MCF_ESW_DMCR_P0;
1592 + else if (port == 1)
1593 + tmp |= MCF_ESW_DMCR_P1;
1594 + else if (port == 2)
1595 + tmp |= MCF_ESW_DMCR_P2;
1596 + } else if (enable == 0) {
1598 + tmp &= ~MCF_ESW_DMCR_P0;
1599 + else if (port == 1)
1600 + tmp &= ~MCF_ESW_DMCR_P1;
1601 + else if (port == 2)
1602 + tmp &= ~MCF_ESW_DMCR_P2;
1605 + fecp->ESW_DMCR = tmp;
1610 +void esw_get_port_blocking(struct switch_enet_private *fep,
1611 + unsigned long *ulPortBlocking)
1613 + volatile switch_t *fecp;
1616 + *ulPortBlocking = (fecp->ESW_BKLR & 0x0000000f);
1619 +int esw_port_blocking_config(struct switch_enet_private *fep,
1620 + int port, int enable)
1622 + unsigned long tmp = 0;
1623 + volatile switch_t *fecp;
1627 + if ((port > 2) || (port < 0)) {
1628 + printk(KERN_ERR "%s:do not support the port %x"
1629 + " default broadcast\n",
1634 + tmp = fecp->ESW_BKLR;
1635 + if (enable == 1) {
1637 + tmp |= MCF_ESW_BKLR_BE0;
1638 + else if (port == 1)
1639 + tmp |= MCF_ESW_BKLR_BE1;
1640 + else if (port == 2)
1641 + tmp |= MCF_ESW_BKLR_BE2;
1642 + } else if (enable == 0) {
1644 + tmp &= ~MCF_ESW_BKLR_BE0;
1645 + else if (port == 1)
1646 + tmp &= ~MCF_ESW_BKLR_BE1;
1647 + else if (port == 2)
1648 + tmp &= ~MCF_ESW_BKLR_BE2;
1651 + fecp->ESW_BKLR = tmp;
1656 +void esw_get_port_learning(struct switch_enet_private *fep,
1657 + unsigned long *ulPortLearning)
1659 + volatile switch_t *fecp;
1662 + *ulPortLearning = (fecp->ESW_BKLR & 0x000f0000) >> 16;
1665 +int esw_port_learning_config(struct switch_enet_private *fep,
1666 + int port, int disable)
1668 + unsigned long tmp = 0;
1669 + volatile switch_t *fecp;
1673 + if ((port > 2) || (port < 0)) {
1674 + printk(KERN_ERR "%s:do not support the port %x"
1675 + " default broadcast\n",
1680 + tmp = fecp->ESW_BKLR;
1681 + if (disable == 0) {
1682 + fep->learning_irqhandle_enable = 0;
1684 + tmp |= MCF_ESW_BKLR_LD0;
1685 + else if (port == 1)
1686 + tmp |= MCF_ESW_BKLR_LD1;
1687 + else if (port == 2)
1688 + tmp |= MCF_ESW_BKLR_LD2;
1689 + } else if (disable == 1) {
1691 + tmp &= ~MCF_ESW_BKLR_LD0;
1692 + else if (port == 1)
1693 + tmp &= ~MCF_ESW_BKLR_LD1;
1694 + else if (port == 2)
1695 + tmp &= ~MCF_ESW_BKLR_LD2;
1698 + fecp->ESW_BKLR = tmp;
1701 +/*********************************************************************/
1702 +void esw_mac_lookup_table_range(struct switch_enet_private *fep)
1705 + unsigned long read_lo, read_hi;
1706 + /* Pointer to switch address look up memory*/
1707 + for (index = 0; index < 2048; index++)
1708 + write_atable(fep, index, index, (~index));
1710 + /* Pointer to switch address look up memory*/
1711 + for (index = 0; index < 2048; index++) {
1712 + read_atable(fep, index, &read_lo, &read_hi);
1713 + if (read_lo != index) {
1714 + printk(KERN_ERR "%s:Mismatch at low %d\n",
1719 + if (read_hi != (~index)) {
1720 + printk(KERN_ERR "%s:Mismatch at high %d\n",
1728 + * Checks IP Snoop options of handling the snooped frame.
1729 + * mode 0 : The snooped frame is forwarded only to the management port
1730 + * mode 1 : The snooped frame is copied to the management port and
1731 + * normal forwarding is checked.
1732 + * mode 2 : The snooped frame is discarded.
1733 + * mode 3 : Disable the ip snoop function
1734 + * ip_header_protocol : the IP header protocol field
1736 +int esw_ip_snoop_config(struct switch_enet_private *fep,
1737 + int mode, unsigned long ip_header_protocol)
1739 + volatile switch_t *fecp;
1740 + unsigned long tmp = 0, protocol_type = 0;
1744 + /* Config IP Snooping */
1746 + /* Enable IP Snooping */
1747 + tmp = MCF_ESW_IPSNP_EN;
1748 + tmp |= MCF_ESW_IPSNP_MODE(0);/*For Forward*/
1749 + } else if (mode == 1) {
1750 + /* Enable IP Snooping */
1751 + tmp = MCF_ESW_IPSNP_EN;
1752 + /* For Forward and copy-to-management-port */
1753 + tmp |= MCF_ESW_IPSNP_MODE(1);
1754 + } else if (mode == 2) {
1755 + /* Enable IP Snooping */
1756 + tmp = MCF_ESW_IPSNP_EN;
1757 + tmp |= MCF_ESW_IPSNP_MODE(2);/*discard*/
1758 + } else if (mode == 3) {
1759 + /* disable IP Snooping */
1760 + tmp = MCF_ESW_IPSNP_EN;
1761 + tmp &= ~MCF_ESW_IPSNP_EN;
1763 + printk(KERN_ERR "%s: the mode %x "
1764 + "we do not support\n", __func__, mode);
1768 + protocol_type = ip_header_protocol;
1769 + for (num = 0; num < 8; num++) {
1770 + if (protocol_type ==
1771 + AT_EXTRACT_IP_PROTOCOL(fecp->ESW_IPSNP[num])) {
1772 + fecp->ESW_IPSNP[num] =
1773 + tmp | MCF_ESW_IPSNP_PROTOCOL(protocol_type);
1775 + } else if (!(fecp->ESW_IPSNP[num])) {
1776 + fecp->ESW_IPSNP[num] =
1777 + tmp | MCF_ESW_IPSNP_PROTOCOL(protocol_type);
1782 + printk(KERN_INFO "IP snooping table is full\n");
1789 +void esw_get_ip_snoop_config(struct switch_enet_private *fep,
1790 + unsigned long *ulpESW_IPSNP)
1793 + volatile switch_t *fecp;
1796 + for (i = 0; i < 8; i++)
1797 + *(ulpESW_IPSNP + i) = fecp->ESW_IPSNP[i];
1800 + * Checks TCP/UDP Port Snoop options of handling the snooped frame.
1801 + * mode 0 : The snooped frame is forwarded only to the management port
1802 + * mode 1 : The snooped frame is copied to the management port and
1803 + * normal forwarding is checked.
1804 + * mode 2 : The snooped frame is discarded.
1805 + * mode 3 : Disable the TCP/UDP port snoop function
1806 + * compare_port : port number in the TCP/UDP header
1807 + * compare_num 1: TCP/UDP source port number is compared
1808 + * compare_num 2: TCP/UDP destination port number is compared
1809 + * compare_num 3: TCP/UDP source and destination port number is compared
1811 +int esw_tcpudp_port_snoop_config(struct switch_enet_private *fep,
1812 + int mode, int compare_port, int compare_num)
1814 + volatile switch_t *fecp;
1815 + unsigned long tmp;
1820 + /* Enable TCP/UDP port Snooping */
1821 + tmp = MCF_ESW_PSNP_EN;
1823 + tmp |= MCF_ESW_PSNP_MODE(0);/*For Forward*/
1824 + else if (mode == 1)/*For Forward and copy_to_mangmnt_port*/
1825 + tmp |= MCF_ESW_PSNP_MODE(1);
1826 + else if (mode == 2)
1827 + tmp |= MCF_ESW_PSNP_MODE(2);/*discard*/
1828 + else if (mode == 3) /*disable the port function*/
1829 + tmp &= (~MCF_ESW_PSNP_EN);
1831 + printk(KERN_ERR "%s: the mode %x we do not support\n",
1836 + if (compare_num == 1)
1837 + tmp |= MCF_ESW_PSNP_CS;
1838 + else if (compare_num == 2)
1839 + tmp |= MCF_ESW_PSNP_CD;
1840 + else if (compare_num == 3)
1841 + tmp |= MCF_ESW_PSNP_CD | MCF_ESW_PSNP_CS;
1843 + printk(KERN_ERR "%s: the compare port address %x"
1844 + " we do not support\n",
1845 + __func__, compare_num);
1849 + for (num = 0; num < 8; num++) {
1850 + if (compare_port ==
1851 + AT_EXTRACT_TCP_UDP_PORT(fecp->ESW_PSNP[num])) {
1852 + fecp->ESW_PSNP[num] =
1853 + tmp | MCF_ESW_PSNP_PORT_COMPARE(compare_port);
1855 + } else if (!(fecp->ESW_PSNP[num])) {
1856 + fecp->ESW_PSNP[num] =
1857 + tmp | MCF_ESW_PSNP_PORT_COMPARE(compare_port);
1862 + printk(KERN_INFO "TCP/UDP port snooping table is full\n");
1869 +void esw_get_tcpudp_port_snoop_config(
1870 + struct switch_enet_private *fep,
1871 + unsigned long *ulpESW_PSNP)
1874 + volatile switch_t *fecp;
1877 + for (i = 0; i < 8; i++)
1878 + *(ulpESW_PSNP + i) = fecp->ESW_PSNP[i];
1880 +/*-----------------mirror----------------------------------------*/
1881 +void esw_get_port_mirroring(struct switch_enet_private *fep)
1883 + volatile switch_t *fecp;
1887 + printk(KERN_INFO "Mirror Port: %1ld Egress Port Match:%s "
1888 + "Ingress Port Match:%s\n", fecp->ESW_MCR & 0xf,
1889 + (fecp->ESW_MCR >> 6) & 1 ? "Y" : "N",
1890 + (fecp->ESW_MCR >> 5) & 1 ? "Y" : "N");
1892 + if ((fecp->ESW_MCR >> 6) & 1)
1893 + printk(KERN_INFO "Egress Port to be mirrored: Port %ld\n",
1894 + fecp->ESW_EGMAP >> 1);
1895 + if ((fecp->ESW_MCR >> 5) & 1)
1896 + printk(KERN_INFO "Ingress Port to be mirrored: Port %ld\n",
1897 + fecp->ESW_INGMAP >> 1);
1899 + printk(KERN_INFO "Egress Des Address Match:%s "
1900 + "Egress Src Address Match:%s\n",
1901 + (fecp->ESW_MCR >> 10) & 1 ? "Y" : "N",
1902 + (fecp->ESW_MCR >> 9) & 1 ? "Y" : "N");
1903 + printk(KERN_INFO "Ingress Des Address Match:%s "
1904 + "Ingress Src Address Match:%s\n",
1905 + (fecp->ESW_MCR >> 8) & 1 ? "Y" : "N",
1906 + (fecp->ESW_MCR >> 7) & 1 ? "Y" : "N");
1908 + if ((fecp->ESW_MCR >> 10) & 1)
1909 + printk(KERN_INFO "Egress Des Address to be mirrored: "
1910 + "%02lx-%02lx-%02lx-%02lx-%02lx-%02lx\n",
1911 + fecp->ESW_ENGDAL & 0xff, (fecp->ESW_ENGDAL >> 8) & 0xff,
1912 + (fecp->ESW_ENGDAL >> 16) & 0xff,
1913 + (fecp->ESW_ENGDAL >> 24) & 0xff,
1914 + fecp->ESW_ENGDAH & 0xff,
1915 + (fecp->ESW_ENGDAH >> 8) & 0xff);
1916 + if ((fecp->ESW_MCR >> 9) & 1)
1917 + printk("Egress Src Address to be mirrored: "
1918 + "%02lx-%02lx-%02lx-%02lx-%02lx-%02lx\n",
1919 + fecp->ESW_ENGSAL & 0xff, (fecp->ESW_ENGSAL >> 8) & 0xff,
1920 + (fecp->ESW_ENGSAL >> 16) & 0xff,
1921 + (fecp->ESW_ENGSAL >> 24) & 0xff,
1922 + fecp->ESW_ENGSAH & 0xff,
1923 + (fecp->ESW_ENGSAH >> 8) & 0xff);
1924 + if ((fecp->ESW_MCR >> 8) & 1)
1925 + printk("Ingress Des Address to be mirrored: "
1926 + "%02lx-%02lx-%02lx-%02lx-%02lx-%02lx\n",
1927 + fecp->ESW_INGDAL & 0xff, (fecp->ESW_INGDAL >> 8) & 0xff,
1928 + (fecp->ESW_INGDAL >> 16) & 0xff,
1929 + (fecp->ESW_INGDAL >> 24) & 0xff,
1930 + fecp->ESW_INGDAH & 0xff,
1931 + (fecp->ESW_INGDAH >> 8) & 0xff);
1932 + if ((fecp->ESW_MCR >> 7) & 1)
1933 + printk("Ingress Src Address to be mirrored: "
1934 + "%02lx-%02lx-%02lx-%02lx-%02lx-%02lx\n",
1935 + fecp->ESW_INGSAL & 0xff, (fecp->ESW_INGSAL >> 8) & 0xff,
1936 + (fecp->ESW_INGSAL >> 16) & 0xff,
1937 + (fecp->ESW_INGSAL >> 24) & 0xff,
1938 + fecp->ESW_INGSAH & 0xff,
1939 + (fecp->ESW_INGSAH >> 8) & 0xff);
1942 +int esw_port_mirroring_config_port_match(struct switch_enet_private *fep,
1943 + int mirror_port, int port_match_en, int port)
1945 + volatile switch_t *fecp;
1946 + unsigned long tmp = 0;
1950 + tmp = fecp->ESW_MCR;
1951 + if (mirror_port != (tmp & 0xf))
1954 + switch (port_match_en) {
1955 + case MIRROR_EGRESS_PORT_MATCH:
1956 + tmp |= MCF_ESW_MCR_EGMAP;
1958 + fecp->ESW_EGMAP = MCF_ESW_EGMAP_EG0;
1959 + else if (port == 1)
1960 + fecp->ESW_EGMAP = MCF_ESW_EGMAP_EG1;
1961 + else if (port == 2)
1962 + fecp->ESW_EGMAP = MCF_ESW_EGMAP_EG2;
1964 + case MIRROR_INGRESS_PORT_MATCH:
1965 + tmp |= MCF_ESW_MCR_INGMAP;
1967 + fecp->ESW_INGMAP = MCF_ESW_INGMAP_ING0;
1968 + else if (port == 1)
1969 + fecp->ESW_INGMAP = MCF_ESW_INGMAP_ING1;
1970 + else if (port == 2)
1971 + fecp->ESW_INGMAP = MCF_ESW_INGMAP_ING2;
1978 + tmp = tmp & 0x07e0;
1979 + if (port_match_en)
1980 + tmp |= MCF_ESW_MCR_MEN | MCF_ESW_MCR_PORT(mirror_port);
1982 + fecp->ESW_MCR = tmp;
1986 +int esw_port_mirroring_config(struct switch_enet_private *fep,
1987 + int mirror_port, int port, int mirror_enable,
1988 + unsigned char *src_mac, unsigned char *des_mac,
1989 + int egress_en, int ingress_en,
1990 + int egress_mac_src_en, int egress_mac_des_en,
1991 + int ingress_mac_src_en, int ingress_mac_des_en)
1993 + volatile switch_t *fecp;
1994 + unsigned long tmp;
1998 + /*mirroring config*/
2000 + if (egress_en == 1) {
2001 + tmp |= MCF_ESW_MCR_EGMAP;
2003 + fecp->ESW_EGMAP = MCF_ESW_EGMAP_EG0;
2004 + else if (port == 1)
2005 + fecp->ESW_EGMAP = MCF_ESW_EGMAP_EG1;
2006 + else if (port == 2)
2007 + fecp->ESW_EGMAP = MCF_ESW_EGMAP_EG2;
2009 + printk(KERN_ERR "%s: the port %x we do not support\n",
2013 + } else if (egress_en == 0) {
2014 + tmp &= (~MCF_ESW_MCR_EGMAP);
2016 + printk(KERN_ERR "%s: egress_en %x we do not support\n",
2017 + __func__, egress_en);
2021 + if (ingress_en == 1) {
2022 + tmp |= MCF_ESW_MCR_INGMAP;
2024 + fecp->ESW_INGMAP = MCF_ESW_INGMAP_ING0;
2025 + else if (port == 1)
2026 + fecp->ESW_INGMAP = MCF_ESW_INGMAP_ING1;
2027 + else if (port == 2)
2028 + fecp->ESW_INGMAP = MCF_ESW_INGMAP_ING2;
2030 + printk(KERN_ERR "%s: the port %x we do not support\n",
2034 + } else if (ingress_en == 0) {
2035 + tmp &= ~MCF_ESW_MCR_INGMAP;
2037 + printk(KERN_ERR "%s: ingress_en %x we do not support\n",
2038 + __func__, ingress_en);
2042 + if (egress_mac_src_en == 1) {
2043 + tmp |= MCF_ESW_MCR_EGSA;
2044 + fecp->ESW_ENGSAH = (src_mac[5] << 8) | (src_mac[4]);
2045 + fecp->ESW_ENGSAL = (unsigned long)((src_mac[3] << 24) |
2046 + (src_mac[2] << 16) |
2047 + (src_mac[1] << 8) |
2049 + } else if (egress_mac_src_en == 0) {
2050 + tmp &= ~MCF_ESW_MCR_EGSA;
2052 + printk(KERN_ERR "%s: egress_mac_src_en %x we do not support\n",
2053 + __func__, egress_mac_src_en);
2057 + if (egress_mac_des_en == 1) {
2058 + tmp |= MCF_ESW_MCR_EGDA;
2059 + fecp->ESW_ENGDAH = (des_mac[5] << 8) | (des_mac[4]);
2060 + fecp->ESW_ENGDAL = (unsigned long)((des_mac[3] << 24) |
2061 + (des_mac[2] << 16) |
2062 + (des_mac[1] << 8) |
2064 + } else if (egress_mac_des_en == 0) {
2065 + tmp &= ~MCF_ESW_MCR_EGDA;
2067 + printk(KERN_ERR "%s: egress_mac_des_en %x we do not support\n",
2068 + __func__, egress_mac_des_en);
2072 + if (ingress_mac_src_en == 1) {
2073 + tmp |= MCF_ESW_MCR_INGSA;
2074 + fecp->ESW_INGSAH = (src_mac[5] << 8) | (src_mac[4]);
2075 + fecp->ESW_INGSAL = (unsigned long)((src_mac[3] << 24) |
2076 + (src_mac[2] << 16) |
2077 + (src_mac[1] << 8) |
2079 + } else if (ingress_mac_src_en == 0) {
2080 + tmp &= ~MCF_ESW_MCR_INGSA;
2082 + printk(KERN_ERR "%s: ingress_mac_src_en %x we do not support\n",
2083 + __func__, ingress_mac_src_en);
2087 + if (ingress_mac_des_en == 1) {
2088 + tmp |= MCF_ESW_MCR_INGDA;
2089 + fecp->ESW_INGDAH = (des_mac[5] << 8) | (des_mac[4]);
2090 + fecp->ESW_INGDAL = (unsigned long)((des_mac[3] << 24) |
2091 + (des_mac[2] << 16) |
2092 + (des_mac[1] << 8) |
2094 + } else if (ingress_mac_des_en == 0) {
2095 + tmp &= ~MCF_ESW_MCR_INGDA;
2097 + printk(KERN_ERR "%s: ingress_mac_des_en %x we do not support\n",
2098 + __func__, ingress_mac_des_en);
2102 + if (mirror_enable == 1)
2103 + tmp |= MCF_ESW_MCR_MEN | MCF_ESW_MCR_PORT(mirror_port);
2104 + else if (mirror_enable == 0)
2105 + tmp &= ~MCF_ESW_MCR_MEN;
2107 + printk(KERN_ERR "%s: the mirror enable %x is error\n",
2108 + __func__, mirror_enable);
2111 + fecp->ESW_MCR = tmp;
2115 +int esw_port_mirroring_config_addr_match(struct switch_enet_private *fep,
2116 + int mirror_port, int addr_match_enable, unsigned char *mac_addr)
2118 + volatile switch_t *fecp;
2119 + unsigned long tmp = 0;
2123 + tmp = fecp->ESW_MCR;
2124 + if (mirror_port != (tmp & 0xf))
2127 + switch (addr_match_enable) {
2128 + case MIRROR_EGRESS_SOURCE_MATCH:
2129 + tmp |= MCF_ESW_MCR_EGSA;
2130 + fecp->ESW_ENGSAH = (mac_addr[5] << 8) | (mac_addr[4]);
2131 + fecp->ESW_ENGSAL = (unsigned long)((mac_addr[3] << 24) |
2132 + (mac_addr[2] << 16) | (mac_addr[1] << 8) | mac_addr[0]);
2134 + case MIRROR_INGRESS_SOURCE_MATCH:
2135 + tmp |= MCF_ESW_MCR_INGSA;
2136 + fecp->ESW_INGSAH = (mac_addr[5] << 8) | (mac_addr[4]);
2137 + fecp->ESW_INGSAL = (unsigned long)((mac_addr[3] << 24) |
2138 + (mac_addr[2] << 16) | (mac_addr[1] << 8) | mac_addr[0]);
2140 + case MIRROR_EGRESS_DESTINATION_MATCH:
2141 + tmp |= MCF_ESW_MCR_EGDA;
2142 + fecp->ESW_ENGDAH = (mac_addr[5] << 8) | (mac_addr[4]);
2143 + fecp->ESW_ENGDAL = (unsigned long)((mac_addr[3] << 24) |
2144 + (mac_addr[2] << 16) | (mac_addr[1] << 8) | mac_addr[0]);
2146 + case MIRROR_INGRESS_DESTINATION_MATCH:
2147 + tmp |= MCF_ESW_MCR_INGDA;
2148 + fecp->ESW_INGDAH = (mac_addr[5] << 8) | (mac_addr[4]);
2149 + fecp->ESW_INGDAL = (unsigned long)((mac_addr[3] << 24) |
2150 + (mac_addr[2] << 16) | (mac_addr[1] << 8) | mac_addr[0]);
2157 + tmp = tmp & 0x07e0;
2158 + if (addr_match_enable)
2159 + tmp |= MCF_ESW_MCR_MEN | MCF_ESW_MCR_PORT(mirror_port);
2161 + fecp->ESW_MCR = tmp;
2165 +void esw_get_vlan_verification(struct switch_enet_private *fep,
2166 + unsigned long *ulValue)
2168 + volatile switch_t *fecp;
2170 + *ulValue = fecp->ESW_VLANV;
2173 +int esw_set_vlan_verification(struct switch_enet_private *fep, int port,
2174 + int vlan_domain_verify_en, int vlan_discard_unknown_en)
2176 + volatile switch_t *fecp;
2179 + if ((port < 0) || (port > 2)) {
2180 + printk(KERN_ERR "%s: do not support the port %d\n",
2185 + if (vlan_domain_verify_en == 1) {
2187 + fecp->ESW_VLANV |= MCF_ESW_VLANV_VV0;
2188 + else if (port == 1)
2189 + fecp->ESW_VLANV |= MCF_ESW_VLANV_VV1;
2190 + else if (port == 2)
2191 + fecp->ESW_VLANV |= MCF_ESW_VLANV_VV2;
2192 + } else if (vlan_domain_verify_en == 0) {
2194 + fecp->ESW_VLANV &= ~MCF_ESW_VLANV_VV0;
2195 + else if (port == 1)
2196 + fecp->ESW_VLANV &= ~MCF_ESW_VLANV_VV1;
2197 + else if (port == 2)
2198 + fecp->ESW_VLANV &= ~MCF_ESW_VLANV_VV2;
2200 + printk(KERN_INFO "%s: donot support "
2201 + "vlan_domain_verify %x\n",
2202 + __func__, vlan_domain_verify_en);
2206 + if (vlan_discard_unknown_en == 1) {
2208 + fecp->ESW_VLANV |= MCF_ESW_VLANV_DU0;
2209 + else if (port == 1)
2210 + fecp->ESW_VLANV |= MCF_ESW_VLANV_DU1;
2211 + else if (port == 2)
2212 + fecp->ESW_VLANV |= MCF_ESW_VLANV_DU2;
2213 + } else if (vlan_discard_unknown_en == 0) {
2215 + fecp->ESW_VLANV &= ~MCF_ESW_VLANV_DU0;
2216 + else if (port == 1)
2217 + fecp->ESW_VLANV &= ~MCF_ESW_VLANV_DU1;
2218 + else if (port == 2)
2219 + fecp->ESW_VLANV &= ~MCF_ESW_VLANV_DU2;
2221 + printk(KERN_INFO "%s: donot support "
2222 + "vlan_discard_unknown %x\n",
2223 + __func__, vlan_discard_unknown_en);
2230 +void esw_get_vlan_resolution_table(struct switch_enet_private *fep,
2231 + struct eswVlanTableItem *tableaddr)
2233 + volatile switch_t *fecp;
2238 + for (i = 0; i < 32; i++) {
2239 + if (fecp->ESW_VRES[i]) {
2240 + tableaddr->table[i].port_vlanid =
2241 + fecp->ESW_VRES[i] >> 3;
2242 + tableaddr->table[i].vlan_domain_port =
2243 + fecp->ESW_VRES[i] & 7;
2247 + tableaddr->valid_num = vnum;
2250 +int esw_set_vlan_id(struct switch_enet_private *fep, unsigned long configData)
2252 + volatile switch_t *fecp;
2257 + for (i = 0; i < 32; i++) {
2258 + if (fecp->ESW_VRES[i] == 0) {
2259 + fecp->ESW_VRES[i] = MCF_ESW_VRES_VLANID(configData);
2261 + } else if (((fecp->ESW_VRES[i] >> 3) & 0xfff) == configData) {
2262 + printk(KERN_INFO "The VLAN already exists\n");
2267 + printk(KERN_INFO "The VLAN can't create, because VLAN table is full\n");
2271 +int esw_set_vlan_id_cleared(struct switch_enet_private *fep,
2272 + unsigned long configData)
2274 + volatile switch_t *fecp;
2279 + for (i = 0; i < 32; i++) {
2280 + if (((fecp->ESW_VRES[i] >> 3) & 0xfff) == configData) {
2281 + fecp->ESW_VRES[i] = 0;
2288 +int esw_set_port_in_vlan_id(struct switch_enet_private *fep,
2289 + eswIoctlVlanResoultionTable configData)
2291 + volatile switch_t *fecp;
2297 + for (i = 0; i < 32; i++) {
2298 + if (fecp->ESW_VRES[i] == 0) {
2301 + } else if (((fecp->ESW_VRES[i] >> 3) & 0xfff) ==
2302 + configData.port_vlanid) {
2303 + /* update the port members of this vlan */
2304 + fecp->ESW_VRES[i] |= 1 << configData.vlan_domain_port;
2308 + /* create a new VLAN in the VLAN table */
2309 + fecp->ESW_VRES[lastnum] = MCF_ESW_VRES_VLANID(configData.port_vlanid) |
2310 + (1 << configData.vlan_domain_port);
2314 +int esw_set_vlan_resolution_table(struct switch_enet_private *fep,
2315 + unsigned short port_vlanid, int vlan_domain_num,
2316 + int vlan_domain_port)
2318 + volatile switch_t *fecp;
2321 + if ((vlan_domain_num < 0)
2322 + || (vlan_domain_num > 31)) {
2323 + printk(KERN_ERR "%s: do not support the "
2324 + "vlan_domain_num %d\n",
2325 + __func__, vlan_domain_num);
2329 + if ((vlan_domain_port < 0)
2330 + || (vlan_domain_port > 7)) {
2331 + printk(KERN_ERR "%s: do not support the "
2332 + "vlan_domain_port %d\n",
2333 + __func__, vlan_domain_port);
2337 + fecp->ESW_VRES[vlan_domain_num] =
2338 + MCF_ESW_VRES_VLANID(port_vlanid)
2339 + | vlan_domain_port;
2344 +void esw_get_vlan_input_config(struct switch_enet_private *fep,
2345 + eswIoctlVlanInputStatus *pVlanInputConfig)
2347 + volatile switch_t *fecp;
2351 + for (i = 0; i < 3; i++)
2352 + pVlanInputConfig->ESW_PID[i] = fecp->ESW_PID[i];
2354 + pVlanInputConfig->ESW_VLANV = fecp->ESW_VLANV;
2355 + pVlanInputConfig->ESW_VIMSEL = fecp->ESW_VIMSEL;
2356 + pVlanInputConfig->ESW_VIMEN = fecp->ESW_VIMEN;
2358 + for (i = 0; i < 32; i++)
2359 + pVlanInputConfig->ESW_VRES[i] = fecp->ESW_VRES[i];
2363 +int esw_vlan_input_process(struct switch_enet_private *fep,
2364 + int port, int mode, unsigned short port_vlanid)
2366 + volatile switch_t *fecp;
2370 + if ((mode < 0) || (mode > 5)) {
2371 + printk(KERN_ERR "%s: do not support the"
2372 + " VLAN input processing mode %d\n",
2377 + if ((port < 0) || (port > 3)) {
2378 + printk(KERN_ERR "%s: do not support the port %d\n",
2383 + fecp->ESW_PID[port] = MCF_ESW_PID_VLANID(port_vlanid);
2386 + fecp->ESW_VIMEN &= ~MCF_ESW_VIMEN_EN0;
2388 + fecp->ESW_VIMEN |= MCF_ESW_VIMEN_EN0;
2390 + fecp->ESW_VIMSEL &= ~MCF_ESW_VIMSEL_IM0(3);
2391 + fecp->ESW_VIMSEL |= MCF_ESW_VIMSEL_IM0(mode);
2392 + } else if (port == 1) {
2394 + fecp->ESW_VIMEN &= ~MCF_ESW_VIMEN_EN1;
2396 + fecp->ESW_VIMEN |= MCF_ESW_VIMEN_EN1;
2398 + fecp->ESW_VIMSEL &= ~MCF_ESW_VIMSEL_IM1(3);
2399 + fecp->ESW_VIMSEL |= MCF_ESW_VIMSEL_IM1(mode);
2400 + } else if (port == 2) {
2402 + fecp->ESW_VIMEN &= ~MCF_ESW_VIMEN_EN2;
2404 + fecp->ESW_VIMEN |= MCF_ESW_VIMEN_EN2;
2406 + fecp->ESW_VIMSEL &= ~MCF_ESW_VIMSEL_IM2(3);
2407 + fecp->ESW_VIMSEL |= MCF_ESW_VIMSEL_IM2(mode);
2409 + printk(KERN_ERR "%s: do not support the port %d\n",
2417 +void esw_get_vlan_output_config(struct switch_enet_private *fep,
2418 + unsigned long *ulVlanOutputConfig)
2420 + volatile switch_t *fecp;
2423 + *ulVlanOutputConfig = fecp->ESW_VOMSEL;
2426 +int esw_vlan_output_process(struct switch_enet_private *fep,
2427 + int port, int mode)
2429 + volatile switch_t *fecp;
2433 + if ((port < 0) || (port > 2)) {
2434 + printk(KERN_ERR "%s: do not support the port %d\n",
2440 + fecp->ESW_VOMSEL &= ~MCF_ESW_VOMSEL_OM0(3);
2441 + fecp->ESW_VOMSEL |= MCF_ESW_VOMSEL_OM0(mode);
2442 + } else if (port == 1) {
2443 + fecp->ESW_VOMSEL &= ~MCF_ESW_VOMSEL_OM1(3);
2444 + fecp->ESW_VOMSEL |= MCF_ESW_VOMSEL_OM1(mode);
2445 + } else if (port == 2) {
2446 + fecp->ESW_VOMSEL &= ~MCF_ESW_VOMSEL_OM2(3);
2447 + fecp->ESW_VOMSEL |= MCF_ESW_VOMSEL_OM2(mode);
2449 + printk(KERN_ERR "%s: do not support the port %d\n",
2457 +/*------------frame classify and priority resolution------------*/
2458 +/*vlan priority lookup*/
2459 +int esw_framecalssify_vlan_priority_lookup(struct switch_enet_private *fep,
2460 + int port, int func_enable, int vlan_pri_table_num,
2461 + int vlan_pri_table_value)
2463 + volatile switch_t *fecp;
2467 + if ((port < 0) || (port > 3)) {
2468 + printk(KERN_ERR "%s: do not support the port %d\n",
2473 + if (func_enable == 0) {
2474 + fecp->ESW_PRES[port] &= ~MCF_ESW_PRES_VLAN;
2475 + printk(KERN_ERR "%s: disable port %d VLAN priority "
2476 + "lookup function\n", __func__, port);
2480 + if ((vlan_pri_table_num < 0) || (vlan_pri_table_num > 7)) {
2481 + printk(KERN_ERR "%s: do not support the priority %d\n",
2482 + __func__, vlan_pri_table_num);
2486 + fecp->ESW_PVRES[port] |= ((vlan_pri_table_value & 0x3)
2487 + << (vlan_pri_table_num*3));
2488 + /* enable port VLAN priority lookup function*/
2489 + fecp->ESW_PRES[port] |= MCF_ESW_PRES_VLAN;
2493 +int esw_framecalssify_ip_priority_lookup(struct switch_enet_private *fep,
2494 + int port, int func_enable, int ipv4_en, int ip_priority_num,
2495 + int ip_priority_value)
2497 + volatile switch_t *fecp;
2498 + unsigned long tmp = 0, tmp_prio = 0;
2502 + if ((port < 0) || (port > 3)) {
2503 + printk(KERN_ERR "%s: do not support the port %d\n",
2508 + if (func_enable == 0) {
2509 + fecp->ESW_PRES[port] &= ~MCF_ESW_PRES_IP;
2510 + printk(KERN_ERR "%s: disable port %d ip priority "
2511 + "lookup function\n", __func__, port);
2515 + /* IPV4 priority 64 entry table lookup*/
2516 + /* IPv4 head 6 bit TOS field*/
2517 + if (ipv4_en == 1) {
2518 + if ((ip_priority_num < 0) || (ip_priority_num > 63)) {
2519 + printk(KERN_ERR "%s: do not support the table entry %d\n",
2520 + __func__, ip_priority_num);
2523 + } else { /* IPV6 priority 256 entry table lookup*/
2524 + /* IPv6 head 8 bit COS field*/
2525 + if ((ip_priority_num < 0) || (ip_priority_num > 255)) {
2526 + printk(KERN_ERR "%s: do not support the table entry %d\n",
2527 + __func__, ip_priority_num);
2532 + /* IP priority table lookup : address*/
2533 + tmp = MCF_ESW_IPRES_ADDRESS(ip_priority_num);
2534 + /* IP priority table lookup : ipv4sel*/
2536 + tmp = tmp | MCF_ESW_IPRES_IPV4SEL;
2537 + /* IP priority table lookup : priority*/
2539 + tmp |= MCF_ESW_IPRES_PRI0(ip_priority_value);
2540 + else if (port == 1)
2541 + tmp |= MCF_ESW_IPRES_PRI1(ip_priority_value);
2542 + else if (port == 2)
2543 + tmp |= MCF_ESW_IPRES_PRI2(ip_priority_value);
2546 + fecp->ESW_IPRES = MCF_ESW_IPRES_READ |
2547 + MCF_ESW_IPRES_ADDRESS(ip_priority_num);
2548 + tmp_prio = fecp->ESW_IPRES;
2550 + fecp->ESW_IPRES = tmp | tmp_prio;
2552 + fecp->ESW_IPRES = MCF_ESW_IPRES_READ |
2553 + MCF_ESW_IPRES_ADDRESS(ip_priority_num);
2554 + tmp_prio = fecp->ESW_IPRES;
2556 + /* enable port IP priority lookup function*/
2557 + fecp->ESW_PRES[port] |= MCF_ESW_PRES_IP;
2561 +int esw_framecalssify_mac_priority_lookup(
2562 + struct switch_enet_private *fep, int port)
2564 + volatile switch_t *fecp;
2566 + if ((port < 0) || (port > 3)) {
2567 + printk(KERN_ERR "%s: do not support the port %d\n",
2573 + fecp->ESW_PRES[port] |= MCF_ESW_PRES_MAC;
2578 +int esw_frame_calssify_priority_init(struct switch_enet_private *fep,
2579 + int port, unsigned char priority_value)
2581 + volatile switch_t *fecp;
2585 + if ((port < 0) || (port > 3)) {
2586 + printk(KERN_ERR "%s: do not support the port %d\n",
2590 + /*disable all priority lookup function*/
2591 + fecp->ESW_PRES[port] = 0;
2592 + fecp->ESW_PRES[port] = MCF_ESW_PRES_DFLT_PRI(priority_value & 0x7);
2597 +/*---------------------------------------------------------------------------*/
2598 +int esw_get_statistics_status(struct switch_enet_private *fep,
2599 + esw_statistics_status *pStatistics)
2601 + volatile switch_t *fecp;
2604 + pStatistics->ESW_DISCN = fecp->ESW_DISCN;
2605 + pStatistics->ESW_DISCB = fecp->ESW_DISCB;
2606 + pStatistics->ESW_NDISCN = fecp->ESW_NDISCN;
2607 + pStatistics->ESW_NDISCB = fecp->ESW_NDISCB;
2611 +int esw_get_port_statistics_status(struct switch_enet_private *fep,
2612 + int port, esw_port_statistics_status *pPortStatistics)
2614 + volatile switch_t *fecp;
2616 + if ((port < 0) || (port > 3)) {
2617 + printk(KERN_ERR "%s: do not support the port %d\n",
2624 + pPortStatistics->MCF_ESW_POQC =
2625 + fecp->port_statistics_status[port].MCF_ESW_POQC;
2626 + pPortStatistics->MCF_ESW_PMVID =
2627 + fecp->port_statistics_status[port].MCF_ESW_PMVID;
2628 + pPortStatistics->MCF_ESW_PMVTAG =
2629 + fecp->port_statistics_status[port].MCF_ESW_PMVTAG;
2630 + pPortStatistics->MCF_ESW_PBL =
2631 + fecp->port_statistics_status[port].MCF_ESW_PBL;
2634 +/*----------------------------------------------------------------------*/
2635 +int esw_get_output_queue_status(struct switch_enet_private *fep,
2636 + esw_output_queue_status *pOutputQueue)
2638 + volatile switch_t *fecp;
2641 + pOutputQueue->ESW_MMSR = fecp->ESW_MMSR;
2642 + pOutputQueue->ESW_LMT = fecp->ESW_LMT;
2643 + pOutputQueue->ESW_LFC = fecp->ESW_LFC;
2644 + pOutputQueue->ESW_IOSR = fecp->ESW_IOSR;
2645 + pOutputQueue->ESW_PCSR = fecp->ESW_PCSR;
2646 + pOutputQueue->ESW_QWT = fecp->ESW_QWT;
2647 + pOutputQueue->ESW_P0BCT = fecp->ESW_P0BCT;
2651 +/* set output queue memory status and configure*/
2652 +int esw_set_output_queue_memory(struct switch_enet_private *fep,
2653 + int fun_num, esw_output_queue_status *pOutputQueue)
2655 + volatile switch_t *fecp;
2659 + if (fun_num == 1) {
2660 + /* memory manager status*/
2661 + fecp->ESW_MMSR = pOutputQueue->ESW_MMSR;
2662 + } else if (fun_num == 2) {
2663 + /*low memory threshold*/
2664 + fecp->ESW_LMT = pOutputQueue->ESW_LMT;
2665 + } else if (fun_num == 3) {
2666 + /*lowest number of free cells*/
2667 + fecp->ESW_LFC = pOutputQueue->ESW_LFC;
2668 + } else if (fun_num == 4) {
2670 + fecp->ESW_QWT = pOutputQueue->ESW_QWT;
2671 + } else if (fun_num == 5) {
2672 + /*port 0 backpressure congestion threshold*/
2673 + fecp->ESW_P0BCT = pOutputQueue->ESW_P0BCT;
2675 + printk(KERN_ERR "%s: do not support the cmd %x\n",
2676 + __func__, fun_num);
2681 +/*--------------------------------------------------------------------*/
2682 +int esw_get_irq_status(struct switch_enet_private *fep,
2683 + eswIoctlIrqStatus *pIrqStatus)
2685 + volatile switch_t *fecp;
2688 + pIrqStatus->isr = fecp->switch_ievent;
2689 + pIrqStatus->imr = fecp->switch_imask;
2690 + pIrqStatus->rx_buf_pointer = fecp->fec_r_des_start;
2691 + pIrqStatus->tx_buf_pointer = fecp->fec_x_des_start;
2692 + pIrqStatus->rx_max_size = fecp->fec_r_buff_size;
2693 + pIrqStatus->rx_buf_active = fecp->fec_r_des_active;
2694 + pIrqStatus->tx_buf_active = fecp->fec_x_des_active;
2698 +int esw_set_irq_mask(struct switch_enet_private *fep,
2699 + unsigned long mask, int enable)
2701 + volatile switch_t *fecp;
2706 + fecp->switch_imask |= mask;
2707 + else if (enable == 1)
2708 + fecp->switch_imask &= (~mask);
2710 + printk(KERN_INFO "%s: enable %lx is error value\n",
2717 +void esw_clear_irq_event(struct switch_enet_private *fep,
2718 + unsigned long mask)
2720 + volatile switch_t *fecp;
2723 + fecp->switch_ievent |= mask;
2726 +void esw_get_switch_mode(struct switch_enet_private *fep,
2727 + unsigned long *ulModeConfig)
2729 + volatile switch_t *fecp;
2732 + *ulModeConfig = fecp->ESW_MODE;
2735 +void esw_switch_mode_configure(struct switch_enet_private *fep,
2736 + unsigned long configure)
2738 + volatile switch_t *fecp;
2741 + fecp->ESW_MODE |= configure;
2744 +void esw_get_bridge_port(struct switch_enet_private *fep,
2745 + unsigned long *ulBMPConfig)
2747 + volatile switch_t *fecp;
2750 + *ulBMPConfig = fecp->ESW_BMPC;
2753 +void esw_bridge_port_configure(struct switch_enet_private *fep,
2754 + unsigned long configure)
2756 + volatile switch_t *fecp;
2759 + fecp->ESW_BMPC = configure;
2762 +int esw_get_port_all_status(struct switch_enet_private *fep,
2763 + unsigned char portnum, struct port_all_status *port_alstatus)
2765 + volatile switch_t *fecp;
2766 + unsigned long PortBlocking;
2767 + unsigned long PortLearning;
2768 + unsigned long VlanVerify;
2769 + unsigned long DiscardUnknown;
2770 + unsigned long MultiReso;
2771 + unsigned long BroadReso;
2772 + unsigned long FTransmit;
2773 + unsigned long FReceive;
2776 + PortBlocking = fecp->ESW_BKLR & 0x0000000f;
2777 + PortLearning = (fecp->ESW_BKLR & 0x000f0000) >> 16;
2778 + VlanVerify = fecp->ESW_VLANV & 0x0000000f;
2779 + DiscardUnknown = (fecp->ESW_VLANV & 0x000f0000) >> 16;
2780 + MultiReso = fecp->ESW_DMCR & 0x0000000f;
2781 + BroadReso = fecp->ESW_DBCR & 0x0000000f;
2782 + FTransmit = fecp->ESW_PER & 0x0000000f;
2783 + FReceive = (fecp->ESW_PER & 0x000f0000) >> 16;
2785 + switch (portnum) {
2787 + port_alstatus->link_status = 1;
2788 + port_alstatus->block_status = PortBlocking & 1;
2789 + port_alstatus->learn_status = PortLearning & 1;
2790 + port_alstatus->vlan_verify = VlanVerify & 1;
2791 + port_alstatus->discard_unknown = DiscardUnknown & 1;
2792 + port_alstatus->multi_reso = MultiReso & 1;
2793 + port_alstatus->broad_reso = BroadReso & 1;
2794 + port_alstatus->ftransmit = FTransmit & 1;
2795 + port_alstatus->freceive = FReceive & 1;
2798 + port_alstatus->link_status =
2799 + ports_link_status.port1_link_status;
2800 + port_alstatus->block_status = (PortBlocking >> 1) & 1;
2801 + port_alstatus->learn_status = (PortLearning >> 1) & 1;
2802 + port_alstatus->vlan_verify = (VlanVerify >> 1) & 1;
2803 + port_alstatus->discard_unknown = (DiscardUnknown >> 1) & 1;
2804 + port_alstatus->multi_reso = (MultiReso >> 1) & 1;
2805 + port_alstatus->broad_reso = (BroadReso >> 1) & 1;
2806 + port_alstatus->ftransmit = (FTransmit >> 1) & 1;
2807 + port_alstatus->freceive = (FReceive >> 1) & 1;
2810 + port_alstatus->link_status =
2811 + ports_link_status.port2_link_status;
2812 + port_alstatus->block_status = (PortBlocking >> 2) & 1;
2813 + port_alstatus->learn_status = (PortLearning >> 2) & 1;
2814 + port_alstatus->vlan_verify = (VlanVerify >> 2) & 1;
2815 + port_alstatus->discard_unknown = (DiscardUnknown >> 2) & 1;
2816 + port_alstatus->multi_reso = (MultiReso >> 2) & 1;
2817 + port_alstatus->broad_reso = (BroadReso >> 2) & 1;
2818 + port_alstatus->ftransmit = (FTransmit >> 2) & 1;
2819 + port_alstatus->freceive = (FReceive >> 2) & 1;
2822 + printk(KERN_ERR "%s:do not support the port %d",
2823 + __func__, portnum);
2829 +int esw_atable_get_entry_port_number(struct switch_enet_private *fep,
2830 + unsigned char *mac_addr, unsigned char *port)
2832 + int block_index, block_index_end, entry;
2833 + unsigned long read_lo, read_hi;
2834 + unsigned long mac_addr_lo, mac_addr_hi;
2836 + mac_addr_lo = (unsigned long)((mac_addr[3]<<24) | (mac_addr[2]<<16) |
2837 + (mac_addr[1]<<8) | mac_addr[0]);
2838 + mac_addr_hi = (unsigned long)((mac_addr[5]<<8) | (mac_addr[4]));
2840 + block_index = GET_BLOCK_PTR(crc8_calc(mac_addr));
2841 + block_index_end = block_index + ATABLE_ENTRY_PER_SLOT;
2843 + /* now search all the entries in the selected block */
2844 + for (entry = block_index; entry < block_index_end; entry++) {
2845 + read_atable(fep, entry, &read_lo, &read_hi);
2846 + if ((read_lo == mac_addr_lo) &&
2847 + ((read_hi & 0x0000ffff) ==
2848 + (mac_addr_hi & 0x0000ffff))) {
2849 + /* found the correct address */
2850 + if ((read_hi & (1 << 16)) && (!(read_hi & (1 << 17))))
2851 + *port = AT_EXTRACT_PORT(read_hi);
2860 +int esw_get_mac_address_lookup_table(struct switch_enet_private *fep,
2861 + unsigned long *tableaddr, unsigned long *dnum, unsigned long *snum)
2863 + unsigned long read_lo, read_hi;
2864 + unsigned long entry;
2865 + unsigned long dennum = 0;
2866 + unsigned long sennum = 0;
2868 + for (entry = 0; entry < ESW_ATABLE_MEM_NUM_ENTRIES; entry++) {
2869 + read_atable(fep, entry, &read_lo, &read_hi);
2870 + if ((read_hi & (1 << 17)) && (read_hi & (1 << 16))) {
2871 + /* static entry */
2872 + *(tableaddr + (2047 - sennum) * 11) = entry;
2873 + *(tableaddr + (2047 - sennum) * 11 + 2) =
2874 + read_lo & 0x000000ff;
2875 + *(tableaddr + (2047 - sennum) * 11 + 3) =
2876 + (read_lo & 0x0000ff00) >> 8;
2877 + *(tableaddr + (2047 - sennum) * 11 + 4) =
2878 + (read_lo & 0x00ff0000) >> 16;
2879 + *(tableaddr + (2047 - sennum) * 11 + 5) =
2880 + (read_lo & 0xff000000) >> 24;
2881 + *(tableaddr + (2047 - sennum) * 11 + 6) =
2882 + read_hi & 0x000000ff;
2883 + *(tableaddr + (2047 - sennum) * 11 + 7) =
2884 + (read_hi & 0x0000ff00) >> 8;
2885 + *(tableaddr + (2047 - sennum) * 11 + 8) =
2886 + AT_EXTRACT_PORTMASK(read_hi);
2887 + *(tableaddr + (2047 - sennum) * 11 + 9) =
2888 + AT_EXTRACT_PRIO(read_hi);
2890 + } else if ((read_hi & (1 << 16)) && (!(read_hi & (1 << 17)))) {
2891 + /* dynamic entry */
2892 + *(tableaddr + dennum * 11) = entry;
2893 + *(tableaddr + dennum * 11 + 2) = read_lo & 0xff;
2894 + *(tableaddr + dennum * 11 + 3) =
2895 + (read_lo & 0x0000ff00) >> 8;
2896 + *(tableaddr + dennum * 11 + 4) =
2897 + (read_lo & 0x00ff0000) >> 16;
2898 + *(tableaddr + dennum * 11 + 5) =
2899 + (read_lo & 0xff000000) >> 24;
2900 + *(tableaddr + dennum * 11 + 6) = read_hi & 0xff;
2901 + *(tableaddr + dennum * 11 + 7) =
2902 + (read_hi & 0x0000ff00) >> 8;
2903 + *(tableaddr + dennum * 11 + 8) =
2904 + AT_EXTRACT_PORT(read_hi);
2905 + *(tableaddr + dennum * 11 + 9) =
2906 + AT_EXTRACT_TIMESTAMP(read_hi);
2916 +/*----------------------------------------------------------------------------*/
2917 +/* The timer should create an interrupt every 4 seconds*/
2918 +static void l2switch_aging_timer(unsigned long data)
2920 + struct switch_enet_private *fep;
2922 + fep = (struct switch_enet_private *)data;
2925 + TIMEINCREMENT(fep->currTime);
2926 + fep->timeChanged++;
2929 + mod_timer(&fep->timer_aging, jiffies + LEARNING_AGING_TIMER);
2932 +/* ----------------------------------------------------------------------- */
2933 +void esw_check_rxb_txb_interrupt(struct switch_enet_private *fep)
2935 + volatile switch_t *fecp;
2938 + /*Enable Forced forwarding for port 1*/
2939 + fecp->ESW_P0FFEN = MCF_ESW_P0FFEN_FEN |
2940 + MCF_ESW_P0FFEN_FD(1);
2941 + /*Disable learning for all ports*/
2942 + MCF_ESW_IMR = MCF_ESW_IMR_TXB | MCF_ESW_IMR_TXF |
2943 + MCF_ESW_IMR_RXB | MCF_ESW_IMR_RXF;
2946 +/*----------------------------------------------------------------*/
2947 +static int switch_enet_learning(void *arg)
2949 + struct switch_enet_private *fep = arg;
2950 + volatile switch_t *fecp;
2953 + while (!kthread_should_stop()) {
2954 + set_current_state(TASK_INTERRUPTIBLE);
2956 + /* check learning record valid */
2957 + if (fecp->ESW_LSR)
2958 + esw_atable_dynamicms_learn_migration(fep,
2961 + schedule_timeout(HZ/100);
2967 +static int switch_enet_ioctl(struct net_device *dev,
2968 + struct ifreq *ifr, int cmd)
2970 + struct switch_enet_private *fep = netdev_priv(dev);
2971 + volatile switch_t *fecp;
2974 + fecp = (volatile switch_t *)dev->base_addr;
2977 + /*------------------------------------------------------------*/
2978 + case ESW_SET_PORTENABLE_CONF:
2980 + eswIoctlPortEnableConfig configData;
2981 + ret = copy_from_user(&configData,
2983 + sizeof(eswIoctlPortEnableConfig));
2987 + ret = esw_port_enable_config(fep,
2989 + configData.tx_enable,
2990 + configData.rx_enable);
2993 + case ESW_SET_BROADCAST_CONF:
2995 + eswIoctlPortConfig configData;
2996 + ret = copy_from_user(&configData,
2997 + ifr->ifr_data, sizeof(eswIoctlPortConfig));
3001 + ret = esw_port_broadcast_config(fep,
3002 + configData.port, configData.enable);
3006 + case ESW_SET_MULTICAST_CONF:
3008 + eswIoctlPortConfig configData;
3009 + ret = copy_from_user(&configData,
3010 + ifr->ifr_data, sizeof(eswIoctlPortConfig));
3014 + ret = esw_port_multicast_config(fep,
3015 + configData.port, configData.enable);
3019 + case ESW_SET_BLOCKING_CONF:
3021 + eswIoctlPortConfig configData;
3022 + ret = copy_from_user(&configData,
3023 + ifr->ifr_data, sizeof(eswIoctlPortConfig));
3028 + ret = esw_port_blocking_config(fep,
3029 + configData.port, configData.enable);
3033 + case ESW_SET_LEARNING_CONF:
3035 + eswIoctlPortConfig configData;
3037 + ret = copy_from_user(&configData,
3038 + ifr->ifr_data, sizeof(eswIoctlPortConfig));
3042 + ret = esw_port_learning_config(fep,
3043 + configData.port, configData.enable);
3047 + case ESW_SET_PORT_ENTRY_EMPTY:
3049 + unsigned long portnum;
3051 + ret = copy_from_user(&portnum,
3052 + ifr->ifr_data, sizeof(portnum));
3055 + esw_atable_dynamicms_del_entries_for_port(fep, portnum);
3059 + case ESW_SET_OTHER_PORT_ENTRY_EMPTY:
3061 + unsigned long portnum;
3063 + ret = copy_from_user(&portnum,
3064 + ifr->ifr_data, sizeof(portnum));
3068 + esw_atable_dynamicms_del_entries_for_other_port(fep, portnum);
3072 + case ESW_SET_IP_SNOOP_CONF:
3074 + eswIoctlIpsnoopConfig configData;
3076 + ret = copy_from_user(&configData,
3077 + ifr->ifr_data, sizeof(eswIoctlIpsnoopConfig));
3081 + ret = esw_ip_snoop_config(fep, configData.mode,
3082 + configData.ip_header_protocol);
3086 + case ESW_SET_PORT_SNOOP_CONF:
3088 + eswIoctlPortsnoopConfig configData;
3090 + ret = copy_from_user(&configData,
3091 + ifr->ifr_data, sizeof(eswIoctlPortsnoopConfig));
3095 + ret = esw_tcpudp_port_snoop_config(fep, configData.mode,
3096 + configData.compare_port,
3097 + configData.compare_num);
3101 + case ESW_SET_PORT_MIRROR_CONF_PORT_MATCH:
3103 + struct eswIoctlMirrorCfgPortMatch configData;
3105 + ret = copy_from_user(&configData,
3106 + ifr->ifr_data, sizeof(configData));
3109 + ret = esw_port_mirroring_config_port_match(fep,
3110 + configData.mirror_port, configData.port_match_en,
3115 + case ESW_SET_PORT_MIRROR_CONF:
3117 + eswIoctlPortMirrorConfig configData;
3119 + ret = copy_from_user(&configData,
3120 + ifr->ifr_data, sizeof(eswIoctlPortMirrorConfig));
3124 + ret = esw_port_mirroring_config(fep,
3125 + configData.mirror_port, configData.port,
3126 + configData.mirror_enable,
3127 + configData.src_mac, configData.des_mac,
3128 + configData.egress_en, configData.ingress_en,
3129 + configData.egress_mac_src_en,
3130 + configData.egress_mac_des_en,
3131 + configData.ingress_mac_src_en,
3132 + configData.ingress_mac_des_en);
3136 + case ESW_SET_PORT_MIRROR_CONF_ADDR_MATCH:
3138 + struct eswIoctlMirrorCfgAddrMatch configData;
3140 + ret = copy_from_user(&configData,
3141 + ifr->ifr_data, sizeof(configData));
3145 + ret = esw_port_mirroring_config_addr_match(fep,
3146 + configData.mirror_port, configData.addr_match_en,
3147 + configData.mac_addr);
3151 + case ESW_SET_PIRORITY_VLAN:
3153 + eswIoctlPriorityVlanConfig configData;
3155 + ret = copy_from_user(&configData,
3156 + ifr->ifr_data, sizeof(eswIoctlPriorityVlanConfig));
3160 + ret = esw_framecalssify_vlan_priority_lookup(fep,
3161 + configData.port, configData.func_enable,
3162 + configData.vlan_pri_table_num,
3163 + configData.vlan_pri_table_value);
3167 + case ESW_SET_PIRORITY_IP:
3169 + eswIoctlPriorityIPConfig configData;
3171 + ret = copy_from_user(&configData,
3172 + ifr->ifr_data, sizeof(eswIoctlPriorityIPConfig));
3176 + ret = esw_framecalssify_ip_priority_lookup(fep,
3177 + configData.port, configData.func_enable,
3178 + configData.ipv4_en, configData.ip_priority_num,
3179 + configData.ip_priority_value);
3183 + case ESW_SET_PIRORITY_MAC:
3185 + eswIoctlPriorityMacConfig configData;
3187 + ret = copy_from_user(&configData,
3188 + ifr->ifr_data, sizeof(eswIoctlPriorityMacConfig));
3192 + ret = esw_framecalssify_mac_priority_lookup(fep,
3197 + case ESW_SET_PIRORITY_DEFAULT:
3199 + eswIoctlPriorityDefaultConfig configData;
3201 + ret = copy_from_user(&configData,
3202 + ifr->ifr_data, sizeof(eswIoctlPriorityDefaultConfig));
3206 + ret = esw_frame_calssify_priority_init(fep,
3207 + configData.port, configData.priority_value);
3211 + case ESW_SET_P0_FORCED_FORWARD:
3213 + eswIoctlP0ForcedForwardConfig configData;
3215 + ret = copy_from_user(&configData,
3216 + ifr->ifr_data, sizeof(eswIoctlP0ForcedForwardConfig));
3220 + ret = esw_forced_forward(fep, configData.port1,
3221 + configData.port2, configData.enable);
3225 + case ESW_SET_BRIDGE_CONFIG:
3227 + unsigned long configData;
3229 + ret = copy_from_user(&configData,
3230 + ifr->ifr_data, sizeof(unsigned long));
3234 + esw_bridge_port_configure(fep, configData);
3238 + case ESW_SET_SWITCH_MODE:
3240 + unsigned long configData;
3242 + ret = copy_from_user(&configData,
3243 + ifr->ifr_data, sizeof(unsigned long));
3247 + esw_switch_mode_configure(fep, configData);
3251 + case ESW_SET_OUTPUT_QUEUE_MEMORY:
3253 + eswIoctlOutputQueue configData;
3255 + ret = copy_from_user(&configData,
3256 + ifr->ifr_data, sizeof(eswIoctlOutputQueue));
3260 + ret = esw_set_output_queue_memory(fep,
3261 + configData.fun_num, &configData.sOutputQueue);
3265 + case ESW_SET_VLAN_OUTPUT_PROCESS:
3267 + eswIoctlVlanOutputConfig configData;
3269 + ret = copy_from_user(&configData,
3270 + ifr->ifr_data, sizeof(eswIoctlVlanOutputConfig));
3274 + ret = esw_vlan_output_process(fep,
3275 + configData.port, configData.mode);
3279 + case ESW_SET_VLAN_INPUT_PROCESS:
3281 + eswIoctlVlanInputConfig configData;
3283 + ret = copy_from_user(&configData,
3285 + sizeof(eswIoctlVlanInputConfig));
3289 + ret = esw_vlan_input_process(fep, configData.port,
3290 + configData.mode, configData.port_vlanid);
3294 + case ESW_SET_VLAN_DOMAIN_VERIFICATION:
3296 + eswIoctlVlanVerificationConfig configData;
3298 + ret = copy_from_user(&configData,
3300 + sizeof(eswIoctlVlanVerificationConfig));
3304 + ret = esw_set_vlan_verification(
3305 + fep, configData.port,
3306 + configData.vlan_domain_verify_en,
3307 + configData.vlan_discard_unknown_en);
3311 + case ESW_SET_VLAN_RESOLUTION_TABLE:
3313 + eswIoctlVlanResoultionTable configData;
3315 + ret = copy_from_user(&configData,
3317 + sizeof(eswIoctlVlanResoultionTable));
3321 + ret = esw_set_vlan_resolution_table(
3322 + fep, configData.port_vlanid,
3323 + configData.vlan_domain_num,
3324 + configData.vlan_domain_port);
3329 + case ESW_SET_VLAN_ID:
3331 + unsigned long configData;
3332 + ret = copy_from_user(&configData, ifr->ifr_data,
3333 + sizeof(configData));
3337 + ret = esw_set_vlan_id(fep, configData);
3341 + case ESW_SET_VLAN_ID_CLEARED:
3343 + unsigned long configData;
3344 + ret = copy_from_user(&configData, ifr->ifr_data,
3345 + sizeof(configData));
3349 + ret = esw_set_vlan_id_cleared(fep, configData);
3353 + case ESW_SET_PORT_IN_VLAN_ID:
3355 + eswIoctlVlanResoultionTable configData;
3357 + ret = copy_from_user(&configData, ifr->ifr_data,
3358 + sizeof(configData));
3362 + ret = esw_set_port_in_vlan_id(fep, configData);
3366 + /*--------------------------------------------------------------------*/
3367 + case ESW_UPDATE_STATIC_MACTABLE:
3369 + eswIoctlUpdateStaticMACtable configData;
3371 + ret = copy_from_user(&configData,
3372 + ifr->ifr_data, sizeof(eswIoctlUpdateStaticMACtable));
3376 + ret = esw_update_atable_static(configData.mac_addr,
3377 + configData.port, configData.priority, fep);
3381 + case ESW_CLEAR_ALL_MACTABLE:
3383 + esw_clear_atable(fep);
3387 + /*-------------------get----------------------------------------------*/
3388 + case ESW_GET_STATISTICS_STATUS:
3390 + esw_statistics_status Statistics;
3391 + esw_port_statistics_status PortSta;
3394 + ret = esw_get_statistics_status(fep, &Statistics);
3396 + printk(KERN_ERR "%s: cmd %x fail\n", __func__, cmd);
3399 + printk(KERN_INFO "DISCN : %10ld DISCB : %10ld\n",
3400 + Statistics.ESW_DISCN, Statistics.ESW_DISCB);
3401 + printk(KERN_INFO "NDISCN: %10ld NDISCB: %10ld\n",
3402 + Statistics.ESW_NDISCN, Statistics.ESW_NDISCB);
3404 + for (i = 0; i < 3; i++) {
3405 + ret = esw_get_port_statistics_status(fep, i,
3408 + printk(KERN_ERR "%s: cmd %x fail\n",
3412 + printk(KERN_INFO "port %d: POQC : %ld\n",
3413 + i, PortSta.MCF_ESW_POQC);
3414 + printk(KERN_INFO " PMVID : %ld\n",
3415 + PortSta.MCF_ESW_PMVID);
3416 + printk(KERN_INFO " PMVTAG: %ld\n",
3417 + PortSta.MCF_ESW_PMVTAG);
3418 + printk(KERN_INFO " PBL : %ld\n",
3419 + PortSta.MCF_ESW_PBL);
3424 + case ESW_GET_LEARNING_CONF:
3426 + unsigned long PortLearning;
3428 + esw_get_port_learning(fep, &PortLearning);
3429 + ret = copy_to_user(ifr->ifr_data, &PortLearning,
3430 + sizeof(unsigned long));
3436 + case ESW_GET_BLOCKING_CONF:
3438 + unsigned long PortBlocking;
3440 + esw_get_port_blocking(fep, &PortBlocking);
3441 + ret = copy_to_user(ifr->ifr_data, &PortBlocking,
3442 + sizeof(unsigned long));
3448 + case ESW_GET_MULTICAST_CONF:
3450 + unsigned long PortMulticast;
3452 + esw_get_port_multicast(fep, &PortMulticast);
3453 + ret = copy_to_user(ifr->ifr_data, &PortMulticast,
3454 + sizeof(unsigned long));
3460 + case ESW_GET_BROADCAST_CONF:
3462 + unsigned long PortBroadcast;
3464 + esw_get_port_broadcast(fep, &PortBroadcast);
3465 + ret = copy_to_user(ifr->ifr_data, &PortBroadcast,
3466 + sizeof(unsigned long));
3472 + case ESW_GET_PORTENABLE_CONF:
3474 + unsigned long PortEnable;
3476 + esw_get_port_enable(fep, &PortEnable);
3477 + ret = copy_to_user(ifr->ifr_data, &PortEnable,
3478 + sizeof(unsigned long));
3484 + case ESW_GET_IP_SNOOP_CONF:
3486 + unsigned long ESW_IPSNP[8];
3489 + esw_get_ip_snoop_config(fep, (unsigned long *)ESW_IPSNP);
3490 + printk(KERN_INFO "IP Protocol Mode Type\n");
3491 + for (i = 0; i < 8; i++) {
3492 + if (ESW_IPSNP[i] != 0)
3493 + printk(KERN_INFO "%3ld "
3495 + (ESW_IPSNP[i] >> 8) & 0xff,
3496 + (ESW_IPSNP[i] >> 1) & 3,
3497 + ESW_IPSNP[i] & 1 ? "Active" :
3503 + case ESW_GET_PORT_SNOOP_CONF:
3505 + unsigned long ESW_PSNP[8];
3508 + esw_get_tcpudp_port_snoop_config(fep,
3509 + (unsigned long *)ESW_PSNP);
3510 + printk(KERN_INFO "TCP/UDP Port SrcCompare DesCompare "
3512 + for (i = 0; i < 8; i++) {
3513 + if (ESW_PSNP[i] != 0)
3514 + printk(KERN_INFO "%5ld %s "
3516 + (ESW_PSNP[i] >> 16) & 0xffff,
3517 + (ESW_PSNP[i] >> 4) & 1 ? "Y" : "N",
3518 + (ESW_PSNP[i] >> 3) & 1 ? "Y" : "N",
3519 + (ESW_PSNP[i] >> 1) & 3,
3520 + ESW_PSNP[i] & 1 ? "Active" :
3526 + case ESW_GET_PORT_MIRROR_CONF:
3527 + esw_get_port_mirroring(fep);
3530 + case ESW_GET_P0_FORCED_FORWARD:
3532 + unsigned long ForceForward;
3534 + esw_get_forced_forward(fep, &ForceForward);
3535 + ret = copy_to_user(ifr->ifr_data, &ForceForward,
3536 + sizeof(unsigned long));
3542 + case ESW_GET_SWITCH_MODE:
3544 + unsigned long Config;
3546 + esw_get_switch_mode(fep, &Config);
3547 + ret = copy_to_user(ifr->ifr_data, &Config,
3548 + sizeof(unsigned long));
3554 + case ESW_GET_BRIDGE_CONFIG:
3556 + unsigned long Config;
3558 + esw_get_bridge_port(fep, &Config);
3559 + ret = copy_to_user(ifr->ifr_data, &Config,
3560 + sizeof(unsigned long));
3565 + case ESW_GET_OUTPUT_QUEUE_STATUS:
3567 + esw_output_queue_status Config;
3568 + esw_get_output_queue_status(fep,
3570 + ret = copy_to_user(ifr->ifr_data, &Config,
3571 + sizeof(esw_output_queue_status));
3577 + case ESW_GET_VLAN_OUTPUT_PROCESS:
3579 + unsigned long Config;
3583 + esw_get_vlan_output_config(fep, &Config);
3585 + for (i = 0; i < 3; i++) {
3586 + tmp = (Config >> (i << 1)) & 3;
3589 + printk(KERN_INFO "port %d: vlan output "
3590 + "manipulation enable (mode %d)\n",
3593 + printk(KERN_INFO "port %d: vlan output "
3594 + "manipulation disable\n", i);
3599 + case ESW_GET_VLAN_INPUT_PROCESS:
3601 + eswIoctlVlanInputStatus Config;
3604 + esw_get_vlan_input_config(fep, &Config);
3606 + for (i = 0; i < 3; i++) {
3607 + if (((Config.ESW_VIMEN >> i) & 1) == 0)
3608 + printk(KERN_INFO "port %d: vlan input "
3609 + "manipulation disable\n", i);
3611 + printk("port %d: vlan input manipulation enable"
3612 + " (mode %ld, vlan id %ld)\n", i,
3613 + (((Config.ESW_VIMSEL >> (i << 1)) & 3)
3614 + + 1), Config.ESW_PID[i]);
3619 + case ESW_GET_VLAN_RESOLUTION_TABLE:
3621 + struct eswVlanTableItem vtableitem;
3622 + unsigned char tmp0, tmp1, tmp2;
3625 + esw_get_vlan_resolution_table(fep, &vtableitem);
3627 + printk(KERN_INFO "VLAN Name VLAN Id Ports\n");
3628 + for (i = 0; i < vtableitem.valid_num; i++) {
3629 + tmp0 = vtableitem.table[i].vlan_domain_port & 1;
3630 + tmp1 = (vtableitem.table[i].vlan_domain_port >> 1) & 1;
3631 + tmp2 = (vtableitem.table[i].vlan_domain_port >> 2) & 1;
3632 + printk(KERN_INFO "%2d %4d %s%s%s\n",
3633 + i, vtableitem.table[i].port_vlanid,
3634 + tmp0 ? "0 " : "", tmp1 ? "1 " : "",
3640 + case ESW_GET_VLAN_DOMAIN_VERIFICATION:
3642 + unsigned long Config;
3644 + esw_get_vlan_verification(fep, &Config);
3645 + ret = copy_to_user(ifr->ifr_data, &Config,
3646 + sizeof(unsigned long));
3652 + case ESW_GET_ENTRY_PORT_NUMBER:
3654 + unsigned char mac_addr[6];
3655 + unsigned char portnum;
3657 + ret = copy_from_user(mac_addr,
3658 + ifr->ifr_data, sizeof(mac_addr));
3662 + ret = esw_atable_get_entry_port_number(fep, mac_addr,
3665 + ret = copy_to_user(ifr->ifr_data, &portnum,
3666 + sizeof(unsigned char));
3672 + case ESW_GET_LOOKUP_TABLE:
3674 + unsigned long *ConfigData;
3675 + unsigned long dennum, sennum;
3679 + ConfigData = kmalloc(sizeof(struct eswAddrTableEntryExample) *
3680 + ESW_ATABLE_MEM_NUM_ENTRIES, GFP_KERNEL);
3681 + ret = esw_get_mac_address_lookup_table(fep, ConfigData,
3682 + &dennum, &sennum);
3683 + printk(KERN_INFO "Dynamic entries number: %ld\n", dennum);
3684 + printk(KERN_INFO "Static entries number: %ld\n", sennum);
3685 + printk(KERN_INFO "Type MAC address Port Timestamp\n");
3686 + for (i = 0; i < dennum; i++) {
3687 + printk(KERN_INFO "dynamic "
3688 + "%02lx-%02lx-%02lx-%02lx-%02lx-%02lx "
3689 + "%01lx %4ld\n", *(ConfigData + i * 11 + 2),
3690 + *(ConfigData + i * 11 + 3),
3691 + *(ConfigData + i * 11 + 4),
3692 + *(ConfigData + i * 11 + 5),
3693 + *(ConfigData + i * 11 + 6),
3694 + *(ConfigData + i * 11 + 7),
3695 + *(ConfigData + i * 11 + 8),
3696 + *(ConfigData + i * 11 + 9));
3700 + printk(KERN_INFO "Type MAC address"
3701 + " Port Priority\n");
3703 + for (i = 0; i < sennum; i++) {
3704 + printk(KERN_INFO "static %02lx-%02lx-%02lx-%02lx"
3706 + *(ConfigData + (2047 - i) * 11 + 2),
3707 + *(ConfigData + (2047 - i) * 11 + 3),
3708 + *(ConfigData + (2047 - i) * 11 + 4),
3709 + *(ConfigData + (2047 - i) * 11 + 5),
3710 + *(ConfigData + (2047 - i) * 11 + 6),
3711 + *(ConfigData + (2047 - i) * 11 + 7));
3713 + tmp = *(ConfigData + (2047 - i) * 11 + 8);
3714 + if ((tmp == 0) || (tmp == 2) || (tmp == 4))
3715 + printk("%01x ", tmp >> 1);
3716 + else if (tmp == 3)
3718 + else if (tmp == 5)
3720 + else if (tmp == 6)
3723 + printk("%4ld\n", *(ConfigData + (2047 - i) * 11 + 9));
3725 + kfree(ConfigData);
3729 + case ESW_GET_PORT_STATUS:
3731 + unsigned long PortBlocking;
3733 + esw_get_port_blocking(fep, &PortBlocking);
3735 + ports_link_status.port0_block_status = PortBlocking & 1;
3736 + ports_link_status.port1_block_status = (PortBlocking >> 1) & 1;
3737 + ports_link_status.port2_block_status = PortBlocking >> 2;
3739 + ret = copy_to_user(ifr->ifr_data, &ports_link_status,
3740 + sizeof(ports_link_status));
3746 + case ESW_GET_PORT_ALL_STATUS:
3748 + unsigned char portnum;
3749 + struct port_all_status port_astatus;
3751 + ret = copy_from_user(&portnum,
3752 + ifr->ifr_data, sizeof(portnum));
3756 + esw_get_port_all_status(fep, portnum, &port_astatus);
3757 + printk(KERN_INFO "Port %d status:\n", portnum);
3758 + printk(KERN_INFO "Link:%-4s Blocking:%1s "
3760 + port_astatus.link_status ? "Up" : "Down",
3761 + port_astatus.block_status ? "Y" : "N",
3762 + port_astatus.learn_status ? "N" : "Y");
3763 + printk(KERN_INFO "VLAN Verify:%1s Discard Unknown:%1s "
3764 + "Multicast Res:%1s\n",
3765 + port_astatus.vlan_verify ? "Y" : "N",
3766 + port_astatus.discard_unknown ? "Y" : "N",
3767 + port_astatus.multi_reso ? "Y" : "N");
3768 + printk(KERN_INFO "Broadcast Res:%1s Transmit:%-7s "
3770 + port_astatus.broad_reso ? "Y" : "N",
3771 + port_astatus.ftransmit ? "Enable" : "Disable",
3772 + port_astatus.freceive ? "Enable" : "Disable");
3777 + case ESW_GET_USER_PID:
3780 + ret = copy_from_user(&get_pid,
3781 + ifr->ifr_data, sizeof(get_pid));
3785 + user_pid = get_pid;
3788 + /*------------------------------------------------------------------*/
3790 + return -EOPNOTSUPP;
3796 +static netdev_tx_t switch_enet_start_xmit(struct sk_buff *skb,
3797 + struct net_device *dev)
3799 + struct switch_enet_private *fep;
3800 + volatile switch_t *fecp;
3802 + unsigned short status;
3803 + unsigned long flags;
3805 + fep = netdev_priv(dev);
3806 + fecp = (switch_t *)fep->hwp;
3808 + spin_lock_irqsave(&fep->hw_lock, flags);
3809 + /* Fill in a Tx ring entry */
3810 + bdp = fep->cur_tx;
3812 + status = bdp->cbd_sc;
3814 + /* Clear all of the status flags.
3816 + status &= ~BD_ENET_TX_STATS;
3818 + /* Set buffer length and buffer pointer.
3820 + bdp->cbd_bufaddr = __pa(skb->data);
3821 + bdp->cbd_datlen = skb->len;
3824 + * On some FEC implementations data must be aligned on
3825 + * 4-byte boundaries. Use bounce buffers to copy data
3826 + * and get it aligned. Ugh.
3828 + if (bdp->cbd_bufaddr & 0x3) {
3829 + unsigned int index1;
3830 + index1 = bdp - fep->tx_bd_base;
3832 + memcpy(fep->tx_bounce[index1],
3833 + (void *)skb->data, bdp->cbd_datlen);
3834 + bdp->cbd_bufaddr = __pa(fep->tx_bounce[index1]);
3837 + /* Save skb pointer. */
3838 + fep->tx_skbuff[fep->skb_cur] = skb;
3840 + dev->stats.tx_bytes += skb->len;
3841 + fep->skb_cur = (fep->skb_cur+1) & TX_RING_MOD_MASK;
3843 + /* Push the data cache so the CPM does not get stale memory
3846 + flush_dcache_range((unsigned long)skb->data,
3847 + (unsigned long)skb->data + skb->len);
3849 + /* Send it on its way. Tell FEC it's ready, interrupt when done,
3850 + * it's the last BD of the frame, and to put the CRC on the end.
3853 + status |= (BD_ENET_TX_READY | BD_ENET_TX_INTR
3854 + | BD_ENET_TX_LAST | BD_ENET_TX_TC);
3855 + bdp->cbd_sc = status;
3856 + dev->trans_start = jiffies;
3858 + /* Trigger transmission start */
3859 + fecp->fec_x_des_active = MCF_ESW_TDAR_X_DES_ACTIVE;
3861 + /* If this was the last BD in the ring,
3862 + * start at the beginning again.*/
3863 + if (status & BD_ENET_TX_WRAP)
3864 + bdp = fep->tx_bd_base;
3868 + if (bdp == fep->dirty_tx) {
3870 + netif_stop_queue(dev);
3871 + printk(KERN_ERR "%s: net stop\n", __func__);
3874 + fep->cur_tx = (cbd_t *)bdp;
3876 + spin_unlock_irqrestore(&fep->hw_lock, flags);
3878 + return NETDEV_TX_OK;
3881 +static void switch_timeout(struct net_device *dev)
3883 + struct switch_enet_private *fep = netdev_priv(dev);
3885 + printk(KERN_ERR "%s: transmit timed out.\n", dev->name);
3886 + dev->stats.tx_errors++;
3887 + switch_restart(dev, fep->full_duplex);
3888 + netif_wake_queue(dev);
3891 +/* The interrupt handler.
3892 + * This is called from the MPC core interrupt.
3894 +static irqreturn_t switch_enet_interrupt(int irq, void *dev_id)
3896 + struct net_device *dev = dev_id;
3897 + volatile switch_t *fecp;
3899 + irqreturn_t ret = IRQ_NONE;
3901 + fecp = (switch_t *)dev->base_addr;
3903 + /* Get the interrupt events that caused us to be here.
3906 + int_events = fecp->switch_ievent;
3907 + fecp->switch_ievent = int_events;
3908 + /* Handle receive event in its own function. */
3910 + /* Transmit OK, or non-fatal error. Update the buffer
3911 + descriptors. Switch handles all errors, we just discover
3912 + them as part of the transmit process.
3914 + if (int_events & MCF_ESW_ISR_OD0)
3915 + ret = IRQ_HANDLED;
3917 + if (int_events & MCF_ESW_ISR_OD1)
3918 + ret = IRQ_HANDLED;
3920 + if (int_events & MCF_ESW_ISR_OD2)
3921 + ret = IRQ_HANDLED;
3923 + if (int_events & MCF_ESW_ISR_RXB)
3924 + ret = IRQ_HANDLED;
3926 + if (int_events & MCF_ESW_ISR_RXF) {
3927 + ret = IRQ_HANDLED;
3928 + switch_enet_rx(dev);
3931 + if (int_events & MCF_ESW_ISR_TXB)
3932 + ret = IRQ_HANDLED;
3934 + if (int_events & MCF_ESW_ISR_TXF) {
3935 + ret = IRQ_HANDLED;
3936 + switch_enet_tx(dev);
3939 + } while (int_events);
3944 +static void switch_enet_tx(struct net_device *dev)
3946 + struct switch_enet_private *fep;
3948 + unsigned short status;
3949 + struct sk_buff *skb;
3951 + fep = netdev_priv(dev);
3952 + spin_lock_irq(&fep->hw_lock);
3953 + bdp = fep->dirty_tx;
3955 + while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) {
3956 + if (bdp == fep->cur_tx && fep->tx_full == 0)
3959 + skb = fep->tx_skbuff[fep->skb_dirty];
3960 + /* Check for errors. */
3961 + if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
3962 + BD_ENET_TX_RL | BD_ENET_TX_UN |
3963 + BD_ENET_TX_CSL)) {
3964 + dev->stats.tx_errors++;
3965 + if (status & BD_ENET_TX_HB) /* No heartbeat */
3966 + dev->stats.tx_heartbeat_errors++;
3967 + if (status & BD_ENET_TX_LC) /* Late collision */
3968 + dev->stats.tx_window_errors++;
3969 + if (status & BD_ENET_TX_RL) /* Retrans limit */
3970 + dev->stats.tx_aborted_errors++;
3971 + if (status & BD_ENET_TX_UN) /* Underrun */
3972 + dev->stats.tx_fifo_errors++;
3973 + if (status & BD_ENET_TX_CSL) /* Carrier lost */
3974 + dev->stats.tx_carrier_errors++;
3976 + dev->stats.tx_packets++;
3979 + /* Deferred means some collisions occurred during transmit,
3980 + * but we eventually sent the packet OK.
3982 + if (status & BD_ENET_TX_DEF)
3983 + dev->stats.collisions++;
3985 + /* Free the sk buffer associated with this last transmit.
3987 + dev_kfree_skb_any(skb);
3988 + fep->tx_skbuff[fep->skb_dirty] = NULL;
3989 + fep->skb_dirty = (fep->skb_dirty + 1) & TX_RING_MOD_MASK;
3991 + /* Update pointer to next buffer descriptor to be transmitted.
3993 + if (status & BD_ENET_TX_WRAP)
3994 + bdp = fep->tx_bd_base;
3998 + /* Since we have freed up a buffer, the ring is no longer
4001 + if (fep->tx_full) {
4003 + printk(KERN_ERR "%s: tx full is zero\n", __func__);
4004 + if (netif_queue_stopped(dev))
4005 + netif_wake_queue(dev);
4008 + fep->dirty_tx = (cbd_t *)bdp;
4009 + spin_unlock_irq(&fep->hw_lock);
4013 +/* During a receive, the cur_rx points to the current incoming buffer.
4014 + * When we update through the ring, if the next incoming buffer has
4015 + * not been given to the system, we just set the empty indicator,
4016 + * effectively tossing the packet.
4018 +static void switch_enet_rx(struct net_device *dev)
4020 + struct switch_enet_private *fep;
4021 + volatile switch_t *fecp;
4023 + unsigned short status;
4024 + struct sk_buff *skb;
4028 + fep = netdev_priv(dev);
4029 + /*fecp = (volatile switch_t *)dev->base_addr;*/
4030 + fecp = (volatile switch_t *)fep->hwp;
4032 + spin_lock_irq(&fep->hw_lock);
4033 + /* First, grab all of the stats for the incoming packet.
4034 + * These get messed up if we get called due to a busy condition.
4036 + bdp = fep->cur_rx;
4038 + while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) {
4040 + /* Since we have allocated space to hold a complete frame,
4041 + * the last indicator should be set.
4043 + if ((status & BD_ENET_RX_LAST) == 0)
4044 + printk(KERN_ERR "SWITCH ENET: rcv is not +last\n");
4047 + goto rx_processing_done;
4049 + /* Check for errors. */
4050 + if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
4051 + BD_ENET_RX_CR | BD_ENET_RX_OV)) {
4052 + dev->stats.rx_errors++;
4053 + if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH)) {
4054 + /* Frame too long or too short. */
4055 + dev->stats.rx_length_errors++;
4057 + if (status & BD_ENET_RX_NO) /* Frame alignment */
4058 + dev->stats.rx_frame_errors++;
4059 + if (status & BD_ENET_RX_CR) /* CRC Error */
4060 + dev->stats.rx_crc_errors++;
4061 + if (status & BD_ENET_RX_OV) /* FIFO overrun */
4062 + dev->stats.rx_fifo_errors++;
4064 + /* Report late collisions as a frame error.
4065 + * On this error, the BD is closed, but we don't know what we
4066 + * have in the buffer. So, just drop this frame on the floor.
4068 + if (status & BD_ENET_RX_CL) {
4069 + dev->stats.rx_errors++;
4070 + dev->stats.rx_frame_errors++;
4071 + goto rx_processing_done;
4073 + /* Process the incoming frame */
4074 + dev->stats.rx_packets++;
4075 + pkt_len = bdp->cbd_datlen;
4076 + dev->stats.rx_bytes += pkt_len;
4077 + data = (__u8 *)__va(bdp->cbd_bufaddr);
4079 + /* This does 16 byte alignment, exactly what we need.
4080 + * The packet length includes FCS, but we don't want to
4081 + * include that when passing upstream as it messes up
4082 + * bridging applications.
4084 + skb = dev_alloc_skb(pkt_len);
4087 + dev->stats.rx_dropped++;
4089 + skb_put(skb, pkt_len); /* Make room */
4090 + skb_copy_to_linear_data(skb, data, pkt_len);
4091 + skb->protocol = eth_type_trans(skb, dev);
4094 +rx_processing_done:
4096 + /* Clear the status flags for this buffer */
4097 + status &= ~BD_ENET_RX_STATS;
4099 + /* Mark the buffer empty */
4100 + status |= BD_ENET_RX_EMPTY;
4101 + bdp->cbd_sc = status;
4103 + /* Update BD pointer to next entry */
4104 + if (status & BD_ENET_RX_WRAP)
4105 + bdp = fep->rx_bd_base;
4109 + /* Doing this here will keep the FEC running while we process
4110 + * incoming frames. On a heavily loaded network, we should be
4111 + * able to keep up at the expense of system resources.
4113 + fecp->fec_r_des_active = MCF_ESW_RDAR_R_DES_ACTIVE;
4115 + fep->cur_rx = (cbd_t *)bdp;
4117 + spin_unlock_irq(&fep->hw_lock);
4120 +static int fec_mdio_transfer(struct mii_bus *bus, int phy_id,
4121 + int reg, int regval)
4123 + struct net_device *dev = bus->priv;
4124 + unsigned long flags;
4125 + struct switch_enet_private *fep;
4129 + fep = netdev_priv(dev);
4130 + spin_lock_irqsave(&fep->mii_lock, flags);
4132 + regval |= phy_id << 23;
4133 + MCF_FEC_MMFR0 = regval;
4135 + /* wait for it to finish, this takes about 23 us on lite5200b */
4136 + while (!(MCF_FEC_EIR0 & FEC_ENET_MII) && --tries)
4140 + printk(KERN_ERR "%s timeout\n", __func__);
4141 + return -ETIMEDOUT;
4144 + MCF_FEC_EIR0 = FEC_ENET_MII;
4145 + retval = MCF_FEC_MMFR0;
4146 + spin_unlock_irqrestore(&fep->mii_lock, flags);
4152 +static int coldfire_fec_mdio_read(struct mii_bus *bus,
4153 + int phy_id, int reg)
4156 + ret = fec_mdio_transfer(bus, phy_id, reg,
4157 + mk_mii_read(reg));
4161 +static int coldfire_fec_mdio_write(struct mii_bus *bus,
4162 + int phy_id, int reg, u16 data)
4164 + return fec_mdio_transfer(bus, phy_id, reg,
4165 + mk_mii_write(reg, data));
4168 +static void switch_adjust_link1(struct net_device *dev)
4170 + struct switch_enet_private *priv = netdev_priv(dev);
4171 + struct phy_device *phydev1 = priv->phydev[0];
4172 + int new_state = 0;
4174 + if (phydev1->link != PHY_DOWN) {
4175 + if (phydev1->duplex != priv->phy1_duplex) {
4177 + priv->phy1_duplex = phydev1->duplex;
4180 + if (phydev1->speed != priv->phy1_speed) {
4182 + priv->phy1_speed = phydev1->speed;
4185 + if (priv->phy1_old_link == PHY_DOWN) {
4187 + priv->phy1_old_link = phydev1->link;
4189 + } else if (priv->phy1_old_link) {
4191 + priv->phy1_old_link = PHY_DOWN;
4192 + priv->phy1_speed = 0;
4193 + priv->phy1_duplex = -1;
4197 + ports_link_status.port1_link_status = phydev1->link;
4198 + if (phydev1->link == PHY_DOWN)
4199 + esw_atable_dynamicms_del_entries_for_port(priv, 1);
4201 + /*Send the new status to user space*/
4202 + if (user_pid != 1)
4203 + sys_tkill(user_pid, SIGUSR1);
4207 +static void switch_adjust_link2(struct net_device *dev)
4209 + struct switch_enet_private *priv = netdev_priv(dev);
4210 + struct phy_device *phydev2 = priv->phydev[1];
4211 + int new_state = 0;
4213 + if (phydev2->link != PHY_DOWN) {
4214 + if (phydev2->duplex != priv->phy2_duplex) {
4216 + priv->phy2_duplex = phydev2->duplex;
4219 + if (phydev2->speed != priv->phy2_speed) {
4221 + priv->phy2_speed = phydev2->speed;
4224 + if (priv->phy2_old_link == PHY_DOWN) {
4226 + priv->phy2_old_link = phydev2->link;
4228 + } else if (priv->phy2_old_link) {
4230 + priv->phy2_old_link = PHY_DOWN;
4231 + priv->phy2_speed = 0;
4232 + priv->phy2_duplex = -1;
4236 + ports_link_status.port2_link_status = phydev2->link;
4237 + if (phydev2->link == PHY_DOWN)
4238 + esw_atable_dynamicms_del_entries_for_port(priv, 2);
4240 + /*Send the new status to user space*/
4241 + if (user_pid != 1)
4242 + sys_tkill(user_pid, SIGUSR1);
4246 +static int coldfire_switch_init_phy(struct net_device *dev)
4248 + struct switch_enet_private *priv = netdev_priv(dev);
4249 + struct phy_device *phydev[SWITCH_EPORT_NUMBER] = {NULL, NULL};
4250 + int i, startnode = 0;
4252 + /* search for connect PHY device */
4253 + for (i = 0; i < PHY_MAX_ADDR; i++) {
4254 + struct phy_device *const tmp_phydev =
4255 + priv->mdio_bus->phy_map[i];
4260 +#ifdef CONFIG_FEC_SHARED_PHY
4261 + if (priv->index == 0)
4262 + phydev[i] = tmp_phydev;
4263 + else if (priv->index == 1) {
4264 + if (startnode == 1) {
4265 + phydev[i] = tmp_phydev;
4272 + printk(KERN_INFO "%s now we do not"
4273 + "support (%d) more than"
4276 + __func__, startnode);
4278 + phydev[i] = tmp_phydev;
4282 + /* now we are supposed to have a proper phydev, to attach to... */
4283 + if ((!phydev[0]) && (!phydev[1])) {
4284 + printk(KERN_INFO "%s: Don't found any phy device at all\n",
4289 + priv->phy1_link = PHY_DOWN;
4290 + priv->phy1_old_link = PHY_DOWN;
4291 + priv->phy1_speed = 0;
4292 + priv->phy1_duplex = -1;
4294 + priv->phy2_link = PHY_DOWN;
4295 + priv->phy2_old_link = PHY_DOWN;
4296 + priv->phy2_speed = 0;
4297 + priv->phy2_duplex = -1;
4299 + phydev[0] = phy_connect(dev, dev_name(&phydev[0]->dev),
4300 + &switch_adjust_link1, 0, PHY_INTERFACE_MODE_MII);
4301 + if (IS_ERR(phydev[0])) {
4302 + printk(KERN_ERR " %s phy_connect failed\n", __func__);
4303 + return PTR_ERR(phydev[0]);
4306 + phydev[1] = phy_connect(dev, dev_name(&phydev[1]->dev),
4307 + &switch_adjust_link2, 0, PHY_INTERFACE_MODE_MII);
4308 + if (IS_ERR(phydev[1])) {
4309 + printk(KERN_ERR " %s phy_connect failed\n", __func__);
4310 + return PTR_ERR(phydev[1]);
4313 + for (i = 0; i < SWITCH_EPORT_NUMBER; i++) {
4314 + printk(KERN_INFO "attached phy %i to driver %s\n",
4315 + phydev[i]->addr, phydev[i]->drv->name);
4316 + priv->phydev[i] = phydev[i];
4321 +/* -----------------------------------------------------------------------*/
4322 +static int switch_enet_open(struct net_device *dev)
4324 + struct switch_enet_private *fep = netdev_priv(dev);
4325 + volatile switch_t *fecp;
4328 + fecp = (volatile switch_t *)fep->hwp;
4329 + /* I should reset the ring buffers here, but I don't yet know
4330 + * a simple way to do that.
4332 + switch_set_mac_address(dev);
4334 + fep->phy1_link = 0;
4335 + fep->phy2_link = 0;
4337 + coldfire_switch_init_phy(dev);
4338 + for (i = 0; i < SWITCH_EPORT_NUMBER; i++) {
4339 + phy_write(fep->phydev[i], MII_BMCR, BMCR_RESET);
4340 + phy_start(fep->phydev[i]);
4343 + fep->phy1_old_link = 0;
4344 + fep->phy2_old_link = 0;
4345 + fep->phy1_link = 1;
4346 + fep->phy2_link = 1;
4348 + /* no phy, go full duplex, it's most likely a hub chip */
4349 + switch_restart(dev, 1);
4351 + /* if the fec is the first open, we need to do nothing*/
4352 + /* if the fec is not the first open, we need to restart the FEC*/
4353 + if (fep->sequence_done == 0)
4354 + switch_restart(dev, 1);
4356 + fep->sequence_done = 0;
4358 + fep->currTime = 0;
4359 + fep->learning_irqhandle_enable = 0;
4361 + MCF_ESW_PER = 0x70007;
4362 + fecp->ESW_DBCR = MCF_ESW_DBCR_P0 | MCF_ESW_DBCR_P1 | MCF_ESW_DBCR_P2;
4363 + fecp->ESW_DMCR = MCF_ESW_DMCR_P0 | MCF_ESW_DMCR_P1 | MCF_ESW_DMCR_P2;
4365 + netif_start_queue(dev);
4371 +static int switch_enet_close(struct net_device *dev)
4373 + struct switch_enet_private *fep = netdev_priv(dev);
4376 + /* Don't know what to do yet.*/
4378 + netif_stop_queue(dev);
4381 + for (i = 0; i < SWITCH_EPORT_NUMBER; i++) {
4382 + phy_disconnect(fep->phydev[i]);
4383 + phy_stop(fep->phydev[i]);
4384 + phy_write(fep->phydev[i], MII_BMCR, BMCR_PDOWN);
4390 +/* Set or clear the multicast filter for this adaptor.
4391 + * Skeleton taken from sunlance driver.
4392 + * The CPM Ethernet implementation allows Multicast as well as individual
4393 + * MAC address filtering. Some of the drivers check to make sure it is
4394 + * a group multicast address, and discard those that are not. I guess I
4395 + * will do the same for now, but just remove the test if you want
4396 + * individual filtering as well (do the upper net layers want or support
4397 + * this kind of feature?).
4400 +#define HASH_BITS 6 /* #bits in hash */
4401 +#define CRC32_POLY 0xEDB88320
4403 +static void set_multicast_list(struct net_device *dev)
4405 + struct switch_enet_private *fep;
4406 + volatile switch_t *ep;
4407 + unsigned int i, bit, data, crc;
4408 + struct netdev_hw_addr *ha;
4410 + fep = netdev_priv(dev);
4413 + if (dev->flags & IFF_PROMISC) {
4414 + printk(KERN_INFO "%s IFF_PROMISC\n", __func__);
4416 + if (dev->flags & IFF_ALLMULTI)
4417 + /* Catch all multicast addresses, so set the
4418 + * filter to all 1's.
4420 + printk(KERN_INFO "%s IFF_ALLMULTI\n", __func__);
4422 + netdev_for_each_mc_addr(ha, dev) {
4423 + if (!(ha->addr[0] & 1))
4426 + /* calculate crc32 value of mac address
4430 + for (i = 0; i < dev->addr_len; i++) {
4431 + data = ha->addr[i];
4432 + for (bit = 0; bit < 8; bit++,
4434 + crc = (crc >> 1) ^
4435 + (((crc ^ data) & 1) ?
4445 +/* Set a MAC change in hardware.*/
4446 +static void switch_set_mac_address(struct net_device *dev)
4448 + volatile switch_t *fecp;
4450 + fecp = ((struct switch_enet_private *)netdev_priv(dev))->hwp;
4453 +static void switch_hw_init(void)
4455 + /* GPIO config - RMII mode for both MACs */
4456 + MCF_GPIO_PAR_FEC = (MCF_GPIO_PAR_FEC &
4457 + MCF_GPIO_PAR_FEC_FEC_MASK) |
4458 + MCF_GPIO_PAR_FEC_FEC_RMII0FUL_1FUL;
4460 + /* Initialize MAC 0/1 */
4462 + MCF_FEC_RCR0 = (MCF_FEC_RCR_PROM | MCF_FEC_RCR_RMII_MODE |
4463 + MCF_FEC_RCR_MAX_FL(1522) | MCF_FEC_RCR_CRC_FWD);
4464 + MCF_FEC_RCR1 = (MCF_FEC_RCR_PROM | MCF_FEC_RCR_RMII_MODE |
4465 + MCF_FEC_RCR_MAX_FL(1522) | MCF_FEC_RCR_CRC_FWD);
4467 + MCF_FEC_TCR0 = MCF_FEC_TCR_FDEN;
4468 + MCF_FEC_TCR1 = MCF_FEC_TCR_FDEN;
4470 +#ifdef MODELO_BUFFER
4471 + MCF_FEC_ECR0 = MCF_FEC_ECR_ETHER_EN | MCF_FEC_ECR_ENA_1588;
4472 + MCF_FEC_ECR1 = MCF_FEC_ECR_ETHER_EN | MCF_FEC_ECR_ENA_1588;
4474 + MCF_FEC_ECR0 = MCF_FEC_ECR_ETHER_EN;
4475 + MCF_FEC_ECR1 = MCF_FEC_ECR_ETHER_EN;
4477 + MCF_FEC_MSCR0 = ((((MCF_CLK / 2) / (2500000 / 10)) + 5) / 10) * 2;
4478 + MCF_FEC_MSCR1 = ((((MCF_CLK / 2) / (2500000 / 10)) + 5) / 10) * 2;
4480 + MCF_FEC_EIMR0 = FEC_ENET_TXF | FEC_ENET_RXF;
4481 + MCF_FEC_EIMR1 = FEC_ENET_TXF | FEC_ENET_RXF;
4486 +static const struct net_device_ops switch_netdev_ops = {
4487 + .ndo_open = switch_enet_open,
4488 + .ndo_stop = switch_enet_close,
4489 + .ndo_start_xmit = switch_enet_start_xmit,
4490 + .ndo_set_multicast_list = set_multicast_list,
4491 + .ndo_do_ioctl = switch_enet_ioctl,
4492 + .ndo_tx_timeout = switch_timeout,
4495 +/* Initialize the FEC Ethernet.
4498 + * XXX: We need to clean up on failure exits here.
4500 +static int switch_enet_init(struct platform_device *pdev)
4502 + struct net_device *dev = platform_get_drvdata(pdev);
4503 + struct switch_enet_private *fep = netdev_priv(dev);
4504 + unsigned long mem_addr;
4507 + volatile switch_t *fecp;
4509 + struct coldfire_switch_platform_data *plat =
4510 + pdev->dev.platform_data;
4512 + /* Allocate memory for buffer descriptors.
4514 + mem_addr = __get_free_page(GFP_DMA);
4515 + if (mem_addr == 0) {
4516 + printk(KERN_ERR "Switch: allocate descriptor memory failed?\n");
4520 + spin_lock_init(&fep->hw_lock);
4521 + spin_lock_init(&fep->mii_lock);
4523 + /* Create an Ethernet device instance.
4525 + fecp = (volatile switch_t *)plat->switch_hw[0];
4527 + fep->netdev = dev;
4530 + * SWITCH CONFIGURATION
4532 + fecp->ESW_MODE = MCF_ESW_MODE_SW_RST;
4534 + /* enable switch*/
4535 + fecp->ESW_MODE = MCF_ESW_MODE_STATRST;
4536 + fecp->ESW_MODE = MCF_ESW_MODE_SW_EN;
4538 + /* Enable transmit/receive on all ports */
4539 + fecp->ESW_PER = 0xffffffff;
4541 + /* Management port configuration,
4542 + * make port 0 as management port */
4543 + fecp->ESW_BMPC = 0;
4545 + /* clear all switch irq*/
4546 + fecp->switch_ievent = 0xffffffff;
4547 + fecp->switch_imask = 0;
4551 + /* Set the Ethernet address. If using multiple Enets on the 8xx,
4552 + * this needs some work to get unique addresses.
4554 + * This is our default MAC address unless the user changes
4555 + * it via eth_mac_addr (our dev->set_mac_addr handler).
4557 + if (plat && plat->get_mac)
4558 + plat->get_mac(dev);
4560 + cbd_base = (cbd_t *)mem_addr;
4561 + /* XXX: missing check for allocation failure */
4562 + if (plat && plat->uncache)
4563 + plat->uncache(mem_addr);
4565 + /* Set receive and transmit descriptor base.
4567 + fep->rx_bd_base = cbd_base;
4568 + fep->tx_bd_base = cbd_base + RX_RING_SIZE;
4570 + dev->base_addr = (unsigned long)fecp;
4572 + /* The FEC Ethernet specific entries in the device structure. */
4573 + dev->watchdog_timeo = TX_TIMEOUT;
4574 + dev->netdev_ops = &switch_netdev_ops;
4576 + fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
4577 + fep->cur_rx = fep->rx_bd_base;
4579 + fep->skb_cur = fep->skb_dirty = 0;
4581 + /* Initialize the receive buffer descriptors. */
4582 + bdp = fep->rx_bd_base;
4584 + for (i = 0; i < SWITCH_ENET_RX_PAGES; i++) {
4586 + /* Allocate a page.
4588 + mem_addr = __get_free_page(GFP_DMA);
4589 + /* XXX: missing check for allocation failure */
4590 + if (plat && plat->uncache)
4591 + plat->uncache(mem_addr);
4593 + /* Initialize the BD for every fragment in the page.
4595 + for (j = 0; j < SWITCH_ENET_RX_FRPPG; j++) {
4596 + bdp->cbd_sc = BD_ENET_RX_EMPTY;
4597 + bdp->cbd_bufaddr = __pa(mem_addr);
4598 +#ifdef MODELO_BUFFER
4599 + bdp->bdu = 0x00000000;
4600 + bdp->ebd_status = RX_BD_INT;
4602 + mem_addr += SWITCH_ENET_RX_FRSIZE;
4607 + /* Set the last buffer to wrap.
4610 + bdp->cbd_sc |= BD_SC_WRAP;
4612 + /* ...and the same for transmit.
4614 + bdp = fep->tx_bd_base;
4615 + for (i = 0, j = SWITCH_ENET_TX_FRPPG; i < TX_RING_SIZE; i++) {
4616 + if (j >= SWITCH_ENET_TX_FRPPG) {
4617 + mem_addr = __get_free_page(GFP_DMA);
4620 + mem_addr += SWITCH_ENET_TX_FRSIZE;
4623 + fep->tx_bounce[i] = (unsigned char *) mem_addr;
4625 + /* Initialize the BD for every fragment in the page.
4628 + bdp->cbd_bufaddr = 0;
4632 + /* Set the last buffer to wrap.
4635 + bdp->cbd_sc |= BD_SC_WRAP;
4637 + /* Set receive and transmit descriptor base.
4639 + fecp->fec_r_des_start = __pa((uint)(fep->rx_bd_base));
4640 + fecp->fec_x_des_start = __pa((uint)(fep->tx_bd_base));
4642 + /* Install our interrupt handlers. This varies depending on
4643 + * the architecture.
4645 + if (plat && plat->request_intrs)
4646 + plat->request_intrs(dev, switch_enet_interrupt, dev);
4648 + fecp->fec_r_buff_size = RX_BUFFER_SIZE;
4649 + fecp->fec_r_des_active = MCF_ESW_RDAR_R_DES_ACTIVE;
4651 + /* setup MII interface */
4652 + if (plat && plat->set_mii)
4653 + plat->set_mii(dev);
4655 + /* Clear and enable interrupts */
4656 + fecp->switch_ievent = 0xffffffff;
4657 + fecp->switch_imask = MCF_ESW_IMR_RXB | MCF_ESW_IMR_TXB |
4658 + MCF_ESW_IMR_RXF | MCF_ESW_IMR_TXF;
4659 + esw_clear_atable(fep);
4660 + /* Queue up command to detect the PHY and initialize the
4661 + * remainder of the interface.
4663 +#ifndef CONFIG_FEC_SHARED_PHY
4664 + fep->phy_addr = 0;
4666 + fep->phy_addr = fep->index;
4669 + fep->sequence_done = 1;
4673 +/* This function is called to start or restart the FEC during a link
4674 + * change. This only happens when switching between half and full
4677 +static void switch_restart(struct net_device *dev, int duplex)
4679 + struct switch_enet_private *fep;
4681 + volatile switch_t *fecp;
4683 + struct coldfire_switch_platform_data *plat;
4685 + fep = netdev_priv(dev);
4687 + plat = fep->pdev->dev.platform_data;
4688 + /* Whack a reset. We should wait for this.*/
4693 + fecp->ESW_MODE = MCF_ESW_MODE_SW_RST;
4695 + fecp->ESW_MODE = MCF_ESW_MODE_STATRST;
4696 + fecp->ESW_MODE = MCF_ESW_MODE_SW_EN;
4698 + /* Enable transmit/receive on all ports */
4699 + fecp->ESW_PER = 0xffffffff;
4701 + /* Management port configuration,
4702 + * make port 0 as management port */
4703 + fecp->ESW_BMPC = 0;
4705 + /* Clear any outstanding interrupt.
4707 + fecp->switch_ievent = 0xffffffff;
4709 + /* Set station address.*/
4710 + switch_set_mac_address(dev);
4714 + /* Reset all multicast.*/
4716 + /* Set maximum receive buffer size.
4718 + fecp->fec_r_buff_size = PKT_MAXBLR_SIZE;
4720 + if (plat && plat->localhw_setup)
4721 + plat->localhw_setup();
4722 + /* Set receive and transmit descriptor base.
4724 + fecp->fec_r_des_start = __pa((uint)(fep->rx_bd_base));
4725 + fecp->fec_x_des_start = __pa((uint)(fep->tx_bd_base));
4727 + fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
4728 + fep->cur_rx = fep->rx_bd_base;
4730 + /* Reset SKB transmit buffers.
4732 + fep->skb_cur = fep->skb_dirty = 0;
4733 + for (i = 0; i <= TX_RING_MOD_MASK; i++) {
4734 + if (fep->tx_skbuff[i] != NULL) {
4735 + dev_kfree_skb_any(fep->tx_skbuff[i]);
4736 + fep->tx_skbuff[i] = NULL;
4740 + /* Initialize the receive buffer descriptors.
4742 + bdp = fep->rx_bd_base;
4743 + for (i = 0; i < RX_RING_SIZE; i++) {
4745 + /* Initialize the BD for every fragment in the page.
4747 + bdp->cbd_sc = BD_ENET_RX_EMPTY;
4748 +#ifdef MODELO_BUFFER
4749 + bdp->bdu = 0x00000000;
4750 + bdp->ebd_status = RX_BD_INT;
4755 + /* Set the last buffer to wrap.
4758 + bdp->cbd_sc |= BD_SC_WRAP;
4760 + /* ...and the same for transmit.
4762 + bdp = fep->tx_bd_base;
4763 + for (i = 0; i < TX_RING_SIZE; i++) {
4765 + /* Initialize the BD for every fragment in the page.*/
4767 + bdp->cbd_bufaddr = 0;
4771 + /* Set the last buffer to wrap.*/
4773 + bdp->cbd_sc |= BD_SC_WRAP;
4775 + fep->full_duplex = duplex;
4777 + /* And last, enable the transmit and receive processing.*/
4778 + fecp->fec_r_buff_size = RX_BUFFER_SIZE;
4779 + fecp->fec_r_des_active = MCF_ESW_RDAR_R_DES_ACTIVE;
4781 + /* Enable interrupts we wish to service.
4783 + fecp->switch_ievent = 0xffffffff;
4784 + fecp->switch_imask = MCF_ESW_IMR_RXF | MCF_ESW_IMR_TXF |
4785 + MCF_ESW_IMR_RXB | MCF_ESW_IMR_TXB;
4788 +static void switch_stop(struct net_device *dev)
4790 + volatile switch_t *fecp;
4791 + struct switch_enet_private *fep;
4792 + struct coldfire_switch_platform_data *plat;
4794 + fep = netdev_priv(dev);
4796 + plat = fep->pdev->dev.platform_data;
4798 + ** We cannot expect a graceful transmit stop without link !!!
4800 + if (fep->phy1_link)
4802 + if (fep->phy2_link)
4805 + /* Whack a reset. We should wait for this.
4810 +static int fec_mdio_register(struct net_device *dev)
4813 + struct switch_enet_private *fep = netdev_priv(dev);
4815 + fep->mdio_bus = mdiobus_alloc();
4816 + if (!fep->mdio_bus) {
4817 + printk(KERN_ERR "ethernet switch mdiobus_alloc fail\n");
4821 + fep->mdio_bus->name = "Coldfire switch MII 0 Bus";
4822 + strcpy(fep->mdio_bus->id, "0");
4824 + fep->mdio_bus->read = &coldfire_fec_mdio_read;
4825 + fep->mdio_bus->write = &coldfire_fec_mdio_write;
4826 + fep->mdio_bus->priv = dev;
4827 + err = mdiobus_register(fep->mdio_bus);
4829 + mdiobus_free(fep->mdio_bus);
4830 + printk(KERN_ERR "%s: ethernet mdiobus_register fail\n",
4835 + printk(KERN_INFO "mdiobus_register %s ok\n",
4836 + fep->mdio_bus->name);
4840 +static int __devinit eth_switch_probe(struct platform_device *pdev)
4842 + struct net_device *dev;
4844 + struct switch_enet_private *fep;
4845 + struct task_struct *task;
4847 + printk(KERN_INFO "Ethernet Switch Version 1.0\n");
4849 + dev = alloc_etherdev(sizeof(struct switch_enet_private));
4851 + printk(KERN_ERR "%s: ethernet switch alloc_etherdev fail\n",
4856 + SET_NETDEV_DEV(dev, &pdev->dev);
4858 + fep = netdev_priv(dev);
4859 + memset(fep, 0, sizeof(*fep));
4862 + platform_set_drvdata(pdev, dev);
4863 + printk(KERN_ERR "%s: ethernet switch port 0 init\n",
4865 + err = switch_enet_init(pdev);
4868 + platform_set_drvdata(pdev, NULL);
4871 + err = fec_mdio_register(dev);
4873 + printk(KERN_ERR "%s: ethernet switch fec_mdio_register\n",
4876 + platform_set_drvdata(pdev, NULL);
4880 + /* setup timer for Learning Aging function */
4881 + init_timer(&fep->timer_aging);
4882 + fep->timer_aging.function = l2switch_aging_timer;
4883 + fep->timer_aging.data = (unsigned long) fep;
4884 + fep->timer_aging.expires = jiffies + LEARNING_AGING_TIMER;
4885 + add_timer(&fep->timer_aging);
4887 + /* register network device*/
4888 + if (register_netdev(dev) != 0) {
4889 + /* XXX: missing cleanup here */
4891 + platform_set_drvdata(pdev, NULL);
4892 + printk(KERN_ERR "%s: ethernet switch register_netdev fail\n",
4897 + task = kthread_run(switch_enet_learning, fep,
4898 + "modelo l2switch");
4899 + if (IS_ERR(task)) {
4900 + err = PTR_ERR(task);
4904 + printk(KERN_INFO "%s: ethernet switch %pM\n",
4905 + dev->name, dev->dev_addr);
4909 +static int __devexit eth_switch_remove(struct platform_device *pdev)
4912 + struct net_device *dev;
4913 + struct switch_enet_private *fep;
4914 + struct switch_platform_private *chip;
4916 + chip = platform_get_drvdata(pdev);
4918 + for (i = 0; i < chip->num_slots; i++) {
4919 + fep = chip->fep_host[i];
4920 + dev = fep->netdev;
4921 + fep->sequence_done = 1;
4922 + unregister_netdev(dev);
4925 + del_timer_sync(&fep->timer_aging);
4928 + platform_set_drvdata(pdev, NULL);
4932 + printk(KERN_ERR "%s: can not get the "
4933 + "switch_platform_private %x\n", __func__,
4934 + (unsigned int)chip);
4939 +static struct platform_driver eth_switch_driver = {
4940 + .probe = eth_switch_probe,
4941 + .remove = __devexit_p(eth_switch_remove),
4943 + .name = "coldfire-switch",
4944 + .owner = THIS_MODULE,
4948 +static int __init coldfire_switch_init(void)
4950 + return platform_driver_register(ð_switch_driver);
4953 +static void __exit coldfire_switch_exit(void)
4955 + platform_driver_unregister(ð_switch_driver);
4958 +module_init(coldfire_switch_init);
4959 +module_exit(coldfire_switch_exit);
4960 +MODULE_LICENSE("GPL");
4962 +++ b/drivers/net/modelo_switch.h
4964 +/****************************************************************************/
4967 + * mcfswitch -- L2 Switch Controller for Modelo ColdFire SoC
4970 + * Copyright (C) 2010-2011 Freescale Semiconductor, Inc. All Rights Reserved.
4972 + * This program is free software; you can redistribute it and/or modify
4973 + * it under the terms of the GNU General Public License as published by
4974 + * the Free Software Foundation; either version 2 of the License, or (at
4975 + * your option) any later version.
4979 +/****************************************************************************/
4982 +/****************************************************************************/
4983 +/* The Switch stores dest/src/type, data, and checksum for receive packets.
4985 +#define PKT_MAXBUF_SIZE 1518
4986 +#define PKT_MINBUF_SIZE 64
4987 +#define PKT_MAXBLR_SIZE 1520
4990 + * The 5441x RX control register also contains maximum frame
4993 +#define OPT_FRAME_SIZE (PKT_MAXBUF_SIZE << 16)
4996 + * Some hardware gets its MAC address out of local flash memory.
4997 + * If this is non-zero then assume it is the address to get the MAC from.
4999 +#define FEC_FLASHMAC 0
5001 +/* The number of Tx and Rx buffers. These are allocated from the page
5002 + * pool. The code may assume these are power of two, so it is best
5003 + * to keep them that size.
5004 + * We don't need to allocate pages for the transmitter. We just use
5005 + * the skbuffer directly.
5007 +#ifdef CONFIG_SWITCH_DMA_USE_SRAM
5008 +#define SWITCH_ENET_RX_PAGES 6
5010 +#define SWITCH_ENET_RX_PAGES 8
5013 +#define SWITCH_ENET_RX_FRSIZE 2048
5014 +#define SWITCH_ENET_RX_FRPPG (PAGE_SIZE / SWITCH_ENET_RX_FRSIZE)
5015 +#define RX_RING_SIZE (SWITCH_ENET_RX_FRPPG * SWITCH_ENET_RX_PAGES)
5016 +#define SWITCH_ENET_TX_FRSIZE 2048
5017 +#define SWITCH_ENET_TX_FRPPG (PAGE_SIZE / SWITCH_ENET_TX_FRSIZE)
5019 +#ifdef CONFIG_SWITCH_DMA_USE_SRAM
5020 +#define TX_RING_SIZE 8 /* Must be power of two */
5021 +#define TX_RING_MOD_MASK 7 /* for this to work */
5023 +#define TX_RING_SIZE 16 /* Must be power of two */
5024 +#define TX_RING_MOD_MASK 15 /* for this to work */
5027 +#define SWITCH_EPORT_NUMBER 2
5029 +#if (((RX_RING_SIZE + TX_RING_SIZE) * 8) > PAGE_SIZE)
5030 +#error "L2SWITCH: descriptor ring size constants too large"
5032 +/*-----------------------------------------------------------------------*/
5033 +typedef struct l2switch_output_queue_status {
5034 + unsigned long ESW_MMSR;
5035 + unsigned long ESW_LMT;
5036 + unsigned long ESW_LFC;
5037 + unsigned long ESW_PCSR;
5038 + unsigned long ESW_IOSR;
5039 + unsigned long ESW_QWT;
5040 + unsigned long esw_reserved;
5041 + unsigned long ESW_P0BCT;
5042 +} esw_output_queue_status;
5044 +typedef struct l2switch_statistics_status {
5046 + * Total number of incoming frames processed
5047 + * but discarded in switch
5049 + unsigned long ESW_DISCN;
5050 + /*Sum of bytes of frames counted in ESW_DISCN*/
5051 + unsigned long ESW_DISCB;
5053 + * Total number of incoming frames processed
5054 + * but not discarded in switch
5056 + unsigned long ESW_NDISCN;
5057 + /*Sum of bytes of frames counted in ESW_NDISCN*/
5058 + unsigned long ESW_NDISCB;
5059 +} esw_statistics_status;
5061 +typedef struct l2switch_port_statistics_status {
5062 + /*outgoing frames discarded due to transmit queue congestion*/
5063 + unsigned long MCF_ESW_POQC;
5064 + /*incoming frames discarded due to VLAN domain mismatch*/
5065 + unsigned long MCF_ESW_PMVID;
5066 + /*incoming frames discarded due to untagged discard*/
5067 + unsigned long MCF_ESW_PMVTAG;
 5068 + /*incoming frames discarded due to port being in blocking state*/
5069 + unsigned long MCF_ESW_PBL;
5070 +} esw_port_statistics_status;
5072 +typedef struct l2switch {
5073 + unsigned long ESW_REVISION;
5074 + unsigned long ESW_SCRATCH;
5075 + unsigned long ESW_PER;
5076 + unsigned long reserved0[1];
5077 + unsigned long ESW_VLANV;
5078 + unsigned long ESW_DBCR;
5079 + unsigned long ESW_DMCR;
5080 + unsigned long ESW_BKLR;
5081 + unsigned long ESW_BMPC;
5082 + unsigned long ESW_MODE;
5083 + unsigned long ESW_VIMSEL;
5084 + unsigned long ESW_VOMSEL;
5085 + unsigned long ESW_VIMEN;
5086 + unsigned long ESW_VID;/*0x34*/
5087 + /*from 0x38 0x3C*/
5088 + unsigned long esw_reserved0[2];
5089 + unsigned long ESW_MCR;/*0x40*/
5090 + unsigned long ESW_EGMAP;
5091 + unsigned long ESW_INGMAP;
5092 + unsigned long ESW_INGSAL;
5093 + unsigned long ESW_INGSAH;
5094 + unsigned long ESW_INGDAL;
5095 + unsigned long ESW_INGDAH;
5096 + unsigned long ESW_ENGSAL;
5097 + unsigned long ESW_ENGSAH;
5098 + unsigned long ESW_ENGDAL;
5099 + unsigned long ESW_ENGDAH;
5100 + unsigned long ESW_MCVAL;/*0x6C*/
5101 + /*from 0x70--0x7C*/
5102 + unsigned long esw_reserved1[4];
5103 + unsigned long ESW_MMSR;/*0x80*/
5104 + unsigned long ESW_LMT;
5105 + unsigned long ESW_LFC;
5106 + unsigned long ESW_PCSR;
5107 + unsigned long ESW_IOSR;
5108 + unsigned long ESW_QWT;/*0x94*/
5109 + unsigned long esw_reserved2[1];/*0x98*/
5110 + unsigned long ESW_P0BCT;/*0x9C*/
5111 + /*from 0xA0-0xB8*/
5112 + unsigned long esw_reserved3[7];
5113 + unsigned long ESW_P0FFEN;/*0xBC*/
5114 + unsigned long ESW_PSNP[8];
5115 + unsigned long ESW_IPSNP[8];
5116 + /*port0-port2 VLAN Priority resolution map 0xFC0D_C100-C108*/
5117 + unsigned long ESW_PVRES[3];
5118 + /*from 0x10C-0x13C*/
5119 + unsigned long esw_reserved4[13];
5120 + unsigned long ESW_IPRES;/*0x140*/
5121 + /*from 0x144-0x17C*/
5122 + unsigned long esw_reserved5[15];
5124 + /*port0-port2 Priority Configuration 0xFC0D_C180-C188*/
5125 + unsigned long ESW_PRES[3];
5126 + /*from 0x18C-0x1FC*/
5127 + unsigned long esw_reserved6[29];
5129 + /*port0-port2 VLAN ID 0xFC0D_C200-C208*/
5130 + unsigned long ESW_PID[3];
5131 + /*from 0x20C-0x27C*/
5132 + unsigned long esw_reserved7[29];
5134 + /*port0-port2 VLAN domain resolution entry 0xFC0D_C280-C2FC*/
5135 + unsigned long ESW_VRES[32];
5137 + unsigned long ESW_DISCN;/*0x300*/
5138 + unsigned long ESW_DISCB;
5139 + unsigned long ESW_NDISCN;
5140 + unsigned long ESW_NDISCB;/*0xFC0DC30C*/
5141 + /*per port statistics 0xFC0DC310_C33C*/
5142 + esw_port_statistics_status port_statistics_status[3];
5143 + /*from 0x340-0x400*/
5144 + unsigned long esw_reserved8[48];
5146 + /*0xFC0DC400---0xFC0DC418*/
5147 + /*unsigned long MCF_ESW_ISR;*/
5148 + unsigned long switch_ievent; /* Interrupt event reg */
5149 + /*unsigned long MCF_ESW_IMR;*/
5150 + unsigned long switch_imask; /* Interrupt mask reg */
5151 + /*unsigned long MCF_ESW_RDSR;*/
5152 + unsigned long fec_r_des_start; /* Receive descriptor ring */
5153 + /*unsigned long MCF_ESW_TDSR;*/
5154 + unsigned long fec_x_des_start; /* Transmit descriptor ring */
5155 + /*unsigned long MCF_ESW_MRBR;*/
5156 + unsigned long fec_r_buff_size; /* Maximum receive buff size */
5157 + /*unsigned long MCF_ESW_RDAR;*/
5158 + unsigned long fec_r_des_active; /* Receive descriptor reg */
5159 + /*unsigned long MCF_ESW_TDAR;*/
5160 + unsigned long fec_x_des_active; /* Transmit descriptor reg */
5161 + /*from 0x420-0x4FC*/
5162 + unsigned long esw_reserved9[57];
5164 + /*0xFC0DC500---0xFC0DC508*/
5165 + unsigned long ESW_LREC0;
5166 + unsigned long ESW_LREC1;
5167 + unsigned long ESW_LSR;
5170 +typedef struct _64bTableEntry {
5171 + unsigned int lo; /* lower 32 bits */
5172 + unsigned int hi; /* upper 32 bits */
5173 +} AddrTable64bEntry;
5175 +typedef struct l2switchaddrtable {
5176 + AddrTable64bEntry eswTable64bEntry[2048];
5179 +/*unsigned long MCF_ESW_LOOKUP_MEM;*/
5180 +#define MCF_ESW_REVISION (*(volatile unsigned long *)(0xFC0DC000))
5181 +#define MCF_ESW_PER (*(volatile unsigned long *)(0xFC0DC008))
5182 +#define MCF_ESW_VLANV (*(volatile unsigned long *)(0xFC0DC010))
5183 +#define MCF_ESW_DBCR (*(volatile unsigned long *)(0xFC0DC014))
5184 +#define MCF_ESW_DMCR (*(volatile unsigned long *)(0xFC0DC018))
5185 +#define MCF_ESW_BKLR (*(volatile unsigned long *)(0xFC0DC01C))
5186 +#define MCF_ESW_BMPC (*(volatile unsigned long *)(0xFC0DC020))
5187 +#define MCF_ESW_MODE (*(volatile unsigned long *)(0xFC0DC024))
5189 +#define MCF_ESW_ISR (*(volatile unsigned long *)(0xFC0DC400))
5190 +#define MCF_ESW_IMR (*(volatile unsigned long *)(0xFC0DC404))
5191 +#define MCF_ESW_TDAR (*(volatile unsigned long *)(0xFC0DC418))
5192 +#define MCF_ESW_LOOKUP_MEM (*(volatile unsigned long *)(0xFC0E0000))
5194 +#define MCF_PPMCR0 (*(volatile unsigned short *)(0xFC04002D))
5195 +#define MCF_PPMHR0 (*(volatile unsigned long *)(0xFC040030))
5197 +#define MCF_FEC_EIR0 (*(volatile unsigned long *)(0xFC0D4004))
5198 +#define MCF_FEC_EIR1 (*(volatile unsigned long *)(0xFC0D8004))
5199 +#define MCF_FEC_EIMR0 (*(volatile unsigned long *)(0xFC0D4008))
5200 +#define MCF_FEC_EIMR1 (*(volatile unsigned long *)(0xFC0D8008))
5201 +#define MCF_FEC_MMFR0 (*(volatile unsigned long *)(0xFC0D4040))
5202 +#define MCF_FEC_MMFR1 (*(volatile unsigned long *)(0xFC0D8040))
5203 +#define MCF_FEC_MSCR0 (*(volatile unsigned long *)(0xFC0D4044))
5204 +#define MCF_FEC_MSCR1 (*(volatile unsigned long *)(0xFC0D8044))
5205 +#define MCF_FEC_RCR0 (*(volatile unsigned long *)(0xFC0D4084))
5206 +#define MCF_FEC_RCR1 (*(volatile unsigned long *)(0xFC0D8084))
5207 +#define MCF_FEC_TCR0 (*(volatile unsigned long *)(0xFC0D40C4))
5208 +#define MCF_FEC_TCR1 (*(volatile unsigned long *)(0xFC0D80C4))
5209 +#define MCF_FEC_ECR0 (*(volatile unsigned long *)(0xFC0D4024))
5210 +#define MCF_FEC_ECR1 (*(volatile unsigned long *)(0xFC0D8024))
5213 +#define MCF_FEC_RCR_PROM (0x00000008)
5214 +#define MCF_FEC_RCR_RMII_MODE (0x00000100)
5215 +#define MCF_FEC_RCR_MAX_FL(x) (((x)&0x00003FFF)<<16)
5216 +#define MCF_FEC_RCR_CRC_FWD (0x00004000)
5218 +#define MCF_FEC_TCR_FDEN (0x00000004)
5220 +#define MCF_FEC_ECR_ETHER_EN (0x00000002)
5221 +#define MCF_FEC_ECR_ENA_1588 (0x00000010)
5223 +/*-------------ioctl command ---------------------------------------*/
5224 +#define ESW_SET_LEARNING_CONF 0x9101
5225 +#define ESW_GET_LEARNING_CONF 0x9201
5226 +#define ESW_SET_BLOCKING_CONF 0x9102
5227 +#define ESW_GET_BLOCKING_CONF 0x9202
5228 +#define ESW_SET_MULTICAST_CONF 0x9103
5229 +#define ESW_GET_MULTICAST_CONF 0x9203
5230 +#define ESW_SET_BROADCAST_CONF 0x9104
5231 +#define ESW_GET_BROADCAST_CONF 0x9204
5232 +#define ESW_SET_PORTENABLE_CONF 0x9105
5233 +#define ESW_GET_PORTENABLE_CONF 0x9205
5234 +#define ESW_SET_IP_SNOOP_CONF 0x9106
5235 +#define ESW_GET_IP_SNOOP_CONF 0x9206
5236 +#define ESW_SET_PORT_SNOOP_CONF 0x9107
5237 +#define ESW_GET_PORT_SNOOP_CONF 0x9207
5238 +#define ESW_SET_PORT_MIRROR_CONF 0x9108
5239 +#define ESW_GET_PORT_MIRROR_CONF 0x9208
5240 +#define ESW_SET_PIRORITY_VLAN 0x9109
5241 +#define ESW_GET_PIRORITY_VLAN 0x9209
5242 +#define ESW_SET_PIRORITY_IP 0x910A
5243 +#define ESW_GET_PIRORITY_IP 0x920A
5244 +#define ESW_SET_PIRORITY_MAC 0x910B
5245 +#define ESW_GET_PIRORITY_MAC 0x920B
5246 +#define ESW_SET_PIRORITY_DEFAULT 0x910C
5247 +#define ESW_GET_PIRORITY_DEFAULT 0x920C
5248 +#define ESW_SET_P0_FORCED_FORWARD 0x910D
5249 +#define ESW_GET_P0_FORCED_FORWARD 0x920D
5250 +#define ESW_SET_SWITCH_MODE 0x910E
5251 +#define ESW_GET_SWITCH_MODE 0x920E
5252 +#define ESW_SET_BRIDGE_CONFIG 0x910F
5253 +#define ESW_GET_BRIDGE_CONFIG 0x920F
5254 +#define ESW_SET_VLAN_OUTPUT_PROCESS 0x9110
5255 +#define ESW_GET_VLAN_OUTPUT_PROCESS 0x9210
5256 +#define ESW_SET_VLAN_INPUT_PROCESS 0x9111
5257 +#define ESW_GET_VLAN_INPUT_PROCESS 0x9211
5258 +#define ESW_SET_VLAN_DOMAIN_VERIFICATION 0x9112
5259 +#define ESW_GET_VLAN_DOMAIN_VERIFICATION 0x9212
5260 +#define ESW_SET_VLAN_RESOLUTION_TABLE 0x9113
5261 +#define ESW_GET_VLAN_RESOLUTION_TABLE 0x9213
5262 +#define ESW_GET_ENTRY_PORT_NUMBER 0x9214
5263 +#define ESW_GET_LOOKUP_TABLE 0x9215
5264 +#define ESW_GET_PORT_STATUS 0x9216
5265 +#define ESW_SET_VLAN_ID 0x9114
5266 +#define ESW_SET_VLAN_ID_CLEARED 0x9115
5267 +#define ESW_SET_PORT_IN_VLAN_ID 0x9116
5268 +#define ESW_SET_PORT_ENTRY_EMPTY 0x9117
5269 +#define ESW_SET_OTHER_PORT_ENTRY_EMPTY 0x9118
5270 +#define ESW_GET_PORT_ALL_STATUS 0x9217
5271 +#define ESW_SET_PORT_MIRROR_CONF_PORT_MATCH 0x9119
5272 +#define ESW_SET_PORT_MIRROR_CONF_ADDR_MATCH 0x911A
5274 +#define ESW_GET_STATISTICS_STATUS 0x9221
5275 +#define ESW_SET_OUTPUT_QUEUE_MEMORY 0x9125
5276 +#define ESW_GET_OUTPUT_QUEUE_STATUS 0x9225
5277 +#define ESW_UPDATE_STATIC_MACTABLE 0x9226
5278 +#define ESW_CLEAR_ALL_MACTABLE 0x9227
5279 +#define ESW_GET_USER_PID 0x9228
5281 +typedef struct _eswIOCTL_PORT_CONF {
5284 +} eswIoctlPortConfig;
5286 +typedef struct _eswIOCTL_PORT_EN_CONF {
5290 +} eswIoctlPortEnableConfig;
5292 +typedef struct _eswIOCTL_IP_SNOOP_CONF {
5294 + unsigned long ip_header_protocol;
5295 +} eswIoctlIpsnoopConfig;
5297 +typedef struct _eswIOCTL_P0_FORCED_FORWARD_CONF {
5301 +} eswIoctlP0ForcedForwardConfig;
5303 +typedef struct _eswIOCTL_PORT_SNOOP_CONF {
5305 + unsigned short compare_port;
5307 +} eswIoctlPortsnoopConfig;
5309 +typedef struct _eswIOCTL_PORT_Mirror_CONF {
5314 + int egress_mac_src_en;
5315 + int egress_mac_des_en;
5316 + int ingress_mac_src_en;
5317 + int ingress_mac_des_en;
5318 + unsigned char *src_mac;
5319 + unsigned char *des_mac;
5320 + int mirror_enable;
5321 +} eswIoctlPortMirrorConfig;
5323 +struct eswIoctlMirrorCfgPortMatch {
5325 + int port_match_en;
5329 +struct eswIoctlMirrorCfgAddrMatch {
5331 + int addr_match_en;
5332 + unsigned char *mac_addr;
5335 +typedef struct _eswIOCTL_PRIORITY_VLAN_CONF {
5338 + int vlan_pri_table_num;
5339 + int vlan_pri_table_value;
5340 +} eswIoctlPriorityVlanConfig;
5342 +typedef struct _eswIOCTL_PRIORITY_IP_CONF {
5346 + int ip_priority_num;
5347 + int ip_priority_value;
5348 +} eswIoctlPriorityIPConfig;
5350 +typedef struct _eswIOCTL_PRIORITY_MAC_CONF {
5352 +} eswIoctlPriorityMacConfig;
5354 +typedef struct _eswIOCTL_PRIORITY_DEFAULT_CONF {
5356 + unsigned char priority_value;
5357 +} eswIoctlPriorityDefaultConfig;
5359 +typedef struct _eswIOCTL_IRQ_STATUS {
5360 + unsigned long isr;
5361 + unsigned long imr;
5362 + unsigned long rx_buf_pointer;
5363 + unsigned long tx_buf_pointer;
5364 + unsigned long rx_max_size;
5365 + unsigned long rx_buf_active;
5366 + unsigned long tx_buf_active;
5367 +} eswIoctlIrqStatus;
5369 +typedef struct _eswIOCTL_PORT_Mirror_STATUS {
5370 + unsigned long ESW_MCR;
5371 + unsigned long ESW_EGMAP;
5372 + unsigned long ESW_INGMAP;
5373 + unsigned long ESW_INGSAL;
5374 + unsigned long ESW_INGSAH;
5375 + unsigned long ESW_INGDAL;
5376 + unsigned long ESW_INGDAH;
5377 + unsigned long ESW_ENGSAL;
5378 + unsigned long ESW_ENGSAH;
5379 + unsigned long ESW_ENGDAL;
5380 + unsigned long ESW_ENGDAH;
5381 + unsigned long ESW_MCVAL;
5382 +} eswIoctlPortMirrorStatus;
5384 +typedef struct _eswIOCTL_VLAN_OUTPUT_CONF {
5387 +} eswIoctlVlanOutputConfig;
5389 +typedef struct _eswIOCTL_VLAN_INPUT_CONF {
5392 + unsigned short port_vlanid;
5393 +} eswIoctlVlanInputConfig;
5395 +typedef struct _eswIOCTL_VLAN_DOMAIN_VERIFY_CONF {
5397 + int vlan_domain_verify_en;
5398 + int vlan_discard_unknown_en;
5399 +} eswIoctlVlanVerificationConfig;
5401 +typedef struct _eswIOCTL_VLAN_RESOULATION_TABLE {
5402 + unsigned short port_vlanid;
5403 + unsigned char vlan_domain_port;
5404 + unsigned char vlan_domain_num;
5405 +} eswIoctlVlanResoultionTable;
5407 +struct eswVlanTableItem {
5408 + eswIoctlVlanResoultionTable table[32];
5409 + unsigned char valid_num;
5412 +typedef struct _eswIOCTL_VLAN_INPUT_STATUS {
5413 + unsigned long ESW_VLANV;
5414 + unsigned long ESW_PID[3];
5415 + unsigned long ESW_VIMSEL;
5416 + unsigned long ESW_VIMEN;
5417 + unsigned long ESW_VRES[32];
5418 +} eswIoctlVlanInputStatus;
5420 +typedef struct _eswIOCTL_Static_MACTable {
5421 + unsigned char *mac_addr;
5424 +} eswIoctlUpdateStaticMACtable;
5426 +typedef struct _eswIOCTL_OUTPUT_QUEUE {
5428 + esw_output_queue_status sOutputQueue;
5429 +} eswIoctlOutputQueue;
5431 +/*=============================================================*/
5432 +#define LEARNING_AGING_TIMER (10 * HZ)
5434 + * Info received from Hardware Learning FIFO,
5435 + * holding MAC address and corresponding Hash Value and
5436 + * port number where the frame was received (disassembled).
5438 +typedef struct _eswPortInfo {
5439 + /* MAC lower 32 bits (first byte is 7:0). */
5440 + unsigned int maclo;
5441 + /* MAC upper 16 bits (47:32). */
5442 + unsigned int machi;
5443 + /* the hash value for this MAC address. */
5444 + unsigned int hash;
5445 + /* the port number this MAC address is associated with. */
5446 + unsigned int port;
5450 + * Hardware Look up Address Table 64-bit element.
5452 +typedef volatile struct _64bitTableEntry {
5453 + unsigned int lo; /* lower 32 bits */
5454 + unsigned int hi; /* upper 32 bits */
5455 +} eswTable64bitEntry;
5457 +struct eswAddrTableEntryExample {
5458 + /* the entry number */
5459 + unsigned short entrynum;
5460 + /* mac address array */
5461 + unsigned char mac_addr[6];
5462 + unsigned char item1;
5463 + unsigned short item2;
5467 + * Define the buffer descriptor structure.
5469 +typedef struct bufdesc {
5470 + unsigned short cbd_sc; /* Control and status info */
5471 + unsigned short cbd_datlen; /* Data length */
5472 + unsigned long cbd_bufaddr; /* Buffer address */
5473 +#ifdef MODELO_BUFFER
5474 + unsigned long ebd_status;
5475 + unsigned short length_proto_type;
5476 + unsigned short payload_checksum;
5477 + unsigned long bdu;
5478 + unsigned long timestamp;
5479 + unsigned long reserverd_word1;
5480 + unsigned long reserverd_word2;
5484 +/* Forward declarations of some structures to support different PHYs
5488 + void (*funct)(uint mii_reg, struct net_device *dev);
5495 + const phy_cmd_t *config;
5496 + const phy_cmd_t *startup;
5497 + const phy_cmd_t *ack_int;
5498 + const phy_cmd_t *shutdown;
5501 +struct port_status {
5502 + /* 1: link is up, 0: link is down */
5503 + int port1_link_status;
5504 + int port2_link_status;
5505 + /* 1: blocking, 0: unblocking */
5506 + int port0_block_status;
5507 + int port1_block_status;
5508 + int port2_block_status;
5511 +struct port_all_status {
5512 + /* 1: link is up, 0: link is down */
5514 + /* 1: blocking, 0: unblocking */
5516 + /* 1: unlearning, 0: learning */
5518 + /* vlan domain verify 1: enable 0: disable */
 5520 + /* discard unknown 1: enable 0: disable */
5521 + int discard_unknown;
5522 + /* multicast resolution 1: enable 0: disable */
 5524 + /* broadcast resolution 1: enable 0: disable */
5526 + /* transmit 1: enable 0: disable */
5528 + /* receive 1: enable 0: disable */
5532 +/* The switch buffer descriptors track the ring buffers. The rx_bd_base and
5533 + * tx_bd_base always point to the base of the buffer descriptors. The
5534 + * cur_rx and cur_tx point to the currently available buffer.
5535 + * The dirty_tx tracks the current buffer that is being sent by the
5536 + * controller. The cur_tx and dirty_tx are equal under both completely
5537 + * empty and completely full conditions. The empty/ready indicator in
5538 + * the buffer descriptor determines the actual condition.
5540 +struct switch_enet_private {
5541 + /* Hardware registers of the switch device */
5542 + volatile switch_t *hwp;
5543 + volatile eswAddrTable_t *hwentry;
5545 + struct net_device *netdev;
5546 + struct platform_device *pdev;
5547 + /* The saved address of a sent-in-place packet/buffer, for skfree(). */
5548 + unsigned char *tx_bounce[TX_RING_SIZE];
5549 + struct sk_buff *tx_skbuff[TX_RING_SIZE];
5553 + /* CPM dual port RAM relative addresses.
5555 + cbd_t *rx_bd_base; /* Address of Rx and Tx buffers. */
5556 + cbd_t *tx_bd_base;
5557 + cbd_t *cur_rx, *cur_tx; /* The next free ring entry */
5558 + cbd_t *dirty_tx; /* The ring entries to be free()ed. */
5560 + /* hold while accessing the HW like ringbuffer for tx/rx but not MAC */
5561 + spinlock_t hw_lock;
5563 + /* hold while accessing the mii_list_t() elements */
5564 + spinlock_t mii_lock;
5565 + struct mii_bus *mdio_bus;
5566 + struct phy_device *phydev[SWITCH_EPORT_NUMBER];
5572 + phy_info_t const *phy;
5573 + struct work_struct phy_task;
5574 + volatile switch_t *phy_hwp;
5576 + uint sequence_done;
5577 + uint mii_phy_task_queued;
5586 + int phy1_old_link;
5591 + int phy2_old_link;
5594 + /* --------------Statistics--------------------------- */
5595 + /* when a new element deleted a element with in
5596 + * a block due to lack of space */
5597 + int atBlockOverflows;
5598 + /* Peak number of valid entries in the address table */
5600 + /* current number of valid entries in the address table */
5601 + int atCurrEntries;
5602 + /* maximum entries within a block found
5603 + * (updated within ageing)*/
5604 + int atMaxEntriesPerBlock;
5606 + /* -------------------ageing function------------------ */
5607 + /* maximum age allowed for an entry */
5609 + /* last LUT entry to block that was
5610 + * inspected by the Ageing task*/
5612 + /* last element within block inspected by the Ageing task */
5613 + int ageBlockElemIdx;
5614 + /* complete table has been processed by ageing process */
5616 + /* delay setting */
5618 + /* current delay Counter */
5621 + /* ----------------timer related---------------------------- */
5622 + /* current time (for timestamping) */
5624 + /* flag set by timer when currTime changed
5625 + * and cleared by serving function*/
5629 + /* Timer for Aging */
5630 + struct timer_list timer_aging;
5631 + int learning_irqhandle_enable;
5634 +struct switch_platform_private {
5635 + unsigned long quirks;
5636 + int num_slots; /* Slots on controller */
5637 + struct switch_enet_private *fep_host[0]; /* Pointers to hosts */
5640 +/******************************************************************************/
 5641 +/* Receive is empty */
5642 +#define BD_SC_EMPTY ((unsigned short)0x8000)
5643 +/* Transmit is ready */
5644 +#define BD_SC_READY ((unsigned short)0x8000)
5645 +/* Last buffer descriptor */
5646 +#define BD_SC_WRAP ((unsigned short)0x2000)
5647 +/* Interrupt on change */
5648 +#define BD_SC_INTRPT ((unsigned short)0x1000)
 5649 +/* Continuous mode */
5650 +#define BD_SC_CM ((unsigned short)0x0200)
5651 +/* Rec'd too many idles */
5652 +#define BD_SC_ID ((unsigned short)0x0100)
5654 +#define BD_SC_P ((unsigned short)0x0100)
5655 +/* Break received */
5656 +#define BD_SC_BR ((unsigned short)0x0020)
5657 +/* Framing error */
5658 +#define BD_SC_FR ((unsigned short)0x0010)
5660 +#define BD_SC_PR ((unsigned short)0x0008)
5662 +#define BD_SC_OV ((unsigned short)0x0002)
5663 +#define BD_SC_CD ((unsigned short)0x0001)
5665 +/* Buffer descriptor control/status used by Ethernet receive.
5667 +#define BD_ENET_RX_EMPTY ((unsigned short)0x8000)
5668 +#define BD_ENET_RX_WRAP ((unsigned short)0x2000)
5669 +#define BD_ENET_RX_INTR ((unsigned short)0x1000)
5670 +#define BD_ENET_RX_LAST ((unsigned short)0x0800)
5671 +#define BD_ENET_RX_FIRST ((unsigned short)0x0400)
5672 +#define BD_ENET_RX_MISS ((unsigned short)0x0100)
5673 +#define BD_ENET_RX_LG ((unsigned short)0x0020)
5674 +#define BD_ENET_RX_NO ((unsigned short)0x0010)
5675 +#define BD_ENET_RX_SH ((unsigned short)0x0008)
5676 +#define BD_ENET_RX_CR ((unsigned short)0x0004)
5677 +#define BD_ENET_RX_OV ((unsigned short)0x0002)
5678 +#define BD_ENET_RX_CL ((unsigned short)0x0001)
5679 +/* All status bits */
5680 +#define BD_ENET_RX_STATS ((unsigned short)0x013f)
5682 +/* Buffer descriptor control/status used by Ethernet transmit.
5684 +#define BD_ENET_TX_READY ((unsigned short)0x8000)
5685 +#define BD_ENET_TX_PAD ((unsigned short)0x4000)
5686 +#define BD_ENET_TX_WRAP ((unsigned short)0x2000)
5687 +#define BD_ENET_TX_INTR ((unsigned short)0x1000)
5688 +#define BD_ENET_TX_LAST ((unsigned short)0x0800)
5689 +#define BD_ENET_TX_TC ((unsigned short)0x0400)
5690 +#define BD_ENET_TX_DEF ((unsigned short)0x0200)
5691 +#define BD_ENET_TX_HB ((unsigned short)0x0100)
5692 +#define BD_ENET_TX_LC ((unsigned short)0x0080)
5693 +#define BD_ENET_TX_RL ((unsigned short)0x0040)
5694 +#define BD_ENET_TX_RCMASK ((unsigned short)0x003c)
5695 +#define BD_ENET_TX_UN ((unsigned short)0x0002)
5696 +#define BD_ENET_TX_CSL ((unsigned short)0x0001)
5697 +/* All status bits */
5698 +#define BD_ENET_TX_STATS ((unsigned short)0x03ff)
5700 +/*Copy from validation code */
5701 +#define RX_BUFFER_SIZE 1520
5702 +#define TX_BUFFER_SIZE 1520
5703 +#define NUM_RXBDS 20
5704 +#define NUM_TXBDS 20
5706 +#define TX_BD_R 0x8000
5707 +#define TX_BD_TO1 0x4000
5708 +#define TX_BD_W 0x2000
5709 +#define TX_BD_TO2 0x1000
5710 +#define TX_BD_L 0x0800
5711 +#define TX_BD_TC 0x0400
5713 +#define TX_BD_INT 0x40000000
5714 +#define TX_BD_TS 0x20000000
5715 +#define TX_BD_PINS 0x10000000
5716 +#define TX_BD_IINS 0x08000000
5717 +#define TX_BD_TXE 0x00008000
5718 +#define TX_BD_UE 0x00002000
5719 +#define TX_BD_EE 0x00001000
5720 +#define TX_BD_FE 0x00000800
5721 +#define TX_BD_LCE 0x00000400
5722 +#define TX_BD_OE 0x00000200
5723 +#define TX_BD_TSE 0x00000100
5724 +#define TX_BD_BDU 0x80000000
5726 +#define RX_BD_E 0x8000
5727 +#define RX_BD_R01 0x4000
5728 +#define RX_BD_W 0x2000
5729 +#define RX_BD_R02 0x1000
5730 +#define RX_BD_L 0x0800
5731 +#define RX_BD_M 0x0100
5732 +#define RX_BD_BC 0x0080
5733 +#define RX_BD_MC 0x0040
5734 +#define RX_BD_LG 0x0020
5735 +#define RX_BD_NO 0x0010
5736 +#define RX_BD_CR 0x0004
5737 +#define RX_BD_OV 0x0002
5738 +#define RX_BD_TR 0x0001
5740 +#define RX_BD_ME 0x80000000
5741 +#define RX_BD_PE 0x04000000
5742 +#define RX_BD_CE 0x02000000
5743 +#define RX_BD_UC 0x01000000
5744 +#define RX_BD_INT 0x00800000
5745 +#define RX_BD_ICE 0x00000020
5746 +#define RX_BD_PCR 0x00000010
5747 +#define RX_BD_VLAN 0x00000004
5748 +#define RX_BD_IPV6 0x00000002
5749 +#define RX_BD_FRAG 0x00000001
5750 +#define RX_BD_BDU 0x80000000
5751 +/****************************************************************************/
5753 +/* Address Table size in bytes(2048 64bit entry ) */
5754 +#define ESW_ATABLE_MEM_SIZE (2048*8)
5755 +/* How many 64-bit elements fit in the address table */
5756 +#define ESW_ATABLE_MEM_NUM_ENTRIES (2048)
5757 +/* Address Table Maximum number of entries in each Slot */
5758 +#define ATABLE_ENTRY_PER_SLOT 8
5759 +/* log2(ATABLE_ENTRY_PER_SLOT)*/
5760 +#define ATABLE_ENTRY_PER_SLOT_bits 3
5761 +/* entry size in byte */
5762 +#define ATABLE_ENTRY_SIZE 8
5763 +/* slot size in byte */
5764 +#define ATABLE_SLOT_SIZE (ATABLE_ENTRY_PER_SLOT * ATABLE_ENTRY_SIZE)
5765 +/* width of timestamp variable (bits) within address table entry */
5766 +#define AT_DENTRY_TIMESTAMP_WIDTH 10
5767 +/* number of bits for port number storage */
5768 +#define AT_DENTRY_PORT_WIDTH 4
5769 +/* number of bits for port bitmask number storage */
5770 +#define AT_SENTRY_PORT_WIDTH 7
5771 +/* address table static entry port bitmask start address bit */
5772 +#define AT_SENTRY_PORTMASK_shift 21
5773 +/* number of bits for port priority storage */
5774 +#define AT_SENTRY_PRIO_WIDTH 7
5775 +/* address table static entry priority start address bit */
5776 +#define AT_SENTRY_PRIO_shift 18
5777 +/* address table dynamic entry port start address bit */
5778 +#define AT_DENTRY_PORT_shift 28
5779 +/* address table dynamic entry timestamp start address bit */
5780 +#define AT_DENTRY_TIME_shift 18
5781 +/* address table entry record type start address bit */
5782 +#define AT_ENTRY_TYPE_shift 17
5783 +/* address table entry record type bit: 1 static, 0 dynamic */
5784 +#define AT_ENTRY_TYPE_STATIC 1
5785 +#define AT_ENTRY_TYPE_DYNAMIC 0
5786 +/* address table entry record valid start address bit */
5787 +#define AT_ENTRY_VALID_shift 16
5788 +#define AT_ENTRY_RECORD_VALID 1
5790 +#define AT_EXTRACT_VALID(x) \
5791 + ((x >> AT_ENTRY_VALID_shift) & AT_ENTRY_RECORD_VALID)
5793 +#define AT_EXTRACT_PORTMASK(x) \
5794 + ((x >> AT_SENTRY_PORTMASK_shift) & AT_SENTRY_PORT_WIDTH)
5796 +#define AT_EXTRACT_PRIO(x) \
5797 + ((x >> AT_SENTRY_PRIO_shift) & AT_SENTRY_PRIO_WIDTH)
5799 +/* return block corresponding to the 8 bit hash value calculated */
5800 +#define GET_BLOCK_PTR(hash) (hash << 3)
5801 +#define AT_EXTRACT_TIMESTAMP(x) \
5802 + ((x >> AT_DENTRY_TIME_shift) & ((1 << AT_DENTRY_TIMESTAMP_WIDTH)-1))
5803 +#define AT_EXTRACT_PORT(x) \
5804 + ((x >> AT_DENTRY_PORT_shift) & ((1 << AT_DENTRY_PORT_WIDTH)-1))
5805 +#define AT_SEXTRACT_PORT(x) \
5806 + ((~((x >> AT_SENTRY_PORTMASK_shift) & \
5807 + ((1 << AT_DENTRY_PORT_WIDTH)-1))) >> 1)
5808 +#define TIMEDELTA(newtime, oldtime) \
5809 + ((newtime - oldtime) & \
5810 + ((1 << AT_DENTRY_TIMESTAMP_WIDTH)-1))
5812 +#define AT_EXTRACT_IP_PROTOCOL(x) ((x >> 8) & 0xff)
5813 +#define AT_EXTRACT_TCP_UDP_PORT(x) ((x >> 16) & 0xffff)
5815 +/* increment time value respecting modulo. */
5816 +#define TIMEINCREMENT(time) \
5817 + ((time) = ((time)+1) & ((1 << AT_DENTRY_TIMESTAMP_WIDTH)-1))
5818 +/* ------------------------------------------------------------------------- */
5819 +/* Bit definitions and macros for MCF_ESW_REVISION */
5820 +#define MCF_ESW_REVISION_CORE_REVISION(x) (((x)&0x0000FFFF)<<0)
5821 +#define MCF_ESW_REVISION_CUSTOMER_REVISION(x) (((x)&0x0000FFFF)<<16)
5823 +/* Bit definitions and macros for MCF_ESW_PER */
5824 +#define MCF_ESW_PER_TE0 (0x00000001)
5825 +#define MCF_ESW_PER_TE1 (0x00000002)
5826 +#define MCF_ESW_PER_TE2 (0x00000004)
5827 +#define MCF_ESW_PER_RE0 (0x00010000)
5828 +#define MCF_ESW_PER_RE1 (0x00020000)
5829 +#define MCF_ESW_PER_RE2 (0x00040000)
5831 +/* Bit definitions and macros for MCF_ESW_VLANV */
5832 +#define MCF_ESW_VLANV_VV0 (0x00000001)
5833 +#define MCF_ESW_VLANV_VV1 (0x00000002)
5834 +#define MCF_ESW_VLANV_VV2 (0x00000004)
5835 +#define MCF_ESW_VLANV_DU0 (0x00010000)
5836 +#define MCF_ESW_VLANV_DU1 (0x00020000)
5837 +#define MCF_ESW_VLANV_DU2 (0x00040000)
5839 +/* Bit definitions and macros for MCF_ESW_DBCR */
5840 +#define MCF_ESW_DBCR_P0 (0x00000001)
5841 +#define MCF_ESW_DBCR_P1 (0x00000002)
5842 +#define MCF_ESW_DBCR_P2 (0x00000004)
5844 +/* Bit definitions and macros for MCF_ESW_DMCR */
5845 +#define MCF_ESW_DMCR_P0 (0x00000001)
5846 +#define MCF_ESW_DMCR_P1 (0x00000002)
5847 +#define MCF_ESW_DMCR_P2 (0x00000004)
5849 +/* Bit definitions and macros for MCF_ESW_BKLR */
5850 +#define MCF_ESW_BKLR_BE0 (0x00000001)
5851 +#define MCF_ESW_BKLR_BE1 (0x00000002)
5852 +#define MCF_ESW_BKLR_BE2 (0x00000004)
5853 +#define MCF_ESW_BKLR_LD0 (0x00010000)
5854 +#define MCF_ESW_BKLR_LD1 (0x00020000)
5855 +#define MCF_ESW_BKLR_LD2 (0x00040000)
5857 +/* Bit definitions and macros for MCF_ESW_BMPC */
5858 +#define MCF_ESW_BMPC_PORT(x) (((x)&0x0000000F)<<0)
5859 +#define MCF_ESW_BMPC_MSG_TX (0x00000020)
5860 +#define MCF_ESW_BMPC_EN (0x00000040)
5861 +#define MCF_ESW_BMPC_DIS (0x00000080)
5862 +#define MCF_ESW_BMPC_PRIORITY(x) (((x)&0x00000007)<<13)
5863 +#define MCF_ESW_BMPC_PORTMASK(x) (((x)&0x00000007)<<16)
5865 +/* Bit definitions and macros for MCF_ESW_MODE */
5866 +#define MCF_ESW_MODE_SW_RST (0x00000001)
5867 +#define MCF_ESW_MODE_SW_EN (0x00000002)
5868 +#define MCF_ESW_MODE_STOP (0x00000080)
5869 +#define MCF_ESW_MODE_CRC_TRAN (0x00000100)
5870 +#define MCF_ESW_MODE_P0CT (0x00000200)
5871 +#define MCF_ESW_MODE_STATRST (0x80000000)
5873 +/* Bit definitions and macros for MCF_ESW_VIMSEL */
5874 +#define MCF_ESW_VIMSEL_IM0(x) (((x)&0x00000003)<<0)
5875 +#define MCF_ESW_VIMSEL_IM1(x) (((x)&0x00000003)<<2)
5876 +#define MCF_ESW_VIMSEL_IM2(x) (((x)&0x00000003)<<4)
5878 +/* Bit definitions and macros for MCF_ESW_VOMSEL */
5879 +#define MCF_ESW_VOMSEL_OM0(x) (((x)&0x00000003)<<0)
5880 +#define MCF_ESW_VOMSEL_OM1(x) (((x)&0x00000003)<<2)
5881 +#define MCF_ESW_VOMSEL_OM2(x) (((x)&0x00000003)<<4)
5883 +/* Bit definitions and macros for MCF_ESW_VIMEN */
5884 +#define MCF_ESW_VIMEN_EN0 (0x00000001)
5885 +#define MCF_ESW_VIMEN_EN1 (0x00000002)
5886 +#define MCF_ESW_VIMEN_EN2 (0x00000004)
5888 +/* Bit definitions and macros for MCF_ESW_VID */
5889 +#define MCF_ESW_VID_TAG(x) (((x)&0xFFFFFFFF)<<0)
5891 +/* Bit definitions and macros for MCF_ESW_MCR */
5892 +#define MCF_ESW_MCR_PORT(x) (((x)&0x0000000F)<<0)
5893 +#define MCF_ESW_MCR_MEN (0x00000010)
5894 +#define MCF_ESW_MCR_INGMAP (0x00000020)
5895 +#define MCF_ESW_MCR_EGMAP (0x00000040)
5896 +#define MCF_ESW_MCR_INGSA (0x00000080)
5897 +#define MCF_ESW_MCR_INGDA (0x00000100)
5898 +#define MCF_ESW_MCR_EGSA (0x00000200)
5899 +#define MCF_ESW_MCR_EGDA (0x00000400)
5901 +/* Bit definitions and macros for MCF_ESW_EGMAP */
5902 +#define MCF_ESW_EGMAP_EG0 (0x00000001)
5903 +#define MCF_ESW_EGMAP_EG1 (0x00000002)
5904 +#define MCF_ESW_EGMAP_EG2 (0x00000004)
5906 +/* Bit definitions and macros for MCF_ESW_INGMAP */
5907 +#define MCF_ESW_INGMAP_ING0 (0x00000001)
5908 +#define MCF_ESW_INGMAP_ING1 (0x00000002)
5909 +#define MCF_ESW_INGMAP_ING2 (0x00000004)
5911 +/* Bit definitions and macros for MCF_ESW_INGSAL */
5912 +#define MCF_ESW_INGSAL_ADDLOW(x) (((x)&0xFFFFFFFF)<<0)
5914 +/* Bit definitions and macros for MCF_ESW_INGSAH */
5915 +#define MCF_ESW_INGSAH_ADDHIGH(x) (((x)&0x0000FFFF)<<0)
5917 +/* Bit definitions and macros for MCF_ESW_INGDAL */
5918 +#define MCF_ESW_INGDAL_ADDLOW(x) (((x)&0xFFFFFFFF)<<0)
5920 +/* Bit definitions and macros for MCF_ESW_INGDAH */
5921 +#define MCF_ESW_INGDAH_ADDHIGH(x) (((x)&0x0000FFFF)<<0)
5923 +/* Bit definitions and macros for MCF_ESW_ENGSAL */
5924 +#define MCF_ESW_ENGSAL_ADDLOW(x) (((x)&0xFFFFFFFF)<<0)
5926 +/* Bit definitions and macros for MCF_ESW_ENGSAH */
5927 +#define MCF_ESW_ENGSAH_ADDHIGH(x) (((x)&0x0000FFFF)<<0)
5929 +/* Bit definitions and macros for MCF_ESW_ENGDAL */
5930 +#define MCF_ESW_ENGDAL_ADDLOW(x) (((x)&0xFFFFFFFF)<<0)
5932 +/* Bit definitions and macros for MCF_ESW_ENGDAH */
5933 +#define MCF_ESW_ENGDAH_ADDHIGH(x) (((x)&0x0000FFFF)<<0)
5935 +/* Bit definitions and macros for MCF_ESW_MCVAL */
5936 +#define MCF_ESW_MCVAL_COUNT(x) (((x)&0x000000FF)<<0)
5938 +/* Bit definitions and macros for MCF_ESW_MMSR */
5939 +#define MCF_ESW_MMSR_BUSY (0x00000001)
5940 +#define MCF_ESW_MMSR_NOCELL (0x00000002)
5941 +#define MCF_ESW_MMSR_MEMFULL (0x00000004)
5942 +#define MCF_ESW_MMSR_MFLATCH (0x00000008)
5943 +#define MCF_ESW_MMSR_DQ_GRNT (0x00000040)
5944 +#define MCF_ESW_MMSR_CELLS_AVAIL(x) (((x)&0x000000FF)<<16)
5946 +/* Bit definitions and macros for MCF_ESW_LMT */
5947 +#define MCF_ESW_LMT_THRESH(x) (((x)&0x000000FF)<<0)
5949 +/* Bit definitions and macros for MCF_ESW_LFC */
5950 +#define MCF_ESW_LFC_COUNT(x) (((x)&0xFFFFFFFF)<<0)
5952 +/* Bit definitions and macros for MCF_ESW_PCSR */
5953 +#define MCF_ESW_PCSR_PC0 (0x00000001)
5954 +#define MCF_ESW_PCSR_PC1 (0x00000002)
5955 +#define MCF_ESW_PCSR_PC2 (0x00000004)
5957 +/* Bit definitions and macros for MCF_ESW_IOSR */
5958 +#define MCF_ESW_IOSR_OR0 (0x00000001)
5959 +#define MCF_ESW_IOSR_OR1 (0x00000002)
5960 +#define MCF_ESW_IOSR_OR2 (0x00000004)
5962 +/* Bit definitions and macros for MCF_ESW_QWT */
5963 +#define MCF_ESW_QWT_Q0WT(x) (((x)&0x0000001F)<<0)
5964 +#define MCF_ESW_QWT_Q1WT(x) (((x)&0x0000001F)<<8)
5965 +#define MCF_ESW_QWT_Q2WT(x) (((x)&0x0000001F)<<16)
5966 +#define MCF_ESW_QWT_Q3WT(x) (((x)&0x0000001F)<<24)
5968 +/* Bit definitions and macros for MCF_ESW_P0BCT */
5969 +#define MCF_ESW_P0BCT_THRESH(x) (((x)&0x000000FF)<<0)
5971 +/* Bit definitions and macros for MCF_ESW_P0FFEN */
5972 +#define MCF_ESW_P0FFEN_FEN (0x00000001)
5973 +#define MCF_ESW_P0FFEN_FD(x) (((x)&0x00000003)<<2)
5975 +/* Bit definitions and macros for MCF_ESW_PSNP */
5976 +#define MCF_ESW_PSNP_EN (0x00000001)
5977 +#define MCF_ESW_PSNP_MODE(x) (((x)&0x00000003)<<1)
5978 +#define MCF_ESW_PSNP_CD (0x00000008)
5979 +#define MCF_ESW_PSNP_CS (0x00000010)
5980 +#define MCF_ESW_PSNP_PORT_COMPARE(x) (((x)&0x0000FFFF)<<16)
5982 +/* Bit definitions and macros for MCF_ESW_IPSNP */
5983 +#define MCF_ESW_IPSNP_EN (0x00000001)
5984 +#define MCF_ESW_IPSNP_MODE(x) (((x)&0x00000003)<<1)
5985 +#define MCF_ESW_IPSNP_PROTOCOL(x) (((x)&0x000000FF)<<8)
5987 +/* Bit definitions and macros for MCF_ESW_PVRES */
5988 +#define MCF_ESW_PVRES_PRI0(x) (((x)&0x00000007)<<0)
5989 +#define MCF_ESW_PVRES_PRI1(x) (((x)&0x00000007)<<3)
5990 +#define MCF_ESW_PVRES_PRI2(x) (((x)&0x00000007)<<6)
5991 +#define MCF_ESW_PVRES_PRI3(x) (((x)&0x00000007)<<9)
5992 +#define MCF_ESW_PVRES_PRI4(x) (((x)&0x00000007)<<12)
5993 +#define MCF_ESW_PVRES_PRI5(x) (((x)&0x00000007)<<15)
5994 +#define MCF_ESW_PVRES_PRI6(x) (((x)&0x00000007)<<18)
5995 +#define MCF_ESW_PVRES_PRI7(x) (((x)&0x00000007)<<21)
5997 +/* Bit definitions and macros for MCF_ESW_IPRES */
5998 +#define MCF_ESW_IPRES_ADDRESS(x) (((x)&0x000000FF)<<0)
5999 +#define MCF_ESW_IPRES_IPV4SEL (0x00000100)
6000 +#define MCF_ESW_IPRES_PRI0(x) (((x)&0x00000003)<<9)
6001 +#define MCF_ESW_IPRES_PRI1(x) (((x)&0x00000003)<<11)
6002 +#define MCF_ESW_IPRES_PRI2(x) (((x)&0x00000003)<<13)
6003 +#define MCF_ESW_IPRES_READ (0x80000000)
6005 +/* Bit definitions and macros for MCF_ESW_PRES */
6006 +#define MCF_ESW_PRES_VLAN (0x00000001)
6007 +#define MCF_ESW_PRES_IP (0x00000002)
6008 +#define MCF_ESW_PRES_MAC (0x00000004)
6009 +#define MCF_ESW_PRES_DFLT_PRI(x) (((x)&0x00000007)<<4)
6011 +/* Bit definitions and macros for MCF_ESW_PID */
6012 +#define MCF_ESW_PID_VLANID(x) (((x)&0x0000FFFF)<<0)
6014 +/* Bit definitions and macros for MCF_ESW_VRES */
6015 +#define MCF_ESW_VRES_P0 (0x00000001)
6016 +#define MCF_ESW_VRES_P1 (0x00000002)
6017 +#define MCF_ESW_VRES_P2 (0x00000004)
6018 +#define MCF_ESW_VRES_VLANID(x) (((x)&0x00000FFF)<<3)
6020 +/* Bit definitions and macros for MCF_ESW_DISCN */
6021 +#define MCF_ESW_DISCN_COUNT(x) (((x)&0xFFFFFFFF)<<0)
6023 +/* Bit definitions and macros for MCF_ESW_DISCB */
6024 +#define MCF_ESW_DISCB_COUNT(x) (((x)&0xFFFFFFFF)<<0)
6026 +/* Bit definitions and macros for MCF_ESW_NDISCN */
6027 +#define MCF_ESW_NDISCN_COUNT(x) (((x)&0xFFFFFFFF)<<0)
6029 +/* Bit definitions and macros for MCF_ESW_NDISCB */
6030 +#define MCF_ESW_NDISCB_COUNT(x) (((x)&0xFFFFFFFF)<<0)
6032 +/* Bit definitions and macros for MCF_ESW_POQC */
6033 +#define MCF_ESW_POQC_COUNT(x) (((x)&0xFFFFFFFF)<<0)
6035 +/* Bit definitions and macros for MCF_ESW_PMVID */
6036 +#define MCF_ESW_PMVID_COUNT(x) (((x)&0xFFFFFFFF)<<0)
6038 +/* Bit definitions and macros for MCF_ESW_PMVTAG */
6039 +#define MCF_ESW_PMVTAG_COUNT(x) (((x)&0xFFFFFFFF)<<0)
6041 +/* Bit definitions and macros for MCF_ESW_PBL */
6042 +#define MCF_ESW_PBL_COUNT(x) (((x)&0xFFFFFFFF)<<0)
6044 +/* Bit definitions and macros for MCF_ESW_ISR */
6045 +#define MCF_ESW_ISR_EBERR (0x00000001)
6046 +#define MCF_ESW_ISR_RXB (0x00000002)
6047 +#define MCF_ESW_ISR_RXF (0x00000004)
6048 +#define MCF_ESW_ISR_TXB (0x00000008)
6049 +#define MCF_ESW_ISR_TXF (0x00000010)
6050 +#define MCF_ESW_ISR_QM (0x00000020)
6051 +#define MCF_ESW_ISR_OD0 (0x00000040)
6052 +#define MCF_ESW_ISR_OD1 (0x00000080)
6053 +#define MCF_ESW_ISR_OD2 (0x00000100)
6054 +#define MCF_ESW_ISR_LRN (0x00000200)
6056 +/* Bit definitions and macros for MCF_ESW_IMR */
6057 +#define MCF_ESW_IMR_EBERR (0x00000001)
6058 +#define MCF_ESW_IMR_RXB (0x00000002)
6059 +#define MCF_ESW_IMR_RXF (0x00000004)
6060 +#define MCF_ESW_IMR_TXB (0x00000008)
6061 +#define MCF_ESW_IMR_TXF (0x00000010)
6062 +#define MCF_ESW_IMR_QM (0x00000020)
6063 +#define MCF_ESW_IMR_OD0 (0x00000040)
6064 +#define MCF_ESW_IMR_OD1 (0x00000080)
6065 +#define MCF_ESW_IMR_OD2 (0x00000100)
6066 +#define MCF_ESW_IMR_LRN (0x00000200)
6068 +/* Bit definitions and macros for MCF_ESW_RDSR */
6069 +#define MCF_ESW_RDSR_ADDRESS(x) (((x)&0x3FFFFFFF)<<2)
6071 +/* Bit definitions and macros for MCF_ESW_TDSR */
6072 +#define MCF_ESW_TDSR_ADDRESS(x) (((x)&0x3FFFFFFF)<<2)
6074 +/* Bit definitions and macros for MCF_ESW_MRBR */
6075 +#define MCF_ESW_MRBR_SIZE(x) (((x)&0x000003FF)<<4)
6077 +/* Bit definitions and macros for MCF_ESW_RDAR */
6078 +#define MCF_ESW_RDAR_R_DES_ACTIVE (0x01000000)
6080 +/* Bit definitions and macros for MCF_ESW_TDAR */
6081 +#define MCF_ESW_TDAR_X_DES_ACTIVE (0x01000000)
6083 +/* Bit definitions and macros for MCF_ESW_LREC0 */
6084 +#define MCF_ESW_LREC0_MACADDR0(x) (((x)&0xFFFFFFFF)<<0)
6086 +/* Bit definitions and macros for MCF_ESW_LREC1 */
6087 +#define MCF_ESW_LREC1_MACADDR1(x) (((x)&0x0000FFFF)<<0)
6088 +#define MCF_ESW_LREC1_HASH(x) (((x)&0x000000FF)<<16)
6089 +#define MCF_ESW_LREC1_SWPORT(x) (((x)&0x00000003)<<24)
6091 +/* Bit definitions and macros for MCF_ESW_LSR */
6092 +#define MCF_ESW_LSR_DA (0x00000001)
6094 +/* port mirroring port number match */
6095 +#define MIRROR_EGRESS_PORT_MATCH 1
6096 +#define MIRROR_INGRESS_PORT_MATCH 2
6098 +/* port mirroring mac address match */
6099 +#define MIRROR_EGRESS_SOURCE_MATCH 1
6100 +#define MIRROR_INGRESS_SOURCE_MATCH 2
6101 +#define MIRROR_EGRESS_DESTINATION_MATCH 3
6102 +#define MIRROR_INGRESS_DESTINATION_MATCH 4
6104 +#endif /* SWITCH_H */
6105 --- a/include/linux/fsl_devices.h
6106 +++ b/include/linux/fsl_devices.h
6107 @@ -129,4 +129,21 @@ struct fsl_ata_platform_data {
6109 int (*get_clk_rate)(void);
6113 +struct coldfire_switch_platform_data {
6115 + unsigned int *switch_hw;
6116 + void (*request_intrs)(struct net_device *dev,
6117 + irqreturn_t (*)(int, void *),
6118 + void *irq_privatedata);
6119 + void (*set_mii)(struct net_device *dev);
6120 + void (*get_mac)(struct net_device *dev);
6121 + void (*enable_phy_intr)(void);
6122 + void (*disable_phy_intr)(void);
6123 + void (*phy_ack_intr)(void);
6124 + void (*localhw_setup)(void);
6125 + void (*uncache)(unsigned long addr);
6126 + void (*platform_flush_cache)(void);
6128 #endif /* _FSL_DEVICE_H_ */
6129 --- a/net/core/dev.c
6130 +++ b/net/core/dev.c
6131 @@ -4756,6 +4756,10 @@ static int dev_ifsioc(struct net *net, s
6133 if ((cmd >= SIOCDEVPRIVATE &&
6134 cmd <= SIOCDEVPRIVATE + 15) ||
6135 +#if defined(CONFIG_MODELO_SWITCH)
6139 cmd == SIOCBONDENSLAVE ||
6140 cmd == SIOCBONDRELEASE ||
6141 cmd == SIOCBONDSETHWADDR ||
6142 @@ -4948,6 +4952,10 @@ int dev_ioctl(struct net *net, unsigned
6145 if (cmd == SIOCWANDEV ||
6146 +#if defined(CONFIG_MODELO_SWITCH)
6150 (cmd >= SIOCDEVPRIVATE &&
6151 cmd <= SIOCDEVPRIVATE + 15)) {
6152 dev_load(net, ifr.ifr_name);