[openwrt/svn-archive/archive.git] / target / linux / coldfire / patches / 021-Add-ethernet-switch-driver-for-MCF54418.patch
1 From 51e66f289f280a33bb17047717d2e6539a2917e1 Mon Sep 17 00:00:00 2001
2 From: Alison Wang <b18965@freescale.com>
3 Date: Thu, 4 Aug 2011 09:59:44 +0800
4 Subject: [PATCH 21/52] Add ethernet switch driver for MCF54418
5
6 Add ethernet switch driver support for MCF54418.
7
8 Signed-off-by: Alison Wang <b18965@freescale.com>
9 ---
10 arch/m68k/coldfire/m5441x/l2switch.c | 284 +++
11 arch/m68k/include/asm/mcfswitch.h | 324 +++
12 drivers/net/Kconfig | 8 +
13 drivers/net/Makefile | 1 +
14 drivers/net/modelo_switch.c | 4293 ++++++++++++++++++++++++++++++++++
15 drivers/net/modelo_switch.h | 1141 +++++++++
16 include/linux/fsl_devices.h | 17 +
17 net/core/dev.c | 8 +
18 8 files changed, 6076 insertions(+), 0 deletions(-)
19 create mode 100644 arch/m68k/coldfire/m5441x/l2switch.c
20 create mode 100644 arch/m68k/include/asm/mcfswitch.h
21 create mode 100644 drivers/net/modelo_switch.c
22 create mode 100644 drivers/net/modelo_switch.h
23
24 --- /dev/null
25 +++ b/arch/m68k/coldfire/m5441x/l2switch.c
26 @@ -0,0 +1,284 @@
27 +/*
28 + * l2switch.c
29 + *
30 + * Sub-architecture dependent initialization code for the Freescale
31 + * 5441X L2 Switch module.
32 + *
33 + * Copyright (C) 2010-2011 Freescale Semiconductor, Inc. All Rights Reserved.
34 + * ShrekWu B16972@freescale.com
35 + *
36 + *
37 + * This program is free software; you can redistribute it and/or modify it
38 + * under the terms of the GNU General Public License as published by the
39 + * Free Software Foundation; either version 2 of the License, or (at your
40 + * option) any later version.
41 + *
42 + * This program is distributed in the hope that it will be useful,
43 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
44 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
45 + * GNU General Public License for more details.
46 + *
47 + * You should have received a copy of the GNU General Public License
48 + * along with this program; if not, write to the Free Software
49 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
50 + */
51 +#include <linux/kernel.h>
52 +#include <linux/sched.h>
53 +#include <linux/param.h>
54 +#include <linux/init.h>
55 +#include <linux/interrupt.h>
56 +#include <linux/device.h>
57 +#include <linux/platform_device.h>
58 +#include <linux/fsl_devices.h>
59 +
60 +#include <asm/traps.h>
61 +#include <asm/machdep.h>
62 +#include <asm/coldfire.h>
63 +#include <asm/mcfswitch.h>
64 +#include <asm/mcfsim.h>
65 +
66 +static unsigned char switch_mac_default[] = {
67 + 0x00, 0x04, 0x9F, 0x00, 0xB3, 0x49,
68 +};
69 +
70 +static unsigned char switch_mac_addr[6];
71 +
72 +static void switch_request_intrs(struct net_device *dev,
73 + irqreturn_t switch_net_irq_handler(int irq, void *private),
74 + void *irq_privatedata)
75 +{
76 + struct switch_enet_private *fep;
77 + int b;
78 + static const struct idesc {
79 + char *name;
80 + unsigned short irq;
81 + } *idp, id[] = {
82 + /*{ "esw_isr(EBERR)", 38 },*/
83 + { "esw_isr(RxBuffer)", 39 },
84 + { "esw_isr(RxFrame)", 40 },
85 + { "esw_isr(TxBuffer)", 41 },
86 + { "esw_isr(TxFrame)", 42 },
87 + { "esw_isr(QM)", 43 },
88 + { "esw_isr(P0OutputDiscard)", 44 },
89 + { "esw_isr(P1OutputDiscard)", 45 },
90 + { "esw_isr(P2OutputDiscard)", 46 },
91 + { "esw_isr(LearningRecord)", 47 },
92 + { NULL },
93 + };
94 +
95 + fep = netdev_priv(dev);
96 +	/* IRQ base offset for the L2 ethernet SWITCH interrupts */
97 + b = 64 + 64 + 64;
98 +
99 + /* Setup interrupt handlers. */
100 + for (idp = id; idp->name; idp++) {
101 + if (request_irq(b+idp->irq,
102 + switch_net_irq_handler, IRQF_DISABLED,
103 + idp->name, irq_privatedata) != 0)
104 + printk(KERN_ERR "FEC: Could not alloc %s IRQ(%d)!\n",
105 + idp->name, b+idp->irq);
106 + }
107 +
108 + /* Configure RMII */
109 + MCF_GPIO_PAR_FEC = (MCF_GPIO_PAR_FEC &
110 + MCF_GPIO_PAR_FEC_FEC_MASK) |
111 + MCF_GPIO_PAR_FEC_FEC_RMII0FUL_1FUL;
112 +
113 + MCF_GPIO_PAR_FEC =
114 + (MCF_GPIO_PAR_FEC &
115 + MCF_GPIO_PAR_FEC_FEC_MASK) |
116 + MCF_GPIO_PAR_FEC_FEC_RMII0FUL_1FUL;
117 +
118 + MCF_GPIO_SRCR_FEC = 0x0F;
119 +
120 + MCF_GPIO_PAR_SIMP0H =
121 + (MCF_GPIO_PAR_SIMP0H &
122 + MCF_GPIO_PAR_SIMP0H_DAT_MASK) |
123 + MCF_GPIO_PAR_SIMP0H_DAT_GPIO;
124 +
125 + MCF_GPIO_PDDR_G =
126 + (MCF_GPIO_PDDR_G &
127 + MCF_GPIO_PDDR_G4_MASK) |
128 + MCF_GPIO_PDDR_G4_OUTPUT;
129 +
130 + MCF_GPIO_PODR_G =
131 + (MCF_GPIO_PODR_G &
132 + MCF_GPIO_PODR_G4_MASK);
133 +}
134 +
135 +static void switch_set_mii(struct net_device *dev)
136 +{
137 + struct switch_enet_private *fep = netdev_priv(dev);
138 + volatile switch_t *fecp;
139 +
140 + fecp = fep->hwp;
141 +
142 + MCF_FEC_RCR0 = (MCF_FEC_RCR_PROM | MCF_FEC_RCR_RMII_MODE |
143 + MCF_FEC_RCR_MAX_FL(1522) | MCF_FEC_RCR_CRC_FWD);
144 + MCF_FEC_RCR1 = (MCF_FEC_RCR_PROM | MCF_FEC_RCR_RMII_MODE |
145 + MCF_FEC_RCR_MAX_FL(1522) | MCF_FEC_RCR_CRC_FWD);
146 + /* TCR */
147 + MCF_FEC_TCR0 = MCF_FEC_TCR_FDEN;
148 + MCF_FEC_TCR1 = MCF_FEC_TCR_FDEN;
149 + /* ECR */
150 +#ifdef MODELO_ENHANCE_BUFFER
151 + MCF_FEC_ECR0 = MCF_FEC_ECR_ETHER_EN | MCF_FEC_ECR_ENA_1588;
152 + MCF_FEC_ECR1 = MCF_FEC_ECR_ETHER_EN | MCF_FEC_ECR_ENA_1588;
153 +#else /*legacy buffer*/
154 + MCF_FEC_ECR0 = MCF_FEC_ECR_ETHER_EN;
155 + MCF_FEC_ECR1 = MCF_FEC_ECR_ETHER_EN;
156 +#endif
157 + /*
158 + * Set MII speed to 2.5 MHz
159 + */
160 + MCF_FEC_MSCR0 = ((((MCF_CLK / 2) / (2500000 / 10)) + 5) / 10) * 2;
161 + MCF_FEC_MSCR1 = ((((MCF_CLK / 2) / (2500000 / 10)) + 5) / 10) * 2;
162 +
163 +}
164 +
165 +static void switch_get_mac(struct net_device *dev)
166 +{
167 + struct switch_enet_private *fep = netdev_priv(dev);
168 + volatile switch_t *fecp;
169 + unsigned char *iap;
170 +
171 + fecp = fep->hwp;
172 +
173 + if (FEC_FLASHMAC) {
174 + /*
175 + * Get MAC address from FLASH.
176 + * If it is all 1's or 0's, use the default.
177 + */
178 + iap = FEC_FLASHMAC;
179 + if ((iap[0] == 0) && (iap[1] == 0) && (iap[2] == 0) &&
180 + (iap[3] == 0) && (iap[4] == 0) && (iap[5] == 0))
181 + iap = switch_mac_default;
182 + if ((iap[0] == 0xff) && (iap[1] == 0xff) &&
183 + (iap[2] == 0xff) && (iap[3] == 0xff) &&
184 + (iap[4] == 0xff) && (iap[5] == 0xff))
185 + iap = switch_mac_default;
186 +
187 + } else {
188 + iap = &switch_mac_addr[0];
189 +
190 + if ((iap[0] == 0) && (iap[1] == 0) && (iap[2] == 0) &&
191 + (iap[3] == 0) && (iap[4] == 0) && (iap[5] == 0))
192 + iap = switch_mac_default;
193 + if ((iap[0] == 0xff) && (iap[1] == 0xff) &&
194 + (iap[2] == 0xff) && (iap[3] == 0xff) &&
195 + (iap[4] == 0xff) && (iap[5] == 0xff))
196 + iap = switch_mac_default;
197 + }
198 +
199 + memcpy(dev->dev_addr, iap, ETH_ALEN);
200 + /* Adjust MAC if using default MAC address */
201 + if (iap == switch_mac_default)
202 + dev->dev_addr[ETH_ALEN-1] = switch_mac_default[ETH_ALEN-1] +
203 + fep->index;
204 +}
205 +
206 +static void switch_enable_phy_intr(void)
207 +{
208 +}
209 +
210 +static void switch_disable_phy_intr(void)
211 +{
212 +}
213 +
214 +static void switch_phy_ack_intr(void)
215 +{
216 +}
217 +
218 +static void switch_localhw_setup(void)
219 +{
220 +}
221 +
222 +static void switch_uncache(unsigned long addr)
223 +{
224 +}
225 +
226 +static void switch_platform_flush_cache(void)
227 +{
228 +}
229 +
230 +/*
231 + * Define the fixed addresses of the L2 switch hardware.
232 + */
233 +static unsigned int switch_platform_hw[] = {
234 + (0xfc0dc000),
235 +	(0xfc0e0000),
236 +};
237 +
238 +static struct coldfire_switch_platform_data mcf5441x_switch_data = {
239 + .hash_table = 0,
240 + .switch_hw = switch_platform_hw,
241 + .request_intrs = switch_request_intrs,
242 + .set_mii = switch_set_mii,
243 + .get_mac = switch_get_mac,
244 + .enable_phy_intr = switch_enable_phy_intr,
245 + .disable_phy_intr = switch_disable_phy_intr,
246 + .phy_ack_intr = switch_phy_ack_intr,
247 + .localhw_setup = switch_localhw_setup,
248 + .uncache = switch_uncache,
249 + .platform_flush_cache = switch_platform_flush_cache,
250 +};
251 +
252 +static struct resource l2switch_coldfire_resources[] = {
253 + [0] = {
254 + .start = 0xFC0DC000,
255 + .end = 0xFC0DC508,
256 + .flags = IORESOURCE_MEM,
257 + },
258 + [1] = {
259 + .start = (64 + 64 + 64 + 38),
260 + .end = (64 + 64 + 64 + 48),
261 + .flags = IORESOURCE_IRQ,
262 + },
263 + [2] = {
264 + .start = 0xFC0E0000,
265 + .end = 0xFC0E3FFC,
266 + .flags = IORESOURCE_MEM,
267 + },
268 +};
269 +
270 +static struct platform_device l2switch_coldfire_device = {
271 + .name = "coldfire-switch",
272 + .id = 0,
273 + .resource = l2switch_coldfire_resources,
274 + .num_resources = ARRAY_SIZE(l2switch_coldfire_resources),
275 + .dev = {
276 + .platform_data = &mcf5441x_switch_data,
277 + .coherent_dma_mask = ~0, /* $$$ REVISIT */
278 + }
279 +};
280 +
281 +
282 +static int __init mcf5441x_switch_dev_init(void)
283 +{
284 + int retval = 0;
285 +
286 + retval = platform_device_register(&l2switch_coldfire_device);
287 +
288 + if (retval < 0) {
289 + printk(KERN_ERR "MCF5441x L2Switch: platform_device_register"
290 + " failed with code=%d\n", retval);
291 + }
292 +
293 + return retval;
294 +}
295 +
296 +static int __init param_switch_addr_setup(char *str)
297 +{
298 + char *end;
299 + int i;
300 +
301 + for (i = 0; i < 6; i++) {
302 + switch_mac_addr[i] = str ? simple_strtoul(str, &end, 16) : 0;
303 + if (str)
304 + str = (*end) ? end + 1 : end;
305 + }
306 + return 0;
307 +}
308 +__setup("switchaddr=", param_switch_addr_setup);
309 +
310 +arch_initcall(mcf5441x_switch_dev_init);
311 --- /dev/null
312 +++ b/arch/m68k/include/asm/mcfswitch.h
313 @@ -0,0 +1,324 @@
314 +/****************************************************************************/
315 +
316 +/*
317 + * mcfswitch -- L2 SWITCH Controller for Motorola ColdFire SoC
318 + * processors.
319 + *
320 + * Copyright (C) 2010-2011 Freescale Semiconductor, Inc. All Rights Reserved.
321 + *
322 + * This program is free software; you can redistribute it and/or modify it
323 + * under the terms of the GNU General Public License as published by the
324 + * Free Software Foundation; either version 2 of the License, or (at your
325 + * option) any later version.
326 + *
327 + * This program is distributed in the hope that it will be useful,
328 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
329 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
330 + * GNU General Public License for more details.
331 + *
332 + * You should have received a copy of the GNU General Public License
333 + * along with this program; if not, write to the Free Software
334 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
335 + */
336 +
337 +/****************************************************************************/
338 +#ifndef SWITCH_H
339 +#define SWITCH_H
340 +/****************************************************************************/
341 +#include <linux/netdevice.h>
342 +#include <linux/etherdevice.h>
343 +#include <linux/skbuff.h>
344 +#include <linux/spinlock.h>
345 +#include <linux/workqueue.h>
346 +#include <linux/platform_device.h>
347 +#include <asm/pgtable.h>
348 +
349 +#define FEC_FLASHMAC 0
350 +#define SWITCH_EPORT_NUMBER 2
351 +
352 +#ifdef CONFIG_SWITCH_DMA_USE_SRAM
353 +#define TX_RING_SIZE 8 /* Must be power of two */
354 +#define TX_RING_MOD_MASK 7 /* for this to work */
355 +#else
356 +#define TX_RING_SIZE 16 /* Must be power of two */
357 +#define TX_RING_MOD_MASK 15 /* for this to work */
358 +#endif
359 +
360 +typedef struct l2switch_port_statistics_status {
361 + /*outgoing frames discarded due to transmit queue congestion*/
362 + unsigned long MCF_ESW_POQC;
363 + /*incoming frames discarded due to VLAN domain mismatch*/
364 + unsigned long MCF_ESW_PMVID;
365 + /*incoming frames discarded due to untagged discard*/
366 + unsigned long MCF_ESW_PMVTAG;
367 +	/*incoming frames discarded because the port is in blocking state*/
368 + unsigned long MCF_ESW_PBL;
369 +} esw_port_statistics_status;
370 +
371 +typedef struct l2switch {
372 + unsigned long ESW_REVISION;
373 + unsigned long ESW_SCRATCH;
374 + unsigned long ESW_PER;
375 + unsigned long reserved0[1];
376 + unsigned long ESW_VLANV;
377 + unsigned long ESW_DBCR;
378 + unsigned long ESW_DMCR;
379 + unsigned long ESW_BKLR;
380 + unsigned long ESW_BMPC;
381 + unsigned long ESW_MODE;
382 + unsigned long ESW_VIMSEL;
383 + unsigned long ESW_VOMSEL;
384 + unsigned long ESW_VIMEN;
385 + unsigned long ESW_VID;/*0x34*/
386 + /*from 0x38 0x3C*/
387 + unsigned long esw_reserved0[2];
388 + unsigned long ESW_MCR;/*0x40*/
389 + unsigned long ESW_EGMAP;
390 + unsigned long ESW_INGMAP;
391 + unsigned long ESW_INGSAL;
392 + unsigned long ESW_INGSAH;
393 + unsigned long ESW_INGDAL;
394 + unsigned long ESW_INGDAH;
395 + unsigned long ESW_ENGSAL;
396 + unsigned long ESW_ENGSAH;
397 + unsigned long ESW_ENGDAL;
398 + unsigned long ESW_ENGDAH;
399 + unsigned long ESW_MCVAL;/*0x6C*/
400 + /*from 0x70--0x7C*/
401 + unsigned long esw_reserved1[4];
402 + unsigned long ESW_MMSR;/*0x80*/
403 + unsigned long ESW_LMT;
404 + unsigned long ESW_LFC;
405 + unsigned long ESW_PCSR;
406 + unsigned long ESW_IOSR;
407 + unsigned long ESW_QWT;/*0x94*/
408 + unsigned long esw_reserved2[1];/*0x98*/
409 + unsigned long ESW_P0BCT;/*0x9C*/
410 + /*from 0xA0-0xB8*/
411 + unsigned long esw_reserved3[7];
412 + unsigned long ESW_P0FFEN;/*0xBC*/
413 + unsigned long ESW_PSNP[8];
414 + unsigned long ESW_IPSNP[8];
415 + unsigned long ESW_PVRES[3];
416 + /*from 0x10C-0x13C*/
417 + unsigned long esw_reserved4[13];
418 + unsigned long ESW_IPRES;/*0x140*/
419 + /*from 0x144-0x17C*/
420 + unsigned long esw_reserved5[15];
421 +
422 + /*port0-port2 Priority Configuration 0xFC0D_C180-C188*/
423 + unsigned long ESW_PRES[3];
424 + /*from 0x18C-0x1FC*/
425 + unsigned long esw_reserved6[29];
426 +
427 + /*port0-port2 VLAN ID 0xFC0D_C200-C208*/
428 + unsigned long ESW_PID[3];
429 + /*from 0x20C-0x27C*/
430 + unsigned long esw_reserved7[29];
431 +
432 + /*port0-port2 VLAN domain resolution entry 0xFC0D_C280-C2FC*/
433 + unsigned long ESW_VRES[32];
434 +
435 + unsigned long ESW_DISCN;/*0x300*/
436 + unsigned long ESW_DISCB;
437 + unsigned long ESW_NDISCN;
438 + unsigned long ESW_NDISCB;/*0xFC0DC30C*/
439 + /*per port statistics 0xFC0DC310_C33C*/
440 + esw_port_statistics_status port_statistics_status[3];
441 + /*from 0x340-0x400*/
442 + unsigned long esw_reserved8[48];
443 +
444 + /*0xFC0DC400---0xFC0DC418*/
445 + /*unsigned long MCF_ESW_ISR;*/
446 + unsigned long switch_ievent; /* Interrupt event reg */
447 + /*unsigned long MCF_ESW_IMR;*/
448 + unsigned long switch_imask; /* Interrupt mask reg */
449 + /*unsigned long MCF_ESW_RDSR;*/
450 + unsigned long fec_r_des_start; /* Receive descriptor ring */
451 + /*unsigned long MCF_ESW_TDSR;*/
452 + unsigned long fec_x_des_start; /* Transmit descriptor ring */
453 + /*unsigned long MCF_ESW_MRBR;*/
454 + unsigned long fec_r_buff_size; /* Maximum receive buff size */
455 + /*unsigned long MCF_ESW_RDAR;*/
456 + unsigned long fec_r_des_active; /* Receive descriptor reg */
457 + /*unsigned long MCF_ESW_TDAR;*/
458 + unsigned long fec_x_des_active; /* Transmit descriptor reg */
459 + /*from 0x420-0x4FC*/
460 + unsigned long esw_reserved9[57];
461 +
462 + /*0xFC0DC500---0xFC0DC508*/
463 + unsigned long ESW_LREC0;
464 + unsigned long ESW_LREC1;
465 + unsigned long ESW_LSR;
466 +} switch_t;
467 +
468 +typedef struct _64bTableEntry {
469 + unsigned int lo; /* lower 32 bits */
470 + unsigned int hi; /* upper 32 bits */
471 +} AddrTable64bEntry;
472 +
473 +typedef struct l2switchaddrtable {
474 + AddrTable64bEntry eswTable64bEntry[2048];
475 +} eswAddrTable_t;
476 +
477 +#define MCF_FEC_MSCR0 (*(volatile unsigned long *)(0xFC0D4044))
478 +#define MCF_FEC_MSCR1 (*(volatile unsigned long *)(0xFC0D8044))
479 +#define MCF_FEC_RCR0 (*(volatile unsigned long *)(0xFC0D4084))
480 +#define MCF_FEC_RCR1 (*(volatile unsigned long *)(0xFC0D8084))
481 +#define MCF_FEC_TCR0 (*(volatile unsigned long *)(0xFC0D40C4))
482 +#define MCF_FEC_TCR1 (*(volatile unsigned long *)(0xFC0D80C4))
483 +#define MCF_FEC_ECR0 (*(volatile unsigned long *)(0xFC0D4024))
484 +#define MCF_FEC_ECR1 (*(volatile unsigned long *)(0xFC0D8024))
485 +
486 +#define MCF_FEC_RCR_PROM (0x00000008)
487 +#define MCF_FEC_RCR_RMII_MODE (0x00000100)
488 +#define MCF_FEC_RCR_MAX_FL(x) (((x)&0x00003FFF)<<16)
489 +#define MCF_FEC_RCR_CRC_FWD (0x00004000)
490 +
491 +#define MCF_FEC_TCR_FDEN (0x00000004)
492 +
493 +#define MCF_FEC_ECR_ETHER_EN (0x00000002)
494 +#define MCF_FEC_ECR_ENA_1588 (0x00000010)
495 +
496 +
497 +typedef struct bufdesc {
498 + unsigned short cbd_sc; /* Control and status info */
499 + unsigned short cbd_datlen; /* Data length */
500 + unsigned long cbd_bufaddr; /* Buffer address */
501 +#ifdef MODELO_BUFFER
502 + unsigned long ebd_status;
503 + unsigned short length_proto_type;
504 + unsigned short payload_checksum;
505 + unsigned long bdu;
506 + unsigned long timestamp;
507 + unsigned long reserverd_word1;
508 + unsigned long reserverd_word2;
509 +#endif
510 +} cbd_t;
511 +
512 +/* Forward declarations of some structures to support different PHYs
513 + */
514 +typedef struct {
515 + uint mii_data;
516 + void (*funct)(uint mii_reg, struct net_device *dev);
517 +} phy_cmd_t;
518 +
519 +typedef struct {
520 + uint id;
521 + char *name;
522 +
523 + const phy_cmd_t *config;
524 + const phy_cmd_t *startup;
525 + const phy_cmd_t *ack_int;
526 + const phy_cmd_t *shutdown;
527 +} phy_info_t;
528 +
529 +/* The switch buffer descriptors track the ring buffers. The rx_bd_base and
530 + * tx_bd_base always point to the base of the buffer descriptors. The
531 + * cur_rx and cur_tx point to the currently available buffer.
532 + * The dirty_tx tracks the current buffer that is being sent by the
533 + * controller. The cur_tx and dirty_tx are equal under both completely
534 + * empty and completely full conditions. The empty/ready indicator in
535 + * the buffer descriptor determines the actual condition.
536 + */
537 +struct switch_enet_private {
538 + /* Hardware registers of the switch device */
539 + volatile switch_t *hwp;
540 + volatile eswAddrTable_t *hwentry;
541 +
542 + struct net_device *netdev;
543 + struct platform_device *pdev;
544 + /* The saved address of a sent-in-place packet/buffer, for skfree(). */
545 + unsigned char *tx_bounce[TX_RING_SIZE];
546 + struct sk_buff *tx_skbuff[TX_RING_SIZE];
547 + ushort skb_cur;
548 + ushort skb_dirty;
549 +
550 + /* CPM dual port RAM relative addresses.
551 + */
552 + cbd_t *rx_bd_base; /* Address of Rx and Tx buffers. */
553 + cbd_t *tx_bd_base;
554 + cbd_t *cur_rx, *cur_tx; /* The next free ring entry */
555 + cbd_t *dirty_tx; /* The ring entries to be free()ed. */
556 + uint tx_full;
557 + /* hold while accessing the HW like ringbuffer for tx/rx but not MAC */
558 + spinlock_t hw_lock;
559 +
560 + /* hold while accessing the mii_list_t() elements */
561 + spinlock_t mii_lock;
562 + struct mii_bus *mdio_bus;
563 + struct phy_device *phydev[SWITCH_EPORT_NUMBER];
564 +
565 + uint phy_id;
566 + uint phy_id_done;
567 + uint phy_status;
568 + uint phy_speed;
569 + phy_info_t const *phy;
570 + struct work_struct phy_task;
571 + volatile switch_t *phy_hwp;
572 +
573 + uint sequence_done;
574 + uint mii_phy_task_queued;
575 +
576 + uint phy_addr;
577 +
578 + int index;
579 + int opened;
580 + int full_duplex;
581 + int msg_enable;
582 + int phy1_link;
583 + int phy1_old_link;
584 + int phy1_duplex;
585 + int phy1_speed;
586 +
587 + int phy2_link;
588 + int phy2_old_link;
589 + int phy2_duplex;
590 + int phy2_speed;
591 + /* --------------Statistics--------------------------- */
592 +	/* number of times a new element deleted an existing element
593 +	 * within a block due to lack of space */
594 + int atBlockOverflows;
595 + /* Peak number of valid entries in the address table */
596 + int atMaxEntries;
597 + /* current number of valid entries in the address table */
598 + int atCurrEntries;
599 + /* maximum entries within a block found
600 + * (updated within ageing)*/
601 + int atMaxEntriesPerBlock;
602 +
603 + /* -------------------ageing function------------------ */
604 + /* maximum age allowed for an entry */
605 + int ageMax;
606 + /* last LUT entry to block that was
607 + * inspected by the Ageing task*/
608 + int ageLutIdx;
609 + /* last element within block inspected by the Ageing task */
610 + int ageBlockElemIdx;
611 + /* complete table has been processed by ageing process */
612 + int ageCompleted;
613 + /* delay setting */
614 + int ageDelay;
615 + /* current delay Counter */
616 + int ageDelayCnt;
617 +
618 + /* ----------------timer related---------------------------- */
619 + /* current time (for timestamping) */
620 + int currTime;
621 + /* flag set by timer when currTime changed
622 + * and cleared by serving function*/
623 + int timeChanged;
624 +
625 + /* Timer for Aging */
626 + struct timer_list timer_aging;
627 + int learning_irqhandle_enable;
628 +};
629 +
630 +struct switch_platform_private {
631 + struct platform_device *pdev;
632 +
633 + unsigned long quirks;
634 + int num_slots; /* Slots on controller */
635 + struct switch_enet_private *fep_host[0]; /* Pointers to hosts */
636 +};
637 +#endif
638 --- a/drivers/net/Kconfig
639 +++ b/drivers/net/Kconfig
640 @@ -1950,6 +1950,14 @@ config FEC
641 Say Y here if you want to use the built-in 10/100 Fast ethernet
642 controller on some Motorola ColdFire and Freescale i.MX processors.
643
644 +config MODELO_SWITCH
645 +	bool "ethernet switch controller (on some ColdFire CPUs)"
646 + depends on !FEC && M5441X
647 + help
648 + Say Y here if you want to use the built-in ethernet switch
649 + controller on some ColdFire processors.
650 +	  The integrated Ethernet switch engine is compatible with
651 +	  the 10/100 MAC-NET core.
652
653 config FEC2
654 bool "Second FEC ethernet controller (on some ColdFire CPUs)"
655 --- a/drivers/net/Makefile
656 +++ b/drivers/net/Makefile
657 @@ -127,6 +127,7 @@ ifeq ($(CONFIG_FEC_1588), y)
658 obj-$(CONFIG_FEC) += fec_1588.o
659 endif
660 obj-$(CONFIG_FEC_548x) += fec_m547x.o
661 +obj-$(CONFIG_MODELO_SWITCH) += modelo_switch.o
662 obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx.o
663 ifeq ($(CONFIG_FEC_MPC52xx_MDIO),y)
664 obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx_phy.o
665 --- /dev/null
666 +++ b/drivers/net/modelo_switch.c
667 @@ -0,0 +1,4293 @@
668 +/*
669 + * L2 switch Controller (Ethernet switch) driver for MCF5441x.
670 + *
671 + * Copyright (C) 2010-2011 Freescale Semiconductor, Inc. All Rights Reserved.
672 + * Shrek Wu (B16972@freescale.com)
673 + * Alison Wang (b18965@freescale.com)
674 + * Jason Jin (Jason.jin@freescale.com)
675 + *
676 + * This program is free software; you can redistribute it and/or modify it
677 + * under the terms of the GNU General Public License as published by the
678 + * Free Software Foundation; either version 2 of the License, or (at your
679 + * option) any later version.
680 + */
681 +
682 +#include <linux/module.h>
683 +#include <linux/kernel.h>
684 +#include <linux/string.h>
685 +#include <linux/ptrace.h>
686 +#include <linux/errno.h>
687 +#include <linux/ioport.h>
688 +#include <linux/slab.h>
689 +#include <linux/interrupt.h>
690 +#include <linux/pci.h>
691 +#include <linux/init.h>
692 +#include <linux/delay.h>
693 +#include <linux/netdevice.h>
694 +#include <linux/etherdevice.h>
695 +#include <linux/skbuff.h>
696 +#include <linux/spinlock.h>
697 +#include <linux/workqueue.h>
698 +#include <linux/bitops.h>
699 +#include <linux/platform_device.h>
700 +#include <linux/fsl_devices.h>
701 +#include <linux/phy.h>
702 +#include <linux/kthread.h>
703 +#include <linux/syscalls.h>
704 +#include <linux/uaccess.h>
705 +#include <linux/io.h>
706 +#include <linux/signal.h>
707 +
708 +#include <asm/irq.h>
709 +#include <asm/pgtable.h>
710 +#include <asm/cacheflush.h>
711 +#include <asm/coldfire.h>
712 +#include <asm/mcfsim.h>
713 +#include "modelo_switch.h"
714 +
715 +#define SWITCH_MAX_PORTS 1
716 +#define CONFIG_FEC_SHARED_PHY
717 +
718 +/* Interrupt events/masks.
719 +*/
720 +#define FEC_ENET_HBERR ((uint)0x80000000) /* Heartbeat error */
721 +#define FEC_ENET_BABR ((uint)0x40000000) /* Babbling receiver */
722 +#define FEC_ENET_BABT ((uint)0x20000000) /* Babbling transmitter */
723 +#define FEC_ENET_GRA ((uint)0x10000000) /* Graceful stop complete */
724 +#define FEC_ENET_TXF ((uint)0x08000000) /* Full frame transmitted */
725 +#define FEC_ENET_TXB ((uint)0x04000000) /* A buffer was transmitted */
726 +#define FEC_ENET_RXF ((uint)0x02000000) /* Full frame received */
727 +#define FEC_ENET_RXB ((uint)0x01000000) /* A buffer was received */
728 +#define FEC_ENET_MII ((uint)0x00800000) /* MII interrupt */
729 +#define FEC_ENET_EBERR ((uint)0x00400000) /* SDMA bus error */
730 +
731 +static int switch_enet_open(struct net_device *dev);
732 +static int switch_enet_start_xmit(struct sk_buff *skb, struct net_device *dev);
733 +static irqreturn_t switch_enet_interrupt(int irq, void *dev_id);
734 +static void switch_enet_tx(struct net_device *dev);
735 +static void switch_enet_rx(struct net_device *dev);
736 +static int switch_enet_close(struct net_device *dev);
737 +static void set_multicast_list(struct net_device *dev);
738 +static void switch_restart(struct net_device *dev, int duplex);
739 +static void switch_stop(struct net_device *dev);
740 +static void switch_set_mac_address(struct net_device *dev);
741 +
742 +#define NMII 20
743 +
744 +/* Make MII read/write commands for the FEC.
745 +*/
746 +#define mk_mii_read(REG) (0x60020000 | ((REG & 0x1f) << 18))
747 +#define mk_mii_write(REG, VAL) (0x50020000 | ((REG & 0x1f) << 18) | \
748 + (VAL & 0xffff))
749 +
750 +/* Transmitter timeout.
751 +*/
752 +#define TX_TIMEOUT (2*HZ)
753 +
754 +/*last read entry from learning interface*/
755 +eswPortInfo g_info;
756 +/* switch ports status */
757 +struct port_status ports_link_status;
758 +
759 +/* the user space pid, used to send the link change to user space */
760 +long user_pid = 1;
761 +
762 +/* ----------------------------------------------------------------*/
763 +/*
764 + * Calculate the Galois Field Arithmetic CRC for the polynomial x^8+x^2+x+1.
765 + * It omits the final shifting in of 8 zeroes that a "normal" CRC would do
766 + * (getting the remainder).
767 + *
768 + * Examples (hexadecimal values):<br>
769 + * 10-11-12-13-14-15 => CRC=0xc2
770 + * 10-11-cc-dd-ee-00 => CRC=0xe6
771 + *
772 + * param: pmacaddress
773 + * A 6-byte array with the MAC address.
774 + * The first byte is the first byte transmitted
775 + * return The 8-bit CRC in bits 7:0
776 + */
777 +int crc8_calc(unsigned char *pmacaddress)
778 +{
779 + /* byte index */
780 + int byt;
781 + /* bit index */
782 + int bit;
783 + int inval;
784 + int crc;
785 + /* preset */
786 + crc = 0x12;
787 + for (byt = 0; byt < 6; byt++) {
788 + inval = (((int)pmacaddress[byt]) & 0xff);
789 + /*
790 + * shift bit 0 to bit 8 so all our bits
791 + * travel through bit 8
792 + * (simplifies below calc)
793 + */
794 + inval <<= 8;
795 +
796 + for (bit = 0; bit < 8; bit++) {
797 + /* next input bit comes into d7 after shift */
798 + crc |= inval & 0x100;
799 + if (crc & 0x01)
800 + /* before shift */
801 + crc ^= 0x1c0;
802 +
803 + crc >>= 1;
804 + inval >>= 1;
805 + }
806 +
807 + }
808 + /* upper bits are clean as we shifted in zeroes! */
809 + return crc;
810 +}
811 +
812 +void read_atable(struct switch_enet_private *fep,
813 + int index, unsigned long *read_lo, unsigned long *read_hi)
814 +{
815 + unsigned long atable_base = 0xFC0E0000;
816 +
817 + *read_lo = *((volatile unsigned long *)(atable_base + (index<<3)));
818 + *read_hi = *((volatile unsigned long *)(atable_base + (index<<3) + 4));
819 +}
820 +
821 +void write_atable(struct switch_enet_private *fep,
822 + int index, unsigned long write_lo, unsigned long write_hi)
823 +{
824 + unsigned long atable_base = 0xFC0E0000;
825 +
826 + *((volatile unsigned long *)(atable_base + (index<<3))) = write_lo;
827 + *((volatile unsigned long *)(atable_base + (index<<3) + 4)) = write_hi;
828 +}
829 +
830 +/* Check if the Port Info FIFO has data available
831 + * for reading. 1 valid, 0 invalid*/
832 +int esw_portinfofifo_status(struct switch_enet_private *fep)
833 +{
834 + volatile switch_t *fecp;
835 + fecp = fep->hwp;
836 + return fecp->ESW_LSR;
837 +}
838 +
839 +/* Initialize the Port Info FIFO. */
840 +void esw_portinfofifo_initialize(struct switch_enet_private *fep)
841 +{
842 + volatile switch_t *fecp;
843 + unsigned long tmp;
844 + fecp = fep->hwp;
845 +
846 + /*disable all learn*/
847 + fecp->switch_imask &= (~MCF_ESW_IMR_LRN);
848 + /* remove all entries from FIFO */
849 + while (esw_portinfofifo_status(fep)) {
850 + /* read one data word */
851 + tmp = fecp->ESW_LREC0;
852 + tmp = fecp->ESW_LREC1;
853 + }
854 +
855 +}
856 +
857 +/* Read one element from the HW receive FIFO (Queue)
858 + * if available and return it.
859 + * return a pointer to the port info (eswPortInfo) or NULL if no data is available
860 + */
861 +eswPortInfo *esw_portinfofifo_read(struct switch_enet_private *fep)
862 +{
863 + volatile switch_t *fecp;
864 + unsigned long tmp;
865 +
866 + fecp = fep->hwp;
867 + /* check learning record valid */
868 + if (fecp->ESW_LSR == 0)
869 + return NULL;
870 +
871 + /*read word from FIFO*/
872 + g_info.maclo = fecp->ESW_LREC0;
873 +
874 + /*but verify that we actually did so
875 + * (0=no data available)*/
876 + if (g_info.maclo == 0)
877 + return NULL;
878 +
879 + /* read 2nd word from FIFO */
880 + tmp = fecp->ESW_LREC1;
881 + g_info.machi = tmp & 0xffff;
882 + g_info.hash = (tmp >> 16) & 0xff;
883 + g_info.port = (tmp >> 24) & 0xf;
884 +
885 + return &g_info;
886 +}
887 +
888 +/*
889 + * Clear complete MAC Look Up Table
890 + */
891 +void esw_clear_atable(struct switch_enet_private *fep)
892 +{
893 + int index;
894 + for (index = 0; index < 2048; index++)
895 + write_atable(fep, index, 0, 0);
896 +}
897 +
898 +void esw_dump_atable(struct switch_enet_private *fep)
899 +{
900 + int index;
901 + unsigned long read_lo, read_hi;
902 + for (index = 0; index < 2048; index++)
903 + read_atable(fep, index, &read_lo, &read_hi);
904 +}
905 +
906 +/*
907 + * Updates the MAC address lookup table with a static entry.
908 + * Searches whether the MAC address is already in the block and replaces
909 + * the older entry with the new one. If the MAC address is not there, a
910 + * new entry is put into the first empty slot available in the block.
911 + *
912 + * mac_addr Pointer to the array containing MAC address to
913 + * be put as static entry
914 + * port Port bitmask numbers to be added in static entry,
915 + * valid values are 1-7
916 + * priority Priority for the static entry in table
917 + *
918 + * return 0 for a successful update else -1 when no slot available
919 + */
920 +int esw_update_atable_static(unsigned char *mac_addr,
921 + unsigned int port, unsigned int priority,
922 + struct switch_enet_private *fep)
923 +{
924 + unsigned long block_index, entry, index_end;
925 + unsigned long read_lo, read_hi;
926 + unsigned long write_lo, write_hi;
927 +
928 + write_lo = (unsigned long)((mac_addr[3] << 24) |
929 + (mac_addr[2] << 16) |
930 + (mac_addr[1] << 8) |
931 + mac_addr[0]);
932 + write_hi = (unsigned long)(0 |
933 + (port << AT_SENTRY_PORTMASK_shift) |
934 + (priority << AT_SENTRY_PRIO_shift) |
935 + (AT_ENTRY_TYPE_STATIC << AT_ENTRY_TYPE_shift) |
936 + (AT_ENTRY_RECORD_VALID << AT_ENTRY_VALID_shift) |
937 + (mac_addr[5] << 8) | (mac_addr[4]));
938 +
939 + block_index = GET_BLOCK_PTR(crc8_calc(mac_addr));
940 + index_end = block_index + ATABLE_ENTRY_PER_SLOT;
941 + /* Now search all the entries in the selected block */
942 + for (entry = block_index; entry < index_end; entry++) {
943 + read_atable(fep, entry, &read_lo, &read_hi);
944 + /*
945 + * MAC address matched, so update the
946 + * existing entry
947 + * even if its a dynamic one
948 + */
949 + if ((read_lo == write_lo) && ((read_hi & 0x0000ffff) ==
950 + (write_hi & 0x0000ffff))) {
951 + write_atable(fep, entry, write_lo, write_hi);
952 + return 0;
953 + } else if (!(read_hi & (1 << 16))) {
954 + /*
955 + * Fill this empty slot (valid bit zero),
956 + * assuming no holes in the block
957 + */
958 + write_atable(fep, entry, write_lo, write_hi);
959 + fep->atCurrEntries++;
960 + return 0;
961 + }
962 + }
963 +
964 + /* No space available for this static entry */
965 + return -1;
966 +}
967 +
968 +/* lookup entry in given Address Table slot and
969 + * insert (learn) it if it is not found.
970 + * return 0 if entry was found and updated.
971 + * 1 if entry was not found and has been inserted (learned).
972 + */
973 +int esw_update_atable_dynamic(unsigned char *mac_addr, unsigned int port,
974 + unsigned int currTime, struct switch_enet_private *fep)
975 +{
976 + unsigned long block_index, entry, index_end;
977 + unsigned long read_lo, read_hi;
978 + unsigned long write_lo, write_hi;
979 + unsigned long tmp;
980 + int time, timeold, indexold;
981 +
982 + /* prepare update port and timestamp */
983 + write_hi = (mac_addr[5] << 8) | (mac_addr[4]);
984 + write_lo = (unsigned long)((mac_addr[3] << 24) |
985 + (mac_addr[2] << 16) |
986 + (mac_addr[1] << 8) |
987 + mac_addr[0]);
988 + tmp = AT_ENTRY_RECORD_VALID << AT_ENTRY_VALID_shift;
989 + tmp |= AT_ENTRY_TYPE_DYNAMIC << AT_ENTRY_TYPE_shift;
990 + tmp |= currTime << AT_DENTRY_TIME_shift;
991 + tmp |= port << AT_DENTRY_PORT_shift;
992 + tmp |= write_hi;
993 +
994 + /*
995 + * linear search through all slot
996 + * entries and update if found
997 + */
998 + block_index = GET_BLOCK_PTR(crc8_calc(mac_addr));
999 + index_end = block_index + ATABLE_ENTRY_PER_SLOT;
1000 + /* Now search all the entries in the selected block */
1001 + for (entry = block_index; entry < index_end; entry++) {
1002 + read_atable(fep, entry, &read_lo, &read_hi);
1003 +
1004 + if ((read_lo == write_lo) &&
1005 + ((read_hi & 0x0000ffff) ==
1006 + (write_hi & 0x0000ffff))) {
1007 + /* found correct address,
1008 + * update timestamp. */
1009 + write_atable(fep, entry, write_lo, tmp);
1010 + return 0;
1011 + } else if (!(read_hi & (1 << 16))) {
1012 + /* slot is empty, then use it
1013 + * for new entry
1014 + * Note: There are no holes,
1015 + * therefore cannot be any
1016 + * more that need to be compared.
1017 + */
1018 + write_atable(fep, entry, write_lo, tmp);
1019 + /* statistics (we do it between writing
1020 +			 * .hi and .lo due to
1021 + * hardware limitation...
1022 + */
1023 + fep->atCurrEntries++;
1024 + /* newly inserted */
1025 + return 1;
1026 + }
1027 + }
1028 +
1029 + /*
1030 +	 * no more entries available in block ...
1031 + * overwrite oldest
1032 + */
1033 + timeold = 0;
1034 + indexold = 0;
1035 + for (entry = block_index; entry < index_end; entry++) {
1036 + read_atable(fep, entry, &read_lo, &read_hi);
1037 + time = AT_EXTRACT_TIMESTAMP(read_hi);
1038 + time = TIMEDELTA(currTime, time);
1039 + if (time > timeold) {
1040 + /* is it older ?*/
1041 + timeold = time;
1042 + indexold = entry;
1043 + }
1044 + }
1045 +
1046 + write_atable(fep, indexold, write_lo, tmp);
1047 +	/* Statistics (do it in between
1048 + * writing to .lo and .hi*/
1049 + fep->atBlockOverflows++;
1050 + /* newly inserted */
1051 + return 1;
1052 +}
1053 +
1054 +int esw_update_atable_dynamic1(unsigned long write_lo, unsigned long write_hi,
1055 + int block_index, unsigned int port, unsigned int currTime,
1056 + struct switch_enet_private *fep)
1057 +{
1058 + unsigned long entry, index_end;
1059 + unsigned long read_lo, read_hi;
1060 + unsigned long tmp;
1061 + int time, timeold, indexold;
1062 +
1063 + /* prepare update port and timestamp */
1064 + tmp = AT_ENTRY_RECORD_VALID << AT_ENTRY_VALID_shift;
1065 + tmp |= AT_ENTRY_TYPE_DYNAMIC << AT_ENTRY_TYPE_shift;
1066 + tmp |= currTime << AT_DENTRY_TIME_shift;
1067 + tmp |= port << AT_DENTRY_PORT_shift;
1068 + tmp |= write_hi;
1069 +
1070 + /*
1071 + * linear search through all slot
1072 + * entries and update if found
1073 + */
1074 + index_end = block_index + ATABLE_ENTRY_PER_SLOT;
1075 + /* Now search all the entries in the selected block */
1076 + for (entry = block_index; entry < index_end; entry++) {
1077 + read_atable(fep, entry, &read_lo, &read_hi);
1078 + if ((read_lo == write_lo) &&
1079 + ((read_hi & 0x0000ffff) ==
1080 + (write_hi & 0x0000ffff))) {
1081 + /* found correct address,
1082 + * update timestamp. */
1083 + write_atable(fep, entry, write_lo, tmp);
1084 + return 0;
1085 + } else if (!(read_hi & (1 << 16))) {
1086 + /* slot is empty, then use it
1087 + * for new entry
1088 + * Note: There are no holes,
1089 + * therefore cannot be any
1090 + * more that need to be compared.
1091 + */
1092 + write_atable(fep, entry, write_lo, tmp);
1093 + /* statistics (we do it between writing
1094 +			 * .hi and .lo due to
1095 + * hardware limitation...
1096 + */
1097 + fep->atCurrEntries++;
1098 + /* newly inserted */
1099 + return 1;
1100 + }
1101 + }
1102 +
1103 + /*
1104 + * no more entry available in block ...
1105 + * overwrite oldest
1106 + */
1107 + timeold = 0;
1108 + indexold = 0;
1109 + for (entry = block_index; entry < index_end; entry++) {
1110 + read_atable(fep, entry, &read_lo, &read_hi);
1111 + time = AT_EXTRACT_TIMESTAMP(read_hi);
1112 + time = TIMEDELTA(currTime, time);
1113 + if (time > timeold) {
1114 + /* is it older ?*/
1115 + timeold = time;
1116 + indexold = entry;
1117 + }
1118 + }
1119 +
1120 + write_atable(fep, indexold, write_lo, tmp);
1121 +	/* Statistics (do it in between
1122 + * writing to .lo and .hi*/
1123 + fep->atBlockOverflows++;
1124 + /* newly inserted */
1125 + return 1;
1126 +}
1127 +
1128 +/*
1129 + * Delete one dynamic entry within the given block
1130 + * of 64-bit entries.
1131 + * return number of valid entries in the block after deletion.
1132 + */
1133 +int esw_del_atable_dynamic(struct switch_enet_private *fep,
1134 + int blockidx, int entryidx)
1135 +{
1136 + unsigned long index_start, index_end;
1137 + int i;
1138 + unsigned long read_lo, read_hi;
1139 +
1140 + /* the entry to delete */
1141 + index_start = blockidx + entryidx;
1142 + /* one after last */
1143 + index_end = blockidx + ATABLE_ENTRY_PER_SLOT;
1144 + /* Statistics */
1145 + fep->atCurrEntries--;
1146 +
1147 + if (entryidx == (ATABLE_ENTRY_PER_SLOT - 1)) {
1148 + /* if it is the very last entry,
1149 +		 * just delete it without further effort*/
1150 + write_atable(fep, index_start, 0, 0);
1151 + /*number of entries left*/
1152 + i = ATABLE_ENTRY_PER_SLOT - 1;
1153 + return i;
1154 + } else {
1155 + /*not the last in the block, then
1156 + * shift all that follow the one
1157 + * that is deleted to avoid "holes".
1158 + */
1159 + for (i = index_start; i < (index_end - 1); i++) {
1160 + read_atable(fep, i + 1, &read_lo, &read_hi);
1161 + /* move it down */
1162 + write_atable(fep, i, read_lo, read_hi);
1163 + if (!(read_hi & (1 << 16))) {
1164 + /* stop if we just copied the last */
1165 + return i - blockidx;
1166 + }
1167 + }
1168 +
1169 + /*moved all entries up to the last.
1170 + * then set invalid flag in the last*/
1171 + write_atable(fep, index_end - 1, 0, 0);
1172 + /* number of valid entries left */
1173 + return i - blockidx;
1174 + }
1175 +}
1176 +
1177 +void esw_atable_dynamicms_del_entries_for_port(
1178 + struct switch_enet_private *fep, int port_index)
1179 +{
1180 + unsigned long read_lo, read_hi;
1181 + unsigned int port_idx;
1182 + int i;
1183 +
1184 + for (i = 0; i < ESW_ATABLE_MEM_NUM_ENTRIES; i++) {
1185 + read_atable(fep, i, &read_lo, &read_hi);
1186 + if (read_hi & (1 << 16)) {
1187 + port_idx = AT_EXTRACT_PORT(read_hi);
1188 +
1189 + if (port_idx == port_index)
1190 + write_atable(fep, i, 0, 0);
1191 + }
1192 + }
1193 +}
1194 +
1195 +void esw_atable_dynamicms_del_entries_for_other_port(
1196 + struct switch_enet_private *fep,
1197 + int port_index)
1198 +{
1199 + unsigned long read_lo, read_hi;
1200 + unsigned int port_idx;
1201 + int i;
1202 +
1203 + for (i = 0; i < ESW_ATABLE_MEM_NUM_ENTRIES; i++) {
1204 + read_atable(fep, i, &read_lo, &read_hi);
1205 + if (read_hi & (1 << 16)) {
1206 + port_idx = AT_EXTRACT_PORT(read_hi);
1207 +
1208 + if (port_idx != port_index)
1209 + write_atable(fep, i, 0, 0);
1210 + }
1211 + }
1212 +}
1213 +
1214 +/*
1215 + * Scan one complete block (Slot) for outdated entries and delete them.
1216 + * blockidx index of block of entries that should be analyzed.
1217 + * return number of deleted entries, 0 if nothing was modified.
1218 + */
1219 +int esw_atable_dynamicms_check_block_age(
1220 + struct switch_enet_private *fep, int blockidx) {
1221 +
1222 + int i, tm, tdelta;
1223 + int deleted = 0, entries = 0;
1224 + unsigned long read_lo, read_hi;
1225 + /* Scan all entries from last down to
1226 + * have faster deletion speed if necessary*/
1227 + for (i = (blockidx + ATABLE_ENTRY_PER_SLOT - 1);
1228 + i >= blockidx; i--) {
1229 + read_atable(fep, i, &read_lo, &read_hi);
1230 +
1231 + if (read_hi & (1 << 16)) {
1232 +			/* the entry is valid */
1233 + tm = AT_EXTRACT_TIMESTAMP(read_hi);
1234 + tdelta = TIMEDELTA(fep->currTime, tm);
1235 + if (tdelta > fep->ageMax) {
1236 + esw_del_atable_dynamic(fep,
1237 + blockidx, i-blockidx);
1238 + deleted++;
1239 + } else {
1240 + /* statistics */
1241 + entries++;
1242 + }
1243 + }
1244 + }
1245 +
1246 + /*update statistics*/
1247 + if (fep->atMaxEntriesPerBlock < entries)
1248 + fep->atMaxEntriesPerBlock = entries;
1249 +
1250 + return deleted;
1251 +}
1252 +
1253 +/* scan the complete address table and find the most current entry.
1254 + * The time of the most current entry then is used as current time
1255 + * for the context structure.
1256 + * In addition the atCurrEntries value is updated as well.
1257 + * return time that has been set in the context.
1258 + */
1259 +int esw_atable_dynamicms_find_set_latesttime(
1260 + struct switch_enet_private *fep) {
1261 +
1262 + int tm_min, tm_max, tm;
1263 + int delta, current, i;
1264 + unsigned long read_lo, read_hi;
1265 +
1266 + tm_min = (1 << AT_DENTRY_TIMESTAMP_WIDTH) - 1;
1267 + tm_max = 0;
1268 + current = 0;
1269 +
1270 + for (i = 0; i < ESW_ATABLE_MEM_NUM_ENTRIES; i++) {
1271 + read_atable(fep, i, &read_lo, &read_hi);
1272 + if (read_hi & (1 << 16)) {
1273 + /*the entry is valid*/
1274 + tm = AT_EXTRACT_TIMESTAMP(read_hi);
1275 + if (tm > tm_max)
1276 + tm_max = tm;
1277 + if (tm < tm_min)
1278 + tm_min = tm;
1279 + current++;
1280 + }
1281 + }
1282 +
1283 + delta = TIMEDELTA(tm_max, tm_min);
1284 + if (delta < fep->ageMax) {
1285 + /*Difference must be in range*/
1286 + fep->currTime = tm_max;
1287 + } else {
1288 + fep->currTime = tm_min;
1289 + }
1290 +
1291 + fep->atCurrEntries = current;
1292 + return fep->currTime;
1293 +}
1294 +
1295 +int esw_atable_dynamicms_get_port(
1296 + struct switch_enet_private *fep,
1297 + unsigned long write_lo,
1298 + unsigned long write_hi,
1299 + int block_index)
1300 +{
1301 + int i, index_end;
1302 + unsigned long read_lo, read_hi, port;
1303 +
1304 + index_end = block_index + ATABLE_ENTRY_PER_SLOT;
1305 + /* Now search all the entries in the selected block */
1306 + for (i = block_index; i < index_end; i++) {
1307 + read_atable(fep, i, &read_lo, &read_hi);
1308 +
1309 + if ((read_lo == write_lo) &&
1310 + ((read_hi & 0x0000ffff) ==
1311 + (write_hi & 0x0000ffff))) {
1312 + /* found correct address,*/
1313 + if (read_hi & (1 << 16)) {
1314 + /*extract the port index from the valid entry*/
1315 + port = AT_EXTRACT_PORT(read_hi);
1316 + return port;
1317 + }
1318 + }
1319 + }
1320 +
1321 + return -1;
1322 +}
1323 +
1324 +/* Get the port index from the source MAC address
1325 + * of the received frame
1326 + * @return port index
1327 + */
1328 +int esw_atable_dynamicms_get_portindex_from_mac(
1329 + struct switch_enet_private *fep,
1330 + unsigned char *mac_addr,
1331 + unsigned long write_lo,
1332 + unsigned long write_hi)
1333 +{
1334 + int blockIdx;
1335 + int rc;
1336 + /*compute the block index*/
1337 + blockIdx = GET_BLOCK_PTR(crc8_calc(mac_addr));
1338 + /* Get the ingress port index of the received BPDU */
1339 + rc = esw_atable_dynamicms_get_port(fep,
1340 + write_lo, write_hi, blockIdx);
1341 +
1342 + return rc;
1343 +}
1344 +
1345 +/* dynamicms MAC address table learn and migration*/
1346 +int esw_atable_dynamicms_learn_migration(
1347 + struct switch_enet_private *fep,
1348 + int currTime)
1349 +{
1350 + eswPortInfo *pESWPortInfo;
1351 + int index;
1352 + int inserted = 0;
1353 +
1354 + pESWPortInfo = esw_portinfofifo_read(fep);
1355 + /* Anything to learn */
1356 + if (pESWPortInfo != 0) {
1357 + /*get block index from lookup table*/
1358 + index = GET_BLOCK_PTR(pESWPortInfo->hash);
1359 + inserted = esw_update_atable_dynamic1(
1360 + pESWPortInfo->maclo,
1361 + pESWPortInfo->machi, index,
1362 + pESWPortInfo->port, currTime, fep);
1363 + }
1364 +
1365 + return 0;
1366 +}
1367 +/* -----------------------------------------------------------------*/
1368 +/*
1369 + * esw_forced_forward
1370 + * The frame is forwarded to the forced destination ports.
1371 + * It only replaces the MAC lookup function;
1372 + * all other filtering (e.g. VLAN verification) acts as normal.
1373 + */
1374 +int esw_forced_forward(struct switch_enet_private *fep,
1375 + int port1, int port2, int enable)
1376 +{
1377 + unsigned long tmp = 0;
1378 + volatile switch_t *fecp;
1379 +
1380 + fecp = fep->hwp;
1381 +
1382 + /* Enable Forced forwarding for port num */
1383 + if ((port1 == 1) && (port2 == 1))
1384 + tmp |= MCF_ESW_P0FFEN_FD(3);
1385 + else if (port1 == 1)
1386 + /*Enable Forced forwarding for port 1 only*/
1387 + tmp |= MCF_ESW_P0FFEN_FD(1);
1388 + else if (port2 == 1)
1389 + /*Enable Forced forwarding for port 2 only*/
1390 + tmp |= MCF_ESW_P0FFEN_FD(2);
1391 + else {
1392 + printk(KERN_ERR "%s:do not support "
1393 +			"the forced forward mode "
1394 + "port1 %x port2 %x\n",
1395 + __func__, port1, port2);
1396 + return -1;
1397 + }
1398 +
1399 + if (enable == 1)
1400 + tmp |= MCF_ESW_P0FFEN_FEN;
1401 + else if (enable == 0)
1402 + tmp &= ~MCF_ESW_P0FFEN_FEN;
1403 + else {
1404 +		printk(KERN_ERR "%s: the enable value %x is invalid\n",
1405 + __func__, enable);
1406 + return -2;
1407 + }
1408 +
1409 + fecp->ESW_P0FFEN = tmp;
1410 + return 0;
1411 +}
1412 +
1413 +void esw_get_forced_forward(
1414 + struct switch_enet_private *fep,
1415 + unsigned long *ulForceForward)
1416 +{
1417 + volatile switch_t *fecp;
1418 +
1419 + fecp = fep->hwp;
1420 + *ulForceForward = fecp->ESW_P0FFEN;
1421 +}
1422 +
1423 +void esw_get_port_enable(
1424 + struct switch_enet_private *fep,
1425 + unsigned long *ulPortEnable)
1426 +{
1427 + volatile switch_t *fecp;
1428 +
1429 + fecp = fep->hwp;
1430 + *ulPortEnable = fecp->ESW_PER;
1431 +}
1432 +/*
1433 + * enable or disable port n tx or rx
1434 + * tx_en 0 disable port n tx
1435 + * tx_en 1 enable port n tx
1436 + * rx_en 0 disable port n rx
1437 + * rx_en 1 enable port n rx
1438 + */
1439 +int esw_port_enable_config(struct switch_enet_private *fep,
1440 + int port, int tx_en, int rx_en)
1441 +{
1442 + unsigned long tmp = 0;
1443 + volatile switch_t *fecp;
1444 +
1445 + fecp = fep->hwp;
1446 + tmp = fecp->ESW_PER;
1447 + if (tx_en == 1) {
1448 + if (port == 0)
1449 + tmp |= MCF_ESW_PER_TE0;
1450 + else if (port == 1)
1451 + tmp |= MCF_ESW_PER_TE1;
1452 + else if (port == 2)
1453 + tmp |= MCF_ESW_PER_TE2;
1454 + else {
1455 + printk(KERN_ERR "%s:do not support the"
1456 + " port %x tx enable\n",
1457 + __func__, port);
1458 + return -1;
1459 + }
1460 + } else if (tx_en == 0) {
1461 + if (port == 0)
1462 + tmp &= (~MCF_ESW_PER_TE0);
1463 + else if (port == 1)
1464 + tmp &= (~MCF_ESW_PER_TE1);
1465 + else if (port == 2)
1466 + tmp &= (~MCF_ESW_PER_TE2);
1467 + else {
1468 + printk(KERN_ERR "%s:do not support "
1469 + "the port %x tx disable\n",
1470 + __func__, port);
1471 + return -2;
1472 + }
1473 + } else {
1474 + printk(KERN_ERR "%s:do not support the port %x"
1475 + " tx op value %x\n",
1476 + __func__, port, tx_en);
1477 + return -3;
1478 + }
1479 +
1480 + if (rx_en == 1) {
1481 + if (port == 0)
1482 + tmp |= MCF_ESW_PER_RE0;
1483 + else if (port == 1)
1484 + tmp |= MCF_ESW_PER_RE1;
1485 + else if (port == 2)
1486 + tmp |= MCF_ESW_PER_RE2;
1487 + else {
1488 + printk(KERN_ERR "%s:do not support the "
1489 + "port %x rx enable\n",
1490 + __func__, port);
1491 + return -4;
1492 + }
1493 + } else if (rx_en == 0) {
1494 + if (port == 0)
1495 + tmp &= (~MCF_ESW_PER_RE0);
1496 + else if (port == 1)
1497 + tmp &= (~MCF_ESW_PER_RE1);
1498 + else if (port == 2)
1499 + tmp &= (~MCF_ESW_PER_RE2);
1500 + else {
1501 + printk(KERN_ERR "%s:do not support the "
1502 + "port %x rx disable\n",
1503 + __func__, port);
1504 + return -5;
1505 + }
1506 + } else {
1507 + printk(KERN_ERR "%s:do not support the port %x"
1508 + " rx op value %x\n",
1509 +			__func__, port, rx_en);
1510 + return -6;
1511 + }
1512 +
1513 + fecp->ESW_PER = tmp;
1514 + return 0;
1515 +}
1516 +
1517 +
1518 +void esw_get_port_broadcast(struct switch_enet_private *fep,
1519 + unsigned long *ulPortBroadcast)
1520 +{
1521 + volatile switch_t *fecp;
1522 +
1523 + fecp = fep->hwp;
1524 + *ulPortBroadcast = fecp->ESW_DBCR;
1525 +}
1526 +
1527 +int esw_port_broadcast_config(struct switch_enet_private *fep,
1528 + int port, int enable)
1529 +{
1530 + unsigned long tmp = 0;
1531 + volatile switch_t *fecp;
1532 +
1533 + fecp = fep->hwp;
1534 +
1535 + if ((port > 2) || (port < 0)) {
1536 + printk(KERN_ERR "%s:do not support the port %x"
1537 + " default broadcast\n",
1538 + __func__, port);
1539 + return -1;
1540 + }
1541 +
1542 + tmp = fecp->ESW_DBCR;
1543 + if (enable == 1) {
1544 + if (port == 0)
1545 + tmp |= MCF_ESW_DBCR_P0;
1546 + else if (port == 1)
1547 + tmp |= MCF_ESW_DBCR_P1;
1548 + else if (port == 2)
1549 + tmp |= MCF_ESW_DBCR_P2;
1550 + } else if (enable == 0) {
1551 + if (port == 0)
1552 + tmp &= ~MCF_ESW_DBCR_P0;
1553 + else if (port == 1)
1554 + tmp &= ~MCF_ESW_DBCR_P1;
1555 + else if (port == 2)
1556 + tmp &= ~MCF_ESW_DBCR_P2;
1557 + }
1558 +
1559 + fecp->ESW_DBCR = tmp;
1560 + return 0;
1561 +}
1562 +
1563 +
1564 +void esw_get_port_multicast(struct switch_enet_private *fep,
1565 + unsigned long *ulPortMulticast)
1566 +{
1567 + volatile switch_t *fecp;
1568 +
1569 + fecp = fep->hwp;
1570 + *ulPortMulticast = fecp->ESW_DMCR;
1571 +}
1572 +
1573 +int esw_port_multicast_config(struct switch_enet_private *fep,
1574 + int port, int enable)
1575 +{
1576 + unsigned long tmp = 0;
1577 + volatile switch_t *fecp;
1578 +
1579 + fecp = fep->hwp;
1580 +
1581 + if ((port > 2) || (port < 0)) {
1582 + printk(KERN_ERR "%s:do not support the port %x"
1583 +			" default multicast\n",
1584 + __func__, port);
1585 + return -1;
1586 + }
1587 +
1588 + tmp = fecp->ESW_DMCR;
1589 + if (enable == 1) {
1590 + if (port == 0)
1591 + tmp |= MCF_ESW_DMCR_P0;
1592 + else if (port == 1)
1593 + tmp |= MCF_ESW_DMCR_P1;
1594 + else if (port == 2)
1595 + tmp |= MCF_ESW_DMCR_P2;
1596 + } else if (enable == 0) {
1597 + if (port == 0)
1598 + tmp &= ~MCF_ESW_DMCR_P0;
1599 + else if (port == 1)
1600 + tmp &= ~MCF_ESW_DMCR_P1;
1601 + else if (port == 2)
1602 + tmp &= ~MCF_ESW_DMCR_P2;
1603 + }
1604 +
1605 + fecp->ESW_DMCR = tmp;
1606 + return 0;
1607 +}
1608 +
1609 +
1610 +void esw_get_port_blocking(struct switch_enet_private *fep,
1611 + unsigned long *ulPortBlocking)
1612 +{
1613 + volatile switch_t *fecp;
1614 +
1615 + fecp = fep->hwp;
1616 + *ulPortBlocking = (fecp->ESW_BKLR & 0x0000000f);
1617 +}
1618 +
1619 +int esw_port_blocking_config(struct switch_enet_private *fep,
1620 + int port, int enable)
1621 +{
1622 + unsigned long tmp = 0;
1623 + volatile switch_t *fecp;
1624 +
1625 + fecp = fep->hwp;
1626 +
1627 + if ((port > 2) || (port < 0)) {
1628 + printk(KERN_ERR "%s:do not support the port %x"
1629 +			" blocking config\n",
1630 + __func__, port);
1631 + return -1;
1632 + }
1633 +
1634 + tmp = fecp->ESW_BKLR;
1635 + if (enable == 1) {
1636 + if (port == 0)
1637 + tmp |= MCF_ESW_BKLR_BE0;
1638 + else if (port == 1)
1639 + tmp |= MCF_ESW_BKLR_BE1;
1640 + else if (port == 2)
1641 + tmp |= MCF_ESW_BKLR_BE2;
1642 + } else if (enable == 0) {
1643 + if (port == 0)
1644 + tmp &= ~MCF_ESW_BKLR_BE0;
1645 + else if (port == 1)
1646 + tmp &= ~MCF_ESW_BKLR_BE1;
1647 + else if (port == 2)
1648 + tmp &= ~MCF_ESW_BKLR_BE2;
1649 + }
1650 +
1651 + fecp->ESW_BKLR = tmp;
1652 + return 0;
1653 +}
1654 +
1655 +
1656 +void esw_get_port_learning(struct switch_enet_private *fep,
1657 + unsigned long *ulPortLearning)
1658 +{
1659 + volatile switch_t *fecp;
1660 +
1661 + fecp = fep->hwp;
1662 + *ulPortLearning = (fecp->ESW_BKLR & 0x000f0000) >> 16;
1663 +}
1664 +
1665 +int esw_port_learning_config(struct switch_enet_private *fep,
1666 + int port, int disable)
1667 +{
1668 + unsigned long tmp = 0;
1669 + volatile switch_t *fecp;
1670 +
1671 + fecp = fep->hwp;
1672 +
1673 + if ((port > 2) || (port < 0)) {
1674 + printk(KERN_ERR "%s:do not support the port %x"
1675 +			" learning config\n",
1676 + __func__, port);
1677 + return -1;
1678 + }
1679 +
1680 + tmp = fecp->ESW_BKLR;
1681 + if (disable == 0) {
1682 + fep->learning_irqhandle_enable = 0;
1683 + if (port == 0)
1684 + tmp |= MCF_ESW_BKLR_LD0;
1685 + else if (port == 1)
1686 + tmp |= MCF_ESW_BKLR_LD1;
1687 + else if (port == 2)
1688 + tmp |= MCF_ESW_BKLR_LD2;
1689 + } else if (disable == 1) {
1690 + if (port == 0)
1691 + tmp &= ~MCF_ESW_BKLR_LD0;
1692 + else if (port == 1)
1693 + tmp &= ~MCF_ESW_BKLR_LD1;
1694 + else if (port == 2)
1695 + tmp &= ~MCF_ESW_BKLR_LD2;
1696 + }
1697 +
1698 + fecp->ESW_BKLR = tmp;
1699 + return 0;
1700 +}
1701 +/*********************************************************************/
1702 +void esw_mac_lookup_table_range(struct switch_enet_private *fep)
1703 +{
1704 + int index;
1705 + unsigned long read_lo, read_hi;
1706 + /* Pointer to switch address look up memory*/
1707 + for (index = 0; index < 2048; index++)
1708 + write_atable(fep, index, index, (~index));
1709 +
1710 + /* Pointer to switch address look up memory*/
1711 + for (index = 0; index < 2048; index++) {
1712 + read_atable(fep, index, &read_lo, &read_hi);
1713 + if (read_lo != index) {
1714 + printk(KERN_ERR "%s:Mismatch at low %d\n",
1715 + __func__, index);
1716 + return;
1717 + }
1718 +
1719 + if (read_hi != (~index)) {
1720 + printk(KERN_ERR "%s:Mismatch at high %d\n",
1721 + __func__, index);
1722 + return;
1723 + }
1724 + }
1725 +}
1726 +
1727 +/*
1728 + * Configures the IP snoop options for handling the snooped frame.
1729 + * mode 0 : The snooped frame is forwarded only to the management port
1730 + * mode 1 : The snooped frame is copied to the management port and
1731 + * normal forwarding is checked.
1732 + * mode 2 : The snooped frame is discarded.
1733 + * mode 3 : Disable the ip snoop function
1734 + * ip_header_protocol : the IP header protocol field
1735 + */
1736 +int esw_ip_snoop_config(struct switch_enet_private *fep,
1737 + int mode, unsigned long ip_header_protocol)
1738 +{
1739 + volatile switch_t *fecp;
1740 + unsigned long tmp = 0, protocol_type = 0;
1741 + int num = 0;
1742 +
1743 + fecp = fep->hwp;
1744 + /* Config IP Snooping */
1745 + if (mode == 0) {
1746 + /* Enable IP Snooping */
1747 + tmp = MCF_ESW_IPSNP_EN;
1748 + tmp |= MCF_ESW_IPSNP_MODE(0);/*For Forward*/
1749 + } else if (mode == 1) {
1750 + /* Enable IP Snooping */
1751 + tmp = MCF_ESW_IPSNP_EN;
1752 +		/*For forward and copy to management port*/
1753 + tmp |= MCF_ESW_IPSNP_MODE(1);
1754 + } else if (mode == 2) {
1755 + /* Enable IP Snooping */
1756 + tmp = MCF_ESW_IPSNP_EN;
1757 + tmp |= MCF_ESW_IPSNP_MODE(2);/*discard*/
1758 + } else if (mode == 3) {
1759 + /* disable IP Snooping */
1760 + tmp = MCF_ESW_IPSNP_EN;
1761 + tmp &= ~MCF_ESW_IPSNP_EN;
1762 + } else {
1763 + printk(KERN_ERR "%s: the mode %x "
1764 + "we do not support\n", __func__, mode);
1765 + return -1;
1766 + }
1767 +
1768 + protocol_type = ip_header_protocol;
1769 + for (num = 0; num < 8; num++) {
1770 + if (protocol_type ==
1771 + AT_EXTRACT_IP_PROTOCOL(fecp->ESW_IPSNP[num])) {
1772 + fecp->ESW_IPSNP[num] =
1773 + tmp | MCF_ESW_IPSNP_PROTOCOL(protocol_type);
1774 + break;
1775 + } else if (!(fecp->ESW_IPSNP[num])) {
1776 + fecp->ESW_IPSNP[num] =
1777 + tmp | MCF_ESW_IPSNP_PROTOCOL(protocol_type);
1778 + break;
1779 + }
1780 + }
1781 + if (num == 8) {
1782 + printk(KERN_INFO "IP snooping table is full\n");
1783 + return 0;
1784 + }
1785 +
1786 + return 0;
1787 +}
1788 +
1789 +void esw_get_ip_snoop_config(struct switch_enet_private *fep,
1790 + unsigned long *ulpESW_IPSNP)
1791 +{
1792 + int i;
1793 + volatile switch_t *fecp;
1794 +
1795 + fecp = fep->hwp;
1796 + for (i = 0; i < 8; i++)
1797 + *(ulpESW_IPSNP + i) = fecp->ESW_IPSNP[i];
1798 +}
1799 +/*
1800 + * Configures the TCP/UDP port snoop options for handling the snooped frame.
1801 + * mode 0 : The snooped frame is forwarded only to the management port
1802 + * mode 1 : The snooped frame is copied to the management port and
1803 + * normal forwarding is checked.
1804 + * mode 2 : The snooped frame is discarded.
1805 + * mode 3 : Disable the TCP/UDP port snoop function
1806 + * compare_port : port number in the TCP/UDP header
1807 + * compare_num 1: TCP/UDP source port number is compared
1808 + * compare_num 2: TCP/UDP destination port number is compared
1809 + * compare_num 3: TCP/UDP source and destination port numbers are compared
1810 + */
1811 +int esw_tcpudp_port_snoop_config(struct switch_enet_private *fep,
1812 + int mode, int compare_port, int compare_num)
1813 +{
1814 + volatile switch_t *fecp;
1815 + unsigned long tmp;
1816 + int num;
1817 +
1818 + fecp = fep->hwp;
1819 +
1820 + /* Enable TCP/UDP port Snooping */
1821 + tmp = MCF_ESW_PSNP_EN;
1822 + if (mode == 0)
1823 + tmp |= MCF_ESW_PSNP_MODE(0);/*For Forward*/
1824 +	else if (mode == 1)/*For Forward and copy to the management port*/
1825 + tmp |= MCF_ESW_PSNP_MODE(1);
1826 + else if (mode == 2)
1827 + tmp |= MCF_ESW_PSNP_MODE(2);/*discard*/
1828 + else if (mode == 3) /*disable the port function*/
1829 + tmp &= (~MCF_ESW_PSNP_EN);
1830 + else {
1831 + printk(KERN_ERR "%s: the mode %x we do not support\n",
1832 + __func__, mode);
1833 + return -1;
1834 + }
1835 +
1836 + if (compare_num == 1)
1837 + tmp |= MCF_ESW_PSNP_CS;
1838 + else if (compare_num == 2)
1839 + tmp |= MCF_ESW_PSNP_CD;
1840 + else if (compare_num == 3)
1841 + tmp |= MCF_ESW_PSNP_CD | MCF_ESW_PSNP_CS;
1842 + else {
1843 +		printk(KERN_ERR "%s: the compare_num %x"
1844 +			" is not supported\n",
1845 + __func__, compare_num);
1846 + return -1;
1847 + }
1848 +
1849 + for (num = 0; num < 8; num++) {
1850 + if (compare_port ==
1851 + AT_EXTRACT_TCP_UDP_PORT(fecp->ESW_PSNP[num])) {
1852 + fecp->ESW_PSNP[num] =
1853 + tmp | MCF_ESW_PSNP_PORT_COMPARE(compare_port);
1854 + break;
1855 + } else if (!(fecp->ESW_PSNP[num])) {
1856 + fecp->ESW_PSNP[num] =
1857 + tmp | MCF_ESW_PSNP_PORT_COMPARE(compare_port);
1858 + break;
1859 + }
1860 + }
1861 + if (num == 8) {
1862 + printk(KERN_INFO "TCP/UDP port snooping table is full\n");
1863 + return 0;
1864 + }
1865 +
1866 + return 0;
1867 +}
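+
+/*
+ * Illustrative sketch (not part of the original driver): snooping a
+ * TCP/UDP port with esw_tcpudp_port_snoop_config(). Port 67 (DHCP
+ * server) is only an example value; compare_num selects source and/or
+ * destination port comparison as described in the comment above.
+ *
+ *	// copy frames with destination port 67 to the management port
+ *	esw_tcpudp_port_snoop_config(fep, 1, 67, 2);
+ */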
1868 +
1869 +void esw_get_tcpudp_port_snoop_config(
1870 + struct switch_enet_private *fep,
1871 + unsigned long *ulpESW_PSNP)
1872 +{
1873 + int i;
1874 + volatile switch_t *fecp;
1875 +
1876 + fecp = fep->hwp;
1877 + for (i = 0; i < 8; i++)
1878 + *(ulpESW_PSNP + i) = fecp->ESW_PSNP[i];
1879 +}
1880 +/*-----------------mirror----------------------------------------*/
1881 +void esw_get_port_mirroring(struct switch_enet_private *fep)
1882 +{
1883 + volatile switch_t *fecp;
1884 +
1885 + fecp = fep->hwp;
1886 +
1887 + printk(KERN_INFO "Mirror Port: %1ld Egress Port Match:%s "
1888 + "Ingress Port Match:%s\n", fecp->ESW_MCR & 0xf,
1889 + (fecp->ESW_MCR >> 6) & 1 ? "Y" : "N",
1890 + (fecp->ESW_MCR >> 5) & 1 ? "Y" : "N");
1891 +
1892 + if ((fecp->ESW_MCR >> 6) & 1)
1893 + printk(KERN_INFO "Egress Port to be mirrored: Port %ld\n",
1894 + fecp->ESW_EGMAP >> 1);
1895 + if ((fecp->ESW_MCR >> 5) & 1)
1896 + printk(KERN_INFO "Ingress Port to be mirrored: Port %ld\n",
1897 + fecp->ESW_INGMAP >> 1);
1898 +
1899 + printk(KERN_INFO "Egress Des Address Match:%s "
1900 + "Egress Src Address Match:%s\n",
1901 + (fecp->ESW_MCR >> 10) & 1 ? "Y" : "N",
1902 + (fecp->ESW_MCR >> 9) & 1 ? "Y" : "N");
1903 + printk(KERN_INFO "Ingress Des Address Match:%s "
1904 + "Ingress Src Address Match:%s\n",
1905 + (fecp->ESW_MCR >> 8) & 1 ? "Y" : "N",
1906 + (fecp->ESW_MCR >> 7) & 1 ? "Y" : "N");
1907 +
1908 + if ((fecp->ESW_MCR >> 10) & 1)
1909 + printk(KERN_INFO "Egress Des Address to be mirrored: "
1910 + "%02lx-%02lx-%02lx-%02lx-%02lx-%02lx\n",
1911 + fecp->ESW_ENGDAL & 0xff, (fecp->ESW_ENGDAL >> 8) & 0xff,
1912 + (fecp->ESW_ENGDAL >> 16) & 0xff,
1913 + (fecp->ESW_ENGDAL >> 24) & 0xff,
1914 + fecp->ESW_ENGDAH & 0xff,
1915 + (fecp->ESW_ENGDAH >> 8) & 0xff);
1916 + if ((fecp->ESW_MCR >> 9) & 1)
1917 +		printk(KERN_INFO "Egress Src Address to be mirrored: "
1918 + "%02lx-%02lx-%02lx-%02lx-%02lx-%02lx\n",
1919 + fecp->ESW_ENGSAL & 0xff, (fecp->ESW_ENGSAL >> 8) & 0xff,
1920 + (fecp->ESW_ENGSAL >> 16) & 0xff,
1921 + (fecp->ESW_ENGSAL >> 24) & 0xff,
1922 + fecp->ESW_ENGSAH & 0xff,
1923 + (fecp->ESW_ENGSAH >> 8) & 0xff);
1924 + if ((fecp->ESW_MCR >> 8) & 1)
1925 +		printk(KERN_INFO "Ingress Des Address to be mirrored: "
1926 + "%02lx-%02lx-%02lx-%02lx-%02lx-%02lx\n",
1927 + fecp->ESW_INGDAL & 0xff, (fecp->ESW_INGDAL >> 8) & 0xff,
1928 + (fecp->ESW_INGDAL >> 16) & 0xff,
1929 + (fecp->ESW_INGDAL >> 24) & 0xff,
1930 + fecp->ESW_INGDAH & 0xff,
1931 + (fecp->ESW_INGDAH >> 8) & 0xff);
1932 + if ((fecp->ESW_MCR >> 7) & 1)
1933 +		printk(KERN_INFO "Ingress Src Address to be mirrored: "
1934 + "%02lx-%02lx-%02lx-%02lx-%02lx-%02lx\n",
1935 + fecp->ESW_INGSAL & 0xff, (fecp->ESW_INGSAL >> 8) & 0xff,
1936 + (fecp->ESW_INGSAL >> 16) & 0xff,
1937 + (fecp->ESW_INGSAL >> 24) & 0xff,
1938 + fecp->ESW_INGSAH & 0xff,
1939 + (fecp->ESW_INGSAH >> 8) & 0xff);
1940 +}
1941 +
1942 +int esw_port_mirroring_config_port_match(struct switch_enet_private *fep,
1943 + int mirror_port, int port_match_en, int port)
1944 +{
1945 + volatile switch_t *fecp;
1946 + unsigned long tmp = 0;
1947 +
1948 + fecp = fep->hwp;
1949 +
1950 + tmp = fecp->ESW_MCR;
1951 + if (mirror_port != (tmp & 0xf))
1952 + tmp = 0;
1953 +
1954 + switch (port_match_en) {
1955 + case MIRROR_EGRESS_PORT_MATCH:
1956 + tmp |= MCF_ESW_MCR_EGMAP;
1957 + if (port == 0)
1958 + fecp->ESW_EGMAP = MCF_ESW_EGMAP_EG0;
1959 + else if (port == 1)
1960 + fecp->ESW_EGMAP = MCF_ESW_EGMAP_EG1;
1961 + else if (port == 2)
1962 + fecp->ESW_EGMAP = MCF_ESW_EGMAP_EG2;
1963 + break;
1964 + case MIRROR_INGRESS_PORT_MATCH:
1965 + tmp |= MCF_ESW_MCR_INGMAP;
1966 + if (port == 0)
1967 + fecp->ESW_INGMAP = MCF_ESW_INGMAP_ING0;
1968 + else if (port == 1)
1969 + fecp->ESW_INGMAP = MCF_ESW_INGMAP_ING1;
1970 + else if (port == 2)
1971 + fecp->ESW_INGMAP = MCF_ESW_INGMAP_ING2;
1972 + break;
1973 + default:
1974 + tmp = 0;
1975 + break;
1976 + }
1977 +
1978 + tmp = tmp & 0x07e0;
1979 + if (port_match_en)
1980 + tmp |= MCF_ESW_MCR_MEN | MCF_ESW_MCR_PORT(mirror_port);
1981 +
1982 + fecp->ESW_MCR = tmp;
1983 + return 0;
1984 +}
1985 +
1986 +int esw_port_mirroring_config(struct switch_enet_private *fep,
1987 + int mirror_port, int port, int mirror_enable,
1988 + unsigned char *src_mac, unsigned char *des_mac,
1989 + int egress_en, int ingress_en,
1990 + int egress_mac_src_en, int egress_mac_des_en,
1991 + int ingress_mac_src_en, int ingress_mac_des_en)
1992 +{
1993 + volatile switch_t *fecp;
1994 + unsigned long tmp;
1995 +
1996 + fecp = fep->hwp;
1997 +
1998 + /*mirroring config*/
1999 + tmp = 0;
2000 + if (egress_en == 1) {
2001 + tmp |= MCF_ESW_MCR_EGMAP;
2002 + if (port == 0)
2003 + fecp->ESW_EGMAP = MCF_ESW_EGMAP_EG0;
2004 + else if (port == 1)
2005 + fecp->ESW_EGMAP = MCF_ESW_EGMAP_EG1;
2006 + else if (port == 2)
2007 + fecp->ESW_EGMAP = MCF_ESW_EGMAP_EG2;
2008 + else {
2009 + printk(KERN_ERR "%s: the port %x we do not support\n",
2010 + __func__, port);
2011 + return -1;
2012 + }
2013 + } else if (egress_en == 0) {
2014 + tmp &= (~MCF_ESW_MCR_EGMAP);
2015 + } else {
2016 + printk(KERN_ERR "%s: egress_en %x we do not support\n",
2017 + __func__, egress_en);
2018 + return -1;
2019 + }
2020 +
2021 + if (ingress_en == 1) {
2022 + tmp |= MCF_ESW_MCR_INGMAP;
2023 + if (port == 0)
2024 + fecp->ESW_INGMAP = MCF_ESW_INGMAP_ING0;
2025 + else if (port == 1)
2026 + fecp->ESW_INGMAP = MCF_ESW_INGMAP_ING1;
2027 + else if (port == 2)
2028 + fecp->ESW_INGMAP = MCF_ESW_INGMAP_ING2;
2029 + else {
2030 + printk(KERN_ERR "%s: the port %x we do not support\n",
2031 + __func__, port);
2032 + return -1;
2033 + }
2034 + } else if (ingress_en == 0) {
2035 + tmp &= ~MCF_ESW_MCR_INGMAP;
2036 +	} else {
2037 + printk(KERN_ERR "%s: ingress_en %x we do not support\n",
2038 + __func__, ingress_en);
2039 + return -1;
2040 + }
2041 +
2042 + if (egress_mac_src_en == 1) {
2043 + tmp |= MCF_ESW_MCR_EGSA;
2044 + fecp->ESW_ENGSAH = (src_mac[5] << 8) | (src_mac[4]);
2045 + fecp->ESW_ENGSAL = (unsigned long)((src_mac[3] << 24) |
2046 + (src_mac[2] << 16) |
2047 + (src_mac[1] << 8) |
2048 + src_mac[0]);
2049 + } else if (egress_mac_src_en == 0) {
2050 + tmp &= ~MCF_ESW_MCR_EGSA;
2051 + } else {
2052 + printk(KERN_ERR "%s: egress_mac_src_en %x we do not support\n",
2053 + __func__, egress_mac_src_en);
2054 + return -1;
2055 + }
2056 +
2057 + if (egress_mac_des_en == 1) {
2058 + tmp |= MCF_ESW_MCR_EGDA;
2059 + fecp->ESW_ENGDAH = (des_mac[5] << 8) | (des_mac[4]);
2060 + fecp->ESW_ENGDAL = (unsigned long)((des_mac[3] << 24) |
2061 + (des_mac[2] << 16) |
2062 + (des_mac[1] << 8) |
2063 + des_mac[0]);
2064 + } else if (egress_mac_des_en == 0) {
2065 + tmp &= ~MCF_ESW_MCR_EGDA;
2066 + } else {
2067 + printk(KERN_ERR "%s: egress_mac_des_en %x we do not support\n",
2068 + __func__, egress_mac_des_en);
2069 + return -1;
2070 + }
2071 +
2072 + if (ingress_mac_src_en == 1) {
2073 + tmp |= MCF_ESW_MCR_INGSA;
2074 + fecp->ESW_INGSAH = (src_mac[5] << 8) | (src_mac[4]);
2075 + fecp->ESW_INGSAL = (unsigned long)((src_mac[3] << 24) |
2076 + (src_mac[2] << 16) |
2077 + (src_mac[1] << 8) |
2078 + src_mac[0]);
2079 + } else if (ingress_mac_src_en == 0) {
2080 + tmp &= ~MCF_ESW_MCR_INGSA;
2081 + } else {
2082 + printk(KERN_ERR "%s: ingress_mac_src_en %x we do not support\n",
2083 + __func__, ingress_mac_src_en);
2084 + return -1;
2085 + }
2086 +
2087 + if (ingress_mac_des_en == 1) {
2088 + tmp |= MCF_ESW_MCR_INGDA;
2089 + fecp->ESW_INGDAH = (des_mac[5] << 8) | (des_mac[4]);
2090 + fecp->ESW_INGDAL = (unsigned long)((des_mac[3] << 24) |
2091 + (des_mac[2] << 16) |
2092 + (des_mac[1] << 8) |
2093 + des_mac[0]);
2094 + } else if (ingress_mac_des_en == 0) {
2095 + tmp &= ~MCF_ESW_MCR_INGDA;
2096 + } else {
2097 + printk(KERN_ERR "%s: ingress_mac_des_en %x we do not support\n",
2098 + __func__, ingress_mac_des_en);
2099 + return -1;
2100 + }
2101 +
2102 + if (mirror_enable == 1)
2103 + tmp |= MCF_ESW_MCR_MEN | MCF_ESW_MCR_PORT(mirror_port);
2104 + else if (mirror_enable == 0)
2105 + tmp &= ~MCF_ESW_MCR_MEN;
2106 + else
2107 + printk(KERN_ERR "%s: the mirror enable %x is error\n",
2108 + __func__, mirror_enable);
2109 +
2110 +
2111 + fecp->ESW_MCR = tmp;
2112 + return 0;
2113 +}
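+
+/*
+ * Illustrative sketch (not part of the original driver): mirror all
+ * ingress and egress traffic of port 1 to mirror port 0 without any
+ * MAC address matching (the MAC address pointers are unused when the
+ * corresponding *_en arguments are 0).
+ *
+ *	esw_port_mirroring_config(fep, 0, 1, 1,
+ *				  NULL, NULL,
+ *				  1, 1,
+ *				  0, 0, 0, 0);
+ */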
2114 +
2115 +int esw_port_mirroring_config_addr_match(struct switch_enet_private *fep,
2116 + int mirror_port, int addr_match_enable, unsigned char *mac_addr)
2117 +{
2118 + volatile switch_t *fecp;
2119 + unsigned long tmp = 0;
2120 +
2121 + fecp = fep->hwp;
2122 +
2123 + tmp = fecp->ESW_MCR;
2124 + if (mirror_port != (tmp & 0xf))
2125 + tmp = 0;
2126 +
2127 + switch (addr_match_enable) {
2128 + case MIRROR_EGRESS_SOURCE_MATCH:
2129 + tmp |= MCF_ESW_MCR_EGSA;
2130 + fecp->ESW_ENGSAH = (mac_addr[5] << 8) | (mac_addr[4]);
2131 + fecp->ESW_ENGSAL = (unsigned long)((mac_addr[3] << 24) |
2132 + (mac_addr[2] << 16) | (mac_addr[1] << 8) | mac_addr[0]);
2133 + break;
2134 + case MIRROR_INGRESS_SOURCE_MATCH:
2135 + tmp |= MCF_ESW_MCR_INGSA;
2136 + fecp->ESW_INGSAH = (mac_addr[5] << 8) | (mac_addr[4]);
2137 + fecp->ESW_INGSAL = (unsigned long)((mac_addr[3] << 24) |
2138 + (mac_addr[2] << 16) | (mac_addr[1] << 8) | mac_addr[0]);
2139 + break;
2140 + case MIRROR_EGRESS_DESTINATION_MATCH:
2141 + tmp |= MCF_ESW_MCR_EGDA;
2142 + fecp->ESW_ENGDAH = (mac_addr[5] << 8) | (mac_addr[4]);
2143 + fecp->ESW_ENGDAL = (unsigned long)((mac_addr[3] << 24) |
2144 + (mac_addr[2] << 16) | (mac_addr[1] << 8) | mac_addr[0]);
2145 + break;
2146 + case MIRROR_INGRESS_DESTINATION_MATCH:
2147 + tmp |= MCF_ESW_MCR_INGDA;
2148 + fecp->ESW_INGDAH = (mac_addr[5] << 8) | (mac_addr[4]);
2149 + fecp->ESW_INGDAL = (unsigned long)((mac_addr[3] << 24) |
2150 + (mac_addr[2] << 16) | (mac_addr[1] << 8) | mac_addr[0]);
2151 + break;
2152 + default:
2153 + tmp = 0;
2154 + break;
2155 + }
2156 +
2157 + tmp = tmp & 0x07e0;
2158 + if (addr_match_enable)
2159 + tmp |= MCF_ESW_MCR_MEN | MCF_ESW_MCR_PORT(mirror_port);
2160 +
2161 + fecp->ESW_MCR = tmp;
2162 + return 0;
2163 +}
2164 +
2165 +void esw_get_vlan_verification(struct switch_enet_private *fep,
2166 + unsigned long *ulValue)
2167 +{
2168 + volatile switch_t *fecp;
2169 + fecp = fep->hwp;
2170 + *ulValue = fecp->ESW_VLANV;
2171 +}
2172 +
2173 +int esw_set_vlan_verification(struct switch_enet_private *fep, int port,
2174 + int vlan_domain_verify_en, int vlan_discard_unknown_en)
2175 +{
2176 + volatile switch_t *fecp;
2177 +
2178 + fecp = fep->hwp;
2179 + if ((port < 0) || (port > 2)) {
2180 + printk(KERN_ERR "%s: do not support the port %d\n",
2181 + __func__, port);
2182 + return -1;
2183 + }
2184 +
2185 + if (vlan_domain_verify_en == 1) {
2186 + if (port == 0)
2187 + fecp->ESW_VLANV |= MCF_ESW_VLANV_VV0;
2188 + else if (port == 1)
2189 + fecp->ESW_VLANV |= MCF_ESW_VLANV_VV1;
2190 + else if (port == 2)
2191 + fecp->ESW_VLANV |= MCF_ESW_VLANV_VV2;
2192 + } else if (vlan_domain_verify_en == 0) {
2193 + if (port == 0)
2194 + fecp->ESW_VLANV &= ~MCF_ESW_VLANV_VV0;
2195 + else if (port == 1)
2196 + fecp->ESW_VLANV &= ~MCF_ESW_VLANV_VV1;
2197 + else if (port == 2)
2198 + fecp->ESW_VLANV &= ~MCF_ESW_VLANV_VV2;
2199 + } else {
2200 +		printk(KERN_INFO "%s: do not support "
2201 + "vlan_domain_verify %x\n",
2202 + __func__, vlan_domain_verify_en);
2203 + return -2;
2204 + }
2205 +
2206 + if (vlan_discard_unknown_en == 1) {
2207 + if (port == 0)
2208 + fecp->ESW_VLANV |= MCF_ESW_VLANV_DU0;
2209 + else if (port == 1)
2210 + fecp->ESW_VLANV |= MCF_ESW_VLANV_DU1;
2211 + else if (port == 2)
2212 + fecp->ESW_VLANV |= MCF_ESW_VLANV_DU2;
2213 + } else if (vlan_discard_unknown_en == 0) {
2214 + if (port == 0)
2215 + fecp->ESW_VLANV &= ~MCF_ESW_VLANV_DU0;
2216 + else if (port == 1)
2217 + fecp->ESW_VLANV &= ~MCF_ESW_VLANV_DU1;
2218 + else if (port == 2)
2219 + fecp->ESW_VLANV &= ~MCF_ESW_VLANV_DU2;
2220 + } else {
2221 +		printk(KERN_INFO "%s: do not support "
2222 + "vlan_discard_unknown %x\n",
2223 + __func__, vlan_discard_unknown_en);
2224 + return -3;
2225 + }
2226 +
2227 + return 0;
2228 +}
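+
+/*
+ * Illustrative sketch (not part of the original driver): enable VLAN
+ * domain verification and discarding of unknown VLANs on port 0.
+ *
+ *	esw_set_vlan_verification(fep, 0, 1, 1);
+ */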
2229 +
2230 +void esw_get_vlan_resolution_table(struct switch_enet_private *fep,
2231 + struct eswVlanTableItem *tableaddr)
2232 +{
2233 + volatile switch_t *fecp;
2234 + int vnum = 0;
2235 + int i;
2236 +
2237 + fecp = fep->hwp;
2238 + for (i = 0; i < 32; i++) {
2239 + if (fecp->ESW_VRES[i]) {
2240 + tableaddr->table[i].port_vlanid =
2241 + fecp->ESW_VRES[i] >> 3;
2242 + tableaddr->table[i].vlan_domain_port =
2243 + fecp->ESW_VRES[i] & 7;
2244 + vnum++;
2245 + }
2246 + }
2247 + tableaddr->valid_num = vnum;
2248 +}
2249 +
2250 +int esw_set_vlan_id(struct switch_enet_private *fep, unsigned long configData)
2251 +{
2252 + volatile switch_t *fecp;
2253 + int i;
2254 +
2255 + fecp = fep->hwp;
2256 +
2257 + for (i = 0; i < 32; i++) {
2258 + if (fecp->ESW_VRES[i] == 0) {
2259 + fecp->ESW_VRES[i] = MCF_ESW_VRES_VLANID(configData);
2260 + return 0;
2261 + } else if (((fecp->ESW_VRES[i] >> 3) & 0xfff) == configData) {
2262 + printk(KERN_INFO "The VLAN already exists\n");
2263 + return 0;
2264 + }
2265 + }
2266 +
2267 +	printk(KERN_INFO "The VLAN can't be created because the VLAN table is full\n");
2268 + return 0;
2269 +}
2270 +
2271 +int esw_set_vlan_id_cleared(struct switch_enet_private *fep,
2272 + unsigned long configData)
2273 +{
2274 + volatile switch_t *fecp;
2275 + int i;
2276 +
2277 + fecp = fep->hwp;
2278 +
2279 + for (i = 0; i < 32; i++) {
2280 + if (((fecp->ESW_VRES[i] >> 3) & 0xfff) == configData) {
2281 + fecp->ESW_VRES[i] = 0;
2282 + break;
2283 + }
2284 + }
2285 + return 0;
2286 +}
2287 +
2288 +int esw_set_port_in_vlan_id(struct switch_enet_private *fep,
2289 + eswIoctlVlanResoultionTable configData)
2290 +{
2291 + volatile switch_t *fecp;
2292 + int i;
2293 + int lastnum = 0;
2294 +
2295 + fecp = fep->hwp;
2296 +
2297 + for (i = 0; i < 32; i++) {
2298 + if (fecp->ESW_VRES[i] == 0) {
2299 + lastnum = i;
2300 + break;
2301 + } else if (((fecp->ESW_VRES[i] >> 3) & 0xfff) ==
2302 + configData.port_vlanid) {
2303 + /* update the port members of this vlan */
2304 + fecp->ESW_VRES[i] |= 1 << configData.vlan_domain_port;
2305 + return 0;
2306 + }
2307 + }
2308 +	/* create a new vlan in the vlan table */
2309 + fecp->ESW_VRES[lastnum] = MCF_ESW_VRES_VLANID(configData.port_vlanid) |
2310 + (1 << configData.vlan_domain_port);
2311 + return 0;
2312 +}
2313 +
2314 +int esw_set_vlan_resolution_table(struct switch_enet_private *fep,
2315 + unsigned short port_vlanid, int vlan_domain_num,
2316 + int vlan_domain_port)
2317 +{
2318 + volatile switch_t *fecp;
2319 +
2320 + fecp = fep->hwp;
2321 + if ((vlan_domain_num < 0)
2322 + || (vlan_domain_num > 31)) {
2323 + printk(KERN_ERR "%s: do not support the "
2324 + "vlan_domain_num %d\n",
2325 + __func__, vlan_domain_num);
2326 + return -1;
2327 + }
2328 +
2329 + if ((vlan_domain_port < 0)
2330 + || (vlan_domain_port > 7)) {
2331 + printk(KERN_ERR "%s: do not support the "
2332 + "vlan_domain_port %d\n",
2333 + __func__, vlan_domain_port);
2334 + return -2;
2335 + }
2336 +
2337 + fecp->ESW_VRES[vlan_domain_num] =
2338 + MCF_ESW_VRES_VLANID(port_vlanid)
2339 + | vlan_domain_port;
2340 +
2341 + return 0;
2342 +}
2343 +
2344 +void esw_get_vlan_input_config(struct switch_enet_private *fep,
2345 + eswIoctlVlanInputStatus *pVlanInputConfig)
2346 +{
2347 + volatile switch_t *fecp;
2348 + int i;
2349 +
2350 + fecp = fep->hwp;
2351 + for (i = 0; i < 3; i++)
2352 + pVlanInputConfig->ESW_PID[i] = fecp->ESW_PID[i];
2353 +
2354 + pVlanInputConfig->ESW_VLANV = fecp->ESW_VLANV;
2355 + pVlanInputConfig->ESW_VIMSEL = fecp->ESW_VIMSEL;
2356 + pVlanInputConfig->ESW_VIMEN = fecp->ESW_VIMEN;
2357 +
2358 + for (i = 0; i < 32; i++)
2359 + pVlanInputConfig->ESW_VRES[i] = fecp->ESW_VRES[i];
2360 +}
2361 +
2362 +
2363 +int esw_vlan_input_process(struct switch_enet_private *fep,
2364 + int port, int mode, unsigned short port_vlanid)
2365 +{
2366 + volatile switch_t *fecp;
2367 +
2368 + fecp = fep->hwp;
2369 +
2370 + if ((mode < 0) || (mode > 5)) {
2371 + printk(KERN_ERR "%s: do not support the"
2372 + " VLAN input processing mode %d\n",
2373 + __func__, mode);
2374 + return -1;
2375 + }
2376 +
2377 + if ((port < 0) || (port > 3)) {
2378 + printk(KERN_ERR "%s: do not support the port %d\n",
2379 +			__func__, port);
2380 + return -2;
2381 + }
2382 +
2383 + fecp->ESW_PID[port] = MCF_ESW_PID_VLANID(port_vlanid);
2384 + if (port == 0) {
2385 + if (mode == 4)
2386 + fecp->ESW_VIMEN &= ~MCF_ESW_VIMEN_EN0;
2387 + else
2388 + fecp->ESW_VIMEN |= MCF_ESW_VIMEN_EN0;
2389 +
2390 + fecp->ESW_VIMSEL &= ~MCF_ESW_VIMSEL_IM0(3);
2391 + fecp->ESW_VIMSEL |= MCF_ESW_VIMSEL_IM0(mode);
2392 + } else if (port == 1) {
2393 + if (mode == 4)
2394 + fecp->ESW_VIMEN &= ~MCF_ESW_VIMEN_EN1;
2395 + else
2396 + fecp->ESW_VIMEN |= MCF_ESW_VIMEN_EN1;
2397 +
2398 + fecp->ESW_VIMSEL &= ~MCF_ESW_VIMSEL_IM1(3);
2399 + fecp->ESW_VIMSEL |= MCF_ESW_VIMSEL_IM1(mode);
2400 + } else if (port == 2) {
2401 + if (mode == 4)
2402 + fecp->ESW_VIMEN &= ~MCF_ESW_VIMEN_EN2;
2403 + else
2404 + fecp->ESW_VIMEN |= MCF_ESW_VIMEN_EN2;
2405 +
2406 + fecp->ESW_VIMSEL &= ~MCF_ESW_VIMSEL_IM2(3);
2407 + fecp->ESW_VIMSEL |= MCF_ESW_VIMSEL_IM2(mode);
2408 + } else {
2409 + printk(KERN_ERR "%s: do not support the port %d\n",
2410 + __func__, port);
2411 + return -2;
2412 + }
2413 +
2414 + return 0;
2415 +}
2416 +
2417 +void esw_get_vlan_output_config(struct switch_enet_private *fep,
2418 + unsigned long *ulVlanOutputConfig)
2419 +{
2420 + volatile switch_t *fecp;
2421 +
2422 + fecp = fep->hwp;
2423 + *ulVlanOutputConfig = fecp->ESW_VOMSEL;
2424 +}
2425 +
2426 +int esw_vlan_output_process(struct switch_enet_private *fep,
2427 + int port, int mode)
2428 +{
2429 + volatile switch_t *fecp;
2430 +
2431 + fecp = fep->hwp;
2432 +
2433 + if ((port < 0) || (port > 2)) {
2434 + printk(KERN_ERR "%s: do not support the port %d\n",
2435 +			__func__, port);
2436 + return -1;
2437 + }
2438 +
2439 + if (port == 0) {
2440 + fecp->ESW_VOMSEL &= ~MCF_ESW_VOMSEL_OM0(3);
2441 + fecp->ESW_VOMSEL |= MCF_ESW_VOMSEL_OM0(mode);
2442 + } else if (port == 1) {
2443 + fecp->ESW_VOMSEL &= ~MCF_ESW_VOMSEL_OM1(3);
2444 + fecp->ESW_VOMSEL |= MCF_ESW_VOMSEL_OM1(mode);
2445 + } else if (port == 2) {
2446 + fecp->ESW_VOMSEL &= ~MCF_ESW_VOMSEL_OM2(3);
2447 + fecp->ESW_VOMSEL |= MCF_ESW_VOMSEL_OM2(mode);
2448 + } else {
2449 + printk(KERN_ERR "%s: do not support the port %d\n",
2450 + __func__, port);
2451 + return -1;
2452 + }
2453 +
2454 + return 0;
2455 +}
2456 +
2457 +/*------------frame classify and priority resolution------------*/
2458 +/*vlan priority lookup*/
2459 +int esw_framecalssify_vlan_priority_lookup(struct switch_enet_private *fep,
2460 + int port, int func_enable, int vlan_pri_table_num,
2461 + int vlan_pri_table_value)
2462 +{
2463 + volatile switch_t *fecp;
2464 +
2465 + fecp = fep->hwp;
2466 +
2467 + if ((port < 0) || (port > 3)) {
2468 + printk(KERN_ERR "%s: do not support the port %d\n",
2469 + __func__, port);
2470 + return -1;
2471 + }
2472 +
2473 + if (func_enable == 0) {
2474 + fecp->ESW_PRES[port] &= ~MCF_ESW_PRES_VLAN;
2475 + printk(KERN_ERR "%s: disable port %d VLAN priority "
2476 + "lookup function\n", __func__, port);
2477 + return 0;
2478 + }
2479 +
2480 + if ((vlan_pri_table_num < 0) || (vlan_pri_table_num > 7)) {
2481 + printk(KERN_ERR "%s: do not support the priority %d\n",
2482 + __func__, vlan_pri_table_num);
2483 + return -1;
2484 + }
2485 +
2486 + fecp->ESW_PVRES[port] |= ((vlan_pri_table_value & 0x3)
2487 + << (vlan_pri_table_num*3));
2488 + /* enable port VLAN priority lookup function*/
2489 + fecp->ESW_PRES[port] |= MCF_ESW_PRES_VLAN;
2490 + return 0;
2491 +}
2492 +
2493 +int esw_framecalssify_ip_priority_lookup(struct switch_enet_private *fep,
2494 + int port, int func_enable, int ipv4_en, int ip_priority_num,
2495 + int ip_priority_value)
2496 +{
2497 + volatile switch_t *fecp;
2498 + unsigned long tmp = 0, tmp_prio = 0;
2499 +
2500 + fecp = fep->hwp;
2501 +
2502 + if ((port < 0) || (port > 3)) {
2503 + printk(KERN_ERR "%s: do not support the port %d\n",
2504 + __func__, port);
2505 + return -1;
2506 + }
2507 +
2508 + if (func_enable == 0) {
2509 + fecp->ESW_PRES[port] &= ~MCF_ESW_PRES_IP;
2510 + printk(KERN_ERR "%s: disable port %d ip priority "
2511 + "lookup function\n", __func__, port);
2512 + return 0;
2513 + }
2514 +
2515 + /* IPV4 priority 64 entry table lookup*/
2516 +	/* IPv4 header 6-bit TOS field*/
2517 + if (ipv4_en == 1) {
2518 + if ((ip_priority_num < 0) || (ip_priority_num > 63)) {
2519 + printk(KERN_ERR "%s: do not support the table entry %d\n",
2520 + __func__, ip_priority_num);
2521 + return -2;
2522 + }
2523 + } else { /* IPV6 priority 256 entry table lookup*/
2524 +		/* IPv6 header 8-bit COS field*/
2525 + if ((ip_priority_num < 0) || (ip_priority_num > 255)) {
2526 + printk(KERN_ERR "%s: do not support the table entry %d\n",
2527 + __func__, ip_priority_num);
2528 + return -3;
2529 + }
2530 + }
2531 +
2532 + /* IP priority table lookup : address*/
2533 + tmp = MCF_ESW_IPRES_ADDRESS(ip_priority_num);
2534 + /* IP priority table lookup : ipv4sel*/
2535 + if (ipv4_en == 1)
2536 + tmp = tmp | MCF_ESW_IPRES_IPV4SEL;
2537 + /* IP priority table lookup : priority*/
2538 + if (port == 0)
2539 + tmp |= MCF_ESW_IPRES_PRI0(ip_priority_value);
2540 + else if (port == 1)
2541 + tmp |= MCF_ESW_IPRES_PRI1(ip_priority_value);
2542 + else if (port == 2)
2543 + tmp |= MCF_ESW_IPRES_PRI2(ip_priority_value);
2544 +
2545 + /* configure*/
2546 + fecp->ESW_IPRES = MCF_ESW_IPRES_READ |
2547 + MCF_ESW_IPRES_ADDRESS(ip_priority_num);
2548 + tmp_prio = fecp->ESW_IPRES;
2549 +
2550 + fecp->ESW_IPRES = tmp | tmp_prio;
2551 +
2552 + fecp->ESW_IPRES = MCF_ESW_IPRES_READ |
2553 + MCF_ESW_IPRES_ADDRESS(ip_priority_num);
2554 + tmp_prio = fecp->ESW_IPRES;
2555 +
2556 + /* enable port IP priority lookup function*/
2557 + fecp->ESW_PRES[port] |= MCF_ESW_PRES_IP;
2558 + return 0;
2559 +}
2560 +
2561 +int esw_framecalssify_mac_priority_lookup(
2562 + struct switch_enet_private *fep, int port)
2563 +{
2564 + volatile switch_t *fecp;
2565 +
2566 + if ((port < 0) || (port > 3)) {
2567 + printk(KERN_ERR "%s: do not support the port %d\n",
2568 + __func__, port);
2569 + return -1;
2570 + }
2571 +
2572 + fecp = fep->hwp;
2573 + fecp->ESW_PRES[port] |= MCF_ESW_PRES_MAC;
2574 +
2575 + return 0;
2576 +}
2577 +
2578 +int esw_frame_calssify_priority_init(struct switch_enet_private *fep,
2579 + int port, unsigned char priority_value)
2580 +{
2581 + volatile switch_t *fecp;
2582 +
2583 + fecp = fep->hwp;
2584 +
2585 + if ((port < 0) || (port > 3)) {
2586 + printk(KERN_ERR "%s: do not support the port %d\n",
2587 + __func__, port);
2588 + return -1;
2589 + }
2590 + /*disable all priority lookup function*/
2591 + fecp->ESW_PRES[port] = 0;
2592 + fecp->ESW_PRES[port] = MCF_ESW_PRES_DFLT_PRI(priority_value & 0x7);
2593 +
2594 + return 0;
2595 +}
2596 +
2597 +/*---------------------------------------------------------------------------*/
2598 +int esw_get_statistics_status(struct switch_enet_private *fep,
2599 + esw_statistics_status *pStatistics)
2600 +{
2601 + volatile switch_t *fecp;
2602 + fecp = fep->hwp;
2603 +
2604 + pStatistics->ESW_DISCN = fecp->ESW_DISCN;
2605 + pStatistics->ESW_DISCB = fecp->ESW_DISCB;
2606 + pStatistics->ESW_NDISCN = fecp->ESW_NDISCN;
2607 + pStatistics->ESW_NDISCB = fecp->ESW_NDISCB;
2608 + return 0;
2609 +}
2610 +
2611 +int esw_get_port_statistics_status(struct switch_enet_private *fep,
2612 + int port, esw_port_statistics_status *pPortStatistics)
2613 +{
2614 + volatile switch_t *fecp;
2615 +
2616 + if ((port < 0) || (port > 3)) {
2617 + printk(KERN_ERR "%s: do not support the port %d\n",
2618 + __func__, port);
2619 + return -1;
2620 + }
2621 +
2622 + fecp = fep->hwp;
2623 +
2624 + pPortStatistics->MCF_ESW_POQC =
2625 + fecp->port_statistics_status[port].MCF_ESW_POQC;
2626 + pPortStatistics->MCF_ESW_PMVID =
2627 + fecp->port_statistics_status[port].MCF_ESW_PMVID;
2628 + pPortStatistics->MCF_ESW_PMVTAG =
2629 + fecp->port_statistics_status[port].MCF_ESW_PMVTAG;
2630 + pPortStatistics->MCF_ESW_PBL =
2631 + fecp->port_statistics_status[port].MCF_ESW_PBL;
2632 + return 0;
2633 +}
2634 +/*----------------------------------------------------------------------*/
2635 +int esw_get_output_queue_status(struct switch_enet_private *fep,
2636 + esw_output_queue_status *pOutputQueue)
2637 +{
2638 + volatile switch_t *fecp;
2639 +
2640 + fecp = fep->hwp;
2641 + pOutputQueue->ESW_MMSR = fecp->ESW_MMSR;
2642 + pOutputQueue->ESW_LMT = fecp->ESW_LMT;
2643 + pOutputQueue->ESW_LFC = fecp->ESW_LFC;
2644 + pOutputQueue->ESW_IOSR = fecp->ESW_IOSR;
2645 + pOutputQueue->ESW_PCSR = fecp->ESW_PCSR;
2646 + pOutputQueue->ESW_QWT = fecp->ESW_QWT;
2647 + pOutputQueue->ESW_P0BCT = fecp->ESW_P0BCT;
2648 + return 0;
2649 +}
2650 +
2651 +/* set output queue memory status and configure*/
2652 +int esw_set_output_queue_memory(struct switch_enet_private *fep,
2653 + int fun_num, esw_output_queue_status *pOutputQueue)
2654 +{
2655 + volatile switch_t *fecp;
2656 +
2657 + fecp = fep->hwp;
2658 +
2659 + if (fun_num == 1) {
2660 + /* memory manager status*/
2661 + fecp->ESW_MMSR = pOutputQueue->ESW_MMSR;
2662 + } else if (fun_num == 2) {
2663 + /*low memory threshold*/
2664 + fecp->ESW_LMT = pOutputQueue->ESW_LMT;
2665 + } else if (fun_num == 3) {
2666 + /*lowest number of free cells*/
2667 + fecp->ESW_LFC = pOutputQueue->ESW_LFC;
2668 + } else if (fun_num == 4) {
2669 + /*queue weights*/
2670 + fecp->ESW_QWT = pOutputQueue->ESW_QWT;
2671 + } else if (fun_num == 5) {
2672 +		/*port 0 backpressure congestion threshold*/
2673 + fecp->ESW_P0BCT = pOutputQueue->ESW_P0BCT;
2674 + } else {
2675 + printk(KERN_ERR "%s: do not support the cmd %x\n",
2676 + __func__, fun_num);
2677 + return -1;
2678 + }
2679 + return 0;
2680 +}
2681 +/*--------------------------------------------------------------------*/
2682 +int esw_get_irq_status(struct switch_enet_private *fep,
2683 + eswIoctlIrqStatus *pIrqStatus)
2684 +{
2685 + volatile switch_t *fecp;
2686 +
2687 + fecp = fep->hwp;
2688 + pIrqStatus->isr = fecp->switch_ievent;
2689 + pIrqStatus->imr = fecp->switch_imask;
2690 + pIrqStatus->rx_buf_pointer = fecp->fec_r_des_start;
2691 + pIrqStatus->tx_buf_pointer = fecp->fec_x_des_start;
2692 + pIrqStatus->rx_max_size = fecp->fec_r_buff_size;
2693 + pIrqStatus->rx_buf_active = fecp->fec_r_des_active;
2694 + pIrqStatus->tx_buf_active = fecp->fec_x_des_active;
2695 + return 0;
2696 +}
2697 +
2698 +int esw_set_irq_mask(struct switch_enet_private *fep,
2699 + unsigned long mask, int enable)
2700 +{
2701 + volatile switch_t *fecp;
2702 +
2703 + fecp = fep->hwp;
2704 +
2705 + if (enable == 1)
2706 + fecp->switch_imask |= mask;
2707 +	else if (enable == 0)
2708 +		fecp->switch_imask &= (~mask);
2709 +	else {
2710 +		printk(KERN_INFO "%s: enable %d is an invalid value\n",
2711 +			__func__, enable);
2712 + return -1;
2713 + }
2714 + return 0;
2715 +}
2716 +
2717 +void esw_clear_irq_event(struct switch_enet_private *fep,
2718 + unsigned long mask)
2719 +{
2720 + volatile switch_t *fecp;
2721 +
2722 + fecp = fep->hwp;
2723 + fecp->switch_ievent |= mask;
2724 +}
2725 +
2726 +void esw_get_switch_mode(struct switch_enet_private *fep,
2727 + unsigned long *ulModeConfig)
2728 +{
2729 + volatile switch_t *fecp;
2730 +
2731 + fecp = fep->hwp;
2732 + *ulModeConfig = fecp->ESW_MODE;
2733 +}
2734 +
2735 +void esw_switch_mode_configure(struct switch_enet_private *fep,
2736 + unsigned long configure)
2737 +{
2738 + volatile switch_t *fecp;
2739 +
2740 + fecp = fep->hwp;
2741 + fecp->ESW_MODE |= configure;
2742 +}
2743 +
2744 +void esw_get_bridge_port(struct switch_enet_private *fep,
2745 + unsigned long *ulBMPConfig)
2746 +{
2747 + volatile switch_t *fecp;
2748 +
2749 + fecp = fep->hwp;
2750 + *ulBMPConfig = fecp->ESW_BMPC;
2751 +}
2752 +
2753 +void esw_bridge_port_configure(struct switch_enet_private *fep,
2754 + unsigned long configure)
2755 +{
2756 + volatile switch_t *fecp;
2757 +
2758 + fecp = fep->hwp;
2759 + fecp->ESW_BMPC = configure;
2760 +}
2761 +
2762 +int esw_get_port_all_status(struct switch_enet_private *fep,
2763 + unsigned char portnum, struct port_all_status *port_alstatus)
2764 +{
2765 + volatile switch_t *fecp;
2766 + unsigned long PortBlocking;
2767 + unsigned long PortLearning;
2768 + unsigned long VlanVerify;
2769 + unsigned long DiscardUnknown;
2770 + unsigned long MultiReso;
2771 + unsigned long BroadReso;
2772 + unsigned long FTransmit;
2773 + unsigned long FReceive;
2774 +
2775 + fecp = fep->hwp;
2776 + PortBlocking = fecp->ESW_BKLR & 0x0000000f;
2777 + PortLearning = (fecp->ESW_BKLR & 0x000f0000) >> 16;
2778 + VlanVerify = fecp->ESW_VLANV & 0x0000000f;
2779 + DiscardUnknown = (fecp->ESW_VLANV & 0x000f0000) >> 16;
2780 + MultiReso = fecp->ESW_DMCR & 0x0000000f;
2781 + BroadReso = fecp->ESW_DBCR & 0x0000000f;
2782 + FTransmit = fecp->ESW_PER & 0x0000000f;
2783 + FReceive = (fecp->ESW_PER & 0x000f0000) >> 16;
2784 +
2785 + switch (portnum) {
2786 + case 0:
2787 + port_alstatus->link_status = 1;
2788 + port_alstatus->block_status = PortBlocking & 1;
2789 + port_alstatus->learn_status = PortLearning & 1;
2790 + port_alstatus->vlan_verify = VlanVerify & 1;
2791 + port_alstatus->discard_unknown = DiscardUnknown & 1;
2792 + port_alstatus->multi_reso = MultiReso & 1;
2793 + port_alstatus->broad_reso = BroadReso & 1;
2794 + port_alstatus->ftransmit = FTransmit & 1;
2795 + port_alstatus->freceive = FReceive & 1;
2796 + break;
2797 + case 1:
2798 + port_alstatus->link_status =
2799 + ports_link_status.port1_link_status;
2800 + port_alstatus->block_status = (PortBlocking >> 1) & 1;
2801 + port_alstatus->learn_status = (PortLearning >> 1) & 1;
2802 + port_alstatus->vlan_verify = (VlanVerify >> 1) & 1;
2803 + port_alstatus->discard_unknown = (DiscardUnknown >> 1) & 1;
2804 + port_alstatus->multi_reso = (MultiReso >> 1) & 1;
2805 + port_alstatus->broad_reso = (BroadReso >> 1) & 1;
2806 + port_alstatus->ftransmit = (FTransmit >> 1) & 1;
2807 + port_alstatus->freceive = (FReceive >> 1) & 1;
2808 + break;
2809 + case 2:
2810 + port_alstatus->link_status =
2811 + ports_link_status.port2_link_status;
2812 + port_alstatus->block_status = (PortBlocking >> 2) & 1;
2813 + port_alstatus->learn_status = (PortLearning >> 2) & 1;
2814 + port_alstatus->vlan_verify = (VlanVerify >> 2) & 1;
2815 + port_alstatus->discard_unknown = (DiscardUnknown >> 2) & 1;
2816 + port_alstatus->multi_reso = (MultiReso >> 2) & 1;
2817 + port_alstatus->broad_reso = (BroadReso >> 2) & 1;
2818 + port_alstatus->ftransmit = (FTransmit >> 2) & 1;
2819 + port_alstatus->freceive = (FReceive >> 2) & 1;
2820 + break;
2821 + default:
2822 +		printk(KERN_ERR "%s: do not support the port %d\n",
2823 + __func__, portnum);
2824 + break;
2825 + }
2826 + return 0;
2827 +}
2828 +
2829 +int esw_atable_get_entry_port_number(struct switch_enet_private *fep,
2830 + unsigned char *mac_addr, unsigned char *port)
2831 +{
2832 + int block_index, block_index_end, entry;
2833 + unsigned long read_lo, read_hi;
2834 + unsigned long mac_addr_lo, mac_addr_hi;
2835 +
2836 + mac_addr_lo = (unsigned long)((mac_addr[3]<<24) | (mac_addr[2]<<16) |
2837 + (mac_addr[1]<<8) | mac_addr[0]);
2838 + mac_addr_hi = (unsigned long)((mac_addr[5]<<8) | (mac_addr[4]));
2839 +
2840 + block_index = GET_BLOCK_PTR(crc8_calc(mac_addr));
2841 + block_index_end = block_index + ATABLE_ENTRY_PER_SLOT;
2842 +
2843 + /* now search all the entries in the selected block */
2844 + for (entry = block_index; entry < block_index_end; entry++) {
2845 + read_atable(fep, entry, &read_lo, &read_hi);
2846 + if ((read_lo == mac_addr_lo) &&
2847 + ((read_hi & 0x0000ffff) ==
2848 + (mac_addr_hi & 0x0000ffff))) {
2849 + /* found the correct address */
2850 + if ((read_hi & (1 << 16)) && (!(read_hi & (1 << 17))))
2851 + *port = AT_EXTRACT_PORT(read_hi);
2852 + break;
2853 + } else
2854 + *port = -1;
2855 + }
2856 +
2857 + return 0;
2858 +}
2859 +
2860 +int esw_get_mac_address_lookup_table(struct switch_enet_private *fep,
2861 + unsigned long *tableaddr, unsigned long *dnum, unsigned long *snum)
2862 +{
2863 + unsigned long read_lo, read_hi;
2864 + unsigned long entry;
2865 + unsigned long dennum = 0;
2866 + unsigned long sennum = 0;
2867 +
2868 + for (entry = 0; entry < ESW_ATABLE_MEM_NUM_ENTRIES; entry++) {
2869 + read_atable(fep, entry, &read_lo, &read_hi);
2870 + if ((read_hi & (1 << 17)) && (read_hi & (1 << 16))) {
2871 + /* static entry */
2872 + *(tableaddr + (2047 - sennum) * 11) = entry;
2873 + *(tableaddr + (2047 - sennum) * 11 + 2) =
2874 + read_lo & 0x000000ff;
2875 + *(tableaddr + (2047 - sennum) * 11 + 3) =
2876 + (read_lo & 0x0000ff00) >> 8;
2877 + *(tableaddr + (2047 - sennum) * 11 + 4) =
2878 + (read_lo & 0x00ff0000) >> 16;
2879 + *(tableaddr + (2047 - sennum) * 11 + 5) =
2880 + (read_lo & 0xff000000) >> 24;
2881 + *(tableaddr + (2047 - sennum) * 11 + 6) =
2882 + read_hi & 0x000000ff;
2883 + *(tableaddr + (2047 - sennum) * 11 + 7) =
2884 + (read_hi & 0x0000ff00) >> 8;
2885 + *(tableaddr + (2047 - sennum) * 11 + 8) =
2886 + AT_EXTRACT_PORTMASK(read_hi);
2887 + *(tableaddr + (2047 - sennum) * 11 + 9) =
2888 + AT_EXTRACT_PRIO(read_hi);
2889 + sennum++;
2890 + } else if ((read_hi & (1 << 16)) && (!(read_hi & (1 << 17)))) {
2891 + /* dynamic entry */
2892 + *(tableaddr + dennum * 11) = entry;
2893 + *(tableaddr + dennum * 11 + 2) = read_lo & 0xff;
2894 + *(tableaddr + dennum * 11 + 3) =
2895 + (read_lo & 0x0000ff00) >> 8;
2896 + *(tableaddr + dennum * 11 + 4) =
2897 + (read_lo & 0x00ff0000) >> 16;
2898 + *(tableaddr + dennum * 11 + 5) =
2899 + (read_lo & 0xff000000) >> 24;
2900 + *(tableaddr + dennum * 11 + 6) = read_hi & 0xff;
2901 + *(tableaddr + dennum * 11 + 7) =
2902 + (read_hi & 0x0000ff00) >> 8;
2903 + *(tableaddr + dennum * 11 + 8) =
2904 + AT_EXTRACT_PORT(read_hi);
2905 + *(tableaddr + dennum * 11 + 9) =
2906 + AT_EXTRACT_TIMESTAMP(read_hi);
2907 + dennum++;
2908 + }
2909 + }
2910 +
2911 + *dnum = dennum;
2912 + *snum = sennum;
2913 + return 0;
2914 +}
2915 +
2916 +/*----------------------------------------------------------------------------*/
2917 +/* The timer should create an interrupt every 4 seconds*/
2918 +static void l2switch_aging_timer(unsigned long data)
2919 +{
2920 + struct switch_enet_private *fep;
2921 +
2922 + fep = (struct switch_enet_private *)data;
2923 +
2924 + if (fep) {
2925 + TIMEINCREMENT(fep->currTime);
2926 + fep->timeChanged++;
2927 + }
2928 +
2929 + mod_timer(&fep->timer_aging, jiffies + LEARNING_AGING_TIMER);
2930 +}
2931 +
2932 +/* ----------------------------------------------------------------------- */
2933 +void esw_check_rxb_txb_interrupt(struct switch_enet_private *fep)
2934 +{
2935 + volatile switch_t *fecp;
2936 + fecp = fep->hwp;
2937 +
2938 + /*Enable Forced forwarding for port 1*/
2939 + fecp->ESW_P0FFEN = MCF_ESW_P0FFEN_FEN |
2940 + MCF_ESW_P0FFEN_FD(1);
2941 +	/*Enable the RX/TX buffer and frame interrupts*/
2942 + MCF_ESW_IMR = MCF_ESW_IMR_TXB | MCF_ESW_IMR_TXF |
2943 + MCF_ESW_IMR_RXB | MCF_ESW_IMR_RXF;
2944 +}
2945 +
2946 +/*----------------------------------------------------------------*/
2947 +static int switch_enet_learning(void *arg)
2948 +{
2949 + struct switch_enet_private *fep = arg;
2950 + volatile switch_t *fecp;
2951 +
2952 + fecp = fep->hwp;
2953 + while (!kthread_should_stop()) {
2954 + set_current_state(TASK_INTERRUPTIBLE);
2955 +
2956 + /* check learning record valid */
2957 + if (fecp->ESW_LSR)
2958 + esw_atable_dynamicms_learn_migration(fep,
2959 + fep->currTime);
2960 + else
2961 + schedule_timeout(HZ/100);
2962 + }
2963 +
2964 + return 0;
2965 +}
2966 +
2967 +static int switch_enet_ioctl(struct net_device *dev,
2968 + struct ifreq *ifr, int cmd)
2969 +{
2970 + struct switch_enet_private *fep = netdev_priv(dev);
2971 + volatile switch_t *fecp;
2972 + int ret = 0;
2973 +
2974 + fecp = (volatile switch_t *)dev->base_addr;
2975 +
2976 + switch (cmd) {
2977 + /*------------------------------------------------------------*/
2978 + case ESW_SET_PORTENABLE_CONF:
2979 + {
2980 + eswIoctlPortEnableConfig configData;
2981 + ret = copy_from_user(&configData,
2982 + ifr->ifr_data,
2983 + sizeof(eswIoctlPortEnableConfig));
2984 + if (ret)
2985 + return -EFAULT;
2986 +
2987 + ret = esw_port_enable_config(fep,
2988 + configData.port,
2989 + configData.tx_enable,
2990 + configData.rx_enable);
2991 + }
2992 + break;
2993 + case ESW_SET_BROADCAST_CONF:
2994 + {
2995 + eswIoctlPortConfig configData;
2996 + ret = copy_from_user(&configData,
2997 + ifr->ifr_data, sizeof(eswIoctlPortConfig));
2998 + if (ret)
2999 + return -EFAULT;
3000 +
3001 + ret = esw_port_broadcast_config(fep,
3002 + configData.port, configData.enable);
3003 + }
3004 + break;
3005 +
3006 + case ESW_SET_MULTICAST_CONF:
3007 + {
3008 + eswIoctlPortConfig configData;
3009 + ret = copy_from_user(&configData,
3010 + ifr->ifr_data, sizeof(eswIoctlPortConfig));
3011 + if (ret)
3012 + return -EFAULT;
3013 +
3014 + ret = esw_port_multicast_config(fep,
3015 + configData.port, configData.enable);
3016 + }
3017 + break;
3018 +
3019 + case ESW_SET_BLOCKING_CONF:
3020 + {
3021 + eswIoctlPortConfig configData;
3022 + ret = copy_from_user(&configData,
3023 + ifr->ifr_data, sizeof(eswIoctlPortConfig));
3024 +
3025 + if (ret)
3026 + return -EFAULT;
3027 +
3028 + ret = esw_port_blocking_config(fep,
3029 + configData.port, configData.enable);
3030 + }
3031 + break;
3032 +
3033 + case ESW_SET_LEARNING_CONF:
3034 + {
3035 + eswIoctlPortConfig configData;
3036 +
3037 + ret = copy_from_user(&configData,
3038 + ifr->ifr_data, sizeof(eswIoctlPortConfig));
3039 + if (ret)
3040 + return -EFAULT;
3041 +
3042 + ret = esw_port_learning_config(fep,
3043 + configData.port, configData.enable);
3044 + }
3045 + break;
3046 +
3047 + case ESW_SET_PORT_ENTRY_EMPTY:
3048 + {
3049 + unsigned long portnum;
3050 +
3051 + ret = copy_from_user(&portnum,
3052 + ifr->ifr_data, sizeof(portnum));
3053 + if (ret)
3054 + return -EFAULT;
3055 + esw_atable_dynamicms_del_entries_for_port(fep, portnum);
3056 + }
3057 + break;
3058 +
3059 + case ESW_SET_OTHER_PORT_ENTRY_EMPTY:
3060 + {
3061 + unsigned long portnum;
3062 +
3063 + ret = copy_from_user(&portnum,
3064 + ifr->ifr_data, sizeof(portnum));
3065 + if (ret)
3066 + return -EFAULT;
3067 +
3068 + esw_atable_dynamicms_del_entries_for_other_port(fep, portnum);
3069 + }
3070 + break;
3071 +
3072 + case ESW_SET_IP_SNOOP_CONF:
3073 + {
3074 + eswIoctlIpsnoopConfig configData;
3075 +
3076 + ret = copy_from_user(&configData,
3077 + ifr->ifr_data, sizeof(eswIoctlIpsnoopConfig));
3078 + if (ret)
3079 + return -EFAULT;
3080 +
3081 + ret = esw_ip_snoop_config(fep, configData.mode,
3082 + configData.ip_header_protocol);
3083 + }
3084 + break;
3085 +
3086 + case ESW_SET_PORT_SNOOP_CONF:
3087 + {
3088 + eswIoctlPortsnoopConfig configData;
3089 +
3090 + ret = copy_from_user(&configData,
3091 + ifr->ifr_data, sizeof(eswIoctlPortsnoopConfig));
3092 + if (ret)
3093 + return -EFAULT;
3094 +
3095 + ret = esw_tcpudp_port_snoop_config(fep, configData.mode,
3096 + configData.compare_port,
3097 + configData.compare_num);
3098 + }
3099 + break;
3100 +
3101 + case ESW_SET_PORT_MIRROR_CONF_PORT_MATCH:
3102 + {
3103 + struct eswIoctlMirrorCfgPortMatch configData;
3104 +
3105 + ret = copy_from_user(&configData,
3106 + ifr->ifr_data, sizeof(configData));
3107 + if (ret)
3108 + return -EFAULT;
3109 + ret = esw_port_mirroring_config_port_match(fep,
3110 + configData.mirror_port, configData.port_match_en,
3111 + configData.port);
3112 + }
3113 + break;
3114 +
3115 + case ESW_SET_PORT_MIRROR_CONF:
3116 + {
3117 + eswIoctlPortMirrorConfig configData;
3118 +
3119 + ret = copy_from_user(&configData,
3120 + ifr->ifr_data, sizeof(eswIoctlPortMirrorConfig));
3121 + if (ret)
3122 + return -EFAULT;
3123 +
3124 + ret = esw_port_mirroring_config(fep,
3125 + configData.mirror_port, configData.port,
3126 + configData.mirror_enable,
3127 + configData.src_mac, configData.des_mac,
3128 + configData.egress_en, configData.ingress_en,
3129 + configData.egress_mac_src_en,
3130 + configData.egress_mac_des_en,
3131 + configData.ingress_mac_src_en,
3132 + configData.ingress_mac_des_en);
3133 + }
3134 + break;
3135 +
3136 + case ESW_SET_PORT_MIRROR_CONF_ADDR_MATCH:
3137 + {
3138 + struct eswIoctlMirrorCfgAddrMatch configData;
3139 +
3140 + ret = copy_from_user(&configData,
3141 + ifr->ifr_data, sizeof(configData));
3142 + if (ret)
3143 + return -EFAULT;
3144 +
3145 + ret = esw_port_mirroring_config_addr_match(fep,
3146 + configData.mirror_port, configData.addr_match_en,
3147 + configData.mac_addr);
3148 + }
3149 + break;
3150 +
3151 + case ESW_SET_PIRORITY_VLAN:
3152 + {
3153 + eswIoctlPriorityVlanConfig configData;
3154 +
3155 + ret = copy_from_user(&configData,
3156 + ifr->ifr_data, sizeof(eswIoctlPriorityVlanConfig));
3157 + if (ret)
3158 + return -EFAULT;
3159 +
3160 + ret = esw_framecalssify_vlan_priority_lookup(fep,
3161 + configData.port, configData.func_enable,
3162 + configData.vlan_pri_table_num,
3163 + configData.vlan_pri_table_value);
3164 + }
3165 + break;
3166 +
3167 + case ESW_SET_PIRORITY_IP:
3168 + {
3169 + eswIoctlPriorityIPConfig configData;
3170 +
3171 + ret = copy_from_user(&configData,
3172 + ifr->ifr_data, sizeof(eswIoctlPriorityIPConfig));
3173 + if (ret)
3174 + return -EFAULT;
3175 +
3176 + ret = esw_framecalssify_ip_priority_lookup(fep,
3177 + configData.port, configData.func_enable,
3178 + configData.ipv4_en, configData.ip_priority_num,
3179 + configData.ip_priority_value);
3180 + }
3181 + break;
3182 +
3183 + case ESW_SET_PIRORITY_MAC:
3184 + {
3185 + eswIoctlPriorityMacConfig configData;
3186 +
3187 + ret = copy_from_user(&configData,
3188 + ifr->ifr_data, sizeof(eswIoctlPriorityMacConfig));
3189 + if (ret)
3190 + return -EFAULT;
3191 +
3192 + ret = esw_framecalssify_mac_priority_lookup(fep,
3193 + configData.port);
3194 + }
3195 + break;
3196 +
3197 + case ESW_SET_PIRORITY_DEFAULT:
3198 + {
3199 + eswIoctlPriorityDefaultConfig configData;
3200 +
3201 + ret = copy_from_user(&configData,
3202 + ifr->ifr_data, sizeof(eswIoctlPriorityDefaultConfig));
3203 + if (ret)
3204 + return -EFAULT;
3205 +
3206 + ret = esw_frame_calssify_priority_init(fep,
3207 + configData.port, configData.priority_value);
3208 + }
3209 + break;
3210 +
3211 + case ESW_SET_P0_FORCED_FORWARD:
3212 + {
3213 + eswIoctlP0ForcedForwardConfig configData;
3214 +
3215 + ret = copy_from_user(&configData,
3216 + ifr->ifr_data, sizeof(eswIoctlP0ForcedForwardConfig));
3217 + if (ret)
3218 + return -EFAULT;
3219 +
3220 + ret = esw_forced_forward(fep, configData.port1,
3221 + configData.port2, configData.enable);
3222 + }
3223 + break;
3224 +
3225 + case ESW_SET_BRIDGE_CONFIG:
3226 + {
3227 + unsigned long configData;
3228 +
3229 + ret = copy_from_user(&configData,
3230 + ifr->ifr_data, sizeof(unsigned long));
3231 + if (ret)
3232 + return -EFAULT;
3233 +
3234 + esw_bridge_port_configure(fep, configData);
3235 + }
3236 + break;
3237 +
3238 + case ESW_SET_SWITCH_MODE:
3239 + {
3240 + unsigned long configData;
3241 +
3242 + ret = copy_from_user(&configData,
3243 + ifr->ifr_data, sizeof(unsigned long));
3244 + if (ret)
3245 + return -EFAULT;
3246 +
3247 + esw_switch_mode_configure(fep, configData);
3248 + }
3249 + break;
3250 +
3251 + case ESW_SET_OUTPUT_QUEUE_MEMORY:
3252 + {
3253 + eswIoctlOutputQueue configData;
3254 +
3255 + ret = copy_from_user(&configData,
3256 + ifr->ifr_data, sizeof(eswIoctlOutputQueue));
3257 + if (ret)
3258 + return -EFAULT;
3259 +
3260 + ret = esw_set_output_queue_memory(fep,
3261 + configData.fun_num, &configData.sOutputQueue);
3262 + }
3263 + break;
3264 +
3265 + case ESW_SET_VLAN_OUTPUT_PROCESS:
3266 + {
3267 + eswIoctlVlanOutputConfig configData;
3268 +
3269 + ret = copy_from_user(&configData,
3270 + ifr->ifr_data, sizeof(eswIoctlVlanOutputConfig));
3271 + if (ret)
3272 + return -EFAULT;
3273 +
3274 + ret = esw_vlan_output_process(fep,
3275 + configData.port, configData.mode);
3276 + }
3277 + break;
3278 +
3279 + case ESW_SET_VLAN_INPUT_PROCESS:
3280 + {
3281 + eswIoctlVlanInputConfig configData;
3282 +
3283 + ret = copy_from_user(&configData,
3284 + ifr->ifr_data,
3285 + sizeof(eswIoctlVlanInputConfig));
3286 + if (ret)
3287 + return -EFAULT;
3288 +
3289 + ret = esw_vlan_input_process(fep, configData.port,
3290 + configData.mode, configData.port_vlanid);
3291 + }
3292 + break;
3293 +
3294 + case ESW_SET_VLAN_DOMAIN_VERIFICATION:
3295 + {
3296 + eswIoctlVlanVerificationConfig configData;
3297 +
3298 + ret = copy_from_user(&configData,
3299 + ifr->ifr_data,
3300 + sizeof(eswIoctlVlanVerificationConfig));
3301 + if (ret)
3302 + return -EFAULT;
3303 +
3304 + ret = esw_set_vlan_verification(
3305 + fep, configData.port,
3306 + configData.vlan_domain_verify_en,
3307 + configData.vlan_discard_unknown_en);
3308 + }
3309 + break;
3310 +
3311 + case ESW_SET_VLAN_RESOLUTION_TABLE:
3312 + {
3313 + eswIoctlVlanResoultionTable configData;
3314 +
3315 + ret = copy_from_user(&configData,
3316 + ifr->ifr_data,
3317 + sizeof(eswIoctlVlanResoultionTable));
3318 + if (ret)
3319 + return -EFAULT;
3320 +
3321 + ret = esw_set_vlan_resolution_table(
3322 + fep, configData.port_vlanid,
3323 + configData.vlan_domain_num,
3324 + configData.vlan_domain_port);
3325 +
3326 + }
3327 + break;
3328 +
3329 + case ESW_SET_VLAN_ID:
3330 + {
3331 + unsigned long configData;
3332 + ret = copy_from_user(&configData, ifr->ifr_data,
3333 + sizeof(configData));
3334 + if (ret)
3335 + return -EFAULT;
3336 +
3337 + ret = esw_set_vlan_id(fep, configData);
3338 + }
3339 + break;
3340 +
3341 + case ESW_SET_VLAN_ID_CLEARED:
3342 + {
3343 + unsigned long configData;
3344 + ret = copy_from_user(&configData, ifr->ifr_data,
3345 + sizeof(configData));
3346 + if (ret)
3347 + return -EFAULT;
3348 +
3349 + ret = esw_set_vlan_id_cleared(fep, configData);
3350 + }
3351 + break;
3352 +
3353 + case ESW_SET_PORT_IN_VLAN_ID:
3354 + {
3355 + eswIoctlVlanResoultionTable configData;
3356 +
3357 + ret = copy_from_user(&configData, ifr->ifr_data,
3358 + sizeof(configData));
3359 + if (ret)
3360 + return -EFAULT;
3361 +
3362 + ret = esw_set_port_in_vlan_id(fep, configData);
3363 + }
3364 + break;
3365 +
3366 + /*--------------------------------------------------------------------*/
3367 + case ESW_UPDATE_STATIC_MACTABLE:
3368 + {
3369 + eswIoctlUpdateStaticMACtable configData;
3370 +
3371 + ret = copy_from_user(&configData,
3372 + ifr->ifr_data, sizeof(eswIoctlUpdateStaticMACtable));
3373 + if (ret)
3374 + return -EFAULT;
3375 +
3376 + ret = esw_update_atable_static(configData.mac_addr,
3377 + configData.port, configData.priority, fep);
3378 + }
3379 + break;
3380 +
3381 + case ESW_CLEAR_ALL_MACTABLE:
3382 + {
3383 + esw_clear_atable(fep);
3384 + }
3385 + break;
3386 +
3387 + /*-------------------get----------------------------------------------*/
3388 + case ESW_GET_STATISTICS_STATUS:
3389 + {
3390 + esw_statistics_status Statistics;
3391 + esw_port_statistics_status PortSta;
3392 + int i;
3393 +
3394 + ret = esw_get_statistics_status(fep, &Statistics);
3395 + if (ret != 0) {
3396 + printk(KERN_ERR "%s: cmd %x fail\n", __func__, cmd);
3397 + return -1;
3398 + }
3399 + printk(KERN_INFO "DISCN : %10ld DISCB : %10ld\n",
3400 + Statistics.ESW_DISCN, Statistics.ESW_DISCB);
3401 + printk(KERN_INFO "NDISCN: %10ld NDISCB: %10ld\n",
3402 + Statistics.ESW_NDISCN, Statistics.ESW_NDISCB);
3403 +
3404 + for (i = 0; i < 3; i++) {
3405 + ret = esw_get_port_statistics_status(fep, i,
3406 + &PortSta);
3407 + if (ret != 0) {
3408 + printk(KERN_ERR "%s: cmd %x fail\n",
3409 + __func__, cmd);
3410 + return -1;
3411 + }
3412 + printk(KERN_INFO "port %d: POQC : %ld\n",
3413 + i, PortSta.MCF_ESW_POQC);
3414 + printk(KERN_INFO " PMVID : %ld\n",
3415 + PortSta.MCF_ESW_PMVID);
3416 + printk(KERN_INFO " PMVTAG: %ld\n",
3417 + PortSta.MCF_ESW_PMVTAG);
3418 + printk(KERN_INFO " PBL : %ld\n",
3419 + PortSta.MCF_ESW_PBL);
3420 + }
3421 + }
3422 + break;
3423 +
3424 + case ESW_GET_LEARNING_CONF:
3425 + {
3426 + unsigned long PortLearning;
3427 +
3428 + esw_get_port_learning(fep, &PortLearning);
3429 + ret = copy_to_user(ifr->ifr_data, &PortLearning,
3430 + sizeof(unsigned long));
3431 + if (ret)
3432 + return -EFAULT;
3433 + }
3434 + break;
3435 +
3436 + case ESW_GET_BLOCKING_CONF:
3437 + {
3438 + unsigned long PortBlocking;
3439 +
3440 + esw_get_port_blocking(fep, &PortBlocking);
3441 + ret = copy_to_user(ifr->ifr_data, &PortBlocking,
3442 + sizeof(unsigned long));
3443 + if (ret)
3444 + return -EFAULT;
3445 + }
3446 + break;
3447 +
3448 + case ESW_GET_MULTICAST_CONF:
3449 + {
3450 + unsigned long PortMulticast;
3451 +
3452 + esw_get_port_multicast(fep, &PortMulticast);
3453 + ret = copy_to_user(ifr->ifr_data, &PortMulticast,
3454 + sizeof(unsigned long));
3455 + if (ret)
3456 + return -EFAULT;
3457 + }
3458 + break;
3459 +
3460 + case ESW_GET_BROADCAST_CONF:
3461 + {
3462 + unsigned long PortBroadcast;
3463 +
3464 + esw_get_port_broadcast(fep, &PortBroadcast);
3465 + ret = copy_to_user(ifr->ifr_data, &PortBroadcast,
3466 + sizeof(unsigned long));
3467 + if (ret)
3468 + return -EFAULT;
3469 + }
3470 + break;
3471 +
3472 + case ESW_GET_PORTENABLE_CONF:
3473 + {
3474 + unsigned long PortEnable;
3475 +
3476 + esw_get_port_enable(fep, &PortEnable);
3477 + ret = copy_to_user(ifr->ifr_data, &PortEnable,
3478 + sizeof(unsigned long));
3479 + if (ret)
3480 + return -EFAULT;
3481 + }
3482 + break;
3483 +
3484 + case ESW_GET_IP_SNOOP_CONF:
3485 + {
3486 + unsigned long ESW_IPSNP[8];
3487 + int i;
3488 +
3489 + esw_get_ip_snoop_config(fep, (unsigned long *)ESW_IPSNP);
3490 + printk(KERN_INFO "IP Protocol Mode Type\n");
3491 + for (i = 0; i < 8; i++) {
3492 + if (ESW_IPSNP[i] != 0)
3493 + printk(KERN_INFO "%3ld "
3494 + "%1ld %s\n",
3495 + (ESW_IPSNP[i] >> 8) & 0xff,
3496 + (ESW_IPSNP[i] >> 1) & 3,
3497 + ESW_IPSNP[i] & 1 ? "Active" :
3498 + "Inactive");
3499 + }
3500 + }
3501 + break;
3502 +
3503 + case ESW_GET_PORT_SNOOP_CONF:
3504 + {
3505 + unsigned long ESW_PSNP[8];
3506 + int i;
3507 +
3508 + esw_get_tcpudp_port_snoop_config(fep,
3509 + (unsigned long *)ESW_PSNP);
3510 + printk(KERN_INFO "TCP/UDP Port SrcCompare DesCompare "
3511 + "Mode Type\n");
3512 + for (i = 0; i < 8; i++) {
3513 + if (ESW_PSNP[i] != 0)
3514 + printk(KERN_INFO "%5ld %s "
3515 + "%s %1ld %s\n",
3516 + (ESW_PSNP[i] >> 16) & 0xffff,
3517 + (ESW_PSNP[i] >> 4) & 1 ? "Y" : "N",
3518 + (ESW_PSNP[i] >> 3) & 1 ? "Y" : "N",
3519 + (ESW_PSNP[i] >> 1) & 3,
3520 + ESW_PSNP[i] & 1 ? "Active" :
3521 + "Inactive");
3522 + }
3523 + }
3524 + break;
3525 +
3526 + case ESW_GET_PORT_MIRROR_CONF:
3527 + esw_get_port_mirroring(fep);
3528 + break;
3529 +
3530 + case ESW_GET_P0_FORCED_FORWARD:
3531 + {
3532 + unsigned long ForceForward;
3533 +
3534 + esw_get_forced_forward(fep, &ForceForward);
3535 + ret = copy_to_user(ifr->ifr_data, &ForceForward,
3536 + sizeof(unsigned long));
3537 + if (ret)
3538 + return -EFAULT;
3539 + }
3540 + break;
3541 +
3542 + case ESW_GET_SWITCH_MODE:
3543 + {
3544 + unsigned long Config;
3545 +
3546 + esw_get_switch_mode(fep, &Config);
3547 + ret = copy_to_user(ifr->ifr_data, &Config,
3548 + sizeof(unsigned long));
3549 + if (ret)
3550 + return -EFAULT;
3551 + }
3552 + break;
3553 +
3554 + case ESW_GET_BRIDGE_CONFIG:
3555 + {
3556 + unsigned long Config;
3557 +
3558 + esw_get_bridge_port(fep, &Config);
3559 + ret = copy_to_user(ifr->ifr_data, &Config,
3560 + sizeof(unsigned long));
3561 + if (ret)
3562 + return -EFAULT;
3563 + }
3564 + break;
3565 + case ESW_GET_OUTPUT_QUEUE_STATUS:
3566 + {
3567 + esw_output_queue_status Config;
3568 + esw_get_output_queue_status(fep,
3569 + &Config);
3570 + ret = copy_to_user(ifr->ifr_data, &Config,
3571 + sizeof(esw_output_queue_status));
3572 + if (ret)
3573 + return -EFAULT;
3574 + }
3575 + break;
3576 +
3577 + case ESW_GET_VLAN_OUTPUT_PROCESS:
3578 + {
3579 + unsigned long Config;
3580 + int tmp;
3581 + int i;
3582 +
3583 + esw_get_vlan_output_config(fep, &Config);
3584 +
3585 + for (i = 0; i < 3; i++) {
3586 + tmp = (Config >> (i << 1)) & 3;
3587 +
3588 + if (tmp != 0)
3589 + printk(KERN_INFO "port %d: vlan output "
3590 + "manipulation enable (mode %d)\n",
3591 + i, tmp);
3592 + else
3593 + printk(KERN_INFO "port %d: vlan output "
3594 + "manipulation disable\n", i);
3595 + }
3596 + }
3597 + break;
3598 +
3599 + case ESW_GET_VLAN_INPUT_PROCESS:
3600 + {
3601 + eswIoctlVlanInputStatus Config;
3602 + int i;
3603 +
3604 + esw_get_vlan_input_config(fep, &Config);
3605 +
3606 + for (i = 0; i < 3; i++) {
3607 + if (((Config.ESW_VIMEN >> i) & 1) == 0)
3608 + printk(KERN_INFO "port %d: vlan input "
3609 + "manipulation disable\n", i);
3610 + else
3611 +				printk(KERN_INFO "port %d: vlan input manipulation enable"
3612 + " (mode %ld, vlan id %ld)\n", i,
3613 + (((Config.ESW_VIMSEL >> (i << 1)) & 3)
3614 + + 1), Config.ESW_PID[i]);
3615 + }
3616 + }
3617 + break;
3618 +
3619 + case ESW_GET_VLAN_RESOLUTION_TABLE:
3620 + {
3621 + struct eswVlanTableItem vtableitem;
3622 + unsigned char tmp0, tmp1, tmp2;
3623 + int i;
3624 +
3625 + esw_get_vlan_resolution_table(fep, &vtableitem);
3626 +
3627 + printk(KERN_INFO "VLAN Name VLAN Id Ports\n");
3628 + for (i = 0; i < vtableitem.valid_num; i++) {
3629 + tmp0 = vtableitem.table[i].vlan_domain_port & 1;
3630 + tmp1 = (vtableitem.table[i].vlan_domain_port >> 1) & 1;
3631 + tmp2 = (vtableitem.table[i].vlan_domain_port >> 2) & 1;
3632 + printk(KERN_INFO "%2d %4d %s%s%s\n",
3633 + i, vtableitem.table[i].port_vlanid,
3634 + tmp0 ? "0 " : "", tmp1 ? "1 " : "",
3635 + tmp2 ? "2" : "");
3636 + }
3637 + }
3638 + break;
3639 +
3640 + case ESW_GET_VLAN_DOMAIN_VERIFICATION:
3641 + {
3642 + unsigned long Config;
3643 +
3644 + esw_get_vlan_verification(fep, &Config);
3645 + ret = copy_to_user(ifr->ifr_data, &Config,
3646 + sizeof(unsigned long));
3647 + if (ret)
3648 + return -EFAULT;
3649 + }
3650 + break;
3651 +
3652 + case ESW_GET_ENTRY_PORT_NUMBER:
3653 + {
3654 + unsigned char mac_addr[6];
3655 + unsigned char portnum;
3656 +
3657 + ret = copy_from_user(mac_addr,
3658 + ifr->ifr_data, sizeof(mac_addr));
3659 + if (ret)
3660 + return -EFAULT;
3661 +
3662 + ret = esw_atable_get_entry_port_number(fep, mac_addr,
3663 + &portnum);
3664 +
3665 + ret = copy_to_user(ifr->ifr_data, &portnum,
3666 + sizeof(unsigned char));
3667 + if (ret)
3668 + return -EFAULT;
3669 + }
3670 + break;
3671 +
3672 + case ESW_GET_LOOKUP_TABLE:
3673 + {
3674 + unsigned long *ConfigData;
3675 + unsigned long dennum, sennum;
3676 + int i;
3677 + int tmp;
3678 +
3679 + ConfigData = kmalloc(sizeof(struct eswAddrTableEntryExample) *
3680 + ESW_ATABLE_MEM_NUM_ENTRIES, GFP_KERNEL);
3681 + ret = esw_get_mac_address_lookup_table(fep, ConfigData,
3682 + &dennum, &sennum);
3683 + printk(KERN_INFO "Dynamic entries number: %ld\n", dennum);
3684 + printk(KERN_INFO "Static entries number: %ld\n", sennum);
3685 + printk(KERN_INFO "Type MAC address Port Timestamp\n");
3686 + for (i = 0; i < dennum; i++) {
3687 + printk(KERN_INFO "dynamic "
3688 + "%02lx-%02lx-%02lx-%02lx-%02lx-%02lx "
3689 + "%01lx %4ld\n", *(ConfigData + i * 11 + 2),
3690 + *(ConfigData + i * 11 + 3),
3691 + *(ConfigData + i * 11 + 4),
3692 + *(ConfigData + i * 11 + 5),
3693 + *(ConfigData + i * 11 + 6),
3694 + *(ConfigData + i * 11 + 7),
3695 + *(ConfigData + i * 11 + 8),
3696 + *(ConfigData + i * 11 + 9));
3697 + }
3698 +
3699 + if (sennum != 0)
3700 + printk(KERN_INFO "Type MAC address"
3701 + " Port Priority\n");
3702 +
3703 + for (i = 0; i < sennum; i++) {
3704 + printk(KERN_INFO "static %02lx-%02lx-%02lx-%02lx"
3705 + "-%02lx-%02lx ",
3706 + *(ConfigData + (2047 - i) * 11 + 2),
3707 + *(ConfigData + (2047 - i) * 11 + 3),
3708 + *(ConfigData + (2047 - i) * 11 + 4),
3709 + *(ConfigData + (2047 - i) * 11 + 5),
3710 + *(ConfigData + (2047 - i) * 11 + 6),
3711 + *(ConfigData + (2047 - i) * 11 + 7));
3712 +
3713 + tmp = *(ConfigData + (2047 - i) * 11 + 8);
3714 + if ((tmp == 0) || (tmp == 2) || (tmp == 4))
3715 + printk("%01x ", tmp >> 1);
3716 + else if (tmp == 3)
3717 + printk("0,1 ");
3718 + else if (tmp == 5)
3719 + printk("0,2 ");
3720 + else if (tmp == 6)
3721 + printk("1,2 ");
3722 +
3723 + printk("%4ld\n", *(ConfigData + (2047 - i) * 11 + 9));
3724 + }
3725 + kfree(ConfigData);
3726 + }
3727 + break;
3728 +
3729 + case ESW_GET_PORT_STATUS:
3730 + {
3731 + unsigned long PortBlocking;
3732 +
3733 + esw_get_port_blocking(fep, &PortBlocking);
3734 +
3735 + ports_link_status.port0_block_status = PortBlocking & 1;
3736 + ports_link_status.port1_block_status = (PortBlocking >> 1) & 1;
3737 +		ports_link_status.port2_block_status = (PortBlocking >> 2) & 1;
3738 +
3739 + ret = copy_to_user(ifr->ifr_data, &ports_link_status,
3740 + sizeof(ports_link_status));
3741 + if (ret)
3742 + return -EFAULT;
3743 + }
3744 + break;
3745 +
3746 + case ESW_GET_PORT_ALL_STATUS:
3747 + {
3748 + unsigned char portnum;
3749 + struct port_all_status port_astatus;
3750 +
3751 + ret = copy_from_user(&portnum,
3752 + ifr->ifr_data, sizeof(portnum));
3753 + if (ret)
3754 + return -EFAULT;
3755 +
3756 + esw_get_port_all_status(fep, portnum, &port_astatus);
3757 + printk(KERN_INFO "Port %d status:\n", portnum);
3758 + printk(KERN_INFO "Link:%-4s Blocking:%1s "
3759 + "Learning:%1s\n",
3760 + port_astatus.link_status ? "Up" : "Down",
3761 + port_astatus.block_status ? "Y" : "N",
3762 + port_astatus.learn_status ? "N" : "Y");
3763 + printk(KERN_INFO "VLAN Verify:%1s Discard Unknown:%1s "
3764 + "Multicast Res:%1s\n",
3765 + port_astatus.vlan_verify ? "Y" : "N",
3766 + port_astatus.discard_unknown ? "Y" : "N",
3767 + port_astatus.multi_reso ? "Y" : "N");
3768 + printk(KERN_INFO "Broadcast Res:%1s Transmit:%-7s "
3769 + "Receive:%7s\n",
3770 + port_astatus.broad_reso ? "Y" : "N",
3771 + port_astatus.ftransmit ? "Enable" : "Disable",
3772 + port_astatus.freceive ? "Enable" : "Disable");
3773 +
3774 + }
3775 + break;
3776 +
3777 + case ESW_GET_USER_PID:
3778 + {
3779 + long get_pid = 0;
3780 + ret = copy_from_user(&get_pid,
3781 + ifr->ifr_data, sizeof(get_pid));
3782 +
3783 + if (ret)
3784 + return -EFAULT;
3785 + user_pid = get_pid;
3786 + }
3787 + break;
3788 + /*------------------------------------------------------------------*/
3789 + default:
3790 + return -EOPNOTSUPP;
3791 + }
3792 +
3793 + return ret;
3794 +}
3795 +
3796 +static netdev_tx_t switch_enet_start_xmit(struct sk_buff *skb,
3797 + struct net_device *dev)
3798 +{
3799 + struct switch_enet_private *fep;
3800 + volatile switch_t *fecp;
3801 + cbd_t *bdp;
3802 + unsigned short status;
3803 + unsigned long flags;
3804 +
3805 + fep = netdev_priv(dev);
3806 + fecp = (switch_t *)fep->hwp;
3807 +
3808 + spin_lock_irqsave(&fep->hw_lock, flags);
3809 + /* Fill in a Tx ring entry */
3810 + bdp = fep->cur_tx;
3811 +
3812 + status = bdp->cbd_sc;
3813 +
3814 + /* Clear all of the status flags.
3815 + */
3816 + status &= ~BD_ENET_TX_STATS;
3817 +
3818 + /* Set buffer length and buffer pointer.
3819 + */
3820 + bdp->cbd_bufaddr = __pa(skb->data);
3821 + bdp->cbd_datlen = skb->len;
3822 +
3823 + /*
3824 + * On some FEC implementations data must be aligned on
3825 + * 4-byte boundaries. Use bounce buffers to copy data
3826 + * and get it aligned. Ugh.
3827 + */
3828 + if (bdp->cbd_bufaddr & 0x3) {
3829 + unsigned int index1;
3830 + index1 = bdp - fep->tx_bd_base;
3831 +
3832 + memcpy(fep->tx_bounce[index1],
3833 + (void *)skb->data, bdp->cbd_datlen);
3834 + bdp->cbd_bufaddr = __pa(fep->tx_bounce[index1]);
3835 + }
3836 +
3837 + /* Save skb pointer. */
3838 + fep->tx_skbuff[fep->skb_cur] = skb;
3839 +
3840 + dev->stats.tx_bytes += skb->len;
3841 + fep->skb_cur = (fep->skb_cur+1) & TX_RING_MOD_MASK;
3842 +
3843 + /* Push the data cache so the CPM does not get stale memory
3844 + * data.
3845 + */
3846 + flush_dcache_range((unsigned long)skb->data,
3847 + (unsigned long)skb->data + skb->len);
3848 +
3849 + /* Send it on its way. Tell FEC it's ready, interrupt when done,
3850 + * it's the last BD of the frame, and to put the CRC on the end.
3851 + */
3852 +
3853 + status |= (BD_ENET_TX_READY | BD_ENET_TX_INTR
3854 + | BD_ENET_TX_LAST | BD_ENET_TX_TC);
3855 + bdp->cbd_sc = status;
3856 + dev->trans_start = jiffies;
3857 +
3858 + /* Trigger transmission start */
3859 + fecp->fec_x_des_active = MCF_ESW_TDAR_X_DES_ACTIVE;
3860 +
3861 + /* If this was the last BD in the ring,
3862 + * start at the beginning again.*/
3863 + if (status & BD_ENET_TX_WRAP)
3864 + bdp = fep->tx_bd_base;
3865 + else
3866 + bdp++;
3867 +
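+	/* The ring is full when the next descriptor to use catches up with
+	 * the first one still owned by the hardware. */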
3868 + if (bdp == fep->dirty_tx) {
3869 + fep->tx_full = 1;
3870 + netif_stop_queue(dev);
3871 + printk(KERN_ERR "%s: net stop\n", __func__);
3872 + }
3873 +
3874 + fep->cur_tx = (cbd_t *)bdp;
3875 +
3876 + spin_unlock_irqrestore(&fep->hw_lock, flags);
3877 +
3878 + return NETDEV_TX_OK;
3879 +}
3880 +
3881 +static void switch_timeout(struct net_device *dev)
3882 +{
3883 + struct switch_enet_private *fep = netdev_priv(dev);
3884 +
3885 + printk(KERN_ERR "%s: transmit timed out.\n", dev->name);
3886 + dev->stats.tx_errors++;
3887 + switch_restart(dev, fep->full_duplex);
3888 + netif_wake_queue(dev);
3889 +}
3890 +
3891 +/* The interrupt handler.
3892 + * This is called from the ColdFire interrupt controller.
3893 + */
3894 +static irqreturn_t switch_enet_interrupt(int irq, void *dev_id)
3895 +{
3896 + struct net_device *dev = dev_id;
3897 + volatile switch_t *fecp;
3898 + uint int_events;
3899 + irqreturn_t ret = IRQ_NONE;
3900 +
3901 + fecp = (switch_t *)dev->base_addr;
3902 +
3903 + /* Get the interrupt events that caused us to be here.
3904 + */
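+	/* Writing back the value just read acknowledges those events (the
+	 * register is write-1-to-clear, as on other FEC-family controllers);
+	 * loop until no further events are pending. */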
3905 + do {
3906 + int_events = fecp->switch_ievent;
3907 + fecp->switch_ievent = int_events;
3908 + /* Handle receive event in its own function. */
3909 +
3910 + /* Transmit OK, or non-fatal error. Update the buffer
3911 + descriptors. Switch handles all errors, we just discover
3912 + them as part of the transmit process.
3913 + */
3914 + if (int_events & MCF_ESW_ISR_OD0)
3915 + ret = IRQ_HANDLED;
3916 +
3917 + if (int_events & MCF_ESW_ISR_OD1)
3918 + ret = IRQ_HANDLED;
3919 +
3920 + if (int_events & MCF_ESW_ISR_OD2)
3921 + ret = IRQ_HANDLED;
3922 +
3923 + if (int_events & MCF_ESW_ISR_RXB)
3924 + ret = IRQ_HANDLED;
3925 +
3926 + if (int_events & MCF_ESW_ISR_RXF) {
3927 + ret = IRQ_HANDLED;
3928 + switch_enet_rx(dev);
3929 + }
3930 +
3931 + if (int_events & MCF_ESW_ISR_TXB)
3932 + ret = IRQ_HANDLED;
3933 +
3934 + if (int_events & MCF_ESW_ISR_TXF) {
3935 + ret = IRQ_HANDLED;
3936 + switch_enet_tx(dev);
3937 + }
3938 +
3939 + } while (int_events);
3940 +
3941 + return ret;
3942 +}
3943 +
3944 +static void switch_enet_tx(struct net_device *dev)
3945 +{
3946 + struct switch_enet_private *fep;
3947 + cbd_t *bdp;
3948 + unsigned short status;
3949 + struct sk_buff *skb;
3950 +
3951 + fep = netdev_priv(dev);
3952 + spin_lock_irq(&fep->hw_lock);
3953 + bdp = fep->dirty_tx;
3954 +
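+	/* Walk the descriptors from dirty_tx; a buffer is complete once the
+	 * hardware has cleared its READY bit. */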
3955 + while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) {
3956 + if (bdp == fep->cur_tx && fep->tx_full == 0)
3957 + break;
3958 +
3959 + skb = fep->tx_skbuff[fep->skb_dirty];
3960 + /* Check for errors. */
3961 + if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
3962 + BD_ENET_TX_RL | BD_ENET_TX_UN |
3963 + BD_ENET_TX_CSL)) {
3964 + dev->stats.tx_errors++;
3965 + if (status & BD_ENET_TX_HB) /* No heartbeat */
3966 + dev->stats.tx_heartbeat_errors++;
3967 + if (status & BD_ENET_TX_LC) /* Late collision */
3968 + dev->stats.tx_window_errors++;
3969 + if (status & BD_ENET_TX_RL) /* Retrans limit */
3970 + dev->stats.tx_aborted_errors++;
3971 + if (status & BD_ENET_TX_UN) /* Underrun */
3972 + dev->stats.tx_fifo_errors++;
3973 + if (status & BD_ENET_TX_CSL) /* Carrier lost */
3974 + dev->stats.tx_carrier_errors++;
3975 + } else {
3976 + dev->stats.tx_packets++;
3977 + }
3978 +
3979 + /* Deferred means some collisions occurred during transmit,
3980 + * but we eventually sent the packet OK.
3981 + */
3982 + if (status & BD_ENET_TX_DEF)
3983 + dev->stats.collisions++;
3984 +
3985 + /* Free the sk buffer associated with this last transmit.
3986 + */
3987 + dev_kfree_skb_any(skb);
3988 + fep->tx_skbuff[fep->skb_dirty] = NULL;
3989 + fep->skb_dirty = (fep->skb_dirty + 1) & TX_RING_MOD_MASK;
3990 +
3991 + /* Update pointer to next buffer descriptor to be transmitted.
3992 + */
3993 + if (status & BD_ENET_TX_WRAP)
3994 + bdp = fep->tx_bd_base;
3995 + else
3996 + bdp++;
3997 +
3998 + /* Since we have freed up a buffer, the ring is no longer
3999 + * full.
4000 + */
4001 + if (fep->tx_full) {
4002 + fep->tx_full = 0;
4003 + printk(KERN_ERR "%s: tx full is zero\n", __func__);
4004 + if (netif_queue_stopped(dev))
4005 + netif_wake_queue(dev);
4006 + }
4007 + }
4008 + fep->dirty_tx = (cbd_t *)bdp;
4009 + spin_unlock_irq(&fep->hw_lock);
4010 +}
4011 +
4012 +
4013 +/* During a receive, the cur_rx points to the current incoming buffer.
4014 + * When we update through the ring, if the next incoming buffer has
4015 + * not been given to the system, we just set the empty indicator,
4016 + * effectively tossing the packet.
4017 + */
4018 +static void switch_enet_rx(struct net_device *dev)
4019 +{
4020 + struct switch_enet_private *fep;
4021 + volatile switch_t *fecp;
4022 + cbd_t *bdp;
4023 + unsigned short status;
4024 + struct sk_buff *skb;
4025 + ushort pkt_len;
4026 + __u8 *data;
4027 +
4028 + fep = netdev_priv(dev);
4029 + /*fecp = (volatile switch_t *)dev->base_addr;*/
4030 + fecp = (volatile switch_t *)fep->hwp;
4031 +
4032 + spin_lock_irq(&fep->hw_lock);
4033 + /* First, grab all of the stats for the incoming packet.
4034 + * These get messed up if we get called due to a busy condition.
4035 + */
4036 + bdp = fep->cur_rx;
4037 +
4038 + while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) {
4039 +
4040 + /* Since we have allocated space to hold a complete frame,
4041 + * the last indicator should be set.
4042 + * */
4043 + if ((status & BD_ENET_RX_LAST) == 0)
4044 + printk(KERN_ERR "SWITCH ENET: rcv is not +last\n");
4045 +
4046 + if (!fep->opened)
4047 + goto rx_processing_done;
4048 +
4049 + /* Check for errors. */
4050 + if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
4051 + BD_ENET_RX_CR | BD_ENET_RX_OV)) {
4052 + dev->stats.rx_errors++;
4053 + if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH)) {
4054 + /* Frame too long or too short. */
4055 + dev->stats.rx_length_errors++;
4056 + }
4057 + if (status & BD_ENET_RX_NO) /* Frame alignment */
4058 + dev->stats.rx_frame_errors++;
4059 + if (status & BD_ENET_RX_CR) /* CRC Error */
4060 + dev->stats.rx_crc_errors++;
4061 + if (status & BD_ENET_RX_OV) /* FIFO overrun */
4062 + dev->stats.rx_fifo_errors++;
4063 + }
4064 + /* Report late collisions as a frame error.
4065 + * On this error, the BD is closed, but we don't know what we
4066 + * have in the buffer. So, just drop this frame on the floor.
4067 + * */
4068 + if (status & BD_ENET_RX_CL) {
4069 + dev->stats.rx_errors++;
4070 + dev->stats.rx_frame_errors++;
4071 + goto rx_processing_done;
4072 + }
4073 + /* Process the incoming frame */
4074 + dev->stats.rx_packets++;
4075 + pkt_len = bdp->cbd_datlen;
4076 + dev->stats.rx_bytes += pkt_len;
4077 + data = (__u8 *)__va(bdp->cbd_bufaddr);
4078 +
4079 + /* This does 16 byte alignment, exactly what we need.
4080 + * The packet length includes FCS, but we don't want to
4081 + * include that when passing upstream as it messes up
4082 + * bridging applications.
4083 + * */
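+		/* Each frame is copied into a freshly allocated skb so the
+		 * descriptor's receive buffer can be handed straight back to
+		 * the hardware below. */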
4084 + skb = dev_alloc_skb(pkt_len);
4085 +
4086 + if (skb == NULL)
4087 + dev->stats.rx_dropped++;
4088 + else {
4089 + skb_put(skb, pkt_len); /* Make room */
4090 + skb_copy_to_linear_data(skb, data, pkt_len);
4091 + skb->protocol = eth_type_trans(skb, dev);
4092 + netif_rx(skb);
4093 + }
4094 +rx_processing_done:
4095 +
4096 + /* Clear the status flags for this buffer */
4097 + status &= ~BD_ENET_RX_STATS;
4098 +
4099 + /* Mark the buffer empty */
4100 + status |= BD_ENET_RX_EMPTY;
4101 + bdp->cbd_sc = status;
4102 +
4103 + /* Update BD pointer to next entry */
4104 + if (status & BD_ENET_RX_WRAP)
4105 + bdp = fep->rx_bd_base;
4106 + else
4107 + bdp++;
4108 +
4109 + /* Doing this here will keep the FEC running while we process
4110 + * incoming frames. On a heavily loaded network, we should be
4111 + * able to keep up at the expense of system resources.
4112 + * */
4113 + fecp->fec_r_des_active = MCF_ESW_RDAR_R_DES_ACTIVE;
4114 + }
4115 + fep->cur_rx = (cbd_t *)bdp;
4116 +
4117 + spin_unlock_irq(&fep->hw_lock);
4118 +}
4119 +
4120 +static int fec_mdio_transfer(struct mii_bus *bus, int phy_id,
4121 + int reg, int regval)
4122 +{
4123 + struct net_device *dev = bus->priv;
4124 + unsigned long flags;
4125 + struct switch_enet_private *fep;
4126 + int tries = 100;
4127 + int retval = 0;
4128 +
4129 + fep = netdev_priv(dev);
4130 + spin_lock_irqsave(&fep->mii_lock, flags);
4131 +
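+	/* The caller passes a preformatted MII management frame
+	 * (mk_mii_read/mk_mii_write); only the PHY address field, which
+	 * appears to sit at bits 27:23 of MMFR, is filled in here. */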
4132 + regval |= phy_id << 23;
4133 + MCF_FEC_MMFR0 = regval;
4134 +
4135 +	/* wait for the transfer to finish, typically a few tens of microseconds */
4136 + while (!(MCF_FEC_EIR0 & FEC_ENET_MII) && --tries)
4137 + udelay(5);
4138 +
4139 +	if (!tries) {
4140 +		printk(KERN_ERR "%s timeout\n", __func__);
+		spin_unlock_irqrestore(&fep->mii_lock, flags);
4141 +		return -ETIMEDOUT;
4142 +	}
4143 +
4144 + MCF_FEC_EIR0 = FEC_ENET_MII;
4145 + retval = MCF_FEC_MMFR0;
4146 + spin_unlock_irqrestore(&fep->mii_lock, flags);
4147 +
4148 + return retval;
4149 +}
4150 +
4151 +
4152 +static int coldfire_fec_mdio_read(struct mii_bus *bus,
4153 + int phy_id, int reg)
4154 +{
4155 + int ret;
4156 + ret = fec_mdio_transfer(bus, phy_id, reg,
4157 + mk_mii_read(reg));
4158 + return ret;
4159 +}
4160 +
4161 +static int coldfire_fec_mdio_write(struct mii_bus *bus,
4162 + int phy_id, int reg, u16 data)
4163 +{
4164 + return fec_mdio_transfer(bus, phy_id, reg,
4165 + mk_mii_write(reg, data));
4166 +}
4167 +
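+/* phylib adjust_link callback for the PHY on external port 1
+ * (switch_adjust_link2 is the analogue for port 2): track link, speed and
+ * duplex changes, drop dynamically learned entries for the port when the
+ * link goes down, and notify the registered user process via SIGUSR1. */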
4168 +static void switch_adjust_link1(struct net_device *dev)
4169 +{
4170 + struct switch_enet_private *priv = netdev_priv(dev);
4171 + struct phy_device *phydev1 = priv->phydev[0];
4172 + int new_state = 0;
4173 +
4174 + if (phydev1->link != PHY_DOWN) {
4175 + if (phydev1->duplex != priv->phy1_duplex) {
4176 + new_state = 1;
4177 + priv->phy1_duplex = phydev1->duplex;
4178 + }
4179 +
4180 + if (phydev1->speed != priv->phy1_speed) {
4181 + new_state = 1;
4182 + priv->phy1_speed = phydev1->speed;
4183 + }
4184 +
4185 + if (priv->phy1_old_link == PHY_DOWN) {
4186 + new_state = 1;
4187 + priv->phy1_old_link = phydev1->link;
4188 + }
4189 + } else if (priv->phy1_old_link) {
4190 + new_state = 1;
4191 + priv->phy1_old_link = PHY_DOWN;
4192 + priv->phy1_speed = 0;
4193 + priv->phy1_duplex = -1;
4194 + }
4195 +
4196 + if (new_state) {
4197 + ports_link_status.port1_link_status = phydev1->link;
4198 + if (phydev1->link == PHY_DOWN)
4199 + esw_atable_dynamicms_del_entries_for_port(priv, 1);
4200 +
4201 + /*Send the new status to user space*/
4202 + if (user_pid != 1)
4203 + sys_tkill(user_pid, SIGUSR1);
4204 + }
4205 +}
4206 +
4207 +static void switch_adjust_link2(struct net_device *dev)
4208 +{
4209 + struct switch_enet_private *priv = netdev_priv(dev);
4210 + struct phy_device *phydev2 = priv->phydev[1];
4211 + int new_state = 0;
4212 +
4213 + if (phydev2->link != PHY_DOWN) {
4214 + if (phydev2->duplex != priv->phy2_duplex) {
4215 + new_state = 1;
4216 + priv->phy2_duplex = phydev2->duplex;
4217 + }
4218 +
4219 + if (phydev2->speed != priv->phy2_speed) {
4220 + new_state = 1;
4221 + priv->phy2_speed = phydev2->speed;
4222 + }
4223 +
4224 + if (priv->phy2_old_link == PHY_DOWN) {
4225 + new_state = 1;
4226 + priv->phy2_old_link = phydev2->link;
4227 + }
4228 + } else if (priv->phy2_old_link) {
4229 + new_state = 1;
4230 + priv->phy2_old_link = PHY_DOWN;
4231 + priv->phy2_speed = 0;
4232 + priv->phy2_duplex = -1;
4233 + }
4234 +
4235 + if (new_state) {
4236 + ports_link_status.port2_link_status = phydev2->link;
4237 + if (phydev2->link == PHY_DOWN)
4238 + esw_atable_dynamicms_del_entries_for_port(priv, 2);
4239 +
4240 + /*Send the new status to user space*/
4241 + if (user_pid != 1)
4242 + sys_tkill(user_pid, SIGUSR1);
4243 + }
4244 +}
4245 +
4246 +static int coldfire_switch_init_phy(struct net_device *dev)
4247 +{
4248 + struct switch_enet_private *priv = netdev_priv(dev);
4249 + struct phy_device *phydev[SWITCH_EPORT_NUMBER] = {NULL, NULL};
4250 + int i, startnode = 0;
4251 +
4252 + /* search for connect PHY device */
4253 + for (i = 0; i < PHY_MAX_ADDR; i++) {
4254 + struct phy_device *const tmp_phydev =
4255 + priv->mdio_bus->phy_map[i];
4256 +
4257 + if (!tmp_phydev)
4258 + continue;
4259 +
4260 +#ifdef CONFIG_FEC_SHARED_PHY
4261 + if (priv->index == 0)
4262 + phydev[i] = tmp_phydev;
4263 + else if (priv->index == 1) {
4264 + if (startnode == 1) {
4265 + phydev[i] = tmp_phydev;
4266 + startnode = 0;
4267 + } else {
4268 + startnode++;
4269 + continue;
4270 + }
4271 + } else
4272 +			printk(KERN_INFO "%s: more than 2 PHYs "
4273 +				"(%d) sharing one mdio bus is "
4274 +				"not supported\n",
4275 +				__func__, startnode);
4277 +#else
4278 + phydev[i] = tmp_phydev;
4279 +#endif
4280 + }
4281 +
4282 + /* now we are supposed to have a proper phydev, to attach to... */
4283 + if ((!phydev[0]) && (!phydev[1])) {
4284 +		printk(KERN_INFO "%s: didn't find any PHY device at all\n",
4285 + dev->name);
4286 + return -ENODEV;
4287 + }
4288 +
4289 + priv->phy1_link = PHY_DOWN;
4290 + priv->phy1_old_link = PHY_DOWN;
4291 + priv->phy1_speed = 0;
4292 + priv->phy1_duplex = -1;
4293 +
4294 + priv->phy2_link = PHY_DOWN;
4295 + priv->phy2_old_link = PHY_DOWN;
4296 + priv->phy2_speed = 0;
4297 + priv->phy2_duplex = -1;
4298 +
4299 + phydev[0] = phy_connect(dev, dev_name(&phydev[0]->dev),
4300 + &switch_adjust_link1, 0, PHY_INTERFACE_MODE_MII);
4301 + if (IS_ERR(phydev[0])) {
4302 + printk(KERN_ERR " %s phy_connect failed\n", __func__);
4303 + return PTR_ERR(phydev[0]);
4304 + }
4305 +
4306 + phydev[1] = phy_connect(dev, dev_name(&phydev[1]->dev),
4307 + &switch_adjust_link2, 0, PHY_INTERFACE_MODE_MII);
4308 + if (IS_ERR(phydev[1])) {
4309 + printk(KERN_ERR " %s phy_connect failed\n", __func__);
4310 + return PTR_ERR(phydev[1]);
4311 + }
4312 +
4313 + for (i = 0; i < SWITCH_EPORT_NUMBER; i++) {
4314 + printk(KERN_INFO "attached phy %i to driver %s\n",
4315 + phydev[i]->addr, phydev[i]->drv->name);
4316 + priv->phydev[i] = phydev[i];
4317 + }
4318 +
4319 + return 0;
4320 +}
4321 +/* -----------------------------------------------------------------------*/
4322 +static int switch_enet_open(struct net_device *dev)
4323 +{
4324 + struct switch_enet_private *fep = netdev_priv(dev);
4325 + volatile switch_t *fecp;
4326 + int i;
4327 +
4328 + fecp = (volatile switch_t *)fep->hwp;
4329 + /* I should reset the ring buffers here, but I don't yet know
4330 + * a simple way to do that.
4331 + */
4332 + switch_set_mac_address(dev);
4333 +
4334 + fep->phy1_link = 0;
4335 + fep->phy2_link = 0;
4336 +
4337 + coldfire_switch_init_phy(dev);
4338 + for (i = 0; i < SWITCH_EPORT_NUMBER; i++) {
4339 + phy_write(fep->phydev[i], MII_BMCR, BMCR_RESET);
4340 + phy_start(fep->phydev[i]);
4341 + }
4342 +
4343 + fep->phy1_old_link = 0;
4344 + fep->phy2_old_link = 0;
4345 + fep->phy1_link = 1;
4346 + fep->phy2_link = 1;
4347 +
4348 + /* no phy, go full duplex, it's most likely a hub chip */
4349 + switch_restart(dev, 1);
4350 +
4351 +	/* If this is the first open of the FEC we need do nothing more;
4352 +	 * otherwise the FEC has to be restarted. */
4353 + if (fep->sequence_done == 0)
4354 + switch_restart(dev, 1);
4355 + else
4356 + fep->sequence_done = 0;
4357 +
4358 + fep->currTime = 0;
4359 + fep->learning_irqhandle_enable = 0;
4360 +
4361 + MCF_ESW_PER = 0x70007;
4362 + fecp->ESW_DBCR = MCF_ESW_DBCR_P0 | MCF_ESW_DBCR_P1 | MCF_ESW_DBCR_P2;
4363 + fecp->ESW_DMCR = MCF_ESW_DMCR_P0 | MCF_ESW_DMCR_P1 | MCF_ESW_DMCR_P2;
4364 +
4365 + netif_start_queue(dev);
4366 + fep->opened = 1;
4367 +
4368 + return 0;
4369 +}
4370 +
4371 +static int switch_enet_close(struct net_device *dev)
4372 +{
4373 + struct switch_enet_private *fep = netdev_priv(dev);
4374 + int i;
4375 +
4376 + /* Don't know what to do yet.*/
4377 + fep->opened = 0;
4378 + netif_stop_queue(dev);
4379 + switch_stop(dev);
4380 +
4381 + for (i = 0; i < SWITCH_EPORT_NUMBER; i++) {
4382 +		phy_stop(fep->phydev[i]);
4383 +		phy_write(fep->phydev[i], MII_BMCR, BMCR_PDOWN);
4384 +		phy_disconnect(fep->phydev[i]);
4385 + }
4386 +
4387 + return 0;
4388 +}
4389 +
4390 +/* Set or clear the multicast filter for this adaptor.
4391 + * Skeleton taken from sunlance driver.
4392 + * The CPM Ethernet implementation allows Multicast as well as individual
4393 + * MAC address filtering. Some of the drivers check to make sure it is
4394 + * a group multicast address, and discard those that are not. I guess I
4395 + * will do the same for now, but just remove the test if you want
4396 + * individual filtering as well (do the upper net layers want or support
4397 + * this kind of feature?).
4398 + */
4399 +
4400 +#define HASH_BITS 6 /* #bits in hash */
4401 +#define CRC32_POLY 0xEDB88320
4402 +
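+/* Note: this handler currently only computes the CRC32 hash for each
+ * multicast address; the hardware hash filter registers are not
+ * programmed here. */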
4403 +static void set_multicast_list(struct net_device *dev)
4404 +{
4405 + struct switch_enet_private *fep;
4406 + volatile switch_t *ep;
4407 + unsigned int i, bit, data, crc;
4408 + struct netdev_hw_addr *ha;
4409 +
4410 + fep = netdev_priv(dev);
4411 + ep = fep->hwp;
4412 +
4413 + if (dev->flags & IFF_PROMISC) {
4414 + printk(KERN_INFO "%s IFF_PROMISC\n", __func__);
4415 + } else {
4416 + if (dev->flags & IFF_ALLMULTI)
4417 + /* Catch all multicast addresses, so set the
4418 + * filter to all 1's.
4419 + */
4420 + printk(KERN_INFO "%s IFF_ALLMULTI\n", __func__);
4421 + else {
4422 + netdev_for_each_mc_addr(ha, dev) {
4423 + if (!(ha->addr[0] & 1))
4424 + continue;
4425 +
4426 + /* calculate crc32 value of mac address
4427 + */
4428 + crc = 0xffffffff;
4429 +
4430 + for (i = 0; i < dev->addr_len; i++) {
4431 + data = ha->addr[i];
4432 + for (bit = 0; bit < 8; bit++,
4433 + data >>= 1) {
4434 + crc = (crc >> 1) ^
4435 + (((crc ^ data) & 1) ?
4436 + CRC32_POLY : 0);
4437 + }
4438 + }
4439 +
4440 + }
4441 + }
4442 + }
4443 +}
4444 +
4445 +/* Set a MAC change in hardware.*/
4446 +static void switch_set_mac_address(struct net_device *dev)
4447 +{
4448 + volatile switch_t *fecp;
4449 +
4450 + fecp = ((struct switch_enet_private *)netdev_priv(dev))->hwp;
4451 +}
4452 +
4453 +static void switch_hw_init(void)
4454 +{
4455 + /* GPIO config - RMII mode for both MACs */
4456 + MCF_GPIO_PAR_FEC = (MCF_GPIO_PAR_FEC &
4457 + MCF_GPIO_PAR_FEC_FEC_MASK) |
4458 + MCF_GPIO_PAR_FEC_FEC_RMII0FUL_1FUL;
4459 +
4460 + /* Initialize MAC 0/1 */
4461 + /* RCR */
4462 + MCF_FEC_RCR0 = (MCF_FEC_RCR_PROM | MCF_FEC_RCR_RMII_MODE |
4463 + MCF_FEC_RCR_MAX_FL(1522) | MCF_FEC_RCR_CRC_FWD);
4464 + MCF_FEC_RCR1 = (MCF_FEC_RCR_PROM | MCF_FEC_RCR_RMII_MODE |
4465 + MCF_FEC_RCR_MAX_FL(1522) | MCF_FEC_RCR_CRC_FWD);
4466 + /* TCR */
4467 + MCF_FEC_TCR0 = MCF_FEC_TCR_FDEN;
4468 + MCF_FEC_TCR1 = MCF_FEC_TCR_FDEN;
4469 + /* ECR */
4470 +#ifdef MODELO_BUFFER
4471 + MCF_FEC_ECR0 = MCF_FEC_ECR_ETHER_EN | MCF_FEC_ECR_ENA_1588;
4472 + MCF_FEC_ECR1 = MCF_FEC_ECR_ETHER_EN | MCF_FEC_ECR_ENA_1588;
4473 +#else
4474 + MCF_FEC_ECR0 = MCF_FEC_ECR_ETHER_EN;
4475 + MCF_FEC_ECR1 = MCF_FEC_ECR_ETHER_EN;
4476 +#endif
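+	/* MII speed: the MSCR divider is derived from the internal bus clock
+	 * (MCF_CLK / 2) and appears intended to keep MDC at or below the
+	 * standard 2.5 MHz limit. */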
4477 + MCF_FEC_MSCR0 = ((((MCF_CLK / 2) / (2500000 / 10)) + 5) / 10) * 2;
4478 + MCF_FEC_MSCR1 = ((((MCF_CLK / 2) / (2500000 / 10)) + 5) / 10) * 2;
4479 +
4480 + MCF_FEC_EIMR0 = FEC_ENET_TXF | FEC_ENET_RXF;
4481 + MCF_FEC_EIMR1 = FEC_ENET_TXF | FEC_ENET_RXF;
4482 + /*MCF_PPMHR0*/
4483 + MCF_PPMCR0 = 0;
4484 +}
4485 +
4486 +static const struct net_device_ops switch_netdev_ops = {
4487 + .ndo_open = switch_enet_open,
4488 + .ndo_stop = switch_enet_close,
4489 + .ndo_start_xmit = switch_enet_start_xmit,
4490 + .ndo_set_multicast_list = set_multicast_list,
4491 + .ndo_do_ioctl = switch_enet_ioctl,
4492 + .ndo_tx_timeout = switch_timeout,
4493 +};
4494 +
4495 +/* Initialize the FEC Ethernet.
4496 + *
4497 + * XXX: We need to clean up on failure exits here.
4498 + */
4500 +static int switch_enet_init(struct platform_device *pdev)
4501 +{
4502 + struct net_device *dev = platform_get_drvdata(pdev);
4503 + struct switch_enet_private *fep = netdev_priv(dev);
4504 + unsigned long mem_addr;
4505 + cbd_t *bdp;
4506 + cbd_t *cbd_base;
4507 + volatile switch_t *fecp;
4508 + int i, j;
4509 + struct coldfire_switch_platform_data *plat =
4510 + pdev->dev.platform_data;
4511 +
4512 + /* Allocate memory for buffer descriptors.
4513 + */
4514 + mem_addr = __get_free_page(GFP_DMA);
4515 + if (mem_addr == 0) {
4516 + printk(KERN_ERR "Switch: allocate descriptor memory failed?\n");
4517 + return -ENOMEM;
4518 + }
4519 +
4520 + spin_lock_init(&fep->hw_lock);
4521 + spin_lock_init(&fep->mii_lock);
4522 +
4523 + /* Create an Ethernet device instance.
4524 + */
4525 + fecp = (volatile switch_t *)plat->switch_hw[0];
4526 + fep->hwp = fecp;
4527 + fep->netdev = dev;
4528 +
4529 + /*
4530 + * SWITCH CONFIGURATION
4531 + */
4532 + fecp->ESW_MODE = MCF_ESW_MODE_SW_RST;
4533 + udelay(10);
4534 + /* enable switch*/
4535 + fecp->ESW_MODE = MCF_ESW_MODE_STATRST;
4536 + fecp->ESW_MODE = MCF_ESW_MODE_SW_EN;
4537 +
4538 + /* Enable transmit/receive on all ports */
4539 + fecp->ESW_PER = 0xffffffff;
4540 +
4541 + /* Management port configuration,
4542 + * make port 0 as management port */
4543 + fecp->ESW_BMPC = 0;
4544 +
4545 + /* clear all switch irq*/
4546 + fecp->switch_ievent = 0xffffffff;
4547 + fecp->switch_imask = 0;
4548 +
4549 + udelay(10);
4550 +
4551 + /* Set the Ethernet address. If using multiple Enets on the 8xx,
4552 + * this needs some work to get unique addresses.
4553 + *
4554 + * This is our default MAC address unless the user changes
4555 + * it via eth_mac_addr (our dev->set_mac_addr handler).
4556 + */
4557 + if (plat && plat->get_mac)
4558 + plat->get_mac(dev);
4559 +
4560 + cbd_base = (cbd_t *)mem_addr;
4561 + /* XXX: missing check for allocation failure */
4562 + if (plat && plat->uncache)
4563 + plat->uncache(mem_addr);
4564 +
4565 + /* Set receive and transmit descriptor base.
4566 + */
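+	/* Both rings live in the single DMA page allocated above; the
+	 * compile-time check in modelo_switch.h guards against the ring
+	 * size constants outgrowing one page. */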
4567 + fep->rx_bd_base = cbd_base;
4568 + fep->tx_bd_base = cbd_base + RX_RING_SIZE;
4569 +
4570 + dev->base_addr = (unsigned long)fecp;
4571 +
4572 + /* The FEC Ethernet specific entries in the device structure. */
4573 + dev->watchdog_timeo = TX_TIMEOUT;
4574 + dev->netdev_ops = &switch_netdev_ops;
4575 +
4576 + fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
4577 + fep->cur_rx = fep->rx_bd_base;
4578 +
4579 + fep->skb_cur = fep->skb_dirty = 0;
4580 +
4581 + /* Initialize the receive buffer descriptors. */
4582 + bdp = fep->rx_bd_base;
4583 +
4584 + for (i = 0; i < SWITCH_ENET_RX_PAGES; i++) {
4585 +
4586 + /* Allocate a page.
4587 + */
4588 + mem_addr = __get_free_page(GFP_DMA);
4589 + /* XXX: missing check for allocation failure */
4590 + if (plat && plat->uncache)
4591 + plat->uncache(mem_addr);
4592 +
4593 + /* Initialize the BD for every fragment in the page.
4594 + */
4595 + for (j = 0; j < SWITCH_ENET_RX_FRPPG; j++) {
4596 + bdp->cbd_sc = BD_ENET_RX_EMPTY;
4597 + bdp->cbd_bufaddr = __pa(mem_addr);
4598 +#ifdef MODELO_BUFFER
4599 + bdp->bdu = 0x00000000;
4600 + bdp->ebd_status = RX_BD_INT;
4601 +#endif
4602 + mem_addr += SWITCH_ENET_RX_FRSIZE;
4603 + bdp++;
4604 + }
4605 + }
4606 +
4607 + /* Set the last buffer to wrap.
4608 + */
4609 + bdp--;
4610 + bdp->cbd_sc |= BD_SC_WRAP;
4611 +
4612 +	/* ...and the same for transmit.
4613 + */
4614 + bdp = fep->tx_bd_base;
4615 + for (i = 0, j = SWITCH_ENET_TX_FRPPG; i < TX_RING_SIZE; i++) {
4616 + if (j >= SWITCH_ENET_TX_FRPPG) {
4617 + mem_addr = __get_free_page(GFP_DMA);
4618 + j = 1;
4619 + } else {
4620 + mem_addr += SWITCH_ENET_TX_FRSIZE;
4621 + j++;
4622 + }
4623 + fep->tx_bounce[i] = (unsigned char *) mem_addr;
4624 +
4625 + /* Initialize the BD for every fragment in the page.
4626 + */
4627 + bdp->cbd_sc = 0;
4628 + bdp->cbd_bufaddr = 0;
4629 + bdp++;
4630 + }
4631 +
4632 + /* Set the last buffer to wrap.
4633 + */
4634 + bdp--;
4635 + bdp->cbd_sc |= BD_SC_WRAP;
4636 +
4637 + /* Set receive and transmit descriptor base.
4638 + */
4639 + fecp->fec_r_des_start = __pa((uint)(fep->rx_bd_base));
4640 + fecp->fec_x_des_start = __pa((uint)(fep->tx_bd_base));
4641 +
4642 + /* Install our interrupt handlers. This varies depending on
4643 + * the architecture.
4644 + */
4645 + if (plat && plat->request_intrs)
4646 + plat->request_intrs(dev, switch_enet_interrupt, dev);
4647 +
4648 + fecp->fec_r_buff_size = RX_BUFFER_SIZE;
4649 + fecp->fec_r_des_active = MCF_ESW_RDAR_R_DES_ACTIVE;
4650 +
4651 + /* setup MII interface */
4652 + if (plat && plat->set_mii)
4653 + plat->set_mii(dev);
4654 +
4655 + /* Clear and enable interrupts */
4656 + fecp->switch_ievent = 0xffffffff;
4657 + fecp->switch_imask = MCF_ESW_IMR_RXB | MCF_ESW_IMR_TXB |
4658 + MCF_ESW_IMR_RXF | MCF_ESW_IMR_TXF;
4659 + esw_clear_atable(fep);
4660 + /* Queue up command to detect the PHY and initialize the
4661 + * remainder of the interface.
4662 + */
4663 +#ifndef CONFIG_FEC_SHARED_PHY
4664 + fep->phy_addr = 0;
4665 +#else
4666 + fep->phy_addr = fep->index;
4667 +#endif
4668 +
4669 + fep->sequence_done = 1;
4670 + return 0;
4671 +}
4672 +
4673 +/* This function is called to start or restart the FEC during a link
4674 + * change. This only happens when switching between half and full
4675 + * duplex.
4676 + */
4677 +static void switch_restart(struct net_device *dev, int duplex)
4678 +{
4679 + struct switch_enet_private *fep;
4680 + cbd_t *bdp;
4681 + volatile switch_t *fecp;
4682 + int i;
4683 + struct coldfire_switch_platform_data *plat;
4684 +
4685 + fep = netdev_priv(dev);
4686 + fecp = fep->hwp;
4687 + plat = fep->pdev->dev.platform_data;
4688 + /* Whack a reset. We should wait for this.*/
4689 + MCF_FEC_ECR0 = 1;
4690 + MCF_FEC_ECR1 = 1;
4691 + udelay(10);
4692 +
4693 + fecp->ESW_MODE = MCF_ESW_MODE_SW_RST;
4694 + udelay(10);
4695 + fecp->ESW_MODE = MCF_ESW_MODE_STATRST;
4696 + fecp->ESW_MODE = MCF_ESW_MODE_SW_EN;
4697 +
4698 + /* Enable transmit/receive on all ports */
4699 + fecp->ESW_PER = 0xffffffff;
4700 +
4701 + /* Management port configuration,
4702 + * make port 0 as management port */
4703 + fecp->ESW_BMPC = 0;
4704 +
4705 + /* Clear any outstanding interrupt.
4706 + */
4707 + fecp->switch_ievent = 0xffffffff;
4708 +
4709 + /* Set station address.*/
4710 + switch_set_mac_address(dev);
4711 +
4712 + switch_hw_init();
4713 +
4714 + /* Reset all multicast.*/
4715 +
4716 + /* Set maximum receive buffer size.
4717 + */
4718 + fecp->fec_r_buff_size = PKT_MAXBLR_SIZE;
4719 +
4720 + if (plat && plat->localhw_setup)
4721 + plat->localhw_setup();
4722 + /* Set receive and transmit descriptor base.
4723 + */
4724 + fecp->fec_r_des_start = __pa((uint)(fep->rx_bd_base));
4725 + fecp->fec_x_des_start = __pa((uint)(fep->tx_bd_base));
4726 +
4727 + fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
4728 + fep->cur_rx = fep->rx_bd_base;
4729 +
4730 + /* Reset SKB transmit buffers.
4731 + */
4732 + fep->skb_cur = fep->skb_dirty = 0;
4733 + for (i = 0; i <= TX_RING_MOD_MASK; i++) {
4734 + if (fep->tx_skbuff[i] != NULL) {
4735 + dev_kfree_skb_any(fep->tx_skbuff[i]);
4736 + fep->tx_skbuff[i] = NULL;
4737 + }
4738 + }
4739 +
4740 + /* Initialize the receive buffer descriptors.
4741 + */
4742 + bdp = fep->rx_bd_base;
4743 + for (i = 0; i < RX_RING_SIZE; i++) {
4744 +
4745 + /* Initialize the BD for every fragment in the page.
4746 + */
4747 + bdp->cbd_sc = BD_ENET_RX_EMPTY;
4748 +#ifdef MODELO_BUFFER
4749 + bdp->bdu = 0x00000000;
4750 + bdp->ebd_status = RX_BD_INT;
4751 +#endif
4752 + bdp++;
4753 + }
4754 +
4755 + /* Set the last buffer to wrap.
4756 + */
4757 + bdp--;
4758 + bdp->cbd_sc |= BD_SC_WRAP;
4759 +
4760 +	/* ...and the same for transmit.
4761 + */
4762 + bdp = fep->tx_bd_base;
4763 + for (i = 0; i < TX_RING_SIZE; i++) {
4764 +
4765 + /* Initialize the BD for every fragment in the page.*/
4766 + bdp->cbd_sc = 0;
4767 + bdp->cbd_bufaddr = 0;
4768 + bdp++;
4769 + }
4770 +
4771 + /* Set the last buffer to wrap.*/
4772 + bdp--;
4773 + bdp->cbd_sc |= BD_SC_WRAP;
4774 +
4775 + fep->full_duplex = duplex;
4776 +
4777 + /* And last, enable the transmit and receive processing.*/
4778 + fecp->fec_r_buff_size = RX_BUFFER_SIZE;
4779 + fecp->fec_r_des_active = MCF_ESW_RDAR_R_DES_ACTIVE;
4780 +
4781 + /* Enable interrupts we wish to service.
4782 + */
4783 + fecp->switch_ievent = 0xffffffff;
4784 + fecp->switch_imask = MCF_ESW_IMR_RXF | MCF_ESW_IMR_TXF |
4785 + MCF_ESW_IMR_RXB | MCF_ESW_IMR_TXB;
4786 +}
4787 +
4788 +static void switch_stop(struct net_device *dev)
4789 +{
4790 + volatile switch_t *fecp;
4791 + struct switch_enet_private *fep;
4792 + struct coldfire_switch_platform_data *plat;
4793 +
4794 + fep = netdev_priv(dev);
4795 + fecp = fep->hwp;
4796 + plat = fep->pdev->dev.platform_data;
4797 + /*
4798 + ** We cannot expect a graceful transmit stop without link !!!
4799 + */
4800 + if (fep->phy1_link)
4801 + udelay(10);
4802 + if (fep->phy2_link)
4803 + udelay(10);
4804 +
4805 + /* Whack a reset. We should wait for this.
4806 + */
4807 + udelay(10);
4808 +}
4809 +
4810 +static int fec_mdio_register(struct net_device *dev)
4811 +{
4812 + int err = 0;
4813 + struct switch_enet_private *fep = netdev_priv(dev);
4814 +
4815 + fep->mdio_bus = mdiobus_alloc();
4816 + if (!fep->mdio_bus) {
4817 + printk(KERN_ERR "ethernet switch mdiobus_alloc fail\n");
4818 + return -ENOMEM;
4819 + }
4820 +
4821 + fep->mdio_bus->name = "Coldfire switch MII 0 Bus";
4822 + strcpy(fep->mdio_bus->id, "0");
4823 +
4824 + fep->mdio_bus->read = &coldfire_fec_mdio_read;
4825 + fep->mdio_bus->write = &coldfire_fec_mdio_write;
4826 + fep->mdio_bus->priv = dev;
4827 + err = mdiobus_register(fep->mdio_bus);
4828 + if (err) {
4829 + mdiobus_free(fep->mdio_bus);
4830 + printk(KERN_ERR "%s: ethernet mdiobus_register fail\n",
4831 + dev->name);
4832 + return -EIO;
4833 + }
4834 +
4835 + printk(KERN_INFO "mdiobus_register %s ok\n",
4836 + fep->mdio_bus->name);
4837 + return err;
4838 +}
4839 +
4840 +static int __devinit eth_switch_probe(struct platform_device *pdev)
4841 +{
4842 + struct net_device *dev;
4843 + int err;
4844 + struct switch_enet_private *fep;
4845 + struct task_struct *task;
4846 +
4847 + printk(KERN_INFO "Ethernet Switch Version 1.0\n");
4848 +
4849 + dev = alloc_etherdev(sizeof(struct switch_enet_private));
4850 + if (!dev) {
4851 +		printk(KERN_ERR "%s: ethernet switch alloc_etherdev failed\n",
4852 +			__func__);
4853 + return -ENOMEM;
4854 + }
4855 +
4856 + SET_NETDEV_DEV(dev, &pdev->dev);
4857 +
4858 + fep = netdev_priv(dev);
4859 + memset(fep, 0, sizeof(*fep));
4860 +
4861 + fep->pdev = pdev;
4862 + platform_set_drvdata(pdev, dev);
4863 +	printk(KERN_INFO "%s: ethernet switch port 0 init\n",
4864 +		__func__);
4865 + err = switch_enet_init(pdev);
4866 +	if (err) {
4867 +		free_netdev(dev);
4868 +		platform_set_drvdata(pdev, NULL);
+		return err;
4869 +	}
4870 +
4871 + err = fec_mdio_register(dev);
4872 + if (err) {
4873 + printk(KERN_ERR "%s: ethernet switch fec_mdio_register\n",
4874 + dev->name);
4875 + free_netdev(dev);
4876 + platform_set_drvdata(pdev, NULL);
4877 + return -ENOMEM;
4878 + }
4879 +
4880 + /* setup timer for Learning Aging function */
4881 + init_timer(&fep->timer_aging);
4882 + fep->timer_aging.function = l2switch_aging_timer;
4883 + fep->timer_aging.data = (unsigned long) fep;
4884 + fep->timer_aging.expires = jiffies + LEARNING_AGING_TIMER;
4885 + add_timer(&fep->timer_aging);
4886 +
4887 + /* register network device*/
4888 + if (register_netdev(dev) != 0) {
4889 + /* XXX: missing cleanup here */
4890 + free_netdev(dev);
4891 + platform_set_drvdata(pdev, NULL);
4892 + printk(KERN_ERR "%s: ethernet switch register_netdev fail\n",
4893 + dev->name);
4894 + return -EIO;
4895 + }
4896 +
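+	/* Start the learning thread; judging by the eswPortInfo layout in
+	 * modelo_switch.h it drains MAC/port records from the hardware
+	 * learning FIFO and feeds them into the address table. */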
4897 + task = kthread_run(switch_enet_learning, fep,
4898 + "modelo l2switch");
4899 + if (IS_ERR(task)) {
4900 + err = PTR_ERR(task);
4901 + return err;
4902 + }
4903 +
4904 + printk(KERN_INFO "%s: ethernet switch %pM\n",
4905 + dev->name, dev->dev_addr);
4906 + return 0;
4907 +}
4908 +
4909 +static int __devexit eth_switch_remove(struct platform_device *pdev)
4910 +{
4911 + int i;
4912 + struct net_device *dev;
4913 + struct switch_enet_private *fep;
4914 + struct switch_platform_private *chip;
4915 +
4916 + chip = platform_get_drvdata(pdev);
4917 + if (chip) {
4918 + for (i = 0; i < chip->num_slots; i++) {
4919 + fep = chip->fep_host[i];
4920 + dev = fep->netdev;
4921 + fep->sequence_done = 1;
4922 + unregister_netdev(dev);
4923 + free_netdev(dev);
4924 +
4925 + del_timer_sync(&fep->timer_aging);
4926 + }
4927 +
4928 + platform_set_drvdata(pdev, NULL);
4929 + kfree(chip);
4930 +
4931 + } else
4932 + printk(KERN_ERR "%s: can not get the "
4933 + "switch_platform_private %x\n", __func__,
4934 + (unsigned int)chip);
4935 +
4936 + return 0;
4937 +}
4938 +
4939 +static struct platform_driver eth_switch_driver = {
4940 + .probe = eth_switch_probe,
4941 + .remove = __devexit_p(eth_switch_remove),
4942 + .driver = {
4943 + .name = "coldfire-switch",
4944 + .owner = THIS_MODULE,
4945 + },
4946 +};
4947 +
4948 +static int __init coldfire_switch_init(void)
4949 +{
4950 + return platform_driver_register(&eth_switch_driver);
4951 +}
4952 +
4953 +static void __exit coldfire_switch_exit(void)
4954 +{
4955 + platform_driver_unregister(&eth_switch_driver);
4956 +}
4957 +
4958 +module_init(coldfire_switch_init);
4959 +module_exit(coldfire_switch_exit);
4960 +MODULE_LICENSE("GPL");
4961 --- /dev/null
4962 +++ b/drivers/net/modelo_switch.h
4963 @@ -0,0 +1,1141 @@
4964 +/****************************************************************************/
4965 +
4966 +/*
4967 + * mcfswitch -- L2 Switch Controller for Modelo ColdFire SoC
4968 + * processors.
4969 + *
4970 + * Copyright (C) 2010-2011 Freescale Semiconductor, Inc. All Rights Reserved.
4971 + *
4972 + * This program is free software; you can redistribute it and/or modify
4973 + * it under the terms of the GNU General Public License as published by
4974 + * the Free Software Foundation; either version 2 of the License, or (at
4975 + * your option) any later version.
4976 + *
4977 + */
4978 +
4979 +/****************************************************************************/
4980 +#ifndef SWITCH_H
4981 +#define SWITCH_H
4982 +/****************************************************************************/
4983 +/* The Switch stores dest/src/type, data, and checksum for receive packets.
4984 + */
4985 +#define PKT_MAXBUF_SIZE 1518
4986 +#define PKT_MINBUF_SIZE 64
4987 +#define PKT_MAXBLR_SIZE 1520
4988 +
4989 +/*
4990 + * The 5441x RX control register also contains maximum frame
4991 + * size bits.
4992 + */
4993 +#define OPT_FRAME_SIZE (PKT_MAXBUF_SIZE << 16)
4994 +
4995 +/*
4996 + * Some hardware gets its MAC address out of local flash memory.
4997 + * If this is non-zero then assume it is the address to get the MAC from.
4998 + */
4999 +#define FEC_FLASHMAC 0
5000 +
5001 +/* The number of Tx and Rx buffers. These are allocated from the page
5002 + * pool. The code may assume these are a power of two, so it is best
5003 + * to keep them that size.
5004 + * We don't need to allocate pages for the transmitter. We just use
5005 + * the skbuffer directly.
5006 + */
5007 +#ifdef CONFIG_SWITCH_DMA_USE_SRAM
5008 +#define SWITCH_ENET_RX_PAGES 6
5009 +#else
5010 +#define SWITCH_ENET_RX_PAGES 8
5011 +#endif
5012 +
5013 +#define SWITCH_ENET_RX_FRSIZE 2048
5014 +#define SWITCH_ENET_RX_FRPPG (PAGE_SIZE / SWITCH_ENET_RX_FRSIZE)
5015 +#define RX_RING_SIZE (SWITCH_ENET_RX_FRPPG * SWITCH_ENET_RX_PAGES)
5016 +#define SWITCH_ENET_TX_FRSIZE 2048
5017 +#define SWITCH_ENET_TX_FRPPG (PAGE_SIZE / SWITCH_ENET_TX_FRSIZE)
5018 +
5019 +#ifdef CONFIG_SWITCH_DMA_USE_SRAM
5020 +#define TX_RING_SIZE 8 /* Must be power of two */
5021 +#define TX_RING_MOD_MASK 7 /* for this to work */
5022 +#else
5023 +#define TX_RING_SIZE 16 /* Must be power of two */
5024 +#define TX_RING_MOD_MASK 15 /* for this to work */
5025 +#endif
5026 +
5027 +#define SWITCH_EPORT_NUMBER 2
5028 +
5029 +#if (((RX_RING_SIZE + TX_RING_SIZE) * 8) > PAGE_SIZE)
5030 +#error "L2SWITCH: descriptor ring size constants too large"
5031 +#endif
5032 +/*-----------------------------------------------------------------------*/
5033 +typedef struct l2switch_output_queue_status {
5034 + unsigned long ESW_MMSR;
5035 + unsigned long ESW_LMT;
5036 + unsigned long ESW_LFC;
5037 + unsigned long ESW_PCSR;
5038 + unsigned long ESW_IOSR;
5039 + unsigned long ESW_QWT;
5040 + unsigned long esw_reserved;
5041 + unsigned long ESW_P0BCT;
5042 +} esw_output_queue_status;
5043 +
5044 +typedef struct l2switch_statistics_status {
5045 + /*
5046 + * Total number of incoming frames processed
5047 + * but discarded in switch
5048 + */
5049 + unsigned long ESW_DISCN;
5050 + /*Sum of bytes of frames counted in ESW_DISCN*/
5051 + unsigned long ESW_DISCB;
5052 + /*
5053 + * Total number of incoming frames processed
5054 + * but not discarded in switch
5055 + */
5056 + unsigned long ESW_NDISCN;
5057 + /*Sum of bytes of frames counted in ESW_NDISCN*/
5058 + unsigned long ESW_NDISCB;
5059 +} esw_statistics_status;
5060 +
5061 +typedef struct l2switch_port_statistics_status {
5062 + /*outgoing frames discarded due to transmit queue congestion*/
5063 + unsigned long MCF_ESW_POQC;
5064 + /*incoming frames discarded due to VLAN domain mismatch*/
5065 + unsigned long MCF_ESW_PMVID;
5066 + /*incoming frames discarded due to untagged discard*/
5067 + unsigned long MCF_ESW_PMVTAG;
5068 +	/*incoming frames discarded because the port is in blocking state*/
5069 + unsigned long MCF_ESW_PBL;
5070 +} esw_port_statistics_status;
5071 +
5072 +typedef struct l2switch {
5073 + unsigned long ESW_REVISION;
5074 + unsigned long ESW_SCRATCH;
5075 + unsigned long ESW_PER;
5076 + unsigned long reserved0[1];
5077 + unsigned long ESW_VLANV;
5078 + unsigned long ESW_DBCR;
5079 + unsigned long ESW_DMCR;
5080 + unsigned long ESW_BKLR;
5081 + unsigned long ESW_BMPC;
5082 + unsigned long ESW_MODE;
5083 + unsigned long ESW_VIMSEL;
5084 + unsigned long ESW_VOMSEL;
5085 + unsigned long ESW_VIMEN;
5086 + unsigned long ESW_VID;/*0x34*/
5087 + /*from 0x38 0x3C*/
5088 + unsigned long esw_reserved0[2];
5089 + unsigned long ESW_MCR;/*0x40*/
5090 + unsigned long ESW_EGMAP;
5091 + unsigned long ESW_INGMAP;
5092 + unsigned long ESW_INGSAL;
5093 + unsigned long ESW_INGSAH;
5094 + unsigned long ESW_INGDAL;
5095 + unsigned long ESW_INGDAH;
5096 + unsigned long ESW_ENGSAL;
5097 + unsigned long ESW_ENGSAH;
5098 + unsigned long ESW_ENGDAL;
5099 + unsigned long ESW_ENGDAH;
5100 + unsigned long ESW_MCVAL;/*0x6C*/
5101 + /*from 0x70--0x7C*/
5102 + unsigned long esw_reserved1[4];
5103 + unsigned long ESW_MMSR;/*0x80*/
5104 + unsigned long ESW_LMT;
5105 + unsigned long ESW_LFC;
5106 + unsigned long ESW_PCSR;
5107 + unsigned long ESW_IOSR;
5108 + unsigned long ESW_QWT;/*0x94*/
5109 + unsigned long esw_reserved2[1];/*0x98*/
5110 + unsigned long ESW_P0BCT;/*0x9C*/
5111 + /*from 0xA0-0xB8*/
5112 + unsigned long esw_reserved3[7];
5113 + unsigned long ESW_P0FFEN;/*0xBC*/
5114 + unsigned long ESW_PSNP[8];
5115 + unsigned long ESW_IPSNP[8];
5116 + /*port0-port2 VLAN Priority resolution map 0xFC0D_C100-C108*/
5117 + unsigned long ESW_PVRES[3];
5118 + /*from 0x10C-0x13C*/
5119 + unsigned long esw_reserved4[13];
5120 + unsigned long ESW_IPRES;/*0x140*/
5121 + /*from 0x144-0x17C*/
5122 + unsigned long esw_reserved5[15];
5123 +
5124 + /*port0-port2 Priority Configuration 0xFC0D_C180-C188*/
5125 + unsigned long ESW_PRES[3];
5126 + /*from 0x18C-0x1FC*/
5127 + unsigned long esw_reserved6[29];
5128 +
5129 + /*port0-port2 VLAN ID 0xFC0D_C200-C208*/
5130 + unsigned long ESW_PID[3];
5131 + /*from 0x20C-0x27C*/
5132 + unsigned long esw_reserved7[29];
5133 +
5134 + /*port0-port2 VLAN domain resolution entry 0xFC0D_C280-C2FC*/
5135 + unsigned long ESW_VRES[32];
5136 +
5137 + unsigned long ESW_DISCN;/*0x300*/
5138 + unsigned long ESW_DISCB;
5139 + unsigned long ESW_NDISCN;
5140 + unsigned long ESW_NDISCB;/*0xFC0DC30C*/
5141 + /*per port statistics 0xFC0DC310_C33C*/
5142 + esw_port_statistics_status port_statistics_status[3];
5143 + /*from 0x340-0x400*/
5144 + unsigned long esw_reserved8[48];
5145 +
5146 + /*0xFC0DC400---0xFC0DC418*/
5147 + /*unsigned long MCF_ESW_ISR;*/
5148 + unsigned long switch_ievent; /* Interrupt event reg */
5149 + /*unsigned long MCF_ESW_IMR;*/
5150 + unsigned long switch_imask; /* Interrupt mask reg */
5151 + /*unsigned long MCF_ESW_RDSR;*/
5152 + unsigned long fec_r_des_start; /* Receive descriptor ring */
5153 + /*unsigned long MCF_ESW_TDSR;*/
5154 + unsigned long fec_x_des_start; /* Transmit descriptor ring */
5155 + /*unsigned long MCF_ESW_MRBR;*/
5156 + unsigned long fec_r_buff_size; /* Maximum receive buff size */
5157 + /*unsigned long MCF_ESW_RDAR;*/
5158 + unsigned long fec_r_des_active; /* Receive descriptor reg */
5159 + /*unsigned long MCF_ESW_TDAR;*/
5160 + unsigned long fec_x_des_active; /* Transmit descriptor reg */
5161 + /*from 0x420-0x4FC*/
5162 + unsigned long esw_reserved9[57];
5163 +
5164 + /*0xFC0DC500---0xFC0DC508*/
5165 + unsigned long ESW_LREC0;
5166 + unsigned long ESW_LREC1;
5167 + unsigned long ESW_LSR;
5168 +} switch_t;
5169 +
5170 +typedef struct _64bTableEntry {
5171 + unsigned int lo; /* lower 32 bits */
5172 + unsigned int hi; /* upper 32 bits */
5173 +} AddrTable64bEntry;
5174 +
5175 +typedef struct l2switchaddrtable {
5176 + AddrTable64bEntry eswTable64bEntry[2048];
5177 +} eswAddrTable_t;
5178 +
5179 +/*unsigned long MCF_ESW_LOOKUP_MEM;*/
5180 +#define MCF_ESW_REVISION (*(volatile unsigned long *)(0xFC0DC000))
5181 +#define MCF_ESW_PER (*(volatile unsigned long *)(0xFC0DC008))
5182 +#define MCF_ESW_VLANV (*(volatile unsigned long *)(0xFC0DC010))
5183 +#define MCF_ESW_DBCR (*(volatile unsigned long *)(0xFC0DC014))
5184 +#define MCF_ESW_DMCR (*(volatile unsigned long *)(0xFC0DC018))
5185 +#define MCF_ESW_BKLR (*(volatile unsigned long *)(0xFC0DC01C))
5186 +#define MCF_ESW_BMPC (*(volatile unsigned long *)(0xFC0DC020))
5187 +#define MCF_ESW_MODE (*(volatile unsigned long *)(0xFC0DC024))
5188 +
5189 +#define MCF_ESW_ISR (*(volatile unsigned long *)(0xFC0DC400))
5190 +#define MCF_ESW_IMR (*(volatile unsigned long *)(0xFC0DC404))
5191 +#define MCF_ESW_TDAR (*(volatile unsigned long *)(0xFC0DC418))
5192 +#define MCF_ESW_LOOKUP_MEM (*(volatile unsigned long *)(0xFC0E0000))
5193 +
5194 +#define MCF_PPMCR0 (*(volatile unsigned short *)(0xFC04002D))
5195 +#define MCF_PPMHR0 (*(volatile unsigned long *)(0xFC040030))
5196 +
5197 +#define MCF_FEC_EIR0 (*(volatile unsigned long *)(0xFC0D4004))
5198 +#define MCF_FEC_EIR1 (*(volatile unsigned long *)(0xFC0D8004))
5199 +#define MCF_FEC_EIMR0 (*(volatile unsigned long *)(0xFC0D4008))
5200 +#define MCF_FEC_EIMR1 (*(volatile unsigned long *)(0xFC0D8008))
5201 +#define MCF_FEC_MMFR0 (*(volatile unsigned long *)(0xFC0D4040))
5202 +#define MCF_FEC_MMFR1 (*(volatile unsigned long *)(0xFC0D8040))
5203 +#define MCF_FEC_MSCR0 (*(volatile unsigned long *)(0xFC0D4044))
5204 +#define MCF_FEC_MSCR1 (*(volatile unsigned long *)(0xFC0D8044))
5205 +#define MCF_FEC_RCR0 (*(volatile unsigned long *)(0xFC0D4084))
5206 +#define MCF_FEC_RCR1 (*(volatile unsigned long *)(0xFC0D8084))
5207 +#define MCF_FEC_TCR0 (*(volatile unsigned long *)(0xFC0D40C4))
5208 +#define MCF_FEC_TCR1 (*(volatile unsigned long *)(0xFC0D80C4))
5209 +#define MCF_FEC_ECR0 (*(volatile unsigned long *)(0xFC0D4024))
5210 +#define MCF_FEC_ECR1 (*(volatile unsigned long *)(0xFC0D8024))
5211 +
5212 +
5213 +#define MCF_FEC_RCR_PROM (0x00000008)
5214 +#define MCF_FEC_RCR_RMII_MODE (0x00000100)
5215 +#define MCF_FEC_RCR_MAX_FL(x) (((x)&0x00003FFF)<<16)
5216 +#define MCF_FEC_RCR_CRC_FWD (0x00004000)
5217 +
5218 +#define MCF_FEC_TCR_FDEN (0x00000004)
5219 +
5220 +#define MCF_FEC_ECR_ETHER_EN (0x00000002)
5221 +#define MCF_FEC_ECR_ENA_1588 (0x00000010)
5222 +
5223 +/*-------------ioctl command ---------------------------------------*/
5224 +#define ESW_SET_LEARNING_CONF 0x9101
5225 +#define ESW_GET_LEARNING_CONF 0x9201
5226 +#define ESW_SET_BLOCKING_CONF 0x9102
5227 +#define ESW_GET_BLOCKING_CONF 0x9202
5228 +#define ESW_SET_MULTICAST_CONF 0x9103
5229 +#define ESW_GET_MULTICAST_CONF 0x9203
5230 +#define ESW_SET_BROADCAST_CONF 0x9104
5231 +#define ESW_GET_BROADCAST_CONF 0x9204
5232 +#define ESW_SET_PORTENABLE_CONF 0x9105
5233 +#define ESW_GET_PORTENABLE_CONF 0x9205
5234 +#define ESW_SET_IP_SNOOP_CONF 0x9106
5235 +#define ESW_GET_IP_SNOOP_CONF 0x9206
5236 +#define ESW_SET_PORT_SNOOP_CONF 0x9107
5237 +#define ESW_GET_PORT_SNOOP_CONF 0x9207
5238 +#define ESW_SET_PORT_MIRROR_CONF 0x9108
5239 +#define ESW_GET_PORT_MIRROR_CONF 0x9208
5240 +#define ESW_SET_PIRORITY_VLAN 0x9109
5241 +#define ESW_GET_PIRORITY_VLAN 0x9209
5242 +#define ESW_SET_PIRORITY_IP 0x910A
5243 +#define ESW_GET_PIRORITY_IP 0x920A
5244 +#define ESW_SET_PIRORITY_MAC 0x910B
5245 +#define ESW_GET_PIRORITY_MAC 0x920B
5246 +#define ESW_SET_PIRORITY_DEFAULT 0x910C
5247 +#define ESW_GET_PIRORITY_DEFAULT 0x920C
5248 +#define ESW_SET_P0_FORCED_FORWARD 0x910D
5249 +#define ESW_GET_P0_FORCED_FORWARD 0x920D
5250 +#define ESW_SET_SWITCH_MODE 0x910E
5251 +#define ESW_GET_SWITCH_MODE 0x920E
5252 +#define ESW_SET_BRIDGE_CONFIG 0x910F
5253 +#define ESW_GET_BRIDGE_CONFIG 0x920F
5254 +#define ESW_SET_VLAN_OUTPUT_PROCESS 0x9110
5255 +#define ESW_GET_VLAN_OUTPUT_PROCESS 0x9210
5256 +#define ESW_SET_VLAN_INPUT_PROCESS 0x9111
5257 +#define ESW_GET_VLAN_INPUT_PROCESS 0x9211
5258 +#define ESW_SET_VLAN_DOMAIN_VERIFICATION 0x9112
5259 +#define ESW_GET_VLAN_DOMAIN_VERIFICATION 0x9212
5260 +#define ESW_SET_VLAN_RESOLUTION_TABLE 0x9113
5261 +#define ESW_GET_VLAN_RESOLUTION_TABLE 0x9213
5262 +#define ESW_GET_ENTRY_PORT_NUMBER 0x9214
5263 +#define ESW_GET_LOOKUP_TABLE 0x9215
5264 +#define ESW_GET_PORT_STATUS 0x9216
5265 +#define ESW_SET_VLAN_ID 0x9114
5266 +#define ESW_SET_VLAN_ID_CLEARED 0x9115
5267 +#define ESW_SET_PORT_IN_VLAN_ID 0x9116
5268 +#define ESW_SET_PORT_ENTRY_EMPTY 0x9117
5269 +#define ESW_SET_OTHER_PORT_ENTRY_EMPTY 0x9118
5270 +#define ESW_GET_PORT_ALL_STATUS 0x9217
5271 +#define ESW_SET_PORT_MIRROR_CONF_PORT_MATCH 0x9119
5272 +#define ESW_SET_PORT_MIRROR_CONF_ADDR_MATCH 0x911A
5273 +
5274 +#define ESW_GET_STATISTICS_STATUS 0x9221
5275 +#define ESW_SET_OUTPUT_QUEUE_MEMORY 0x9125
5276 +#define ESW_GET_OUTPUT_QUEUE_STATUS 0x9225
5277 +#define ESW_UPDATE_STATIC_MACTABLE 0x9226
5278 +#define ESW_CLEAR_ALL_MACTABLE 0x9227
5279 +#define ESW_GET_USER_PID 0x9228
5280 +
5281 +typedef struct _eswIOCTL_PORT_CONF {
5282 + int port;
5283 + int enable;
5284 +} eswIoctlPortConfig;
5285 +
5286 +typedef struct _eswIOCTL_PORT_EN_CONF {
5287 + int port;
5288 + int tx_enable;
5289 + int rx_enable;
5290 +} eswIoctlPortEnableConfig;
5291 +
5292 +typedef struct _eswIOCTL_IP_SNOOP_CONF {
5293 + int mode;
5294 + unsigned long ip_header_protocol;
5295 +} eswIoctlIpsnoopConfig;
5296 +
5297 +typedef struct _eswIOCTL_P0_FORCED_FORWARD_CONF {
5298 + int port1;
5299 + int port2;
5300 + int enable;
5301 +} eswIoctlP0ForcedForwardConfig;
5302 +
5303 +typedef struct _eswIOCTL_PORT_SNOOP_CONF {
5304 + int mode;
5305 + unsigned short compare_port;
5306 + int compare_num;
5307 +} eswIoctlPortsnoopConfig;
5308 +
5309 +typedef struct _eswIOCTL_PORT_Mirror_CONF {
5310 + int mirror_port;
5311 + int port;
5312 + int egress_en;
5313 + int ingress_en;
5314 + int egress_mac_src_en;
5315 + int egress_mac_des_en;
5316 + int ingress_mac_src_en;
5317 + int ingress_mac_des_en;
5318 + unsigned char *src_mac;
5319 + unsigned char *des_mac;
5320 + int mirror_enable;
5321 +} eswIoctlPortMirrorConfig;
5322 +
5323 +struct eswIoctlMirrorCfgPortMatch {
5324 + int mirror_port;
5325 + int port_match_en;
5326 + int port;
5327 +};
5328 +
5329 +struct eswIoctlMirrorCfgAddrMatch {
5330 + int mirror_port;
5331 + int addr_match_en;
5332 + unsigned char *mac_addr;
5333 +};
5334 +
5335 +typedef struct _eswIOCTL_PRIORITY_VLAN_CONF {
5336 + int port;
5337 + int func_enable;
5338 + int vlan_pri_table_num;
5339 + int vlan_pri_table_value;
5340 +} eswIoctlPriorityVlanConfig;
5341 +
5342 +typedef struct _eswIOCTL_PRIORITY_IP_CONF {
5343 + int port;
5344 + int func_enable;
5345 + int ipv4_en;
5346 + int ip_priority_num;
5347 + int ip_priority_value;
5348 +} eswIoctlPriorityIPConfig;
5349 +
5350 +typedef struct _eswIOCTL_PRIORITY_MAC_CONF {
5351 + int port;
5352 +} eswIoctlPriorityMacConfig;
5353 +
5354 +typedef struct _eswIOCTL_PRIORITY_DEFAULT_CONF {
5355 + int port;
5356 + unsigned char priority_value;
5357 +} eswIoctlPriorityDefaultConfig;
5358 +
5359 +typedef struct _eswIOCTL_IRQ_STATUS {
5360 + unsigned long isr;
5361 + unsigned long imr;
5362 + unsigned long rx_buf_pointer;
5363 + unsigned long tx_buf_pointer;
5364 + unsigned long rx_max_size;
5365 + unsigned long rx_buf_active;
5366 + unsigned long tx_buf_active;
5367 +} eswIoctlIrqStatus;
5368 +
5369 +typedef struct _eswIOCTL_PORT_Mirror_STATUS {
5370 + unsigned long ESW_MCR;
5371 + unsigned long ESW_EGMAP;
5372 + unsigned long ESW_INGMAP;
5373 + unsigned long ESW_INGSAL;
5374 + unsigned long ESW_INGSAH;
5375 + unsigned long ESW_INGDAL;
5376 + unsigned long ESW_INGDAH;
5377 + unsigned long ESW_ENGSAL;
5378 + unsigned long ESW_ENGSAH;
5379 + unsigned long ESW_ENGDAL;
5380 + unsigned long ESW_ENGDAH;
5381 + unsigned long ESW_MCVAL;
5382 +} eswIoctlPortMirrorStatus;
5383 +
5384 +typedef struct _eswIOCTL_VLAN_OUTPUT_CONF {
5385 + int port;
5386 + int mode;
5387 +} eswIoctlVlanOutputConfig;
5388 +
5389 +typedef struct _eswIOCTL_VLAN_INPUT_CONF {
5390 + int port;
5391 + int mode;
5392 + unsigned short port_vlanid;
5393 +} eswIoctlVlanInputConfig;
5394 +
5395 +typedef struct _eswIOCTL_VLAN_DOMAIN_VERIFY_CONF {
5396 + int port;
5397 + int vlan_domain_verify_en;
5398 + int vlan_discard_unknown_en;
5399 +} eswIoctlVlanVerificationConfig;
5400 +
5401 +typedef struct _eswIOCTL_VLAN_RESOULATION_TABLE {
5402 + unsigned short port_vlanid;
5403 + unsigned char vlan_domain_port;
5404 + unsigned char vlan_domain_num;
5405 +} eswIoctlVlanResoultionTable;
5406 +
5407 +struct eswVlanTableItem {
5408 + eswIoctlVlanResoultionTable table[32];
5409 + unsigned char valid_num;
5410 +};
5411 +
5412 +typedef struct _eswIOCTL_VLAN_INPUT_STATUS {
5413 + unsigned long ESW_VLANV;
5414 + unsigned long ESW_PID[3];
5415 + unsigned long ESW_VIMSEL;
5416 + unsigned long ESW_VIMEN;
5417 + unsigned long ESW_VRES[32];
5418 +} eswIoctlVlanInputStatus;
5419 +
5420 +typedef struct _eswIOCTL_Static_MACTable {
5421 + unsigned char *mac_addr;
5422 + int port;
5423 + int priority;
5424 +} eswIoctlUpdateStaticMACtable;
5425 +
5426 +typedef struct _eswIOCTL_OUTPUT_QUEUE {
5427 + int fun_num;
5428 + esw_output_queue_status sOutputQueue;
5429 +} eswIoctlOutputQueue;
5430 +
5431 +/*=============================================================*/
5432 +#define LEARNING_AGING_TIMER (10 * HZ)
5433 +/*
5434 + * Info received from Hardware Learning FIFO,
5435 + * holding MAC address and corresponding Hash Value and
5436 + * port number where the frame was received (disassembled).
5437 + */
5438 +typedef struct _eswPortInfo {
5439 + /* MAC lower 32 bits (first byte is 7:0). */
5440 + unsigned int maclo;
5441 + /* MAC upper 16 bits (47:32). */
5442 + unsigned int machi;
5443 + /* the hash value for this MAC address. */
5444 + unsigned int hash;
5445 + /* the port number this MAC address is associated with. */
5446 + unsigned int port;
5447 +} eswPortInfo;
5448 +
5449 +/*
5450 + * Hardware Look up Address Table 64-bit element.
5451 + */
5452 +typedef volatile struct _64bitTableEntry {
5453 + unsigned int lo; /* lower 32 bits */
5454 + unsigned int hi; /* upper 32 bits */
5455 +} eswTable64bitEntry;
5456 +
5457 +struct eswAddrTableEntryExample {
5458 + /* the entry number */
5459 + unsigned short entrynum;
5460 + /* mac address array */
5461 + unsigned char mac_addr[6];
5462 + unsigned char item1;
5463 + unsigned short item2;
5464 +};
5465 +
5466 +/*
5467 + * Define the buffer descriptor structure.
5468 + */
5469 +typedef struct bufdesc {
5470 + unsigned short cbd_sc; /* Control and status info */
5471 + unsigned short cbd_datlen; /* Data length */
5472 + unsigned long cbd_bufaddr; /* Buffer address */
5473 +#ifdef MODELO_BUFFER
5474 + unsigned long ebd_status;
5475 + unsigned short length_proto_type;
5476 + unsigned short payload_checksum;
5477 + unsigned long bdu;
5478 + unsigned long timestamp;
5479 + unsigned long reserverd_word1;
5480 + unsigned long reserverd_word2;
5481 +#endif
5482 +} cbd_t;
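+/* Without MODELO_BUFFER this is the legacy FEC buffer descriptor; with it
+ * the enhanced (1588-style) words the driver touches (ebd_status, bdu,
+ * timestamp, ...) are appended, matching the MCF_FEC_ECR_ENA_1588 enable
+ * under the same #ifdef in switch_hw_init(). */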
5483 +
5484 +/* Forward declarations of some structures to support different PHYs
5485 + */
5486 +typedef struct {
5487 + uint mii_data;
5488 + void (*funct)(uint mii_reg, struct net_device *dev);
5489 +} phy_cmd_t;
5490 +
5491 +typedef struct {
5492 + uint id;
5493 + char *name;
5494 +
5495 + const phy_cmd_t *config;
5496 + const phy_cmd_t *startup;
5497 + const phy_cmd_t *ack_int;
5498 + const phy_cmd_t *shutdown;
5499 +} phy_info_t;
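Each phy_info_t entry points at phy_cmd_t lists in the style of the legacy FEC driver, pairing an MII management frame word with an optional callback. A hedged sketch of what such a list might look like; the MII frame constant and the all-zero terminator convention are assumptions for illustration only.

/* Illustrative sketch only: a phy_cmd_t list as assumed here ends with an
 * all-zero entry, following the legacy FEC convention. */
static void example_ack_callback(uint mii_reg, struct net_device *dev)
{
	/* placeholder: inspect mii_reg and record the PHY interrupt cause */
}

static const phy_cmd_t example_phy_ack_int[] = {
	{ 0x60120000, example_ack_callback },	/* placeholder MII read frame */
	{ 0, NULL },				/* end of command list */
};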
5500 +
5501 +struct port_status {
5502 + /* 1: link is up, 0: link is down */
5503 + int port1_link_status;
5504 + int port2_link_status;
5505 + /* 1: blocking, 0: unblocking */
5506 + int port0_block_status;
5507 + int port1_block_status;
5508 + int port2_block_status;
5509 +};
5510 +
5511 +struct port_all_status {
5512 + /* 1: link is up, 0: link is down */
5513 + int link_status;
5514 + /* 1: blocking, 0: unblocking */
5515 + int block_status;
5516 +	/* 1: learning disabled, 0: learning enabled */
5517 + int learn_status;
5518 + /* vlan domain verify 1: enable 0: disable */
5519 + int vlan_verify;
5520 +	/* discard unknown 1: enable 0: disable */
5521 + int discard_unknown;
5522 + /* multicast resolution 1: enable 0: disable */
5523 + int multi_reso;
5524 +	/* broadcast resolution 1: enable 0: disable */
5525 + int broad_reso;
5526 + /* transmit 1: enable 0: disable */
5527 + int ftransmit;
5528 + /* receive 1: enable 0: disable */
5529 + int freceive;
5530 +};
5531 +
5532 +/* The switch buffer descriptors track the ring buffers. The rx_bd_base and
5533 + * tx_bd_base always point to the base of the buffer descriptors. The
5534 + * cur_rx and cur_tx point to the currently available buffer.
5535 + * The dirty_tx tracks the current buffer that is being sent by the
5536 + * controller. The cur_tx and dirty_tx are equal under both completely
5537 + * empty and completely full conditions. The empty/ready indicator in
5538 + * the buffer descriptor determines the actual condition.
5539 + */
5540 +struct switch_enet_private {
5541 + /* Hardware registers of the switch device */
5542 + volatile switch_t *hwp;
5543 + volatile eswAddrTable_t *hwentry;
5544 +
5545 + struct net_device *netdev;
5546 + struct platform_device *pdev;
5547 + /* The saved address of a sent-in-place packet/buffer, for skfree(). */
5548 + unsigned char *tx_bounce[TX_RING_SIZE];
5549 + struct sk_buff *tx_skbuff[TX_RING_SIZE];
5550 + ushort skb_cur;
5551 + ushort skb_dirty;
5552 +
5553 + /* CPM dual port RAM relative addresses.
5554 + */
5555 + cbd_t *rx_bd_base; /* Address of Rx and Tx buffers. */
5556 + cbd_t *tx_bd_base;
5557 + cbd_t *cur_rx, *cur_tx; /* The next free ring entry */
5558 + cbd_t *dirty_tx; /* The ring entries to be free()ed. */
5559 + uint tx_full;
5560 +	/* held while accessing HW resources such as the tx/rx ring buffers, but not the MAC */
5561 + spinlock_t hw_lock;
5562 +
5563 +	/* held while accessing the mii_list_t elements */
5564 + spinlock_t mii_lock;
5565 + struct mii_bus *mdio_bus;
5566 + struct phy_device *phydev[SWITCH_EPORT_NUMBER];
5567 +
5568 + uint phy_id;
5569 + uint phy_id_done;
5570 + uint phy_status;
5571 + uint phy_speed;
5572 + phy_info_t const *phy;
5573 + struct work_struct phy_task;
5574 + volatile switch_t *phy_hwp;
5575 +
5576 + uint sequence_done;
5577 + uint mii_phy_task_queued;
5578 +
5579 + uint phy_addr;
5580 +
5581 + int index;
5582 + int opened;
5583 + int full_duplex;
5584 + int msg_enable;
5585 + int phy1_link;
5586 + int phy1_old_link;
5587 + int phy1_duplex;
5588 + int phy1_speed;
5589 +
5590 + int phy2_link;
5591 + int phy2_old_link;
5592 + int phy2_duplex;
5593 + int phy2_speed;
5594 + /* --------------Statistics--------------------------- */
5595 +	/* number of times a new element deleted an element within
5596 +	 * a block due to lack of space */
5597 + int atBlockOverflows;
5598 + /* Peak number of valid entries in the address table */
5599 + int atMaxEntries;
5600 + /* current number of valid entries in the address table */
5601 + int atCurrEntries;
5602 + /* maximum entries within a block found
5603 + * (updated within ageing)*/
5604 + int atMaxEntriesPerBlock;
5605 +
5606 + /* -------------------ageing function------------------ */
5607 + /* maximum age allowed for an entry */
5608 + int ageMax;
5609 +	/* last LUT block that was
5610 +	 * inspected by the ageing task */
5611 + int ageLutIdx;
5612 + /* last element within block inspected by the Ageing task */
5613 + int ageBlockElemIdx;
5614 + /* complete table has been processed by ageing process */
5615 + int ageCompleted;
5616 + /* delay setting */
5617 + int ageDelay;
5618 + /* current delay Counter */
5619 + int ageDelayCnt;
5620 +
5621 + /* ----------------timer related---------------------------- */
5622 + /* current time (for timestamping) */
5623 + int currTime;
5624 + /* flag set by timer when currTime changed
5625 + * and cleared by serving function*/
5626 + int timeChanged;
5627 +
5628 + /**/
5629 + /* Timer for Aging */
5630 + struct timer_list timer_aging;
5631 + int learning_irqhandle_enable;
5632 +};
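The cur_tx/dirty_tx bookkeeping described above follows the usual FEC-style ring discipline. Here is a hedged sketch of how completed transmit descriptors might be reclaimed, using the BD_ENET_TX_* flags defined below; it is illustrative only and is not the driver's actual clean-up path.

/* Illustrative sketch only -- not the driver's clean-up routine.  It walks
 * from dirty_tx towards cur_tx, stops at the first descriptor the hardware
 * still owns (READY set), and wraps at the descriptor carrying the WRAP bit.
 * The full-ring case, where cur_tx == dirty_tx, is ignored for brevity. */
static void example_reclaim_tx(struct switch_enet_private *fep)
{
	cbd_t *bdp = fep->dirty_tx;

	while (bdp != fep->cur_tx) {
		if (bdp->cbd_sc & BD_ENET_TX_READY)
			break;			/* still owned by hardware */

		/* ... free the matching tx_skbuff[] entry here ... */

		if (bdp->cbd_sc & BD_ENET_TX_WRAP)
			bdp = fep->tx_bd_base;	/* last entry: wrap around */
		else
			bdp++;
	}
	fep->dirty_tx = bdp;
}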
5633 +
5634 +struct switch_platform_private {
5635 + unsigned long quirks;
5636 + int num_slots; /* Slots on controller */
5637 + struct switch_enet_private *fep_host[0]; /* Pointers to hosts */
5638 +};
5639 +
5640 +/******************************************************************************/
5641 +/* Receive is empty */
5642 +#define BD_SC_EMPTY ((unsigned short)0x8000)
5643 +/* Transmit is ready */
5644 +#define BD_SC_READY ((unsigned short)0x8000)
5645 +/* Last buffer descriptor */
5646 +#define BD_SC_WRAP ((unsigned short)0x2000)
5647 +/* Interrupt on change */
5648 +#define BD_SC_INTRPT ((unsigned short)0x1000)
5649 +/* Continuous mode */
5650 +#define BD_SC_CM ((unsigned short)0x0200)
5651 +/* Rec'd too many idles */
5652 +#define BD_SC_ID ((unsigned short)0x0100)
5653 +/* xmt preamble */
5654 +#define BD_SC_P ((unsigned short)0x0100)
5655 +/* Break received */
5656 +#define BD_SC_BR ((unsigned short)0x0020)
5657 +/* Framing error */
5658 +#define BD_SC_FR ((unsigned short)0x0010)
5659 +/* Parity error */
5660 +#define BD_SC_PR ((unsigned short)0x0008)
5661 +/* Overrun */
5662 +#define BD_SC_OV ((unsigned short)0x0002)
5663 +#define BD_SC_CD ((unsigned short)0x0001)
5664 +
5665 +/* Buffer descriptor control/status used by Ethernet receive.
5666 +*/
5667 +#define BD_ENET_RX_EMPTY ((unsigned short)0x8000)
5668 +#define BD_ENET_RX_WRAP ((unsigned short)0x2000)
5669 +#define BD_ENET_RX_INTR ((unsigned short)0x1000)
5670 +#define BD_ENET_RX_LAST ((unsigned short)0x0800)
5671 +#define BD_ENET_RX_FIRST ((unsigned short)0x0400)
5672 +#define BD_ENET_RX_MISS ((unsigned short)0x0100)
5673 +#define BD_ENET_RX_LG ((unsigned short)0x0020)
5674 +#define BD_ENET_RX_NO ((unsigned short)0x0010)
5675 +#define BD_ENET_RX_SH ((unsigned short)0x0008)
5676 +#define BD_ENET_RX_CR ((unsigned short)0x0004)
5677 +#define BD_ENET_RX_OV ((unsigned short)0x0002)
5678 +#define BD_ENET_RX_CL ((unsigned short)0x0001)
5679 +/* All status bits */
5680 +#define BD_ENET_RX_STATS ((unsigned short)0x013f)
5681 +
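For completeness, a sketch of how the receive status bits above are typically interpreted when a descriptor completes; folding the error bits into statistics counters is assumed to follow the usual FEC pattern, and the helper below is illustrative rather than the driver's receive path.

/* Illustrative sketch only: classifying a completed receive descriptor
 * using the BD_ENET_RX_* bits defined above. */
static int example_rx_frame_ok(unsigned short status)
{
	if (!(status & BD_ENET_RX_LAST))
		return 0;	/* frame did not fit in a single buffer */
	if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
		      BD_ENET_RX_CR | BD_ENET_RX_OV | BD_ENET_RX_CL))
		return 0;	/* length, framing, CRC, overrun or collision error */
	return 1;		/* frame can be passed up the stack */
}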
5682 +/* Buffer descriptor control/status used by Ethernet transmit.
5683 +*/
5684 +#define BD_ENET_TX_READY ((unsigned short)0x8000)
5685 +#define BD_ENET_TX_PAD ((unsigned short)0x4000)
5686 +#define BD_ENET_TX_WRAP ((unsigned short)0x2000)
5687 +#define BD_ENET_TX_INTR ((unsigned short)0x1000)
5688 +#define BD_ENET_TX_LAST ((unsigned short)0x0800)
5689 +#define BD_ENET_TX_TC ((unsigned short)0x0400)
5690 +#define BD_ENET_TX_DEF ((unsigned short)0x0200)
5691 +#define BD_ENET_TX_HB ((unsigned short)0x0100)
5692 +#define BD_ENET_TX_LC ((unsigned short)0x0080)
5693 +#define BD_ENET_TX_RL ((unsigned short)0x0040)
5694 +#define BD_ENET_TX_RCMASK ((unsigned short)0x003c)
5695 +#define BD_ENET_TX_UN ((unsigned short)0x0002)
5696 +#define BD_ENET_TX_CSL ((unsigned short)0x0001)
5697 +/* All status bits */
5698 +#define BD_ENET_TX_STATS ((unsigned short)0x03ff)
5699 +
5700 +/* Copied from validation code */
5701 +#define RX_BUFFER_SIZE 1520
5702 +#define TX_BUFFER_SIZE 1520
5703 +#define NUM_RXBDS 20
5704 +#define NUM_TXBDS 20
5705 +
5706 +#define TX_BD_R 0x8000
5707 +#define TX_BD_TO1 0x4000
5708 +#define TX_BD_W 0x2000
5709 +#define TX_BD_TO2 0x1000
5710 +#define TX_BD_L 0x0800
5711 +#define TX_BD_TC 0x0400
5712 +
5713 +#define TX_BD_INT 0x40000000
5714 +#define TX_BD_TS 0x20000000
5715 +#define TX_BD_PINS 0x10000000
5716 +#define TX_BD_IINS 0x08000000
5717 +#define TX_BD_TXE 0x00008000
5718 +#define TX_BD_UE 0x00002000
5719 +#define TX_BD_EE 0x00001000
5720 +#define TX_BD_FE 0x00000800
5721 +#define TX_BD_LCE 0x00000400
5722 +#define TX_BD_OE 0x00000200
5723 +#define TX_BD_TSE 0x00000100
5724 +#define TX_BD_BDU 0x80000000
5725 +
5726 +#define RX_BD_E 0x8000
5727 +#define RX_BD_R01 0x4000
5728 +#define RX_BD_W 0x2000
5729 +#define RX_BD_R02 0x1000
5730 +#define RX_BD_L 0x0800
5731 +#define RX_BD_M 0x0100
5732 +#define RX_BD_BC 0x0080
5733 +#define RX_BD_MC 0x0040
5734 +#define RX_BD_LG 0x0020
5735 +#define RX_BD_NO 0x0010
5736 +#define RX_BD_CR 0x0004
5737 +#define RX_BD_OV 0x0002
5738 +#define RX_BD_TR 0x0001
5739 +
5740 +#define RX_BD_ME 0x80000000
5741 +#define RX_BD_PE 0x04000000
5742 +#define RX_BD_CE 0x02000000
5743 +#define RX_BD_UC 0x01000000
5744 +#define RX_BD_INT 0x00800000
5745 +#define RX_BD_ICE 0x00000020
5746 +#define RX_BD_PCR 0x00000010
5747 +#define RX_BD_VLAN 0x00000004
5748 +#define RX_BD_IPV6 0x00000002
5749 +#define RX_BD_FRAG 0x00000001
5750 +#define RX_BD_BDU 0x80000000
5751 +/****************************************************************************/
5752 +
5753 +/* Address table size in bytes (2048 64-bit entries) */
5754 +#define ESW_ATABLE_MEM_SIZE (2048*8)
5755 +/* How many 64-bit elements fit in the address table */
5756 +#define ESW_ATABLE_MEM_NUM_ENTRIES (2048)
5757 +/* Address Table Maximum number of entries in each Slot */
5758 +#define ATABLE_ENTRY_PER_SLOT 8
5759 +/* log2(ATABLE_ENTRY_PER_SLOT)*/
5760 +#define ATABLE_ENTRY_PER_SLOT_bits 3
5761 +/* entry size in bytes */
5762 +#define ATABLE_ENTRY_SIZE 8
5763 +/* slot size in bytes */
5764 +#define ATABLE_SLOT_SIZE (ATABLE_ENTRY_PER_SLOT * ATABLE_ENTRY_SIZE)
5765 +/* width of timestamp variable (bits) within address table entry */
5766 +#define AT_DENTRY_TIMESTAMP_WIDTH 10
5767 +/* number of bits for port number storage */
5768 +#define AT_DENTRY_PORT_WIDTH 4
5769 +/* number of bits for port bitmask number storage */
5770 +#define AT_SENTRY_PORT_WIDTH 7
5771 +/* address table static entry port bitmask start address bit */
5772 +#define AT_SENTRY_PORTMASK_shift 21
5773 +/* number of bits for port priority storage */
5774 +#define AT_SENTRY_PRIO_WIDTH 7
5775 +/* address table static entry priority start address bit */
5776 +#define AT_SENTRY_PRIO_shift 18
5777 +/* address table dynamic entry port start address bit */
5778 +#define AT_DENTRY_PORT_shift 28
5779 +/* address table dynamic entry timestamp start address bit */
5780 +#define AT_DENTRY_TIME_shift 18
5781 +/* address table entry record type start address bit */
5782 +#define AT_ENTRY_TYPE_shift 17
5783 +/* address table entry record type bit: 1 static, 0 dynamic */
5784 +#define AT_ENTRY_TYPE_STATIC 1
5785 +#define AT_ENTRY_TYPE_DYNAMIC 0
5786 +/* address table entry record valid start address bit */
5787 +#define AT_ENTRY_VALID_shift 16
5788 +#define AT_ENTRY_RECORD_VALID 1
5789 +
5790 +#define AT_EXTRACT_VALID(x) \
5791 + ((x >> AT_ENTRY_VALID_shift) & AT_ENTRY_RECORD_VALID)
5792 +
5793 +#define AT_EXTRACT_PORTMASK(x) \
5794 + ((x >> AT_SENTRY_PORTMASK_shift) & AT_SENTRY_PORT_WIDTH)
5795 +
5796 +#define AT_EXTRACT_PRIO(x) \
5797 + ((x >> AT_SENTRY_PRIO_shift) & AT_SENTRY_PRIO_WIDTH)
5798 +
5799 +/* return the block index corresponding to the calculated 8-bit hash value */
5800 +#define GET_BLOCK_PTR(hash) (hash << 3)
5801 +#define AT_EXTRACT_TIMESTAMP(x) \
5802 + ((x >> AT_DENTRY_TIME_shift) & ((1 << AT_DENTRY_TIMESTAMP_WIDTH)-1))
5803 +#define AT_EXTRACT_PORT(x) \
5804 + ((x >> AT_DENTRY_PORT_shift) & ((1 << AT_DENTRY_PORT_WIDTH)-1))
5805 +#define AT_SEXTRACT_PORT(x) \
5806 + ((~((x >> AT_SENTRY_PORTMASK_shift) & \
5807 + ((1 << AT_DENTRY_PORT_WIDTH)-1))) >> 1)
5808 +#define TIMEDELTA(newtime, oldtime) \
5809 + ((newtime - oldtime) & \
5810 + ((1 << AT_DENTRY_TIMESTAMP_WIDTH)-1))
5811 +
5812 +#define AT_EXTRACT_IP_PROTOCOL(x) ((x >> 8) & 0xff)
5813 +#define AT_EXTRACT_TCP_UDP_PORT(x) ((x >> 16) & 0xffff)
5814 +
5815 +/* increment time value respecting modulo. */
5816 +#define TIMEINCREMENT(time) \
5817 + ((time) = ((time)+1) & ((1 << AT_DENTRY_TIMESTAMP_WIDTH)-1))
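Combining the layout macros: a sketch of decoding one dynamic address-table entry and testing whether it has aged out. It assumes the valid/type/timestamp/port fields live in the upper 32-bit word of the 64-bit entry (the lower word holding MAC bits 31:0), which is what the shifts and widths above suggest; the helper and its arguments are illustrative.

/* Illustrative sketch only: decode one dynamic LUT entry and test its age.
 * 'atable' stands for the mapped 2048-entry address table; placing the
 * control fields in the upper word is an assumption based on the shift
 * definitions above. */
static int example_entry_is_stale(volatile eswTable64bitEntry *atable,
				  unsigned char hash, int elem,
				  int curr_time, int age_max)
{
	unsigned int hi = atable[GET_BLOCK_PTR(hash) + elem].hi;

	if (!AT_EXTRACT_VALID(hi))
		return 0;	/* empty slot, nothing to age */
	if (((hi >> AT_ENTRY_TYPE_shift) & 1) == AT_ENTRY_TYPE_STATIC)
		return 0;	/* static entries are never aged out */

	return TIMEDELTA(curr_time, AT_EXTRACT_TIMESTAMP(hi)) > age_max;
}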
5818 +/* ------------------------------------------------------------------------- */
5819 +/* Bit definitions and macros for MCF_ESW_REVISION */
5820 +#define MCF_ESW_REVISION_CORE_REVISION(x) (((x)&0x0000FFFF)<<0)
5821 +#define MCF_ESW_REVISION_CUSTOMER_REVISION(x) (((x)&0x0000FFFF)<<16)
5822 +
5823 +/* Bit definitions and macros for MCF_ESW_PER */
5824 +#define MCF_ESW_PER_TE0 (0x00000001)
5825 +#define MCF_ESW_PER_TE1 (0x00000002)
5826 +#define MCF_ESW_PER_TE2 (0x00000004)
5827 +#define MCF_ESW_PER_RE0 (0x00010000)
5828 +#define MCF_ESW_PER_RE1 (0x00020000)
5829 +#define MCF_ESW_PER_RE2 (0x00040000)
5830 +
5831 +/* Bit definitions and macros for MCF_ESW_VLANV */
5832 +#define MCF_ESW_VLANV_VV0 (0x00000001)
5833 +#define MCF_ESW_VLANV_VV1 (0x00000002)
5834 +#define MCF_ESW_VLANV_VV2 (0x00000004)
5835 +#define MCF_ESW_VLANV_DU0 (0x00010000)
5836 +#define MCF_ESW_VLANV_DU1 (0x00020000)
5837 +#define MCF_ESW_VLANV_DU2 (0x00040000)
5838 +
5839 +/* Bit definitions and macros for MCF_ESW_DBCR */
5840 +#define MCF_ESW_DBCR_P0 (0x00000001)
5841 +#define MCF_ESW_DBCR_P1 (0x00000002)
5842 +#define MCF_ESW_DBCR_P2 (0x00000004)
5843 +
5844 +/* Bit definitions and macros for MCF_ESW_DMCR */
5845 +#define MCF_ESW_DMCR_P0 (0x00000001)
5846 +#define MCF_ESW_DMCR_P1 (0x00000002)
5847 +#define MCF_ESW_DMCR_P2 (0x00000004)
5848 +
5849 +/* Bit definitions and macros for MCF_ESW_BKLR */
5850 +#define MCF_ESW_BKLR_BE0 (0x00000001)
5851 +#define MCF_ESW_BKLR_BE1 (0x00000002)
5852 +#define MCF_ESW_BKLR_BE2 (0x00000004)
5853 +#define MCF_ESW_BKLR_LD0 (0x00010000)
5854 +#define MCF_ESW_BKLR_LD1 (0x00020000)
5855 +#define MCF_ESW_BKLR_LD2 (0x00040000)
5856 +
5857 +/* Bit definitions and macros for MCF_ESW_BMPC */
5858 +#define MCF_ESW_BMPC_PORT(x) (((x)&0x0000000F)<<0)
5859 +#define MCF_ESW_BMPC_MSG_TX (0x00000020)
5860 +#define MCF_ESW_BMPC_EN (0x00000040)
5861 +#define MCF_ESW_BMPC_DIS (0x00000080)
5862 +#define MCF_ESW_BMPC_PRIORITY(x) (((x)&0x00000007)<<13)
5863 +#define MCF_ESW_BMPC_PORTMASK(x) (((x)&0x00000007)<<16)
5864 +
5865 +/* Bit definitions and macros for MCF_ESW_MODE */
5866 +#define MCF_ESW_MODE_SW_RST (0x00000001)
5867 +#define MCF_ESW_MODE_SW_EN (0x00000002)
5868 +#define MCF_ESW_MODE_STOP (0x00000080)
5869 +#define MCF_ESW_MODE_CRC_TRAN (0x00000100)
5870 +#define MCF_ESW_MODE_P0CT (0x00000200)
5871 +#define MCF_ESW_MODE_STATRST (0x80000000)
5872 +
5873 +/* Bit definitions and macros for MCF_ESW_VIMSEL */
5874 +#define MCF_ESW_VIMSEL_IM0(x) (((x)&0x00000003)<<0)
5875 +#define MCF_ESW_VIMSEL_IM1(x) (((x)&0x00000003)<<2)
5876 +#define MCF_ESW_VIMSEL_IM2(x) (((x)&0x00000003)<<4)
5877 +
5878 +/* Bit definitions and macros for MCF_ESW_VOMSEL */
5879 +#define MCF_ESW_VOMSEL_OM0(x) (((x)&0x00000003)<<0)
5880 +#define MCF_ESW_VOMSEL_OM1(x) (((x)&0x00000003)<<2)
5881 +#define MCF_ESW_VOMSEL_OM2(x) (((x)&0x00000003)<<4)
5882 +
5883 +/* Bit definitions and macros for MCF_ESW_VIMEN */
5884 +#define MCF_ESW_VIMEN_EN0 (0x00000001)
5885 +#define MCF_ESW_VIMEN_EN1 (0x00000002)
5886 +#define MCF_ESW_VIMEN_EN2 (0x00000004)
5887 +
5888 +/* Bit definitions and macros for MCF_ESW_VID */
5889 +#define MCF_ESW_VID_TAG(x) (((x)&0xFFFFFFFF)<<0)
5890 +
5891 +/* Bit definitions and macros for MCF_ESW_MCR */
5892 +#define MCF_ESW_MCR_PORT(x) (((x)&0x0000000F)<<0)
5893 +#define MCF_ESW_MCR_MEN (0x00000010)
5894 +#define MCF_ESW_MCR_INGMAP (0x00000020)
5895 +#define MCF_ESW_MCR_EGMAP (0x00000040)
5896 +#define MCF_ESW_MCR_INGSA (0x00000080)
5897 +#define MCF_ESW_MCR_INGDA (0x00000100)
5898 +#define MCF_ESW_MCR_EGSA (0x00000200)
5899 +#define MCF_ESW_MCR_EGDA (0x00000400)
5900 +
5901 +/* Bit definitions and macros for MCF_ESW_EGMAP */
5902 +#define MCF_ESW_EGMAP_EG0 (0x00000001)
5903 +#define MCF_ESW_EGMAP_EG1 (0x00000002)
5904 +#define MCF_ESW_EGMAP_EG2 (0x00000004)
5905 +
5906 +/* Bit definitions and macros for MCF_ESW_INGMAP */
5907 +#define MCF_ESW_INGMAP_ING0 (0x00000001)
5908 +#define MCF_ESW_INGMAP_ING1 (0x00000002)
5909 +#define MCF_ESW_INGMAP_ING2 (0x00000004)
5910 +
5911 +/* Bit definitions and macros for MCF_ESW_INGSAL */
5912 +#define MCF_ESW_INGSAL_ADDLOW(x) (((x)&0xFFFFFFFF)<<0)
5913 +
5914 +/* Bit definitions and macros for MCF_ESW_INGSAH */
5915 +#define MCF_ESW_INGSAH_ADDHIGH(x) (((x)&0x0000FFFF)<<0)
5916 +
5917 +/* Bit definitions and macros for MCF_ESW_INGDAL */
5918 +#define MCF_ESW_INGDAL_ADDLOW(x) (((x)&0xFFFFFFFF)<<0)
5919 +
5920 +/* Bit definitions and macros for MCF_ESW_INGDAH */
5921 +#define MCF_ESW_INGDAH_ADDHIGH(x) (((x)&0x0000FFFF)<<0)
5922 +
5923 +/* Bit definitions and macros for MCF_ESW_ENGSAL */
5924 +#define MCF_ESW_ENGSAL_ADDLOW(x) (((x)&0xFFFFFFFF)<<0)
5925 +
5926 +/* Bit definitions and macros for MCF_ESW_ENGSAH */
5927 +#define MCF_ESW_ENGSAH_ADDHIGH(x) (((x)&0x0000FFFF)<<0)
5928 +
5929 +/* Bit definitions and macros for MCF_ESW_ENGDAL */
5930 +#define MCF_ESW_ENGDAL_ADDLOW(x) (((x)&0xFFFFFFFF)<<0)
5931 +
5932 +/* Bit definitions and macros for MCF_ESW_ENGDAH */
5933 +#define MCF_ESW_ENGDAH_ADDHIGH(x) (((x)&0x0000FFFF)<<0)
5934 +
5935 +/* Bit definitions and macros for MCF_ESW_MCVAL */
5936 +#define MCF_ESW_MCVAL_COUNT(x) (((x)&0x000000FF)<<0)
5937 +
5938 +/* Bit definitions and macros for MCF_ESW_MMSR */
5939 +#define MCF_ESW_MMSR_BUSY (0x00000001)
5940 +#define MCF_ESW_MMSR_NOCELL (0x00000002)
5941 +#define MCF_ESW_MMSR_MEMFULL (0x00000004)
5942 +#define MCF_ESW_MMSR_MFLATCH (0x00000008)
5943 +#define MCF_ESW_MMSR_DQ_GRNT (0x00000040)
5944 +#define MCF_ESW_MMSR_CELLS_AVAIL(x) (((x)&0x000000FF)<<16)
5945 +
5946 +/* Bit definitions and macros for MCF_ESW_LMT */
5947 +#define MCF_ESW_LMT_THRESH(x) (((x)&0x000000FF)<<0)
5948 +
5949 +/* Bit definitions and macros for MCF_ESW_LFC */
5950 +#define MCF_ESW_LFC_COUNT(x) (((x)&0xFFFFFFFF)<<0)
5951 +
5952 +/* Bit definitions and macros for MCF_ESW_PCSR */
5953 +#define MCF_ESW_PCSR_PC0 (0x00000001)
5954 +#define MCF_ESW_PCSR_PC1 (0x00000002)
5955 +#define MCF_ESW_PCSR_PC2 (0x00000004)
5956 +
5957 +/* Bit definitions and macros for MCF_ESW_IOSR */
5958 +#define MCF_ESW_IOSR_OR0 (0x00000001)
5959 +#define MCF_ESW_IOSR_OR1 (0x00000002)
5960 +#define MCF_ESW_IOSR_OR2 (0x00000004)
5961 +
5962 +/* Bit definitions and macros for MCF_ESW_QWT */
5963 +#define MCF_ESW_QWT_Q0WT(x) (((x)&0x0000001F)<<0)
5964 +#define MCF_ESW_QWT_Q1WT(x) (((x)&0x0000001F)<<8)
5965 +#define MCF_ESW_QWT_Q2WT(x) (((x)&0x0000001F)<<16)
5966 +#define MCF_ESW_QWT_Q3WT(x) (((x)&0x0000001F)<<24)
5967 +
5968 +/* Bit definitions and macros for MCF_ESW_P0BCT */
5969 +#define MCF_ESW_P0BCT_THRESH(x) (((x)&0x000000FF)<<0)
5970 +
5971 +/* Bit definitions and macros for MCF_ESW_P0FFEN */
5972 +#define MCF_ESW_P0FFEN_FEN (0x00000001)
5973 +#define MCF_ESW_P0FFEN_FD(x) (((x)&0x00000003)<<2)
5974 +
5975 +/* Bit definitions and macros for MCF_ESW_PSNP */
5976 +#define MCF_ESW_PSNP_EN (0x00000001)
5977 +#define MCF_ESW_PSNP_MODE(x) (((x)&0x00000003)<<1)
5978 +#define MCF_ESW_PSNP_CD (0x00000008)
5979 +#define MCF_ESW_PSNP_CS (0x00000010)
5980 +#define MCF_ESW_PSNP_PORT_COMPARE(x) (((x)&0x0000FFFF)<<16)
5981 +
5982 +/* Bit definitions and macros for MCF_ESW_IPSNP */
5983 +#define MCF_ESW_IPSNP_EN (0x00000001)
5984 +#define MCF_ESW_IPSNP_MODE(x) (((x)&0x00000003)<<1)
5985 +#define MCF_ESW_IPSNP_PROTOCOL(x) (((x)&0x000000FF)<<8)
5986 +
5987 +/* Bit definitions and macros for MCF_ESW_PVRES */
5988 +#define MCF_ESW_PVRES_PRI0(x) (((x)&0x00000007)<<0)
5989 +#define MCF_ESW_PVRES_PRI1(x) (((x)&0x00000007)<<3)
5990 +#define MCF_ESW_PVRES_PRI2(x) (((x)&0x00000007)<<6)
5991 +#define MCF_ESW_PVRES_PRI3(x) (((x)&0x00000007)<<9)
5992 +#define MCF_ESW_PVRES_PRI4(x) (((x)&0x00000007)<<12)
5993 +#define MCF_ESW_PVRES_PRI5(x) (((x)&0x00000007)<<15)
5994 +#define MCF_ESW_PVRES_PRI6(x) (((x)&0x00000007)<<18)
5995 +#define MCF_ESW_PVRES_PRI7(x) (((x)&0x00000007)<<21)
5996 +
5997 +/* Bit definitions and macros for MCF_ESW_IPRES */
5998 +#define MCF_ESW_IPRES_ADDRESS(x) (((x)&0x000000FF)<<0)
5999 +#define MCF_ESW_IPRES_IPV4SEL (0x00000100)
6000 +#define MCF_ESW_IPRES_PRI0(x) (((x)&0x00000003)<<9)
6001 +#define MCF_ESW_IPRES_PRI1(x) (((x)&0x00000003)<<11)
6002 +#define MCF_ESW_IPRES_PRI2(x) (((x)&0x00000003)<<13)
6003 +#define MCF_ESW_IPRES_READ (0x80000000)
6004 +
6005 +/* Bit definitions and macros for MCF_ESW_PRES */
6006 +#define MCF_ESW_PRES_VLAN (0x00000001)
6007 +#define MCF_ESW_PRES_IP (0x00000002)
6008 +#define MCF_ESW_PRES_MAC (0x00000004)
6009 +#define MCF_ESW_PRES_DFLT_PRI(x) (((x)&0x00000007)<<4)
6010 +
6011 +/* Bit definitions and macros for MCF_ESW_PID */
6012 +#define MCF_ESW_PID_VLANID(x) (((x)&0x0000FFFF)<<0)
6013 +
6014 +/* Bit definitions and macros for MCF_ESW_VRES */
6015 +#define MCF_ESW_VRES_P0 (0x00000001)
6016 +#define MCF_ESW_VRES_P1 (0x00000002)
6017 +#define MCF_ESW_VRES_P2 (0x00000004)
6018 +#define MCF_ESW_VRES_VLANID(x) (((x)&0x00000FFF)<<3)
6019 +
6020 +/* Bit definitions and macros for MCF_ESW_DISCN */
6021 +#define MCF_ESW_DISCN_COUNT(x) (((x)&0xFFFFFFFF)<<0)
6022 +
6023 +/* Bit definitions and macros for MCF_ESW_DISCB */
6024 +#define MCF_ESW_DISCB_COUNT(x) (((x)&0xFFFFFFFF)<<0)
6025 +
6026 +/* Bit definitions and macros for MCF_ESW_NDISCN */
6027 +#define MCF_ESW_NDISCN_COUNT(x) (((x)&0xFFFFFFFF)<<0)
6028 +
6029 +/* Bit definitions and macros for MCF_ESW_NDISCB */
6030 +#define MCF_ESW_NDISCB_COUNT(x) (((x)&0xFFFFFFFF)<<0)
6031 +
6032 +/* Bit definitions and macros for MCF_ESW_POQC */
6033 +#define MCF_ESW_POQC_COUNT(x) (((x)&0xFFFFFFFF)<<0)
6034 +
6035 +/* Bit definitions and macros for MCF_ESW_PMVID */
6036 +#define MCF_ESW_PMVID_COUNT(x) (((x)&0xFFFFFFFF)<<0)
6037 +
6038 +/* Bit definitions and macros for MCF_ESW_PMVTAG */
6039 +#define MCF_ESW_PMVTAG_COUNT(x) (((x)&0xFFFFFFFF)<<0)
6040 +
6041 +/* Bit definitions and macros for MCF_ESW_PBL */
6042 +#define MCF_ESW_PBL_COUNT(x) (((x)&0xFFFFFFFF)<<0)
6043 +
6044 +/* Bit definitions and macros for MCF_ESW_ISR */
6045 +#define MCF_ESW_ISR_EBERR (0x00000001)
6046 +#define MCF_ESW_ISR_RXB (0x00000002)
6047 +#define MCF_ESW_ISR_RXF (0x00000004)
6048 +#define MCF_ESW_ISR_TXB (0x00000008)
6049 +#define MCF_ESW_ISR_TXF (0x00000010)
6050 +#define MCF_ESW_ISR_QM (0x00000020)
6051 +#define MCF_ESW_ISR_OD0 (0x00000040)
6052 +#define MCF_ESW_ISR_OD1 (0x00000080)
6053 +#define MCF_ESW_ISR_OD2 (0x00000100)
6054 +#define MCF_ESW_ISR_LRN (0x00000200)
6055 +
6056 +/* Bit definitions and macros for MCF_ESW_IMR */
6057 +#define MCF_ESW_IMR_EBERR (0x00000001)
6058 +#define MCF_ESW_IMR_RXB (0x00000002)
6059 +#define MCF_ESW_IMR_RXF (0x00000004)
6060 +#define MCF_ESW_IMR_TXB (0x00000008)
6061 +#define MCF_ESW_IMR_TXF (0x00000010)
6062 +#define MCF_ESW_IMR_QM (0x00000020)
6063 +#define MCF_ESW_IMR_OD0 (0x00000040)
6064 +#define MCF_ESW_IMR_OD1 (0x00000080)
6065 +#define MCF_ESW_IMR_OD2 (0x00000100)
6066 +#define MCF_ESW_IMR_LRN (0x00000200)
6067 +
6068 +/* Bit definitions and macros for MCF_ESW_RDSR */
6069 +#define MCF_ESW_RDSR_ADDRESS(x) (((x)&0x3FFFFFFF)<<2)
6070 +
6071 +/* Bit definitions and macros for MCF_ESW_TDSR */
6072 +#define MCF_ESW_TDSR_ADDRESS(x) (((x)&0x3FFFFFFF)<<2)
6073 +
6074 +/* Bit definitions and macros for MCF_ESW_MRBR */
6075 +#define MCF_ESW_MRBR_SIZE(x) (((x)&0x000003FF)<<4)
6076 +
6077 +/* Bit definitions and macros for MCF_ESW_RDAR */
6078 +#define MCF_ESW_RDAR_R_DES_ACTIVE (0x01000000)
6079 +
6080 +/* Bit definitions and macros for MCF_ESW_TDAR */
6081 +#define MCF_ESW_TDAR_X_DES_ACTIVE (0x01000000)
6082 +
6083 +/* Bit definitions and macros for MCF_ESW_LREC0 */
6084 +#define MCF_ESW_LREC0_MACADDR0(x) (((x)&0xFFFFFFFF)<<0)
6085 +
6086 +/* Bit definitions and macros for MCF_ESW_LREC1 */
6087 +#define MCF_ESW_LREC1_MACADDR1(x) (((x)&0x0000FFFF)<<0)
6088 +#define MCF_ESW_LREC1_HASH(x) (((x)&0x000000FF)<<16)
6089 +#define MCF_ESW_LREC1_SWPORT(x) (((x)&0x00000003)<<24)
6090 +
6091 +/* Bit definitions and macros for MCF_ESW_LSR */
6092 +#define MCF_ESW_LSR_DA (0x00000001)
6093 +
6094 +/* port mirroring port number match */
6095 +#define MIRROR_EGRESS_PORT_MATCH 1
6096 +#define MIRROR_INGRESS_PORT_MATCH 2
6097 +
6098 +/* port mirroring mac address match */
6099 +#define MIRROR_EGRESS_SOURCE_MATCH 1
6100 +#define MIRROR_INGRESS_SOURCE_MATCH 2
6101 +#define MIRROR_EGRESS_DESTINATION_MATCH 3
6102 +#define MIRROR_INGRESS_DESTINATION_MATCH 4
6103 +
6104 +#endif /* SWITCH_H */
6105 --- a/include/linux/fsl_devices.h
6106 +++ b/include/linux/fsl_devices.h
6107 @@ -129,4 +129,21 @@ struct fsl_ata_platform_data {
6108 void (*exit)(void);
6109 int (*get_clk_rate)(void);
6110 };
6111 +
6112 +struct net_device;
6113 +struct coldfire_switch_platform_data {
6114 + int hash_table;
6115 + unsigned int *switch_hw;
6116 + void (*request_intrs)(struct net_device *dev,
6117 + irqreturn_t (*)(int, void *),
6118 + void *irq_privatedata);
6119 + void (*set_mii)(struct net_device *dev);
6120 + void (*get_mac)(struct net_device *dev);
6121 + void (*enable_phy_intr)(void);
6122 + void (*disable_phy_intr)(void);
6123 + void (*phy_ack_intr)(void);
6124 + void (*localhw_setup)(void);
6125 + void (*uncache)(unsigned long addr);
6126 + void (*platform_flush_cache)(void);
6127 +};
6128 #endif /* _FSL_DEVICE_H_ */
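coldfire_switch_platform_data is filled in by the sub-architecture setup code added elsewhere in this patch and consumed by the switch driver through its platform device. A minimal sketch of how a board file could register such a device; the device name, the id and the NULL callbacks are placeholders, not values taken from the patch.

/* Illustrative sketch only: handing the platform data declared above to the
 * switch driver via a platform device.  All names and values here are
 * placeholders. */
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/fsl_devices.h>

static struct coldfire_switch_platform_data example_switch_pdata = {
	.hash_table	= 0,
	.request_intrs	= NULL,		/* board-specific IRQ hookup */
	.set_mii	= NULL,		/* board-specific MII setup */
	.get_mac	= NULL,		/* board-specific MAC address read */
};

static struct platform_device example_switch_device = {
	.name	= "coldfire-switch",	/* placeholder device name */
	.id	= 0,
	.dev	= {
		.platform_data = &example_switch_pdata,
	},
};

static int __init example_switch_device_register(void)
{
	return platform_device_register(&example_switch_device);
}
device_initcall(example_switch_device_register);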
6129 --- a/net/core/dev.c
6130 +++ b/net/core/dev.c
6131 @@ -4756,6 +4756,10 @@ static int dev_ifsioc(struct net *net, s
6132 default:
6133 if ((cmd >= SIOCDEVPRIVATE &&
6134 cmd <= SIOCDEVPRIVATE + 15) ||
6135 +#if defined(CONFIG_MODELO_SWITCH)
6136 + (cmd >= 0x9101 &&
6137 + cmd <= 0x92ff) ||
6138 +#endif
6139 cmd == SIOCBONDENSLAVE ||
6140 cmd == SIOCBONDRELEASE ||
6141 cmd == SIOCBONDSETHWADDR ||
6142 @@ -4948,6 +4952,10 @@ int dev_ioctl(struct net *net, unsigned
6143 */
6144 default:
6145 if (cmd == SIOCWANDEV ||
6146 +#if defined(CONFIG_MODELO_SWITCH)
6147 + (cmd >= 0x9101 &&
6148 + cmd <= 0x92ff) ||
6149 +#endif
6150 (cmd >= SIOCDEVPRIVATE &&
6151 cmd <= SIOCDEVPRIVATE + 15)) {
6152 dev_load(net, ifr.ifr_name);
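The two dev.c hunks let private command numbers in the 0x9101-0x92ff range reach the switch driver's ndo_do_ioctl handler (after dev_load) even though they fall outside the SIOCDEVPRIVATE window. A hedged user-space sketch of issuing one such command with an eswIoctlVlanInputConfig-shaped argument; the command value 0x9101 and the argument interpretation are assumptions, since the real command numbers are defined elsewhere in the driver.

/* Illustrative user-space sketch only.  The command value 0x9101 is just a
 * placeholder inside the range passed through above; the real private
 * command numbers and their argument layouts are defined by the driver. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>

struct example_vlan_input_config {	/* mirrors eswIoctlVlanInputConfig */
	int port;
	int mode;
	unsigned short port_vlanid;
};

int main(void)
{
	struct example_vlan_input_config cfg = { 1, 0, 100 };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;

	if (ioctl(fd, 0x9101, &ifr) < 0)	/* placeholder switch command */
		perror("switch ioctl");

	close(fd);
	return 0;
}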