1 From e3aece79d5003b6879298b05551e113117d5cdd8 Mon Sep 17 00:00:00 2001
2 From: John Crispin <blogic@openwrt.org>
3 Date: Sat, 27 Jun 2015 13:13:36 +0200
4 Subject: [PATCH 63/76] arm: mediatek: add SDK ethernet
6 Signed-off-by: John Crispin <blogic@openwrt.org>
8 drivers/net/ethernet/Kconfig | 1 +
9 drivers/net/ethernet/Makefile | 1 +
10 drivers/net/ethernet/raeth/Kconfig | 415 ++
11 drivers/net/ethernet/raeth/Makefile | 67 +
12 drivers/net/ethernet/raeth/Makefile.release | 60 +
13 drivers/net/ethernet/raeth/csr_netlink.h | 27 +
14 drivers/net/ethernet/raeth/dvt/pkt_gen.c | 88 +
15 drivers/net/ethernet/raeth/dvt/pkt_gen_tcp_frag.c | 138 +
16 drivers/net/ethernet/raeth/dvt/pkt_gen_udp_frag.c | 191 +
17 drivers/net/ethernet/raeth/dvt/raether_pdma_dvt.c | 1527 +++++
18 drivers/net/ethernet/raeth/dvt/raether_pdma_dvt.h | 75 +
19 drivers/net/ethernet/raeth/ethtool_readme.txt | 44 +
20 drivers/net/ethernet/raeth/mcast.c | 187 +
21 drivers/net/ethernet/raeth/mii_mgr.c | 603 ++
22 drivers/net/ethernet/raeth/ra2882ethreg.h | 1985 +++++++
23 drivers/net/ethernet/raeth/ra_ethtool.c | 515 ++
24 drivers/net/ethernet/raeth/ra_ethtool.h | 13 +
25 drivers/net/ethernet/raeth/ra_ioctl.h | 102 +
26 drivers/net/ethernet/raeth/ra_mac.c | 2645 +++++++++
27 drivers/net/ethernet/raeth/ra_mac.h | 57 +
28 drivers/net/ethernet/raeth/ra_netlink.c | 142 +
29 drivers/net/ethernet/raeth/ra_netlink.h | 10 +
30 drivers/net/ethernet/raeth/ra_qos.c | 655 +++
31 drivers/net/ethernet/raeth/ra_qos.h | 18 +
32 drivers/net/ethernet/raeth/ra_rfrw.c | 66 +
33 drivers/net/ethernet/raeth/ra_rfrw.h | 6 +
34 drivers/net/ethernet/raeth/raether.c | 6401 +++++++++++++++++++++
35 drivers/net/ethernet/raeth/raether.h | 126 +
36 drivers/net/ethernet/raeth/raether_hwlro.c | 347 ++
37 drivers/net/ethernet/raeth/raether_pdma.c | 1121 ++++
38 drivers/net/ethernet/raeth/raether_qdma.c | 1407 +++++
39 drivers/net/ethernet/raeth/raether_qdma_mt7623.c | 1020 ++++
40 drivers/net/ethernet/raeth/smb_hook.c | 17 +
41 drivers/net/ethernet/raeth/smb_nf.c | 177 +
42 drivers/net/ethernet/raeth/sync_write.h | 103 +
43 35 files changed, 20357 insertions(+)
44 create mode 100644 drivers/net/ethernet/raeth/Kconfig
45 create mode 100644 drivers/net/ethernet/raeth/Makefile
46 create mode 100644 drivers/net/ethernet/raeth/Makefile.release
47 create mode 100644 drivers/net/ethernet/raeth/csr_netlink.h
48 create mode 100755 drivers/net/ethernet/raeth/dvt/pkt_gen.c
49 create mode 100755 drivers/net/ethernet/raeth/dvt/pkt_gen_tcp_frag.c
50 create mode 100755 drivers/net/ethernet/raeth/dvt/pkt_gen_udp_frag.c
51 create mode 100755 drivers/net/ethernet/raeth/dvt/raether_pdma_dvt.c
52 create mode 100755 drivers/net/ethernet/raeth/dvt/raether_pdma_dvt.h
53 create mode 100644 drivers/net/ethernet/raeth/ethtool_readme.txt
54 create mode 100644 drivers/net/ethernet/raeth/mcast.c
55 create mode 100644 drivers/net/ethernet/raeth/mii_mgr.c
56 create mode 100644 drivers/net/ethernet/raeth/ra2882ethreg.h
57 create mode 100644 drivers/net/ethernet/raeth/ra_ethtool.c
58 create mode 100644 drivers/net/ethernet/raeth/ra_ethtool.h
59 create mode 100644 drivers/net/ethernet/raeth/ra_ioctl.h
60 create mode 100644 drivers/net/ethernet/raeth/ra_mac.c
61 create mode 100644 drivers/net/ethernet/raeth/ra_mac.h
62 create mode 100644 drivers/net/ethernet/raeth/ra_netlink.c
63 create mode 100644 drivers/net/ethernet/raeth/ra_netlink.h
64 create mode 100644 drivers/net/ethernet/raeth/ra_qos.c
65 create mode 100644 drivers/net/ethernet/raeth/ra_qos.h
66 create mode 100644 drivers/net/ethernet/raeth/ra_rfrw.c
67 create mode 100644 drivers/net/ethernet/raeth/ra_rfrw.h
68 create mode 100644 drivers/net/ethernet/raeth/raether.c
69 create mode 100644 drivers/net/ethernet/raeth/raether.h
70 create mode 100755 drivers/net/ethernet/raeth/raether_hwlro.c
71 create mode 100755 drivers/net/ethernet/raeth/raether_pdma.c
72 create mode 100644 drivers/net/ethernet/raeth/raether_qdma.c
73 create mode 100644 drivers/net/ethernet/raeth/raether_qdma_mt7623.c
74 create mode 100644 drivers/net/ethernet/raeth/smb_hook.c
75 create mode 100644 drivers/net/ethernet/raeth/smb_nf.c
76 create mode 100644 drivers/net/ethernet/raeth/sync_write.h
78 diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
79 index eadcb05..627e1d4 100644
80 --- a/drivers/net/ethernet/Kconfig
81 +++ b/drivers/net/ethernet/Kconfig
82 @@ -17,6 +17,7 @@ config MDIO
86 +source "drivers/net/ethernet/raeth/Kconfig"
87 source "drivers/net/ethernet/3com/Kconfig"
88 source "drivers/net/ethernet/adaptec/Kconfig"
89 source "drivers/net/ethernet/aeroflex/Kconfig"
90 diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
91 index 1367afc..abdd636 100644
92 --- a/drivers/net/ethernet/Makefile
93 +++ b/drivers/net/ethernet/Makefile
94 @@ -84,3 +84,4 @@ obj-$(CONFIG_NET_VENDOR_VIA) += via/
95 obj-$(CONFIG_NET_VENDOR_WIZNET) += wiznet/
96 obj-$(CONFIG_NET_VENDOR_XILINX) += xilinx/
97 obj-$(CONFIG_NET_VENDOR_XIRCOM) += xircom/
98 +obj-$(CONFIG_RAETH) += raeth/
99 diff --git a/drivers/net/ethernet/raeth/Kconfig b/drivers/net/ethernet/raeth/Kconfig
101 index 0000000..c252c85
103 +++ b/drivers/net/ethernet/raeth/Kconfig
114 + tristate "Ralink GMAC"
116 + This driver supports Ralink gigabit ethernet family of
121 + default y if (RALINK_MT7620 || RALINK_MT7621 || ARCH_MT7623)
124 +config RAETH_SCATTER_GATHER_RX_DMA
126 + default y if (RALINK_MT7620 || RALINK_MT7621 || ARCH_MT7623)
131 + prompt "Network BottomHalves"
133 + default RA_NETWORK_WORKQUEUE_BH
135 + config RA_NETWORK_TASKLET_BH
138 + config RA_NETWORK_WORKQUEUE_BH
146 +#config TASKLET_WORKQUEUE_SW
147 +# bool "Tasklet and Workqueue switch"
148 +# depends on RA_NETWORK_TASKLET_BH
150 +config RAETH_SKB_RECYCLE_2K
151 + bool "SKB Recycling"
154 +config RAETH_SPECIAL_TAG
155 + bool "Ralink Special Tag (0x810x)"
156 + depends on RAETH && RT_3052_ESW
158 +#config RAETH_JUMBOFRAME
159 +# bool "Jumbo Frame up to 4K bytes"
160 +# depends on RAETH && !(RALINK_RT3052 || RALINK_RT3352 || RALINK_RT5350 || RALINK_MT7628)
162 +config RAETH_CHECKSUM_OFFLOAD
163 + bool "TCP/UDP/IP checksum offload"
165 + depends on RAETH && !RALINK_RT2880
168 +# bool "When TX ring is full, inform kernel stop transmit and stop RX handler"
172 +#config RAETH_8023AZ_EEE
173 +# bool "Enable Embedded Switch EEE"
175 +# depends on RAETH && (RALINK_MT7620 || RALINK_MT7621 || RALINK_MT7628)
180 + bool "32bytes TX/RX description"
182 + depends on RAETH && (RALINK_MT7620 || RALINK_MT7621)
184 + At this moment, you cannot enable 32B description with Multiple RX ring at the same time.
187 + bool "LRO (Large Receive Offload)"
189 + depends on RAETH && (RALINK_RT6855A || RALINK_MT7620 || RALINK_MT7621 || ARCH_MT7623)
192 + bool "HW LRO (Large Receive Offload)"
196 +config RAETH_HW_LRO_DBG
197 + bool "HW LRO Debug"
199 + depends on RAETH_HW_LRO
201 +config RAETH_HW_LRO_AUTO_ADJ_DBG
202 + bool "HW LRO Auto Adjustment Debug"
204 + depends on RAETH_HW_LRO
206 +config RAETH_HW_LRO_REASON_DBG
207 + bool "HW LRO Flush Reason Debug"
209 + depends on RAETH_HW_LRO
211 +config RAETH_HW_VLAN_TX
212 + bool "Transmit VLAN HW (DoubleVLAN is not supported)"
213 + depends on RAETH && !(RALINK_RT5350 || RALINK_MT7628)
215 + Please disable HW_VLAN_TX if you need double vlan
217 +config RAETH_HW_VLAN_RX
218 + bool "Receive VLAN HW (DoubleVLAN is not supported)"
219 + depends on RAETH && RALINK_MT7621
221 + Please disable HW_VLAN_RX if you need double vlan
224 + bool "TSOV4 (Tcp Segmentation Offload)"
225 + depends on (RAETH_HW_VLAN_TX && (RALINK_RT6855 || RALINK_RT6855A || RALINK_MT7620))||((RALINK_MT7621 || ARCH_MT7623) &&(RAETH_HW_VLAN_TX || RAETH_GMAC2 ))
228 + bool "TSOV6 (Tcp Segmentation Offload)"
229 + depends on RAETH_TSO
231 +config RAETH_RW_PDMAPTR_FROM_VAR
233 + default y if RALINK_RT6855A || RALINK_MT7620
237 + bool "Samba Speedup Module"
240 +config SPLICE_NET_SUPPORT
241 + default y if MTK_SMB_HOOK
242 + depends on MTK_SMB_HOOK
248 + depends on RAETH && (RALINK_MT7621 || ARCH_MT7623)
250 +config RAETH_PDMA_DVT
252 + depends on RAETH_DVT
254 +config RAETH_PDMA_LEGACY_MODE
255 + bool "PDMA legacy mode"
256 + depends on RAETH_PDMA_DVT
259 +# bool "QoS Feature"
260 +# depends on RAETH && !RALINK_RT2880 && !RALINK_MT7620 && !RALINK_MT7621 && !RAETH_TSO
264 + depends on RAETH_QOS
265 + default DSCP_QOS_DSCP
267 +config RAETH_QOS_DSCP_BASED
269 + depends on RAETH_QOS
271 +config RAETH_QOS_VPRI_BASED
273 + depends on RAETH_QOS
278 + bool "Choose QDMA instead PDMA"
280 + depends on RAETH && (RALINK_MT7621 || ARCH_MT7623)
282 +config RAETH_QDMATX_QDMARX
283 + bool "Choose QDMA RX instead PDMA RX"
285 + depends on RAETH_QDMA && !RALINK_MT7621
290 + prompt "GMAC is connected to"
292 + default GE1_RGMII_FORCE_1000
294 +config GE1_MII_FORCE_100
295 + bool "MII_FORCE_100 (10/100M Switch)"
296 + depends on (RALINK_RT2880 || RALINK_RT3883 || RALINK_MT7621)
299 + bool "MII_AN (100Phy)"
300 + depends on (RALINK_RT2880 || RALINK_RT3883 || RALINK_MT7621)
302 +config GE1_RVMII_FORCE_100
303 + bool "RvMII_FORCE_100 (CPU)"
304 + depends on (RALINK_RT2880 || RALINK_RT3883 || RALINK_MT7621)
306 +config GE1_RGMII_FORCE_1000
307 + bool "RGMII_FORCE_1000 (GigaSW, CPU)"
308 + depends on (RALINK_RT2880 || RALINK_RT3883)
311 +config GE1_RGMII_FORCE_1000
312 + bool "RGMII_FORCE_1000 (GigaSW, CPU)"
313 + depends on (RALINK_MT7621 || ARCH_MT7623)
316 +config GE1_TRGMII_FORCE_1200
317 + bool "TRGMII_FORCE_1200 (GigaSW, CPU)"
318 + depends on (RALINK_MT7621)
321 +config GE1_TRGMII_FORCE_2000
322 + bool "TRGMII_FORCE_2000 (GigaSW, CPU, for MT7623 and MT7683)"
323 + depends on (ARCH_MT7623)
326 +config GE1_TRGMII_FORCE_2600
327 + bool "TRGMII_FORCE_2600 (GigaSW, CPU, MT7623 only)"
328 + depends on (ARCH_MT7623)
332 + bool "RGMII_AN (GigaPhy)"
333 + depends on (RALINK_RT2880 || RALINK_RT3883 || RALINK_MT7621 || ARCH_MT7623)
335 +config GE1_RGMII_NONE
336 + bool "NONE (NO CONNECT)"
337 + depends on (RALINK_MT7621 || ARCH_MT7623)
344 + depends on RAETH_QDMA && (ARCH_MT7623)
348 + bool "Ralink Embedded Switch"
350 + depends on RAETH && (RALINK_RT3052 || RALINK_RT3352 || RALINK_RT5350 || RALINK_RT6855 || RALINK_RT6855A || RALINK_MT7620 || RALINK_MT7621 || RALINK_MT7628 || ARCH_MT7623)
352 +config LAN_WAN_SUPPORT
353 + bool "LAN/WAN Partition"
354 + depends on RAETH && (RAETH_ROUTER || RT_3052_ESW)
356 +config ETH_MEMORY_OPTIMIZATION
357 + bool "Ethernet memory optimization"
358 + depends on RALINK_MT7628
360 +config ETH_ONE_PORT_ONLY
361 + bool "One Port Only"
362 + depends on RALINK_MT7628
365 + prompt "Switch Board Layout Type"
366 + depends on LAN_WAN_SUPPORT || P5_RGMII_TO_MAC_MODE || GE1_RGMII_FORCE_1000 || GE1_TRGMII_FORCE_1200 || GE2_RGMII_FORCE_1000
376 +config RALINK_VISTA_BASIC
377 + bool 'Vista Basic Logo for IC+ 175C'
378 + depends on LAN_WAN_SUPPORT && (RALINK_RT2880 || RALINK_RT3883)
380 +config ESW_DOUBLE_VLAN_TAG
382 + default y if RT_3052_ESW
384 +config RAETH_HAS_PORT4
385 + bool "Port 4 Support"
386 + depends on RAETH && RALINK_MT7620
388 + prompt "Target Mode"
389 + depends on RAETH_HAS_PORT4
390 + default P4_RGMII_TO_MAC_MODE
392 + config P4_MAC_TO_PHY_MODE
393 + bool "Giga_Phy (RGMII)"
394 + config GE_RGMII_MT7530_P0_AN
395 + bool "GE_RGMII_MT7530_P0_AN (MT7530 Internal GigaPhy)"
396 + config GE_RGMII_MT7530_P4_AN
397 + bool "GE_RGMII_MT7530_P4_AN (MT7530 Internal GigaPhy)"
398 + config P4_RGMII_TO_MAC_MODE
399 + bool "Giga_SW/iNIC (RGMII)"
400 + config P4_MII_TO_MAC_MODE
401 + bool "External_CPU (MII_RvMII)"
402 + config P4_RMII_TO_MAC_MODE
403 + bool "External_CPU (RvMII_MII)"
406 +config MAC_TO_GIGAPHY_MODE_ADDR2
407 + hex "Port4 Phy Address"
409 + depends on P4_MAC_TO_PHY_MODE
411 +config RAETH_HAS_PORT5
412 + bool "Port 5 Support"
413 + depends on RAETH && (RALINK_RT3052 || RALINK_RT3352 || RALINK_RT6855 || RALINK_RT6855A || RALINK_MT7620)
415 + prompt "Target Mode"
416 + depends on RAETH_HAS_PORT5
417 + default P5_RGMII_TO_MAC_MODE
419 + config P5_MAC_TO_PHY_MODE
420 + bool "Giga_Phy (RGMII)"
421 + config P5_RGMII_TO_MAC_MODE
422 + bool "Giga_SW/iNIC (RGMII)"
423 + config P5_RGMII_TO_MT7530_MODE
424 + bool "MT7530 Giga_SW (RGMII)"
425 + depends on RALINK_MT7620
426 + config P5_MII_TO_MAC_MODE
427 + bool "External_CPU (MII_RvMII)"
428 + config P5_RMII_TO_MAC_MODE
429 + bool "External_CPU (RvMII_MII)"
432 +config MAC_TO_GIGAPHY_MODE_ADDR
433 + hex "GE1 Phy Address"
435 + depends on GE1_MII_AN || GE1_RGMII_AN
437 +config MAC_TO_GIGAPHY_MODE_ADDR
438 + hex "Port5 Phy Address"
440 + depends on P5_MAC_TO_PHY_MODE
443 + bool "GMAC2 Support"
444 + depends on RAETH && (RALINK_RT3883 || RALINK_MT7621 || ARCH_MT7623)
447 + prompt "GMAC2 is connected to"
448 + depends on RAETH_GMAC2
449 + default GE2_RGMII_AN
451 +config GE2_MII_FORCE_100
452 + bool "MII_FORCE_100 (10/100M Switch)"
453 + depends on RAETH_GMAC2
456 + bool "MII_AN (100Phy)"
457 + depends on RAETH_GMAC2
459 +config GE2_RVMII_FORCE_100
460 + bool "RvMII_FORCE_100 (CPU)"
461 + depends on RAETH_GMAC2
463 +config GE2_RGMII_FORCE_1000
464 + bool "RGMII_FORCE_1000 (GigaSW, CPU)"
465 + depends on RAETH_GMAC2
469 + bool "RGMII_AN (External GigaPhy)"
470 + depends on RAETH_GMAC2
472 +config GE2_INTERNAL_GPHY
473 + bool "RGMII_AN (Internal GigaPhy)"
474 + depends on RAETH_GMAC2
475 + select LAN_WAN_SUPPORT
479 +config GE_RGMII_INTERNAL_P0_AN
481 + depends on GE2_INTERNAL_GPHY
482 + default y if WAN_AT_P0
484 +config GE_RGMII_INTERNAL_P4_AN
486 + depends on GE2_INTERNAL_GPHY
487 + default y if WAN_AT_P4
489 +config MAC_TO_GIGAPHY_MODE_ADDR2
491 + default 0 if GE_RGMII_INTERNAL_P0_AN
492 + default 4 if GE_RGMII_INTERNAL_P4_AN
493 + depends on GE_RGMII_INTERNAL_P0_AN || GE_RGMII_INTERNAL_P4_AN
495 +config MAC_TO_GIGAPHY_MODE_ADDR2
496 + hex "GE2 Phy Address"
498 + depends on GE2_MII_AN || GE2_RGMII_AN
503 +default y if GE1_MII_FORCE_100 || GE2_MII_FORCE_100 || GE1_RVMII_FORCE_100 || GE2_RVMII_FORCE_100
506 +config MAC_TO_MAC_MODE
508 +default y if GE1_RGMII_FORCE_1000 || GE2_RGMII_FORCE_1000
509 +depends on (RALINK_RT2880 || RALINK_RT3883)
514 +default y if GE1_RGMII_AN || GE2_RGMII_AN
519 +default y if GE1_MII_AN || GE2_MII_AN
520 diff --git a/drivers/net/ethernet/raeth/Makefile b/drivers/net/ethernet/raeth/Makefile
522 index 0000000..563af05
524 +++ b/drivers/net/ethernet/raeth/Makefile
526 +obj-$(CONFIG_RAETH) += raeth.o
527 +raeth-objs := ra_mac.o mii_mgr.o ra_rfrw.o
529 +ifeq ($(CONFIG_MTK_SMB_HOOK),y)
532 +smb-objs := smb_nf.o
535 +#EXTRA_CFLAGS += -DCONFIG_RAETH_MULTIPLE_RX_RING
537 +ifeq ($(CONFIG_RAETH_QOS),y)
538 +raeth-objs += ra_qos.o
541 +ifeq ($(CONFIG_RAETH_QDMA),y)
542 +raeth-objs += raether_qdma.o
545 +ifneq ($(CONFIG_RAETH_QDMA),y)
546 +raeth-objs += raether_pdma.o
549 +raeth-objs += raether.o
551 +ifeq ($(CONFIG_ETHTOOL),y)
552 +raeth-objs += ra_ethtool.o
555 +ifeq ($(CONFIG_RALINK_RT3052_MP2),y)
556 +raeth-objs += mcast.o
559 +ifeq ($(CONFIG_RAETH_NETLINK),y)
560 +raeth-objs += ra_netlink.o
563 +ifeq ($(CONFIG_RAETH_PDMA_DVT),y)
564 +raeth-objs += dvt/raether_pdma_dvt.o
565 +obj-m += dvt/pkt_gen.o
566 +obj-m += dvt/pkt_gen_udp_frag.o
567 +obj-m += dvt/pkt_gen_tcp_frag.o
570 +ifeq ($(CONFIG_RAETH_HW_LRO),y)
571 +raeth-objs += raether_hwlro.o
574 +ifeq ($(CONFIG_RAETH_GMAC2),y)
575 +EXTRA_CFLAGS += -DCONFIG_PSEUDO_SUPPORT
578 +ifeq ($(CONFIG_ETH_MEMORY_OPTIMIZATION),y)
579 +EXTRA_CFLAGS += -DMEMORY_OPTIMIZATION
582 +ifeq ($(CONFIG_RT2860V2_AP_MEMORY_OPTIMIZATION),y)
583 +EXTRA_CFLAGS += -DMEMORY_OPTIMIZATION
586 +ifeq ($(CONFIG_RA_NETWORK_WORKQUEUE_BH),y)
587 +EXTRA_CFLAGS += -DWORKQUEUE_BH
590 +ifeq ($(CONFIG_TASKLET_WORKQUEUE_SW),y)
591 +EXTRA_CFLAGS += -DTASKLET_WORKQUEUE_SW
593 diff --git a/drivers/net/ethernet/raeth/Makefile.release b/drivers/net/ethernet/raeth/Makefile.release
595 index 0000000..ecdeeda
597 +++ b/drivers/net/ethernet/raeth/Makefile.release
599 +obj-$(CONFIG_RAETH) += raeth.o
600 +raeth-objs := ra_mac.o mii_mgr.o ra_rfrw.o
602 +ifeq ($(CONFIG_MTK_SMB_HOOK),y)
605 +smb-objs := smb_nf.o
608 +#EXTRA_CFLAGS += -DCONFIG_RAETH_MULTIPLE_RX_RING
610 +ifeq ($(CONFIG_RAETH_QOS),y)
611 +raeth-objs += ra_qos.o
614 +ifeq ($(CONFIG_RAETH_QDMA),y)
615 +raeth-objs += raether_qdma.o
618 +ifneq ($(CONFIG_RAETH_QDMA),y)
619 +raeth-objs += raether_pdma.o
622 +raeth-objs += raether.o
624 +ifeq ($(CONFIG_ETHTOOL),y)
625 +raeth-objs += ra_ethtool.o
628 +ifeq ($(CONFIG_RALINK_RT3052_MP2),y)
629 +raeth-objs += mcast.o
632 +ifeq ($(CONFIG_RAETH_NETLINK),y)
633 +raeth-objs += ra_netlink.o
636 +ifeq ($(CONFIG_RAETH_HW_LRO),y)
637 +raeth-objs += raether_hwlro.o
640 +ifeq ($(CONFIG_RAETH_GMAC2),y)
641 +EXTRA_CFLAGS += -DCONFIG_PSEUDO_SUPPORT
644 +ifeq ($(CONFIG_ETH_MEMORY_OPTIMIZATION),y)
645 +EXTRA_CFLAGS += -DMEMORY_OPTIMIZATION
648 +ifeq ($(CONFIG_RT2860V2_AP_MEMORY_OPTIMIZATION),y)
649 +EXTRA_CFLAGS += -DMEMORY_OPTIMIZATION
652 +ifeq ($(CONFIG_RA_NETWORK_WORKQUEUE_BH),y)
653 +EXTRA_CFLAGS += -DWORKQUEUE_BH
656 +ifeq ($(CONFIG_TASKLET_WORKQUEUE_SW),y)
657 +EXTRA_CFLAGS += -DTASKLET_WORKQUEUE_SW
659 diff --git a/drivers/net/ethernet/raeth/csr_netlink.h b/drivers/net/ethernet/raeth/csr_netlink.h
661 index 0000000..add7745
663 +++ b/drivers/net/ethernet/raeth/csr_netlink.h
665 +#ifndef CSR_NETLINK_H
666 +#define CSR_NETLINK_H
668 +#define CSR_NETLINK 30
673 +#define RALINK_CSR_GROUP 2882
675 +typedef struct rt2880_csr_msg {
678 + unsigned long address;
679 + unsigned long default_value;
680 + unsigned long reserved_bits; /* 1 : not reserved, 0 : reserved */
681 + unsigned long write_mask;
682 + unsigned long write_value;
686 +int csr_msg_send(CSR_MSG* msg);
687 +int csr_msg_recv(void);
689 +// static CSR_MSG input_csr_msg;
692 diff --git a/drivers/net/ethernet/raeth/dvt/pkt_gen.c b/drivers/net/ethernet/raeth/dvt/pkt_gen.c
694 index 0000000..b351b21
696 +++ b/drivers/net/ethernet/raeth/dvt/pkt_gen.c
698 +//#include <linux/config.h>
699 +#include <linux/version.h>
700 +#include <linux/module.h>
701 +#include <linux/skbuff.h>
702 +#include <linux/kernel.h>
703 +#include <linux/init.h>
704 +#include <linux/netfilter.h>
705 +#include <linux/netdevice.h>
706 +#include <linux/types.h>
707 +#include <asm/uaccess.h>
708 +#include <linux/moduleparam.h>
710 +char *ifname="eth3";
712 +static int32_t PktGenInitMod(void)
715 + struct net_dev *dev;
716 + struct sk_buff *skb;
719 + unsigned char pkt[]={
720 + //0xff, 0xff, 0xff, 0xff, 0xff, 0xff, // dest bcast mac
721 + 0x00, 0x21, 0x86, 0xee, 0xe3, 0x95, // dest macA
722 + //0x00, 0x30, 0xdb, 0x02, 0x02, 0x01, // dest macB
723 + 0x00, 0x0c, 0x43, 0x28, 0x80, 0x33, // src mac
724 + 0x81, 0x00, // vlan tag
725 + //0x81, 0x10, // vlan tag
726 + //0x87, 0x39, // do not learn
727 + //0xc1, 0x03, // vlan tag SA=0, VID=2, LV=1
728 + 0x00, 0x03, // pri=0, vlan=3
729 + 0x08, 0x00, // eth type=ip
730 + 0x45, 0x00, 0x00, 0x30, 0x12, 0x34, 0x40, 0x00, 0xff, 0x06,
731 + 0x40, 0x74, 0x0a, 0x0a, 0x1e, 0x0a, 0x0a, 0x0a, 0x1e, 0x0b,
732 + 0x00, 0x1e, 0x00, 0x28, 0x00, 0x1c, 0x81, 0x06, 0x00, 0x00,
733 + 0x00, 0x00, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
734 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
736 + skb = alloc_skb(256, GFP_ATOMIC);
738 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
739 + if((dev=dev_get_by_name(&init_net,ifname))){
741 + if((dev=dev_get_by_name(ifname))){
747 + skb_put(skb,sizeof(pkt));
748 + memcpy(skb->data, pkt, sizeof(pkt));
750 + printk("send pkt(len=%d) to %s\n", skb->len, skb->dev->name);
753 + for(i=0;i<sizeof(pkt);i++){
757 + printk("%02X-",skb->data[i]);
760 + dev_queue_xmit(skb);
762 + printk("interface %s not found\n",ifname);
769 +static void PktGenCleanupMod(void)
773 +module_init(PktGenInitMod);
774 +module_exit(PktGenCleanupMod);
775 +#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,12)
776 +MODULE_PARM (ifname, "s");
778 +module_param (ifname, charp, 0);
781 +MODULE_DESCRIPTION("Ralink PktGen Module");
782 +MODULE_AUTHOR("Steven Liu");
783 +MODULE_LICENSE("Proprietary");
784 +MODULE_PARM_DESC (ifname, "interface name");
786 diff --git a/drivers/net/ethernet/raeth/dvt/pkt_gen_tcp_frag.c b/drivers/net/ethernet/raeth/dvt/pkt_gen_tcp_frag.c
788 index 0000000..e78c65a
790 +++ b/drivers/net/ethernet/raeth/dvt/pkt_gen_tcp_frag.c
792 +//#include <linux/config.h>
793 +#include <linux/version.h>
794 +#include <linux/module.h>
795 +#include <linux/skbuff.h>
796 +#include <linux/kernel.h>
797 +#include <linux/init.h>
798 +#include <linux/netfilter.h>
799 +#include <linux/netdevice.h>
800 +#include <linux/types.h>
801 +#include <asm/uaccess.h>
802 +#include <linux/moduleparam.h>
804 +char *ifname="eth3";
807 +static int32_t PktGenInitMod(void)
809 + unsigned char pkt_1[]={
810 + 0x00, 0x21, 0x86, 0xee, 0xe3, 0x90, // dest mac
811 + 0x00, 0x0c, 0x43, 0x28, 0x80, 0x33, // src mac
812 + 0x08, 0x00, // type: ip
813 + 0x45, 0x00, 0x00, 0x34, // ip: ..., total len (0x034 = 52)
814 + 0xa1, 0x78, 0x20, 0x00, // ip: id, frag, frag offset
815 + 0x80, 0x06, 0x63, 0x07, // ip: ttl, protocol, hdr checksum (0x6307)
816 + 0x0a, 0x0a, 0x1e, 0x7b, // src ip (10.10.30.123)
817 + 0x0a, 0x0a, 0x1e, 0x05, // dst ip (10.10.30.5)
818 + 0x0d, 0xd5, //tcp src port
819 + 0x13, 0x89, //tcp dst port
820 + 0x40, 0xf5, 0x15, 0x04, //tcp sequence number
821 + 0xf6, 0x4f, 0x1e, 0x31, //tcp ack number
822 + 0x50, 0x10, 0xfc, 0x00, //tcp flags, win size
823 + 0xf1, 0xfe, 0x00, 0x00, //tcp checksum (0xf1fe)
824 + 0x01, 0x02, 0x03, 0x04, 0x05, //payload (12 bytes)
825 + 0x06, 0x07, 0x08, 0x09, 0x0a,
829 + unsigned char pkt_2[]={
830 + 0x00, 0x21, 0x86, 0xee, 0xe3, 0x90, // dest mac
831 + 0x00, 0x0c, 0x43, 0x28, 0x80, 0x33, // src mac
832 + 0x08, 0x00, // type: ip
833 + 0x45, 0x00, 0x00, 0x20, // ip: ..., total len (0x020 = 32)
834 + 0xa1, 0x78, 0x00, 0x04, // ip: id, frag, frag offset (32)
835 + 0x40, 0x11, 0x63, 0x07, // ip: ttl, protocol, hdr checksum (0x6307)
836 + 0x0a, 0x0a, 0x1e, 0x7b, // src ip (10.10.30.123)
837 + 0x0a, 0x0a, 0x1e, 0x05, // dst ip (10.10.30.5)
838 + 0x11, 0x12, 0x13, 0x14, 0x15, //payload (12 bytes)
839 + 0x16, 0x17, 0x18, 0x19, 0x1a,
843 + struct net_dev *dev;
844 + struct sk_buff *skb_1;
845 + struct sk_buff *skb_2;
848 + skb_1 = alloc_skb(256, GFP_ATOMIC);
849 + skb_2 = alloc_skb(256, GFP_ATOMIC);
854 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
855 + if((dev=dev_get_by_name(&init_net,ifname))){
857 + if((dev=dev_get_by_name(ifname))){
861 + skb_put(skb_1,sizeof(pkt_1));
862 + memcpy(skb_1->data, pkt_1, sizeof(pkt_1));
864 + printk("send pkt(len=%d) to %s\n", skb_1->len, skb_1->dev->name);
867 + for(i=0;i<sizeof(pkt_1);i++){
871 + printk("%02X-",skb_1->data[i]);
874 + dev_queue_xmit(skb_1);
876 + printk("interface %s not found\n",ifname);
883 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
884 + if((dev=dev_get_by_name(&init_net,ifname))){
886 + if((dev=dev_get_by_name(ifname))){
890 + skb_put(skb_2,sizeof(pkt_2));
891 + memcpy(skb_2->data, pkt_2, sizeof(pkt_2));
893 + printk("send pkt(len=%d) to %s\n", skb_2->len, skb_2->dev->name);
896 + for(i=0;i<sizeof(pkt_2);i++){
900 + printk("%02X-",skb_2->data[i]);
903 + dev_queue_xmit(skb_2);
905 + printk("interface %s not found\n",ifname);
913 +static void PktGenCleanupMod(void)
917 +module_init(PktGenInitMod);
918 +module_exit(PktGenCleanupMod);
919 +#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,12)
920 +MODULE_PARM (ifname, "s");
922 +module_param (ifname, charp, 0);
925 +MODULE_DESCRIPTION("Ralink PktGen Module");
926 +MODULE_AUTHOR("Steven Liu");
927 +MODULE_LICENSE("Proprietary");
928 +MODULE_PARM_DESC (ifname, "interface name");
930 diff --git a/drivers/net/ethernet/raeth/dvt/pkt_gen_udp_frag.c b/drivers/net/ethernet/raeth/dvt/pkt_gen_udp_frag.c
932 index 0000000..917e7ad
934 +++ b/drivers/net/ethernet/raeth/dvt/pkt_gen_udp_frag.c
936 +//#include <linux/config.h>
937 +#include <linux/version.h>
938 +#include <linux/module.h>
939 +#include <linux/skbuff.h>
940 +#include <linux/kernel.h>
941 +#include <linux/init.h>
942 +#include <linux/netfilter.h>
943 +#include <linux/netdevice.h>
944 +#include <linux/types.h>
945 +#include <asm/uaccess.h>
946 +#include <linux/moduleparam.h>
948 +char *ifname="eth3";
951 +static int32_t PktGenInitMod(void)
954 + unsigned char pkt_0[]={
955 +// 0x00, 0x21, 0x86, 0xee, 0xe3, 0x95, // dest mac
956 + 0x00, 0x21, 0x86, 0xee, 0xe3, 0x90, // dest mac
957 + 0x00, 0x0c, 0x43, 0x28, 0x80, 0x33, // src mac
958 + 0x08, 0x00, // type: ip
959 + 0x45, 0x00, 0x00, 0x26, // ip: ..., total len (0x026 = 38)
960 +// 0xa1, 0x78, 0x20, 0x00, // ip: id, frag, frag offset
961 + 0xa1, 0x78, 0x40, 0x00, // ip: id, frag, frag offset
962 + 0x40, 0x11, 0x63, 0x07, // ip: ttl, protocol, hdr checksum (0x6307)
963 + 0x0a, 0x0a, 0x1e, 0x7b, // src ip (10.10.30.123)
964 +// 0x0a, 0x0a, 0x1e, 0x03, // dst ip (10.10.30.3)
965 + 0x0a, 0x0a, 0x1e, 0x05, // dst ip (10.10.30.5)
966 + 0xca, 0x7b, //udp src port
967 + 0x13, 0x89, //udp dst port
968 + 0x00, 0x12, //udp len (0x01c = 18)
969 + 0x2f, 0x96, //udp checksum (0x2f96)
970 + 0x01, 0x02, 0x03, 0x04, 0x05, //payload (10 bytes)
971 + 0x06, 0x07, 0x08, 0x09, 0x0a
975 + unsigned char pkt_1[]={
976 +// 0x00, 0x21, 0x86, 0xee, 0xe3, 0x95, // dest mac
977 + 0x00, 0x21, 0x86, 0xee, 0xe3, 0x90, // dest mac
978 + 0x00, 0x0c, 0x43, 0x28, 0x80, 0x33, // src mac
979 + 0x08, 0x00, // type: ip
980 + 0x45, 0x00, 0x00, 0x24, // ip: ..., total len (0x024 = 36)
981 + 0xa1, 0x78, 0x20, 0x00, // ip: id, frag, frag offset
982 +// 0xa1, 0x78, 0x40, 0x00, // ip: id, frag, frag offset
983 + 0x40, 0x11, 0x63, 0x07, // ip: ttl, protocol, hdr checksum (0x6307)
984 + 0x0a, 0x0a, 0x1e, 0x7b, // src ip (10.10.30.123)
985 +// 0x0a, 0x0a, 0x1e, 0x03, // dst ip (10.10.30.3)
986 + 0x0a, 0x0a, 0x1e, 0x05, // dst ip (10.10.30.5)
987 + 0xca, 0x7b, //udp src port
988 + 0x13, 0x89, //udp dst port
989 + 0x00, 0x1a, //udp len (0x01a = 26)
990 + 0x2f, 0x96, //udp checksum (0x2f96)
991 + 0x01, 0x02, 0x03, 0x04, 0x05, //payload (8 bytes)
995 + unsigned char pkt_2[]={
996 +// 0x00, 0x21, 0x86, 0xee, 0xe3, 0x95, // dest mac
997 + 0x00, 0x21, 0x86, 0xee, 0xe3, 0x90, // dest mac
998 + 0x00, 0x0c, 0x43, 0x28, 0x80, 0x33, // src mac
999 + 0x08, 0x00, // type: ip
1000 + 0x45, 0x00, 0x00, 0x1e, // ip: ..., total len (0x01e = 30)
1001 + 0xa1, 0x78, 0x00, 0x02, // ip: id, frag, frag offset (16)
1002 + 0x40, 0x11, 0x63, 0x07, // ip: ttl, protocol, hdr checksum (0x6307)
1003 + 0x0a, 0x0a, 0x1e, 0x7b, // src ip (10.10.30.123)
1004 +// 0x0a, 0x0a, 0x1e, 0x03, // dst ip (10.10.30.3)
1005 + 0x0a, 0x0a, 0x1e, 0x05, // dst ip (10.10.30.5)
1006 + 0x11, 0x12, 0x13, 0x14, 0x15, //payload (10 bytes)
1007 + 0x16, 0x17, 0x18, 0x19, 0x1a
1010 + struct net_dev *dev;
1011 +// struct sk_buff *skb_0;
1012 + struct sk_buff *skb_1;
1013 + struct sk_buff *skb_2;
1016 +// skb_0 = alloc_skb(256, GFP_ATOMIC);
1017 + skb_1 = alloc_skb(256, GFP_ATOMIC);
1018 + skb_2 = alloc_skb(256, GFP_ATOMIC);
1021 +/* send packet 0 */
1022 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
1023 + if((dev=dev_get_by_name(&init_net,ifname))){
1025 + if((dev=dev_get_by_name(ifname))){
1029 + skb_put(skb_0,sizeof(pkt_0));
1030 + memcpy(skb_0->data, pkt_0, sizeof(pkt_0));
1032 + printk("send pkt(len=%d) to %s\n", skb_0->len, skb_0->dev->name);
1035 + for(i=0;i<sizeof(pkt_0);i++){
1039 + printk("%02X-",skb_0->data[i]);
1042 + dev_queue_xmit(skb_0);
1044 + printk("interface %s not found\n",ifname);
1050 +/* send packet 1 */
1051 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
1052 + if((dev=dev_get_by_name(&init_net,ifname))){
1054 + if((dev=dev_get_by_name(ifname))){
1058 + skb_put(skb_1,sizeof(pkt_1));
1059 + memcpy(skb_1->data, pkt_1, sizeof(pkt_1));
1061 + printk("send pkt(len=%d) to %s\n", skb_1->len, skb_1->dev->name);
1064 + for(i=0;i<sizeof(pkt_1);i++){
1068 + printk("%02X-",skb_1->data[i]);
1071 + dev_queue_xmit(skb_1);
1073 + printk("interface %s not found\n",ifname);
1079 +/* send packet 2 */
1080 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
1081 + if((dev=dev_get_by_name(&init_net,ifname))){
1083 + if((dev=dev_get_by_name(ifname))){
1087 + skb_put(skb_2,sizeof(pkt_2));
1088 + memcpy(skb_2->data, pkt_2, sizeof(pkt_2));
1090 + printk("send pkt(len=%d) to %s\n", skb_2->len, skb_2->dev->name);
1093 + for(i=0;i<sizeof(pkt_2);i++){
1097 + printk("%02X-",skb_2->data[i]);
1100 + dev_queue_xmit(skb_2);
1102 + printk("interface %s not found\n",ifname);
1110 +static void PktGenCleanupMod(void)
1114 +module_init(PktGenInitMod);
1115 +module_exit(PktGenCleanupMod);
1116 +#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,12)
1117 +MODULE_PARM (ifname, "s");
1119 +module_param (ifname, charp, 0);
1122 +MODULE_DESCRIPTION("Ralink PktGen Module");
1123 +MODULE_AUTHOR("Steven Liu");
1124 +MODULE_LICENSE("Proprietary");
1125 +MODULE_PARM_DESC (ifname, "interface name");
1127 diff --git a/drivers/net/ethernet/raeth/dvt/raether_pdma_dvt.c b/drivers/net/ethernet/raeth/dvt/raether_pdma_dvt.c
1128 new file mode 100755
1129 index 0000000..971a821
1131 +++ b/drivers/net/ethernet/raeth/dvt/raether_pdma_dvt.c
1133 +#include <linux/module.h>
1134 +#include <linux/version.h>
1135 +#include <linux/kernel.h>
1136 +#include <linux/types.h>
1137 +#include <linux/pci.h>
1138 +#include <linux/init.h>
1139 +#include <linux/skbuff.h>
1140 +#include <linux/if_vlan.h>
1141 +#include <linux/if_ether.h>
1142 +#include <linux/fs.h>
1143 +#include <asm/uaccess.h>
1144 +#include <asm/rt2880/surfboardint.h>
1145 +#if defined(CONFIG_RAETH_TSO)
1146 +#include <linux/tcp.h>
1147 +#include <net/ipv6.h>
1148 +#include <linux/ip.h>
1149 +#include <net/ip.h>
1150 +#include <net/tcp.h>
1151 +#include <linux/in.h>
1152 +#include <linux/ppp_defs.h>
1153 +#include <linux/if_pppox.h>
1155 +#if defined(CONFIG_RAETH_LRO)
1156 +#include <linux/inet_lro.h>
1158 +#include <linux/delay.h>
1159 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 35)
1160 +#include <linux/sched.h>
1163 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 0)
1164 +#include <asm/rt2880/rt_mmap.h>
1166 +#include <linux/libata-compat.h>
1169 +#include "../ra2882ethreg.h"
1170 +#include "../raether.h"
1171 +#include "../ra_mac.h"
1172 +#include "../ra_ioctl.h"
1173 +#include "../ra_rfrw.h"
1174 +#ifdef CONFIG_RAETH_NETLINK
1175 +#include "../ra_netlink.h"
1177 +#if defined(CONFIG_RAETH_QOS)
1178 +#include "../ra_qos.h"
1180 +#include "raether_pdma_dvt.h"
1182 +/* Global variables */
1183 +static unsigned int g_pdma_dvt_show_config;
1184 +static unsigned int g_pdma_dvt_rx_test_config;
1185 +static unsigned int g_pdma_dvt_tx_test_config;
1186 +static unsigned int g_pdma_dvt_debug_test_config;
1187 +static unsigned int g_pdma_dvt_lro_test_config;
1189 +unsigned int g_pdma_dev_lanport = 0;
1190 +unsigned int g_pdma_dev_wanport = 0;
1192 +void skb_dump(struct sk_buff *sk)
1196 + printk("skb_dump: from %s with len %d (%d) headroom=%d tailroom=%d\n",
1197 + sk->dev ? sk->dev->name : "ip stack", sk->len, sk->truesize,
1198 + skb_headroom(sk), skb_tailroom(sk));
1200 + /* for(i=(unsigned int)sk->head;i<=(unsigned int)sk->tail;i++) { */
1201 + /* for(i=(unsigned int)sk->head;i<=(unsigned int)sk->data+20;i++) { */
1202 + for (i = (unsigned int)sk->head; i <= (unsigned int)sk->data + 60; i++) {
1203 + if ((i % 20) == 0)
1205 + if (i == (unsigned int)sk->data)
1207 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 21)
1208 + if (i == (unsigned int)sk->transport_header)
1210 + if (i == (unsigned int)sk->network_header)
1212 + if (i == (unsigned int)sk->mac_header)
1215 + if (i == (unsigned int)sk->h.raw)
1217 + if (i == (unsigned int)sk->nh.raw)
1219 + if (i == (unsigned int)sk->mac.raw)
1222 + printk("%02X-", *((unsigned char *)i));
1223 + if (i == (unsigned int)sk->tail)
1229 +#if defined(CONFIG_RAETH_HW_LRO)
1230 +/* PDMA LRO test functions start */
1231 +int pdma_lro_disable_dvt(void)
1233 + unsigned int regVal = 0;
1235 + printk("pdma_lro_disable_dvt()\n");
1237 + /* 1. Invalid LRO ring1~3 */
1238 + SET_PDMA_RXRING_VALID(ADMA_RX_RING1, 0);
1239 + SET_PDMA_RXRING_VALID(ADMA_RX_RING2, 0);
1240 + SET_PDMA_RXRING_VALID(ADMA_RX_RING3, 0);
242 + /* 2. Polling relinquish */
1243 + while (sysRegRead(ADMA_LRO_CTRL_DW0) & PDMA_LRO_RELINGUISH) {;
1246 + /* 3. Disable LRO */
1247 + regVal = sysRegRead(ADMA_LRO_CTRL_DW0);
1248 + regVal &= ~(PDMA_LRO_EN);
1249 + sysRegWrite(ADMA_LRO_CTRL_DW0, regVal);
1252 + /* 4. Disable non-lro multiple rx */
1253 + SET_PDMA_NON_LRO_MULTI_EN(0);
1255 + /* 5.1. Set GDM1 to ring0 */
1256 + SET_GDM_PID1_RXID_SEL(0);
1257 + /* 5.2. Set GDM2 to ring0 */
1258 + SET_GDM_PID2_RXID_SEL(0);
1264 +int pdma_lro_force_aggre_dvt(void)
1266 + unsigned int regVal = 0;
1269 + printk("pdma_lro_force_aggre_dvt()\n");
1271 +/* pdma rx ring1 */
1272 + /* 1. Set RX ring mode to force port */
1273 + SET_PDMA_RXRING_MODE(ADMA_RX_RING1, PDMA_RX_FORCE_PORT);
1275 + /* 2. Configure lro ring */
1276 + /* 2.1 set src/destination TCP ports */
1277 + SET_PDMA_RXRING_TCP_SRC_PORT(ADMA_RX_RING1, 3423);
1278 + SET_PDMA_RXRING_TCP_DEST_PORT(ADMA_RX_RING1, 2301);
1279 + /* 2.2 set src/destination IPs */
1280 + str_to_ip(&ip, "10.10.10.3");
1281 + sysRegWrite(LRO_RX_RING1_SIP_DW0, ip);
1282 + str_to_ip(&ip, "10.10.10.100");
1283 + sysRegWrite(LRO_RX_RING1_DIP_DW0, ip);
1284 + SET_PDMA_RXRING_MYIP_VALID(ADMA_RX_RING1, 1);
1286 + /* 2.3 Valid LRO ring */
1287 + SET_PDMA_RXRING_VALID(ADMA_RX_RING1, 1);
1289 + /* 2.4 Set AGE timer */
1290 + SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING1, 0);
1292 + /* 2.5 Set max AGG timer */
1293 + SET_PDMA_RXRING_AGG_TIME(ADMA_RX_RING1, 0);
1295 + /* 2.6 Set max LRO agg count */
1296 + SET_PDMA_RXRING_MAX_AGG_CNT(ADMA_RX_RING1, HW_LRO_MAX_AGG_CNT);
1298 + /* 3. IPv4 checksum update enable */
1299 + SET_PDMA_LRO_IPV4_CSUM_UPDATE_EN(1);
1301 + /* 4. Polling relinguish */
1302 + while (sysRegRead(ADMA_LRO_CTRL_DW0) & PDMA_LRO_RELINGUISH) {;
1305 + /* 5. Enable LRO */
1306 + regVal = sysRegRead(ADMA_LRO_CTRL_DW0);
1307 + regVal |= PDMA_LRO_EN;
1308 + sysRegWrite(ADMA_LRO_CTRL_DW0, regVal);
1313 +int pdma_lro_auto_aggre_dvt(void)
1315 + unsigned int regVal = 0;
1318 + printk("pdma_lro_auto_aggre_dvt()\n");
1320 + /* 1.1 Set my IP_1 */
1321 + str_to_ip(&ip, "10.10.10.254");
1322 + sysRegWrite(LRO_RX_RING0_DIP_DW0, ip);
1323 + sysRegWrite(LRO_RX_RING0_DIP_DW1, 0);
1324 + sysRegWrite(LRO_RX_RING0_DIP_DW2, 0);
1325 + sysRegWrite(LRO_RX_RING0_DIP_DW3, 0);
1326 + SET_PDMA_RXRING_MYIP_VALID(ADMA_RX_RING0, 1);
1328 + /* 1.2 Set my IP_2 */
1329 + str_to_ip(&ip, "10.10.20.254");
1330 + sysRegWrite(LRO_RX_RING1_DIP_DW0, ip);
1331 + sysRegWrite(LRO_RX_RING1_DIP_DW1, 0);
1332 + sysRegWrite(LRO_RX_RING1_DIP_DW2, 0);
1333 + sysRegWrite(LRO_RX_RING1_DIP_DW3, 0);
1334 + SET_PDMA_RXRING_MYIP_VALID(ADMA_RX_RING1, 1);
1336 + /* 1.3 Set my IP_3 */
1337 + sysRegWrite(LRO_RX_RING2_DIP_DW3, 0x20010238);
1338 + sysRegWrite(LRO_RX_RING2_DIP_DW2, 0x08000000);
1339 + sysRegWrite(LRO_RX_RING2_DIP_DW1, 0x00000000);
1340 + sysRegWrite(LRO_RX_RING2_DIP_DW0, 0x00000254);
1341 + SET_PDMA_RXRING_MYIP_VALID(ADMA_RX_RING2, 1);
1343 + /* 1.4 Set my IP_4 */
1344 + sysRegWrite(LRO_RX_RING3_DIP_DW3, 0x20010238);
1345 + sysRegWrite(LRO_RX_RING3_DIP_DW2, 0x08010000);
1346 + sysRegWrite(LRO_RX_RING3_DIP_DW1, 0x00000000);
1347 + sysRegWrite(LRO_RX_RING3_DIP_DW0, 0x00000254);
1348 + SET_PDMA_RXRING_MYIP_VALID(ADMA_RX_RING3, 1);
1350 + /* 2.1 Set RX ring1~3 to auto-learn modes */
1351 + SET_PDMA_RXRING_MODE(ADMA_RX_RING1, PDMA_RX_AUTO_LEARN);
1352 + SET_PDMA_RXRING_MODE(ADMA_RX_RING2, PDMA_RX_AUTO_LEARN);
1353 + SET_PDMA_RXRING_MODE(ADMA_RX_RING3, PDMA_RX_AUTO_LEARN);
1355 + /* 2.2 Valid LRO ring */
1356 + SET_PDMA_RXRING_VALID(ADMA_RX_RING0, 1);
1357 + SET_PDMA_RXRING_VALID(ADMA_RX_RING1, 1);
1358 + SET_PDMA_RXRING_VALID(ADMA_RX_RING2, 1);
1359 + SET_PDMA_RXRING_VALID(ADMA_RX_RING3, 1);
1361 + /* 2.3 Set AGE timer */
1362 + SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING1, 0);
1363 + SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING2, 0);
1364 + SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING3, 0);
1366 + /* 2.4 Set max AGG timer */
1367 + SET_PDMA_RXRING_AGG_TIME(ADMA_RX_RING1, 0);
1368 + SET_PDMA_RXRING_AGG_TIME(ADMA_RX_RING2, 0);
1369 + SET_PDMA_RXRING_AGG_TIME(ADMA_RX_RING3, 0);
1371 + /* 2.5 Set max LRO agg count */
1372 + SET_PDMA_RXRING_MAX_AGG_CNT(ADMA_RX_RING1, HW_LRO_MAX_AGG_CNT);
1373 + SET_PDMA_RXRING_MAX_AGG_CNT(ADMA_RX_RING2, HW_LRO_MAX_AGG_CNT);
1374 + SET_PDMA_RXRING_MAX_AGG_CNT(ADMA_RX_RING3, HW_LRO_MAX_AGG_CNT);
1376 + /* 3.0 IPv6 LRO enable */
1377 + SET_PDMA_LRO_IPV6_EN(1);
1379 + /* 3.1 IPv4 checksum update disable */
1380 + SET_PDMA_LRO_IPV4_CSUM_UPDATE_EN(1);
1382 + /* 3.2 switch priority comparision to byte count mode */
1383 + SET_PDMA_LRO_ALT_SCORE_MODE(PDMA_LRO_ALT_BYTE_CNT_MODE);
1385 + /* 3.3 bandwidth threshold setting */
1386 + SET_PDMA_LRO_BW_THRESHOLD(0);
1388 + /* 3.4 auto-learn score delta setting */
1389 + sysRegWrite(LRO_ALT_SCORE_DELTA, 0);
1391 + /* 3.5 Set ALT timer to 20us: (unit: 20us) */
1392 + SET_PDMA_LRO_ALT_REFRESH_TIMER_UNIT(HW_LRO_TIMER_UNIT);
1393 + /* 3.6 Set ALT refresh timer to 1 sec. (unit: 20us) */
1394 + SET_PDMA_LRO_ALT_REFRESH_TIMER(HW_LRO_REFRESH_TIME);
1396 + /* 4. Polling relinguish */
1397 + while (sysRegRead(ADMA_LRO_CTRL_DW0) & PDMA_LRO_RELINGUISH) {;
1400 + /* 5. Enable LRO */
1401 + regVal = sysRegRead(ADMA_LRO_CTRL_DW0);
1402 + regVal |= PDMA_LRO_EN;
1403 + sysRegWrite(ADMA_LRO_CTRL_DW0, regVal);
1408 +int pdma_lro_auto_ipv6_dvt(void)
1410 + unsigned int regVal = 0;
1412 + printk("pdma_lro_auto_ipv6_dvt()\n");
1414 + /* 1. Set my IP */
1415 + sysRegWrite(LRO_RX_RING1_DIP_DW3, 0x20010238);
1416 + sysRegWrite(LRO_RX_RING1_DIP_DW2, 0x08000000);
1417 + sysRegWrite(LRO_RX_RING1_DIP_DW1, 0x00000000);
1418 + sysRegWrite(LRO_RX_RING1_DIP_DW0, 0x00000254);
1420 + /* 2.1 Set RX ring1~3 to auto-learn modes */
1421 + SET_PDMA_RXRING_MODE(ADMA_RX_RING1, PDMA_RX_AUTO_LEARN);
1422 + SET_PDMA_RXRING_MODE(ADMA_RX_RING2, PDMA_RX_AUTO_LEARN);
1423 + SET_PDMA_RXRING_MODE(ADMA_RX_RING3, PDMA_RX_AUTO_LEARN);
1425 + /* 2.2 Valid LRO ring */
1426 + SET_PDMA_RXRING_VALID(ADMA_RX_RING1, 1);
1427 + SET_PDMA_RXRING_VALID(ADMA_RX_RING2, 1);
1428 + SET_PDMA_RXRING_VALID(ADMA_RX_RING3, 1);
1430 + /* 2.3 Set AGE timer */
1431 + SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING1, HW_LRO_AGE_TIME);
1432 + SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING2, HW_LRO_AGE_TIME);
1433 + SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING3, HW_LRO_AGE_TIME);
1435 + /* 3.0 IPv6 LRO enable */
1436 + SET_PDMA_LRO_IPV6_EN(1);
1438 + /* 3.1 IPv4 checksum update disable */
1439 + SET_PDMA_LRO_IPV4_CSUM_UPDATE_EN(1);
1441 + /* 3.2 switch priority comparision to byte count mode */
1442 + SET_PDMA_LRO_ALT_SCORE_MODE(PDMA_LRO_ALT_BYTE_CNT_MODE);
1444 + /* 3.3 bandwidth threshold setting */
1445 + SET_PDMA_LRO_BW_THRESHOLD(0);
1447 + /* 3.4 auto-learn score delta setting */
1448 + sysRegWrite(LRO_ALT_SCORE_DELTA, 0);
1450 + /* 3.5 Set ALT timer to 500us: (unit: 20us) */
1451 + SET_PDMA_LRO_ALT_REFRESH_TIMER_UNIT(25);
1452 + /* 3.6 Set ALT refresh timer to 1 sec. (unit: 500us) */
1453 + SET_PDMA_LRO_ALT_REFRESH_TIMER(2000);
1455 + /* 3.7 Set max AGG timer: 10 msec. */
1456 + SET_PDMA_LRO_MAX_AGG_TIME(HW_LRO_AGG_TIME);
1458 + /* 4. Polling relinguish */
1459 + while (sysRegRead(ADMA_LRO_CTRL_DW0) & PDMA_LRO_RELINGUISH) {;
1462 + /* 5. Enable LRO */
1463 + regVal = sysRegRead(ADMA_LRO_CTRL_DW0);
1464 + regVal |= PDMA_LRO_EN;
1465 + sysRegWrite(ADMA_LRO_CTRL_DW0, regVal);
1470 +int pdma_lro_auto_myIP_dvt(void)
1472 + unsigned int regVal = 0;
1475 + printk("pdma_lro_auto_myIP_dvt()\n");
1477 + /* 1.1 Set my IP_1 */
1478 + str_to_ip(&ip, "10.10.10.254");
1479 + sysRegWrite(LRO_RX_RING0_DIP_DW0, ip);
1480 + sysRegWrite(LRO_RX_RING0_DIP_DW1, 0);
1481 + sysRegWrite(LRO_RX_RING0_DIP_DW2, 0);
1482 + sysRegWrite(LRO_RX_RING0_DIP_DW3, 0);
1483 + SET_PDMA_RXRING_MYIP_VALID(ADMA_RX_RING0, 1);
1484 + /* 1.2 Set my IP_2 */
1485 + str_to_ip(&ip, "10.10.20.254");
1486 + sysRegWrite(LRO_RX_RING1_DIP_DW0, ip);
1487 + sysRegWrite(LRO_RX_RING1_DIP_DW1, 0);
1488 + sysRegWrite(LRO_RX_RING1_DIP_DW2, 0);
1489 + sysRegWrite(LRO_RX_RING1_DIP_DW3, 0);
1490 + SET_PDMA_RXRING_MYIP_VALID(ADMA_RX_RING1, 1);
1491 + /* 1.3 Set my IP_3 */
1492 + sysRegWrite(LRO_RX_RING2_DIP_DW3, 0x20010238);
1493 + sysRegWrite(LRO_RX_RING2_DIP_DW2, 0x08000000);
1494 + sysRegWrite(LRO_RX_RING2_DIP_DW1, 0x00000000);
1495 + sysRegWrite(LRO_RX_RING2_DIP_DW0, 0x00000254);
1496 + SET_PDMA_RXRING_MYIP_VALID(ADMA_RX_RING2, 1);
1497 + /* 1.4 Set my IP_4 */
1498 + sysRegWrite(LRO_RX_RING3_DIP_DW3, 0x20010238);
1499 + sysRegWrite(LRO_RX_RING3_DIP_DW2, 0x08010000);
1500 + sysRegWrite(LRO_RX_RING3_DIP_DW1, 0x00000000);
1501 + sysRegWrite(LRO_RX_RING3_DIP_DW0, 0x00000254);
1502 + SET_PDMA_RXRING_MYIP_VALID(ADMA_RX_RING3, 1);
1504 + /* 2.1 Set RX ring1~3 to auto-learn modes */
1505 + SET_PDMA_RXRING_MODE(ADMA_RX_RING1, PDMA_RX_AUTO_LEARN);
1506 + SET_PDMA_RXRING_MODE(ADMA_RX_RING2, PDMA_RX_AUTO_LEARN);
1507 + SET_PDMA_RXRING_MODE(ADMA_RX_RING3, PDMA_RX_AUTO_LEARN);
1509 + /* 2.2 Valid LRO ring */
1510 + SET_PDMA_RXRING_VALID(ADMA_RX_RING0, 1);
1511 + SET_PDMA_RXRING_VALID(ADMA_RX_RING1, 1);
1512 + SET_PDMA_RXRING_VALID(ADMA_RX_RING2, 1);
1513 + SET_PDMA_RXRING_VALID(ADMA_RX_RING3, 1);
1515 + /* 2.3 Set AGE timer */
1516 + SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING1, HW_LRO_AGE_TIME);
1517 + SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING2, HW_LRO_AGE_TIME);
1518 + SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING3, HW_LRO_AGE_TIME);
1520 + /* 3.0 IPv6 LRO enable */
1521 + SET_PDMA_LRO_IPV6_EN(1);
1523 + /* 3.1 IPv4 checksum update disable */
1524 + SET_PDMA_LRO_IPV4_CSUM_UPDATE_EN(1);
1526 + /* 3.2 switch priority comparision to byte count mode */
1527 + SET_PDMA_LRO_ALT_SCORE_MODE(PDMA_LRO_ALT_BYTE_CNT_MODE);
1529 + /* 3.3 bandwidth threshold setting */
1530 + SET_PDMA_LRO_BW_THRESHOLD(0);
1532 + /* 3.4 auto-learn score delta setting */
1533 + sysRegWrite(LRO_ALT_SCORE_DELTA, 0);
1535 + /* 3.5 Set ALT timer to 500us: (unit: 20us) */
1536 + SET_PDMA_LRO_ALT_REFRESH_TIMER_UNIT(25);
1537 + /* 3.6 Set ALT refresh timer to 1 sec. (unit: 500us) */
1538 + SET_PDMA_LRO_ALT_REFRESH_TIMER(2000);
1540 + /* 3.7 Set max AGG timer: 10 msec. */
1541 + SET_PDMA_LRO_MAX_AGG_TIME(HW_LRO_AGG_TIME);
1543 + /* 4. Polling relinguish */
1544 + while (sysRegRead(ADMA_LRO_CTRL_DW0) & PDMA_LRO_RELINGUISH) {;
1547 + /* 5. Enable LRO */
1548 + regVal = sysRegRead(ADMA_LRO_CTRL_DW0);
1549 + regVal |= PDMA_LRO_EN;
1550 + sysRegWrite(ADMA_LRO_CTRL_DW0, regVal);
1555 +int pdma_lro_dly_int_dvt(int index)
1557 + unsigned int regVal = 0;
1560 + printk("pdma_lro_dly_int_dvt(%d)\n", index);
1563 + /* 1.1 Set my IP_1 */
1564 + /* str_to_ip( &ip, "10.10.10.254" ); */
1565 + str_to_ip(&ip, "10.10.10.100");
1566 + sysRegWrite(LRO_RX_RING0_DIP_DW0, ip);
1567 + sysRegWrite(LRO_RX_RING0_DIP_DW1, 0);
1568 + sysRegWrite(LRO_RX_RING0_DIP_DW2, 0);
1569 + sysRegWrite(LRO_RX_RING0_DIP_DW3, 0);
1571 + /* 1.1 set src/destination TCP ports */
1572 + SET_PDMA_RXRING_TCP_SRC_PORT(ADMA_RX_RING1, 3423);
1573 + SET_PDMA_RXRING_TCP_DEST_PORT(ADMA_RX_RING1, 2301);
1574 + SET_PDMA_RXRING_TCP_SRC_PORT(ADMA_RX_RING2, 3423);
1575 + SET_PDMA_RXRING_TCP_DEST_PORT(ADMA_RX_RING2, 2301);
1576 + SET_PDMA_RXRING_TCP_SRC_PORT(ADMA_RX_RING3, 3423);
1577 + SET_PDMA_RXRING_TCP_DEST_PORT(ADMA_RX_RING3, 2301);
1578 + /* 1.2 set src/destination IPs */
1579 + str_to_ip(&ip, "10.10.10.3");
1580 + sysRegWrite(LRO_RX_RING1_SIP_DW0, ip);
1581 + str_to_ip(&ip, "10.10.10.100");
1582 + sysRegWrite(LRO_RX_RING1_DIP_DW0, ip);
1583 + str_to_ip(&ip, "10.10.10.3");
1584 + sysRegWrite(LRO_RX_RING2_SIP_DW0, ip);
1585 + str_to_ip(&ip, "10.10.10.100");
1586 + sysRegWrite(LRO_RX_RING2_DIP_DW0, ip);
1587 + str_to_ip(&ip, "10.10.10.3");
1588 + sysRegWrite(LRO_RX_RING3_SIP_DW0, ip);
1589 + str_to_ip(&ip, "10.10.10.100");
1590 + sysRegWrite(LRO_RX_RING3_DIP_DW0, ip);
1591 + SET_PDMA_RXRING_MYIP_VALID(ADMA_RX_RING1, 1);
1592 + SET_PDMA_RXRING_MYIP_VALID(ADMA_RX_RING2, 1);
1593 + SET_PDMA_RXRING_MYIP_VALID(ADMA_RX_RING3, 1);
1597 + /* 1.2 Disable DLY_INT for lro ring */
1598 + SET_PDMA_LRO_DLY_INT_EN(0);
1600 + /* 1.2 Enable DLY_INT for lro ring */
1601 + SET_PDMA_LRO_DLY_INT_EN(1);
1604 + /* 1.3 LRO ring DLY_INT setting */
1606 + sysRegWrite(LRO_RX1_DLY_INT, DELAY_INT_INIT);
1607 + } else if (index == 2) {
1608 + sysRegWrite(LRO_RX2_DLY_INT, DELAY_INT_INIT);
1609 + } else if (index == 3) {
1610 + sysRegWrite(LRO_RX3_DLY_INT, DELAY_INT_INIT);
1613 + /* 2.1 Set RX rings to auto-learn modes */
1614 + SET_PDMA_RXRING_MODE(ADMA_RX_RING1, PDMA_RX_AUTO_LEARN);
1615 + SET_PDMA_RXRING_MODE(ADMA_RX_RING2, PDMA_RX_AUTO_LEARN);
1616 + SET_PDMA_RXRING_MODE(ADMA_RX_RING3, PDMA_RX_AUTO_LEARN);
1618 + /* 2.0 set rx ring mode */
1619 + SET_PDMA_RXRING_MODE(ADMA_RX_RING1, PDMA_RX_FORCE_PORT);
1620 + SET_PDMA_RXRING_MODE(ADMA_RX_RING2, PDMA_RX_FORCE_PORT);
1621 + SET_PDMA_RXRING_MODE(ADMA_RX_RING3, PDMA_RX_FORCE_PORT);
1623 + /* 2.1 IPv4 force port mode */
1624 + SET_PDMA_RXRING_IPV4_FORCE_MODE(ADMA_RX_RING1, 1);
1625 + SET_PDMA_RXRING_IPV4_FORCE_MODE(ADMA_RX_RING2, 1);
1626 + SET_PDMA_RXRING_IPV4_FORCE_MODE(ADMA_RX_RING3, 1);
1629 + /* 2.2 Valid LRO ring */
1630 + SET_PDMA_RXRING_VALID(ADMA_RX_RING0, 1);
1631 + if ((index == 0) || (index == 1)) {
1632 + SET_PDMA_RXRING_VALID(ADMA_RX_RING1, 1);
1633 + SET_PDMA_RXRING_VALID(ADMA_RX_RING2, 0);
1634 + SET_PDMA_RXRING_VALID(ADMA_RX_RING3, 0);
1635 + } else if (index == 2) {
1636 + SET_PDMA_RXRING_VALID(ADMA_RX_RING1, 0);
1637 + SET_PDMA_RXRING_VALID(ADMA_RX_RING2, 1);
1638 + SET_PDMA_RXRING_VALID(ADMA_RX_RING3, 0);
1640 + SET_PDMA_RXRING_VALID(ADMA_RX_RING1, 0);
1641 + SET_PDMA_RXRING_VALID(ADMA_RX_RING2, 0);
1642 + SET_PDMA_RXRING_VALID(ADMA_RX_RING3, 1);
1645 + /* 2.3 Set AGE timer */
1646 + SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING1, HW_LRO_AGE_TIME);
1647 + SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING2, HW_LRO_AGE_TIME);
1648 + SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING3, HW_LRO_AGE_TIME);
1650 + /* 3.1 IPv4 checksum update enable */
1651 + SET_PDMA_LRO_IPV4_CSUM_UPDATE_EN(1);
1653 + /* 3.2 switch priority comparision to byte count mode */
1654 + SET_PDMA_LRO_ALT_SCORE_MODE(PDMA_LRO_ALT_BYTE_CNT_MODE);
1656 + /* 3.3 bandwidth threshold setting */
1657 + SET_PDMA_LRO_BW_THRESHOLD(0);
1659 + /* 3.4 auto-learn score delta setting */
1660 + sysRegWrite(LRO_ALT_SCORE_DELTA, 0);
1662 + /* 3.5 Set ALT timer to 500us: (unit: 20us) */
1663 + SET_PDMA_LRO_ALT_REFRESH_TIMER_UNIT(25);
1664 + /* 3.6 Set ALT refresh timer to 1 sec. (unit: 500us) */
1665 + SET_PDMA_LRO_ALT_REFRESH_TIMER(2000);
1667 + /* 3.7 Set max AGG timer */
1668 + SET_PDMA_LRO_MAX_AGG_TIME(HW_LRO_AGG_TIME);
1670 + /* 4. Polling relinguish */
1671 + while (sysRegRead(ADMA_LRO_CTRL_DW0) & PDMA_LRO_RELINGUISH) {;
1674 + /* 5. Enable LRO */
1675 + regVal = sysRegRead(ADMA_LRO_CTRL_DW0);
1676 + regVal |= PDMA_LRO_EN;
1677 + sysRegWrite(ADMA_LRO_CTRL_DW0, regVal);
1682 +int pdma_lro_dly_int0_dvt(void)
1684 + return pdma_lro_dly_int_dvt(0);
1687 +int pdma_lro_dly_int1_dvt(void)
1689 + return pdma_lro_dly_int_dvt(1);
1692 +int pdma_lro_dly_int2_dvt(void)
1694 + return pdma_lro_dly_int_dvt(2);
1697 +int pdma_lro_dly_int3_dvt(void)
1699 + return pdma_lro_dly_int_dvt(3);
1702 +#endif /* CONFIG_RAETH_HW_LRO */
1704 +#if defined(CONFIG_RAETH_MULTIPLE_RX_RING)
1705 +int pdma_gdm_rxid_config(void)
1707 + unsigned int regVal = 0;
1709 + printk("pdma_gdm_rxid_config()\n");
1711 + /* 1. Set RX ring1~3 to pse modes */
1712 + SET_PDMA_RXRING_MODE(ADMA_RX_RING1, PDMA_RX_PSE_MODE);
1713 + SET_PDMA_RXRING_MODE(ADMA_RX_RING2, PDMA_RX_PSE_MODE);
1714 + SET_PDMA_RXRING_MODE(ADMA_RX_RING3, PDMA_RX_PSE_MODE);
1716 + /* 2. Enable non-lro multiple rx */
1717 + SET_PDMA_NON_LRO_MULTI_EN(1);
1722 +int pdma_non_lro_portid_dvt(void)
1724 + unsigned int regVal = 0;
1726 + printk("pdma_non_lro_portid_dvt()\n");
1728 + /* 1. Set GDM1 to ring3 */
1729 + SET_GDM_PID1_RXID_SEL(3);
1731 + /* 2. Set GDM2 to ring1 */
1732 + SET_GDM_PID2_RXID_SEL(1);
1735 + /* 3. Set priority rule: pid */
1736 + SET_GDM_RXID_PRI_SEL(GDM_PRI_PID);
1738 + /* PDMA multi-rx enable */
1739 + pdma_gdm_rxid_config();
1744 +int pdma_non_lro_stag_dvt(void)
1746 + unsigned int regVal = 0;
1748 + printk("pdma_non_lro_stag_dvt()\n");
1750 + /* 1. Set STAG4 to ring0 */
1751 + GDM_STAG_RXID_SEL(4, 0);
1752 + /* 2. Set STAG3 to ring1 */
1753 + GDM_STAG_RXID_SEL(3, 1);
1754 + /* 3. Set STAG2 to ring2 */
1755 + GDM_STAG_RXID_SEL(2, 2);
1756 + /* 4. Set STAG1 to ring3 */
1757 + GDM_STAG_RXID_SEL(1, 3);
1759 + /* 5. Set priority rule: stag/pid */
1760 + SET_GDM_RXID_PRI_SEL(GDM_PRI_PID);
1762 + /* PDMA multi-rx enable */
1763 + pdma_gdm_rxid_config();
1768 +int pdma_non_lro_vlan_dvt(void)
1770 + unsigned int regVal = 0;
1772 + printk("pdma_non_lro_vlan_dvt()\n");
1774 + /* 1. Set vlan priority=3 to ring1 */
1775 + SET_GDM_VLAN_PRI_RXID_SEL(3, 1);
1776 + /* 2. Set vlan priority=2 to ring2 */
1777 + SET_GDM_VLAN_PRI_RXID_SEL(2, 2);
1778 + /* 3. Set vlan priority=1 to ring3 */
1779 + SET_GDM_VLAN_PRI_RXID_SEL(1, 3);
1780 + /* 4. Set vlan priority=0 to ring3 */
1781 + SET_GDM_VLAN_PRI_RXID_SEL(0, 3);
1783 + /* 1. Set vlan priority=4 to ring1 */
1784 + SET_GDM_VLAN_PRI_RXID_SEL(4, 1);
1785 + /* 2. Set vlan priority=5 to ring2 */
1786 + SET_GDM_VLAN_PRI_RXID_SEL(5, 2);
1787 + /* 3. Set vlan priority=6 to ring3 */
1788 + SET_GDM_VLAN_PRI_RXID_SEL(6, 3);
1789 + /* 4. Set vlan priority=7 to ring3 */
1790 + SET_GDM_VLAN_PRI_RXID_SEL(7, 3);
1792 + /* 4. Set priority rule: vlan > pid */
1793 + SET_GDM_RXID_PRI_SEL(GDM_PRI_VLAN_PID);
1795 + /* PDMA multi-rx enable */
1796 + pdma_gdm_rxid_config();
1801 +int pdma_non_lro_tcpack_dvt(void)
1803 + unsigned int regVal = 0;
1805 + printk("pdma_non_lro_tcpack_dvt()\n");
1807 + /* 1. Enable TCP ACK with zero payload check */
1808 + SET_GDM_TCP_ACK_WZPC(1);
1809 + /* 2. Set TCP ACK to ring3 */
1810 + SET_GDM_TCP_ACK_RXID_SEL(3);
1812 + /* 3. Set priority rule: ack > pid */
1813 + SET_GDM_RXID_PRI_SEL(GDM_PRI_ACK_PID);
1815 + /* PDMA multi-rx enable */
1816 + pdma_gdm_rxid_config();
1821 +int pdma_non_lro_pri1_dvt(void)
1823 + unsigned int regVal = 0;
1825 + printk("pdma_non_lro_pri1_dvt()\n");
1827 + /* 1. Set GDM1 to ring0 */
1828 + SET_GDM_PID1_RXID_SEL(0);
1830 + /* 2.1 Disable TCP ACK with zero payload check */
1831 + SET_GDM_TCP_ACK_WZPC(0);
1832 + /* 2.2 Set TCP ACK to ring1 */
1833 + SET_GDM_TCP_ACK_RXID_SEL(1);
1835 + /* 3. Set vlan priority=1 to ring2 */
1836 + SET_GDM_VLAN_PRI_RXID_SEL(1, 2);
1838 + /* 4. Set priority rule: vlan > ack > pid */
1839 + SET_GDM_RXID_PRI_SEL(GDM_PRI_VLAN_ACK_PID);
1841 + /* PDMA multi-rx enable */
1842 + pdma_gdm_rxid_config();
1847 +int pdma_non_lro_pri2_dvt(void)
1849 + unsigned int regVal = 0;
1851 + printk("pdma_non_lro_pri2_dvt()\n");
1853 + /* 1. Set GDM1 to ring0 */
1854 + SET_GDM_PID1_RXID_SEL(0);
1856 + /* 2.1 Disable TCP ACK with zero payload check */
1857 + SET_GDM_TCP_ACK_WZPC(0);
1858 + /* 2.2 Set TCP ACK to ring1 */
1859 + SET_GDM_TCP_ACK_RXID_SEL(1);
1861 + /* 3. Set vlan priority=1 to ring2 */
1862 + SET_GDM_VLAN_PRI_RXID_SEL(1, 2);
1864 + /* 4. Set priority rule: ack > vlan > pid */
1865 + SET_GDM_RXID_PRI_SEL(GDM_PRI_ACK_VLAN_PID);
1867 + /* PDMA multi-rx enable */
1868 + pdma_gdm_rxid_config();
1872 +#endif /* CONFIG_RAETH_MULTIPLE_RX_RING */
1873 +const static PDMA_LRO_DVT_FUNC pdma_dvt_lro_func[] = {
1874 +#if defined(CONFIG_RAETH_HW_LRO)
1875 + [0] = pdma_lro_disable_dvt, /* PDMA_TEST_LRO_DISABLE */
1876 + [1] = pdma_lro_force_aggre_dvt, /* PDMA_TEST_LRO_FORCE_PORT */
1877 + [2] = pdma_lro_auto_aggre_dvt, /* PDMA_TEST_LRO_AUTO_LEARN */
1878 + [3] = pdma_lro_auto_ipv6_dvt, /* PDMA_TEST_LRO_AUTO_IPV6 */
1879 + [4] = pdma_lro_auto_myIP_dvt, /* PDMA_TEST_LRO_AUTO_MYIP */
1880 + [5] = pdma_lro_force_aggre_dvt, /* PDMA_TEST_LRO_FORCE_AGGREGATE */
1881 +#endif /* CONFIG_RAETH_HW_LRO */
1882 +#if defined(CONFIG_RAETH_MULTIPLE_RX_RING)
1883 + [6] = pdma_non_lro_portid_dvt, /* PDMA_TEST_NON_LRO_PORT_ID */
1884 + [7] = pdma_non_lro_stag_dvt, /* PDMA_TEST_NON_LRO_STAG */
1885 + [8] = pdma_non_lro_vlan_dvt, /* PDMA_TEST_NON_LRO_VLAN */
1886 + [9] = pdma_non_lro_tcpack_dvt, /* PDMA_TEST_NON_LRO_TCP_ACK */
1887 + [10] = pdma_non_lro_pri1_dvt, /* PDMA_TEST_NON_LRO_PRI1 */
1888 + [11] = pdma_non_lro_pri2_dvt, /* PDMA_TEST_NON_LRO_PRI2 */
1889 +#endif /* CONFIG_RAETH_MULTIPLE_RX_RING */
1890 +#if defined(CONFIG_RAETH_HW_LRO)
1891 + [12] = pdma_lro_dly_int0_dvt, /* PDMA_TEST_LRO_DLY_INT0 */
1892 + [13] = pdma_lro_dly_int1_dvt, /* PDMA_TEST_LRO_DLY_INT1 */
1893 + [14] = pdma_lro_dly_int2_dvt, /* PDMA_TEST_LRO_DLY_INT2 */
1894 + [15] = pdma_lro_dly_int3_dvt, /* PDMA_TEST_LRO_DLY_INT3 */
1895 +#endif /* CONFIG_RAETH_HW_LRO */
1898 +/* PDMA LRO test functions end */
1900 +#if defined(CONFIG_RAETH_HW_LRO) || defined(CONFIG_RAETH_MULTIPLE_RX_RING)
1901 +void raeth_pdma_lro_dvt(int rx_ring_no, END_DEVICE *ei_local,
1902 + int rx_dma_owner_idx0)
1904 + if (pdma_dvt_get_show_config() & PDMA_SHOW_RX_DESC) {
1905 + if (rx_ring_no == 1) {
1906 + printk("------- rt2880_eth_recv (ring1) --------\n");
1907 + printk("rx_info1=0x%x\n",
1909 + &ei_local->rx_ring1[rx_dma_owner_idx0].
1911 + printk("rx_info2=0x%x\n",
1913 + &ei_local->rx_ring1[rx_dma_owner_idx0].
1915 + printk("rx_info3=0x%x\n",
1917 + &ei_local->rx_ring1[rx_dma_owner_idx0].
1919 + printk("rx_info4=0x%x\n",
1921 + &ei_local->rx_ring1[rx_dma_owner_idx0].
1923 + printk("-------------------------------\n");
1924 + } else if (rx_ring_no == 2) {
1925 + printk("------- rt2880_eth_recv (ring2) --------\n");
1926 + printk("rx_info1=0x%x\n",
1928 + &ei_local->rx_ring2[rx_dma_owner_idx0].
1930 + printk("rx_info2=0x%x\n",
1932 + &ei_local->rx_ring2[rx_dma_owner_idx0].
1934 + printk("rx_info3=0x%x\n",
1936 + &ei_local->rx_ring2[rx_dma_owner_idx0].
1938 + printk("rx_info4=0x%x\n",
1940 + &ei_local->rx_ring2[rx_dma_owner_idx0].
1942 + printk("-------------------------------\n");
1943 + } else if (rx_ring_no == 3) {
1944 + printk("------- rt2880_eth_recv (ring3) --------\n");
1945 + printk("rx_info1=0x%x\n",
1947 + &ei_local->rx_ring3[rx_dma_owner_idx0].
1949 + printk("rx_info2=0x%x\n",
1951 + &ei_local->rx_ring3[rx_dma_owner_idx0].
1953 + printk("rx_info3=0x%x\n",
1955 + &ei_local->rx_ring3[rx_dma_owner_idx0].
1957 + printk("rx_info4=0x%x\n",
1959 + &ei_local->rx_ring3[rx_dma_owner_idx0].
1961 + printk("-------------------------------\n");
1965 + printk("------- rt2880_eth_recv (ring0) --------\n");
1966 + printk("rx_info1=0x%x\n",
1968 + &ei_local->rx_ring0[rx_dma_owner_idx0].
1970 + printk("rx_info2=0x%x\n",
1972 + &ei_local->rx_ring0[rx_dma_owner_idx0].
1974 + printk("rx_info3=0x%x\n",
1976 + &ei_local->rx_ring0[rx_dma_owner_idx0].
1978 + printk("rx_info4=0x%x\n",
1980 + &ei_local->rx_ring0[rx_dma_owner_idx0].
1982 + printk("-------------------------------\n");
1986 + if ((pdma_dvt_get_show_config() & PDMA_SHOW_DETAIL_RX_DESC) ||
1987 + (pdma_dvt_get_lro_test_config()==PDMA_TEST_LRO_FORCE_PORT)) {
1988 + if (rx_ring_no == 1) {
1989 + printk("------- rt2880_eth_recv (ring1) --------\n");
1990 + printk("rx_info1.PDP0=0x%x\n",
1991 + ei_local->rx_ring1[rx_dma_owner_idx0].
1993 + printk("rx_info2.DDONE_bit=0x%x\n",
1994 + ei_local->rx_ring1[rx_dma_owner_idx0].
1995 + rxd_info2.DDONE_bit);
1996 + printk("rx_info2.LS0=0x%x\n",
1997 + ei_local->rx_ring1[rx_dma_owner_idx0].
1999 + printk("rx_info2.PLEN0=0x%x\n",
2000 + ei_local->rx_ring1[rx_dma_owner_idx0].
2002 + printk("rx_info2.TAG=0x%x\n",
2003 + ei_local->rx_ring1[rx_dma_owner_idx0].
2005 +#if defined(CONFIG_ARCH_MT7623)
2006 + printk("rx_info2.LRO_AGG_CNT=0x%x\n",
2007 + ei_local->rx_ring1[rx_dma_owner_idx0].
2008 + rxd_info2.LRO_AGG_CNT);
2009 + printk("rx_info2.REV=0x%x\n",
2010 + ei_local->rx_ring1[rx_dma_owner_idx0].
2013 + printk("rx_info2.LS1=0x%x\n",
2014 + ei_local->rx_ring1[rx_dma_owner_idx0].
2016 +#endif /* CONFIG_RAETH_HW_LRO */
2017 + printk("rx_info2.PLEN1=0x%x\n",
2018 + ei_local->rx_ring1[rx_dma_owner_idx0].
2020 + printk("rx_info3.TPID=0x%x\n",
2021 + ei_local->rx_ring1[rx_dma_owner_idx0].
2023 + printk("rx_info3.VID=0x%x\n",
2024 + ei_local->rx_ring1[rx_dma_owner_idx0].
2026 + printk("rx_info4.IP6=0x%x\n",
2027 + ei_local->rx_ring1[rx_dma_owner_idx0].
2029 + printk("rx_info4.IP4=0x%x\n",
2030 + ei_local->rx_ring1[rx_dma_owner_idx0].
2032 + printk("rx_info4.IP4F=0x%x\n",
2033 + ei_local->rx_ring1[rx_dma_owner_idx0].
2035 + printk("rx_info4.TACK=0x%x\n",
2036 + ei_local->rx_ring1[rx_dma_owner_idx0].
2038 + printk("rx_info4.L4VLD=0x%x\n",
2039 + ei_local->rx_ring1[rx_dma_owner_idx0].
2041 + printk("rx_info4.L4F=0x%x\n",
2042 + ei_local->rx_ring1[rx_dma_owner_idx0].
2044 + printk("rx_info4.SPORT=0x%x\n",
2045 + ei_local->rx_ring1[rx_dma_owner_idx0].
2047 + printk("rx_info4.CRSN=0x%x\n",
2048 + ei_local->rx_ring1[rx_dma_owner_idx0].
2050 + printk("rx_info4.FOE_Entry=0x%x\n",
2051 + ei_local->rx_ring1[rx_dma_owner_idx0].
2052 + rxd_info4.FOE_Entry);
2053 + printk("-------------------------------\n");
2054 + } else if (rx_ring_no == 2) {
2055 + printk("------- rt2880_eth_recv (ring2) --------\n");
2056 + printk("rx_info1.PDP0=0x%x\n",
2057 + ei_local->rx_ring2[rx_dma_owner_idx0].
2059 + printk("rx_info2.DDONE_bit=0x%x\n",
2060 + ei_local->rx_ring2[rx_dma_owner_idx0].
2061 + rxd_info2.DDONE_bit);
2062 + printk("rx_info2.LS0=0x%x\n",
2063 + ei_local->rx_ring2[rx_dma_owner_idx0].
2065 + printk("rx_info2.PLEN0=0x%x\n",
2066 + ei_local->rx_ring2[rx_dma_owner_idx0].
2068 + printk("rx_info2.TAG=0x%x\n",
2069 + ei_local->rx_ring2[rx_dma_owner_idx0].
2071 +#if defined(CONFIG_ARCH_MT7623)
2072 + printk("rx_info2.LRO_AGG_CNT=0x%x\n",
2073 + ei_local->rx_ring2[rx_dma_owner_idx0].
2074 + rxd_info2.LRO_AGG_CNT);
2075 + printk("rx_info2.REV=0x%x\n",
2076 + ei_local->rx_ring2[rx_dma_owner_idx0].
2079 + printk("rx_info2.LS1=0x%x\n",
2080 + ei_local->rx_ring2[rx_dma_owner_idx0].
2082 +#endif /* CONFIG_RAETH_HW_LRO */
2083 + printk("rx_info2.PLEN1=0x%x\n",
2084 + ei_local->rx_ring2[rx_dma_owner_idx0].
2086 + printk("rx_info3.TPID=0x%x\n",
2087 + ei_local->rx_ring2[rx_dma_owner_idx0].
2089 + printk("rx_info3.VID=0x%x\n",
2090 + ei_local->rx_ring2[rx_dma_owner_idx0].
2092 + printk("rx_info4.IP6=0x%x\n",
2093 + ei_local->rx_ring2[rx_dma_owner_idx0].
2095 + printk("rx_info4.IP4=0x%x\n",
2096 + ei_local->rx_ring2[rx_dma_owner_idx0].
2098 + printk("rx_info4.IP4F=0x%x\n",
2099 + ei_local->rx_ring2[rx_dma_owner_idx0].
2101 + printk("rx_info4.TACK=0x%x\n",
2102 + ei_local->rx_ring2[rx_dma_owner_idx0].
2104 + printk("rx_info4.L4VLD=0x%x\n",
2105 + ei_local->rx_ring2[rx_dma_owner_idx0].
2107 + printk("rx_info4.L4F=0x%x\n",
2108 + ei_local->rx_ring2[rx_dma_owner_idx0].
2110 + printk("rx_info4.SPORT=0x%x\n",
2111 + ei_local->rx_ring2[rx_dma_owner_idx0].
2113 + printk("rx_info4.CRSN=0x%x\n",
2114 + ei_local->rx_ring2[rx_dma_owner_idx0].
2116 + printk("rx_info4.FOE_Entry=0x%x\n",
2117 + ei_local->rx_ring2[rx_dma_owner_idx0].
2118 + rxd_info4.FOE_Entry);
2119 + printk("-------------------------------\n");
2120 + } else if (rx_ring_no == 3) {
2121 + printk("------- rt2880_eth_recv (ring3) --------\n");
2122 + printk("rx_info1.PDP0=0x%x\n",
2123 + ei_local->rx_ring3[rx_dma_owner_idx0].
2125 + printk("rx_info2.DDONE_bit=0x%x\n",
2126 + ei_local->rx_ring3[rx_dma_owner_idx0].
2127 + rxd_info2.DDONE_bit);
2128 + printk("rx_info2.LS0=0x%x\n",
2129 + ei_local->rx_ring3[rx_dma_owner_idx0].
2131 + printk("rx_info2.PLEN0=0x%x\n",
2132 + ei_local->rx_ring3[rx_dma_owner_idx0].
2134 + printk("rx_info2.TAG=0x%x\n",
2135 + ei_local->rx_ring3[rx_dma_owner_idx0].
2137 +#if defined(CONFIG_ARCH_MT7623)
2138 + printk("rx_info2.LRO_AGG_CNT=0x%x\n",
2139 + ei_local->rx_ring3[rx_dma_owner_idx0].
2140 + rxd_info2.LRO_AGG_CNT);
2141 + printk("rx_info2.REV=0x%x\n",
2142 + ei_local->rx_ring3[rx_dma_owner_idx0].
2145 + printk("rx_info2.LS1=0x%x\n",
2146 + ei_local->rx_ring3[rx_dma_owner_idx0].
2148 +#endif /* CONFIG_RAETH_HW_LRO */
2149 + printk("rx_info2.PLEN1=0x%x\n",
2150 + ei_local->rx_ring3[rx_dma_owner_idx0].
2152 + printk("rx_info3.TPID=0x%x\n",
2153 + ei_local->rx_ring3[rx_dma_owner_idx0].
2155 + printk("rx_info3.VID=0x%x\n",
2156 + ei_local->rx_ring3[rx_dma_owner_idx0].
2158 + printk("rx_info4.IP6=0x%x\n",
2159 + ei_local->rx_ring3[rx_dma_owner_idx0].
2161 + printk("rx_info4.IP4=0x%x\n",
2162 + ei_local->rx_ring3[rx_dma_owner_idx0].
2164 + printk("rx_info4.IP4F=0x%x\n",
2165 + ei_local->rx_ring3[rx_dma_owner_idx0].
2167 + printk("rx_info4.TACK=0x%x\n",
2168 + ei_local->rx_ring3[rx_dma_owner_idx0].
2170 + printk("rx_info4.L4VLD=0x%x\n",
2171 + ei_local->rx_ring3[rx_dma_owner_idx0].
2173 + printk("rx_info4.L4F=0x%x\n",
2174 + ei_local->rx_ring3[rx_dma_owner_idx0].
2176 + printk("rx_info4.SPORT=0x%x\n",
2177 + ei_local->rx_ring3[rx_dma_owner_idx0].
2179 + printk("rx_info4.CRSN=0x%x\n",
2180 + ei_local->rx_ring3[rx_dma_owner_idx0].
2182 + printk("rx_info4.FOE_Entry=0x%x\n",
2183 + ei_local->rx_ring3[rx_dma_owner_idx0].
2184 + rxd_info4.FOE_Entry);
2185 + printk("-------------------------------\n");
2189 + printk("------- rt2880_eth_recv (ring0) --------\n");
2190 + printk("rx_info1.PDP0=0x%x\n",
2191 + ei_local->rx_ring0[rx_dma_owner_idx0].
2193 + printk("rx_info2.DDONE_bit=0x%x\n",
2194 + ei_local->rx_ring0[rx_dma_owner_idx0].
2195 + rxd_info2.DDONE_bit);
2196 + printk("rx_info2.LS0=0x%x\n",
2197 + ei_local->rx_ring0[rx_dma_owner_idx0].
2199 + printk("rx_info2.PLEN0=0x%x\n",
2200 + ei_local->rx_ring0[rx_dma_owner_idx0].
2202 + printk("rx_info2.TAG=0x%x\n",
2203 + ei_local->rx_ring0[rx_dma_owner_idx0].
2205 + printk("rx_info2.LS1=0x%x\n",
2206 + ei_local->rx_ring0[rx_dma_owner_idx0].
2208 + printk("rx_info2.PLEN1=0x%x\n",
2209 + ei_local->rx_ring0[rx_dma_owner_idx0].
2211 + printk("rx_info3.TPID=0x%x\n",
2212 + ei_local->rx_ring0[rx_dma_owner_idx0].
2214 + printk("rx_info3.VID=0x%x\n",
2215 + ei_local->rx_ring0[rx_dma_owner_idx0].
2217 + printk("rx_info4.IP6=0x%x\n",
2218 + ei_local->rx_ring0[rx_dma_owner_idx0].
2220 + printk("rx_info4.IP4=0x%x\n",
2221 + ei_local->rx_ring0[rx_dma_owner_idx0].
2223 + printk("rx_info4.IP4F=0x%x\n",
2224 + ei_local->rx_ring0[rx_dma_owner_idx0].
2226 + printk("rx_info4.TACK=0x%x\n",
2227 + ei_local->rx_ring0[rx_dma_owner_idx0].
2229 + printk("rx_info4.L4VLD=0x%x\n",
2230 + ei_local->rx_ring0[rx_dma_owner_idx0].
2232 + printk("rx_info4.L4F=0x%x\n",
2233 + ei_local->rx_ring0[rx_dma_owner_idx0].
2235 + printk("rx_info4.SPORT=0x%x\n",
2236 + ei_local->rx_ring0[rx_dma_owner_idx0].
2238 + printk("rx_info4.CRSN=0x%x\n",
2239 + ei_local->rx_ring0[rx_dma_owner_idx0].
2241 + printk("rx_info4.FOE_Entry=0x%x\n",
2242 + ei_local->rx_ring0[rx_dma_owner_idx0].
2243 + rxd_info4.FOE_Entry);
2244 + printk("-------------------------------\n");
2248 + if (pdma_dvt_get_lro_test_config() == PDMA_TEST_LRO_FORCE_AGGREGATE) {
2249 + if (rx_ring_no == 1) {
2250 + printk("PASS!!! => RING1: rxd_info1.PDP0=0x%x\n",
2251 + ei_local->rx_ring1[rx_dma_owner_idx0].
2253 + skb_dump(ei_local->netrx1_skbuf[rx_dma_owner_idx0]);
2254 + pdma_dvt_reset_config();
2260 +int pdma_dvt_show_ctrl(int par1, int par2)
2263 + g_pdma_dvt_show_config = 0;
2265 + g_pdma_dvt_show_config |= (1 << par2);
2270 +int pdma_dvt_test_rx_ctrl(int par1, int par2)
2273 + g_pdma_dvt_rx_test_config = 0;
2275 + g_pdma_dvt_rx_test_config |= (1 << par2);
2280 +int pdma_dvt_test_tx_ctrl(int par1, int par2)
2283 + g_pdma_dvt_tx_test_config = 0;
2285 + g_pdma_dvt_tx_test_config |= (1 << par2);
2290 +int pdma_dvt_test_debug_ctrl(int par1, int par2)
2293 + g_pdma_dvt_debug_test_config = 0;
2295 + g_pdma_dvt_debug_test_config |= (1 << par2);
2300 +int pdma_dvt_test_lro_ctrl(int par1, int par2)
2302 + g_pdma_dvt_lro_test_config = par2;
2304 +#if defined(CONFIG_RAETH_HW_LRO) || defined(CONFIG_RAETH_MULTIPLE_RX_RING)
2305 + if (pdma_dvt_lro_func[par2])
2306 + (*pdma_dvt_lro_func[par2]) ();
2307 +#endif /* #if defined (CONFIG_RAETH_HW_LRO) */
2312 +unsigned int pdma_dvt_get_show_config()
2314 + return g_pdma_dvt_show_config;
2317 +unsigned int pdma_dvt_get_rx_test_config()
2319 + return g_pdma_dvt_rx_test_config;
2322 +unsigned int pdma_dvt_get_tx_test_config()
2324 + return g_pdma_dvt_tx_test_config;
2327 +unsigned int pdma_dvt_get_debug_test_config()
2329 + return g_pdma_dvt_debug_test_config;
2332 +unsigned int pdma_dvt_get_lro_test_config()
2334 + return g_pdma_dvt_lro_test_config;
2337 +void pdma_dvt_reset_config()
2339 + g_pdma_dvt_show_config = 0;
2340 + g_pdma_dvt_rx_test_config = 0;
2341 + g_pdma_dvt_tx_test_config = 0;
2342 + g_pdma_dvt_lro_test_config = 0;
2345 +void raeth_pdma_rx_desc_dvt(END_DEVICE *ei_local, int rx_dma_owner_idx0)
2348 + unsigned int udf = 0;
2351 + if (pdma_dvt_get_show_config() & PDMA_SHOW_RX_DESC) {
2352 + printk("------- rt2880_eth_recv --------\n");
2353 + printk("rx_info1=0x%x\n",
2354 + *(unsigned int *)&ei_local->
2355 + rx_ring0[rx_dma_owner_idx0].rxd_info1);
2356 + printk("rx_info2=0x%x\n",
2357 + *(unsigned int *)&ei_local->
2358 + rx_ring0[rx_dma_owner_idx0].rxd_info2);
2359 + printk("rx_info3=0x%x\n",
2360 + *(unsigned int *)&ei_local->
2361 + rx_ring0[rx_dma_owner_idx0].rxd_info3);
2362 + printk("rx_info4=0x%x\n",
2363 + *(unsigned int *)&ei_local->
2364 + rx_ring0[rx_dma_owner_idx0].rxd_info4);
2365 + printk("-------------------------------\n");
2367 + if ((pdma_dvt_get_show_config() & PDMA_SHOW_DETAIL_RX_DESC) ||
2368 + pdma_dvt_get_rx_test_config()) {
2370 + udf = ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info4.IP6 << 5 |
2371 + ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info4.IP4 << 4 |
2372 + ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info4.IP4F << 3 |
2373 + ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info4.TACK << 2 |
2374 + ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info4.L4VLD << 1 |
2375 + ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info4.L4F;
2377 + printk("------- rt2880_eth_recv --------\n");
2378 + printk("rx_info1.PDP0=0x%x\n",
2379 + ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info1.PDP0);
2380 + printk("rx_info2.DDONE_bit=0x%x\n",
2381 + ei_local->rx_ring0[rx_dma_owner_idx0].
2382 + rxd_info2.DDONE_bit);
2383 + printk("rx_info2.LS0=0x%x\n",
2384 + ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info2.LS0);
2385 + printk("rx_info2.PLEN0=0x%x\n",
2386 + ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info2.PLEN0);
2387 + printk("rx_info2.TAG=0x%x\n",
2388 + ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info2.TAG);
2389 +#if defined(CONFIG_ARCH_MT7623)
2390 + printk("rx_info2.LRO_AGG_CNT=0x%x\n",
2391 + ei_local->rx_ring0[rx_dma_owner_idx0].
2392 + rxd_info2.LRO_AGG_CNT);
2394 + printk("rx_info2.LS1=0x%x\n",
2395 + ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info2.LS1);
2396 +#endif /* CONFIG_ARCH_MT7623 */
2397 + printk("rx_info2.PLEN1=0x%x\n",
2398 + ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info2.PLEN1);
2399 + printk("rx_info3.TPID=0x%x\n",
2400 + ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info3.TPID);
2401 + printk("rx_info3.VID=0x%x\n",
2402 + ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info3.VID);
2404 + printk("rx_info4.UDF=0x%x\n", udf);
2406 + printk("rx_info4.IP6=0x%x\n",
2407 + ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info4.IP6);
2408 + printk("rx_info4.IP4=0x%x\n",
2409 + ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info4.IP4);
2410 + printk("rx_info4.IP4F=0x%x\n",
2411 + ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info4.IP4F);
2412 + printk("rx_info4.TACK=0x%x\n",
2413 + ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info4.TACK);
2414 + printk("rx_info4.L4VLD=0x%x\n",
2415 + ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info4.L4VLD);
2416 + printk("rx_info4.L4F=0x%x\n",
2417 + ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info4.L4F);
2418 + printk("rx_info4.SPORT=0x%x\n",
2419 + ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info4.SP);
2420 + printk("rx_info4.CRSN=0x%x\n",
2421 + ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info4.CRSN);
2422 + printk("rx_info4.FOE_Entry=0x%x\n",
2423 + ei_local->rx_ring0[rx_dma_owner_idx0].
2424 + rxd_info4.FOE_Entry);
2425 + printk("-------------------------------\n");
2427 + if ((pdma_dvt_get_rx_test_config() & PDMA_TEST_RX_IPV6)) {
2428 + if (ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info4.IP6) {
2429 + printk("PASS!!! => rx_info4.IP6=0x%x\n",
2430 + ei_local->rx_ring0[rx_dma_owner_idx0].
2432 + pdma_dvt_reset_config();
2434 + } else if ((pdma_dvt_get_rx_test_config() & PDMA_TEST_RX_IPV4)) {
2435 + if (ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info4.IP4) {
2436 + printk("PASS!!! => rx_info4.IP4=0x%x\n",
2437 + ei_local->rx_ring0[rx_dma_owner_idx0].
2439 + pdma_dvt_reset_config();
2441 + } else if ((pdma_dvt_get_rx_test_config() & PDMA_TEST_RX_IPV4F)) {
2442 + if (ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info4.IP4F) {
2443 + printk("PASS!!! => rx_info4.IP4F=0x%x\n",
2444 + ei_local->rx_ring0[rx_dma_owner_idx0].
2446 + pdma_dvt_reset_config();
2448 + } else if ((pdma_dvt_get_rx_test_config() & PDMA_TEST_RX_L4VLD)) {
2449 + if (ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info4.L4VLD) {
2450 + printk("PASS!!! => rx_info4.L4VLD=0x%x\n",
2451 + ei_local->rx_ring0[rx_dma_owner_idx0].
2453 + pdma_dvt_reset_config();
2455 + } else if ((pdma_dvt_get_rx_test_config() & PDMA_TEST_RX_L4F)) {
2456 + if (ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info4.L4F) {
2457 + printk("PASS!!! => rx_info4.L4F=0x%x\n",
2458 + ei_local->rx_ring0[rx_dma_owner_idx0].
2460 + pdma_dvt_reset_config();
2462 + } else if ((pdma_dvt_get_rx_test_config() & PDMA_TEST_RX_SPORT)) {
2463 + if (ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info4.SP == 1) {
2464 + g_pdma_dev_lanport++;
2465 + } else if (ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info4.SP ==
2467 + g_pdma_dev_wanport++;
2469 + if (g_pdma_dev_lanport && g_pdma_dev_wanport) {
2471 + ("PASS!!! => g_pdma_dev_lanport=0x%x, g_pdma_dev_wanport=0x%x",
2472 + g_pdma_dev_lanport, g_pdma_dev_wanport);
2474 + g_pdma_dev_lanport = 0;
2475 + g_pdma_dev_wanport = 0;
2476 + pdma_dvt_reset_config();
2478 + } else if ((pdma_dvt_get_rx_test_config() & PDMA_TEST_RX_VID_OFF)) {
2479 + if (!ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info3.VID) {
2480 + printk("PASS!!! => rxd_info3.VID=0x%x\n",
2481 + ei_local->rx_ring0[rx_dma_owner_idx0].
2483 + pdma_dvt_reset_config();
2485 + } else if ((pdma_dvt_get_rx_test_config() & PDMA_TEST_RX_VID_ON)) {
2486 + printk("RX data: (PDP0=%x)\n",
2487 + (unsigned int)ei_local->
2488 + netrx0_skbuf[rx_dma_owner_idx0]->data);
2490 + skb_dump(ei_local->netrx0_skbuf[rx_dma_owner_idx0]);
2492 + if (ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info3.VID &&
2493 + ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info2.TAG) {
2494 + printk("PASS!!! => rxd_info2.TAG=0x%x\n",
2495 + ei_local->rx_ring0[rx_dma_owner_idx0].
2497 + printk("PASS!!! => rxd_info3.VID=0x%x\n",
2498 + ei_local->rx_ring0[rx_dma_owner_idx0].
2500 + pdma_dvt_reset_config();
2505 +void raeth_pdma_tx_vlan_dvt(END_DEVICE *ei_local,
2506 + unsigned long tx_cpu_owner_idx0)
2508 + if ((pdma_dvt_get_tx_test_config() & PDMA_TEST_TX_VLAN_ON)) {
2509 + ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.VLAN_TAG = 0x10000 | 0xE007; /* VLAN_TAG = 0x1E007 */
2510 + } else if ((pdma_dvt_get_tx_test_config() & PDMA_TEST_TX_VLAN_ZERO)) {
2511 + ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.VLAN_TAG = 0x10000 | 0x0000; /* VLAN_TAG = 0x10000 */
2512 + } else if ((pdma_dvt_get_tx_test_config() & PDMA_TEST_TX_VLAN_MAX)) {
2513 + ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.VLAN_TAG = 0x10000 | 0xFFFF; /* VLAN_TAG = 0x1FFFF */
2517 +void raeth_pdma_tx_desc_dvt(END_DEVICE *ei_local,
2518 + unsigned long tx_cpu_owner_idx0)
2520 + if (PDMA_TEST_RX_UDF == pdma_dvt_get_rx_test_config()) {
2521 + ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.FPORT = 4; /* PPE */
2522 + ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.UDF = 0x2F;
2524 + if (pdma_dvt_get_show_config() & PDMA_SHOW_TX_DESC) {
2525 + printk("------- rt2880_eth_send --------\n");
2526 + printk("tx_info1=%x\n",
2527 + *(unsigned int *)&ei_local->
2528 + tx_ring0[tx_cpu_owner_idx0].txd_info1);
2529 + printk("tx_info2=%x\n",
2530 + *(unsigned int *)&ei_local->
2531 + tx_ring0[tx_cpu_owner_idx0].txd_info2);
2532 + printk("tx_info3=%x\n",
2533 + *(unsigned int *)&ei_local->
2534 + tx_ring0[tx_cpu_owner_idx0].txd_info3);
2535 + printk("tx_info4=%x\n",
2536 + *(unsigned int *)&ei_local->
2537 + tx_ring0[tx_cpu_owner_idx0].txd_info4);
2538 + printk("--------------------------------\n");
2540 + if ((pdma_dvt_get_show_config() & PDMA_SHOW_DETAIL_TX_DESC) ||
2541 + pdma_dvt_get_tx_test_config()) {
2542 + printk("------- rt2880_eth_send --------\n");
2543 + printk("tx_info1.SDP0=%x\n",
2544 + ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info1.SDP0);
2545 + printk("tx_info2.DDONE_bit=%x\n",
2546 + ei_local->tx_ring0[tx_cpu_owner_idx0].
2547 + txd_info2.DDONE_bit);
2548 + printk("tx_info2.LS0_bit=%x\n",
2549 + ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info2.LS0_bit);
2550 + printk("tx_info2.SDL0=%x\n",
2551 + ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info2.SDL0);
2552 + printk("tx_info2.BURST_bit=%x\n",
2553 + ei_local->tx_ring0[tx_cpu_owner_idx0].
2554 + txd_info2.BURST_bit);
2555 + printk("tx_info2.LS1_bit=%x\n",
2556 + ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info2.LS1_bit);
2557 + printk("tx_info2.SDL1=%x\n",
2558 + ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info2.SDL1);
2559 + printk("tx_info3.SDP1=%x\n",
2560 + ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info3.SDP1);
2561 + printk("tx_info4.TUI_CO=%x\n",
2562 + ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.TUI_CO);
2563 + printk("tx_info4.TSO=%x\n",
2564 + ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.TSO);
2565 + printk("tx_info4.FPORT=%x\n",
2566 + ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.FPORT);
2567 + printk("tx_info4.UDF=%x\n",
2568 + ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.UDF);
2569 + printk("tx_info4.RESV=%x\n",
2570 + ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.RESV);
2571 + printk("tx_info4.VLAN_TAG=%x\n",
2572 + ei_local->tx_ring0[tx_cpu_owner_idx0].
2573 + txd_info4.VLAN_TAG);
2574 + printk("--------------------------------\n");
2576 + if ((pdma_dvt_get_tx_test_config() & PDMA_TEST_TX_LAN_SPORT)) {
2577 + if (ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.FPORT == 1) {
2578 + printk("PASS!!! => txd_info4.FPORT=0x%x\n",
2579 + ei_local->tx_ring0[tx_cpu_owner_idx0].
2581 + pdma_dvt_reset_config();
2583 + } else if ((pdma_dvt_get_tx_test_config() & PDMA_TEST_TX_WAN_SPORT)) {
2584 + if (ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.FPORT == 2) {
2585 + printk("PASS!!! => txd_info4.FPORT=0x%x\n",
2586 + ei_local->tx_ring0[tx_cpu_owner_idx0].
2588 + pdma_dvt_reset_config();
2590 + } else if ((pdma_dvt_get_tx_test_config() & PDMA_TEST_TX_VLAN_ON)) {
2591 + if (ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.VLAN_TAG) {
2592 + printk("PASS!!! => txd_info4.VLAN_TAG=0x%x\n",
2593 + ei_local->tx_ring0[tx_cpu_owner_idx0].
2594 + txd_info4.VLAN_TAG);
2595 + /* pdma_dvt_reset_config(); */
2597 + } else if ((pdma_dvt_get_tx_test_config() & PDMA_TEST_TX_VLAN_OFF)) {
2598 + if (!ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.VLAN_TAG) {
2599 + printk("PASS!!! => txd_info4.VLAN_TAG=0x%x\n",
2600 + ei_local->tx_ring0[tx_cpu_owner_idx0].
2601 + txd_info4.VLAN_TAG);
2602 + pdma_dvt_reset_config();
2604 + } else if ((pdma_dvt_get_tx_test_config() & PDMA_TEST_TX_VLAN_ZERO)) {
2605 + if (ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.VLAN_TAG) {
2606 + printk("PASS!!! => txd_info4.VLAN_TAG=0x%x\n",
2607 + ei_local->tx_ring0[tx_cpu_owner_idx0].
2608 + txd_info4.VLAN_TAG);
2609 + /* pdma_dvt_reset_config(); */
2611 + } else if ((pdma_dvt_get_tx_test_config() & PDMA_TEST_TX_VLAN_MAX)) {
2612 + if (ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.VLAN_TAG) {
2613 + printk("PASS!!! => txd_info4.VLAN_TAG=0x%x\n",
2614 + ei_local->tx_ring0[tx_cpu_owner_idx0].
2615 + txd_info4.VLAN_TAG);
2616 + /* pdma_dvt_reset_config(); */
2621 +void raeth_pdma_lro_dly_int_dvt(void)
2623 + unsigned int reg_int_val;
2625 + reg_int_val = sysRegRead(RAETH_FE_INT_STATUS);
2627 + if (pdma_dvt_get_lro_test_config() == PDMA_TEST_LRO_DLY_INT0) {
2628 + if ((reg_int_val & RX_DLY_INT)) {
2629 + printk("PASS!!! => reg_int_val=0x%x\n", reg_int_val);
2630 + pdma_dvt_reset_config();
2632 + } else if (pdma_dvt_get_lro_test_config() == PDMA_TEST_LRO_DLY_INT1) {
2633 + if ((reg_int_val & RING1_RX_DLY_INT)) {
2634 + printk("PASS!!! => reg_int_val=0x%x\n", reg_int_val);
2635 + pdma_dvt_reset_config();
2637 + } else if (pdma_dvt_get_lro_test_config() == PDMA_TEST_LRO_DLY_INT2) {
2638 + if ((reg_int_val & RING2_RX_DLY_INT)) {
2639 + printk("PASS!!! => reg_int_val=0x%x\n", reg_int_val);
2640 + pdma_dvt_reset_config();
2642 + } else if (pdma_dvt_get_lro_test_config() == PDMA_TEST_LRO_DLY_INT3) {
2643 + if ((reg_int_val & RING3_RX_DLY_INT)) {
2644 + printk("PASS!!! => reg_int_val=0x%x\n", reg_int_val);
2645 + pdma_dvt_reset_config();
2650 +void pdma_dvt_set_dma_mode(void)
2652 +#if defined(CONFIG_RAETH_PDMA_LEGACY_MODE)
2653 + unsigned int regVal;
2654 + regVal = sysRegRead(ADMA_LRO_CTRL_DW3);
2655 + regVal &= ~(BIT(15));
2656 + sysRegWrite(ADMA_LRO_CTRL_DW3, regVal);
2657 +#endif /* CONFIG_RAETH_PDMA_LEGACY_MODE */
2660 diff --git a/drivers/net/ethernet/raeth/dvt/raether_pdma_dvt.h b/drivers/net/ethernet/raeth/dvt/raether_pdma_dvt.h
2661 new file mode 100755
2662 index 0000000..600aab7
2664 +++ b/drivers/net/ethernet/raeth/dvt/raether_pdma_dvt.h
2666 +/* Show controls */
2667 +#define PDMA_SHOW_RX_DESC (1 << 1)
2668 +#define PDMA_SHOW_TX_DESC (1 << 2)
2669 +#define PDMA_SHOW_DETAIL_RX_DESC (1 << 3)
2670 +#define PDMA_SHOW_DETAIL_TX_DESC (1 << 4)
2672 +/* Rx test controls */
2673 +#define PDMA_TEST_RX_UDF (1 << 1)
2674 +#define PDMA_TEST_RX_IPV6 (1 << 2)
2675 +#define PDMA_TEST_RX_IPV4 (1 << 3)
2676 +#define PDMA_TEST_RX_IPV4F (1 << 4)
2677 +#define PDMA_TEST_RX_L4VLD (1 << 5)
2678 +#define PDMA_TEST_RX_L4F (1 << 6)
2679 +#define PDMA_TEST_RX_SPORT (1 << 7)
2680 +#define PDMA_TEST_RX_VID_ON (1 << 8)
2681 +#define PDMA_TEST_RX_VID_OFF (1 << 9)
2683 +/* Tx test controls */
2684 +#define PDMA_TEST_TX_LAN_SPORT (1 << 1)
2685 +#define PDMA_TEST_TX_WAN_SPORT (1 << 2)
2686 +#define PDMA_TEST_TX_VLAN_ON (1 << 3)
2687 +#define PDMA_TEST_TX_VLAN_OFF (1 << 4)
2688 +#define PDMA_TEST_TX_VLAN_ZERO (1 << 5)
2689 +#define PDMA_TEST_TX_VLAN_MAX (1 << 6)
2690 +#define PDMA_TEST_TX_PDMA_LPK (1 << 31)
2692 +/* Debug controls */
2693 +#define PDMA_TEST_TSO_DEBUG (1 << 1)
2695 +/* LRO test controls */
2696 +typedef int (*PDMA_LRO_DVT_FUNC) (void);
2698 +#define PDMA_TEST_LRO_DISABLE (0)
2699 +#define PDMA_TEST_LRO_FORCE_PORT (1)
2700 +#define PDMA_TEST_LRO_AUTO_LEARN (2)
2701 +#define PDMA_TEST_LRO_AUTO_IPV6 (3)
2702 +#define PDMA_TEST_LRO_AUTO_MYIP (4)
2703 +#define PDMA_TEST_LRO_FORCE_AGGREGATE (5)
2704 +#define PDMA_TEST_NON_LRO_PORT_ID (6)
2705 +#define PDMA_TEST_NON_LRO_STAG (7)
2706 +#define PDMA_TEST_NON_LRO_VLAN (8)
2707 +#define PDMA_TEST_NON_LRO_TCP_ACK (9)
2708 +#define PDMA_TEST_NON_LRO_PRI1 (10)
2709 +#define PDMA_TEST_NON_LRO_PRI2 (11)
2710 +#define PDMA_TEST_LRO_DLY_INT0 (12)
2711 +#define PDMA_TEST_LRO_DLY_INT1 (13)
2712 +#define PDMA_TEST_LRO_DLY_INT2 (14)
2713 +#define PDMA_TEST_LRO_DLY_INT3 (15)
2715 +void skb_dump(struct sk_buff *sk);
2717 +int pdma_dvt_show_ctrl(int par1, int par2);
2718 +int pdma_dvt_test_rx_ctrl(int par1, int par2);
2719 +int pdma_dvt_test_tx_ctrl(int par1, int par2);
2720 +int pdma_dvt_test_debug_ctrl(int par1, int par2);
2721 +int pdma_dvt_test_lro_ctrl(int par1, int par2);
2723 +unsigned int pdma_dvt_get_show_config(void);
2724 +unsigned int pdma_dvt_get_rx_test_config(void);
2725 +unsigned int pdma_dvt_get_tx_test_config(void);
2726 +unsigned int pdma_dvt_get_debug_test_config(void);
2727 +unsigned int pdma_dvt_get_lro_test_config(void);
2728 +void pdma_dvt_reset_config(void);
2730 +void raeth_pdma_rx_desc_dvt(END_DEVICE *ei_local, int rx_dma_owner_idx0);
2731 +void raeth_pdma_tx_vlan_dvt(END_DEVICE *ei_local,
2732 + unsigned long tx_cpu_owner_idx0);
2733 +void raeth_pdma_tx_desc_dvt(END_DEVICE *ei_local,
2734 + unsigned long tx_cpu_owner_idx0);
2736 +void raeth_pdma_lro_dvt(int rx_ring_no, END_DEVICE *ei_local,
2737 + int rx_dma_owner_idx0);
2738 +void raeth_pdma_lro_dly_int_dvt(void);
2739 +void pdma_dvt_set_dma_mode(void);
2741 diff --git a/drivers/net/ethernet/raeth/ethtool_readme.txt b/drivers/net/ethernet/raeth/ethtool_readme.txt
2742 new file mode 100644
2743 index 0000000..10e918b
2745 +++ b/drivers/net/ethernet/raeth/ethtool_readme.txt
2748 +Ethtool readme for selecting different PHY address.
2750 +Before running any ethtool command you should make sure the current PHY
2751 +address is the expected one. The default PHY address is 1 (port 1).
2753 +You can change the current PHY address to X (0~4) with the following command:
2754 +# echo X > /proc/rt2880/gmac
2756 +The ethtool command also shows the current PHY address, as in the example below.
2760 + Supported ports: [ TP MII ]
2761 + Supported link modes: 10baseT/Half 10baseT/Full
2762 + 100baseT/Half 100baseT/Full
2763 + Supports auto-negotiation: Yes
2764 + Advertised link modes: 10baseT/Half 10baseT/Full
2765 + 100baseT/Half 100baseT/Full
2766 + Advertised auto-negotiation: No
2771 + Transceiver: internal
2772 + Auto-negotiation: off
2773 + Current message level: 0x00000000 (0)
2777 +The "PHYAD" field shows the current PHY address.
2783 +# echo 1 > /proc/rt2880/gmac # change phy address to 1
2787 +# echo 0 > /proc/rt2880/gmac # change phy address to 0
2791 diff --git a/drivers/net/ethernet/raeth/mcast.c b/drivers/net/ethernet/raeth/mcast.c
2792 new file mode 100644
2793 index 0000000..d796b03
2795 +++ b/drivers/net/ethernet/raeth/mcast.c
2797 +#include <linux/config.h>
2798 +#include <linux/version.h>
2799 +#include <linux/module.h>
2800 +#include <linux/skbuff.h>
2801 +#include <linux/kernel.h>
2802 +#include <linux/init.h>
2803 +#include <linux/types.h>
2804 +#include <linux/netdevice.h>
2805 +#include <linux/if_vlan.h>
2808 +#define MAX_MCAST_ENTRY 16
2809 +#define AGEING_TIME 5 //Unit: Sec
2810 +#define MAC_ARG(x) ((u8*)(x))[0],((u8*)(x))[1],((u8*)(x))[2], \
2811 + ((u8*)(x))[3],((u8*)(x))[4],((u8*)(x))[5]
2813 +//#define MCAST_DEBUG
2815 +#define MCAST_PRINT(fmt, args...) printk(KERN_INFO fmt, ## args)
2817 +#define MCAST_PRINT(fmt, args...) { }
2821 + uint8_t src_mac[6];
2822 + uint8_t dst_mac[6];
2825 + uint32_t use_count;
2826 + unsigned long ageout;
2829 +mcast_entry mcast_tbl[MAX_MCAST_ENTRY];
2830 +atomic_t mcast_entry_num=ATOMIC_INIT(0);
2831 +DECLARE_MUTEX(mtbl_lock);
2833 +uint32_t inline is_multicast_pkt(uint8_t *mac)
2835 + if(mac[0]==0x01 && mac[1]==0x00 && mac[2]==0x5E) {
2842 +int32_t inline mcast_entry_get(uint16_t vlan_id, uint8_t *src_mac, uint8_t *dst_mac)
2846 + for(i=0;i<MAX_MCAST_ENTRY;i++) {
2847 + if( (mcast_tbl[i].vlan_id == vlan_id) &&
2848 + memcmp(mcast_tbl[i].src_mac,src_mac, 6)==0 &&
2849 + memcmp(mcast_tbl[i].dst_mac, dst_mac, 6)==0 &&
2850 + mcast_tbl[i].valid == 1) {
2857 +int inline __add_mcast_entry(uint16_t vlan_id, uint8_t *src_mac, uint8_t *dst_mac)
2861 + // use empty or ageout entry
2862 + for(i=0;i<MAX_MCAST_ENTRY;i++) {
2863 + if( mcast_tbl[i].valid==0 ||
2864 + time_after(jiffies, mcast_tbl[i].ageout)) {
2866 + if(mcast_tbl[i].valid==0) {
2867 + atomic_inc(&mcast_entry_num);
2869 + mcast_tbl[i].vlan_id = vlan_id;
2870 + memcpy(mcast_tbl[i].src_mac, src_mac, 6);
2871 + memcpy(mcast_tbl[i].dst_mac, dst_mac, 6);
2872 + mcast_tbl[i].valid=1;
2873 + mcast_tbl[i].use_count=1;
2874 + mcast_tbl[i].ageout=jiffies + AGEING_TIME * HZ;
2880 + MCAST_PRINT("RAETH: Multicast Table is FULL!!\n");
2884 +int inline mcast_entry_ins(uint16_t vlan_id, uint8_t *src_mac, uint8_t *dst_mac)
2886 + int entry_num=0, ret=0;
2889 + if((entry_num = mcast_entry_get(vlan_id, src_mac, dst_mac)) >=0) {
2890 + mcast_tbl[entry_num].use_count++;
2891 + mcast_tbl[entry_num].ageout=jiffies + AGEING_TIME * HZ;
2892 + MCAST_PRINT("%s: Update %0X:%0X:%0X:%0X:%0X:%0X's use_count=%d\n" \
2893 + ,__FUNCTION__, MAC_ARG(dst_mac), mcast_tbl[entry_num].use_count);
2895 + }else { //if entry not found, create new entry.
2896 + MCAST_PRINT("%s: Create new entry %0X:%0X:%0X:%0X:%0X:%0X\n", \
2897 + __FUNCTION__, MAC_ARG(dst_mac));
2898 + ret = __add_mcast_entry(vlan_id, src_mac,dst_mac);
2910 + * 1: entry not found
2912 +int inline mcast_entry_del(uint16_t vlan_id, uint8_t *src_mac, uint8_t *dst_mac)
2917 + if((entry_num = mcast_entry_get(vlan_id, src_mac, dst_mac)) >=0) {
2918 + if((--mcast_tbl[entry_num].use_count)==0) {
2919 + MCAST_PRINT("%s: %0X:%0X:%0X:%0X:%0X:%0X (entry_num=%d)\n", \
2920 + __FUNCTION__, MAC_ARG(dst_mac), entry_num);
2921 + mcast_tbl[entry_num].valid=0;
2922 + atomic_dec(&mcast_entry_num);
2927 + /* this multicast packet was not sent by myself, just ignore it */
2938 +int32_t mcast_rx(struct sk_buff * skb)
2940 + struct vlan_ethhdr *eth = (struct vlan_ethhdr *)(skb->data-ETH_HLEN);
2942 + /* if we do not send multicast packet before,
2943 + * we don't need to check re-inject multicast packet.
2945 + if (atomic_read(&mcast_entry_num)==0) {
2950 + if(is_multicast_pkt(eth->h_dest)) {
2951 + MCAST_PRINT("%s: %0X:%0X:%0X:%0X:%0X:%0X\n", __FUNCTION__, \
2952 + MAC_ARG(eth->h_dest));
2954 + if(ntohs(eth->h_vlan_proto)==0x8100) {
2955 + return mcast_entry_del(eth->h_vlan_TCI, eth->h_source, eth->h_dest);
2957 + return mcast_entry_del(0, eth->h_source, eth->h_dest);
2965 +int32_t mcast_tx(struct sk_buff *skb)
2967 + struct vlan_ethhdr *eth = (struct vlan_ethhdr *)(skb->data);
2970 + if(is_multicast_pkt(eth->h_dest)) {
2971 + MCAST_PRINT("%s: %0X:%0X:%0X:%0X:%0X:%0X\n", __FUNCTION__,\
2972 + MAC_ARG(eth->h_dest));
2974 + if(ntohs(eth->h_vlan_proto)==0x8100) {
2975 + mcast_entry_ins(eth->h_vlan_TCI, eth->h_source, eth->h_dest);
2977 + mcast_entry_ins(0, eth->h_source, eth->h_dest);
2984 diff --git a/drivers/net/ethernet/raeth/mii_mgr.c b/drivers/net/ethernet/raeth/mii_mgr.c
2985 new file mode 100644
2986 index 0000000..77a47f1
2988 +++ b/drivers/net/ethernet/raeth/mii_mgr.c
2990 +#include <linux/module.h>
2991 +#include <linux/version.h>
2992 +#include <linux/netdevice.h>
2994 +#include <linux/kernel.h>
2995 +#include <linux/sched.h>
2996 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0)
2997 +#include <asm/rt2880/rt_mmap.h>
3000 +#include "ra2882ethreg.h"
3001 +#include "raether.h"
3004 +#if defined (CONFIG_RALINK_RT3052) || defined (CONFIG_RALINK_RT3352) || defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_MT7628)
3005 +#define PHY_CONTROL_0 0xC0
3006 +#define PHY_CONTROL_1 0xC4
3007 +#define MDIO_PHY_CONTROL_0 (RALINK_ETH_SW_BASE + PHY_CONTROL_0)
3008 +#define MDIO_PHY_CONTROL_1 (RALINK_ETH_SW_BASE + PHY_CONTROL_1)
3010 +#define GPIO_MDIO_BIT (1<<7)
3011 +#define GPIO_PURPOSE_SELECT 0x60
3012 +#define GPIO_PRUPOSE (RALINK_SYSCTL_BASE + GPIO_PURPOSE_SELECT)
3014 +#elif defined (CONFIG_RALINK_RT6855) || defined (CONFIG_RALINK_RT6855A)
3016 +#define PHY_CONTROL_0 0x7004
3017 +#define MDIO_PHY_CONTROL_0 (RALINK_ETH_SW_BASE + PHY_CONTROL_0)
3018 +#define enable_mdio(x)
3020 +#elif defined (CONFIG_RALINK_MT7620)
3022 +#define PHY_CONTROL_0 0x7004
3023 +#define MDIO_PHY_CONTROL_0 (RALINK_ETH_SW_BASE + PHY_CONTROL_0)
3024 +#define enable_mdio(x)
3026 +#elif defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
3028 +#define PHY_CONTROL_0 0x0004
3029 +#define MDIO_PHY_CONTROL_0 (RALINK_ETH_SW_BASE + PHY_CONTROL_0)
3030 +#define enable_mdio(x)
3033 +#define PHY_CONTROL_0 0x00
3034 +#define PHY_CONTROL_1 0x04
3035 +#define MDIO_PHY_CONTROL_0 (RALINK_FRAME_ENGINE_BASE + PHY_CONTROL_0)
3036 +#define MDIO_PHY_CONTROL_1 (RALINK_FRAME_ENGINE_BASE + PHY_CONTROL_1)
3037 +#define enable_mdio(x)
3040 +#if defined (CONFIG_RALINK_RT3052) || defined (CONFIG_RALINK_RT3352) || defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_MT7628)
3041 +void enable_mdio(int enable)
3043 +#if !defined (CONFIG_P5_MAC_TO_PHY_MODE) && !defined(CONFIG_GE1_RGMII_AN) && !defined(CONFIG_GE2_RGMII_AN) && \
3044 + !defined (CONFIG_GE1_MII_AN) && !defined (CONFIG_GE2_MII_AN) && !defined (CONFIG_RALINK_MT7628)
3045 + u32 data = sysRegRead(GPIO_PRUPOSE);
3047 + data &= ~GPIO_MDIO_BIT;
3049 + data |= GPIO_MDIO_BIT;
3050 + sysRegWrite(GPIO_PRUPOSE, data);
3055 +#if defined (CONFIG_RALINK_RT6855) || defined (CONFIG_RALINK_RT6855A)
3057 +u32 mii_mgr_read(u32 phy_addr, u32 phy_register, u32 *read_data)
3059 + u32 volatile status = 0;
3061 + unsigned long volatile t_start = jiffies;
3062 + u32 volatile data = 0;
3064 + /* We enable mdio gpio purpose register, and disable it when exit. */
3067 + // make sure previous read operation is complete
3069 + // 0 : Read/write operation complete
3070 + if(!( sysRegRead(MDIO_PHY_CONTROL_0) & (0x1 << 31)))
3074 + else if (time_after(jiffies, t_start + 5*HZ)) {
3076 + printk("\n MDIO Read operation is ongoing !!\n");
3081 + data = (0x01 << 16) | (0x02 << 18) | (phy_addr << 20) | (phy_register << 25);
3082 + sysRegWrite(MDIO_PHY_CONTROL_0, data);
3084 + sysRegWrite(MDIO_PHY_CONTROL_0, data);
3085 + //printk("\n Set Command [0x%08X] to PHY !!\n",MDIO_PHY_CONTROL_0);
3088 + // make sure read operation is complete
3089 + t_start = jiffies;
3091 + if (!(sysRegRead(MDIO_PHY_CONTROL_0) & (0x1 << 31))) {
3092 + status = sysRegRead(MDIO_PHY_CONTROL_0);
3093 + *read_data = (u32)(status & 0x0000FFFF);
3098 + else if (time_after(jiffies, t_start+5*HZ)) {
3100 + printk("\n MDIO Read operation is ongoing and Time Out!!\n");
3106 +u32 mii_mgr_write(u32 phy_addr, u32 phy_register, u32 write_data)
3108 + unsigned long volatile t_start=jiffies;
3109 + u32 volatile data;
3113 + // make sure previous write operation is complete
3115 + if (!(sysRegRead(MDIO_PHY_CONTROL_0) & (0x1 << 31)))
3119 + else if (time_after(jiffies, t_start + 5 * HZ)) {
3121 + printk("\n MDIO Write operation ongoing\n");
3126 + data = (0x01 << 16)| (1<<18) | (phy_addr << 20) | (phy_register << 25) | write_data;
3127 + sysRegWrite(MDIO_PHY_CONTROL_0, data);
3129 + sysRegWrite(MDIO_PHY_CONTROL_0, data); //start operation
3130 + //printk("\n Set Command [0x%08X] to PHY !!\n",MDIO_PHY_CONTROL_0);
3132 + t_start = jiffies;
3134 + // make sure write operation is complete
3136 + if (!(sysRegRead(MDIO_PHY_CONTROL_0) & (0x1 << 31))) //0 : Read/write operation complete
3141 + else if (time_after(jiffies, t_start + 5 * HZ)) {
3143 + printk("\n MDIO Write operation Time Out\n");
3148 +#elif defined (CONFIG_RALINK_MT7621) || defined (CONFIG_RALINK_MT7620) || defined (CONFIG_ARCH_MT7623)
3150 +u32 __mii_mgr_read(u32 phy_addr, u32 phy_register, u32 *read_data)
3152 + u32 volatile status = 0;
3154 + unsigned long volatile t_start = jiffies;
3155 + u32 volatile data = 0;
3157 + /* We enable mdio gpio purpose register, and disable it when exit. */
3160 + // make sure previous read operation is complete
3162 + // 0 : Read/write operation complete
3163 + if(!( sysRegRead(MDIO_PHY_CONTROL_0) & (0x1 << 31)))
3167 + else if (time_after(jiffies, t_start + 5*HZ)) {
3169 + printk("\n MDIO Read operation is ongoing !!\n");
3174 + data = (0x01 << 16) | (0x02 << 18) | (phy_addr << 20) | (phy_register << 25);
3175 + sysRegWrite(MDIO_PHY_CONTROL_0, data);
3177 + sysRegWrite(MDIO_PHY_CONTROL_0, data);
3178 + //printk("\n Set Command [0x%08X] = [0x%08X] to PHY !!\n",MDIO_PHY_CONTROL_0, data);
3181 + // make sure read operation is complete
3182 + t_start = jiffies;
3184 + if (!(sysRegRead(MDIO_PHY_CONTROL_0) & (0x1 << 31))) {
3185 + status = sysRegRead(MDIO_PHY_CONTROL_0);
3186 + *read_data = (u32)(status & 0x0000FFFF);
3191 + else if (time_after(jiffies, t_start+5*HZ)) {
3193 + printk("\n MDIO Read operation is ongoing and Time Out!!\n");
3199 +u32 __mii_mgr_write(u32 phy_addr, u32 phy_register, u32 write_data)
3201 + unsigned long volatile t_start=jiffies;
3202 + u32 volatile data;
3206 + // make sure previous write operation is complete
3208 + if (!(sysRegRead(MDIO_PHY_CONTROL_0) & (0x1 << 31)))
3212 + else if (time_after(jiffies, t_start + 5 * HZ)) {
3214 + printk("\n MDIO Write operation ongoing\n");
3219 + data = (0x01 << 16)| (1<<18) | (phy_addr << 20) | (phy_register << 25) | write_data;
3220 + sysRegWrite(MDIO_PHY_CONTROL_0, data);
3222 + sysRegWrite(MDIO_PHY_CONTROL_0, data); //start operation
3223 + //printk("\n Set Command [0x%08X] to PHY !!\n",MDIO_PHY_CONTROL_0);
3225 + t_start = jiffies;
3227 + // make sure write operation is complete
3229 + if (!(sysRegRead(MDIO_PHY_CONTROL_0) & (0x1 << 31))) //0 : Read/write operation complete
3234 + else if (time_after(jiffies, t_start + 5 * HZ)) {
3236 + printk("\n MDIO Write operation Time Out\n");
3242 +u32 mii_mgr_read(u32 phy_addr, u32 phy_register, u32 *read_data)
3244 +#if defined (CONFIG_GE1_RGMII_FORCE_1000) || defined (CONFIG_GE1_TRGMII_FORCE_1200) || defined (CONFIG_GE1_TRGMII_FORCE_2000) || defined (CONFIG_GE1_TRGMII_FORCE_2600) || defined (CONFIG_P5_RGMII_TO_MT7530_MODE)
3247 + u32 an_status = 0;
3251 + an_status = (*(unsigned long *)(ESW_PHY_POLLING) & (1<<31));
3253 + *(unsigned long *)(ESW_PHY_POLLING) &= ~(1<<31);//(AN polling off)
3255 + //phase1: write page address phase
3256 + if(__mii_mgr_write(phy_addr, 0x1f, ((phy_register >> 6) & 0x3FF))) {
3257 + //phase2: write address & read low word phase
3258 + if(__mii_mgr_read(phy_addr, (phy_register >> 2) & 0xF, &low_word)) {
3259 + //phase3: write address & read high word phase
3260 + if(__mii_mgr_read(phy_addr, (0x1 << 4), &high_word)) {
3261 + *read_data = (high_word << 16) | (low_word & 0xFFFF);
3263 + *(unsigned long *)(ESW_PHY_POLLING) |= (1<<31);//(AN polling on)
3270 + *(unsigned long *)(ESW_PHY_POLLING) |= (1<<31);//(AN polling on)
3275 + if(__mii_mgr_read(phy_addr, phy_register, read_data)) {
3283 +u32 mii_mgr_write(u32 phy_addr, u32 phy_register, u32 write_data)
3285 +#if defined (CONFIG_GE1_RGMII_FORCE_1000) || defined (CONFIG_GE1_TRGMII_FORCE_1200) || defined (CONFIG_GE1_TRGMII_FORCE_2000) || defined (CONFIG_GE1_TRGMII_FORCE_2600) || defined (CONFIG_P5_RGMII_TO_MT7530_MODE)
3286 + u32 an_status = 0;
3288 + if(phy_addr == 31)
3290 + an_status = (*(unsigned long *)(ESW_PHY_POLLING) & (1<<31));
3292 + *(unsigned long *)(ESW_PHY_POLLING) &= ~(1<<31);//(AN polling off)
3294 + //phase1: write page address phase
3295 + if(__mii_mgr_write(phy_addr, 0x1f, (phy_register >> 6) & 0x3FF)) {
3296 + //phase2: write address & read low word phase
3297 + if(__mii_mgr_write(phy_addr, ((phy_register >> 2) & 0xF), write_data & 0xFFFF)) {
3298 + //phase3: write address & read high word phase
3299 + if(__mii_mgr_write(phy_addr, (0x1 << 4), write_data >> 16)) {
3301 + *(unsigned long *)(ESW_PHY_POLLING) |= (1<<31);//(AN polling on)
3308 + *(unsigned long *)(ESW_PHY_POLLING) |= (1<<31);//(AN polling on)
3313 + if(__mii_mgr_write(phy_addr, phy_register, write_data)) {
3321 +u32 mii_mgr_cl45_set_address(u32 port_num, u32 dev_addr, u32 reg_addr)
3324 + unsigned long volatile t_start = jiffies;
3325 + u32 volatile data = 0;
3330 + if(!( sysRegRead(MDIO_PHY_CONTROL_0) & (0x1 << 31)))
3334 + else if (time_after(jiffies, t_start + 5*HZ)) {
3336 + printk("\n MDIO Read operation is ongoing !!\n");
3340 + data = (dev_addr << 25) | (port_num << 20) | (0x00 << 18) | (0x00 << 16) | reg_addr;
3341 + sysRegWrite(MDIO_PHY_CONTROL_0, data);
3343 + sysRegWrite(MDIO_PHY_CONTROL_0, data);
3345 + t_start = jiffies;
3347 + if (!(sysRegRead(MDIO_PHY_CONTROL_0) & (0x1 << 31))) //0 : Read/write operation complete
3352 + else if (time_after(jiffies, t_start + 5 * HZ)) {
3354 + printk("\n MDIO Write operation Time Out\n");
3362 +u32 mii_mgr_read_cl45(u32 port_num, u32 dev_addr, u32 reg_addr, u32 *read_data)
3364 + u32 volatile status = 0;
3366 + unsigned long volatile t_start = jiffies;
3367 + u32 volatile data = 0;
3369 + // set address first
3370 + mii_mgr_cl45_set_address(port_num, dev_addr, reg_addr);
3376 + if(!( sysRegRead(MDIO_PHY_CONTROL_0) & (0x1 << 31)))
3380 + else if (time_after(jiffies, t_start + 5*HZ)) {
3382 + printk("\n MDIO Read operation is ongoing !!\n");
3386 + data = (dev_addr << 25) | (port_num << 20) | (0x03 << 18) | (0x00 << 16) | reg_addr;
3387 + sysRegWrite(MDIO_PHY_CONTROL_0, data);
3389 + sysRegWrite(MDIO_PHY_CONTROL_0, data);
3390 + t_start = jiffies;
3392 + if (!(sysRegRead(MDIO_PHY_CONTROL_0) & (0x1 << 31))) {
3393 + *read_data = (sysRegRead(MDIO_PHY_CONTROL_0) & 0x0000FFFF);
3397 + else if (time_after(jiffies, t_start+5*HZ)) {
3399 + printk("\n Set Operation: MDIO Read operation is ongoing and Time Out!!\n");
3402 + status = sysRegRead(MDIO_PHY_CONTROL_0);
3407 +u32 mii_mgr_write_cl45 (u32 port_num, u32 dev_addr, u32 reg_addr, u32 write_data)
3410 + unsigned long volatile t_start = jiffies;
3411 + u32 volatile data = 0;
3413 + // set address first
3414 + mii_mgr_cl45_set_address(port_num, dev_addr, reg_addr);
3419 + if(!( sysRegRead(MDIO_PHY_CONTROL_0) & (0x1 << 31)))
3423 + else if (time_after(jiffies, t_start + 5*HZ)) {
3425 + printk("\n MDIO Read operation is ongoing !!\n");
3430 + data = (dev_addr << 25) | (port_num << 20) | (0x01 << 18) | (0x00 << 16) | write_data;
3431 + sysRegWrite(MDIO_PHY_CONTROL_0, data);
3433 + sysRegWrite(MDIO_PHY_CONTROL_0, data);
3435 + t_start = jiffies;
3438 + if (!(sysRegRead(MDIO_PHY_CONTROL_0) & (0x1 << 31)))
3443 + else if (time_after(jiffies, t_start + 5 * HZ)) {
3445 + printk("\n MDIO Write operation Time Out\n");
3452 +#else // not rt6855
3454 +u32 mii_mgr_read(u32 phy_addr, u32 phy_register, u32 *read_data)
3456 + u32 volatile status = 0;
3458 + unsigned long volatile t_start = jiffies;
3459 +#if !defined (CONFIG_RALINK_RT3052) && !defined (CONFIG_RALINK_RT3352) && !defined (CONFIG_RALINK_RT5350) && !defined (CONFIG_RALINK_MT7628)
3460 + u32 volatile data = 0;
3463 + /* We enable mdio gpio purpose register, and disable it when exit. */
3466 + // make sure previous read operation is complete
3468 +#if defined (CONFIG_RALINK_RT3052) || defined (CONFIG_RALINK_RT3352) || defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_MT7628)
3469 + // rd_rdy: read operation is complete
3470 + if(!( sysRegRead(MDIO_PHY_CONTROL_1) & (0x1 << 1)))
3472 +	// 0 : Read/write operation complete
3473 + if(!( sysRegRead(MDIO_PHY_CONTROL_0) & (0x1 << 31)))
3478 + else if (time_after(jiffies, t_start + 5*HZ)) {
3480 + printk("\n MDIO Read operation is ongoing !!\n");
3485 +#if defined (CONFIG_RALINK_RT3052) || defined (CONFIG_RALINK_RT3352) || defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_MT7628)
3486 + sysRegWrite(MDIO_PHY_CONTROL_0 , (1<<14) | (phy_register << 8) | (phy_addr));
3488 + data = (phy_addr << 24) | (phy_register << 16);
3489 + sysRegWrite(MDIO_PHY_CONTROL_0, data);
3491 + sysRegWrite(MDIO_PHY_CONTROL_0, data);
3493 + //printk("\n Set Command [0x%08X] to PHY !!\n",MDIO_PHY_CONTROL_0);
3496 + // make sure read operation is complete
3497 + t_start = jiffies;
3499 +#if defined (CONFIG_RALINK_RT3052) || defined (CONFIG_RALINK_RT3352) || defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_MT7628)
3500 + if (sysRegRead(MDIO_PHY_CONTROL_1) & (0x1 << 1)) {
3501 + status = sysRegRead(MDIO_PHY_CONTROL_1);
3502 + *read_data = (u32)(status >>16);
3508 + if (!(sysRegRead(MDIO_PHY_CONTROL_0) & (0x1 << 31))) {
3509 + status = sysRegRead(MDIO_PHY_CONTROL_0);
3510 + *read_data = (u32)(status & 0x0000FFFF);
3516 + else if (time_after(jiffies, t_start+5*HZ)) {
3518 + printk("\n MDIO Read operation is ongoing and Time Out!!\n");
3525 +u32 mii_mgr_write(u32 phy_addr, u32 phy_register, u32 write_data)
3527 + unsigned long volatile t_start=jiffies;
3528 + u32 volatile data;
3532 + // make sure previous write operation is complete
3534 +#if defined (CONFIG_RALINK_RT3052) || defined (CONFIG_RALINK_RT3352) || defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_MT7628)
3535 + if (!(sysRegRead(MDIO_PHY_CONTROL_1) & (0x1 << 0)))
3537 + if (!(sysRegRead(MDIO_PHY_CONTROL_0) & (0x1 << 31)))
3542 + else if (time_after(jiffies, t_start + 5 * HZ)) {
3544 + printk("\n MDIO Write operation ongoing\n");
3549 +#if defined (CONFIG_RALINK_RT3052) || defined (CONFIG_RALINK_RT3352) || defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_MT7628)
3550 + data = ((write_data & 0xFFFF) << 16);
3551 + data |= (phy_register << 8) | (phy_addr);
3553 + sysRegWrite(MDIO_PHY_CONTROL_0, data);
3555 + data = (1<<30) | (phy_addr << 24) | (phy_register << 16) | write_data;
3556 + sysRegWrite(MDIO_PHY_CONTROL_0, data);
3558 + sysRegWrite(MDIO_PHY_CONTROL_0, data); //start operation
3560 + //printk("\n Set Command [0x%08X] to PHY !!\n",MDIO_PHY_CONTROL_0);
3562 + t_start = jiffies;
3564 + // make sure write operation is complete
3566 +#if defined (CONFIG_RALINK_RT3052) || defined (CONFIG_RALINK_RT3352) || defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_MT7628)
3567 + if (sysRegRead(MDIO_PHY_CONTROL_1) & (0x1 << 0)) //wt_done ?= 1
3569 + if (!(sysRegRead(MDIO_PHY_CONTROL_0) & (0x1 << 31))) //0 : Read/write operation complete
3575 + else if (time_after(jiffies, t_start + 5 * HZ)) {
3577 + printk("\n MDIO Write operation Time Out\n");
3591 +EXPORT_SYMBOL(mii_mgr_write);
3592 +EXPORT_SYMBOL(mii_mgr_read);
3593 diff --git a/drivers/net/ethernet/raeth/ra2882ethreg.h b/drivers/net/ethernet/raeth/ra2882ethreg.h
3594 new file mode 100644
3595 index 0000000..c67a042
3597 +++ b/drivers/net/ethernet/raeth/ra2882ethreg.h
3599 +#ifndef RA2882ETHREG_H
3600 +#define RA2882ETHREG_H
3602 +#include <linux/mii.h> // for struct mii_if_info in ra2882ethreg.h
3603 +#include <linux/version.h> /* check linux version for 2.4 and 2.6 compatibility */
3604 +#include <linux/interrupt.h> /* for "struct tasklet_struct" in linux-3.10.14 */
3605 +#if defined (CONFIG_HW_SFQ)
3606 +#include <linux/ip.h>
3607 +#include <linux/ipv6.h>
3609 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
3610 +#include <asm/rt2880/rt_mmap.h>
3612 +#include "raether.h"
3614 +#ifdef WORKQUEUE_BH
3615 +#include <linux/workqueue.h>
3616 +#endif // WORKQUEUE_BH //
3617 +#ifdef CONFIG_RAETH_LRO
3618 +#include <linux/inet_lro.h>
3621 +#define MAX_PACKET_SIZE 1514
3622 +#define MIN_PACKET_SIZE 60
3623 +#define MAX_TXD_LEN 0x3fff
3625 +#if defined (CONFIG_ARCH_MT7623)
3626 +#define phys_to_bus(a) (a)
3628 +#define phys_to_bus(a) (a & 0x1FFFFFFF)
3634 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
3635 +#define BIT(x) ((1 << x))
3637 +/* bits range: for example BITS(16,23) = 0xFF0000
3638 + * ==> (BIT(m)-1) = 0x0000FFFF ~(BIT(m)-1) => 0xFFFF0000
3639 + * ==> (BIT(n+1)-1) = 0x00FFFFFF
3641 +#define BITS(m,n) (~(BIT(m)-1) & ((BIT(n) - 1) | BIT(n)))
3643 +#define ETHER_ADDR_LEN 6
3645 +/* Phy Vender ID list */
3647 +#define EV_ICPLUS_PHY_ID0 0x0243
3648 +#define EV_ICPLUS_PHY_ID1 0x0D90
3649 +#define EV_MARVELL_PHY_ID0 0x0141
3650 +#define EV_MARVELL_PHY_ID1 0x0CC2
3651 +#define EV_VTSS_PHY_ID0 0x0007
3652 +#define EV_VTSS_PHY_ID1 0x0421
3657 +#if defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A) || \
3658 + defined (CONFIG_RALINK_MT7620) || defined (CONFIG_RALINK_MT7621) || defined (CONFIG_RALINK_MT7628) || \
3659 + defined (CONFIG_ARCH_MT7623)
3661 +#define RX_COHERENT BIT(31)
3662 +#define RX_DLY_INT BIT(30)
3663 +#define TX_COHERENT BIT(29)
3664 +#define TX_DLY_INT BIT(28)
3665 +#define RING3_RX_DLY_INT BIT(27)
3666 +#define RING2_RX_DLY_INT BIT(26)
3667 +#define RING1_RX_DLY_INT BIT(25)
3669 +#define ALT_RPLC_INT3 BIT(23)
3670 +#define ALT_RPLC_INT2 BIT(22)
3671 +#define ALT_RPLC_INT1 BIT(21)
3673 +#define RX_DONE_INT3 BIT(19)
3674 +#define RX_DONE_INT2 BIT(18)
3675 +#define RX_DONE_INT1 BIT(17)
3676 +#define RX_DONE_INT0 BIT(16)
3678 +#define TX_DONE_INT3 BIT(3)
3679 +#define TX_DONE_INT2 BIT(2)
3680 +#define TX_DONE_INT1 BIT(1)
3681 +#define TX_DONE_INT0 BIT(0)
3683 +#if defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
3684 +#define RLS_COHERENT BIT(29)
3685 +#define RLS_DLY_INT BIT(28)
3686 +#define RLS_DONE_INT BIT(0)
3690 +//#define CNT_PPE_AF BIT(31)
3691 +//#define CNT_GDM_AF BIT(29)
3692 +#define PSE_P2_FC BIT(26)
3693 +#define GDM_CRC_DROP BIT(25)
3694 +#define PSE_BUF_DROP BIT(24)
3695 +#define GDM_OTHER_DROP BIT(23)
3696 +#define PSE_P1_FC BIT(22)
3697 +#define PSE_P0_FC BIT(21)
3698 +#define PSE_FQ_EMPTY BIT(20)
3699 +#define GE1_STA_CHG BIT(18)
3700 +#define TX_COHERENT BIT(17)
3701 +#define RX_COHERENT BIT(16)
3703 +#define TX_DONE_INT3 BIT(11)
3704 +#define TX_DONE_INT2 BIT(10)
3705 +#define TX_DONE_INT1 BIT(9)
3706 +#define TX_DONE_INT0 BIT(8)
3707 +#define RX_DONE_INT1 RX_DONE_INT0
3708 +#define RX_DONE_INT0 BIT(2)
3709 +#define TX_DLY_INT BIT(1)
3710 +#define RX_DLY_INT BIT(0)
3713 +#define FE_INT_ALL (TX_DONE_INT3 | TX_DONE_INT2 | \
3714 + TX_DONE_INT1 | TX_DONE_INT0 | \
3715 + RX_DONE_INT0 | RX_DONE_INT1 | \
3716 + RX_DONE_INT2 | RX_DONE_INT3)
3718 +#if defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
3719 +#define QFE_INT_ALL (RLS_DONE_INT | RX_DONE_INT0 | RX_DONE_INT1)
3720 +#define QFE_INT_DLY_INIT (RLS_DLY_INT | RX_DLY_INT)
3722 +#define NUM_QDMA_PAGE 512
3723 +#define QDMA_PAGE_SIZE 2048
3728 +#if defined (CONFIG_RALINK_RT3052) || defined (CONFIG_RALINK_RT3352) || defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_MT7628)
3729 +#define PORT0_QUEUE_FULL BIT(14) //port0 queue full
3730 +#define PORT1_QUEUE_FULL BIT(15) //port1 queue full
3731 +#define PORT2_QUEUE_FULL BIT(16) //port2 queue full
3732 +#define PORT3_QUEUE_FULL BIT(17) //port3 queue full
3733 +#define PORT4_QUEUE_FULL BIT(18) //port4 queue full
3734 +#define PORT5_QUEUE_FULL BIT(19) //port5 queue full
3735 +#define PORT6_QUEUE_FULL BIT(20) //port6 queue full
3736 +#define SHARED_QUEUE_FULL BIT(23) //shared queue full
3737 +#define QUEUE_EXHAUSTED BIT(24) //global queue is used up and all packets are dropped
3738 +#define BC_STROM BIT(25) //the device is undergoing broadcast storm
3739 +#define PORT_ST_CHG BIT(26) //Port status change
3740 +#define UNSECURED_ALERT BIT(27) //Intruder alert
3741 +#define ABNORMAL_ALERT BIT(28) //Abnormal
3743 +#define ESW_ISR (RALINK_ETH_SW_BASE + 0x00)
3744 +#define ESW_IMR (RALINK_ETH_SW_BASE + 0x04)
3745 +#define ESW_INT_ALL (PORT_ST_CHG)
3747 +#elif defined (CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A) || \
3748 + defined (CONFIG_RALINK_MT7620)
3749 +#define MIB_INT BIT(25)
3750 +#define ACL_INT BIT(24)
3751 +#define P5_LINK_CH BIT(5)
3752 +#define P4_LINK_CH BIT(4)
3753 +#define P3_LINK_CH BIT(3)
3754 +#define P2_LINK_CH BIT(2)
3755 +#define P1_LINK_CH BIT(1)
3756 +#define P0_LINK_CH BIT(0)
3758 +#define RX_GOCT_CNT BIT(4)
3759 +#define RX_GOOD_CNT BIT(6)
3760 +#define TX_GOCT_CNT BIT(17)
3761 +#define TX_GOOD_CNT BIT(19)
3763 +#define MSK_RX_GOCT_CNT BIT(4)
3764 +#define MSK_RX_GOOD_CNT BIT(6)
3765 +#define MSK_TX_GOCT_CNT BIT(17)
3766 +#define MSK_TX_GOOD_CNT BIT(19)
3767 +#define MSK_CNT_INT_ALL (MSK_RX_GOCT_CNT | MSK_RX_GOOD_CNT | MSK_TX_GOCT_CNT | MSK_TX_GOOD_CNT)
3768 +//#define MSK_CNT_INT_ALL (MSK_RX_GOOD_CNT | MSK_TX_GOOD_CNT)
3771 +#define ESW_IMR (RALINK_ETH_SW_BASE + 0x7000 + 0x8)
3772 +#define ESW_ISR (RALINK_ETH_SW_BASE + 0x7000 + 0xC)
3773 +#define ESW_INT_ALL (P0_LINK_CH | P1_LINK_CH | P2_LINK_CH | P3_LINK_CH | P4_LINK_CH | P5_LINK_CH | ACL_INT | MIB_INT)
3774 +#define ESW_AISR (RALINK_ETH_SW_BASE + 0x8)
3775 +#define ESW_P0_IntSn (RALINK_ETH_SW_BASE + 0x4004)
3776 +#define ESW_P1_IntSn (RALINK_ETH_SW_BASE + 0x4104)
3777 +#define ESW_P2_IntSn (RALINK_ETH_SW_BASE + 0x4204)
3778 +#define ESW_P3_IntSn (RALINK_ETH_SW_BASE + 0x4304)
3779 +#define ESW_P4_IntSn (RALINK_ETH_SW_BASE + 0x4404)
3780 +#define ESW_P5_IntSn (RALINK_ETH_SW_BASE + 0x4504)
3781 +#define ESW_P6_IntSn (RALINK_ETH_SW_BASE + 0x4604)
3782 +#define ESW_P0_IntMn (RALINK_ETH_SW_BASE + 0x4008)
3783 +#define ESW_P1_IntMn (RALINK_ETH_SW_BASE + 0x4108)
3784 +#define ESW_P2_IntMn (RALINK_ETH_SW_BASE + 0x4208)
3785 +#define ESW_P3_IntMn (RALINK_ETH_SW_BASE + 0x4308)
3786 +#define ESW_P4_IntMn (RALINK_ETH_SW_BASE + 0x4408)
3787 +#define ESW_P5_IntMn (RALINK_ETH_SW_BASE + 0x4508)
3788 +#define ESW_P6_IntMn (RALINK_ETH_SW_BASE + 0x4608)
3790 +#if defined (CONFIG_RALINK_MT7620)
3791 +#define ESW_P7_IntSn (RALINK_ETH_SW_BASE + 0x4704)
3792 +#define ESW_P7_IntMn (RALINK_ETH_SW_BASE + 0x4708)
3796 +#define ESW_PHY_POLLING (RALINK_ETH_SW_BASE + 0x7000)
3798 +#elif defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
3800 +#define ESW_PHY_POLLING (RALINK_ETH_SW_BASE + 0x0000)
3802 +#define P5_LINK_CH BIT(5)
3803 +#define P4_LINK_CH BIT(4)
3804 +#define P3_LINK_CH BIT(3)
3805 +#define P2_LINK_CH BIT(2)
3806 +#define P1_LINK_CH BIT(1)
3807 +#define P0_LINK_CH BIT(0)
3810 +#endif // CONFIG_RALINK_RT3052 || CONFIG_RALINK_RT3352 || CONFIG_RALINK_RT5350 || defined (CONFIG_RALINK_MT7628)//
3812 +#define RX_BUF_ALLOC_SIZE 2000
3813 +#define FASTPATH_HEADROOM 64
3815 +#define ETHER_BUFFER_ALIGN 32 ///// Align on a cache line
3817 +#define ETHER_ALIGNED_RX_SKB_ADDR(addr) \
3818 + ((((unsigned long)(addr) + ETHER_BUFFER_ALIGN - 1) & \
3819 + ~(ETHER_BUFFER_ALIGN - 1)) - (unsigned long)(addr))
3821 +#ifdef CONFIG_PSEUDO_SUPPORT
3822 +typedef struct _PSEUDO_ADAPTER {
3823 + struct net_device *RaethDev;
3824 + struct net_device *PseudoDev;
3825 + struct net_device_stats stat;
3826 +#if defined (CONFIG_ETHTOOL) /*&& defined (CONFIG_RAETH_ROUTER)*/
3827 + struct mii_if_info mii_info;
3830 +} PSEUDO_ADAPTER, PPSEUDO_ADAPTER;
3832 +#define MAX_PSEUDO_ENTRY 1
3837 +/* Register Categories Definition */
3838 +#define RAFRAMEENGINE_OFFSET 0x0000
3839 +#define RAGDMA_OFFSET 0x0020
3840 +#define RAPSE_OFFSET 0x0040
3841 +#define RAGDMA2_OFFSET 0x0060
3842 +#define RACDMA_OFFSET 0x0080
3843 +#if defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A) || \
3844 + defined (CONFIG_RALINK_MT7620) || defined (CONFIG_RALINK_MT7621) || defined (CONFIG_RALINK_MT7628) || \
3845 + defined (CONFIG_ARCH_MT7623)
3847 +#define RAPDMA_OFFSET 0x0800
3848 +#define SDM_OFFSET 0x0C00
3850 +#define RAPDMA_OFFSET 0x0100
3852 +#define RAPPE_OFFSET 0x0200
3853 +#define RACMTABLE_OFFSET 0x0400
3854 +#define RAPOLICYTABLE_OFFSET 0x1000
3857 +/* Register Map Detail */
3859 +#define SYSCFG1 (RALINK_SYSCTL_BASE + 0x14)
3861 +#if defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_MT7628)
3864 +#define TX_BASE_PTR0 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x000)
3865 +#define TX_MAX_CNT0 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x004)
3866 +#define TX_CTX_IDX0 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x008)
3867 +#define TX_DTX_IDX0 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x00C)
3869 +#define TX_BASE_PTR1 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x010)
3870 +#define TX_MAX_CNT1 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x014)
3871 +#define TX_CTX_IDX1 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x018)
3872 +#define TX_DTX_IDX1 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x01C)
3874 +#define TX_BASE_PTR2 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x020)
3875 +#define TX_MAX_CNT2 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x024)
3876 +#define TX_CTX_IDX2 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x028)
3877 +#define TX_DTX_IDX2 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x02C)
3879 +#define TX_BASE_PTR3 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x030)
3880 +#define TX_MAX_CNT3 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x034)
3881 +#define TX_CTX_IDX3 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x038)
3882 +#define TX_DTX_IDX3 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x03C)
3884 +#define RX_BASE_PTR0 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x100)
3885 +#define RX_MAX_CNT0 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x104)
3886 +#define RX_CALC_IDX0 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x108)
3887 +#define RX_DRX_IDX0 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x10C)
3889 +#define RX_BASE_PTR1 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x110)
3890 +#define RX_MAX_CNT1 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x114)
3891 +#define RX_CALC_IDX1 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x118)
3892 +#define RX_DRX_IDX1 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x11C)
3894 +#define PDMA_INFO (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x200)
3895 +#define PDMA_GLO_CFG (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x204)
3896 +#define PDMA_RST_IDX (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x208)
3897 +#define PDMA_RST_CFG (PDMA_RST_IDX)
3898 +#define DLY_INT_CFG (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x20C)
3899 +#define FREEQ_THRES (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x210)
3900 +#define INT_STATUS (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x220)
3901 +#define FE_INT_STATUS (INT_STATUS)
3902 +#define INT_MASK (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x228)
3903 +#define FE_INT_ENABLE (INT_MASK)
3904 +#define PDMA_WRR (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x280)
3905 +#define PDMA_SCH_CFG (PDMA_WRR)
3907 +#define SDM_CON (RALINK_FRAME_ENGINE_BASE+SDM_OFFSET+0x00) //Switch DMA configuration
3908 +#define SDM_RRING (RALINK_FRAME_ENGINE_BASE+SDM_OFFSET+0x04) //Switch DMA Rx Ring
3909 +#define SDM_TRING (RALINK_FRAME_ENGINE_BASE+SDM_OFFSET+0x08) //Switch DMA Tx Ring
3910 +#define SDM_MAC_ADRL (RALINK_FRAME_ENGINE_BASE+SDM_OFFSET+0x0C) //Switch MAC address LSB
3911 +#define SDM_MAC_ADRH (RALINK_FRAME_ENGINE_BASE+SDM_OFFSET+0x10) //Switch MAC Address MSB
3912 +#define SDM_TPCNT (RALINK_FRAME_ENGINE_BASE+SDM_OFFSET+0x100) //Switch DMA Tx packet count
3913 +#define SDM_TBCNT (RALINK_FRAME_ENGINE_BASE+SDM_OFFSET+0x104) //Switch DMA Tx byte count
3914 +#define SDM_RPCNT (RALINK_FRAME_ENGINE_BASE+SDM_OFFSET+0x108) //Switch DMA rx packet count
3915 +#define SDM_RBCNT (RALINK_FRAME_ENGINE_BASE+SDM_OFFSET+0x10C) //Switch DMA rx byte count
3916 +#define SDM_CS_ERR (RALINK_FRAME_ENGINE_BASE+SDM_OFFSET+0x110) //Switch DMA rx checksum error count
3918 +#elif defined (CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A) || \
3919 + defined (CONFIG_RALINK_MT7620) || defined (CONFIG_RALINK_MT7621) || \
3920 + defined (CONFIG_ARCH_MT7623)
3922 +/* Old FE with New PDMA */
3923 +#define PDMA_RELATED 0x0800
3925 +#define TX_BASE_PTR0 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x000)
3926 +#define TX_MAX_CNT0 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x004)
3927 +#define TX_CTX_IDX0 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x008)
3928 +#define TX_DTX_IDX0 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x00C)
3930 +#define TX_BASE_PTR1 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x010)
3931 +#define TX_MAX_CNT1 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x014)
3932 +#define TX_CTX_IDX1 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x018)
3933 +#define TX_DTX_IDX1 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x01C)
3935 +#define TX_BASE_PTR2 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x020)
3936 +#define TX_MAX_CNT2 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x024)
3937 +#define TX_CTX_IDX2 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x028)
3938 +#define TX_DTX_IDX2 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x02C)
3940 +#define TX_BASE_PTR3 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x030)
3941 +#define TX_MAX_CNT3 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x034)
3942 +#define TX_CTX_IDX3 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x038)
3943 +#define TX_DTX_IDX3 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x03C)
3945 +#define RX_BASE_PTR0 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x100)
3946 +#define RX_MAX_CNT0 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x104)
3947 +#define RX_CALC_IDX0 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x108)
3948 +#define RX_DRX_IDX0 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x10C)
3950 +#define RX_BASE_PTR1 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x110)
3951 +#define RX_MAX_CNT1 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x114)
3952 +#define RX_CALC_IDX1 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x118)
3953 +#define RX_DRX_IDX1 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x11C)
3955 +#define RX_BASE_PTR2 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x120)
3956 +#define RX_MAX_CNT2 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x124)
3957 +#define RX_CALC_IDX2 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x128)
3958 +#define RX_DRX_IDX12 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x12C)
3960 +#define RX_BASE_PTR3 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x130)
3961 +#define RX_MAX_CNT3 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x134)
3962 +#define RX_CALC_IDX3 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x138)
3963 +#define RX_DRX_IDX3 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x13C)
3965 +#define PDMA_INFO (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x200)
3966 +#define PDMA_GLO_CFG (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x204)
3967 +#define PDMA_RST_IDX (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x208)
3968 +#define PDMA_RST_CFG (PDMA_RST_IDX)
3969 +#define DLY_INT_CFG (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x20C)
3970 +#define FREEQ_THRES (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x210)
3971 +#define INT_STATUS (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x220)
3972 +#define FE_INT_STATUS (INT_STATUS)
3973 +#define INT_MASK (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x228)
3974 +#define FE_INT_ENABLE (INT_MASK)
3975 +#define SCH_Q01_CFG (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x280)
3976 +#define SCH_Q23_CFG (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x284)
3978 +#define FE_GLO_CFG RALINK_FRAME_ENGINE_BASE + 0x00
3979 +#define FE_RST_GL RALINK_FRAME_ENGINE_BASE + 0x04
3980 +#define FE_INT_STATUS2 RALINK_FRAME_ENGINE_BASE + 0x08
3981 +#define FE_INT_ENABLE2 RALINK_FRAME_ENGINE_BASE + 0x0c
3982 +//#define FC_DROP_STA RALINK_FRAME_ENGINE_BASE + 0x18
3983 +#define FOE_TS_T RALINK_FRAME_ENGINE_BASE + 0x10
3985 +#if defined (CONFIG_RALINK_MT7620)
3986 +#define GDMA1_RELATED 0x0600
3987 +#define GDMA1_FWD_CFG (RALINK_FRAME_ENGINE_BASE + GDMA1_RELATED + 0x00)
3988 +#define GDMA1_SHPR_CFG (RALINK_FRAME_ENGINE_BASE + GDMA1_RELATED + 0x04)
3989 +#define GDMA1_MAC_ADRL (RALINK_FRAME_ENGINE_BASE + GDMA1_RELATED + 0x08)
3990 +#define GDMA1_MAC_ADRH (RALINK_FRAME_ENGINE_BASE + GDMA1_RELATED + 0x0C)
3991 +#elif defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
3992 +#define GDMA1_RELATED 0x0500
3993 +#define GDMA1_FWD_CFG (RALINK_FRAME_ENGINE_BASE + GDMA1_RELATED + 0x00)
3994 +#define GDMA1_SHPR_CFG (RALINK_FRAME_ENGINE_BASE + GDMA1_RELATED + 0x04)
3995 +#define GDMA1_MAC_ADRL (RALINK_FRAME_ENGINE_BASE + GDMA1_RELATED + 0x08)
3996 +#define GDMA1_MAC_ADRH (RALINK_FRAME_ENGINE_BASE + GDMA1_RELATED + 0x0C)
3998 +#define GDMA2_RELATED 0x1500
3999 +#define GDMA2_FWD_CFG (RALINK_FRAME_ENGINE_BASE + GDMA2_RELATED + 0x00)
4000 +#define GDMA2_SHPR_CFG (RALINK_FRAME_ENGINE_BASE + GDMA2_RELATED + 0x04)
4001 +#define GDMA2_MAC_ADRL (RALINK_FRAME_ENGINE_BASE + GDMA2_RELATED + 0x08)
4002 +#define GDMA2_MAC_ADRH (RALINK_FRAME_ENGINE_BASE + GDMA2_RELATED + 0x0C)
4004 +#define GDMA1_RELATED 0x0020
4005 +#define GDMA1_FWD_CFG (RALINK_FRAME_ENGINE_BASE + GDMA1_RELATED + 0x00)
4006 +#define GDMA1_SCH_CFG (RALINK_FRAME_ENGINE_BASE + GDMA1_RELATED + 0x04)
4007 +#define GDMA1_SHPR_CFG (RALINK_FRAME_ENGINE_BASE + GDMA1_RELATED + 0x08)
4008 +#define GDMA1_MAC_ADRL (RALINK_FRAME_ENGINE_BASE + GDMA1_RELATED + 0x0C)
4009 +#define GDMA1_MAC_ADRH (RALINK_FRAME_ENGINE_BASE + GDMA1_RELATED + 0x10)
4011 +#define GDMA2_RELATED 0x0060
4012 +#define GDMA2_FWD_CFG (RALINK_FRAME_ENGINE_BASE + GDMA2_RELATED + 0x00)
4013 +#define GDMA2_SCH_CFG (RALINK_FRAME_ENGINE_BASE + GDMA2_RELATED + 0x04)
4014 +#define GDMA2_SHPR_CFG (RALINK_FRAME_ENGINE_BASE + GDMA2_RELATED + 0x08)
4015 +#define GDMA2_MAC_ADRL (RALINK_FRAME_ENGINE_BASE + GDMA2_RELATED + 0x0C)
4016 +#define GDMA2_MAC_ADRH (RALINK_FRAME_ENGINE_BASE + GDMA2_RELATED + 0x10)
4019 +#if defined (CONFIG_RALINK_MT7620)
4020 +#define PSE_RELATED 0x0500
4021 +#define PSE_FQFC_CFG (RALINK_FRAME_ENGINE_BASE + PSE_RELATED + 0x00)
4022 +#define PSE_IQ_CFG (RALINK_FRAME_ENGINE_BASE + PSE_RELATED + 0x04)
4023 +#define PSE_QUE_STA (RALINK_FRAME_ENGINE_BASE + PSE_RELATED + 0x08)
4025 +#define PSE_RELATED 0x0040
4026 +#define PSE_FQ_CFG (RALINK_FRAME_ENGINE_BASE + PSE_RELATED + 0x00)
4027 +#define CDMA_FC_CFG (RALINK_FRAME_ENGINE_BASE + PSE_RELATED + 0x04)
4028 +#define GDMA1_FC_CFG (RALINK_FRAME_ENGINE_BASE + PSE_RELATED + 0x08)
4029 +#define GDMA2_FC_CFG (RALINK_FRAME_ENGINE_BASE + PSE_RELATED + 0x0C)
4030 +#define CDMA_OQ_STA (RALINK_FRAME_ENGINE_BASE + PSE_RELATED + 0x10)
4031 +#define GDMA1_OQ_STA (RALINK_FRAME_ENGINE_BASE + PSE_RELATED + 0x14)
4032 +#define GDMA2_OQ_STA (RALINK_FRAME_ENGINE_BASE + PSE_RELATED + 0x18)
4033 +#define PSE_IQ_STA (RALINK_FRAME_ENGINE_BASE + PSE_RELATED + 0x1C)
4037 +#if defined (CONFIG_RALINK_MT7620)
4038 +#define CDMA_RELATED 0x0400
4039 +#define CDMA_CSG_CFG (RALINK_FRAME_ENGINE_BASE + CDMA_RELATED + 0x00)
4040 +#define SMACCR0 (RALINK_ETH_SW_BASE + 0x3FE4)
4041 +#define SMACCR1 (RALINK_ETH_SW_BASE + 0x3FE8)
4042 +#define CKGCR (RALINK_ETH_SW_BASE + 0x3FF0)
4043 +#elif defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
4044 +#define CDMA_RELATED 0x0400
4045 +#define CDMA_CSG_CFG (RALINK_FRAME_ENGINE_BASE + CDMA_RELATED + 0x00) //fake definition
4046 +#define CDMP_IG_CTRL (RALINK_FRAME_ENGINE_BASE + CDMA_RELATED + 0x00)
4047 +#define CDMP_EG_CTRL (RALINK_FRAME_ENGINE_BASE + CDMA_RELATED + 0x04)
4049 +#define CDMA_RELATED 0x0080
4050 +#define CDMA_CSG_CFG (RALINK_FRAME_ENGINE_BASE + CDMA_RELATED + 0x00)
4051 +#define CDMA_SCH_CFG (RALINK_FRAME_ENGINE_BASE + CDMA_RELATED + 0x04)
4052 +#define SMACCR0 (RALINK_ETH_SW_BASE + 0x30E4)
4053 +#define SMACCR1 (RALINK_ETH_SW_BASE + 0x30E8)
4054 +#define CKGCR (RALINK_ETH_SW_BASE + 0x30F0)
4057 +#define PDMA_FC_CFG (RALINK_FRAME_ENGINE_BASE+0x100)
4060 +#if defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
4061 +/*kurtis: add QDMA define*/
4063 +#define CLK_CFG_0 (RALINK_SYSCTL_BASE + 0x2C)
4064 +#define PAD_RGMII2_MDIO_CFG (RALINK_SYSCTL_BASE + 0x58)
4066 +#define QDMA_RELATED 0x1800
4067 +#define QTX_CFG_0 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x000)
4068 +#define QTX_SCH_0 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x004)
4069 +#define QTX_HEAD_0 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x008)
4070 +#define QTX_TAIL_0 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x00C)
4071 +#define QTX_CFG_1 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x010)
4072 +#define QTX_SCH_1 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x014)
4073 +#define QTX_HEAD_1 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x018)
4074 +#define QTX_TAIL_1 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x01C)
4075 +#define QTX_CFG_2 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x020)
4076 +#define QTX_SCH_2 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x024)
4077 +#define QTX_HEAD_2 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x028)
4078 +#define QTX_TAIL_2 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x02C)
4079 +#define QTX_CFG_3 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x030)
4080 +#define QTX_SCH_3 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x034)
4081 +#define QTX_HEAD_3 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x038)
4082 +#define QTX_TAIL_3 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x03C)
4083 +#define QTX_CFG_4 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x040)
4084 +#define QTX_SCH_4 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x044)
4085 +#define QTX_HEAD_4 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x048)
4086 +#define QTX_TAIL_4 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x04C)
4087 +#define QTX_CFG_5 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x050)
4088 +#define QTX_SCH_5 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x054)
4089 +#define QTX_HEAD_5 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x058)
4090 +#define QTX_TAIL_5 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x05C)
4091 +#define QTX_CFG_6 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x060)
4092 +#define QTX_SCH_6 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x064)
4093 +#define QTX_HEAD_6 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x068)
4094 +#define QTX_TAIL_6 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x06C)
4095 +#define QTX_CFG_7 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x070)
4096 +#define QTX_SCH_7 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x074)
4097 +#define QTX_HEAD_7 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x078)
4098 +#define QTX_TAIL_7 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x07C)
4099 +#define QTX_CFG_8 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x080)
4100 +#define QTX_SCH_8 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x084)
4101 +#define QTX_HEAD_8 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x088)
4102 +#define QTX_TAIL_8 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x08C)
4103 +#define QTX_CFG_9 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x090)
4104 +#define QTX_SCH_9 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x094)
4105 +#define QTX_HEAD_9 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x098)
4106 +#define QTX_TAIL_9 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x09C)
4107 +#define QTX_CFG_10 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0A0)
4108 +#define QTX_SCH_10 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0A4)
4109 +#define QTX_HEAD_10 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0A8)
4110 +#define QTX_TAIL_10 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0AC)
4111 +#define QTX_CFG_11 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0B0)
4112 +#define QTX_SCH_11 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0B4)
4113 +#define QTX_HEAD_11 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0B8)
4114 +#define QTX_TAIL_11 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0BC)
4115 +#define QTX_CFG_12 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0C0)
4116 +#define QTX_SCH_12 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0C4)
4117 +#define QTX_HEAD_12 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0C8)
4118 +#define QTX_TAIL_12 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0CC)
4119 +#define QTX_CFG_13 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0D0)
4120 +#define QTX_SCH_13 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0D4)
4121 +#define QTX_HEAD_13 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0D8)
4122 +#define QTX_TAIL_13 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0DC)
4123 +#define QTX_CFG_14 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0E0)
4124 +#define QTX_SCH_14 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0E4)
4125 +#define QTX_HEAD_14 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0E8)
4126 +#define QTX_TAIL_14 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0EC)
4127 +#define QTX_CFG_15 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0F0)
4128 +#define QTX_SCH_15 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0F4)
4129 +#define QTX_HEAD_15 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0F8)
4130 +#define QTX_TAIL_15 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0FC)
4131 +#define QRX_BASE_PTR_0 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x100)
4132 +#define QRX_MAX_CNT_0 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x104)
4133 +#define QRX_CRX_IDX_0 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x108)
4134 +#define QRX_DRX_IDX_0 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x10C)
4135 +#define QRX_BASE_PTR_1 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x110)
4136 +#define QRX_MAX_CNT_1 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x114)
4137 +#define QRX_CRX_IDX_1 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x118)
4138 +#define QRX_DRX_IDX_1 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x11C)
4139 +#if defined (CONFIG_ARCH_MT7623)
4140 +#define VQTX_TB_BASE_0 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x180)
4141 +#define VQTX_TB_BASE_1 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x184)
4142 +#define VQTX_TB_BASE_2 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x188)
4143 +#define VQTX_TB_BASE_3 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x18C)
4145 +#define QDMA_INFO (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x200)
4146 +#define QDMA_GLO_CFG (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x204)
4147 +#define QDMA_RST_IDX (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x208)
4148 +#define QDMA_RST_CFG (QDMA_RST_IDX)
4149 +#define QDMA_DELAY_INT (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x20C)
4150 +#define QDMA_FC_THRES (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x210)
4151 +#define QDMA_TX_SCH (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x214)
4152 +#define QDMA_INT_STS (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x218)
4153 +#define QFE_INT_STATUS (QDMA_INT_STS)
4154 +#define QDMA_INT_MASK (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x21C)
4155 +#define QFE_INT_ENABLE (QDMA_INT_MASK)
4156 +#define QDMA_TRTCM (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x220)
4157 +#define QDMA_DATA0 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x224)
4158 +#define QDMA_DATA1 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x228)
4159 +#define QDMA_RED_THRES (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x22C)
4160 +#define QDMA_TEST (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x230)
4161 +#define QDMA_DMA (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x234)
4162 +#define QDMA_BMU (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x238)
4163 +#define QDMA_HRED1 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x240)
4164 +#define QDMA_HRED2 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x244)
4165 +#define QDMA_SRED1 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x248)
4166 +#define QDMA_SRED2 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x24C)
4167 +#define QTX_CTX_PTR (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x300)
4168 +#define QTX_DTX_PTR (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x304)
4169 +#define QTX_FWD_CNT (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x308)
4170 +#define QTX_CRX_PTR (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x310)
4171 +#define QTX_DRX_PTR (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x314)
4172 +#define QTX_RLS_CNT (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x318)
4173 +#define QDMA_FQ_HEAD (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x320)
4174 +#define QDMA_FQ_TAIL (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x324)
4175 +#define QDMA_FQ_CNT (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x328)
4176 +#define QDMA_FQ_BLEN (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x32C)
4177 +#define QTX_Q0MIN_BK (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x350)
4178 +#define QTX_Q1MIN_BK (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x354)
4179 +#define QTX_Q2MIN_BK (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x358)
4180 +#define QTX_Q3MIN_BK (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x35C)
4181 +#define QTX_Q0MAX_BK (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x360)
4182 +#define QTX_Q1MAX_BK (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x364)
4183 +#define QTX_Q2MAX_BK (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x368)
4184 +#define QTX_Q3MAX_BK (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x36C)
4187 +#endif /* MT7621 QDMA */
4191 +/* 1. Frame Engine Global Registers */
4192 +#define MDIO_ACCESS (RALINK_FRAME_ENGINE_BASE+RAFRAMEENGINE_OFFSET+0x00)
4193 +#define MDIO_CFG (RALINK_FRAME_ENGINE_BASE+RAFRAMEENGINE_OFFSET+0x04)
4194 +#define FE_GLO_CFG (RALINK_FRAME_ENGINE_BASE+RAFRAMEENGINE_OFFSET+0x08)
4195 +#define FE_RST_GL (RALINK_FRAME_ENGINE_BASE+RAFRAMEENGINE_OFFSET+0x0C)
4196 +#define FE_INT_STATUS (RALINK_FRAME_ENGINE_BASE+RAFRAMEENGINE_OFFSET+0x10)
4197 +#define FE_INT_ENABLE (RALINK_FRAME_ENGINE_BASE+RAFRAMEENGINE_OFFSET+0x14)
4198 +#define MDIO_CFG2 (RALINK_FRAME_ENGINE_BASE+RAFRAMEENGINE_OFFSET+0x18) //Original:FC_DROP_STA
4199 +#define FOC_TS_T (RALINK_FRAME_ENGINE_BASE+RAFRAMEENGINE_OFFSET+0x1C)
4202 +/* 2. GDMA Registers */
4203 +#define GDMA1_FWD_CFG (RALINK_FRAME_ENGINE_BASE+RAGDMA_OFFSET+0x00)
4204 +#define GDMA1_SCH_CFG (RALINK_FRAME_ENGINE_BASE+RAGDMA_OFFSET+0x04)
4205 +#define GDMA1_SHPR_CFG (RALINK_FRAME_ENGINE_BASE+RAGDMA_OFFSET+0x08)
4206 +#define GDMA1_MAC_ADRL (RALINK_FRAME_ENGINE_BASE+RAGDMA_OFFSET+0x0C)
4207 +#define GDMA1_MAC_ADRH (RALINK_FRAME_ENGINE_BASE+RAGDMA_OFFSET+0x10)
4209 +#define GDMA2_FWD_CFG (RALINK_FRAME_ENGINE_BASE+RAGDMA2_OFFSET+0x00)
4210 +#define GDMA2_SCH_CFG (RALINK_FRAME_ENGINE_BASE+RAGDMA2_OFFSET+0x04)
4211 +#define GDMA2_SHPR_CFG (RALINK_FRAME_ENGINE_BASE+RAGDMA2_OFFSET+0x08)
4212 +#define GDMA2_MAC_ADRL (RALINK_FRAME_ENGINE_BASE+RAGDMA2_OFFSET+0x0C)
4213 +#define GDMA2_MAC_ADRH (RALINK_FRAME_ENGINE_BASE+RAGDMA2_OFFSET+0x10)
4216 +#define PSE_FQ_CFG (RALINK_FRAME_ENGINE_BASE+RAPSE_OFFSET+0x00)
4217 +#define CDMA_FC_CFG (RALINK_FRAME_ENGINE_BASE+RAPSE_OFFSET+0x04)
4218 +#define GDMA1_FC_CFG (RALINK_FRAME_ENGINE_BASE+RAPSE_OFFSET+0x08)
4219 +#define GDMA2_FC_CFG (RALINK_FRAME_ENGINE_BASE+RAPSE_OFFSET+0x0C)
4220 +#define PDMA_FC_CFG (RALINK_FRAME_ENGINE_BASE+0x1f0)
4223 +#define CDMA_CSG_CFG (RALINK_FRAME_ENGINE_BASE+RACDMA_OFFSET+0x00)
4224 +#define CDMA_SCH_CFG (RALINK_FRAME_ENGINE_BASE+RACDMA_OFFSET+0x04)
4225 +/* skip pppoe sid and vlan id definitions */
4229 +#define PDMA_GLO_CFG (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x00)
4230 +#define PDMA_RST_CFG (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x04)
4231 +#define PDMA_SCH_CFG (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x08)
4233 +#define DLY_INT_CFG (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x0C)
4235 +#define TX_BASE_PTR0 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x10)
4236 +#define TX_MAX_CNT0 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x14)
4237 +#define TX_CTX_IDX0 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x18)
4238 +#define TX_DTX_IDX0 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x1C)
4240 +#define TX_BASE_PTR1 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x20)
4241 +#define TX_MAX_CNT1 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x24)
4242 +#define TX_CTX_IDX1 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x28)
4243 +#define TX_DTX_IDX1 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x2C)
4245 +#define TX_BASE_PTR2 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x40)
4246 +#define TX_MAX_CNT2 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x44)
4247 +#define TX_CTX_IDX2 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x48)
4248 +#define TX_DTX_IDX2 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x4C)
4250 +#define TX_BASE_PTR3 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x50)
4251 +#define TX_MAX_CNT3 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x54)
4252 +#define TX_CTX_IDX3 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x58)
4253 +#define TX_DTX_IDX3 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x5C)
4255 +#define RX_BASE_PTR0 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x30)
4256 +#define RX_MAX_CNT0 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x34)
4257 +#define RX_CALC_IDX0 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x38)
4258 +#define RX_DRX_IDX0 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x3C)
4260 +#define RX_BASE_PTR1 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x40)
4261 +#define RX_MAX_CNT1 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x44)
4262 +#define RX_CALC_IDX1 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x48)
4263 +#define RX_DRX_IDX1 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x4C)
4267 +#define DELAY_INT_INIT 0x84048404
4268 +#define FE_INT_DLY_INIT (TX_DLY_INT | RX_DLY_INT)
4271 +#if !defined (CONFIG_RALINK_RT5350) && !defined (CONFIG_RALINK_MT7628)
4273 +/* 6. Counter and Meter Table */
4274 +#define PPE_AC_BCNT0 (RALINK_FRAME_ENGINE_BASE+RACMTABLE_OFFSET+0x000) /* PPE Accounting Group 0 Byte Cnt */
4275 +#define PPE_AC_PCNT0 (RALINK_FRAME_ENGINE_BASE+RACMTABLE_OFFSET+0x004) /* PPE Accounting Group 0 Packet Cnt */
4278 +#define PPE_MTR_CNT0 (RALINK_FRAME_ENGINE_BASE+RACMTABLE_OFFSET+0x200) /* 0 ~ 63 */
4280 +#define PPE_MTR_CNT63 (RALINK_FRAME_ENGINE_BASE+RACMTABLE_OFFSET+0x2FC)
4282 +#define GDMA_TX_GBCNT0 (RALINK_FRAME_ENGINE_BASE+RACMTABLE_OFFSET+0x300) /* Transmit good byte cnt for GEport */
4283 +#define GDMA_TX_GPCNT0 (RALINK_FRAME_ENGINE_BASE+RACMTABLE_OFFSET+0x304) /* Transmit good pkt cnt for GEport */
4284 +#define GDMA_TX_SKIPCNT0 (RALINK_FRAME_ENGINE_BASE+RACMTABLE_OFFSET+0x308) /* Transmit skip cnt for GEport */
4285 +#define GDMA_TX_COLCNT0 (RALINK_FRAME_ENGINE_BASE+RACMTABLE_OFFSET+0x30C) /* Transmit collision cnt for GEport */
4287 +/* updated these address mappings to fit data sheet v0.26, by bobtseng, 2007.6.14 */
4288 +#define GDMA_RX_GBCNT0 (RALINK_FRAME_ENGINE_BASE+RACMTABLE_OFFSET+0x320)
4289 +#define GDMA_RX_GPCNT0 (RALINK_FRAME_ENGINE_BASE+RACMTABLE_OFFSET+0x324)
4290 +#define GDMA_RX_OERCNT0 (RALINK_FRAME_ENGINE_BASE+RACMTABLE_OFFSET+0x328)
4291 +#define GDMA_RX_FERCNT0 (RALINK_FRAME_ENGINE_BASE+RACMTABLE_OFFSET+0x32C)
4292 +#define GDMA_RX_SERCNT0 (RALINK_FRAME_ENGINE_BASE+RACMTABLE_OFFSET+0x330)
4293 +#define GDMA_RX_LERCNT0 (RALINK_FRAME_ENGINE_BASE+RACMTABLE_OFFSET+0x334)
4294 +#define GDMA_RX_CERCNT0 (RALINK_FRAME_ENGINE_BASE+RACMTABLE_OFFSET+0x338)
4295 +#define GDMA_RX_FCCNT1 (RALINK_FRAME_ENGINE_BASE+RACMTABLE_OFFSET+0x33C)
4299 +/* LRO global control */
4300 +/* Bits [15:0]:LRO_ALT_RFSH_TIMER, Bits [20:16]:LRO_ALT_TICK_TIMER */
4301 +#define LRO_ALT_REFRESH_TIMER (RALINK_FRAME_ENGINE_BASE+0x001C)
4303 +/* LRO auto-learn table info */
4304 +#define PDMA_FE_ALT_CF8 (RALINK_FRAME_ENGINE_BASE+0x0300)
4305 +#define PDMA_FE_ALT_SGL_CFC (RALINK_FRAME_ENGINE_BASE+0x0304)
4306 +#define PDMA_FE_ALT_SEQ_CFC (RALINK_FRAME_ENGINE_BASE+0x0308)
4309 +#define ADMA_LRO_CTRL_OFFSET 0x0980
4311 + * Bit [0]:LRO_EN, Bit [1]:LRO_IPv6_EN, Bit [2]:MULTIPLE_NON_LRO_RX_RING_EN, Bit [3]:MULTIPLE_RXD_PREFETCH_EN,
4312 + * Bit [4]:RXD_PREFETCH_EN, Bit [5]:LRO_DLY_INT_EN, Bit [6]:LRO_CRSN_BNW, Bit [7]:L3_CKS_UPD_EN,
4313 + * Bit [20]:first_ineligible_pkt_redirect_en, Bit [21]:cr_lro_alt_score_mode, Bit [22]:cr_lro_alt_rplc_mode,
4314 + * Bit [23]:cr_lro_l4_ctrl_psh_en, Bits [28:26]:LRO_RING_RELINGUISH_REQ, Bits [31:29]:LRO_RING_RELINGUISH_DONE
4316 +#define ADMA_LRO_CTRL_DW0 (RALINK_FRAME_ENGINE_BASE+ADMA_LRO_CTRL_OFFSET+0x00)
4317 +/* Bits [31:0]:LRO_CPU_REASON */
4318 +#define ADMA_LRO_CTRL_DW1 (RALINK_FRAME_ENGINE_BASE+ADMA_LRO_CTRL_OFFSET+0x04)
4319 +/* Bits [31:0]:AUTO_LEARN_LRO_ELIGIBLE_THRESHOLD */
4320 +#define ADMA_LRO_CTRL_DW2 (RALINK_FRAME_ENGINE_BASE+ADMA_LRO_CTRL_OFFSET+0x08)
4322 + * Bits [7:0]:LRO_MAX_AGGREGATED_CNT, Bits [11:8]:LRO_VLAN_EN, Bits [13:12]:LRO_VLAN_VID_CMP_DEPTH,
4323 + * Bit [14]:ADMA_FW_RSTN_REQ, Bit [15]:ADMA_MODE, Bits [31:16]:LRO_MIN_RXD_SDL0
4325 +#define ADMA_LRO_CTRL_DW3 (RALINK_FRAME_ENGINE_BASE+ADMA_LRO_CTRL_OFFSET+0x0C)
4327 +/* LRO RX delay interrupt configurations */
4328 +#define LRO_RX1_DLY_INT (RALINK_FRAME_ENGINE_BASE+0x0a70)
4329 +#define LRO_RX2_DLY_INT (RALINK_FRAME_ENGINE_BASE+0x0a74)
4330 +#define LRO_RX3_DLY_INT (RALINK_FRAME_ENGINE_BASE+0x0a78)
4332 +/* LRO auto-learn configurations */
4333 +#define PDMA_LRO_ATL_OVERFLOW_ADJ_OFFSET 0x0990
4334 +#define PDMA_LRO_ATL_OVERFLOW_ADJ (RALINK_FRAME_ENGINE_BASE+PDMA_LRO_ATL_OVERFLOW_ADJ_OFFSET)
4335 +#define LRO_ALT_SCORE_DELTA (RALINK_FRAME_ENGINE_BASE+0x0a4c)
4337 +/* LRO agg timer configurations */
4338 +#define LRO_MAX_AGG_TIME (RALINK_FRAME_ENGINE_BASE+0x0a5c)
4340 +/* LRO configurations of RX ring #0 */
4341 +#define LRO_RXRING0_OFFSET 0x0b00
4342 +#define LRO_RX_RING0_DIP_DW0 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING0_OFFSET+0x04)
4343 +#define LRO_RX_RING0_DIP_DW1 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING0_OFFSET+0x08)
4344 +#define LRO_RX_RING0_DIP_DW2 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING0_OFFSET+0x0C)
4345 +#define LRO_RX_RING0_DIP_DW3 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING0_OFFSET+0x10)
4346 +#define LRO_RX_RING0_CTRL_DW1 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING0_OFFSET+0x28)
4347 +/* Bit [8]:RING0_VLD, Bit [9]:RING0_MYIP_VLD */
4348 +#define LRO_RX_RING0_CTRL_DW2 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING0_OFFSET+0x2C)
4349 +#define LRO_RX_RING0_CTRL_DW3 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING0_OFFSET+0x30)
4350 +/* LRO configurations of RX ring #1 */
4351 +#define LRO_RXRING1_OFFSET 0x0b40
4352 +#define LRO_RX_RING1_STP_DTP_DW (RALINK_FRAME_ENGINE_BASE+LRO_RXRING1_OFFSET+0x00)
4353 +#define LRO_RX_RING1_DIP_DW0 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING1_OFFSET+0x04)
4354 +#define LRO_RX_RING1_DIP_DW1 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING1_OFFSET+0x08)
4355 +#define LRO_RX_RING1_DIP_DW2 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING1_OFFSET+0x0C)
4356 +#define LRO_RX_RING1_DIP_DW3 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING1_OFFSET+0x10)
4357 +#define LRO_RX_RING1_SIP_DW0 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING1_OFFSET+0x14)
4358 +#define LRO_RX_RING1_SIP_DW1 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING1_OFFSET+0x18)
4359 +#define LRO_RX_RING1_SIP_DW2 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING1_OFFSET+0x1C)
4360 +#define LRO_RX_RING1_SIP_DW3 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING1_OFFSET+0x20)
4361 +#define LRO_RX_RING1_CTRL_DW0 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING1_OFFSET+0x24)
4362 +#define LRO_RX_RING1_CTRL_DW1 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING1_OFFSET+0x28)
4363 +#define LRO_RX_RING1_CTRL_DW2 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING1_OFFSET+0x2C)
4364 +#define LRO_RX_RING1_CTRL_DW3 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING1_OFFSET+0x30)
4365 +#define LRO_RXRING2_OFFSET 0x0b80
4366 +#define LRO_RX_RING2_STP_DTP_DW (RALINK_FRAME_ENGINE_BASE+LRO_RXRING2_OFFSET+0x00)
4367 +#define LRO_RX_RING2_DIP_DW0 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING2_OFFSET+0x04)
4368 +#define LRO_RX_RING2_DIP_DW1 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING2_OFFSET+0x08)
4369 +#define LRO_RX_RING2_DIP_DW2 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING2_OFFSET+0x0C)
4370 +#define LRO_RX_RING2_DIP_DW3 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING2_OFFSET+0x10)
4371 +#define LRO_RX_RING2_SIP_DW0 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING2_OFFSET+0x14)
4372 +#define LRO_RX_RING2_SIP_DW1 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING2_OFFSET+0x18)
4373 +#define LRO_RX_RING2_SIP_DW2 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING2_OFFSET+0x1C)
4374 +#define LRO_RX_RING2_SIP_DW3 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING2_OFFSET+0x20)
4375 +#define LRO_RX_RING2_CTRL_DW0 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING2_OFFSET+0x24)
4376 +#define LRO_RX_RING2_CTRL_DW1 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING2_OFFSET+0x28)
4377 +#define LRO_RX_RING2_CTRL_DW2 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING2_OFFSET+0x2C)
4378 +#define LRO_RX_RING2_CTRL_DW3 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING2_OFFSET+0x30)
4379 +#define LRO_RXRING3_OFFSET 0x0bc0
4380 +#define LRO_RX_RING3_STP_DTP_DW (RALINK_FRAME_ENGINE_BASE+LRO_RXRING3_OFFSET+0x00)
4381 +#define LRO_RX_RING3_DIP_DW0 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING3_OFFSET+0x04)
4382 +#define LRO_RX_RING3_DIP_DW1 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING3_OFFSET+0x08)
4383 +#define LRO_RX_RING3_DIP_DW2 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING3_OFFSET+0x0C)
4384 +#define LRO_RX_RING3_DIP_DW3 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING3_OFFSET+0x10)
4385 +#define LRO_RX_RING3_SIP_DW0 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING3_OFFSET+0x14)
4386 +#define LRO_RX_RING3_SIP_DW1 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING3_OFFSET+0x18)
4387 +#define LRO_RX_RING3_SIP_DW2 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING3_OFFSET+0x1C)
4388 +#define LRO_RX_RING3_SIP_DW3 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING3_OFFSET+0x20)
4389 +#define LRO_RX_RING3_CTRL_DW0 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING3_OFFSET+0x24)
4390 +#define LRO_RX_RING3_CTRL_DW1 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING3_OFFSET+0x28)
4391 +#define LRO_RX_RING3_CTRL_DW2 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING3_OFFSET+0x2C)
4392 +#define LRO_RX_RING3_CTRL_DW3 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING3_OFFSET+0x30)
4394 +/* LRO RX ring mode */
4395 +#define PDMA_RX_NORMAL_MODE (0x0)
4396 +#define PDMA_RX_PSE_MODE (0x1)
4397 +#define PDMA_RX_FORCE_PORT (0x2)
4398 +#define PDMA_RX_AUTO_LEARN (0x3)
4400 +#define ADMA_RX_RING0 (0)
4401 +#define ADMA_RX_RING1 (1)
4402 +#define ADMA_RX_RING2 (2)
4403 +#define ADMA_RX_RING3 (3)
4405 +#define ADMA_RX_LEN0_MASK (0x3fff)
4406 +#define ADMA_RX_LEN1_MASK (0x3)
4408 +#define PDMA_LRO_EN BIT(0)
4409 +#define PDMA_LRO_IPV6_EN BIT(1)
4410 +#define PDMA_LRO_IPV4_CSUM_UPDATE_EN BIT(7)
4411 +#define PDMA_LRO_IPV4_CTRL_PUSH_EN BIT(23)
4412 +#define PDMA_LRO_RXD_PREFETCH_EN BITS(3,4)
4413 +#define PDMA_NON_LRO_MULTI_EN BIT(2)
4414 +#define PDMA_LRO_DLY_INT_EN BIT(5)
4415 +#define PDMA_LRO_FUSH_REQ BITS(26,28)
4416 +#define PDMA_LRO_RELINGUISH BITS(29,31)
4417 +#define PDMA_LRO_FREQ_PRI_ADJ BITS(16,19)
4418 +#define PDMA_LRO_TPUT_PRE_ADJ BITS(8,11)
4419 +#define PDMA_LRO_TPUT_PRI_ADJ BITS(12,15)
4420 +#define PDMA_LRO_ALT_SCORE_MODE BIT(21)
4421 +#define PDMA_LRO_RING_AGE1 BITS(22,31)
4422 +#define PDMA_LRO_RING_AGE2 BITS(0,5)
4423 +#define PDMA_LRO_RING_AGG BITS(10,25)
4424 +#define PDMA_LRO_RING_AGG_CNT1 BITS(26,31)
4425 +#define PDMA_LRO_RING_AGG_CNT2 BITS(0,1)
4426 +#define PDMA_LRO_ALT_TICK_TIMER BITS(16,20)
4427 +#define PDMA_LRO_LRO_MIN_RXD_SDL0 BITS(16,31)
4429 +#define PDMA_LRO_DLY_INT_EN_OFFSET (5)
4430 +#define PDMA_LRO_TPUT_PRE_ADJ_OFFSET (8)
4431 +#define PDMA_LRO_FREQ_PRI_ADJ_OFFSET (16)
4432 +#define PDMA_LRO_LRO_MIN_RXD_SDL0_OFFSET (16)
4433 +#define PDMA_LRO_TPUT_PRI_ADJ_OFFSET (12)
4434 +#define PDMA_LRO_ALT_SCORE_MODE_OFFSET (21)
4435 +#define PDMA_LRO_FUSH_REQ_OFFSET (26)
4436 +#define PDMA_NON_LRO_MULTI_EN_OFFSET (2)
4437 +#define PDMA_LRO_IPV6_EN_OFFSET (1)
4438 +#define PDMA_LRO_RXD_PREFETCH_EN_OFFSET (3)
4439 +#define PDMA_LRO_IPV4_CSUM_UPDATE_EN_OFFSET (7)
4440 +#define PDMA_LRO_IPV4_CTRL_PUSH_EN_OFFSET (23)
4441 +#define PDMA_LRO_ALT_TICK_TIMER_OFFSET (16)
4443 +#define PDMA_LRO_TPUT_OVERFLOW_ADJ BITS(12,31)
4444 +#define PDMA_LRO_CNT_OVERFLOW_ADJ BITS(0,11)
4446 +#define PDMA_LRO_TPUT_OVERFLOW_ADJ_OFFSET (12)
4447 +#define PDMA_LRO_CNT_OVERFLOW_ADJ_OFFSET (0)
4449 +#define PDMA_LRO_ALT_BYTE_CNT_MODE (0)
4450 +#define PDMA_LRO_ALT_PKT_CNT_MODE (1)
4452 +/* LRO_RX_RING1_CTRL_DW1 offsets */
4453 +#define PDMA_LRO_AGE_H_OFFSET (10)
4454 +#define PDMA_LRO_RING_AGE1_OFFSET (22)
4455 +#define PDMA_LRO_RING_AGG_CNT1_OFFSET (26)
4456 +/* LRO_RX_RING1_CTRL_DW2 offsets */
4457 +#define PDMA_RX_MODE_OFFSET (6)
4458 +#define PDMA_RX_PORT_VALID_OFFSET (8)
4459 +#define PDMA_RX_MYIP_VALID_OFFSET (9)
4460 +#define PDMA_LRO_RING_AGE2_OFFSET (0)
4461 +#define PDMA_LRO_RING_AGG_OFFSET (10)
4462 +#define PDMA_LRO_RING_AGG_CNT2_OFFSET (0)
4463 +/* LRO_RX_RING1_CTRL_DW3 offsets */
4464 +#define PDMA_LRO_AGG_CNT_H_OFFSET (6)
4465 +/* LRO_RX_RING1_STP_DTP_DW offsets */
4466 +#define PDMA_RX_TCP_SRC_PORT_OFFSET (16)
4467 +#define PDMA_RX_TCP_DEST_PORT_OFFSET (0)
4468 +/* LRO_RX_RING1_CTRL_DW0 offsets */
4469 +#define PDMA_RX_IPV4_FORCE_OFFSET (1)
4470 +#define PDMA_RX_IPV6_FORCE_OFFSET (0)
4472 +#define SET_ADMA_RX_LEN0(x) ((x)&ADMA_RX_LEN0_MASK)
4473 +#define SET_ADMA_RX_LEN1(x) ((x)&ADMA_RX_LEN1_MASK)
4475 +#define SET_PDMA_LRO_MAX_AGG_CNT(x) \
4476 + { volatile unsigned int *addr = (unsigned int*)ADMA_LRO_CTRL_DW3; \
4478 + *addr |= ((x) & 0xff); \
4480 +#define SET_PDMA_LRO_FLUSH_REQ(x) \
4481 + { volatile unsigned int *addr = (unsigned int*)ADMA_LRO_CTRL_DW0; \
4482 + *addr &= ~PDMA_LRO_FUSH_REQ; \
4483 + *addr |= ((x) & 0x7)<<PDMA_LRO_FUSH_REQ_OFFSET; \
4485 +#define SET_PDMA_LRO_IPV6_EN(x) \
4486 + { volatile unsigned int *addr = (unsigned int*)ADMA_LRO_CTRL_DW0; \
4487 + *addr &= ~PDMA_LRO_IPV6_EN; \
4488 + *addr |= ((x) & 0x1)<<PDMA_LRO_IPV6_EN_OFFSET; \
4490 +#if defined(CONFIG_RAETH_HW_LRO_PREFETCH)
4491 +#define SET_PDMA_LRO_RXD_PREFETCH_EN(x) \
4492 + { volatile unsigned int *addr = (unsigned int*)ADMA_LRO_CTRL_DW0; \
4493 + *addr &= ~PDMA_LRO_RXD_PREFETCH_EN; \
4494 + *addr |= ((x) & 0x3)<<PDMA_LRO_RXD_PREFETCH_EN_OFFSET; \
4497 +#define SET_PDMA_LRO_RXD_PREFETCH_EN(x)
4499 +#define SET_PDMA_LRO_IPV4_CSUM_UPDATE_EN(x) \
4500 + { volatile unsigned int *addr = (unsigned int*)ADMA_LRO_CTRL_DW0; \
4501 + *addr &= ~PDMA_LRO_IPV4_CSUM_UPDATE_EN; \
4502 + *addr |= ((x) & 0x1)<<PDMA_LRO_IPV4_CSUM_UPDATE_EN_OFFSET; \
4504 +#define SET_PDMA_LRO_IPV4_CTRL_PUSH_EN(x) \
4505 + { volatile unsigned int *addr = (unsigned int*)ADMA_LRO_CTRL_DW0; \
4506 + *addr &= ~PDMA_LRO_IPV4_CTRL_PUSH_EN; \
4507 + *addr |= ((x) & 0x1)<<PDMA_LRO_IPV4_CTRL_PUSH_EN_OFFSET; \
4509 +#define SET_PDMA_NON_LRO_MULTI_EN(x) \
4510 + { volatile unsigned int *addr = (unsigned int*)ADMA_LRO_CTRL_DW0; \
4511 + *addr &= ~(PDMA_NON_LRO_MULTI_EN); \
4512 + *addr |= ((x) & 0x1)<<PDMA_NON_LRO_MULTI_EN_OFFSET; \
4514 +#define SET_PDMA_LRO_FREQ_PRI_ADJ(x) \
4515 + { volatile unsigned int *addr = (unsigned int*)ADMA_LRO_CTRL_DW0; \
4516 + *addr &= ~PDMA_LRO_FREQ_PRI_ADJ; \
4517 + *addr |= ((x) & 0xf)<<PDMA_LRO_FREQ_PRI_ADJ_OFFSET; \
4519 +#define SET_PDMA_LRO_TPUT_PRE_ADJ(x) \
4520 + { volatile unsigned int *addr = (unsigned int*)ADMA_LRO_CTRL_DW0; \
4521 + *addr &= ~PDMA_LRO_TPUT_PRE_ADJ; \
4522 + *addr |= ((x) & 0xf)<<PDMA_LRO_TPUT_PRE_ADJ_OFFSET; \
4524 +#define SET_PDMA_LRO_TPUT_PRI_ADJ(x) \
4525 + { volatile unsigned int *addr = (unsigned int*)ADMA_LRO_CTRL_DW0; \
4526 + *addr &= ~PDMA_LRO_TPUT_PRI_ADJ; \
4527 + *addr |= ((x) & 0xf)<<PDMA_LRO_TPUT_PRI_ADJ_OFFSET; \
4529 +#define SET_PDMA_LRO_ALT_SCORE_MODE(x) \
4530 + { volatile unsigned int *addr = (unsigned int*)ADMA_LRO_CTRL_DW0; \
4531 + *addr &= ~PDMA_LRO_ALT_SCORE_MODE; \
4532 + *addr |= ((x) & 0x1)<<PDMA_LRO_ALT_SCORE_MODE_OFFSET; \
4534 +#define SET_PDMA_LRO_DLY_INT_EN(x) \
4535 + { volatile unsigned int *addr = (unsigned int*)ADMA_LRO_CTRL_DW0; \
4536 + *addr &= ~PDMA_LRO_DLY_INT_EN; \
4537 + *addr |= ((x) & 0x1)<<PDMA_LRO_DLY_INT_EN_OFFSET; \
4539 +#define SET_PDMA_LRO_BW_THRESHOLD(x) \
4540 + { volatile unsigned int *addr = (unsigned int*)ADMA_LRO_CTRL_DW2; \
4543 +#define SET_PDMA_LRO_MIN_RXD_SDL(x) \
4544 + { volatile unsigned int *addr = (unsigned int*)ADMA_LRO_CTRL_DW3; \
4545 + *addr &= ~PDMA_LRO_LRO_MIN_RXD_SDL0; \
4546 + *addr |= ((x) & 0xffff)<<PDMA_LRO_LRO_MIN_RXD_SDL0_OFFSET; \
4548 +#define SET_PDMA_LRO_TPUT_OVERFLOW_ADJ(x) \
4549 + { volatile unsigned int *addr = (unsigned int*)PDMA_LRO_ATL_OVERFLOW_ADJ; \
4550 + *addr &= ~PDMA_LRO_TPUT_OVERFLOW_ADJ; \
4551 + *addr |= ((x) & 0xfffff)<<PDMA_LRO_TPUT_OVERFLOW_ADJ_OFFSET; \
4553 +#define SET_PDMA_LRO_CNT_OVERFLOW_ADJ(x) \
4554 + { volatile unsigned int *addr = (unsigned int*)PDMA_LRO_ATL_OVERFLOW_ADJ; \
4555 + *addr &= ~PDMA_LRO_CNT_OVERFLOW_ADJ; \
4556 + *addr |= ((x) & 0xfff)<<PDMA_LRO_CNT_OVERFLOW_ADJ_OFFSET; \
4558 +#define SET_PDMA_LRO_ALT_REFRESH_TIMER_UNIT(x) \
4559 + { volatile unsigned int *addr = (unsigned int*)LRO_ALT_REFRESH_TIMER; \
4560 + *addr &= ~PDMA_LRO_ALT_TICK_TIMER; \
4561 + *addr |= ((x) & 0x1f)<<PDMA_LRO_ALT_TICK_TIMER_OFFSET; \
4563 +#define SET_PDMA_LRO_ALT_REFRESH_TIMER(x) \
4564 + { volatile unsigned int *addr = (unsigned int*)LRO_ALT_REFRESH_TIMER; \
4565 + *addr &= ~0xffff; \
4566 + *addr |= ((x) & 0xffff); \
4568 +#define SET_PDMA_LRO_MAX_AGG_TIME(x) \
4569 + { volatile unsigned int *addr = (unsigned int*)LRO_MAX_AGG_TIME; \
4570 + *addr &= ~0xffff; \
4571 + *addr |= ((x) & 0xffff); \
4573 +#define SET_PDMA_RXRING_MODE(x,y) \
4574 + { volatile unsigned int *addr = (unsigned int*)(LRO_RX_RING0_CTRL_DW2 + ((x) << 6)); \
4575 + *addr &= ~(0x3<<PDMA_RX_MODE_OFFSET); \
4576 + *addr |= (y)<<PDMA_RX_MODE_OFFSET; \
4578 +#define SET_PDMA_RXRING_MYIP_VALID(x,y) \
4579 + { volatile unsigned int *addr = (unsigned int*)(LRO_RX_RING0_CTRL_DW2 + ((x) << 6)); \
4580 + *addr &= ~(0x1<<PDMA_RX_MYIP_VALID_OFFSET); \
4581 + *addr |= ((y)&0x1)<<PDMA_RX_MYIP_VALID_OFFSET; \
4583 +#define SET_PDMA_RXRING_VALID(x,y) \
4584 + { volatile unsigned int *addr = (unsigned int*)(LRO_RX_RING0_CTRL_DW2 + ((x) << 6)); \
4585 + *addr &= ~(0x1<<PDMA_RX_PORT_VALID_OFFSET); \
4586 + *addr |= ((y)&0x1)<<PDMA_RX_PORT_VALID_OFFSET; \
4588 +#define SET_PDMA_RXRING_TCP_SRC_PORT(x,y) \
4589 + { volatile unsigned int *addr = (unsigned int*)(LRO_RX_RING1_STP_DTP_DW + (((x)-1) << 6)); \
4590 + *addr &= ~(0xffff<<PDMA_RX_TCP_SRC_PORT_OFFSET); \
4591 + *addr |= (y)<<PDMA_RX_TCP_SRC_PORT_OFFSET; \
4593 +#define SET_PDMA_RXRING_TCP_DEST_PORT(x,y) \
4594 + { volatile unsigned int *addr = (unsigned int*)(LRO_RX_RING1_STP_DTP_DW + (((x)-1) << 6)); \
4595 + *addr &= ~(0xffff<<PDMA_RX_TCP_DEST_PORT_OFFSET); \
4596 + *addr |= (y)<<PDMA_RX_TCP_DEST_PORT_OFFSET; \
4598 +#define SET_PDMA_RXRING_IPV4_FORCE_MODE(x,y) \
4599 + { volatile unsigned int *addr = (unsigned int*)(LRO_RX_RING1_CTRL_DW0 + (((x)-1) << 6)); \
4600 + *addr &= ~(0x1<<PDMA_RX_IPV4_FORCE_OFFSET); \
4601 + *addr |= (y)<<PDMA_RX_IPV4_FORCE_OFFSET; \
4603 +#define SET_PDMA_RXRING_IPV6_FORCE_MODE(x,y) \
4604 + { volatile unsigned int *addr = (unsigned int*)(LRO_RX_RING1_CTRL_DW0 + (((x)-1) << 6)); \
4605 + *addr &= ~(0x1<<PDMA_RX_IPV6_FORCE_OFFSET); \
4606 + *addr |= (y)<<PDMA_RX_IPV6_FORCE_OFFSET; \
4608 +#define SET_PDMA_RXRING_AGE_TIME(x,y) \
4609 + { volatile unsigned int *addr1 = (unsigned int*)(LRO_RX_RING0_CTRL_DW1 + ((x) << 6)); \
4610 + volatile unsigned int *addr2 = (unsigned int*)(LRO_RX_RING0_CTRL_DW2 + ((x) << 6)); \
4611 + *addr1 &= ~PDMA_LRO_RING_AGE1; \
4612 + *addr2 &= ~PDMA_LRO_RING_AGE2; \
4613 + *addr1 |= ((y) & 0x3ff)<<PDMA_LRO_RING_AGE1_OFFSET; \
4614 + *addr2 |= (((y)>>PDMA_LRO_AGE_H_OFFSET) & 0x03f)<<PDMA_LRO_RING_AGE2_OFFSET; \
4616 +#define SET_PDMA_RXRING_AGG_TIME(x,y) \
4617 + { volatile unsigned int *addr = (unsigned int*)(LRO_RX_RING0_CTRL_DW2 + ((x) << 6)); \
4618 + *addr &= ~PDMA_LRO_RING_AGG; \
4619 + *addr |= ((y) & 0xffff)<<PDMA_LRO_RING_AGG_OFFSET; \
4621 +#define SET_PDMA_RXRING_MAX_AGG_CNT(x,y) \
4622 + { volatile unsigned int *addr1 = (unsigned int*)(LRO_RX_RING1_CTRL_DW2 + (((x)-1) << 6)); \
4623 + volatile unsigned int *addr2 = (unsigned int*)(LRO_RX_RING1_CTRL_DW3 + (((x)-1) << 6)); \
4624 + *addr1 &= ~PDMA_LRO_RING_AGG_CNT1; \
4625 + *addr2 &= ~PDMA_LRO_RING_AGG_CNT2; \
4626 + *addr1 |= ((y) & 0x3f)<<PDMA_LRO_RING_AGG_CNT1_OFFSET; \
4627 + *addr2 |= (((y)>>PDMA_LRO_AGG_CNT_H_OFFSET) & 0x03)<<PDMA_LRO_RING_AGG_CNT2_OFFSET; \
4630 +typedef struct _PDMA_LRO_AUTO_TLB_INFO0_ PDMA_LRO_AUTO_TLB_INFO0_T;
4631 +typedef struct _PDMA_LRO_AUTO_TLB_INFO1_ PDMA_LRO_AUTO_TLB_INFO1_T;
4632 +typedef struct _PDMA_LRO_AUTO_TLB_INFO2_ PDMA_LRO_AUTO_TLB_INFO2_T;
4633 +typedef struct _PDMA_LRO_AUTO_TLB_INFO3_ PDMA_LRO_AUTO_TLB_INFO3_T;
4634 +typedef struct _PDMA_LRO_AUTO_TLB_INFO4_ PDMA_LRO_AUTO_TLB_INFO4_T;
4635 +typedef struct _PDMA_LRO_AUTO_TLB_INFO5_ PDMA_LRO_AUTO_TLB_INFO5_T;
4636 +typedef struct _PDMA_LRO_AUTO_TLB_INFO6_ PDMA_LRO_AUTO_TLB_INFO6_T;
4637 +typedef struct _PDMA_LRO_AUTO_TLB_INFO7_ PDMA_LRO_AUTO_TLB_INFO7_T;
4638 +typedef struct _PDMA_LRO_AUTO_TLB_INFO8_ PDMA_LRO_AUTO_TLB_INFO8_T;
4640 +struct _PDMA_LRO_AUTO_TLB_INFO0_
4642 + unsigned int DTP : 16;
4643 + unsigned int STP : 16;
4645 +struct _PDMA_LRO_AUTO_TLB_INFO1_
4647 + unsigned int SIP0 : 32;
4649 +struct _PDMA_LRO_AUTO_TLB_INFO2_
4651 + unsigned int SIP1 : 32;
4653 +struct _PDMA_LRO_AUTO_TLB_INFO3_
4655 + unsigned int SIP2 : 32;
4657 +struct _PDMA_LRO_AUTO_TLB_INFO4_
4659 + unsigned int SIP3 : 32;
4661 +struct _PDMA_LRO_AUTO_TLB_INFO5_
4663 + unsigned int VLAN_VID0 : 32;
4665 +struct _PDMA_LRO_AUTO_TLB_INFO6_
4667 + unsigned int VLAN_VID1 : 16;
4668 + unsigned int VLAN_VID_VLD : 4;
4669 + unsigned int CNT : 12;
4671 +struct _PDMA_LRO_AUTO_TLB_INFO7_
4673 + unsigned int DW_LEN : 32;
4675 +struct _PDMA_LRO_AUTO_TLB_INFO8_
4677 + unsigned int DIP_ID : 2;
4678 + unsigned int IPV6 : 1;
4679 + unsigned int IPV4 : 1;
4680 + unsigned int RESV : 27;
4681 + unsigned int VALID : 1;
4683 +struct PDMA_LRO_AUTO_TLB_INFO {
4684 + PDMA_LRO_AUTO_TLB_INFO0_T auto_tlb_info0;
4685 + PDMA_LRO_AUTO_TLB_INFO1_T auto_tlb_info1;
4686 + PDMA_LRO_AUTO_TLB_INFO2_T auto_tlb_info2;
4687 + PDMA_LRO_AUTO_TLB_INFO3_T auto_tlb_info3;
4688 + PDMA_LRO_AUTO_TLB_INFO4_T auto_tlb_info4;
4689 + PDMA_LRO_AUTO_TLB_INFO5_T auto_tlb_info5;
4690 + PDMA_LRO_AUTO_TLB_INFO6_T auto_tlb_info6;
4691 + PDMA_LRO_AUTO_TLB_INFO7_T auto_tlb_info7;
4692 + PDMA_LRO_AUTO_TLB_INFO8_T auto_tlb_info8;
4695 +#if defined (CONFIG_HW_SFQ)
4696 +#define VQTX_TB_BASE0 (ETHDMASYS_FRAME_ENGINE_BASE + 0x1980)
4697 +#define VQTX_TB_BASE1 (ETHDMASYS_FRAME_ENGINE_BASE + 0x1984)
4698 +#define VQTX_TB_BASE2 (ETHDMASYS_FRAME_ENGINE_BASE + 0x1988)
4699 +#define VQTX_TB_BASE3 (ETHDMASYS_FRAME_ENGINE_BASE + 0x198C)
4700 +#define SFQ_OFFSET 0x1A80
4701 +#define VQTX_GLO (ETHDMASYS_FRAME_ENGINE_BASE + SFQ_OFFSET)
4702 +#define VQTX_INVLD_PTR (ETHDMASYS_FRAME_ENGINE_BASE + SFQ_OFFSET + 0x0C)
4703 +#define VQTX_NUM (ETHDMASYS_FRAME_ENGINE_BASE + SFQ_OFFSET + 0x10)
4704 +#define VQTX_SCH (ETHDMASYS_FRAME_ENGINE_BASE + SFQ_OFFSET + 0x18)
4705 +#define VQTX_HASH_CFG (ETHDMASYS_FRAME_ENGINE_BASE + SFQ_OFFSET + 0x20)
4706 +#define VQTX_HASH_SD (ETHDMASYS_FRAME_ENGINE_BASE + SFQ_OFFSET + 0x24)
4707 +#define VQTX_VLD_CFG (ETHDMASYS_FRAME_ENGINE_BASE + SFQ_OFFSET + 0x30)
4708 +#define VQTX_MIB_IF (ETHDMASYS_FRAME_ENGINE_BASE + SFQ_OFFSET + 0x3C)
4709 +#define VQTX_MIB_PCNT (ETHDMASYS_FRAME_ENGINE_BASE + SFQ_OFFSET + 0x40)
4710 +#define VQTX_MIB_BCNT0 (ETHDMASYS_FRAME_ENGINE_BASE + SFQ_OFFSET + 0x44)
4711 +#define VQTX_MIB_BCNT1 (ETHDMASYS_FRAME_ENGINE_BASE + SFQ_OFFSET + 0x48)
4713 +#define VQTX_MIB_EN (1<<17)
4714 +#define VQTX_NUM_0 (4<<0)
4715 +#define VQTX_NUM_1 (4<<4)
4716 +#define VQTX_NUM_2 (4<<8)
4717 +#define VQTX_NUM_3 (4<<12)
4719 +/*=========================================
4720 + SFQ Table Format define
4721 +=========================================*/
4722 +typedef struct _SFQ_INFO1_ SFQ_INFO1_T;
4726 + unsigned int VQHPTR;
4728 +//-------------------------------------------------
4729 +typedef struct _SFQ_INFO2_ SFQ_INFO2_T;
4733 + unsigned int VQTPTR;
4735 +//-------------------------------------------------
4736 +typedef struct _SFQ_INFO3_ SFQ_INFO3_T;
4740 + unsigned int QUE_DEPTH:16;
4741 + unsigned int DEFICIT_CNT:16;
4743 +//-------------------------------------------------
4744 +typedef struct _SFQ_INFO4_ SFQ_INFO4_T;
4748 + unsigned int RESV;
4750 +//-------------------------------------------------
4752 +typedef struct _SFQ_INFO5_ SFQ_INFO5_T;
4756 + unsigned int PKT_CNT;
4758 +//-------------------------------------------------
4760 +typedef struct _SFQ_INFO6_ SFQ_INFO6_T;
4764 + unsigned int BYTE_CNT;
4766 +//-------------------------------------------------
4768 +typedef struct _SFQ_INFO7_ SFQ_INFO7_T;
4772 + unsigned int BYTE_CNT;
4774 +//-------------------------------------------------
4776 +typedef struct _SFQ_INFO8_ SFQ_INFO8_T;
4780 + unsigned int RESV;
4785 + SFQ_INFO1_T sfq_info1;
4786 + SFQ_INFO2_T sfq_info2;
4787 + SFQ_INFO3_T sfq_info3;
4788 + SFQ_INFO4_T sfq_info4;
4789 + SFQ_INFO5_T sfq_info5;
4790 + SFQ_INFO6_T sfq_info6;
4791 + SFQ_INFO7_T sfq_info7;
4792 + SFQ_INFO8_T sfq_info8;
4796 +#if defined (CONFIG_RAETH_HW_LRO) || defined (CONFIG_RAETH_MULTIPLE_RX_RING)
4797 +#define FE_GDM_RXID1_OFFSET (0x0130)
4798 +#define FE_GDM_RXID1 (RALINK_FRAME_ENGINE_BASE+FE_GDM_RXID1_OFFSET)
4799 +#define GDM_VLAN_PRI7_RXID_SEL BITS(30,31)
4800 +#define GDM_VLAN_PRI6_RXID_SEL BITS(28,29)
4801 +#define GDM_VLAN_PRI5_RXID_SEL BITS(26,27)
4802 +#define GDM_VLAN_PRI4_RXID_SEL BITS(24,25)
4803 +#define GDM_VLAN_PRI3_RXID_SEL BITS(22,23)
4804 +#define GDM_VLAN_PRI2_RXID_SEL BITS(20,21)
4805 +#define GDM_VLAN_PRI1_RXID_SEL BITS(18,19)
4806 +#define GDM_VLAN_PRI0_RXID_SEL BITS(16,17)
4807 +#define GDM_TCP_ACK_RXID_SEL BITS(4,5)
4808 +#define GDM_TCP_ACK_WZPC BIT(3)
4809 +#define GDM_RXID_PRI_SEL BITS(0,2)
4811 +#define FE_GDM_RXID2_OFFSET (0x0134)
4812 +#define FE_GDM_RXID2 (RALINK_FRAME_ENGINE_BASE+FE_GDM_RXID2_OFFSET)
4813 +#define GDM_STAG7_RXID_SEL BITS(30,31)
4814 +#define GDM_STAG6_RXID_SEL BITS(28,29)
4815 +#define GDM_STAG5_RXID_SEL BITS(26,27)
4816 +#define GDM_STAG4_RXID_SEL BITS(24,25)
4817 +#define GDM_STAG3_RXID_SEL BITS(22,23)
4818 +#define GDM_STAG2_RXID_SEL BITS(20,21)
4819 +#define GDM_STAG1_RXID_SEL BITS(18,19)
4820 +#define GDM_STAG0_RXID_SEL BITS(16,17)
4821 +#define GDM_PID2_RXID_SEL BITS(2,3)
4822 +#define GDM_PID1_RXID_SEL BITS(0,1)
4824 +#define GDM_PRI_PID (0)
4825 +#define GDM_PRI_VLAN_PID (1)
4826 +#define GDM_PRI_ACK_PID (2)
4827 +#define GDM_PRI_VLAN_ACK_PID (3)
4828 +#define GDM_PRI_ACK_VLAN_PID (4)
4830 +#define SET_GDM_VLAN_PRI_RXID_SEL(x,y) \
4831 + { volatile unsigned int *addr = (unsigned int *)FE_GDM_RXID1; \
4832 + *addr &= ~(0x03 << (((x) << 1)+16)); \
4833 + *addr |= ((y) & 0x3) << (((x) << 1)+16); \
4835 +#define SET_GDM_TCP_ACK_RXID_SEL(x) \
4836 + { volatile unsigned int *addr = (unsigned int *)FE_GDM_RXID1; \
4837 + *addr &= ~(GDM_TCP_ACK_RXID_SEL); \
4838 + *addr |= ((x) & 0x3) << 4; \
4840 +#define SET_GDM_TCP_ACK_WZPC(x) \
4841 + { volatile unsigned int *addr = (unsigned int *)FE_GDM_RXID1; \
4842 + *addr &= ~(GDM_TCP_ACK_WZPC); \
4843 + *addr |= ((x) & 0x1) << 3; \
4845 +#define SET_GDM_RXID_PRI_SEL(x) \
4846 + { volatile unsigned int *addr = (unsigned int *)FE_GDM_RXID1; \
4847 + *addr &= ~(GDM_RXID_PRI_SEL); \
4848 + *addr |= (x) & 0x7; \
4850 +#define GDM_STAG_RXID_SEL(x,y) \
4851 + { volatile unsigned int *addr = (unsigned int *)FE_GDM_RXID2; \
4852 + *addr &= ~(0x03 << (((x) << 1)+16)); \
4853 + *addr |= ((y) & 0x3) << (((x) << 1)+16); \
4855 +#define SET_GDM_PID2_RXID_SEL(x) \
4856 + { volatile unsigned int *addr = (unsigned int *)FE_GDM_RXID2; \
4857 + *addr &= ~(GDM_PID2_RXID_SEL); \
4858 + *addr |= ((x) & 0x3) << 2; \
4860 +#define SET_GDM_PID1_RXID_SEL(x) \
4861 + { volatile unsigned int *addr = (unsigned int *)FE_GDM_RXID2; \
4862 + *addr &= ~(GDM_PID1_RXID_SEL); \
4863 + *addr |= ((x) & 0x3); \
4865 +#endif /* CONFIG_RAETH_HW_LRO || CONFIG_RAETH_MULTIPLE_RX_RING */
4866 +/* Per Port Packet Counts in RT3052, added by bobtseng 2009.4.17. */
4867 +#define PORT0_PKCOUNT (0xb01100e8)
4868 +#define PORT1_PKCOUNT (0xb01100ec)
4869 +#define PORT2_PKCOUNT (0xb01100f0)
4870 +#define PORT3_PKCOUNT (0xb01100f4)
4871 +#define PORT4_PKCOUNT (0xb01100f8)
4872 +#define PORT5_PKCOUNT (0xb01100fc)
4874 +#if defined (CONFIG_ARCH_MT7623)
4875 +#include "sync_write.h"
4876 +#define sysRegRead(phys) (*(volatile unsigned int *)((phys)))
4877 +#define sysRegWrite(phys, val) mt65xx_reg_sync_writel((val), (phys))
4879 +#define PHYS_TO_K1(physaddr) KSEG1ADDR(physaddr)
4880 +#define sysRegRead(phys) (*(volatile unsigned int *)PHYS_TO_K1(phys))
4881 +#define sysRegWrite(phys, val) ((*(volatile unsigned int *)PHYS_TO_K1(phys)) = (val))
4884 +#define u_long unsigned long
4885 +#define u32 unsigned int
4886 +#define u16 unsigned short
4889 +/* ====================================== */
4890 +#define GDM1_DISPAD BIT(18)
4891 +#define GDM1_DISCRC BIT(17)
4893 +//GDMA1 uni-cast frames destination port
4894 +#define GDM1_ICS_EN (0x1 << 22)
4895 +#define GDM1_TCS_EN (0x1 << 21)
4896 +#define GDM1_UCS_EN (0x1 << 20)
4897 +#define GDM1_JMB_EN (0x1 << 19)
4898 +#define GDM1_STRPCRC (0x1 << 16)
4899 +#define GDM1_UFRC_P_CPU (0 << 12)
4900 +#if defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
4901 +#define GDM1_UFRC_P_PPE (4 << 12)
4903 +#define GDM1_UFRC_P_PPE (6 << 12)
4906 +//GDMA1 broad-cast MAC address frames
4907 +#define GDM1_BFRC_P_CPU (0 << 8)
4908 +#if defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
4909 +#define GDM1_BFRC_P_PPE (4 << 8)
4911 +#define GDM1_BFRC_P_PPE (6 << 8)
4914 +//GDMA1 multi-cast MAC address frames
4915 +#define GDM1_MFRC_P_CPU (0 << 4)
4916 +#if defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
4917 +#define GDM1_MFRC_P_PPE (4 << 4)
4919 +#define GDM1_MFRC_P_PPE (6 << 4)
4922 +//GDMA1 other MAC address frames destination port
4923 +#define GDM1_OFRC_P_CPU (0 << 0)
4924 +#if defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
4925 +#define GDM1_OFRC_P_PPE (4 << 0)
4927 +#define GDM1_OFRC_P_PPE (6 << 0)
4930 +#if defined (CONFIG_RALINK_RT6856) || defined (CONFIG_RALINK_MT7620) || defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
4931 +/* checksum generator registers are removed */
4932 +#define ICS_GEN_EN (0 << 2)
4933 +#define UCS_GEN_EN (0 << 1)
4934 +#define TCS_GEN_EN (0 << 0)
4936 +#define ICS_GEN_EN (1 << 2)
4937 +#define UCS_GEN_EN (1 << 1)
4938 +#define TCS_GEN_EN (1 << 0)
4942 +#define MDIO_CFG_GP1_FC_TX (1 << 11)
4943 +#define MDIO_CFG_GP1_FC_RX (1 << 10)
4945 +/* ====================================== */
4946 +/* ====================================== */
4947 +#define GP1_LNK_DWN BIT(9)
4948 +#define GP1_AN_FAIL BIT(8)
4949 +/* ====================================== */
4950 +/* ====================================== */
4951 +#define PSE_RESET BIT(0)
4952 +/* ====================================== */
4953 +#define PST_DRX_IDX3 BIT(19)
4954 +#define PST_DRX_IDX2 BIT(18)
4955 +#define PST_DRX_IDX1 BIT(17)
4956 +#define PST_DRX_IDX0 BIT(16)
4957 +#define PST_DTX_IDX3 BIT(3)
4958 +#define PST_DTX_IDX2 BIT(2)
4959 +#define PST_DTX_IDX1 BIT(1)
4960 +#define PST_DTX_IDX0 BIT(0)
4962 +#define RX_2B_OFFSET BIT(31)
4963 +#define DESC_32B_EN BIT(8)
4964 +#define TX_WB_DDONE BIT(6)
4965 +#define RX_DMA_BUSY BIT(3)
4966 +#define TX_DMA_BUSY BIT(1)
4967 +#define RX_DMA_EN BIT(2)
4968 +#define TX_DMA_EN BIT(0)
4970 +#define PDMA_BT_SIZE_4DWORDS (0<<4)
4971 +#define PDMA_BT_SIZE_8DWORDS (1<<4)
4972 +#define PDMA_BT_SIZE_16DWORDS (2<<4)
4973 +#define PDMA_BT_SIZE_32DWORDS (3<<4)
4975 +#define ADMA_RX_BT_SIZE_4DWORDS (0<<11)
4976 +#define ADMA_RX_BT_SIZE_8DWORDS (1<<11)
4977 +#define ADMA_RX_BT_SIZE_16DWORDS (2<<11)
4978 +#define ADMA_RX_BT_SIZE_32DWORDS (3<<11)
4983 +#define MACCFG_RXEN (1<<2)
4984 +#define MACCFG_TXEN (1<<3)
4985 +#define MACCFG_PROMISC (1<<18)
4986 +#define MACCFG_RXMCAST (1<<19)
4987 +#define MACCFG_FDUPLEX (1<<20)
4988 +#define MACCFG_PORTSEL (1<<27)
4989 +#define MACCFG_HBEATDIS (1<<28)
4992 +#define DMACTL_SR (1<<1) /* Start/Stop Receive */
4993 +#define DMACTL_ST (1<<13) /* Start/Stop Transmission Command */
4995 +#define DMACFG_SWR (1<<0) /* Software Reset */
4996 +#define DMACFG_BURST32 (32<<8)
4998 +#define DMASTAT_TS 0x00700000 /* Transmit Process State */
4999 +#define DMASTAT_RS 0x000e0000 /* Receive Process State */
5001 +#define MACCFG_INIT 0 //(MACCFG_FDUPLEX) // | MACCFG_PORTSEL)
5005 +/* Descriptor bits.
5007 +#define R_OWN 0x80000000 /* Own Bit */
5008 +#define RD_RER 0x02000000 /* Receive End Of Ring */
5009 +#define RD_LS 0x00000100 /* Last Descriptor */
5010 +#define RD_ES 0x00008000 /* Error Summary */
5011 +#define RD_CHAIN 0x01000000 /* Chained */
5014 +#define T_OWN 0x80000000 /* Own Bit */
5015 +#define TD_ES 0x00008000 /* Error Summary */
5018 +#define TD_LS 0x40000000 /* Last Segment */
5019 +#define TD_FS 0x20000000 /* First Segment */
5020 +#define TD_TER 0x08000000 /* Transmit End Of Ring */
5021 +#define TD_CHAIN 0x01000000 /* Chained */
5024 +#define TD_SET 0x08000000 /* Setup Packet */
5027 +#define POLL_DEMAND 1
5029 +#define RSTCTL (0x34)
5030 +#define RSTCTL_RSTENET1 (1<<19)
5031 +#define RSTCTL_RSTENET2 (1<<20)
5033 +#define INIT_VALUE_OF_RT2883_PSE_FQ_CFG 0xff908000
5034 +#define INIT_VALUE_OF_PSE_FQFC_CFG 0x80504000
5035 +#define INIT_VALUE_OF_FORCE_100_FD 0x1001BC01
5036 +#define INIT_VALUE_OF_FORCE_1000_FD 0x1F01DC01
5038 +// Define Whole FE Reset Register
5039 +#define RSTCTRL (RALINK_SYSCTL_BASE + 0x34)
5040 +#define RT2880_AGPIOCFG_REG (RALINK_SYSCTL_BASE + 0x3C)
5042 +/*=========================================
5043 + PDMA RX Descriptor Format define
5044 +=========================================*/
5046 +//-------------------------------------------------
5047 +typedef struct _PDMA_RXD_INFO1_ PDMA_RXD_INFO1_T;
5049 +struct _PDMA_RXD_INFO1_
5051 + unsigned int PDP0;
5053 +//-------------------------------------------------
5054 +typedef struct _PDMA_RXD_INFO2_ PDMA_RXD_INFO2_T;
5056 +struct _PDMA_RXD_INFO2_
5058 +#if defined (CONFIG_ARCH_MT7623)
5059 + unsigned int PLEN1 : 2;
5060 + unsigned int LRO_AGG_CNT : 8;
5061 + unsigned int REV : 5;
5063 + unsigned int PLEN1 : 14;
5064 + unsigned int LS1 : 1;
5065 +#endif /* CONFIG_RAETH_HW_LRO */
5066 + unsigned int TAG : 1;
5067 + unsigned int PLEN0 : 14;
5068 + unsigned int LS0 : 1;
5069 + unsigned int DDONE_bit : 1;
5071 +//-------------------------------------------------
5072 +typedef struct _PDMA_RXD_INFO3_ PDMA_RXD_INFO3_T;
5074 +struct _PDMA_RXD_INFO3_
5076 + unsigned int VID:16;
5077 + unsigned int TPID:16;
5079 +//-------------------------------------------------
5080 +typedef struct _PDMA_RXD_INFO4_ PDMA_RXD_INFO4_T;
5082 +struct _PDMA_RXD_INFO4_
5084 +#if defined (CONFIG_RALINK_MT7620)
5085 + unsigned int FOE_Entry : 14;
5086 + unsigned int CRSN : 5;
5087 + unsigned int SPORT : 3;
5088 + unsigned int L4F : 1;
5089 + unsigned int L4VLD : 1;
5090 + unsigned int TACK : 1;
5091 + unsigned int IP4F : 1;
5092 + unsigned int IP4 : 1;
5093 + unsigned int IP6 : 1;
5094 + unsigned int UN_USE1 : 4;
5095 +#elif defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
5096 + unsigned int FOE_Entry : 14;
5097 + unsigned int CRSN : 5;
5098 + unsigned int SP : 4;
5099 + unsigned int L4F : 1;
5100 + unsigned int L4VLD : 1;
5101 + unsigned int TACK : 1;
5102 + unsigned int IP4F : 1;
5103 + unsigned int IP4 : 1;
5104 + unsigned int IP6 : 1;
5105 + unsigned int UN_USE1 : 3;
5107 + unsigned int FOE_Entry : 14;
5108 + unsigned int FVLD : 1;
5109 + unsigned int UN_USE1 : 1;
5110 + unsigned int AI : 8;
5111 + unsigned int SP : 3;
5112 + unsigned int AIS : 1;
5113 + unsigned int L4F : 1;
5114 + unsigned int IPF : 1;
5115 + unsigned int L4FVLD_bit : 1;
5116 + unsigned int IPFVLD_bit : 1;
5121 +struct PDMA_rxdesc {
5122 + PDMA_RXD_INFO1_T rxd_info1;
5123 + PDMA_RXD_INFO2_T rxd_info2;
5124 + PDMA_RXD_INFO3_T rxd_info3;
5125 + PDMA_RXD_INFO4_T rxd_info4;
5126 +#ifdef CONFIG_32B_DESC
5127 + unsigned int rxd_info5;
5128 + unsigned int rxd_info6;
5129 + unsigned int rxd_info7;
5130 + unsigned int rxd_info8;
5134 +/*=========================================
5135 + PDMA TX Descriptor Format define
5136 +=========================================*/
5137 +//-------------------------------------------------
5138 +typedef struct _PDMA_TXD_INFO1_ PDMA_TXD_INFO1_T;
5140 +struct _PDMA_TXD_INFO1_
5142 + unsigned int SDP0;
5144 +//-------------------------------------------------
5145 +typedef struct _PDMA_TXD_INFO2_ PDMA_TXD_INFO2_T;
5147 +struct _PDMA_TXD_INFO2_
5149 + unsigned int SDL1 : 14;
5150 + unsigned int LS1_bit : 1;
5151 + unsigned int BURST_bit : 1;
5152 + unsigned int SDL0 : 14;
5153 + unsigned int LS0_bit : 1;
5154 + unsigned int DDONE_bit : 1;
5156 +//-------------------------------------------------
5157 +typedef struct _PDMA_TXD_INFO3_ PDMA_TXD_INFO3_T;
5159 +struct _PDMA_TXD_INFO3_
5161 + unsigned int SDP1;
5163 +//-------------------------------------------------
5164 +typedef struct _PDMA_TXD_INFO4_ PDMA_TXD_INFO4_T;
5166 +struct _PDMA_TXD_INFO4_
5168 +#if defined (CONFIG_RALINK_MT7620)
5169 + unsigned int VPRI_VIDX : 8;
5170 + unsigned int SIDX : 4;
5171 + unsigned int INSP : 1;
5172 + unsigned int RESV : 2;
5173 + unsigned int UDF : 5;
5174 + unsigned int FP_BMAP : 8;
5175 + unsigned int TSO : 1;
5176 + unsigned int TUI_CO : 3;
5177 +#elif defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
5178 + unsigned int VLAN_TAG :17; // INSV(1)+VPRI(3)+CFI(1)+VID(12)
5179 + unsigned int RESV : 2;
5180 + unsigned int UDF : 6;
5181 + unsigned int FPORT : 3;
5182 + unsigned int TSO : 1;
5183 + unsigned int TUI_CO : 3;
5185 + unsigned int VPRI_VIDX : 8;
5186 + unsigned int SIDX : 4;
5187 + unsigned int INSP : 1;
5188 + unsigned int RESV : 1;
5189 + unsigned int UN_USE3 : 2;
5190 + unsigned int QN : 3;
5191 + unsigned int UN_USE2 : 1;
5192 + unsigned int UDF : 4;
5193 + unsigned int PN : 3;
5194 + unsigned int UN_USE1 : 1;
5195 + unsigned int TSO : 1;
5196 + unsigned int TUI_CO : 3;
5201 +struct PDMA_txdesc {
5202 + PDMA_TXD_INFO1_T txd_info1;
5203 + PDMA_TXD_INFO2_T txd_info2;
5204 + PDMA_TXD_INFO3_T txd_info3;
5205 + PDMA_TXD_INFO4_T txd_info4;
5206 +#ifdef CONFIG_32B_DESC
5207 + unsigned int txd_info5;
5208 + unsigned int txd_info6;
5209 + unsigned int txd_info7;
5210 + unsigned int txd_info8;
5215 +#if defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
5216 +/*=========================================
5217 + QDMA TX Descriptor Format define
5218 +=========================================*/
5219 +//-------------------------------------------------
5220 +typedef struct _QDMA_TXD_INFO1_ QDMA_TXD_INFO1_T;
5222 +struct _QDMA_TXD_INFO1_
5226 +//-------------------------------------------------
5227 +typedef struct _QDMA_TXD_INFO2_ QDMA_TXD_INFO2_T;
5229 +struct _QDMA_TXD_INFO2_
5233 +//-------------------------------------------------
5234 +typedef struct _QDMA_TXD_INFO3_ QDMA_TXD_INFO3_T;
5236 +struct _QDMA_TXD_INFO3_
5238 + unsigned int QID : 4;
5239 +#if defined (CONFIG_HW_SFQ)
5240 + //unsigned int VQID : 10;
5241 + unsigned int PROT : 3;
5242 + unsigned int IPOFST : 7;
5244 + unsigned int RESV : 10;
5246 + unsigned int SWC_bit : 1;
5247 + unsigned int BURST_bit : 1;
5248 + unsigned int SDL : 14;
5249 + unsigned int LS_bit : 1;
5250 + unsigned int OWN_bit : 1;
5252 +//-------------------------------------------------
5253 +typedef struct _QDMA_TXD_INFO4_ QDMA_TXD_INFO4_T;
5255 +struct _QDMA_TXD_INFO4_
5257 + unsigned int VLAN_TAG :17; // INSV(1)+VPRI(3)+CFI(1)+VID(12)
5258 +#if defined (CONFIG_RALINK_MT7621)
5259 + unsigned int RESV : 2;
5260 + unsigned int UDF : 6;
5261 +#elif defined(CONFIG_ARCH_MT7623)
5262 + unsigned int VQID0 : 1;
5263 + unsigned int RESV : 7;
5265 + unsigned int FPORT : 3;
5266 + unsigned int TSO : 1;
5267 + unsigned int TUI_CO : 3;
5271 +struct QDMA_txdesc {
5272 + QDMA_TXD_INFO1_T txd_info1;
5273 + QDMA_TXD_INFO2_T txd_info2;
5274 + QDMA_TXD_INFO3_T txd_info3;
5275 + QDMA_TXD_INFO4_T txd_info4;
5276 +#ifdef CONFIG_32B_DESC
5277 + unsigned int txd_info5;
5278 + unsigned int txd_info6;
5279 + unsigned int txd_info7;
5280 + unsigned int txd_info8;
5285 +#if defined (CONFIG_ARCH_MT7623)
5286 +#define phys_to_bus(a) (a)
5288 +#define phys_to_bus(a) (a & 0x1FFFFFFF)
5291 +#define PHY_Enable_Auto_Nego 0x1000
5292 +#define PHY_Restart_Auto_Nego 0x0200
5294 +/* PHY_STAT_REG = 1; */
5295 +#define PHY_Auto_Neco_Comp 0x0020
5296 +#define PHY_Link_Status 0x0004
5298 +/* PHY_AUTO_NEGO_REG = 4; */
5299 +#define PHY_Cap_10_Half 0x0020
5300 +#define PHY_Cap_10_Full 0x0040
5301 +#define PHY_Cap_100_Half 0x0080
5302 +#define PHY_Cap_100_Full 0x0100
5304 +/* proc definition */
5306 +#if !defined (CONFIG_RALINK_RT6855) && !defined(CONFIG_RALINK_RT6855A) && \
5307 + !defined (CONFIG_RALINK_MT7620) && !defined (CONFIG_RALINK_MT7621) && \
5308 + !defined (CONFIG_ARCH_MT7623)
5309 +#define CDMA_OQ_STA (RALINK_FRAME_ENGINE_BASE+RAPSE_OFFSET+0x4c)
5310 +#define GDMA1_OQ_STA (RALINK_FRAME_ENGINE_BASE+RAPSE_OFFSET+0x50)
5311 +#define PPE_OQ_STA (RALINK_FRAME_ENGINE_BASE+RAPSE_OFFSET+0x54)
5312 +#define PSE_IQ_STA (RALINK_FRAME_ENGINE_BASE+RAPSE_OFFSET+0x58)
5315 +#define PROCREG_CONTROL_FILE "/var/run/procreg_control"
5316 +#if defined (CONFIG_RALINK_RT2880)
5317 +#define PROCREG_DIR "rt2880"
5318 +#elif defined (CONFIG_RALINK_RT3052)
5319 +#define PROCREG_DIR "rt3052"
5320 +#elif defined (CONFIG_RALINK_RT3352)
5321 +#define PROCREG_DIR "rt3352"
5322 +#elif defined (CONFIG_RALINK_RT5350)
5323 +#define PROCREG_DIR "rt5350"
5324 +#elif defined (CONFIG_RALINK_RT2883)
5325 +#define PROCREG_DIR "rt2883"
5326 +#elif defined (CONFIG_RALINK_RT3883)
5327 +#define PROCREG_DIR "rt3883"
5328 +#elif defined (CONFIG_RALINK_RT6855)
5329 +#define PROCREG_DIR "rt6855"
5330 +#elif defined (CONFIG_RALINK_MT7620)
5331 +#define PROCREG_DIR "mt7620"
5332 +#elif defined (CONFIG_RALINK_MT7621)
5333 +#define PROCREG_DIR "mt7621"
5334 +#elif defined (CONFIG_ARCH_MT7623)
5335 +#define PROCREG_DIR "mt7623"
5336 +#elif defined (CONFIG_RALINK_MT7628)
5337 +#define PROCREG_DIR "mt7628"
5338 +#elif defined (CONFIG_RALINK_RT6855A)
5339 +#define PROCREG_DIR "rt6855a"
5341 +#define PROCREG_DIR "rt2880"
5343 +#define PROCREG_SKBFREE "skb_free"
5344 +#define PROCREG_TXRING "tx_ring"
5345 +#define PROCREG_RXRING "rx_ring"
5346 +#define PROCREG_RXRING1 "rx_ring1"
5347 +#define PROCREG_RXRING2 "rx_ring2"
5348 +#define PROCREG_RXRING3 "rx_ring3"
5349 +#define PROCREG_NUM_OF_TXD "num_of_txd"
5350 +#define PROCREG_TSO_LEN "tso_len"
5351 +#define PROCREG_LRO_STATS "lro_stats"
5352 +#define PROCREG_HW_LRO_STATS "hw_lro_stats"
5353 +#define PROCREG_HW_LRO_AUTO_TLB "hw_lro_auto_tlb"
5354 +#define PROCREG_GMAC "gmac"
5355 +#define PROCREG_GMAC2 "gmac2"
5356 +#define PROCREG_CP0 "cp0"
5357 +#define PROCREG_RAQOS "qos"
5358 +#define PROCREG_READ_VAL "regread_value"
5359 +#define PROCREG_WRITE_VAL "regwrite_value"
5360 +#define PROCREG_ADDR "reg_addr"
5361 +#define PROCREG_CTL "procreg_control"
5362 +#define PROCREG_RXDONE_INTR "rxdone_intr_count"
5363 +#define PROCREG_ESW_INTR "esw_intr_count"
5364 +#define PROCREG_ESW_CNT "esw_cnt"
5365 +#define PROCREG_SNMP "snmp"
5366 +#if defined (TASKLET_WORKQUEUE_SW)
5367 +#define PROCREG_SCHE "schedule"
5369 +#define PROCREG_QDMA "qdma"
5370 +#if defined(CONFIG_RAETH_PDMA_DVT)
5371 +#define PROCREG_PDMA_DVT "pdma_dvt"
5372 +#endif //#if defined(CONFIG_RAETH_PDMA_DVT)
5373 +struct rt2880_reg_op_data {
5375 + unsigned int reg_addr;
5377 + unsigned int reg_value;
5380 +#ifdef CONFIG_RAETH_LRO
5381 +struct lro_counters {
5382 + u32 lro_aggregated;
5387 +struct lro_para_struct {
5388 + unsigned int lan_ip1;
5391 +#endif // CONFIG_RAETH_LRO //
5394 +#if defined (CONFIG_HW_SFQ)
5401 + uint16_t vlan_tag;
5402 + uint16_t vlan1_gap;
5404 + uint16_t vlan2_gap;
5406 + uint16_t vlan_layer;
5409 + uint32_t pppoe_gap;
5411 + uint16_t pppoe_sid;
5414 + uint16_t eth_type;
5416 + struct ipv6hdr ip6h;
5422 + uint32_t pkt_type;
5427 +typedef struct end_device
5430 + unsigned int tx_cpu_owner_idx0;
5431 + unsigned int rx_cpu_owner_idx0;
5432 + unsigned int fe_int_status;
5433 + unsigned int tx_full;
5435 +#if !defined (CONFIG_RAETH_QDMA)
5436 + unsigned int phy_tx_ring0;
5439 + struct sk_buff *free_skb[NUM_TX_DESC];
5440 + unsigned int tx_dma_ptr;
5441 + unsigned int tx_cpu_ptr;
5442 + unsigned int free_txd_num;
5443 + unsigned int free_txd_head;
5444 + unsigned int free_txd_tail;
5445 + struct QDMA_txdesc *txd_pool;
5446 + dma_addr_t phy_txd_pool;
5447 + unsigned int txd_pool_info[NUM_TX_DESC];
5448 + struct QDMA_txdesc *free_head;
5449 + unsigned int phy_free_head;
5450 + unsigned int *free_page_head;
5451 + unsigned int phy_free_page_head;
5452 + struct PDMA_rxdesc *qrx_ring;
5453 + unsigned int phy_qrx_ring;
5454 +#ifdef CONFIG_RAETH_PDMATX_QDMARX /* QDMA RX */
5455 + unsigned int phy_tx_ring0;
5459 + unsigned int phy_rx_ring0, phy_rx_ring1, phy_rx_ring2, phy_rx_ring3;
5461 +#if defined (CONFIG_RALINK_RT3052) || defined (CONFIG_RALINK_RT3352) || \
5462 + defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_RT6855) || \
5463 + defined(CONFIG_RALINK_RT6855A) || defined (CONFIG_RALINK_MT7620) || \
5464 + defined(CONFIG_RALINK_MT7621) || defined (CONFIG_RALINK_MT7628) || \
5465 + defined (CONFIG_ARCH_MT7623)
5466 + //send signal to user application to notify link status changed
5467 + struct work_struct kill_sig_wq;
5470 + struct work_struct reset_task;
5471 +#ifdef WORKQUEUE_BH
5472 + struct work_struct rx_wq;
5474 +#if defined (TASKLET_WORKQUEUE_SW)
5475 + struct work_struct rx_wq;
5477 + struct tasklet_struct rx_tasklet;
5478 + struct tasklet_struct tx_tasklet;
5479 +#endif // WORKQUEUE_BH //
5481 +#if defined(CONFIG_RAETH_QOS)
5482 + struct sk_buff * skb_free[NUM_TX_RINGS][NUM_TX_DESC];
5483 + unsigned int free_idx[NUM_TX_RINGS];
5485 + struct sk_buff* skb_free[NUM_TX_DESC];
5486 + unsigned int free_idx;
5489 + struct net_device_stats stat; /* The new statistics table. */
5490 + spinlock_t page_lock; /* Page register locks */
5491 + struct PDMA_txdesc *tx_ring0;
5492 +#if defined(CONFIG_RAETH_QOS)
5493 + struct PDMA_txdesc *tx_ring1;
5494 + struct PDMA_txdesc *tx_ring2;
5495 + struct PDMA_txdesc *tx_ring3;
5497 + struct PDMA_rxdesc *rx_ring0;
5498 + struct sk_buff *netrx0_skbuf[NUM_RX_DESC];
5499 +#if defined (CONFIG_RAETH_HW_LRO)
5500 + struct PDMA_rxdesc *rx_ring3;
5501 + struct sk_buff *netrx3_skbuf[NUM_RX_DESC];
5502 + struct PDMA_rxdesc *rx_ring2;
5503 + struct sk_buff *netrx2_skbuf[NUM_RX_DESC];
5504 + struct PDMA_rxdesc *rx_ring1;
5505 + struct sk_buff *netrx1_skbuf[NUM_RX_DESC];
5506 +#elif defined (CONFIG_RAETH_MULTIPLE_RX_RING)
5507 + struct PDMA_rxdesc *rx_ring1;
5508 + struct sk_buff *netrx1_skbuf[NUM_RX_DESC];
5509 +#if defined(CONFIG_ARCH_MT7623)
5510 + struct PDMA_rxdesc *rx_ring2;
5511 + struct sk_buff *netrx2_skbuf[NUM_RX_DESC];
5512 + struct PDMA_rxdesc *rx_ring3;
5513 + struct sk_buff *netrx3_skbuf[NUM_RX_DESC];
5514 +#endif /* CONFIG_ARCH_MT7623 */
5516 +#ifdef CONFIG_RAETH_NAPI
5518 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
5519 + struct napi_struct napi;
5522 +#ifdef CONFIG_PSEUDO_SUPPORT
5523 + struct net_device *PseudoDev;
5524 + unsigned int isPseudo;
5526 +#if defined (CONFIG_ETHTOOL) /*&& defined (CONFIG_RAETH_ROUTER)*/
5527 + struct mii_if_info mii_info;
5529 +#ifdef CONFIG_RAETH_LRO
5530 + struct lro_counters lro_counters;
5531 + struct net_lro_mgr lro_mgr;
5532 + struct net_lro_desc lro_arr[8];
5534 +#ifdef CONFIG_RAETH_HW_VLAN_RX
5535 + struct vlan_group *vlgrp;
5537 +#if defined (CONFIG_RAETH_HW_LRO)
5538 + struct work_struct hw_lro_wq;
5539 + unsigned int hw_lro_pkt_interval[3];
5540 + unsigned int hw_lro_alpha; /* 0 < packet interval alpha <= 10 */
5541 + unsigned int hw_lro_fix_setting; /* 0: dynamical AGG/AGE time, 1: fixed AGG/AGE time */
5542 +#endif /* CONFIG_RAETH_HW_LRO */
5543 +} END_DEVICE, *pEND_DEVICE;
5546 +#define RAETH_VERSION "v3.1"
5550 +#define DMA_GLO_CFG PDMA_GLO_CFG
5552 +#if defined(CONFIG_RAETH_QDMATX_QDMARX)
5553 +#define GDMA1_FWD_PORT 0x5555
5554 +#define GDMA2_FWD_PORT 0x5555
5555 +#elif defined(CONFIG_RAETH_PDMATX_QDMARX)
5556 +#define GDMA1_FWD_PORT 0x5555
5557 +#define GDMA2_FWD_PORT 0x5555
5559 +#define GDMA1_FWD_PORT 0x0000
5560 +#define GDMA2_FWD_PORT 0x0000
5563 +#if defined(CONFIG_RAETH_QDMATX_QDMARX)
5564 +#define RAETH_RX_CALC_IDX0 QRX_CRX_IDX_0
5565 +#define RAETH_RX_CALC_IDX1 QRX_CRX_IDX_1
5566 +#elif defined(CONFIG_RAETH_PDMATX_QDMARX)
5567 +#define RAETH_RX_CALC_IDX0 QRX_CRX_IDX_0
5568 +#define RAETH_RX_CALC_IDX1 QRX_CRX_IDX_1
5570 +#define RAETH_RX_CALC_IDX0 RX_CALC_IDX0
5571 +#define RAETH_RX_CALC_IDX1 RX_CALC_IDX1
5573 +#define RAETH_RX_CALC_IDX2 RX_CALC_IDX2
5574 +#define RAETH_RX_CALC_IDX3 RX_CALC_IDX3
5575 +#define RAETH_FE_INT_STATUS FE_INT_STATUS
5576 +#define RAETH_FE_INT_ALL FE_INT_ALL
5577 +#define RAETH_FE_INT_ENABLE FE_INT_ENABLE
5578 +#define RAETH_FE_INT_DLY_INIT FE_INT_DLY_INIT
5579 +#define RAETH_FE_INT_SETTING RX_DONE_INT0 | RX_DONE_INT1 | TX_DONE_INT0 | TX_DONE_INT1 | TX_DONE_INT2 | TX_DONE_INT3
5580 +#define QFE_INT_SETTING RX_DONE_INT0 | RX_DONE_INT1 | TX_DONE_INT0 | TX_DONE_INT1 | TX_DONE_INT2 | TX_DONE_INT3
5581 +#define RAETH_TX_DLY_INT TX_DLY_INT
5582 +#define RAETH_TX_DONE_INT0 TX_DONE_INT0
5583 +#define RAETH_DLY_INT_CFG DLY_INT_CFG
5584 diff --git a/drivers/net/ethernet/raeth/ra_ethtool.c b/drivers/net/ethernet/raeth/ra_ethtool.c
5585 new file mode 100644
5586 index 0000000..ff13e59
5588 +++ b/drivers/net/ethernet/raeth/ra_ethtool.c
5590 +#include <linux/module.h>
5591 +#include <linux/version.h>
5593 +#include <linux/kernel.h>
5594 +#include <linux/sched.h>
5596 +#include <linux/netdevice.h>
5597 +#include <linux/etherdevice.h>
5598 +#include <linux/skbuff.h>
5599 +#include <linux/if_ether.h>
5600 +#include <linux/ethtool.h>
5602 +#include "ra2882ethreg.h"
5603 +#include "raether.h"
5604 +#include "ra_mac.h"
5605 +#include "ra_ethtool.h"
5607 +#define RAETHER_DRIVER_NAME "raether"
5608 +#define RA_NUM_STATS 4
5612 + const char str[ETH_GSTRING_LEN];
5613 +} ethtool_stats_keys[] = {
5620 +unsigned char get_current_phy_address(void)
5622 + struct net_device *cur_dev_p;
5623 + END_DEVICE *ei_local;
5625 + for(cur_dev_p=dev_base; cur_dev_p!=NULL; cur_dev_p=cur_dev_p->next){
5626 + if (strncmp(cur_dev_p->name, DEV_NAME /* "eth2" usually */, 4) == 0)
5630 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
5631 + cur_dev_p = dev_get_by_name(&init_net, DEV_NAME);
5633 + cur_dev_p = dev_get_by_name(DEV_NAME);
5638 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
5639 + ei_local = netdev_priv(cur_dev_p);
5641 + ei_local = cur_dev_p->priv;
5643 + return ei_local->mii_info.phy_id;
5646 +static u32 et_get_tx_csum(struct net_device *dev)
5648 + return (sysRegRead(GDMA1_FWD_CFG) & GDM1_DISCRC) ? 0 : 1; // a pitfall here, "0" means to enable.
5651 +static u32 et_get_rx_csum(struct net_device *dev)
5653 + return (sysRegRead(GDMA1_FWD_CFG) & GDM1_STRPCRC) ? 1 : 0;
5656 +static int et_set_tx_csum(struct net_device *dev, u32 data)
5659 + //printk("et_set_tx_csum(): data = %d\n", data);
5661 + value = sysRegRead(GDMA1_FWD_CFG);
5663 + value |= GDM1_DISCRC;
5665 + value &= ~GDM1_DISCRC;
5667 + sysRegWrite(GDMA1_FWD_CFG, value);
5671 +static int et_set_rx_csum(struct net_device *dev, u32 data)
5674 + //printk("et_set_rx_csum(): data = %d\n", data);
5676 + value = sysRegRead(GDMA1_FWD_CFG);
5678 + value |= GDM1_STRPCRC;
5680 + value &= ~GDM1_STRPCRC;
5682 + sysRegWrite(GDMA1_FWD_CFG, value);
5687 +#define MII_CR_ADDR 0x00
5688 +#define MII_CR_MR_AUTONEG_ENABLE (1 << 12)
5689 +#define MII_CR_MR_RESTART_NEGOTIATION (1 << 9)
5691 +#define AUTO_NEGOTIATION_ADVERTISEMENT 0x04
5692 +#define AN_PAUSE (1 << 10)
5694 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
5695 +static void et_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5699 + END_DEVICE *ei_local = dev->priv;
5701 + // get mii auto-negotiation register
5702 + mii_mgr_read(ei_local->mii_info.phy_id, AUTO_NEGOTIATION_ADVERTISEMENT, &mii_an_reg);
5703 + epause->autoneg = (mii_an_reg & AN_PAUSE) ? 1 : 0; //get autonet_enable flag bit
5705 + mdio_cfg_reg = sysRegRead(MDIO_CFG);
5706 + epause->tx_pause = (mdio_cfg_reg & MDIO_CFG_GP1_FC_TX) ? 1 : 0;
5707 + epause->rx_pause = (mdio_cfg_reg & MDIO_CFG_GP1_FC_RX) ? 1 : 0;
5709 + //printk("et_get_pauseparam(): autoneg=%d, tx_pause=%d, rx_pause=%d\n", epause->autoneg, epause->tx_pause, epause->rx_pause);
5712 +static int et_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5716 + END_DEVICE *ei_local = dev->priv;
5718 + //printk("et_set_pauseparam(): autoneg=%d, tx_pause=%d, rx_pause=%d\n", epause->autoneg, epause->tx_pause, epause->rx_pause);
5721 + mii_mgr_read(ei_local->mii_info.phy_id, AUTO_NEGOTIATION_ADVERTISEMENT, &mii_an_reg);
5722 + if(epause->autoneg)
5723 + mii_an_reg |= AN_PAUSE;
5725 + mii_an_reg &= ~AN_PAUSE;
5726 + mii_mgr_write(ei_local->mii_info.phy_id, AUTO_NEGOTIATION_ADVERTISEMENT, mii_an_reg);
5729 + mdio_cfg_reg = sysRegRead(MDIO_CFG);
5730 + if(epause->tx_pause)
5731 + mdio_cfg_reg |= MDIO_CFG_GP1_FC_TX;
5733 + mdio_cfg_reg &= ~MDIO_CFG_GP1_FC_TX;
5734 + if(epause->rx_pause)
5735 + mdio_cfg_reg |= MDIO_CFG_GP1_FC_RX;
5737 + mdio_cfg_reg &= ~MDIO_CFG_GP1_FC_RX;
5738 + sysRegWrite(MDIO_CFG, mdio_cfg_reg);
5743 +static int et_nway_reset(struct net_device *dev)
5745 + END_DEVICE *ei_local = dev->priv;
5746 + return mii_nway_restart(&ei_local->mii_info);
5750 +static u32 et_get_link(struct net_device *dev)
5752 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
5753 + END_DEVICE *ei_local = netdev_priv(dev);
5755 + END_DEVICE *ei_local = dev->priv;
5757 + return mii_link_ok(&ei_local->mii_info);
5760 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
5761 +static int et_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5763 + END_DEVICE *ei_local = dev->priv;
5765 + rc = mii_ethtool_sset(&ei_local->mii_info, cmd);
5770 +static int et_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5772 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
5773 + END_DEVICE *ei_local = netdev_priv(dev);
5775 + END_DEVICE *ei_local = dev->priv;
5777 + mii_ethtool_gset(&ei_local->mii_info, cmd);
5781 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
5782 +static u32 et_get_msglevel(struct net_device *dev)
5787 +static void et_set_msglevel(struct net_device *dev, u32 datum)
5792 +static void et_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
5794 + //END_DEVICE *ei_local = dev->priv;
5795 + strcpy(info->driver, RAETHER_DRIVER_NAME);
5796 + strcpy(info->version, RAETH_VERSION);
5797 + strcpy(info->bus_info, "n/a");
5798 + info->n_stats = RA_NUM_STATS;
5799 + info->eedump_len = 0;
5800 + info->regdump_len = 0;
5803 +static int et_get_stats_count(struct net_device *dev)
5805 + return RA_NUM_STATS;
5808 +static void et_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data)
5810 +// END_DEVICE *ei_local = dev->priv;
5811 + data[0] = 0;//np->xstats.early_rx;
5812 + data[1] = 0;//np->xstats.tx_buf_mapped;
5813 + data[2] = 0;//np->xstats.tx_timeouts;
5814 + data[3] = 0;//np->xstats.rx_lost_in_ring;
5817 +static void et_get_strings(struct net_device *dev, u32 stringset, u8 *data)
5819 + memcpy(data, ethtool_stats_keys, sizeof(ethtool_stats_keys));
5824 + * mii_mgr_read wrapper for mii.o ethtool
5826 +int mdio_read(struct net_device *dev, int phy_id, int location)
5828 + unsigned int result;
5829 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
5830 + END_DEVICE *ei_local = netdev_priv(dev);
5832 + END_DEVICE *ei_local = dev->priv;
5834 + mii_mgr_read( (unsigned int) ei_local->mii_info.phy_id, (unsigned int)location, &result);
5835 + //printk("\n%s mii.o query= phy_id:%d, address:%d retval:%x\n", dev->name, phy_id, location, result);
5836 + return (int)result;
5840 + * mii_mgr_write wrapper for mii.o ethtool
5842 +void mdio_write(struct net_device *dev, int phy_id, int location, int value)
5844 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
5845 + END_DEVICE *ei_local = netdev_priv(dev);
5847 + END_DEVICE *ei_local = dev->priv;
5849 + //printk("mii.o write= phy_id:%d, address:%d value:%x\n", phy_id, location, value);
5850 + mii_mgr_write( (unsigned int) ei_local->mii_info.phy_id, (unsigned int)location, (unsigned int)value);
5854 +struct ethtool_ops ra_ethtool_ops = {
5856 + .get_settings = et_get_settings,
5857 + .get_link = et_get_link,
5858 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
5859 + .get_drvinfo = et_get_drvinfo,
5860 + .set_settings = et_set_settings,
5861 + .get_pauseparam = et_get_pauseparam,
5862 + .set_pauseparam = et_set_pauseparam,
5863 +// .get_rx_csum = et_get_rx_csum,
5864 +// .set_rx_csum = et_set_rx_csum,
5865 +// .get_tx_csum = et_get_tx_csum,
5866 +// .set_tx_csum = et_set_tx_csum,
5867 + .nway_reset = et_nway_reset,
5868 + .get_msglevel = et_get_msglevel,
5869 + .set_msglevel = et_set_msglevel,
5870 + .get_strings = et_get_strings,
5871 + .get_stats_count = et_get_stats_count,
5872 + .get_ethtool_stats = et_get_ethtool_stats,
5873 +/* .get_regs_len = et_get_regs_len,
5874 + .get_regs = et_get_regs,
5879 +#ifdef CONFIG_PSEUDO_SUPPORT
5881 + * We are unable to reuse the raether functions here because it is hard to
5882 + * tell where the call originates from: eth2 or eth3?
5884 + * This code adds roughly 950 bytes.
5886 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
5887 +static void et_virt_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
5889 + // PSEUDO_ADAPTER *pseudo = dev->priv;
5890 + return et_get_drvinfo(dev, info);
5893 +static void et_virt_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5895 + int mii_an_reg, mdio_cfg_reg;
5896 + PSEUDO_ADAPTER *pseudo = dev->priv;
5898 + // get mii auto-negotiation register
5899 + mii_mgr_read(pseudo->mii_info.phy_id, AUTO_NEGOTIATION_ADVERTISEMENT, &mii_an_reg);
5900 + epause->autoneg = (mii_an_reg & AN_PAUSE) ? 1 : 0; //get autonet_enable flag bit
5902 + mdio_cfg_reg = sysRegRead(MDIO_CFG);
5903 + epause->tx_pause = (mdio_cfg_reg & MDIO_CFG_GP1_FC_TX) ? 1 : 0;
5904 + epause->rx_pause = (mdio_cfg_reg & MDIO_CFG_GP1_FC_RX) ? 1 : 0;
5906 + //printk("et_get_pauseparam(): autoneg=%d, tx_pause=%d, rx_pause=%d\n", epause->autoneg, epause->tx_pause, epause->rx_pause);
5909 +static int et_virt_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5913 + PSEUDO_ADAPTER *pseudo = dev->priv;
5915 + //printk("et_set_pauseparam(): autoneg=%d, tx_pause=%d, rx_pause=%d\n", epause->autoneg, epause->tx_pause, epause->rx_pause);
5917 + mii_mgr_read(pseudo->mii_info.phy_id, AUTO_NEGOTIATION_ADVERTISEMENT, &mii_an_reg);
5918 + if(epause->autoneg)
5919 + mii_an_reg |= AN_PAUSE;
5921 + mii_an_reg &= ~AN_PAUSE;
5922 + mii_mgr_write(pseudo->mii_info.phy_id, AUTO_NEGOTIATION_ADVERTISEMENT, mii_an_reg);
5925 + mdio_cfg_reg = sysRegRead(MDIO_CFG);
5926 + if(epause->tx_pause)
5927 + mdio_cfg_reg |= MDIO_CFG_GP1_FC_TX;
5929 + mdio_cfg_reg &= ~MDIO_CFG_GP1_FC_TX;
5930 + if(epause->rx_pause)
5931 + mdio_cfg_reg |= MDIO_CFG_GP1_FC_RX;
5933 + mdio_cfg_reg &= ~MDIO_CFG_GP1_FC_RX;
5934 + sysRegWrite(MDIO_CFG, mdio_cfg_reg);
5939 +static u32 et_virt_get_tx_csum(struct net_device *dev)
5941 + return (sysRegRead(GDMA2_FWD_CFG) & GDM1_DISCRC) ? 0 : 1; // a pitfall here, "0" means to enable.
5944 +static u32 et_virt_get_rx_csum(struct net_device *dev)
5946 + return (sysRegRead(GDMA2_FWD_CFG) & GDM1_STRPCRC) ? 1 : 0;
5949 +static int et_virt_set_tx_csum(struct net_device *dev, u32 data)
5952 + //printk("et_set_tx_csum(): data = %d\n", data);
5953 + value = sysRegRead(GDMA2_FWD_CFG);
5955 + value |= GDM1_DISCRC;
5957 + value &= ~GDM1_DISCRC;
5958 + sysRegWrite(GDMA1_FWD_CFG, value);
5962 +static int et_virt_set_rx_csum(struct net_device *dev, u32 data)
5965 + //printk("et_set_rx_csum(): data = %d\n", data);
5966 + value = sysRegRead(GDMA2_FWD_CFG);
5968 + value |= GDM1_STRPCRC;
5970 + value &= ~GDM1_STRPCRC;
5971 + sysRegWrite(GDMA1_FWD_CFG, value);
5975 +static int et_virt_nway_reset(struct net_device *dev)
5977 + PSEUDO_ADAPTER *pseudo = dev->priv;
5978 + return mii_nway_restart(&pseudo->mii_info);
5982 +static u32 et_virt_get_link(struct net_device *dev)
5984 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
5985 + PSEUDO_ADAPTER *pseudo = netdev_priv(dev);
5987 + PSEUDO_ADAPTER *pseudo = dev->priv;
5989 + return mii_link_ok(&pseudo->mii_info);
5992 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
5993 +static int et_virt_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5995 + PSEUDO_ADAPTER *pseudo = dev->priv;
5996 + int rc = mii_ethtool_sset(&pseudo->mii_info, cmd);
6001 +static int et_virt_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6003 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
6004 + PSEUDO_ADAPTER *pseudo = netdev_priv(dev);
6006 + PSEUDO_ADAPTER *pseudo = dev->priv;
6008 + mii_ethtool_gset(&pseudo->mii_info, cmd);
6011 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
6012 +static u32 et_virt_get_msglevel(struct net_device *dev)
6017 +static void et_virt_set_msglevel(struct net_device *dev, u32 datum)
6022 +static void et_virt_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data)
6024 +// PSEUDO_ADAPTER *pseudo = dev->priv;
6025 + data[0] = 0;//np->xstats.early_rx;
6026 + data[1] = 0;//np->xstats.tx_buf_mapped;
6027 + data[2] = 0;//np->xstats.tx_timeouts;
6028 + data[3] = 0;//np->xstats.rx_lost_in_ring;
6031 +/* for virtual interface dedicated */
6032 +#define RA_VIRT_NUM_STATS 4
6034 + const char str[ETH_GSTRING_LEN];
6035 +} ethtool_stats_keys_2[] = {
6042 +static int et_virt_get_stats_count(struct net_device *dev)
6044 + return RA_VIRT_NUM_STATS;
6047 +static void et_virt_get_strings(struct net_device *dev, u32 stringset, u8 *data)
6049 + memcpy(data, ethtool_stats_keys_2, sizeof(ethtool_stats_keys_2));
6053 +struct ethtool_ops ra_virt_ethtool_ops = {
6054 + .get_settings = et_virt_get_settings,
6055 + .get_link = et_virt_get_link,
6056 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
6057 + .get_drvinfo = et_virt_get_drvinfo,
6058 + .set_settings = et_virt_set_settings,
6059 + .get_pauseparam = et_virt_get_pauseparam,
6060 + .set_pauseparam = et_virt_set_pauseparam,
6061 + .get_rx_csum = et_virt_get_rx_csum,
6062 + .set_rx_csum = et_virt_set_rx_csum,
6063 + .get_tx_csum = et_virt_get_tx_csum,
6064 + .set_tx_csum = et_virt_set_tx_csum,
6065 + .nway_reset = et_virt_nway_reset,
6066 + .get_msglevel = et_virt_get_msglevel,
6067 + .set_msglevel = et_virt_set_msglevel,
6068 + .get_strings = et_virt_get_strings,
6069 + .get_stats_count = et_virt_get_stats_count,
6070 + .get_ethtool_stats = et_virt_get_ethtool_stats,
6071 +/* .get_regs_len = et_virt_get_regs_len,
6072 + .get_regs = et_virt_get_regs,
6077 +int mdio_virt_read(struct net_device *dev, int phy_id, int location)
6079 + unsigned int result;
6080 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
6081 + PSEUDO_ADAPTER *pseudo = netdev_priv(dev);
6083 + PSEUDO_ADAPTER *pseudo = dev->priv;
6085 + mii_mgr_read( (unsigned int) pseudo->mii_info.phy_id, (unsigned int)location, &result);
6086 +// printk("%s mii.o query= phy_id:%d, address:%d retval:%d\n", dev->name, phy_id, location, result);
6087 + return (int)result;
6090 +void mdio_virt_write(struct net_device *dev, int phy_id, int location, int value)
6092 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
6093 + PSEUDO_ADAPTER *pseudo = netdev_priv(dev);
6095 + PSEUDO_ADAPTER *pseudo = dev->priv;
6097 +// printk("mii.o write= phy_id:%d, address:%d value:%d\n", phy_id, location, value);
6098 + mii_mgr_write( (unsigned int) pseudo->mii_info.phy_id, (unsigned int)location, (unsigned int)value);
6102 +#endif /* CONFIG_PSEUDO_SUPPORT */
6105 diff --git a/drivers/net/ethernet/raeth/ra_ethtool.h b/drivers/net/ethernet/raeth/ra_ethtool.h
6106 new file mode 100644
6107 index 0000000..d64a1ab
6109 +++ b/drivers/net/ethernet/raeth/ra_ethtool.h
6111 +#ifndef RA_ETHTOOL_H
6112 +#define RA_ETHTOOL_H
6114 +/* ethtool related */
6115 +unsigned char get_current_phy_address(void);
6116 +int mdio_read(struct net_device *dev, int phy_id, int location);
6117 +void mdio_write(struct net_device *dev, int phy_id, int location, int value);
6119 +/* for pseudo interface */
6120 +int mdio_virt_read(struct net_device *dev, int phy_id, int location);
6121 +void mdio_virt_write(struct net_device *dev, int phy_id, int location, int value);
6124 diff --git a/drivers/net/ethernet/raeth/ra_ioctl.h b/drivers/net/ethernet/raeth/ra_ioctl.h
6125 new file mode 100644
6126 index 0000000..83b806a
6128 +++ b/drivers/net/ethernet/raeth/ra_ioctl.h
6130 +#ifndef _RAETH_IOCTL_H
6131 +#define _RAETH_IOCTL_H
6133 +/* ioctl commands */
6134 +#define RAETH_ESW_REG_READ 0x89F1
6135 +#define RAETH_ESW_REG_WRITE 0x89F2
6136 +#define RAETH_MII_READ 0x89F3
6137 +#define RAETH_MII_WRITE 0x89F4
6138 +#define RAETH_ESW_INGRESS_RATE 0x89F5
6139 +#define RAETH_ESW_EGRESS_RATE 0x89F6
6140 +#define RAETH_ESW_PHY_DUMP 0x89F7
6141 +#define RAETH_QDMA_REG_READ 0x89F8
6142 +#define RAETH_QDMA_REG_WRITE 0x89F9
6143 +#define RAETH_QDMA_QUEUE_MAPPING 0x89FA
6144 +#define RAETH_QDMA_READ_CPU_CLK 0x89FB
6145 +#define RAETH_MII_READ_CL45 0x89FC
6146 +#define RAETH_MII_WRITE_CL45 0x89FD
6147 +#if defined(CONFIG_HW_SFQ)
6148 +#define RAETH_QDMA_SFQ_WEB_ENABLE 0x89FE
6151 +#if defined (CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A) || \
6152 + defined (CONFIG_RALINK_MT7620) || defined(CONFIG_RALINK_MT7621) || \
6153 + defined (CONFIG_ARCH_MT7623)
6155 +#define REG_ESW_WT_MAC_MFC 0x10
6156 +#define REG_ESW_ISC 0x18
6157 +#define REG_ESW_WT_MAC_ATA1 0x74
6158 +#define REG_ESW_WT_MAC_ATA2 0x78
6159 +#define REG_ESW_WT_MAC_ATWD 0x7C
6160 +#define REG_ESW_WT_MAC_ATC 0x80
6162 +#define REG_ESW_TABLE_TSRA1 0x84
6163 +#define REG_ESW_TABLE_TSRA2 0x88
6164 +#define REG_ESW_TABLE_ATRD 0x8C
6167 +#define REG_ESW_VLAN_VTCR 0x90
6168 +#define REG_ESW_VLAN_VAWD1 0x94
6169 +#define REG_ESW_VLAN_VAWD2 0x98
6172 +#define REG_ESW_VLAN_ID_BASE 0x100
6174 +//#define REG_ESW_VLAN_ID_BASE 0x50
6175 +#define REG_ESW_VLAN_MEMB_BASE 0x70
6176 +#define REG_ESW_TABLE_SEARCH 0x24
6177 +#define REG_ESW_TABLE_STATUS0 0x28
6178 +#define REG_ESW_TABLE_STATUS1 0x2C
6179 +#define REG_ESW_TABLE_STATUS2 0x30
6180 +#define REG_ESW_WT_MAC_AD0 0x34
6181 +#define REG_ESW_WT_MAC_AD1 0x38
6182 +#define REG_ESW_WT_MAC_AD2 0x3C
6185 +/* rt3052 embedded ethernet switch registers */
6186 +#define REG_ESW_VLAN_ID_BASE 0x50
6187 +#define REG_ESW_VLAN_MEMB_BASE 0x70
6188 +#define REG_ESW_TABLE_SEARCH 0x24
6189 +#define REG_ESW_TABLE_STATUS0 0x28
6190 +#define REG_ESW_TABLE_STATUS1 0x2C
6191 +#define REG_ESW_TABLE_STATUS2 0x30
6192 +#define REG_ESW_WT_MAC_AD0 0x34
6193 +#define REG_ESW_WT_MAC_AD1 0x38
6194 +#define REG_ESW_WT_MAC_AD2 0x3C
6198 +#if defined(CONFIG_RALINK_RT3352) || defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_MT7628)
6199 +#define REG_ESW_MAX 0x16C
6200 +#elif defined (CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A) || \
6201 + defined (CONFIG_RALINK_MT7620)
6202 +#define REG_ESW_MAX 0x7FFFF
6203 +#else //RT305x, RT3350
6204 +#define REG_ESW_MAX 0xFC
6206 +#define REG_HQOS_MAX 0x3FFF
6209 +typedef struct rt3052_esw_reg {
6214 +typedef struct ralink_mii_ioctl_data {
6222 +} ra_mii_ioctl_data;
6224 +typedef struct rt335x_esw_reg {
6225 + unsigned int on_off;
6226 + unsigned int port;
6227 + unsigned int bw;/*Mbps*/
6232 diff --git a/drivers/net/ethernet/raeth/ra_mac.c b/drivers/net/ethernet/raeth/ra_mac.c
6233 new file mode 100644
6234 index 0000000..e8e978d
6236 +++ b/drivers/net/ethernet/raeth/ra_mac.c
6238 +#include <linux/module.h>
6239 +#include <linux/version.h>
6240 +#include <linux/kernel.h>
6241 +#include <linux/sched.h>
6242 +#include <linux/types.h>
6243 +#include <linux/fcntl.h>
6244 +#include <linux/interrupt.h>
6245 +#include <linux/ptrace.h>
6246 +#include <linux/ioport.h>
6247 +#include <linux/in.h>
6248 +#include <linux/slab.h>
6249 +#include <linux/string.h>
6250 +#include <linux/signal.h>
6251 +#include <linux/irq.h>
6252 +#include <linux/ctype.h>
6253 +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,10,4)
6254 +#include <asm/system.h>
6255 +#include <linux/mca.h>
6257 +#include <asm/io.h>
6258 +#include <asm/bitops.h>
6259 +#include <asm/io.h>
6260 +#include <asm/dma.h>
6262 +#include <asm/rt2880/surfboardint.h> /* for cp0 reg access, added by bobtseng */
6264 +#include <linux/errno.h>
6265 +#include <linux/init.h>
6267 +#include <linux/netdevice.h>
6268 +#include <linux/etherdevice.h>
6269 +#include <linux/skbuff.h>
6271 +#include <linux/init.h>
6272 +#include <linux/module.h>
6273 +#include <linux/proc_fs.h>
6274 +#include <asm/uaccess.h>
6276 +#include <linux/seq_file.h>
6279 +#if defined(CONFIG_RAETH_LRO)
6280 +#include <linux/inet_lro.h>
6283 +#include "ra2882ethreg.h"
6284 +#include "raether.h"
6285 +#include "ra_mac.h"
6286 +#include "ra_ethtool.h"
6287 +#if defined(CONFIG_RAETH_PDMA_DVT)
6288 +#include "dvt/raether_pdma_dvt.h"
6289 +#endif //#if defined(CONFIG_RAETH_PDMA_DVT)
6291 +extern struct net_device *dev_raether;
6293 +#if defined (CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A) || \
6294 + defined (CONFIG_RALINK_MT7620)
6295 +extern unsigned short p0_rx_good_cnt;
6296 +extern unsigned short p0_tx_good_cnt;
6297 +extern unsigned short p1_rx_good_cnt;
6298 +extern unsigned short p1_tx_good_cnt;
6299 +extern unsigned short p2_rx_good_cnt;
6300 +extern unsigned short p2_tx_good_cnt;
6301 +extern unsigned short p3_rx_good_cnt;
6302 +extern unsigned short p3_tx_good_cnt;
6303 +extern unsigned short p4_rx_good_cnt;
6304 +extern unsigned short p4_tx_good_cnt;
6305 +extern unsigned short p5_rx_good_cnt;
6306 +extern unsigned short p5_tx_good_cnt;
6307 +extern unsigned short p6_rx_good_cnt;
6308 +extern unsigned short p6_tx_good_cnt;
6310 +extern unsigned short p0_rx_byte_cnt;
6311 +extern unsigned short p1_rx_byte_cnt;
6312 +extern unsigned short p2_rx_byte_cnt;
6313 +extern unsigned short p3_rx_byte_cnt;
6314 +extern unsigned short p4_rx_byte_cnt;
6315 +extern unsigned short p5_rx_byte_cnt;
6316 +extern unsigned short p6_rx_byte_cnt;
6317 +extern unsigned short p0_tx_byte_cnt;
6318 +extern unsigned short p1_tx_byte_cnt;
6319 +extern unsigned short p2_tx_byte_cnt;
6320 +extern unsigned short p3_tx_byte_cnt;
6321 +extern unsigned short p4_tx_byte_cnt;
6322 +extern unsigned short p5_tx_byte_cnt;
6323 +extern unsigned short p6_tx_byte_cnt;
6325 +#if defined(CONFIG_RALINK_MT7620)
6326 +extern unsigned short p7_rx_good_cnt;
6327 +extern unsigned short p7_tx_good_cnt;
6328 +extern unsigned short p7_rx_byte_cnt;
6329 +extern unsigned short p7_tx_byte_cnt;
6335 +#if defined(CONFIG_RAETH_TSO)
6336 +int txd_cnt[MAX_SKB_FRAGS/2 + 1];
6340 +#if defined(CONFIG_RAETH_LRO)
6341 +#define MAX_AGGR 64
6343 +int lro_stats_cnt[MAX_AGGR + 1];
6344 +int lro_flush_cnt[MAX_AGGR + 1];
6345 +int lro_len_cnt1[16];
6346 +//int lro_len_cnt2[16];
6347 +int aggregated[MAX_DESC];
6348 +int lro_aggregated;
6356 +#if defined(CONFIG_RAETH_HW_LRO)
6357 +#define HW_LRO_RING_NUM 3
6358 +#define MAX_HW_LRO_AGGR 64
6359 +unsigned int hw_lro_agg_num_cnt[HW_LRO_RING_NUM][MAX_HW_LRO_AGGR + 1];
6360 +unsigned int hw_lro_agg_size_cnt[HW_LRO_RING_NUM][16];
6361 +unsigned int hw_lro_tot_agg_cnt[HW_LRO_RING_NUM];
6362 +unsigned int hw_lro_tot_flush_cnt[HW_LRO_RING_NUM];
6363 +#if defined(CONFIG_RAETH_HW_LRO_REASON_DBG)
6364 +unsigned int hw_lro_agg_flush_cnt[HW_LRO_RING_NUM];
6365 +unsigned int hw_lro_age_flush_cnt[HW_LRO_RING_NUM];
6366 +unsigned int hw_lro_seq_flush_cnt[HW_LRO_RING_NUM];
6367 +unsigned int hw_lro_timestamp_flush_cnt[HW_LRO_RING_NUM];
6368 +unsigned int hw_lro_norule_flush_cnt[HW_LRO_RING_NUM];
6369 +#endif /* CONFIG_RAETH_HW_LRO_REASON_DBG */
6370 +#endif /* CONFIG_RAETH_HW_LRO */
6372 +#if defined(CONFIG_RAETH_QDMA)
6373 +extern unsigned int M2Q_table[64];
6374 +extern struct QDMA_txdesc *free_head;
6376 +#if defined (CONFIG_ARCH_MT7623)
6377 +extern struct SFQ_table *sfq0;
6378 +extern struct SFQ_table *sfq1;
6379 +extern struct SFQ_table *sfq2;
6380 +extern struct SFQ_table *sfq3;
6383 +#if defined(CONFIG_USER_SNMPD)
6385 +static int ra_snmp_seq_show(struct seq_file *seq, void *v)
6387 +#if !defined(CONFIG_RALINK_RT5350) && !defined(CONFIG_RALINK_MT7620) && !defined (CONFIG_RALINK_MT7628)
6389 + seq_printf(seq, "rx counters: %x %x %x %x %x %x %x\n", sysRegRead(GDMA_RX_GBCNT0), sysRegRead(GDMA_RX_GPCNT0),sysRegRead(GDMA_RX_OERCNT0), sysRegRead(GDMA_RX_FERCNT0), sysRegRead(GDMA_RX_SERCNT0), sysRegRead(GDMA_RX_LERCNT0), sysRegRead(GDMA_RX_CERCNT0));
6391 + seq_printf(seq, "fc config: %x %x %x %x\n", sysRegRead(CDMA_FC_CFG), sysRegRead(GDMA1_FC_CFG), PDMA_FC_CFG, sysRegRead(PDMA_FC_CFG));
6393 + seq_printf(seq, "scheduler: %x %x %x\n", sysRegRead(GDMA1_SCH_CFG), sysRegRead(GDMA2_SCH_CFG), sysRegRead(PDMA_SCH_CFG));
6396 + seq_printf(seq, "ports: %x %x %x %x %x %x\n", sysRegRead(PORT0_PKCOUNT), sysRegRead(PORT1_PKCOUNT), sysRegRead(PORT2_PKCOUNT), sysRegRead(PORT3_PKCOUNT), sysRegRead(PORT4_PKCOUNT), sysRegRead(PORT5_PKCOUNT));
6401 +static int ra_snmp_seq_open(struct inode *inode, struct file *file)
6403 + return single_open(file, ra_snmp_seq_show, NULL);
6406 +static const struct file_operations ra_snmp_seq_fops = {
6407 + .owner = THIS_MODULE,
6408 + .open = ra_snmp_seq_open,
6410 + .llseek = seq_lseek,
6411 + .release = single_release
6416 +#if defined (CONFIG_GIGAPHY) || defined (CONFIG_100PHY) || \
6417 + defined (CONFIG_P5_MAC_TO_PHY_MODE) || defined (CONFIG_RAETH_GMAC2)
6418 +#if defined (CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A) || \
6419 + defined (CONFIG_RALINK_MT7620) || defined(CONFIG_RALINK_MT7621) || \
6420 + defined (CONFIG_ARCH_MT7623)
6421 +void enable_auto_negotiate(int unused)
6424 +#if !defined (CONFIG_RALINK_MT7621) && !defined (CONFIG_ARCH_MT7623)
6425 + u32 addr = CONFIG_MAC_TO_GIGAPHY_MODE_ADDR;
6428 +#if defined (CONFIG_RALINK_MT7621)
6429 + //enable MDIO mode all the time
6430 + regValue = le32_to_cpu(*(volatile u_long *)(RALINK_SYSCTL_BASE + 0x60));
6431 + regValue &= ~(0x3 << 12);
6432 + *(volatile u_long *)(RALINK_SYSCTL_BASE + 0x60) = regValue;
6435 + /* FIXME: we don't know how to deal with PHY end addr */
6436 + regValue = sysRegRead(ESW_PHY_POLLING);
6437 + regValue |= (1<<31);
6438 + regValue &= ~(0x1f);
6439 + regValue &= ~(0x1f<<8);
6440 +#if defined (CONFIG_RALINK_MT7620)
6441 + regValue |= ((addr-1) << 0);//setup PHY address for auto polling (Start Addr).
6442 + regValue |= (addr << 8);// setup PHY address for auto polling (End Addr).
6443 +#elif defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
6444 +#if defined (CONFIG_GE_RGMII_INTERNAL_P0_AN)|| defined (CONFIG_GE_RGMII_INTERNAL_P4_AN) || defined (CONFIG_GE2_RGMII_AN)
6445 + regValue |= ((CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2-1)&0x1f << 0);//setup PHY address for auto polling (Start Addr).
6446 + regValue |= (CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2 << 8);// setup PHY address for auto polling (End Addr).
6448 + regValue |= (CONFIG_MAC_TO_GIGAPHY_MODE_ADDR << 0);//setup PHY address for auto polling (Start Addr).
6449 + regValue |= (CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2 << 8);// setup PHY address for auto polling (End Addr).
6452 + regValue |= (addr << 0);// setup PHY address for auto polling (start Addr).
6453 + regValue |= (addr << 8);// setup PHY address for auto polling (End Addr).
6456 + /*kurtis: AN is strange*/
6457 + sysRegWrite(ESW_PHY_POLLING, regValue);
6459 +#if defined (CONFIG_P4_MAC_TO_PHY_MODE)
6460 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x3400) = 0x56330;
6462 +#if defined (CONFIG_P5_MAC_TO_PHY_MODE)
6463 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x3500) = 0x56330;
6466 +#elif defined (CONFIG_RALINK_RT2880) || defined(CONFIG_RALINK_RT3883) || \
6467 + defined (CONFIG_RALINK_RT3052) || defined(CONFIG_RALINK_RT3352)
6469 +void enable_auto_negotiate(int ge)
6471 +#if defined (CONFIG_RALINK_RT3052) || defined (CONFIG_RALINK_RT3352)
6472 + u32 regValue = sysRegRead(0xb01100C8);
6475 + regValue = (ge == 2)? sysRegRead(MDIO_CFG2) : sysRegRead(MDIO_CFG);
6478 + regValue &= 0xe0ff7fff; // clear auto polling related field:
6479 + // (MD_PHY1ADDR & GP1_FRC_EN).
6480 + regValue |= 0x20000000; // force to enable MDC/MDIO auto polling.
6482 +#if defined (CONFIG_GE2_RGMII_AN) || defined (CONFIG_GE2_MII_AN)
6484 + regValue |= (CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2 << 24); // setup PHY address for auto polling.
6487 +#if defined (CONFIG_GE1_RGMII_AN) || defined (CONFIG_GE1_MII_AN) || defined (CONFIG_P5_MAC_TO_PHY_MODE)
6489 + regValue |= (CONFIG_MAC_TO_GIGAPHY_MODE_ADDR << 24); // setup PHY address for auto polling.
6493 +#if defined (CONFIG_RALINK_RT3052) || defined (CONFIG_RALINK_RT3352)
6494 + sysRegWrite(0xb01100C8, regValue);
6497 + sysRegWrite(MDIO_CFG2, regValue);
6499 + sysRegWrite(MDIO_CFG, regValue);
6504 +void ra2880stop(END_DEVICE *ei_local)
6506 + unsigned int regValue;
6507 + printk("ra2880stop()...");
6509 + regValue = sysRegRead(DMA_GLO_CFG);
6510 + regValue &= ~(TX_WB_DDONE | RX_DMA_EN | TX_DMA_EN);
6511 + sysRegWrite(DMA_GLO_CFG, regValue);
6514 + // printk("Done0x%x...\n", readreg(DMA_GLO_CFG));
6517 +void ei_irq_clear(void)
6519 + sysRegWrite(FE_INT_STATUS, 0xFFFFFFFF);
6522 +void rt2880_gmac_hard_reset(void)
6524 +#if !defined (CONFIG_RALINK_RT6855A)
6526 + sysRegWrite(RSTCTRL, RALINK_FE_RST);
6527 + sysRegWrite(RSTCTRL, 0);
6531 +void ra2880EnableInterrupt()
6533 + unsigned int regValue = sysRegRead(FE_INT_ENABLE);
6534 + RAETH_PRINT("FE_INT_ENABLE -- : 0x%08x\n", regValue);
6535 +// regValue |= (RX_DONE_INT0 | TX_DONE_INT0);
6537 + sysRegWrite(FE_INT_ENABLE, regValue);
6540 +void ra2880MacAddressSet(unsigned char p[6])
6542 + unsigned long regValue;
6544 + regValue = (p[0] << 8) | (p[1]);
6545 +#if defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_MT7628)
6546 + sysRegWrite(SDM_MAC_ADRH, regValue);
6547 + printk("GMAC1_MAC_ADRH -- : 0x%08x\n", sysRegRead(SDM_MAC_ADRH));
6548 +#elif defined (CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A)
6549 + sysRegWrite(GDMA1_MAC_ADRH, regValue);
6550 + printk("GMAC1_MAC_ADRH -- : 0x%08x\n", sysRegRead(GDMA1_MAC_ADRH));
6552 + /* To keep the consistence between RT6855 and RT62806, GSW should keep the register. */
6553 + sysRegWrite(SMACCR1, regValue);
6554 + printk("SMACCR1 -- : 0x%08x\n", sysRegRead(SMACCR1));
6555 +#elif defined (CONFIG_RALINK_MT7620)
6556 + sysRegWrite(SMACCR1, regValue);
6557 + printk("SMACCR1 -- : 0x%08x\n", sysRegRead(SMACCR1));
6559 + sysRegWrite(GDMA1_MAC_ADRH, regValue);
6560 + printk("GMAC1_MAC_ADRH -- : 0x%08x\n", sysRegRead(GDMA1_MAC_ADRH));
6563 + regValue = (p[2] << 24) | (p[3] <<16) | (p[4] << 8) | p[5];
6564 +#if defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_MT7628)
6565 + sysRegWrite(SDM_MAC_ADRL, regValue);
6566 + printk("GMAC1_MAC_ADRL -- : 0x%08x\n", sysRegRead(SDM_MAC_ADRL));
6567 +#elif defined (CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A)
6568 + sysRegWrite(GDMA1_MAC_ADRL, regValue);
6569 + printk("GMAC1_MAC_ADRL -- : 0x%08x\n", sysRegRead(GDMA1_MAC_ADRL));
6571 + /* To keep the consistence between RT6855 and RT62806, GSW should keep the register. */
6572 + sysRegWrite(SMACCR0, regValue);
6573 + printk("SMACCR0 -- : 0x%08x\n", sysRegRead(SMACCR0));
6574 +#elif defined (CONFIG_RALINK_MT7620)
6575 + sysRegWrite(SMACCR0, regValue);
6576 + printk("SMACCR0 -- : 0x%08x\n", sysRegRead(SMACCR0));
6578 + sysRegWrite(GDMA1_MAC_ADRL, regValue);
6579 + printk("GMAC1_MAC_ADRL -- : 0x%08x\n", sysRegRead(GDMA1_MAC_ADRL));
6585 +#ifdef CONFIG_PSEUDO_SUPPORT
6586 +void ra2880Mac2AddressSet(unsigned char p[6])
6588 + unsigned long regValue;
6590 + regValue = (p[0] << 8) | (p[1]);
6591 + sysRegWrite(GDMA2_MAC_ADRH, regValue);
6593 + regValue = (p[2] << 24) | (p[3] <<16) | (p[4] << 8) | p[5];
6594 + sysRegWrite(GDMA2_MAC_ADRL, regValue);
6596 + printk("GDMA2_MAC_ADRH -- : 0x%08x\n", sysRegRead(GDMA2_MAC_ADRH));
6597 + printk("GDMA2_MAC_ADRL -- : 0x%08x\n", sysRegRead(GDMA2_MAC_ADRL));
6603 + * hard_init - Called by raeth_probe to inititialize network device
6604 + * @dev: device pointer
6606 + * ethdev_init initilize dev->priv and set to END_DEVICE structure
6609 +void ethtool_init(struct net_device *dev)
6611 +#if defined (CONFIG_ETHTOOL) /*&& defined (CONFIG_RAETH_ROUTER)*/
6612 + END_DEVICE *ei_local = netdev_priv(dev);
6614 + // init mii structure
6615 + ei_local->mii_info.dev = dev;
6616 + ei_local->mii_info.mdio_read = mdio_read;
6617 + ei_local->mii_info.mdio_write = mdio_write;
6618 + ei_local->mii_info.phy_id_mask = 0x1f;
6619 + ei_local->mii_info.reg_num_mask = 0x1f;
6620 + ei_local->mii_info.supports_gmii = mii_check_gmii_support(&ei_local->mii_info);
6621 + // TODO: phy_id: 0~4
6622 + ei_local->mii_info.phy_id = 1;
6628 + * Routine Name : get_idx(mode, index)
6629 + * Description: calculate ring usage for tx/rx rings
6630 + * Mode 1 : Tx Ring
6631 + * Mode 2 : Rx Ring
6633 +int get_ring_usage(int mode, int i)
6635 + unsigned long tx_ctx_idx, tx_dtx_idx, tx_usage;
6636 + unsigned long rx_calc_idx, rx_drx_idx, rx_usage;
6638 + struct PDMA_rxdesc* rxring;
6639 + struct PDMA_txdesc* txring;
6641 + END_DEVICE *ei_local = netdev_priv(dev_raether);
6645 + /* cpu point to the next descriptor of rx dma ring */
6646 + rx_calc_idx = *(unsigned long*)RX_CALC_IDX0;
6647 + rx_drx_idx = *(unsigned long*)RX_DRX_IDX0;
6648 + rxring = (struct PDMA_rxdesc*)RX_BASE_PTR0;
6650 + rx_usage = (rx_drx_idx - rx_calc_idx -1 + NUM_RX_DESC) % NUM_RX_DESC;
6651 + if ( rx_calc_idx == rx_drx_idx ) {
6652 + if ( rxring[rx_drx_idx].rxd_info2.DDONE_bit == 1)
6653 + tx_usage = NUM_RX_DESC;
6663 + tx_ctx_idx = *(unsigned long*)TX_CTX_IDX0;
6664 + tx_dtx_idx = *(unsigned long*)TX_DTX_IDX0;
6665 + txring = ei_local->tx_ring0;
6667 +#if defined(CONFIG_RAETH_QOS)
6669 + tx_ctx_idx = *(unsigned long*)TX_CTX_IDX1;
6670 + tx_dtx_idx = *(unsigned long*)TX_DTX_IDX1;
6671 + txring = ei_local->tx_ring1;
6674 + tx_ctx_idx = *(unsigned long*)TX_CTX_IDX2;
6675 + tx_dtx_idx = *(unsigned long*)TX_DTX_IDX2;
6676 + txring = ei_local->tx_ring2;
6679 + tx_ctx_idx = *(unsigned long*)TX_CTX_IDX3;
6680 + tx_dtx_idx = *(unsigned long*)TX_DTX_IDX3;
6681 + txring = ei_local->tx_ring3;
6685 + printk("get_tx_idx failed %d %d\n", mode, i);
6689 + tx_usage = (tx_ctx_idx - tx_dtx_idx + NUM_TX_DESC) % NUM_TX_DESC;
6690 + if ( tx_ctx_idx == tx_dtx_idx ) {
6691 + if ( txring[tx_ctx_idx].txd_info2.DDONE_bit == 1)
6694 + tx_usage = NUM_TX_DESC;
6700 +#if defined(CONFIG_RAETH_QOS)
6701 +void dump_qos(struct seq_file *s)
6706 + seq_printf(s, "\n-----Raeth QOS -----\n\n");
6708 + for ( i = 0; i < 4; i++) {
6709 + usage = get_ring_usage(1,i);
6710 + seq_printf(s, "Tx Ring%d Usage : %d/%d\n", i, usage, NUM_TX_DESC);
6713 + usage = get_ring_usage(2,0);
6714 + seq_printf(s, "RX Usage : %d/%d\n\n", usage, NUM_RX_DESC);
6715 +#if defined (CONFIG_RALINK_MT7620)
6716 + seq_printf(s, "PSE_FQFC_CFG(0x%08x) : 0x%08x\n", PSE_FQFC_CFG, sysRegRead(PSE_FQFC_CFG));
6717 + seq_printf(s, "PSE_IQ_CFG(0x%08x) : 0x%08x\n", PSE_IQ_CFG, sysRegRead(PSE_IQ_CFG));
6718 + seq_printf(s, "PSE_QUE_STA(0x%08x) : 0x%08x\n", PSE_QUE_STA, sysRegRead(PSE_QUE_STA));
6719 +#elif defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_MT7628)
6722 + seq_printf(s, "GDMA1_FC_CFG(0x%08x) : 0x%08x\n", GDMA1_FC_CFG, sysRegRead(GDMA1_FC_CFG));
6723 + seq_printf(s, "GDMA2_FC_CFG(0x%08x) : 0x%08x\n", GDMA2_FC_CFG, sysRegRead(GDMA2_FC_CFG));
6724 + seq_printf(s, "PDMA_FC_CFG(0x%08x) : 0x%08x\n", PDMA_FC_CFG, sysRegRead(PDMA_FC_CFG));
6725 + seq_printf(s, "PSE_FQ_CFG(0x%08x) : 0x%08x\n", PSE_FQ_CFG, sysRegRead(PSE_FQ_CFG));
6727 + seq_printf(s, "\n\nTX_CTX_IDX0 : 0x%08x\n", sysRegRead(TX_CTX_IDX0));
6728 + seq_printf(s, "TX_DTX_IDX0 : 0x%08x\n", sysRegRead(TX_DTX_IDX0));
6729 + seq_printf(s, "TX_CTX_IDX1 : 0x%08x\n", sysRegRead(TX_CTX_IDX1));
6730 + seq_printf(s, "TX_DTX_IDX1 : 0x%08x\n", sysRegRead(TX_DTX_IDX1));
6731 + seq_printf(s, "TX_CTX_IDX2 : 0x%08x\n", sysRegRead(TX_CTX_IDX2));
6732 + seq_printf(s, "TX_DTX_IDX2 : 0x%08x\n", sysRegRead(TX_DTX_IDX2));
6733 + seq_printf(s, "TX_CTX_IDX3 : 0x%08x\n", sysRegRead(TX_CTX_IDX3));
6734 + seq_printf(s, "TX_DTX_IDX3 : 0x%08x\n", sysRegRead(TX_DTX_IDX3));
6735 + seq_printf(s, "RX_CALC_IDX0 : 0x%08x\n", sysRegRead(RX_CALC_IDX0));
6736 + seq_printf(s, "RX_DRX_IDX0 : 0x%08x\n", sysRegRead(RX_DRX_IDX0));
6738 + seq_printf(s, "\n------------------------------\n\n");
6742 +void dump_reg(struct seq_file *s)
6744 + int fe_int_enable;
6751 +#if !defined (CONFIG_RAETH_QDMA)
6753 + int tx_base_ptr[4];
6754 + int tx_max_cnt[4];
6755 + int tx_ctx_idx[4];
6756 + int tx_dtx_idx[4];
6760 + fe_int_enable = sysRegRead(FE_INT_ENABLE);
6761 + rx_usage = get_ring_usage(2,0);
6763 + dly_int_cfg = sysRegRead(DLY_INT_CFG);
6765 +#if !defined (CONFIG_RAETH_QDMA)
6766 + tx_usage = get_ring_usage(1,0);
6768 + tx_base_ptr[0] = sysRegRead(TX_BASE_PTR0);
6769 + tx_max_cnt[0] = sysRegRead(TX_MAX_CNT0);
6770 + tx_ctx_idx[0] = sysRegRead(TX_CTX_IDX0);
6771 + tx_dtx_idx[0] = sysRegRead(TX_DTX_IDX0);
6773 + tx_base_ptr[1] = sysRegRead(TX_BASE_PTR1);
6774 + tx_max_cnt[1] = sysRegRead(TX_MAX_CNT1);
6775 + tx_ctx_idx[1] = sysRegRead(TX_CTX_IDX1);
6776 + tx_dtx_idx[1] = sysRegRead(TX_DTX_IDX1);
6778 + tx_base_ptr[2] = sysRegRead(TX_BASE_PTR2);
6779 + tx_max_cnt[2] = sysRegRead(TX_MAX_CNT2);
6780 + tx_ctx_idx[2] = sysRegRead(TX_CTX_IDX2);
6781 + tx_dtx_idx[2] = sysRegRead(TX_DTX_IDX2);
6783 + tx_base_ptr[3] = sysRegRead(TX_BASE_PTR3);
6784 + tx_max_cnt[3] = sysRegRead(TX_MAX_CNT3);
6785 + tx_ctx_idx[3] = sysRegRead(TX_CTX_IDX3);
6786 + tx_dtx_idx[3] = sysRegRead(TX_DTX_IDX3);
6789 + rx_base_ptr0 = sysRegRead(RX_BASE_PTR0);
6790 + rx_max_cnt0 = sysRegRead(RX_MAX_CNT0);
6791 + rx_calc_idx0 = sysRegRead(RX_CALC_IDX0);
6792 + rx_drx_idx0 = sysRegRead(RX_DRX_IDX0);
6794 + seq_printf(s, "\n\nFE_INT_ENABLE : 0x%08x\n", fe_int_enable);
6795 +#if !defined (CONFIG_RAETH_QDMA)
6796 + seq_printf(s, "TxRing PktCnt: %d/%d\n", tx_usage, NUM_TX_DESC);
6798 + seq_printf(s, "RxRing PktCnt: %d/%d\n\n", rx_usage, NUM_RX_DESC);
6799 + seq_printf(s, "DLY_INT_CFG : 0x%08x\n", dly_int_cfg);
6801 +#if !defined (CONFIG_RAETH_QDMA)
6802 + for(i=0;i<4;i++) {
6803 + seq_printf(s, "TX_BASE_PTR%d : 0x%08x\n", i, tx_base_ptr[i]);
6804 + seq_printf(s, "TX_MAX_CNT%d : 0x%08x\n", i, tx_max_cnt[i]);
6805 + seq_printf(s, "TX_CTX_IDX%d : 0x%08x\n", i, tx_ctx_idx[i]);
6806 + seq_printf(s, "TX_DTX_IDX%d : 0x%08x\n", i, tx_dtx_idx[i]);
6810 + seq_printf(s, "RX_BASE_PTR0 : 0x%08x\n", rx_base_ptr0);
6811 + seq_printf(s, "RX_MAX_CNT0 : 0x%08x\n", rx_max_cnt0);
6812 + seq_printf(s, "RX_CALC_IDX0 : 0x%08x\n", rx_calc_idx0);
6813 + seq_printf(s, "RX_DRX_IDX0 : 0x%08x\n", rx_drx_idx0);
6815 +#if defined (CONFIG_ETHTOOL) && defined (CONFIG_RAETH_ROUTER)
6816 + seq_printf(s, "The current PHY address selected by ethtool is %d\n", get_current_phy_address());
6819 +#if defined (CONFIG_RALINK_RT2883) || defined(CONFIG_RALINK_RT3883)
6820 + seq_printf(s, "GDMA_RX_FCCNT1(0x%08x) : 0x%08x\n\n", GDMA_RX_FCCNT1, sysRegRead(GDMA_RX_FCCNT1));
6825 +void dump_cp0(void)
6827 + printk("CP0 Register dump --\n");
6828 + printk("CP0_INDEX\t: 0x%08x\n", read_32bit_cp0_register(CP0_INDEX));
6829 + printk("CP0_RANDOM\t: 0x%08x\n", read_32bit_cp0_register(CP0_RANDOM));
6830 + printk("CP0_ENTRYLO0\t: 0x%08x\n", read_32bit_cp0_register(CP0_ENTRYLO0));
6831 + printk("CP0_ENTRYLO1\t: 0x%08x\n", read_32bit_cp0_register(CP0_ENTRYLO1));
6832 + printk("CP0_CONF\t: 0x%08x\n", read_32bit_cp0_register(CP0_CONF));
6833 + printk("CP0_CONTEXT\t: 0x%08x\n", read_32bit_cp0_register(CP0_CONTEXT));
6834 + printk("CP0_PAGEMASK\t: 0x%08x\n", read_32bit_cp0_register(CP0_PAGEMASK));
6835 + printk("CP0_WIRED\t: 0x%08x\n", read_32bit_cp0_register(CP0_WIRED));
6836 + printk("CP0_INFO\t: 0x%08x\n", read_32bit_cp0_register(CP0_INFO));
6837 + printk("CP0_BADVADDR\t: 0x%08x\n", read_32bit_cp0_register(CP0_BADVADDR));
6838 + printk("CP0_COUNT\t: 0x%08x\n", read_32bit_cp0_register(CP0_COUNT));
6839 + printk("CP0_ENTRYHI\t: 0x%08x\n", read_32bit_cp0_register(CP0_ENTRYHI));
6840 + printk("CP0_COMPARE\t: 0x%08x\n", read_32bit_cp0_register(CP0_COMPARE));
6841 + printk("CP0_STATUS\t: 0x%08x\n", read_32bit_cp0_register(CP0_STATUS));
6842 + printk("CP0_CAUSE\t: 0x%08x\n", read_32bit_cp0_register(CP0_CAUSE));
6843 + printk("CP0_EPC\t: 0x%08x\n", read_32bit_cp0_register(CP0_EPC));
6844 + printk("CP0_PRID\t: 0x%08x\n", read_32bit_cp0_register(CP0_PRID));
6845 + printk("CP0_CONFIG\t: 0x%08x\n", read_32bit_cp0_register(CP0_CONFIG));
6846 + printk("CP0_LLADDR\t: 0x%08x\n", read_32bit_cp0_register(CP0_LLADDR));
6847 + printk("CP0_WATCHLO\t: 0x%08x\n", read_32bit_cp0_register(CP0_WATCHLO));
6848 + printk("CP0_WATCHHI\t: 0x%08x\n", read_32bit_cp0_register(CP0_WATCHHI));
6849 + printk("CP0_XCONTEXT\t: 0x%08x\n", read_32bit_cp0_register(CP0_XCONTEXT));
6850 + printk("CP0_FRAMEMASK\t: 0x%08x\n", read_32bit_cp0_register(CP0_FRAMEMASK));
6851 + printk("CP0_DIAGNOSTIC\t: 0x%08x\n", read_32bit_cp0_register(CP0_DIAGNOSTIC));
6852 + printk("CP0_DEBUG\t: 0x%08x\n", read_32bit_cp0_register(CP0_DEBUG));
6853 + printk("CP0_DEPC\t: 0x%08x\n", read_32bit_cp0_register(CP0_DEPC));
6854 + printk("CP0_PERFORMANCE\t: 0x%08x\n", read_32bit_cp0_register(CP0_PERFORMANCE));
6855 + printk("CP0_ECC\t: 0x%08x\n", read_32bit_cp0_register(CP0_ECC));
6856 + printk("CP0_CACHEERR\t: 0x%08x\n", read_32bit_cp0_register(CP0_CACHEERR));
6857 + printk("CP0_TAGLO\t: 0x%08x\n", read_32bit_cp0_register(CP0_TAGLO));
6858 + printk("CP0_TAGHI\t: 0x%08x\n", read_32bit_cp0_register(CP0_TAGHI));
6859 + printk("CP0_ERROREPC\t: 0x%08x\n", read_32bit_cp0_register(CP0_ERROREPC));
6860 + printk("CP0_DESAVE\t: 0x%08x\n\n", read_32bit_cp0_register(CP0_DESAVE));
6864 +struct proc_dir_entry *procRegDir;
6865 +static struct proc_dir_entry *procGmac, *procSysCP0, *procTxRing, *procRxRing, *procSkbFree;
6866 +#if defined(CONFIG_PSEUDO_SUPPORT) && defined(CONFIG_ETHTOOL)
6867 +static struct proc_dir_entry *procGmac2;
6869 +#if defined(CONFIG_USER_SNMPD)
6870 +static struct proc_dir_entry *procRaSnmp;
6872 +#if defined(CONFIG_RAETH_TSO)
6873 +static struct proc_dir_entry *procNumOfTxd, *procTsoLen;
6876 +#if defined(CONFIG_RAETH_LRO)
6877 +static struct proc_dir_entry *procLroStats;
6879 +#if defined(CONFIG_RAETH_HW_LRO) || defined (CONFIG_RAETH_MULTIPLE_RX_RING)
6880 +static struct proc_dir_entry *procRxRing1, *procRxRing2, *procRxRing3;
6881 +static struct proc_dir_entry *procHwLroStats, *procHwLroAutoTlb;
6882 +const static HWLRO_DBG_FUNC hw_lro_dbg_func[] =
6884 + [0] = hwlro_agg_cnt_ctrl,
6885 + [1] = hwlro_agg_time_ctrl,
6886 + [2] = hwlro_age_time_ctrl,
6887 + [3] = hwlro_pkt_int_alpha_ctrl,
6888 + [4] = hwlro_threshold_ctrl,
6889 + [5] = hwlro_fix_setting_switch_ctrl,
6891 +#endif /* CONFIG_RAETH_HW_LRO */
6892 +#if defined (TASKLET_WORKQUEUE_SW)
6893 +static struct proc_dir_entry *procSCHE;
6896 +#if defined(CONFIG_RAETH_PDMA_DVT)
6897 +static struct proc_dir_entry *procPdmaDvt;
6899 +const static PDMA_DBG_FUNC pdma_dvt_dbg_func[] =
6901 + [0] = pdma_dvt_show_ctrl,
6902 + [1] = pdma_dvt_test_rx_ctrl,
6903 + [2] = pdma_dvt_test_tx_ctrl,
6904 + [3] = pdma_dvt_test_debug_ctrl,
6905 + [4] = pdma_dvt_test_lro_ctrl,
6907 +#endif //#if defined(CONFIG_RAETH_PDMA_DVT)
6909 +int RegReadMain(struct seq_file *seq, void *v)
6915 +static void *seq_SkbFree_start(struct seq_file *seq, loff_t *pos)
6917 + if (*pos < NUM_TX_DESC)
6922 +static void *seq_SkbFree_next(struct seq_file *seq, void *v, loff_t *pos)
6925 + if (*pos >= NUM_TX_DESC)
6930 +static void seq_SkbFree_stop(struct seq_file *seq, void *v)
6932 + /* Nothing to do */
6935 +static int seq_SkbFree_show(struct seq_file *seq, void *v)
6937 + int i = *(loff_t *) v;
6938 + END_DEVICE *ei_local = netdev_priv(dev_raether);
6940 + seq_printf(seq, "%d: %08x\n",i, *(int *)&ei_local->skb_free[i]);
6945 +static const struct seq_operations seq_skb_free_ops = {
6946 + .start = seq_SkbFree_start,
6947 + .next = seq_SkbFree_next,
6948 + .stop = seq_SkbFree_stop,
6949 + .show = seq_SkbFree_show
6952 +static int skb_free_open(struct inode *inode, struct file *file)
6954 + return seq_open(file, &seq_skb_free_ops);
6957 +static const struct file_operations skb_free_fops = {
6958 + .owner = THIS_MODULE,
6959 + .open = skb_free_open,
6961 + .llseek = seq_lseek,
6962 + .release = seq_release
6965 +#if defined (CONFIG_RAETH_QDMA)
6966 +int QDMARead(struct seq_file *seq, void *v)
6968 + unsigned int temp,i;
6969 + unsigned int sw_fq, hw_fq;
6970 + unsigned int min_en, min_rate, max_en, max_rate, sch, weight;
6971 + unsigned int queue, tx_des_cnt, hw_resv, sw_resv, queue_head, queue_tail;
6972 + struct net_device *dev = dev_raether;
6973 + END_DEVICE *ei_local = netdev_priv(dev);
6975 + seq_printf(seq, "==== General Information ====\n");
6976 + temp = sysRegRead(QDMA_FQ_CNT);
6977 + sw_fq = (temp&0xFFFF0000)>>16;
6978 + hw_fq = (temp&0x0000FFFF);
6979 + seq_printf(seq, "SW TXD: %d/%d; HW TXD: %d/%d\n", sw_fq, NUM_TX_DESC, hw_fq,NUM_QDMA_PAGE);
6980 + seq_printf(seq, "SW TXD virtual start address: 0x%08x\n", ei_local->txd_pool);
6981 + seq_printf(seq, "HW TXD virtual start address: 0x%08x\n\n", free_head);
6983 + seq_printf(seq, "==== Scheduler Information ====\n");
6984 + temp = sysRegRead(QDMA_TX_SCH);
6985 + max_en = (temp&0x00000800)>>11;
6986 + max_rate = (temp&0x000007F0)>>4;
6987 + for(i=0;i<(temp&0x0000000F);i++)
6989 + seq_printf(seq, "SCH1 rate control:%d. Rate is %dKbps.\n", max_en, max_rate);
6990 + max_en = (temp&0x08000000)>>27;
6991 + max_rate = (temp&0x07F00000)>>20;
6992 + for(i=0;i<(temp&0x000F0000);i++)
6994 + seq_printf(seq, "SCH2 rate control:%d. Rate is %dKbps.\n\n", max_en, max_rate);
6996 + seq_printf(seq, "==== Physical Queue Information ====\n");
6997 + for (queue = 0; queue < 16; queue++){
6998 + temp = sysRegRead(QTX_CFG_0 + 0x10 * queue);
6999 + tx_des_cnt = (temp & 0xffff0000) >> 16;
7000 + hw_resv = (temp & 0xff00) >> 8;
7001 + sw_resv = (temp & 0xff);
7002 + temp = sysRegRead(QTX_CFG_0 +(0x10 * queue) + 0x4);
7003 + sch = (temp >> 31) + 1 ;
7004 + min_en = (temp & 0x8000000) >> 27;
7005 + min_rate = (temp & 0x7f00000) >> 20;
7006 + for (i = 0; i< (temp & 0xf0000) >> 16; i++)
7008 + max_en = (temp & 0x800) >> 11;
7009 + max_rate = (temp & 0x7f0) >> 4;
7010 + for (i = 0; i< (temp & 0xf); i++)
7012 + weight = (temp & 0xf000) >> 12;
7013 + queue_head = sysRegRead(QTX_HEAD_0 + 0x10 * queue);
7014 + queue_tail = sysRegRead(QTX_TAIL_0 + 0x10 * queue);
7016 + seq_printf(seq, "Queue#%d Information:\n", queue);
7017 + seq_printf(seq, "%d packets in the queue; head address is 0x%08x, tail address is 0x%08x.\n", tx_des_cnt, queue_head, queue_tail);
7018 + seq_printf(seq, "HW_RESV: %d; SW_RESV: %d; SCH: %d; Weighting: %d\n", hw_resv, sw_resv, sch, weight);
7019 + seq_printf(seq, "Min_Rate_En is %d, Min_Rate is %dKbps; Max_Rate_En is %d, Max_Rate is %dKbps.\n\n", min_en, min_rate, max_en, max_rate);
7021 +#if defined (CONFIG_ARCH_MT7623) && defined(CONFIG_HW_SFQ)
7022 + seq_printf(seq, "==== Virtual Queue Information ====\n");
7023 + seq_printf(seq, "VQTX_TB_BASE_0:0x%08x;VQTX_TB_BASE_1:0x%08x;VQTX_TB_BASE_2:0x%08x;VQTX_TB_BASE_3:0x%08x\n", \
7024 + sfq0, sfq1, sfq2, sfq3);
7025 + temp = sysRegRead(VQTX_NUM);
7026 + seq_printf(seq, "VQTX_NUM_0:0x%01x;VQTX_NUM_1:0x%01x;VQTX_NUM_2:0x%01x;VQTX_NUM_3:0x%01x\n\n", \
7027 + temp&0xF, (temp&0xF0)>>4, (temp&0xF00)>>8, (temp&0xF000)>>12);
7031 + seq_printf(seq, "==== Flow Control Information ====\n");
7032 + temp = sysRegRead(QDMA_FC_THRES);
7033 + seq_printf(seq, "SW_DROP_EN:%x; SW_DROP_FFA:%d; SW_DROP_MODE:%d\n", \
7034 + (temp&0x1000000)>>24, (temp&0x200000)>>25, (temp&0x30000000)>>28);
7035 + seq_printf(seq, "WH_DROP_EN:%x; HW_DROP_FFA:%d; HW_DROP_MODE:%d\n", \
7036 + (temp&0x10000)>>16, (temp&0x2000)>>17, (temp&0x300000)>>20);
7037 +#if defined (CONFIG_ARCH_MT7623)
7038 + seq_printf(seq, "SW_DROP_FSTVQ_MODE:%d;SW_DROP_FSTVQ:%d\n", \
7039 + (temp&0xC0000000)>>30, (temp&0x08000000)>>27);
7040 + seq_printf(seq, "HW_DROP_FSTVQ_MODE:%d;HW_DROP_FSTVQ:%d\n", \
7041 + (temp&0xC00000)>>22, (temp&0x080000)>>19);
7044 + seq_printf(seq, "\n==== FSM Information\n");
7045 + temp = sysRegRead(QDMA_DMA);
7046 +#if defined (CONFIG_ARCH_MT7623)
7047 + seq_printf(seq, "VQTB_FSM:0x%01x\n", (temp&0x0F000000)>>24);
7049 + seq_printf(seq, "FQ_FSM:0x%01x\n", (temp&0x000F0000)>>16);
7050 + seq_printf(seq, "TX_FSM:0x%01x\n", (temp&0x00000F00)>>12);
7051 + seq_printf(seq, "RX_FSM:0x%01x\n\n", (temp&0x0000000f));
7053 + seq_printf(seq, "==== M2Q Information ====\n");
7054 + for (i = 0; i < 64; i+=8){
7055 + seq_printf(seq, " (%d,%d)(%d,%d)(%d,%d)(%d,%d)(%d,%d)(%d,%d)(%d,%d)(%d,%d)\n",
7056 + i, M2Q_table[i], i+1, M2Q_table[i+1], i+2, M2Q_table[i+2], i+3, M2Q_table[i+3],
7057 + i+4, M2Q_table[i+4], i+5, M2Q_table[i+5], i+6, M2Q_table[i+6], i+7, M2Q_table[i+7]);
7064 +static int qdma_open(struct inode *inode, struct file *file)
7066 + return single_open(file, QDMARead, NULL);
7069 +static const struct file_operations qdma_fops = {
7070 + .owner = THIS_MODULE,
7071 + .open = qdma_open,
7073 + .llseek = seq_lseek,
7074 + .release = single_release
7078 +int TxRingRead(struct seq_file *seq, void *v)
7080 + END_DEVICE *ei_local = netdev_priv(dev_raether);
7081 + struct PDMA_txdesc *tx_ring;
7084 + tx_ring = kmalloc(sizeof(struct PDMA_txdesc) * NUM_TX_DESC, GFP_KERNEL);
7085 + if(tx_ring==NULL){
7086 + seq_printf(seq, " allocate temp tx_ring fail.\n");
7090 + for (i=0; i < NUM_TX_DESC; i++) {
7091 + tx_ring[i] = ei_local->tx_ring0[i];
7094 + for (i=0; i < NUM_TX_DESC; i++) {
7095 +#ifdef CONFIG_32B_DESC
7096 + seq_printf(seq, "%d: %08x %08x %08x %08x %08x %08x %08x %08x\n",i, *(int *)&tx_ring[i].txd_info1,
7097 + *(int *)&tx_ring[i].txd_info2, *(int *)&tx_ring[i].txd_info3,
7098 + *(int *)&tx_ring[i].txd_info4, *(int *)&tx_ring[i].txd_info5,
7099 + *(int *)&tx_ring[i].txd_info6, *(int *)&tx_ring[i].txd_info7,
7100 + *(int *)&tx_ring[i].txd_info8);
7102 + seq_printf(seq, "%d: %08x %08x %08x %08x\n",i, *(int *)&tx_ring[i].txd_info1, *(int *)&tx_ring[i].txd_info2,
7103 + *(int *)&tx_ring[i].txd_info3, *(int *)&tx_ring[i].txd_info4);
7111 +static int tx_ring_open(struct inode *inode, struct file *file)
7113 +#if !defined (CONFIG_RAETH_QDMA)
7114 + return single_open(file, TxRingRead, NULL);
7116 + return single_open(file, QDMARead, NULL);
7120 +static const struct file_operations tx_ring_fops = {
7121 + .owner = THIS_MODULE,
7122 + .open = tx_ring_open,
7124 + .llseek = seq_lseek,
7125 + .release = single_release
7128 +int RxRingRead(struct seq_file *seq, void *v)
7130 + END_DEVICE *ei_local = netdev_priv(dev_raether);
7131 + struct PDMA_rxdesc *rx_ring;
7134 + rx_ring = kmalloc(sizeof(struct PDMA_rxdesc) * NUM_RX_DESC, GFP_KERNEL);
7135 + if(rx_ring==NULL){
7136 + seq_printf(seq, " allocate temp rx_ring fail.\n");
7140 + for (i=0; i < NUM_RX_DESC; i++) {
7141 + memcpy(&rx_ring[i], &ei_local->rx_ring0[i], sizeof(struct PDMA_rxdesc));
7144 + for (i=0; i < NUM_RX_DESC; i++) {
7145 +#ifdef CONFIG_32B_DESC
7146 + seq_printf(seq, "%d: %08x %08x %08x %08x %08x %08x %08x %08x\n",i, *(int *)&rx_ring[i].rxd_info1,
7147 + *(int *)&rx_ring[i].rxd_info2, *(int *)&rx_ring[i].rxd_info3,
7148 + *(int *)&rx_ring[i].rxd_info4, *(int *)&rx_ring[i].rxd_info5,
7149 + *(int *)&rx_ring[i].rxd_info6, *(int *)&rx_ring[i].rxd_info7,
7150 + *(int *)&rx_ring[i].rxd_info8);
7152 + seq_printf(seq, "%d: %08x %08x %08x %08x\n",i, *(int *)&rx_ring[i].rxd_info1, *(int *)&rx_ring[i].rxd_info2,
7153 + *(int *)&rx_ring[i].rxd_info3, *(int *)&rx_ring[i].rxd_info4);
7161 +static int rx_ring_open(struct inode *inode, struct file *file)
7163 + return single_open(file, RxRingRead, NULL);
7166 +static const struct file_operations rx_ring_fops = {
7167 + .owner = THIS_MODULE,
7168 + .open = rx_ring_open,
7170 + .llseek = seq_lseek,
7171 + .release = single_release
7174 +#if defined(CONFIG_RAETH_HW_LRO) || defined (CONFIG_RAETH_MULTIPLE_RX_RING)
7175 +int RxLRORingRead(struct seq_file *seq, void *v, struct PDMA_rxdesc *rx_ring_p)
7177 + struct PDMA_rxdesc *rx_ring;
7180 + rx_ring = kmalloc(sizeof(struct PDMA_rxdesc) * NUM_LRO_RX_DESC, GFP_KERNEL);
7181 + if(rx_ring==NULL){
7182 + seq_printf(seq, " allocate temp rx_ring fail.\n");
7186 + for (i=0; i < NUM_LRO_RX_DESC; i++) {
7187 + memcpy(&rx_ring[i], &rx_ring_p[i], sizeof(struct PDMA_rxdesc));
7190 + for (i=0; i < NUM_LRO_RX_DESC; i++) {
7191 +#ifdef CONFIG_32B_DESC
7192 + seq_printf(seq, "%d: %08x %08x %08x %08x %08x %08x %08x %08x\n",i, *(int *)&rx_ring[i].rxd_info1,
7193 + *(int *)&rx_ring[i].rxd_info2, *(int *)&rx_ring[i].rxd_info3,
7194 + *(int *)&rx_ring[i].rxd_info4, *(int *)&rx_ring[i].rxd_info5,
7195 + *(int *)&rx_ring[i].rxd_info6, *(int *)&rx_ring[i].rxd_info7,
7196 + *(int *)&rx_ring[i].rxd_info8);
7198 + seq_printf(seq, "%d: %08x %08x %08x %08x\n",i, *(int *)&rx_ring[i].rxd_info1, *(int *)&rx_ring[i].rxd_info2,
7199 + *(int *)&rx_ring[i].rxd_info3, *(int *)&rx_ring[i].rxd_info4);
7207 +int RxRing1Read(struct seq_file *seq, void *v)
7209 + END_DEVICE *ei_local = netdev_priv(dev_raether);
7210 + RxLRORingRead(seq, v, ei_local->rx_ring1);
7215 +int RxRing2Read(struct seq_file *seq, void *v)
7217 + END_DEVICE *ei_local = netdev_priv(dev_raether);
7218 + RxLRORingRead(seq, v, ei_local->rx_ring2);
7223 +int RxRing3Read(struct seq_file *seq, void *v)
7225 + END_DEVICE *ei_local = netdev_priv(dev_raether);
7226 + RxLRORingRead(seq, v, ei_local->rx_ring3);
7231 +static int rx_ring1_open(struct inode *inode, struct file *file)
7233 + return single_open(file, RxRing1Read, NULL);
7236 +static int rx_ring2_open(struct inode *inode, struct file *file)
7238 + return single_open(file, RxRing2Read, NULL);
7241 +static int rx_ring3_open(struct inode *inode, struct file *file)
7243 + return single_open(file, RxRing3Read, NULL);
7246 +static const struct file_operations rx_ring1_fops = {
7247 + .owner = THIS_MODULE,
7248 + .open = rx_ring1_open,
7250 + .llseek = seq_lseek,
7251 + .release = single_release
7254 +static const struct file_operations rx_ring2_fops = {
7255 + .owner = THIS_MODULE,
7256 + .open = rx_ring2_open,
7258 + .llseek = seq_lseek,
7259 + .release = single_release
7262 +static const struct file_operations rx_ring3_fops = {
7263 + .owner = THIS_MODULE,
7264 + .open = rx_ring3_open,
7266 + .llseek = seq_lseek,
7267 + .release = single_release
7269 +#endif /* CONFIG_RAETH_HW_LRO */
7271 +#if defined(CONFIG_RAETH_TSO)
7273 +int NumOfTxdUpdate(int num_of_txd)
7276 + txd_cnt[num_of_txd]++;
7281 +static void *seq_TsoTxdNum_start(struct seq_file *seq, loff_t *pos)
7283 + seq_printf(seq, "TXD | Count\n");
7284 + if (*pos < (MAX_SKB_FRAGS/2 + 1))
7289 +static void *seq_TsoTxdNum_next(struct seq_file *seq, void *v, loff_t *pos)
7292 + if (*pos >= (MAX_SKB_FRAGS/2 + 1))
7297 +static void seq_TsoTxdNum_stop(struct seq_file *seq, void *v)
7299 + /* Nothing to do */
7302 +static int seq_TsoTxdNum_show(struct seq_file *seq, void *v)
7304 + int i = *(loff_t *) v;
7305 + seq_printf(seq, "%d: %d\n",i , txd_cnt[i]);
7310 +ssize_t NumOfTxdWrite(struct file *file, const char __user *buffer,
7311 + size_t count, loff_t *data)
7313 + memset(txd_cnt, 0, sizeof(txd_cnt));
7314 + printk("clear txd cnt table\n");
7319 +int TsoLenUpdate(int tso_len)
7322 + if(tso_len > 70000) {
7324 + }else if(tso_len > 65000) {
7326 + }else if(tso_len > 60000) {
7328 + }else if(tso_len > 55000) {
7330 + }else if(tso_len > 50000) {
7332 + }else if(tso_len > 45000) {
7334 + }else if(tso_len > 40000) {
7336 + }else if(tso_len > 35000) {
7338 + }else if(tso_len > 30000) {
7340 + }else if(tso_len > 25000) {
7342 + }else if(tso_len > 20000) {
7344 + }else if(tso_len > 15000) {
7346 + }else if(tso_len > 10000) {
7348 + }else if(tso_len > 5000) {
7357 +ssize_t TsoLenWrite(struct file *file, const char __user *buffer,
7358 + size_t count, loff_t *data)
7360 + memset(tso_cnt, 0, sizeof(tso_cnt));
7361 + printk("clear tso cnt table\n");
7366 +static void *seq_TsoLen_start(struct seq_file *seq, loff_t *pos)
7368 + seq_printf(seq, " Length | Count\n");
7374 +static void *seq_TsoLen_next(struct seq_file *seq, void *v, loff_t *pos)
7382 +static void seq_TsoLen_stop(struct seq_file *seq, void *v)
7384 + /* Nothing to do */
7387 +static int seq_TsoLen_show(struct seq_file *seq, void *v)
7389 + int i = *(loff_t *) v;
7391 + seq_printf(seq, "%d~%d: %d\n", i*5000, (i+1)*5000, tso_cnt[i]);
7396 +static const struct seq_operations seq_tso_txd_num_ops = {
7397 + .start = seq_TsoTxdNum_start,
7398 + .next = seq_TsoTxdNum_next,
7399 + .stop = seq_TsoTxdNum_stop,
7400 + .show = seq_TsoTxdNum_show
7403 +static int tso_txd_num_open(struct inode *inode, struct file *file)
7405 + return seq_open(file, &seq_tso_txd_num_ops);
7408 +static struct file_operations tso_txd_num_fops = {
7409 + .owner = THIS_MODULE,
7410 + .open = tso_txd_num_open,
7412 + .llseek = seq_lseek,
7413 + .write = NumOfTxdWrite,
7414 + .release = seq_release
7417 +static const struct seq_operations seq_tso_len_ops = {
7418 + .start = seq_TsoLen_start,
7419 + .next = seq_TsoLen_next,
7420 + .stop = seq_TsoLen_stop,
7421 + .show = seq_TsoLen_show
7424 +static int tso_len_open(struct inode *inode, struct file *file)
7426 + return seq_open(file, &seq_tso_len_ops);
7429 +static struct file_operations tso_len_fops = {
7430 + .owner = THIS_MODULE,
7431 + .open = tso_len_open,
7433 + .llseek = seq_lseek,
7434 + .write = TsoLenWrite,
7435 + .release = seq_release
7439 +#if defined(CONFIG_RAETH_LRO)
7440 +static int LroLenUpdate(struct net_lro_desc *lro_desc)
7444 + if(lro_desc->ip_tot_len > 65000) {
7446 + }else if(lro_desc->ip_tot_len > 60000) {
7448 + }else if(lro_desc->ip_tot_len > 55000) {
7450 + }else if(lro_desc->ip_tot_len > 50000) {
7452 + }else if(lro_desc->ip_tot_len > 45000) {
7454 + }else if(lro_desc->ip_tot_len > 40000) {
7456 + }else if(lro_desc->ip_tot_len > 35000) {
7458 + }else if(lro_desc->ip_tot_len > 30000) {
7460 + }else if(lro_desc->ip_tot_len > 25000) {
7462 + }else if(lro_desc->ip_tot_len > 20000) {
7464 + }else if(lro_desc->ip_tot_len > 15000) {
7466 + }else if(lro_desc->ip_tot_len > 10000) {
7468 + }else if(lro_desc->ip_tot_len > 5000) {
7476 +int LroStatsUpdate(struct net_lro_mgr *lro_mgr, bool all_flushed)
7478 + struct net_lro_desc *tmp;
7482 + if (all_flushed) {
7483 + for (i=0; i< MAX_DESC; i++) {
7484 + tmp = & lro_mgr->lro_arr[i];
7485 + if (tmp->pkt_aggr_cnt !=0) {
7486 + for(j=0; j<=MAX_AGGR; j++) {
7487 + if(tmp->pkt_aggr_cnt == j) {
7488 + lro_flush_cnt[j]++;
7491 + len_idx = LroLenUpdate(tmp);
7492 + lro_len_cnt1[len_idx]++;
7495 + aggregated[i] = 0;
7498 + if (lro_flushed != lro_mgr->stats.flushed) {
7499 + if (lro_aggregated != lro_mgr->stats.aggregated) {
7500 + for (i=0; i<MAX_DESC; i++) {
7501 + tmp = &lro_mgr->lro_arr[i];
7502 + if ((aggregated[i]!= tmp->pkt_aggr_cnt)
7503 + && (tmp->pkt_aggr_cnt == 0)) {
7505 + for (j=0; j<=MAX_AGGR; j++) {
7506 + if (aggregated[i] == j) {
7507 + lro_stats_cnt[j] ++;
7510 + aggregated[i] = 0;
7511 + //len_idx = LroLenUpdate(tmp);
7512 + //lro_len_cnt2[len_idx]++;
7517 + for (i=0; i<MAX_DESC; i++) {
7518 + tmp = &lro_mgr->lro_arr[i];
7519 + if ((aggregated[i] != 0) && (tmp->pkt_aggr_cnt==0)) {
7520 + for (j=0; j<=MAX_AGGR; j++) {
7521 + if (aggregated[i] == j) {
7522 + lro_stats_cnt[j] ++;
7525 + aggregated[i] = 0;
7526 + //len_idx = LroLenUpdate(tmp);
7527 + //lro_len_cnt2[len_idx]++;
7534 + if (lro_aggregated != lro_mgr->stats.aggregated) {
7535 + for (i=0; i<MAX_DESC; i++) {
7536 + tmp = &lro_mgr->lro_arr[i];
7537 + if (tmp->active) {
7538 + if (aggregated[i] != tmp->pkt_aggr_cnt)
7539 + aggregated[i] = tmp->pkt_aggr_cnt;
7541 + aggregated[i] = 0;
7548 + lro_aggregated = lro_mgr->stats.aggregated;
7549 + lro_flushed = lro_mgr->stats.flushed;
7550 + lro_nodesc = lro_mgr->stats.no_desc;
7557 +ssize_t LroStatsWrite(struct file *file, const char __user *buffer,
7558 + size_t count, loff_t *data)
7560 + memset(lro_stats_cnt, 0, sizeof(lro_stats_cnt));
7561 + memset(lro_flush_cnt, 0, sizeof(lro_flush_cnt));
7562 + memset(lro_len_cnt1, 0, sizeof(lro_len_cnt1));
7563 + //memset(lro_len_cnt2, 0, sizeof(lro_len_cnt2));
7564 + memset(aggregated, 0, sizeof(aggregated));
7565 + lro_aggregated = 0;
7571 + printk("clear lro cnt table\n");
7576 +int LroStatsRead(struct seq_file *seq, void *v)
7583 + seq_printf(seq, "LRO statistic dump:\n");
7584 + seq_printf(seq, "Cnt: Kernel | Driver\n");
7585 + for(i=0; i<=MAX_AGGR; i++) {
7586 + tot_cnt = tot_cnt + lro_stats_cnt[i] + lro_flush_cnt[i];
7587 + seq_printf(seq, " %d : %d %d\n", i, lro_stats_cnt[i], lro_flush_cnt[i]);
7588 + tot_aggr = tot_aggr + i * (lro_stats_cnt[i] + lro_flush_cnt[i]);
7590 + ave_aggr = lro_aggregated/lro_flushed;
7591 + seq_printf(seq, "Total aggregated pkt: %d\n", lro_aggregated);
7592 + seq_printf(seq, "Flushed pkt: %d %d\n", lro_flushed, force_flush);
7593 + seq_printf(seq, "Average flush cnt: %d\n", ave_aggr);
7594 + seq_printf(seq, "No descriptor pkt: %d\n\n\n", lro_nodesc);
7596 + seq_printf(seq, "Driver flush pkt len:\n");
7597 + seq_printf(seq, " Length | Count\n");
7598 + for(i=0; i<15; i++) {
7599 + seq_printf(seq, "%d~%d: %d\n", i*5000, (i+1)*5000, lro_len_cnt1[i]);
7601 + seq_printf(seq, "Kernel flush: %d; Driver flush: %d\n", tot_called2, tot_called1);
7605 +static int lro_stats_open(struct inode *inode, struct file *file)
7607 + return single_open(file, LroStatsRead, NULL);
7610 +static struct file_operations lro_stats_fops = {
7611 + .owner = THIS_MODULE,
7612 + .open = lro_stats_open,
7614 + .llseek = seq_lseek,
7615 + .write = LroStatsWrite,
7616 + .release = single_release
7620 +int getnext(const char *src, int separator, char *dest)
7625 + if ( (src == NULL) || (dest == NULL) ) {
7629 + c = strchr(src, separator);
7631 + strcpy(dest, src);
7635 + strncpy(dest, src, len);
7640 +int str_to_ip(unsigned int *ip, const char *str)
7643 + const char *ptr = str;
7645 + unsigned char c[4];
7648 + for (i = 0; i < 3; ++i) {
7649 + if ((len = getnext(ptr, '.', buf)) == -1) {
7650 + return 1; /* parse error */
7652 + c[i] = simple_strtoul(buf, NULL, 10);
7655 + c[3] = simple_strtoul(ptr, NULL, 0);
7656 + *ip = (c[0]<<24) + (c[1]<<16) + (c[2]<<8) + c[3];
7660 +#if defined(CONFIG_RAETH_HW_LRO)
7661 +static int HwLroLenUpdate(unsigned int agg_size)
7665 + if(agg_size > 65000) {
7667 + }else if(agg_size > 60000) {
7669 + }else if(agg_size > 55000) {
7671 + }else if(agg_size > 50000) {
7673 + }else if(agg_size > 45000) {
7675 + }else if(agg_size > 40000) {
7677 + }else if(agg_size > 35000) {
7679 + }else if(agg_size > 30000) {
7681 + }else if(agg_size > 25000) {
7683 + }else if(agg_size > 20000) {
7685 + }else if(agg_size > 15000) {
7687 + }else if(agg_size > 10000) {
7689 + }else if(agg_size > 5000) {
7698 +int HwLroStatsUpdate(unsigned int ring_num, unsigned int agg_cnt, unsigned int agg_size)
7700 + if( (ring_num > 0) && (ring_num < 4) )
7702 + hw_lro_agg_size_cnt[ring_num-1][HwLroLenUpdate(agg_size)]++;
7703 + hw_lro_agg_num_cnt[ring_num-1][agg_cnt]++;
7704 + hw_lro_tot_flush_cnt[ring_num-1]++;
7705 + hw_lro_tot_agg_cnt[ring_num-1] += agg_cnt;
7711 +#if defined(CONFIG_RAETH_HW_LRO_REASON_DBG)
7712 +int HwLroFlushStatsUpdate(unsigned int ring_num, unsigned int flush_reason)
7714 + if( (ring_num > 0) && (ring_num < 4) )
7717 + if ( (flush_reason & 0x7) == HW_LRO_AGG_FLUSH )
7718 + hw_lro_agg_flush_cnt[ring_num-1]++;
7719 + else if ( (flush_reason & 0x7) == HW_LRO_AGE_FLUSH )
7720 + hw_lro_age_flush_cnt[ring_num-1]++;
7721 + else if ( (flush_reason & 0x7) == HW_LRO_NOT_IN_SEQ_FLUSH )
7722 + hw_lro_seq_flush_cnt[ring_num-1]++;
7723 + else if ( (flush_reason & 0x7) == HW_LRO_TIMESTAMP_FLUSH )
7724 + hw_lro_timestamp_flush_cnt[ring_num-1]++;
7725 + else if ( (flush_reason & 0x7) == HW_LRO_NON_RULE_FLUSH )
7726 + hw_lro_norule_flush_cnt[ring_num-1]++;
7728 + if ( flush_reason & BIT(4) )
7729 + hw_lro_agg_flush_cnt[ring_num-1]++;
7730 + else if ( flush_reason & BIT(3) )
7731 + hw_lro_age_flush_cnt[ring_num-1]++;
7732 + else if ( flush_reason & BIT(2) )
7733 + hw_lro_seq_flush_cnt[ring_num-1]++;
7734 + else if ( flush_reason & BIT(1) )
7735 + hw_lro_timestamp_flush_cnt[ring_num-1]++;
7736 + else if ( flush_reason & BIT(0) )
7737 + hw_lro_norule_flush_cnt[ring_num-1]++;
7743 +#endif /* CONFIG_RAETH_HW_LRO_REASON_DBG */
7745 +ssize_t HwLroStatsWrite(struct file *file, const char __user *buffer,
7746 + size_t count, loff_t *data)
7748 + memset(hw_lro_agg_num_cnt, 0, sizeof(hw_lro_agg_num_cnt));
7749 + memset(hw_lro_agg_size_cnt, 0, sizeof(hw_lro_agg_size_cnt));
7750 + memset(hw_lro_tot_agg_cnt, 0, sizeof(hw_lro_tot_agg_cnt));
7751 + memset(hw_lro_tot_flush_cnt, 0, sizeof(hw_lro_tot_flush_cnt));
7752 +#if defined(CONFIG_RAETH_HW_LRO_REASON_DBG)
7753 + memset(hw_lro_agg_flush_cnt, 0, sizeof(hw_lro_agg_flush_cnt));
7754 + memset(hw_lro_age_flush_cnt, 0, sizeof(hw_lro_age_flush_cnt));
7755 + memset(hw_lro_seq_flush_cnt, 0, sizeof(hw_lro_seq_flush_cnt));
7756 + memset(hw_lro_timestamp_flush_cnt, 0, sizeof(hw_lro_timestamp_flush_cnt));
7757 + memset(hw_lro_norule_flush_cnt, 0, sizeof(hw_lro_norule_flush_cnt));
7758 +#endif /* CONFIG_RAETH_HW_LRO_REASON_DBG */
7760 + printk("clear hw lro cnt table\n");
7765 +int HwLroStatsRead(struct seq_file *seq, void *v)
7769 + seq_printf(seq, "HW LRO statistic dump:\n");
7771 + /* Agg number count */
7772 + seq_printf(seq, "Cnt: RING1 | RING2 | RING3 | Total\n");
7773 + for(i=0; i<=MAX_HW_LRO_AGGR; i++) {
7774 + seq_printf(seq, " %d : %d %d %d %d\n",
7775 + i, hw_lro_agg_num_cnt[0][i], hw_lro_agg_num_cnt[1][i], hw_lro_agg_num_cnt[2][i],
7776 + hw_lro_agg_num_cnt[0][i]+hw_lro_agg_num_cnt[1][i]+hw_lro_agg_num_cnt[2][i]);
7779 + /* Total agg count */
7780 + seq_printf(seq, "Total agg: RING1 | RING2 | RING3 | Total\n");
7781 + seq_printf(seq, " %d %d %d %d\n",
7782 + hw_lro_tot_agg_cnt[0], hw_lro_tot_agg_cnt[1], hw_lro_tot_agg_cnt[2],
7783 + hw_lro_tot_agg_cnt[0]+hw_lro_tot_agg_cnt[1]+hw_lro_tot_agg_cnt[2]);
7785 + /* Total flush count */
7786 + seq_printf(seq, "Total flush: RING1 | RING2 | RING3 | Total\n");
7787 + seq_printf(seq, " %d %d %d %d\n",
7788 + hw_lro_tot_flush_cnt[0], hw_lro_tot_flush_cnt[1], hw_lro_tot_flush_cnt[2],
7789 + hw_lro_tot_flush_cnt[0]+hw_lro_tot_flush_cnt[1]+hw_lro_tot_flush_cnt[2]);
7791 + /* Avg agg count */
7792 + seq_printf(seq, "Avg agg: RING1 | RING2 | RING3 | Total\n");
7793 + seq_printf(seq, " %d %d %d %d\n",
7794 + (hw_lro_tot_flush_cnt[0]) ? hw_lro_tot_agg_cnt[0]/hw_lro_tot_flush_cnt[0] : 0,
7795 + (hw_lro_tot_flush_cnt[1]) ? hw_lro_tot_agg_cnt[1]/hw_lro_tot_flush_cnt[1] : 0,
7796 + (hw_lro_tot_flush_cnt[2]) ? hw_lro_tot_agg_cnt[2]/hw_lro_tot_flush_cnt[2] : 0,
7797 + (hw_lro_tot_flush_cnt[0]+hw_lro_tot_flush_cnt[1]+hw_lro_tot_flush_cnt[2]) ? \
7798 + ((hw_lro_tot_agg_cnt[0]+hw_lro_tot_agg_cnt[1]+hw_lro_tot_agg_cnt[2])/(hw_lro_tot_flush_cnt[0]+hw_lro_tot_flush_cnt[1]+hw_lro_tot_flush_cnt[2])) : 0
7801 + /* Statistics of aggregation size counts */
7802 + seq_printf(seq, "HW LRO flush pkt len:\n");
7803 + seq_printf(seq, " Length | RING1 | RING2 | RING3 | Total\n");
7804 + for(i=0; i<15; i++) {
7805 + seq_printf(seq, "%d~%d: %d %d %d %d\n", i*5000, (i+1)*5000,
7806 + hw_lro_agg_size_cnt[0][i], hw_lro_agg_size_cnt[1][i], hw_lro_agg_size_cnt[2][i],
7807 + hw_lro_agg_size_cnt[0][i]+hw_lro_agg_size_cnt[1][i]+hw_lro_agg_size_cnt[2][i]);
7809 +#if defined(CONFIG_RAETH_HW_LRO_REASON_DBG)
7810 + seq_printf(seq, "Flush reason: RING1 | RING2 | RING3 | Total\n");
7811 + seq_printf(seq, "AGG timeout: %d %d %d %d\n",
7812 + hw_lro_agg_flush_cnt[0], hw_lro_agg_flush_cnt[1], hw_lro_agg_flush_cnt[2],
7813 + (hw_lro_agg_flush_cnt[0]+hw_lro_agg_flush_cnt[1]+hw_lro_agg_flush_cnt[2])
7815 + seq_printf(seq, "AGE timeout: %d %d %d %d\n",
7816 + hw_lro_age_flush_cnt[0], hw_lro_age_flush_cnt[1], hw_lro_age_flush_cnt[2],
7817 + (hw_lro_age_flush_cnt[0]+hw_lro_age_flush_cnt[1]+hw_lro_age_flush_cnt[2])
7819 + seq_printf(seq, "Not in-sequence: %d %d %d %d\n",
7820 + hw_lro_seq_flush_cnt[0], hw_lro_seq_flush_cnt[1], hw_lro_seq_flush_cnt[2],
7821 + (hw_lro_seq_flush_cnt[0]+hw_lro_seq_flush_cnt[1]+hw_lro_seq_flush_cnt[2])
7823 + seq_printf(seq, "Timestamp: %d %d %d %d\n",
7824 + hw_lro_timestamp_flush_cnt[0], hw_lro_timestamp_flush_cnt[1], hw_lro_timestamp_flush_cnt[2],
7825 + (hw_lro_timestamp_flush_cnt[0]+hw_lro_timestamp_flush_cnt[1]+hw_lro_timestamp_flush_cnt[2])
7827 + seq_printf(seq, "No LRO rule: %d %d %d %d\n",
7828 + hw_lro_norule_flush_cnt[0], hw_lro_norule_flush_cnt[1], hw_lro_norule_flush_cnt[2],
7829 + (hw_lro_norule_flush_cnt[0]+hw_lro_norule_flush_cnt[1]+hw_lro_norule_flush_cnt[2])
7831 +#endif /* CONFIG_RAETH_HW_LRO_REASON_DBG */
7836 +static int hw_lro_stats_open(struct inode *inode, struct file *file)
7838 + return single_open(file, HwLroStatsRead, NULL);
7841 +static struct file_operations hw_lro_stats_fops = {
7842 + .owner = THIS_MODULE,
7843 + .open = hw_lro_stats_open,
7845 + .llseek = seq_lseek,
7846 + .write = HwLroStatsWrite,
7847 + .release = single_release
7850 +int hwlro_agg_cnt_ctrl(int par1, int par2)
7852 + SET_PDMA_RXRING_MAX_AGG_CNT(ADMA_RX_RING1, par2);
7853 + SET_PDMA_RXRING_MAX_AGG_CNT(ADMA_RX_RING2, par2);
7854 + SET_PDMA_RXRING_MAX_AGG_CNT(ADMA_RX_RING3, par2);
7858 +int hwlro_agg_time_ctrl(int par1, int par2)
7860 + SET_PDMA_RXRING_AGG_TIME(ADMA_RX_RING1, par2);
7861 + SET_PDMA_RXRING_AGG_TIME(ADMA_RX_RING2, par2);
7862 + SET_PDMA_RXRING_AGG_TIME(ADMA_RX_RING3, par2);
7866 +int hwlro_age_time_ctrl(int par1, int par2)
7868 + SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING1, par2);
7869 + SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING2, par2);
7870 + SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING3, par2);
7874 +int hwlro_pkt_int_alpha_ctrl(int par1, int par2)
7876 + END_DEVICE *ei_local = netdev_priv(dev_raether);
7878 + ei_local->hw_lro_alpha = par2;
7879 + printk("[hwlro_pkt_int_alpha_ctrl]ei_local->hw_lro_alpha = %d\n", ei_local->hw_lro_alpha);
7884 +int hwlro_threshold_ctrl(int par1, int par2)
7886 + /* bandwidth threshold setting */
7887 + SET_PDMA_LRO_BW_THRESHOLD(par2);
7891 +int hwlro_fix_setting_switch_ctrl(int par1, int par2)
7893 +#if defined (CONFIG_RAETH_HW_LRO_AUTO_ADJ_DBG)
7894 + END_DEVICE *ei_local = netdev_priv(dev_raether);
7896 + ei_local->hw_lro_fix_setting = par2;
7897 + printk("[hwlro_pkt_int_alpha_ctrl]ei_local->hw_lro_fix_setting = %d\n", ei_local->hw_lro_fix_setting);
7898 +#endif /* CONFIG_RAETH_HW_LRO_AUTO_ADJ_DBG */
7903 +ssize_t HwLroAutoTlbWrite(struct file *file, const char __user *buffer,
7904 + size_t count, loff_t *data)
7910 + char *pToken = NULL;
7911 + char *pDelimiter = " \t";
7913 + printk("[HwLroAutoTlbWrite]write parameter len = %d\n\r", (int)len);
7914 + if(len >= sizeof(buf)){
7915 + printk("input handling fail!\n");
7916 + len = sizeof(buf) - 1;
7920 + if(copy_from_user(buf, buffer, len)){
7924 + printk("[HwLroAutoTlbWrite]write parameter data = %s\n\r", buf);
7927 + pToken = strsep(&pBuf, pDelimiter);
7928 + x = NULL != pToken ? simple_strtol(pToken, NULL, 16) : 0;
7930 + pToken = strsep(&pBuf, "\t\n ");
7931 + if(pToken != NULL){
7932 + y = NULL != pToken ? simple_strtol(pToken, NULL, 16) : 0;
7933 + printk("y = 0x%08x \n\r", y);
7936 + if ( (sizeof(hw_lro_dbg_func)/sizeof(hw_lro_dbg_func[0]) > x) && NULL != hw_lro_dbg_func[x])
7938 + (*hw_lro_dbg_func[x])(x, y);
7944 +void HwLroAutoTlbDump(struct seq_file *seq, unsigned int index)
7947 + struct PDMA_LRO_AUTO_TLB_INFO pdma_lro_auto_tlb;
7948 + unsigned int tlb_info[9];
7949 + unsigned int dw_len, cnt, priority;
7950 + unsigned int entry;
7953 + index = index - 1;
7954 + entry = (index * 9) + 1;
7956 + /* read valid entries of the auto-learn table */
7957 + sysRegWrite( PDMA_FE_ALT_CF8, entry );
7959 + //seq_printf(seq, "\nEntry = %d\n", entry);
7960 + for(i=0; i<9; i++){
7961 + tlb_info[i] = sysRegRead(PDMA_FE_ALT_SEQ_CFC);
7962 + //seq_printf(seq, "tlb_info[%d] = 0x%x\n", i, tlb_info[i]);
7964 + memcpy(&pdma_lro_auto_tlb, tlb_info, sizeof(struct PDMA_LRO_AUTO_TLB_INFO));
7966 + dw_len = pdma_lro_auto_tlb.auto_tlb_info7.DW_LEN;
7967 + cnt = pdma_lro_auto_tlb.auto_tlb_info6.CNT;
7969 + if ( sysRegRead(ADMA_LRO_CTRL_DW0) & PDMA_LRO_ALT_SCORE_MODE ) /* packet count */
7971 + else /* byte count */
7972 + priority = dw_len;
7974 + /* dump valid entries of the auto-learn table */
7976 + seq_printf(seq, "\n===== TABLE Entry: %d (Act) =====\n", index);
7978 + seq_printf(seq, "\n===== TABLE Entry: %d (LRU) =====\n", index);
7979 + if( pdma_lro_auto_tlb.auto_tlb_info8.IPV4 ){
7980 + seq_printf(seq, "SIP = 0x%x:0x%x:0x%x:0x%x (IPv4)\n",
7981 + pdma_lro_auto_tlb.auto_tlb_info4.SIP3,
7982 + pdma_lro_auto_tlb.auto_tlb_info3.SIP2,
7983 + pdma_lro_auto_tlb.auto_tlb_info2.SIP1,
7984 + pdma_lro_auto_tlb.auto_tlb_info1.SIP0);
7987 + seq_printf(seq, "SIP = 0x%x:0x%x:0x%x:0x%x (IPv6)\n",
7988 + pdma_lro_auto_tlb.auto_tlb_info4.SIP3,
7989 + pdma_lro_auto_tlb.auto_tlb_info3.SIP2,
7990 + pdma_lro_auto_tlb.auto_tlb_info2.SIP1,
7991 + pdma_lro_auto_tlb.auto_tlb_info1.SIP0);
7993 + seq_printf(seq, "DIP_ID = %d\n", pdma_lro_auto_tlb.auto_tlb_info8.DIP_ID);
7994 + seq_printf(seq, "TCP SPORT = %d | TCP DPORT = %d\n",
7995 + pdma_lro_auto_tlb.auto_tlb_info0.STP,
7996 + pdma_lro_auto_tlb.auto_tlb_info0.DTP);
7997 + seq_printf(seq, "VLAN1 = %d | VLAN2 = %d | VLAN3 = %d | VLAN4 =%d \n",
7998 + pdma_lro_auto_tlb.auto_tlb_info5.VLAN_VID0,
7999 + (pdma_lro_auto_tlb.auto_tlb_info5.VLAN_VID0 << 12),
8000 + (pdma_lro_auto_tlb.auto_tlb_info5.VLAN_VID0 << 24),
8001 + pdma_lro_auto_tlb.auto_tlb_info6.VLAN_VID1);
8002 + seq_printf(seq, "TPUT = %d | FREQ = %d\n", dw_len, cnt);
8003 + seq_printf(seq, "PRIORITY = %d\n", priority);
8006 +int HwLroAutoTlbRead(struct seq_file *seq, void *v)
8009 + unsigned int regVal;
8010 + unsigned int regOp1, regOp2, regOp3, regOp4;
8011 + unsigned int agg_cnt, agg_time, age_time;
8013 + /* Read valid entries of the auto-learn table */
8014 + sysRegWrite(PDMA_FE_ALT_CF8, 0);
8015 + regVal = sysRegRead(PDMA_FE_ALT_SEQ_CFC);
8017 + seq_printf(seq, "HW LRO Auto-learn Table: (PDMA_LRO_ALT_CFC_RSEQ_DBG=0x%x)\n", regVal);
8019 + for(i = 7; i >= 0; i--)
8021 + if( regVal & (1 << i) )
8022 + HwLroAutoTlbDump(seq, i);
8025 + /* Read the agg_time/age_time/agg_cnt of LRO rings */
8026 + seq_printf(seq, "\nHW LRO Ring Settings\n");
8027 + for(i = 1; i <= 3; i++)
8029 + regOp1 = sysRegRead( LRO_RX_RING0_CTRL_DW1 + (i * 0x40) );
8030 + regOp2 = sysRegRead( LRO_RX_RING0_CTRL_DW2 + (i * 0x40) );
8031 + regOp3 = sysRegRead( LRO_RX_RING0_CTRL_DW3 + (i * 0x40) );
8032 + regOp4 = sysRegRead( ADMA_LRO_CTRL_DW2 );
8033 + agg_cnt = ((regOp3 & 0x03) << PDMA_LRO_AGG_CNT_H_OFFSET) | ((regOp2 >> PDMA_LRO_RING_AGG_CNT1_OFFSET) & 0x3f);
8034 + agg_time = (regOp2 >> PDMA_LRO_RING_AGG_OFFSET) & 0xffff;
8035 + age_time = ((regOp2 & 0x03f) << PDMA_LRO_AGE_H_OFFSET) | ((regOp1 >> PDMA_LRO_RING_AGE1_OFFSET) & 0x3ff);
8036 + seq_printf(seq, "Ring[%d]: MAX_AGG_CNT=%d, AGG_TIME=%d, AGE_TIME=%d, Threshold=%d\n",
8037 + i, agg_cnt, agg_time, age_time, regOp4);
8043 +static int hw_lro_auto_tlb_open(struct inode *inode, struct file *file)
8045 + return single_open(file, HwLroAutoTlbRead, NULL);
8048 +static struct file_operations hw_lro_auto_tlb_fops = {
8049 + .owner = THIS_MODULE,
8050 + .open = hw_lro_auto_tlb_open,
8052 + .llseek = seq_lseek,
8053 + .write = HwLroAutoTlbWrite,
8054 + .release = single_release
8056 +#endif /* CONFIG_RAETH_HW_LRO */
8058 +#if defined (CONFIG_MIPS)
/* Dump every MIPS coprocessor-0 register to the seq_file, one
 * read_32bit_cp0_register() call per line.  MIPS builds only; the
 * function braces and final "return" are on elided patch lines. */
8059 +int CP0RegRead(struct seq_file *seq, void *v)
8061 + seq_printf(seq, "CP0 Register dump --\n");
8062 + seq_printf(seq, "CP0_INDEX\t: 0x%08x\n", read_32bit_cp0_register(CP0_INDEX));
8063 + seq_printf(seq, "CP0_RANDOM\t: 0x%08x\n", read_32bit_cp0_register(CP0_RANDOM));
8064 + seq_printf(seq, "CP0_ENTRYLO0\t: 0x%08x\n", read_32bit_cp0_register(CP0_ENTRYLO0));
8065 + seq_printf(seq, "CP0_ENTRYLO1\t: 0x%08x\n", read_32bit_cp0_register(CP0_ENTRYLO1));
8066 + seq_printf(seq, "CP0_CONF\t: 0x%08x\n", read_32bit_cp0_register(CP0_CONF));
8067 + seq_printf(seq, "CP0_CONTEXT\t: 0x%08x\n", read_32bit_cp0_register(CP0_CONTEXT));
8068 + seq_printf(seq, "CP0_PAGEMASK\t: 0x%08x\n", read_32bit_cp0_register(CP0_PAGEMASK));
8069 + seq_printf(seq, "CP0_WIRED\t: 0x%08x\n", read_32bit_cp0_register(CP0_WIRED));
8070 + seq_printf(seq, "CP0_INFO\t: 0x%08x\n", read_32bit_cp0_register(CP0_INFO));
8071 + seq_printf(seq, "CP0_BADVADDR\t: 0x%08x\n", read_32bit_cp0_register(CP0_BADVADDR));
8072 + seq_printf(seq, "CP0_COUNT\t: 0x%08x\n", read_32bit_cp0_register(CP0_COUNT));
8073 + seq_printf(seq, "CP0_ENTRYHI\t: 0x%08x\n", read_32bit_cp0_register(CP0_ENTRYHI));
8074 + seq_printf(seq, "CP0_COMPARE\t: 0x%08x\n", read_32bit_cp0_register(CP0_COMPARE));
8075 + seq_printf(seq, "CP0_STATUS\t: 0x%08x\n", read_32bit_cp0_register(CP0_STATUS));
8076 + seq_printf(seq, "CP0_CAUSE\t: 0x%08x\n", read_32bit_cp0_register(CP0_CAUSE));
8077 + seq_printf(seq, "CP0_EPC\t: 0x%08x\n", read_32bit_cp0_register(CP0_EPC));
8078 + seq_printf(seq, "CP0_PRID\t: 0x%08x\n", read_32bit_cp0_register(CP0_PRID));
8079 + seq_printf(seq, "CP0_CONFIG\t: 0x%08x\n", read_32bit_cp0_register(CP0_CONFIG));
8080 + seq_printf(seq, "CP0_LLADDR\t: 0x%08x\n", read_32bit_cp0_register(CP0_LLADDR));
8081 + seq_printf(seq, "CP0_WATCHLO\t: 0x%08x\n", read_32bit_cp0_register(CP0_WATCHLO));
8082 + seq_printf(seq, "CP0_WATCHHI\t: 0x%08x\n", read_32bit_cp0_register(CP0_WATCHHI));
8083 + seq_printf(seq, "CP0_XCONTEXT\t: 0x%08x\n", read_32bit_cp0_register(CP0_XCONTEXT));
8084 + seq_printf(seq, "CP0_FRAMEMASK\t: 0x%08x\n", read_32bit_cp0_register(CP0_FRAMEMASK));
8085 + seq_printf(seq, "CP0_DIAGNOSTIC\t: 0x%08x\n", read_32bit_cp0_register(CP0_DIAGNOSTIC));
8086 + seq_printf(seq, "CP0_DEBUG\t: 0x%08x\n", read_32bit_cp0_register(CP0_DEBUG));
8087 + seq_printf(seq, "CP0_DEPC\t: 0x%08x\n", read_32bit_cp0_register(CP0_DEPC));
8088 + seq_printf(seq, "CP0_PERFORMANCE\t: 0x%08x\n", read_32bit_cp0_register(CP0_PERFORMANCE));
8089 + seq_printf(seq, "CP0_ECC\t: 0x%08x\n", read_32bit_cp0_register(CP0_ECC));
8090 + seq_printf(seq, "CP0_CACHEERR\t: 0x%08x\n", read_32bit_cp0_register(CP0_CACHEERR));
8091 + seq_printf(seq, "CP0_TAGLO\t: 0x%08x\n", read_32bit_cp0_register(CP0_TAGLO));
8092 + seq_printf(seq, "CP0_TAGHI\t: 0x%08x\n", read_32bit_cp0_register(CP0_TAGHI));
8093 + seq_printf(seq, "CP0_ERROREPC\t: 0x%08x\n", read_32bit_cp0_register(CP0_ERROREPC));
8094 + seq_printf(seq, "CP0_DESAVE\t: 0x%08x\n\n", read_32bit_cp0_register(CP0_DESAVE));
/* seq_file open hook for the CP0 register dump proc entry. */
8099 +static int cp0_reg_open(struct inode *inode, struct file *file)
8101 + return single_open(file, CP0RegRead, NULL);
/* read-only proc fops for the CP0 dump (.read = seq_read initializer
 * is on an elided patch line, 8107). */
8104 +static const struct file_operations cp0_reg_fops = {
8105 + .owner = THIS_MODULE,
8106 + .open = cp0_reg_open,
8108 + .llseek = seq_lseek,
8109 + .release = single_release
8113 +#if defined(CONFIG_RAETH_QOS)
8114 +static struct proc_dir_entry *procRaQOS, *procRaFeIntr, *procRaEswIntr;
/* interrupt counters maintained elsewhere in the raeth driver */
8115 +extern uint32_t num_of_rxdone_intr;
8116 +extern uint32_t num_of_esw_intr;
/* seq_file show routine for the QoS proc entry (body on elided
 * patch lines 8119-8123). */
8118 +int RaQOSRegRead(struct seq_file *seq, void *v)
/* open hook: bind RaQOSRegRead via single_open(). */
8124 +static int raeth_qos_open(struct inode *inode, struct file *file)
8126 + return single_open(file, RaQOSRegRead, NULL);
/* read-only proc fops (.read = seq_read line elided at 8132). */
8129 +static const struct file_operations raeth_qos_fops = {
8130 + .owner = THIS_MODULE,
8131 + .open = raeth_qos_open,
8133 + .llseek = seq_lseek,
8134 + .release = single_release
8138 +static struct proc_dir_entry *procEswCnt;
8140 +int EswCntRead(struct seq_file *seq, void *v)
8142 +#if defined (CONFIG_RALINK_MT7621) || defined (CONFIG_P5_RGMII_TO_MT7530_MODE) || defined (CONFIG_ARCH_MT7623)
8143 + unsigned int pkt_cnt = 0;
8146 + seq_printf(seq, "\n <<CPU>> \n");
8147 + seq_printf(seq, " | \n");
8148 +#if defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_MT7628)
8149 + seq_printf(seq, "+-----------------------------------------------+\n");
8150 + seq_printf(seq, "| <<PDMA>> |\n");
8151 + seq_printf(seq, "+-----------------------------------------------+\n");
8153 + seq_printf(seq, "+-----------------------------------------------+\n");
8154 + seq_printf(seq, "| <<PSE>> |\n");
8155 + seq_printf(seq, "+-----------------------------------------------+\n");
8156 + seq_printf(seq, " | \n");
8157 + seq_printf(seq, "+-----------------------------------------------+\n");
8158 + seq_printf(seq, "| <<GDMA>> |\n");
8159 +#if defined (CONFIG_RALINK_MT7620)
8160 + seq_printf(seq, "| GDMA1_TX_GPCNT : %010u (Tx Good Pkts) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x1304));
8161 + seq_printf(seq, "| GDMA1_RX_GPCNT : %010u (Rx Good Pkts) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x1324));
8162 + seq_printf(seq, "| |\n");
8163 + seq_printf(seq, "| GDMA1_TX_SKIPCNT: %010u (skip) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x1308));
8164 + seq_printf(seq, "| GDMA1_TX_COLCNT : %010u (collision) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x130c));
8165 + seq_printf(seq, "| GDMA1_RX_OERCNT : %010u (overflow) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x1328));
8166 + seq_printf(seq, "| GDMA1_RX_FERCNT : %010u (FCS error) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x132c));
8167 + seq_printf(seq, "| GDMA1_RX_SERCNT : %010u (too short) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x1330));
8168 + seq_printf(seq, "| GDMA1_RX_LERCNT : %010u (too long) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x1334));
8169 + seq_printf(seq, "| GDMA1_RX_CERCNT : %010u (l3/l4 checksum) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x1338));
8170 + seq_printf(seq, "| GDMA1_RX_FCCNT : %010u (flow control) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x133c));
8172 + seq_printf(seq, "| |\n");
8173 + seq_printf(seq, "| GDMA2_TX_GPCNT : %010u (Tx Good Pkts) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x1344));
8174 + seq_printf(seq, "| GDMA2_RX_GPCNT : %010u (Rx Good Pkts) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x1364));
8175 + seq_printf(seq, "| |\n");
8176 + seq_printf(seq, "| GDMA2_TX_SKIPCNT: %010u (skip) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x1348));
8177 + seq_printf(seq, "| GDMA2_TX_COLCNT : %010u (collision) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x134c));
8178 + seq_printf(seq, "| GDMA2_RX_OERCNT : %010u (overflow) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x1368));
8179 + seq_printf(seq, "| GDMA2_RX_FERCNT : %010u (FCS error) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x136c));
8180 + seq_printf(seq, "| GDMA2_RX_SERCNT : %010u (too short) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x1370));
8181 + seq_printf(seq, "| GDMA2_RX_LERCNT : %010u (too long) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x1374));
8182 + seq_printf(seq, "| GDMA2_RX_CERCNT : %010u (l3/l4 checksum) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x1378));
8183 + seq_printf(seq, "| GDMA2_RX_FCCNT : %010u (flow control) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x137c));
8184 +#elif defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
8185 + seq_printf(seq, "| GDMA1_RX_GBCNT : %010u (Rx Good Bytes) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x2400));
8186 + seq_printf(seq, "| GDMA1_RX_GPCNT : %010u (Rx Good Pkts) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x2408));
8187 + seq_printf(seq, "| GDMA1_RX_OERCNT : %010u (overflow error) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x2410));
8188 + seq_printf(seq, "| GDMA1_RX_FERCNT : %010u (FCS error) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x2414));
8189 + seq_printf(seq, "| GDMA1_RX_SERCNT : %010u (too short) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x2418));
8190 + seq_printf(seq, "| GDMA1_RX_LERCNT : %010u (too long) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x241C));
8191 + seq_printf(seq, "| GDMA1_RX_CERCNT : %010u (checksum error) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x2420));
8192 + seq_printf(seq, "| GDMA1_RX_FCCNT : %010u (flow control) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x2424));
8193 + seq_printf(seq, "| GDMA1_TX_SKIPCNT: %010u (about count) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x2428));
8194 + seq_printf(seq, "| GDMA1_TX_COLCNT : %010u (collision count) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x242C));
8195 + seq_printf(seq, "| GDMA1_TX_GBCNT : %010u (Tx Good Bytes) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x2430));
8196 + seq_printf(seq, "| GDMA1_TX_GPCNT : %010u (Tx Good Pkts) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x2438));
8197 + seq_printf(seq, "| |\n");
8198 + seq_printf(seq, "| GDMA2_RX_GBCNT : %010u (Rx Good Bytes) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x2440));
8199 + seq_printf(seq, "| GDMA2_RX_GPCNT : %010u (Rx Good Pkts) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x2448));
8200 + seq_printf(seq, "| GDMA2_RX_OERCNT : %010u (overflow error) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x2450));
8201 + seq_printf(seq, "| GDMA2_RX_FERCNT : %010u (FCS error) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x2454));
8202 + seq_printf(seq, "| GDMA2_RX_SERCNT : %010u (too short) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x2458));
8203 + seq_printf(seq, "| GDMA2_RX_LERCNT : %010u (too long) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x245C));
8204 + seq_printf(seq, "| GDMA2_RX_CERCNT : %010u (checksum error) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x2460));
8205 + seq_printf(seq, "| GDMA2_RX_FCCNT : %010u (flow control) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x2464));
8206 + seq_printf(seq, "| GDMA2_TX_SKIPCNT: %010u (skip) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x2468));
8207 + seq_printf(seq, "| GDMA2_TX_COLCNT : %010u (collision) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x246C));
8208 + seq_printf(seq, "| GDMA2_TX_GBCNT : %010u (Tx Good Bytes) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x2470));
8209 + seq_printf(seq, "| GDMA2_TX_GPCNT : %010u (Tx Good Pkts) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x2478));
8211 + seq_printf(seq, "| GDMA_TX_GPCNT1 : %010u (Tx Good Pkts) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x704));
8212 + seq_printf(seq, "| GDMA_RX_GPCNT1 : %010u (Rx Good Pkts) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x724));
8213 + seq_printf(seq, "| |\n");
8214 + seq_printf(seq, "| GDMA_TX_SKIPCNT1: %010u (skip) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x708));
8215 + seq_printf(seq, "| GDMA_TX_COLCNT1 : %010u (collision) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x70c));
8216 + seq_printf(seq, "| GDMA_RX_OERCNT1 : %010u (overflow) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x728));
8217 + seq_printf(seq, "| GDMA_RX_FERCNT1 : %010u (FCS error) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x72c));
8218 + seq_printf(seq, "| GDMA_RX_SERCNT1 : %010u (too short) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x730));
8219 + seq_printf(seq, "| GDMA_RX_LERCNT1 : %010u (too long) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x734));
8220 + seq_printf(seq, "| GDMA_RX_CERCNT1 : %010u (l3/l4 checksum) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x738));
8221 + seq_printf(seq, "| GDMA_RX_FCCNT1 : %010u (flow control) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x73c));
8224 + seq_printf(seq, "+-----------------------------------------------+\n");
8227 +#if defined (CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A) || \
8228 + defined (CONFIG_RALINK_MT7620)
8230 + seq_printf(seq, " ^ \n");
8231 + seq_printf(seq, " | Port6 Rx:%010u Good Pkt \n", ((p6_rx_good_cnt << 16) | (sysRegRead(RALINK_ETH_SW_BASE+0x4620)&0xFFFF)));
8232 + seq_printf(seq, " | Port6 Rx:%010u Bad Pkt \n", sysRegRead(RALINK_ETH_SW_BASE+0x4620)>>16);
8233 + seq_printf(seq, " | Port6 Tx:%010u Good Pkt \n", ((p6_tx_good_cnt << 16) | (sysRegRead(RALINK_ETH_SW_BASE+0x4610)&0xFFFF)));
8234 + seq_printf(seq, " | Port6 Tx:%010u Bad Pkt \n", sysRegRead(RALINK_ETH_SW_BASE+0x4610)>>16);
8235 +#if defined (CONFIG_RALINK_MT7620)
8237 + seq_printf(seq, " | Port7 Rx:%010u Good Pkt \n", ((p7_rx_good_cnt << 16) | (sysRegRead(RALINK_ETH_SW_BASE+0x4720)&0xFFFF)));
8238 + seq_printf(seq, " | Port7 Rx:%010u Bad Pkt \n", sysRegRead(RALINK_ETH_SW_BASE+0x4720)>>16);
8239 + seq_printf(seq, " | Port7 Tx:%010u Good Pkt \n", ((p7_tx_good_cnt << 16) | (sysRegRead(RALINK_ETH_SW_BASE+0x4710)&0xFFFF)));
8240 + seq_printf(seq, " | Port7 Tx:%010u Bad Pkt \n", sysRegRead(RALINK_ETH_SW_BASE+0x4710)>>16);
8242 + seq_printf(seq, "+---------------------v-------------------------+\n");
8243 + seq_printf(seq, "| P6 |\n");
8244 + seq_printf(seq, "| <<10/100/1000 Embedded Switch>> |\n");
8245 + seq_printf(seq, "| P0 P1 P2 P3 P4 P5 |\n");
8246 + seq_printf(seq, "+-----------------------------------------------+\n");
8247 + seq_printf(seq, " | | | | | | \n");
8248 +#elif defined (CONFIG_RALINK_RT3883) || defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
8249 + /* no built-in switch */
8251 + seq_printf(seq, " ^ \n");
8252 + seq_printf(seq, " | Port6 Rx:%08u Good Pkt \n", sysRegRead(RALINK_ETH_SW_BASE+0xE0)&0xFFFF);
8253 + seq_printf(seq, " | Port6 Tx:%08u Good Pkt \n", sysRegRead(RALINK_ETH_SW_BASE+0xE0)>>16);
8254 + seq_printf(seq, "+---------------------v-------------------------+\n");
8255 + seq_printf(seq, "| P6 |\n");
8256 + seq_printf(seq, "| <<10/100 Embedded Switch>> |\n");
8257 + seq_printf(seq, "| P0 P1 P2 P3 P4 P5 |\n");
8258 + seq_printf(seq, "+-----------------------------------------------+\n");
8259 + seq_printf(seq, " | | | | | | \n");
8262 +#if defined (CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A) || \
8263 + defined (CONFIG_RALINK_MT7620)
8265 + seq_printf(seq, "Port0 Good RX=%010u Tx=%010u (Bad Rx=%010u Tx=%010u)\n", ((p0_rx_good_cnt << 16) | (sysRegRead(RALINK_ETH_SW_BASE+0x4020)&0xFFFF)), ((p0_tx_good_cnt << 16)| (sysRegRead(RALINK_ETH_SW_BASE+0x4010)&0xFFFF)), sysRegRead(RALINK_ETH_SW_BASE+0x4020)>>16, sysRegRead(RALINK_ETH_SW_BASE+0x4010)>>16);
8267 + seq_printf(seq, "Port1 Good RX=%010u Tx=%010u (Bad Rx=%010u Tx=%010u)\n", ((p1_rx_good_cnt << 16) | (sysRegRead(RALINK_ETH_SW_BASE+0x4120)&0xFFFF)), ((p1_tx_good_cnt << 16)| (sysRegRead(RALINK_ETH_SW_BASE+0x4110)&0xFFFF)), sysRegRead(RALINK_ETH_SW_BASE+0x4120)>>16, sysRegRead(RALINK_ETH_SW_BASE+0x4110)>>16);
8269 + seq_printf(seq, "Port2 Good RX=%010u Tx=%010u (Bad Rx=%010u Tx=%010u)\n", ((p2_rx_good_cnt << 16) | (sysRegRead(RALINK_ETH_SW_BASE+0x4220)&0xFFFF)), ((p2_tx_good_cnt << 16)| (sysRegRead(RALINK_ETH_SW_BASE+0x4210)&0xFFFF)), sysRegRead(RALINK_ETH_SW_BASE+0x4220)>>16, sysRegRead(RALINK_ETH_SW_BASE+0x4210)>>16);
8271 + seq_printf(seq, "Port3 Good RX=%010u Tx=%010u (Bad Rx=%010u Tx=%010u)\n", ((p3_rx_good_cnt << 16) | (sysRegRead(RALINK_ETH_SW_BASE+0x4320)&0xFFFF)), ((p3_tx_good_cnt << 16)| (sysRegRead(RALINK_ETH_SW_BASE+0x4310)&0xFFFF)), sysRegRead(RALINK_ETH_SW_BASE+0x4320)>>16, sysRegRead(RALINK_ETH_SW_BASE+0x4310)>>16);
8273 + seq_printf(seq, "Port4 Good RX=%010u Tx=%010u (Bad Rx=%010u Tx=%010u)\n", ((p4_rx_good_cnt << 16) | (sysRegRead(RALINK_ETH_SW_BASE+0x4420)&0xFFFF)), ((p4_tx_good_cnt << 16)| (sysRegRead(RALINK_ETH_SW_BASE+0x4410)&0xFFFF)), sysRegRead(RALINK_ETH_SW_BASE+0x4420)>>16, sysRegRead(RALINK_ETH_SW_BASE+0x4410)>>16);
8275 + seq_printf(seq, "Port5 Good RX=%010u Tx=%010u (Bad Rx=%010u Tx=%010u)\n", ((p5_rx_good_cnt << 16) | (sysRegRead(RALINK_ETH_SW_BASE+0x4520)&0xFFFF)), ((p5_tx_good_cnt << 16)| (sysRegRead(RALINK_ETH_SW_BASE+0x4510)&0xFFFF)), sysRegRead(RALINK_ETH_SW_BASE+0x4520)>>16, sysRegRead(RALINK_ETH_SW_BASE+0x4510)>>16);
8277 + seq_printf(seq, "Port0 KBytes RX=%010u Tx=%010u \n", ((p0_rx_byte_cnt << 22) + (sysRegRead(RALINK_ETH_SW_BASE+0x4028) >> 10)), ((p0_tx_byte_cnt << 22) + (sysRegRead(RALINK_ETH_SW_BASE+0x4018) >> 10)));
8279 + seq_printf(seq, "Port1 KBytes RX=%010u Tx=%010u \n", ((p1_rx_byte_cnt << 22) + (sysRegRead(RALINK_ETH_SW_BASE+0x4128) >> 10)), ((p1_tx_byte_cnt << 22) + (sysRegRead(RALINK_ETH_SW_BASE+0x4118) >> 10)));
8281 + seq_printf(seq, "Port2 KBytes RX=%010u Tx=%010u \n", ((p2_rx_byte_cnt << 22) + (sysRegRead(RALINK_ETH_SW_BASE+0x4228) >> 10)), ((p2_tx_byte_cnt << 22) + (sysRegRead(RALINK_ETH_SW_BASE+0x4218) >> 10)));
8283 + seq_printf(seq, "Port3 KBytes RX=%010u Tx=%010u \n", ((p3_rx_byte_cnt << 22) + (sysRegRead(RALINK_ETH_SW_BASE+0x4328) >> 10)), ((p3_tx_byte_cnt << 22) + (sysRegRead(RALINK_ETH_SW_BASE+0x4318) >> 10)));
8285 + seq_printf(seq, "Port4 KBytes RX=%010u Tx=%010u \n", ((p4_rx_byte_cnt << 22) + (sysRegRead(RALINK_ETH_SW_BASE+0x4428) >> 10)), ((p4_tx_byte_cnt << 22) + (sysRegRead(RALINK_ETH_SW_BASE+0x4418) >> 10)));
8287 + seq_printf(seq, "Port5 KBytes RX=%010u Tx=%010u \n", ((p5_rx_byte_cnt << 22) + (sysRegRead(RALINK_ETH_SW_BASE+0x4528) >> 10)), ((p5_tx_byte_cnt << 22) + (sysRegRead(RALINK_ETH_SW_BASE+0x4518) >> 10)));
8289 +#if defined (CONFIG_P5_RGMII_TO_MT7530_MODE)
8290 +#define DUMP_EACH_PORT(base) \
8291 + for(i=0; i < 7;i++) { \
8292 + mii_mgr_read(31, (base) + (i*0x100), &pkt_cnt); \
8293 + seq_printf(seq, "%8u ", pkt_cnt); \
8295 + seq_printf(seq, "\n");
8296 + seq_printf(seq, "========================================[MT7530] READ CLEAR========================\n");
8298 + seq_printf(seq, "===================== %8s %8s %8s %8s %8s %8s %8s\n","Port0", "Port1", "Port2", "Port3", "Port4", "Port5", "Port6");
8299 + seq_printf(seq, "Tx Drop Packet :"); DUMP_EACH_PORT(0x4000);
8300 + //seq_printf(seq, "Tx CRC Error :"); DUMP_EACH_PORT(0x4004);
8301 + seq_printf(seq, "Tx Unicast Packet :"); DUMP_EACH_PORT(0x4008);
8302 + seq_printf(seq, "Tx Multicast Packet :"); DUMP_EACH_PORT(0x400C);
8303 + seq_printf(seq, "Tx Broadcast Packet :"); DUMP_EACH_PORT(0x4010);
8304 + //seq_printf(seq, "Tx Collision Event :"); DUMP_EACH_PORT(0x4014);
8305 + seq_printf(seq, "Tx Pause Packet :"); DUMP_EACH_PORT(0x402C);
8306 + seq_printf(seq, "Rx Drop Packet :"); DUMP_EACH_PORT(0x4060);
8307 + seq_printf(seq, "Rx Filtering Packet :"); DUMP_EACH_PORT(0x4064);
8308 + seq_printf(seq, "Rx Unicast Packet :"); DUMP_EACH_PORT(0x4068);
8309 + seq_printf(seq, "Rx Multicast Packet :"); DUMP_EACH_PORT(0x406C);
8310 + seq_printf(seq, "Rx Broadcast Packet :"); DUMP_EACH_PORT(0x4070);
8311 + seq_printf(seq, "Rx Alignment Error :"); DUMP_EACH_PORT(0x4074);
8312 + seq_printf(seq, "Rx CRC Error :"); DUMP_EACH_PORT(0x4078);
8313 + seq_printf(seq, "Rx Undersize Error :"); DUMP_EACH_PORT(0x407C);
8314 + //seq_printf(seq, "Rx Fragment Error :"); DUMP_EACH_PORT(0x4080);
8315 + //seq_printf(seq, "Rx Oversize Error :"); DUMP_EACH_PORT(0x4084);
8316 + //seq_printf(seq, "Rx Jabber Error :"); DUMP_EACH_PORT(0x4088);
8317 + seq_printf(seq, "Rx Pause Packet :"); DUMP_EACH_PORT(0x408C);
8318 + mii_mgr_write(31, 0x4fe0, 0xf0);
8319 + mii_mgr_write(31, 0x4fe0, 0x800000f0);
8323 +#elif defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_MT7628)
8324 + seq_printf(seq, "Port0 Good Pkt Cnt: RX=%08u Tx=%08u (Bad Pkt Cnt: Rx=%08u Tx=%08u)\n", sysRegRead(RALINK_ETH_SW_BASE+0xE8)&0xFFFF,sysRegRead(RALINK_ETH_SW_BASE+0x150)&0xFFFF,sysRegRead(RALINK_ETH_SW_BASE+0xE8)>>16, sysRegRead(RALINK_ETH_SW_BASE+0x150)>>16);
8326 + seq_printf(seq, "Port1 Good Pkt Cnt: RX=%08u Tx=%08u (Bad Pkt Cnt: Rx=%08u Tx=%08u)\n", sysRegRead(RALINK_ETH_SW_BASE+0xEC)&0xFFFF,sysRegRead(RALINK_ETH_SW_BASE+0x154)&0xFFFF,sysRegRead(RALINK_ETH_SW_BASE+0xEC)>>16, sysRegRead(RALINK_ETH_SW_BASE+0x154)>>16);
8328 + seq_printf(seq, "Port2 Good Pkt Cnt: RX=%08u Tx=%08u (Bad Pkt Cnt: Rx=%08u Tx=%08u)\n", sysRegRead(RALINK_ETH_SW_BASE+0xF0)&0xFFFF,sysRegRead(RALINK_ETH_SW_BASE+0x158)&0xFFFF,sysRegRead(RALINK_ETH_SW_BASE+0xF0)>>16, sysRegRead(RALINK_ETH_SW_BASE+0x158)>>16);
8330 + seq_printf(seq, "Port3 Good Pkt Cnt: RX=%08u Tx=%08u (Bad Pkt Cnt: Rx=%08u Tx=%08u)\n", sysRegRead(RALINK_ETH_SW_BASE+0xF4)&0xFFFF,sysRegRead(RALINK_ETH_SW_BASE+0x15C)&0xFFFF,sysRegRead(RALINK_ETH_SW_BASE+0xF4)>>16, sysRegRead(RALINK_ETH_SW_BASE+0x15c)>>16);
8332 + seq_printf(seq, "Port4 Good Pkt Cnt: RX=%08u Tx=%08u (Bad Pkt Cnt: Rx=%08u Tx=%08u)\n", sysRegRead(RALINK_ETH_SW_BASE+0xF8)&0xFFFF,sysRegRead(RALINK_ETH_SW_BASE+0x160)&0xFFFF,sysRegRead(RALINK_ETH_SW_BASE+0xF8)>>16, sysRegRead(RALINK_ETH_SW_BASE+0x160)>>16);
8334 + seq_printf(seq, "Port5 Good Pkt Cnt: RX=%08u Tx=%08u (Bad Pkt Cnt: Rx=%08u Tx=%08u)\n", sysRegRead(RALINK_ETH_SW_BASE+0xFC)&0xFFFF,sysRegRead(RALINK_ETH_SW_BASE+0x164)&0xFFFF,sysRegRead(RALINK_ETH_SW_BASE+0xFC)>>16, sysRegRead(RALINK_ETH_SW_BASE+0x164)>>16);
8335 +#elif defined (CONFIG_RALINK_RT3883)
8336 + /* no built-in switch */
8337 +#elif defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
8339 +#define DUMP_EACH_PORT(base) \
8340 + for(i=0; i < 7;i++) { \
8341 + mii_mgr_read(31, (base) + (i*0x100), &pkt_cnt); \
8342 + seq_printf(seq, "%8u ", pkt_cnt); \
8344 + seq_printf(seq, "\n");
8346 +#if defined (CONFIG_RALINK_MT7621) /* TODO: need to update to use MT7530 compiler flag */
	/* FIX: read SYSCFG0 (0xbe00000c) first, then test bit 16 (the MCM
	 * strap: MT7530 switch integrated as multi-chip module).  The
	 * original masked the register *address* -- 0xbe00000c & (1<<16)
	 * evaluates to 0 -- so it always read address 0 and the MCM
	 * detection could never work. */
8347 +	if(sysRegRead(0xbe00000c) & (1<<16))//MCM
8350 + seq_printf(seq, "===================== %8s %8s %8s %8s %8s %8s %8s\n","Port0", "Port1", "Port2", "Port3", "Port4", "Port5", "Port6");
8351 + seq_printf(seq, "Tx Drop Packet :"); DUMP_EACH_PORT(0x4000);
8352 + seq_printf(seq, "Tx CRC Error :"); DUMP_EACH_PORT(0x4004);
8353 + seq_printf(seq, "Tx Unicast Packet :"); DUMP_EACH_PORT(0x4008);
8354 + seq_printf(seq, "Tx Multicast Packet :"); DUMP_EACH_PORT(0x400C);
8355 + seq_printf(seq, "Tx Broadcast Packet :"); DUMP_EACH_PORT(0x4010);
8356 + seq_printf(seq, "Tx Collision Event :"); DUMP_EACH_PORT(0x4014);
8357 + seq_printf(seq, "Tx Pause Packet :"); DUMP_EACH_PORT(0x402C);
8358 + seq_printf(seq, "Rx Drop Packet :"); DUMP_EACH_PORT(0x4060);
8359 + seq_printf(seq, "Rx Filtering Packet :"); DUMP_EACH_PORT(0x4064);
8360 + seq_printf(seq, "Rx Unicast Packet :"); DUMP_EACH_PORT(0x4068);
8361 + seq_printf(seq, "Rx Multicast Packet :"); DUMP_EACH_PORT(0x406C);
8362 + seq_printf(seq, "Rx Broadcast Packet :"); DUMP_EACH_PORT(0x4070);
8363 + seq_printf(seq, "Rx Alignment Error :"); DUMP_EACH_PORT(0x4074);
8364 + seq_printf(seq, "Rx CRC Error :"); DUMP_EACH_PORT(0x4078);
8365 + seq_printf(seq, "Rx Undersize Error :"); DUMP_EACH_PORT(0x407C);
8366 + seq_printf(seq, "Rx Fragment Error :"); DUMP_EACH_PORT(0x4080);
8367 + seq_printf(seq, "Rx Oversize Error :"); DUMP_EACH_PORT(0x4084);
8368 + seq_printf(seq, "Rx Jabber Error :"); DUMP_EACH_PORT(0x4088);
8369 + seq_printf(seq, "Rx Pause Packet :"); DUMP_EACH_PORT(0x408C);
8370 + mii_mgr_write(31, 0x4fe0, 0xf0);
8371 + mii_mgr_write(31, 0x4fe0, 0x800000f0);
8373 +#if defined (CONFIG_RALINK_MT7621) /* TODO: need to update to use MT7530 compiler flag */
8375 + seq_printf(seq, "no built-in switch\n");
8379 +#else /* RT305x, RT3352 */
8380 + seq_printf(seq, "Port0: Good Pkt Cnt: RX=%08u (Bad Pkt Cnt: Rx=%08u)\n", sysRegRead(RALINK_ETH_SW_BASE+0xE8)&0xFFFF,sysRegRead(RALINK_ETH_SW_BASE+0xE8)>>16);
8381 + seq_printf(seq, "Port1: Good Pkt Cnt: RX=%08u (Bad Pkt Cnt: Rx=%08u)\n", sysRegRead(RALINK_ETH_SW_BASE+0xEC)&0xFFFF,sysRegRead(RALINK_ETH_SW_BASE+0xEC)>>16);
8382 + seq_printf(seq, "Port2: Good Pkt Cnt: RX=%08u (Bad Pkt Cnt: Rx=%08u)\n", sysRegRead(RALINK_ETH_SW_BASE+0xF0)&0xFFFF,sysRegRead(RALINK_ETH_SW_BASE+0xF0)>>16);
8383 + seq_printf(seq, "Port3: Good Pkt Cnt: RX=%08u (Bad Pkt Cnt: Rx=%08u)\n", sysRegRead(RALINK_ETH_SW_BASE+0xF4)&0xFFFF,sysRegRead(RALINK_ETH_SW_BASE+0xF4)>>16);
8384 + seq_printf(seq, "Port4: Good Pkt Cnt: RX=%08u (Bad Pkt Cnt: Rx=%08u)\n", sysRegRead(RALINK_ETH_SW_BASE+0xF8)&0xFFFF,sysRegRead(RALINK_ETH_SW_BASE+0xF8)>>16);
8385 + seq_printf(seq, "Port5: Good Pkt Cnt: RX=%08u (Bad Pkt Cnt: Rx=%08u)\n", sysRegRead(RALINK_ETH_SW_BASE+0xFC)&0xFFFF,sysRegRead(RALINK_ETH_SW_BASE+0xFC)>>16);
8387 + seq_printf(seq, "\n");
/* seq_file open hook for the embedded-switch counter proc entry. */
8392 +static int switch_count_open(struct inode *inode, struct file *file)
8394 + return single_open(file, EswCntRead, NULL);
/* read-only proc fops for the switch counters (.read = seq_read
 * initializer is on an elided patch line, 8400). */
8397 +static const struct file_operations switch_count_fops = {
8398 + .owner = THIS_MODULE,
8399 + .open = switch_count_open,
8401 + .llseek = seq_lseek,
8402 + .release = single_release
8405 +#if defined (CONFIG_ETHTOOL) /*&& defined (CONFIG_RAETH_ROUTER)*/
8407 + * proc write procedure
/* proc write handler: set the MII PHY id ethtool uses for GMAC1
 * ("eth2").  Input is either "<ifname> <id>" (leading letter) or a
 * bare decimal id.  Local buf/if_name declarations and the early
 * returns sit on elided patch lines.
 * NOTE(review): confirm the elided lines clamp 'count' before
 * copy_from_user into the fixed-size buf, and that sscanf's unbounded
 * "%s" cannot overflow if_name.
 * NOTE(review): dev_get_by_name() takes a device reference; confirm
 * the elided tail releases it with dev_put(). */
8409 +static ssize_t change_phyid(struct file *file, const char __user *buffer,
8410 + size_t count, loff_t *data)
8413 + struct net_device *cur_dev_p;
8414 + END_DEVICE *ei_local;
8416 + unsigned int phy_id;
8420 + memset(buf, 0, 32);
8421 + if (copy_from_user(buf, buffer, count))
8424 + /* determine interface name */
8425 + strcpy(if_name, DEV_NAME); /* "eth2" by default */
8426 + if(isalpha(buf[0]))
8427 + sscanf(buf, "%s %d", if_name, &phy_id);
8429 + phy_id = simple_strtol(buf, 0, 10);
8430 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
8431 + cur_dev_p = dev_get_by_name(&init_net, DEV_NAME);
8433 + cur_dev_p = dev_get_by_name(DEV_NAME);
8435 + if (cur_dev_p == NULL)
8438 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
8439 + ei_local = netdev_priv(cur_dev_p);
8441 + ei_local = cur_dev_p->priv;
8443 + ei_local->mii_info.phy_id = (unsigned char)phy_id;
8447 +#if defined(CONFIG_PSEUDO_SUPPORT)
/* proc write handler: set the MII PHY id for the GMAC2 pseudo
 * interface ("eth3").  Accepts "<ifname> <id>" or a bare decimal id;
 * mirrors change_phyid() above.
 * NOTE(review): dev_get_by_name() takes a device reference; confirm
 * the elided tail of this function drops it with dev_put(). */
8448 +static ssize_t change_gmac2_phyid(struct file *file, const char __user *buffer,
8449 + size_t count, loff_t *data)
8452 + struct net_device *cur_dev_p;
8453 + PSEUDO_ADAPTER *pPseudoAd;
8455 + unsigned int phy_id;
8459 + memset(buf, 0, 32);
8460 + if (copy_from_user(buf, buffer, count))
8462 + /* determine interface name */
8463 + strcpy(if_name, DEV2_NAME); /* "eth3" by default */
8464 + if(isalpha(buf[0]))
8465 + sscanf(buf, "%s %d", if_name, &phy_id);
8467 + phy_id = simple_strtol(buf, 0, 10);
8469 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
8470 + cur_dev_p = dev_get_by_name(&init_net, DEV2_NAME);
	/* FIX: the pre-2.6.36 branch referenced the undeclared identifier
	 * "DEV2_NAMEj" (typo) and could not compile on old kernels.  Use
	 * DEV2_NAME, matching change_phyid() above. */
8472 + cur_dev_p = dev_get_by_name(DEV2_NAME);
8474 + if (cur_dev_p == NULL)
8476 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
8477 + pPseudoAd = netdev_priv(cur_dev_p);
8479 + pPseudoAd = cur_dev_p->priv;
8481 + pPseudoAd->mii_info.phy_id = (unsigned char)phy_id;
/* write-only proc fops: GMAC2 PHY-id control (closing "};" is on an
 * elided patch line). */
8485 +static struct file_operations gmac2_fops = {
8486 + .owner = THIS_MODULE,
8487 + .write = change_gmac2_phyid
/* seq_file open hook for the gmac proc entry: binds RegReadMain
 * (frame-engine register dump defined elsewhere in this file). */
8492 +static int gmac_open(struct inode *inode, struct file *file)
8494 + return single_open(file, RegReadMain, NULL);
/* proc fops for the gmac register dump; writable only with ethtool
 * support (write routes to change_phyid).  .read = seq_read sits on
 * an elided patch line (8500).
 * NOTE(review): could be "static const" like cp0_reg_fops. */
8497 +static struct file_operations gmac_fops = {
8498 + .owner = THIS_MODULE,
8499 + .open = gmac_open,
8501 + .llseek = seq_lseek,
8502 +#if defined (CONFIG_ETHTOOL)
8503 + .write = change_phyid,
8505 + .release = single_release
8508 +#if defined (TASKLET_WORKQUEUE_SW)
/* tasklet/workqueue switch state shared with the raeth core */
8509 +extern int init_schedule;
8510 +extern int working_schedule;
/* Report whether raeth was initialized with, and currently runs on,
 * a workqueue (1) or a tasklet (other values).
 * NOTE(review): "workqueque" is a typo in the user-visible strings;
 * left untouched here since it is runtime output. */
8511 +static int ScheduleRead(struct seq_file *seq, void *v)
8513 + if (init_schedule == 1)
8514 + seq_printf(seq, "Initialize Raeth with workqueque<%d>\n", init_schedule);
8516 + seq_printf(seq, "Initialize Raeth with tasklet<%d>\n", init_schedule);
8517 + if (working_schedule == 1)
8518 + seq_printf(seq, "Raeth is running at workqueque<%d>\n", working_schedule);
8520 + seq_printf(seq, "Raeth is running at tasklet<%d>\n", working_schedule);
/* proc write: parse a decimal value into init_schedule (takes effect
 * at the next raeth init, per the printk below).
 * NOTE(review): buf's declaration and any bound check on 'count' are
 * on elided patch lines -- confirm count is clamped before
 * copy_from_user so the on-stack buffer cannot be overflowed. */
8525 +static ssize_t ScheduleWrite(struct file *file, const char __user *buffer,
8526 + size_t count, loff_t *data)
8531 + if (copy_from_user(buf, buffer, count))
8533 + old = init_schedule;
8534 + init_schedule = simple_strtol(buf, 0, 10);
8535 + printk("Change Raeth initial schedule from <%d> to <%d>\n! Not running schedule at present !\n",
8536 + old, init_schedule);
/* seq_file open hook for the schedule-switch proc entry. */
8541 +static int schedule_switch_open(struct inode *inode, struct file *file)
8543 + return single_open(file, ScheduleRead, NULL);
/* read/write proc fops for the tasklet/workqueue switch (.read =
 * seq_read initializer is on an elided patch line, 8549). */
8546 +static const struct file_operations schedule_sw_fops = {
8547 + .owner = THIS_MODULE,
8548 + .open = schedule_switch_open,
8550 + .write = ScheduleWrite,
8551 + .llseek = seq_lseek,
8552 + .release = single_release
8556 +#if defined(CONFIG_RAETH_PDMA_DVT)
/* Show the current PDMA design-verification-test config words via
 * the pdma_dvt_get_* accessors (declared in raether_pdma_dvt.h). */
8557 +static int PdmaDvtRead(struct seq_file *seq, void *v)
8559 + seq_printf(seq, "g_pdma_dvt_show_config = 0x%x\n", pdma_dvt_get_show_config());
8560 + seq_printf(seq, "g_pdma_dvt_rx_test_config = 0x%x\n", pdma_dvt_get_rx_test_config());
8561 + seq_printf(seq, "g_pdma_dvt_tx_test_config = 0x%x\n", pdma_dvt_get_tx_test_config());
/* seq_file open hook for the PDMA DVT proc entry. */
8566 +static int PdmaDvtOpen(struct inode *inode, struct file *file)
8568 + return single_open(file, PdmaDvtRead, NULL);
/* proc write: parse up to two hex tokens "x [y]" and dispatch to the
 * debug handler table pdma_dvt_dbg_func[x](x, y).  Declarations of
 * buf/len/pBuf/x/y are on elided patch lines.
 * NOTE(review): the dispatch check only enforces an upper bound on x;
 * if x's (elided) declaration is a signed type, a negative command id
 * would pass "table_size > x" and index out of bounds -- confirm the
 * declaration or add an "x >= 0" check. */
8571 +static ssize_t PdmaDvtWrite(struct file *file, const char __user *buffer,
8572 + size_t count, loff_t *data)
8578 + char *pToken = NULL;
8579 + char *pDelimiter = " \t";
8581 + printk("write parameter len = %d\n\r", (int)len);
/* clamp the copy length so the on-stack buffer stays NUL-terminable */
8582 + if(len >= sizeof(buf)){
8583 + printk("input handling fail!\n");
8584 + len = sizeof(buf) - 1;
8588 + if(copy_from_user(buf, buffer, len)){
8592 + printk("write parameter data = %s\n\r", buf);
/* first token: command id (hex) */
8595 + pToken = strsep(&pBuf, pDelimiter);
8596 + x = NULL != pToken ? simple_strtol(pToken, NULL, 16) : 0;
/* optional second token: argument (hex) */
8598 + pToken = strsep(&pBuf, "\t\n ");
8599 + if(pToken != NULL){
8600 + y = NULL != pToken ? simple_strtol(pToken, NULL, 16) : 0;
8601 + printk("y = 0x%08x \n\r", y);
8604 + if ( (sizeof(pdma_dvt_dbg_func)/sizeof(pdma_dvt_dbg_func[0]) > x) && NULL != pdma_dvt_dbg_func[x])
8606 + (*pdma_dvt_dbg_func[x])(x, y);
8610 + printk("no handler defined for command id(0x%08x)\n\r", x);
8613 + printk("x(0x%08x), y(0x%08x)\n", x, y);
/* proc fops for the PDMA DVT control file (.read/.llseek/.release
 * initializers are on elided patch lines 8621/8623). */
8618 +static const struct file_operations pdma_dev_sw_fops = {
8619 + .owner = THIS_MODULE,
8620 + .open = PdmaDvtOpen,
8622 + .write = PdmaDvtWrite
8624 +#endif //#if defined(CONFIG_RAETH_PDMA_DVT)
8626 +int debug_proc_init(void)
8628 + if (procRegDir == NULL)
8629 + procRegDir = proc_mkdir(PROCREG_DIR, NULL);
8631 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
8632 + if ((procGmac = create_proc_entry(PROCREG_GMAC, 0, procRegDir)))
8633 + procGmac->proc_fops = &gmac_fops;
8636 + if (!(procGmac = proc_create(PROCREG_GMAC, 0, procRegDir, &gmac_fops)))
8638 + printk("!! FAIL to create %s PROC !!\n", PROCREG_GMAC);
8639 +#if defined (CONFIG_ETHTOOL) /*&& defined (CONFIG_RAETH_ROUTER)*/
8640 +#if defined(CONFIG_PSEUDO_SUPPORT)
8641 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
8642 + if ((procGmac2 = create_proc_entry(PROCREG_GMAC2, 0, procRegDir)))
8643 + procGmac2->proc_fops = &gmac2_fops;
8646 + if (!(procGmac2 = proc_create(PROCREG_GMAC2, 0, procRegDir, &gmac2_fops)))
8648 + printk("!! FAIL to create %s PROC !!\n", PROCREG_GMAC2);
8652 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
8653 + if ((procSkbFree = create_proc_entry(PROCREG_SKBFREE, 0, procRegDir)))
8654 + procSkbFree->proc_fops = &skb_free_fops;
8657 + if (!(procSkbFree = proc_create(PROCREG_SKBFREE, 0, procRegDir, &skb_free_fops)))
8659 + printk("!! FAIL to create %s PROC !!\n", PROCREG_SKBFREE);
8661 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
8662 + if ((procTxRing = create_proc_entry(PROCREG_TXRING, 0, procRegDir)))
8663 + procTxRing->proc_fops = &tx_ring_fops;
8666 + if (!(procTxRing = proc_create(PROCREG_TXRING, 0, procRegDir, &tx_ring_fops)))
8668 + printk("!! FAIL to create %s PROC !!\n", PROCREG_TXRING);
8670 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
8671 + if ((procRxRing = create_proc_entry(PROCREG_RXRING, 0, procRegDir)))
8672 + procRxRing->proc_fops = &rx_ring_fops;
8675 + if (!(procRxRing = proc_create(PROCREG_RXRING, 0, procRegDir, &rx_ring_fops)))
8677 + printk("!! FAIL to create %s PROC !!\n", PROCREG_RXRING);
8679 +#if defined (CONFIG_RAETH_HW_LRO) || defined (CONFIG_RAETH_MULTIPLE_RX_RING)
8680 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
8681 + if ((procRxRing1 = create_proc_entry(PROCREG_RXRING1, 0, procRegDir)))
8682 + procRxRing1->proc_fops = &rx_ring1_fops;
8685 + if (!(procRxRing1 = proc_create(PROCREG_RXRING1, 0, procRegDir, &rx_ring1_fops)))
8687 + printk("!! FAIL to create %s PROC !!\n", PROCREG_RXRING1);
8689 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
8690 + if ((procRxRing2 = create_proc_entry(PROCREG_RXRING2, 0, procRegDir)))
8691 + procRxRing2->proc_fops = &rx_ring2_fops;
8694 + if (!(procRxRing2 = proc_create(PROCREG_RXRING2, 0, procRegDir, &rx_ring2_fops)))
8696 + printk("!! FAIL to create %s PROC !!\n", PROCREG_RXRING2);
8698 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
8699 + if ((procRxRing3 = create_proc_entry(PROCREG_RXRING3, 0, procRegDir)))
8700 + procRxRing3->proc_fops = &rx_ring3_fops;
8703 + if (!(procRxRing3 = proc_create(PROCREG_RXRING3, 0, procRegDir, &rx_ring3_fops)))
8705 + printk("!! FAIL to create %s PROC !!\n", PROCREG_RXRING3);
8706 +#endif /* CONFIG_RAETH_HW_LRO */
8708 +#if defined (CONFIG_MIPS)
8709 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
8710 + if ((procSysCP0 = create_proc_entry(PROCREG_CP0, 0, procRegDir)))
8711 + procSysCP0->proc_fops = &cp0_reg_fops;
8714 + if (!(procSysCP0 = proc_create(PROCREG_CP0, 0, procRegDir, &cp0_reg_fops)))
8716 + printk("!! FAIL to create %s PROC !!\n", PROCREG_CP0);
8719 +#if defined(CONFIG_RAETH_TSO)
8720 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
8721 + if ((procNumOfTxd = create_proc_entry(PROCREG_NUM_OF_TXD, 0, procRegDir)))
8722 + procNumOfTxd->proc_fops = &tso_txd_num_fops;
8725 + if (!(procNumOfTxd = proc_create(PROCREG_NUM_OF_TXD, 0, procRegDir, &tso_txd_num_fops)))
8727 + printk("!! FAIL to create %s PROC !!\n", PROCREG_NUM_OF_TXD);
8729 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
8730 + if ((procTsoLen = create_proc_entry(PROCREG_TSO_LEN, 0, procRegDir)))
8731 + procTsoLen->proc_fops = &tso_len_fops;
8734 + if (!(procTsoLen = proc_create(PROCREG_TSO_LEN, 0, procRegDir, &tso_len_fops)))
8736 + printk("!! FAIL to create %s PROC !!\n", PROCREG_TSO_LEN);
8739 +#if defined(CONFIG_RAETH_LRO)
8740 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
8741 + if ((procLroStats = create_proc_entry(PROCREG_LRO_STATS, 0, procRegDir)))
8742 + procLroStats->proc_fops = &lro_stats_fops;
8745 + if (!(procLroStats = proc_create(PROCREG_LRO_STATS, 0, procRegDir, &lro_stats_fops)))
8747 + printk("!! FAIL to create %s PROC !!\n", PROCREG_LRO_STATS);
8750 +#if defined(CONFIG_RAETH_HW_LRO)
8751 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
8752 + if ((procHwLroStats = create_proc_entry(PROCREG_HW_LRO_STATS, 0, procRegDir)))
8753 + procHwLroStats->proc_fops = &hw_lro_stats_fops;
8756 + if (!(procHwLroStats = proc_create(PROCREG_HW_LRO_STATS, 0, procRegDir, &hw_lro_stats_fops)))
8758 + printk("!! FAIL to create %s PROC !!\n", PROCREG_HW_LRO_STATS);
8759 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
8760 + if ((procHwLroAutoTlb = create_proc_entry(PROCREG_HW_LRO_AUTO_TLB, 0, procRegDir)))
8761 + procHwLroAutoTlb->proc_fops = &hw_lro_auto_tlb_fops;
8764 + if (!(procHwLroAutoTlb = proc_create(PROCREG_HW_LRO_AUTO_TLB, 0, procRegDir, &hw_lro_auto_tlb_fops)))
8766 + printk("!! FAIL to create %s PROC !!\n", PROCREG_HW_LRO_AUTO_TLB);
8767 +#endif /* CONFIG_RAETH_HW_LRO */
8769 +#if defined(CONFIG_RAETH_QOS)
8770 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
8771 + if ((procRaQOS = create_proc_entry(PROCREG_RAQOS, 0, procRegDir)))
8772 + procRaQOS->proc_fops = &raeth_qos_fops;
8775 + if (!(procRaQOS = proc_create(PROCREG_RAQOS, 0, procRegDir, &raeth_qos_fops)))
8777 + printk("!! FAIL to create %s PROC !!\n", PROCREG_RAQOS);
8780 +#if defined(CONFIG_USER_SNMPD)
8781 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
8782 + if ((procRaSnmp = create_proc_entry(PROCREG_SNMP, S_IRUGO, procRegDir)))
8783 + procRaSnmp->proc_fops = &ra_snmp_seq_fops;
8786 + if (!(procRaSnmp = proc_create(PROCREG_SNMP, S_IRUGO, procRegDir, &ra_snmp_seq_fops)))
8788 + printk("!! FAIL to create %s PROC !!\n", PROCREG_SNMP);
8791 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
8792 + if ((procEswCnt = create_proc_entry(PROCREG_ESW_CNT, 0, procRegDir)))
8793 + procEswCnt->proc_fops = &switch_count_fops;
8796 + if (!(procEswCnt = proc_create(PROCREG_ESW_CNT, 0, procRegDir, &switch_count_fops)))
8798 + printk("!! FAIL to create %s PROC !!\n", PROCREG_ESW_CNT);
8800 +#if defined (TASKLET_WORKQUEUE_SW)
8801 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
8802 + if ((procSCHE = create_proc_entry(PROCREG_SCHE, 0, procRegDir)))
8803 + procSCHE->proc_fops = &schedule_sw_fops;
8806 + if (!(procSCHE = proc_create(PROCREG_SCHE, 0, procRegDir, &schedule_sw_fops)))
8808 + printk("!! FAIL to create %s PROC !!\n", PROCREG_SCHE);
8811 +#if defined(CONFIG_RAETH_PDMA_DVT)
8812 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
8813 + if ((procPdmaDvt = create_proc_entry(PROCREG_PDMA_DVT, 0, procRegDir)))
8814 + procPdmaDvt->proc_fops = &pdma_dev_sw_fops;
8817 + if (!(procPdmaDvt = proc_create(PROCREG_PDMA_DVT, 0, procRegDir, &pdma_dev_sw_fops )))
8819 + printk("!! FAIL to create %s PROC !!\n", PROCREG_PDMA_DVT);
8820 +#endif //#if defined(CONFIG_RAETH_PDMA_DVT)
8822 + printk(KERN_ALERT "PROC INIT OK!\n");
8826 +void debug_proc_exit(void)
8830 + remove_proc_entry(PROCREG_CP0, procRegDir);
8833 + remove_proc_entry(PROCREG_GMAC, procRegDir);
8834 +#if defined(CONFIG_PSEUDO_SUPPORT) && defined(CONFIG_ETHTOOL)
8836 + remove_proc_entry(PROCREG_GMAC, procRegDir);
8839 + remove_proc_entry(PROCREG_SKBFREE, procRegDir);
8842 + remove_proc_entry(PROCREG_TXRING, procRegDir);
8845 + remove_proc_entry(PROCREG_RXRING, procRegDir);
8847 +#if defined(CONFIG_RAETH_TSO)
8849 + remove_proc_entry(PROCREG_NUM_OF_TXD, procRegDir);
8852 + remove_proc_entry(PROCREG_TSO_LEN, procRegDir);
8855 +#if defined(CONFIG_RAETH_LRO)
8857 + remove_proc_entry(PROCREG_LRO_STATS, procRegDir);
8860 +#if defined(CONFIG_RAETH_QOS)
8862 + remove_proc_entry(PROCREG_RAQOS, procRegDir);
8864 + remove_proc_entry(PROCREG_RXDONE_INTR, procRegDir);
8865 + if (procRaEswIntr)
8866 + remove_proc_entry(PROCREG_ESW_INTR, procRegDir);
8869 +#if defined(CONFIG_USER_SNMPD)
8871 + remove_proc_entry(PROCREG_SNMP, procRegDir);
8875 + remove_proc_entry(PROCREG_ESW_CNT, procRegDir);
8878 + //remove_proc_entry(PROCREG_DIR, 0);
8880 + printk(KERN_ALERT "proc exit\n");
8882 +EXPORT_SYMBOL(procRegDir);
8883 diff --git a/drivers/net/ethernet/raeth/ra_mac.h b/drivers/net/ethernet/raeth/ra_mac.h
8884 new file mode 100644
8885 index 0000000..66b32d3
8887 +++ b/drivers/net/ethernet/raeth/ra_mac.h
8892 +void ra2880stop(END_DEVICE *ei_local);
8893 +void ra2880MacAddressSet(unsigned char p[6]);
8894 +void ra2880Mac2AddressSet(unsigned char p[6]);
8895 +void ethtool_init(struct net_device *dev);
8897 +void ra2880EnableInterrupt(void);
8899 +void dump_qos(void);
8900 +void dump_reg(struct seq_file *s);
8901 +void dump_cp0(void);
8903 +int debug_proc_init(void);
8904 +void debug_proc_exit(void);
8906 +#if defined (CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A) || \
8907 + defined (CONFIG_RALINK_MT7620) || defined(CONFIG_RALINK_MT7621)
8908 +void enable_auto_negotiate(int unused);
8910 +void enable_auto_negotiate(int ge);
8913 +void rt2880_gmac_hard_reset(void);
8915 +int TsoLenUpdate(int tso_len);
8916 +int NumOfTxdUpdate(int num_of_txd);
8918 +#ifdef CONFIG_RAETH_LRO
8919 +int LroStatsUpdate(struct net_lro_mgr *lro_mgr, bool all_flushed);
8921 +#ifdef CONFIG_RAETH_HW_LRO
8922 +int HwLroStatsUpdate(unsigned int ring_num, unsigned int agg_cnt, unsigned int agg_size);
8923 +#if defined(CONFIG_RAETH_HW_LRO_REASON_DBG)
8924 +#define HW_LRO_AGG_FLUSH (1)
8925 +#define HW_LRO_AGE_FLUSH (2)
8926 +#define HW_LRO_NOT_IN_SEQ_FLUSH (3)
8927 +#define HW_LRO_TIMESTAMP_FLUSH (4)
8928 +#define HW_LRO_NON_RULE_FLUSH (5)
8929 +int HwLroFlushStatsUpdate(unsigned int ring_num, unsigned int flush_reason);
8930 +#endif /* CONFIG_RAETH_HW_LRO_REASON_DBG */
8931 +typedef int (*HWLRO_DBG_FUNC)(int par1, int par2);
8932 +int hwlro_agg_cnt_ctrl(int par1, int par2);
8933 +int hwlro_agg_time_ctrl(int par1, int par2);
8934 +int hwlro_age_time_ctrl(int par1, int par2);
8935 +int hwlro_pkt_int_alpha_ctrl(int par1, int par2);
8936 +int hwlro_threshold_ctrl(int par1, int par2);
8937 +int hwlro_fix_setting_switch_ctrl(int par1, int par2);
8938 +#endif /* CONFIG_RAETH_HW_LRO */
8939 +int getnext(const char *src, int separator, char *dest);
8940 +int str_to_ip(unsigned int *ip, const char *str);
8942 +#if defined(CONFIG_RAETH_PDMA_DVT)
8943 +typedef int (*PDMA_DBG_FUNC)(int par1, int par2);
8944 +#endif //#if defined(CONFIG_RAETH_PDMA_DVT)
8946 diff --git a/drivers/net/ethernet/raeth/ra_netlink.c b/drivers/net/ethernet/raeth/ra_netlink.c
8947 new file mode 100644
8948 index 0000000..f7c3650
8950 +++ b/drivers/net/ethernet/raeth/ra_netlink.c
8952 +// for netlink header
8953 +#include <asm/types.h>
8954 +#include <net/sock.h>
8955 +#include <linux/socket.h>
8956 +#include <linux/netlink.h>
8957 +#include <linux/skbuff.h>
8958 +#include <linux/net.h>
8959 +#include <linux/version.h>
8961 +#include "csr_netlink.h"
8962 +#include "ra2882ethreg.h"
8963 +#include "ra_netlink.h"
8965 +static struct sock *csr_msg_socket = NULL; // synchronize socket for netlink use
8966 +unsigned int flags;
8968 +void rt2880_csr_receiver(struct sock *sk, int len)
8970 + struct sk_buff *skb;
8972 + struct nlmsghdr *nlh;
8973 + unsigned int reg_value = 0;
8975 + RAETH_PRINT("csr netlink receiver!\n");
8976 + skb = skb_recv_datagram(sk, 0, 1, &err);
8978 + RAETH_PRINT("error no : %d\n", err);
8980 + if (skb == NULL) {
8981 + printk("rt2880_csr_receiver(): No data received, error!\n");
8985 + nlh = (struct nlmsghdr*)skb->data;
8987 + csrmsg = NLMSG_DATA(nlh);
8989 + if (csrmsg->enable == CSR_READ ) {
8990 + reg_value = sysRegRead(csrmsg->address);
8992 + printk("raeth -- 0x%08x: 0x%08x\n", csrmsg->address, reg_value);
8994 + } else if ( csrmsg->enable == CSR_WRITE ) {
8995 + sysRegWrite(csrmsg->address, csrmsg->default_value);
8996 + reg_value = sysRegRead(csrmsg->address);
8997 + } else if ( csrmsg->enable == CSR_TEST ) {
8998 + reg_value = sysRegRead(csrmsg->address);
8999 + printk("0x%08x: 0x%08x\n", (unsigned int)csrmsg->address, reg_value);
9002 + printk("drv: Command format error!\n");
9004 + csrmsg->default_value = reg_value;
9006 + RAETH_PRINT("drv: rt2880_csr_msgsend() - msg to send!\n");
9008 + err = rt2880_csr_msgsend(csrmsg);
9011 + printk("drv: msg send error!\n");
9013 + skb_free_datagram(sk, skb);
9016 +int rt2880_csr_msgsend(CSR_MSG* csrmsg)
9018 + struct sk_buff *skb;
9019 + struct nlmsghdr *nlh = NULL;
9021 + struct sock *send_syncnl = csr_msg_socket;
9024 + if (send_syncnl == NULL) {
9025 + printk("drv: netlink_kernel_create() failed!\n");
9029 + size = NLMSG_SPACE(sizeof(CSR_MSG));
9030 + skb = alloc_skb(size, GFP_ATOMIC);
9034 + printk("rt2880_csr_msgsend() : error! msg structure not available\n");
9038 + nlh = NLMSG_PUT(skb, 0, 0, RALINK_CSR_GROUP, size - sizeof(struct nlmsghdr));
9042 + printk("rt2880_csr_msgsend() : error! nlh structure not available\n");
9046 + csr_reg = NLMSG_DATA(nlh);
9049 + printk("rt2880_csr_msgsend() : error! nlh structure not available\n");
9053 + csr_reg->address = csrmsg->address;
9054 + csr_reg->default_value = csrmsg->default_value;
9055 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
9056 + NETLINK_CB(skb).dst_group = RALINK_CSR_GROUP;
9058 + NETLINK_CB(skb).dst_groups = RALINK_CSR_GROUP;
9060 + netlink_broadcast(send_syncnl, skb, 0, RALINK_CSR_GROUP, GFP_ATOMIC);
9067 +int csr_netlink_init()
9070 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
9071 + csr_msg_socket = netlink_kernel_create(NETLINK_CSR, RALINK_CSR_GROUP, rt2880_csr_receiver, THIS_MODULE);
9073 + csr_msg_socket = netlink_kernel_create(NETLINK_CSR, rt2880_csr_receiver);
9076 + if ( csr_msg_socket == NULL )
9077 + printk("unable to create netlink socket!\n");
9079 + printk("Netlink init ok!\n");
9083 +void csr_netlink_end()
9085 + if (csr_msg_socket != NULL){
9086 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
9087 + sock_release(csr_msg_socket->sk_socket);
9089 + sock_release(csr_msg_socket->socket);
9091 + printk("Netlink end...\n");
9094 diff --git a/drivers/net/ethernet/raeth/ra_netlink.h b/drivers/net/ethernet/raeth/ra_netlink.h
9095 new file mode 100644
9096 index 0000000..19ca71f
9098 +++ b/drivers/net/ethernet/raeth/ra_netlink.h
9103 +#include "csr_netlink.h"
9104 +int rt2880_csr_msgsend(CSR_MSG* csrmsg);
9105 +void rt2880_csr_receiver(struct sock *sk, int len);
9106 +int csr_netlink_init(void);
9107 +void csr_netlink_end(void);
9110 diff --git a/drivers/net/ethernet/raeth/ra_qos.c b/drivers/net/ethernet/raeth/ra_qos.c
9111 new file mode 100644
9112 index 0000000..0a7d9c5
9114 +++ b/drivers/net/ethernet/raeth/ra_qos.c
9116 +#include <asm/io.h>
9117 +#include <linux/pci.h>
9118 +#include <linux/netdevice.h>
9119 +#include <linux/etherdevice.h>
9120 +#include <linux/net.h>
9121 +#include <linux/in.h>
9122 +#include "ra_qos.h"
9123 +#include "raether.h"
9124 +#include "ra2882ethreg.h"
9126 +#include <asm/types.h>
9127 +#include <net/sock.h>
9128 +#include <linux/socket.h>
9129 +#include <linux/skbuff.h>
9130 +#include <linux/net.h>
9131 +#include <linux/if_vlan.h>
9132 +#include <linux/ip.h>
9135 +#if defined (CONFIG_RA_HW_NAT) || defined (CONFIG_RA_HW_NAT_MODULE)
9136 +#include "../../../net/nat/hw_nat/ra_nat.h"
9139 +#define CONTI_TX_SEND_MAX_SIZE 1440
9142 + * set tx queue # to descriptor
9144 +void rt3052_tx_queue_init(unsigned long data)
9146 + /* define qos p */
9150 +void rt3052_pse_port0_fc_clear(unsigned long data)
9152 + /* clear FE_INT_STATUS.PSE_P0_FC */
9156 +inline int get_tx_ctx_idx(unsigned int ring_no, unsigned long *idx)
9158 + switch (ring_no) {
9160 + *idx = *(unsigned long*)TX_CTX_IDX0;
9163 + *idx = *(unsigned long*)TX_CTX_IDX1;
9166 + *idx = *(unsigned long*)TX_CTX_IDX2;
9169 + *idx = *(unsigned long*)TX_CTX_IDX3;
9172 + printk("set_tx_ctx_idex error\n");
9178 +inline int set_tx_ctx_idx(unsigned int ring_no, unsigned int idx)
9180 + switch (ring_no ) {
9182 + *(unsigned long*)TX_CTX_IDX0 = cpu_to_le32((u32)idx);
9185 + *(unsigned long*)TX_CTX_IDX1 = cpu_to_le32((u32)idx);
9188 + *(unsigned long*)TX_CTX_IDX2 = cpu_to_le32((u32)idx);
9191 + *(unsigned long*)TX_CTX_IDX3 = cpu_to_le32((u32)idx);
9194 + printk("set_tx_ctx_idex error\n");
9201 +void get_tx_desc_and_dtx_idx(END_DEVICE* ei_local, int ring_no, unsigned long *tx_dtx_idx, struct PDMA_txdesc **tx_desc)
9203 + switch (ring_no) {
9205 + *tx_desc = ei_local->tx_ring0;
9206 + *tx_dtx_idx = *(unsigned long*)TX_DTX_IDX0;
9209 + *tx_desc = ei_local->tx_ring1;
9210 + *tx_dtx_idx = *(unsigned long*)TX_DTX_IDX1;
9213 + *tx_desc = ei_local->tx_ring2;
9214 + *tx_dtx_idx = *(unsigned long*)TX_DTX_IDX2;
9217 + *tx_desc = ei_local->tx_ring3;
9218 + *tx_dtx_idx = *(unsigned long*)TX_DTX_IDX3;
9221 + printk("ring_no input error... %d\n", ring_no);
9225 +int fe_qos_packet_send(struct net_device *dev, struct sk_buff* skb, unsigned int ring_no, unsigned int qn, unsigned pn)
9227 + END_DEVICE* ei_local = netdev_priv(dev);
9228 + struct PDMA_txdesc* tx_desc;
9229 + unsigned int tx_cpu_owner_idx, tx_dtx_idx;
9231 + unsigned int length=skb->len;
9233 + unsigned long flags;
9235 + //printk("fe_qos_packet_send: ring_no=%d qn=%d pn=%d\n", ring_no, qn, pn);
9237 + switch ( ring_no ) {
9239 + tx_desc = ei_local->tx_ring0;
9240 + tx_cpu_owner_idx = *(unsigned long*)TX_CTX_IDX0;
9241 + tx_dtx_idx = *(unsigned long*)TX_DTX_IDX0;
9244 + tx_desc = ei_local->tx_ring1;
9245 + tx_cpu_owner_idx = *(unsigned long*)TX_CTX_IDX1;
9246 + tx_dtx_idx = *(unsigned long*)TX_DTX_IDX1;
9249 + tx_desc = ei_local->tx_ring2;
9250 + tx_cpu_owner_idx = *(unsigned long*)TX_CTX_IDX2;
9251 + tx_dtx_idx = *(unsigned long*)TX_DTX_IDX2;
9254 + tx_desc = ei_local->tx_ring3;
9255 + tx_cpu_owner_idx = *(unsigned long*)TX_CTX_IDX3;
9256 + tx_dtx_idx = *(unsigned long*)TX_DTX_IDX3;
9259 + printk("ring_no input error... %d\n", ring_no);
9263 + //printk("tx_cpu_owner_idx=%d tx_dtx_idx=%d\n", tx_cpu_owner_idx, tx_dtx_idx);
9265 + if(tx_desc == NULL) {
9266 + printk("%s : txdesc is NULL\n", dev->name);
9270 + tx_desc[tx_cpu_owner_idx].txd_info1.SDP0 = virt_to_phys(skb->data);
9271 + tx_desc[tx_cpu_owner_idx].txd_info2.SDL0 = length;
9272 + tx_desc[tx_cpu_owner_idx].txd_info2.DDONE_bit = 0;
9273 + tx_desc[tx_cpu_owner_idx].txd_info4.PN = pn;
9274 + tx_desc[tx_cpu_owner_idx].txd_info4.QN = qn;
9276 +#ifdef CONFIG_RAETH_CHECKSUM_OFFLOAD
9277 + ei_local->tx_ring0[tx_cpu_owner_idx].txd_info4.TCO = 1;
9278 + ei_local->tx_ring0[tx_cpu_owner_idx].txd_info4.UCO = 1;
9279 + ei_local->tx_ring0[tx_cpu_owner_idx].txd_info4.ICO = 1;
9282 +#if defined (CONFIG_RA_HW_NAT) || defined (CONFIG_RA_HW_NAT_MODULE)
9283 + if(FOE_MAGIC_TAG(skb) == FOE_MAGIC_PPE) {
9284 + tx_desc[tx_cpu_owner_idx].txd_info4.PN = 6; /* PPE */
9286 + tx_desc[tx_cpu_owner_idx].txd_info4.PN = pn;
9291 + spin_lock_irqsave(&ei_local->page_lock, flags);
9292 + ei_local->skb_free[ring_no][tx_cpu_owner_idx] = skb;
9293 + tx_cpu_owner_idx = (tx_cpu_owner_idx +1) % NUM_TX_DESC;
9294 + ret = set_tx_ctx_idx(ring_no, tx_cpu_owner_idx);
9295 + spin_unlock_irqrestore(&ei_local->page_lock, flags);
9297 + ei_local->stat.tx_packets++;
9298 + ei_local->stat.tx_bytes += length;
9300 +#ifdef CONFIG_RAETH_NAPI
9301 + switch ( ring_no ) {
9303 + if ( ei_local->tx0_full == 1) {
9304 + ei_local->tx0_full = 0;
9305 + netif_wake_queue(dev);
9309 + if ( ei_local->tx1_full == 1) {
9310 + ei_local->tx1_full = 0;
9311 + netif_wake_queue(dev);
9315 + if ( ei_local->tx2_full == 1) {
9316 + ei_local->tx2_full = 0;
9317 + netif_wake_queue(dev);
9321 + if ( ei_local->tx3_full == 1) {
9322 + ei_local->tx3_full = 0;
9323 + netif_wake_queue(dev);
9327 + printk("ring_no input error %d\n", ring_no);
9333 +int fe_tx_desc_init(struct net_device *dev, unsigned int ring_no, unsigned int qn, unsigned int pn)
9335 + END_DEVICE* ei_local = netdev_priv(dev);
9336 + struct PDMA_txdesc *tx_desc;
9337 + unsigned int tx_cpu_owner_idx = 0;
9339 + unsigned int phy_tx_ring;
9342 + if ( ring_no > 3 ){
9343 + printk("%s : ring_no - %d, please under 4...\n", dev->name, ring_no);
9348 + printk("%s : pn - %d, please under 2...\n", dev->name, pn);
9352 + tx_desc = pci_alloc_consistent(NULL, NUM_TX_DESC*sizeof(struct PDMA_txdesc), &phy_tx_ring);
9353 + ei_local->tx_cpu_owner_idx0 = tx_cpu_owner_idx;
9355 + switch (ring_no) {
9357 + ei_local->tx_ring0 = tx_desc;
9358 + ei_local->phy_tx_ring0 = phy_tx_ring;
9361 + ei_local->phy_tx_ring1 = phy_tx_ring;
9362 + ei_local->tx_ring1 = tx_desc;
9365 + ei_local->phy_tx_ring2 = phy_tx_ring;
9366 + ei_local->tx_ring2 = tx_desc;
9369 + ei_local->phy_tx_ring3 = phy_tx_ring;
9370 + ei_local->tx_ring3 = tx_desc;
9373 + printk("ring_no input error! %d\n", ring_no);
9374 + pci_free_consistent(NULL, NUM_TX_DESC*sizeof(struct PDMA_txdesc), tx_desc, phy_tx_ring);
9378 + if ( tx_desc == NULL)
9380 + printk("tx desc allocation failed!\n");
9384 + for( i = 0; i < NUM_TX_DESC; i++) {
9385 + memset( &tx_desc[i], 0, sizeof(struct PDMA_txdesc));
9386 + tx_desc[i].txd_info2.LS0_bit = 1;
9387 + tx_desc[i].txd_info2.DDONE_bit = 1;
9388 + tx_desc[i].txd_info4.PN = pn;
9389 + tx_desc[i].txd_info4.QN = qn;
9392 + switch ( ring_no ) {
9394 + *(unsigned long*)TX_BASE_PTR0 = phys_to_bus((u32) phy_tx_ring);
9395 + *(unsigned long*)TX_MAX_CNT0 = cpu_to_le32((u32)NUM_TX_DESC);
9396 + *(unsigned long*)TX_CTX_IDX0 = cpu_to_le32((u32) tx_cpu_owner_idx);
9397 + sysRegWrite(PDMA_RST_CFG, PST_DTX_IDX0);
9400 + *(unsigned long*)TX_BASE_PTR1 = phys_to_bus((u32) phy_tx_ring);
9401 + *(unsigned long*)TX_MAX_CNT1 = cpu_to_le32((u32)NUM_TX_DESC);
9402 + *(unsigned long*)TX_CTX_IDX1 = cpu_to_le32((u32) tx_cpu_owner_idx);
9403 + sysRegWrite(PDMA_RST_CFG, PST_DTX_IDX1);
9406 + *(unsigned long*)TX_BASE_PTR2 = phys_to_bus((u32) phy_tx_ring);
9407 + *(unsigned long*)TX_MAX_CNT2 = cpu_to_le32((u32)NUM_TX_DESC);
9408 + *(unsigned long*)TX_CTX_IDX2 = cpu_to_le32((u32) tx_cpu_owner_idx);
9409 + sysRegWrite(PDMA_RST_CFG, PST_DTX_IDX2);
9412 + *(unsigned long*)TX_BASE_PTR3 = phys_to_bus((u32) phy_tx_ring);
9413 + *(unsigned long*)TX_MAX_CNT3 = cpu_to_le32((u32)NUM_TX_DESC);
9414 + *(unsigned long*)TX_CTX_IDX3 = cpu_to_le32((u32) tx_cpu_owner_idx);
9415 + sysRegWrite(PDMA_RST_CFG, PST_DTX_IDX3);
9418 + printk("tx descriptor init failed %d\n", ring_no);
9425 + DSCP | AC | WMM_AC (Access Category)
9426 + ------+----+--------
9437 + DSCP |(bit5~bit7)| WMM
9438 + -------+-----------+-------
9448 + Notes: BE should be mapped to AC1, but mapped to AC0 in linux kernel.
9452 +int pkt_classifier(struct sk_buff *skb,int gmac_no, int *ring_no, int *queue_no, int *port_no)
9454 +#if defined(CONFIG_RALINK_RT2880)
9455 + /* RT2880 -- Assume using 1 Ring (Ring0), Queue 0, and Port 0 */
9460 + unsigned int ac=0;
9461 + unsigned int bridge_traffic=0, lan_traffic=0;
9462 + struct iphdr *iph=NULL;
9463 + struct vlan_ethhdr *veth=NULL;
9464 + unsigned int vlan_id=0;
9465 +#if defined (CONFIG_RAETH_QOS_DSCP_BASED)
9466 + static char DscpToAcMap[8]={1,0,0,1,2,2,3,3};
9467 +#elif defined (CONFIG_RAETH_QOS_VPRI_BASED)
9468 + static char VlanPriToAcMap[8]={1,0,0,1,2,2,3,3};
9471 + /* Bridge:: {BG,BE,VI,VO} */
9472 + /* GateWay:: WAN: {BG,BE,VI,VO}, LAN: {BG,BE,VI,VO} */
9473 +#if defined (CONFIG_RALINK_RT3883) && defined (CONFIG_RAETH_GMAC2)
9476 + * 1.1) GMAC1 ONLY:
9477 + * VO/VI->Ring3, BG/BE->Ring2
9478 + * 1.2) GMAC1+GMAC2:
9479 + * GMAC1:: VO/VI->Ring3, BG/BE->Ring2
9480 + * GMAC2:: VO/VI->Ring1, BG/BE->Ring0
9482 + * 2.1) GMAC1 ONLY:
9483 + * GMAC1:: LAN:VI/VO->Ring2, BE/BK->Ring2
9484 + * WAN:VI/VO->Ring3, BE/BK->Ring3
9485 + * 2.2)GMAC1+GMAC2:
9486 + * GMAC1:: LAN:VI/VO/BE/BK->Ring2, WAN:VI/VO/BE/BK->Ring3
9487 + * GMAC2:: VI/VO->Ring1, BE/BK->Ring0
9489 + static unsigned char AcToRing_BridgeMap[4] = {2, 2, 3, 3};
9490 + static unsigned char AcToRing_GE1Map[2][4] = {{3, 3, 3, 3},{2, 2, 2, 2}};
9491 + static unsigned char AcToRing_GE2Map[4] = {0, 0, 1, 1};
9492 +#elif defined (CONFIG_RALINK_RT3052) || defined (CONFIG_RALINK_RT2883) || \
9493 + defined (CONFIG_RALINK_RT3352) || defined (CONFIG_RALINK_RT5350) || \
9494 + defined (CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A) || \
9495 + defined (CONFIG_RALINK_MT7620) || defined(CONFIG_RALINK_MT7621) || \
9496 + defined (CONFIG_RALINK_MT7628) || \
9497 + (defined (CONFIG_RALINK_RT3883) && !defined(CONFIG_RAETH_GMAC2))
9499 + * 1) Bridge: VO->Ring3, VI->Ring2, BG->Ring1, BE->Ring0
9501 + * 2.1) GMAC1:: LAN:VI/VO->Ring1, BE/BK->Ring0
9502 + * WAN:VI/VO->Ring3, BE/BK->Ring2
9504 + static unsigned char AcToRing_BridgeMap[4] = {0, 1, 2, 3};
9505 + static unsigned char AcToRing_GE1Map[2][4] = {{2, 2, 3, 3},{0, 0, 1, 1}};
9506 +#endif // CONFIG_RALINK_RT2883
9509 + * Set queue no - QN field in TX Descriptor
9510 + * always use queue 3 for the packet from CPU to GMAC
9514 + /* Get access category */
9515 + veth = (struct vlan_ethhdr *)(skb->data);
9516 + if(veth->h_vlan_proto == htons(ETH_P_8021Q)) { // VLAN traffic
9517 + iph= (struct iphdr *)(skb->data + VLAN_ETH_HLEN);
9519 + vlan_id = ntohs(veth->h_vlan_TCI & VLAN_VID_MASK);
9520 + if(vlan_id==1) { //LAN
9526 + if (veth->h_vlan_encapsulated_proto == htons(ETH_P_IP)) { //IPv4
9527 +#if defined (CONFIG_RAETH_QOS_DSCP_BASED)
9528 + ac = DscpToAcMap[(iph->tos & 0xe0) >> 5];
9529 +#elif defined (CONFIG_RAETH_QOS_VPRI_BASED)
9530 + ac = VlanPriToAcMap[skb->priority];
9532 + }else { //Ipv6, ARP ...etc
9535 + }else { // non-VLAN traffic
9536 + if (veth->h_vlan_proto == htons(ETH_P_IP)) { //IPv4
9537 +#if defined (CONFIG_RAETH_QOS_DSCP_BASED)
9538 + iph= (struct iphdr *)(skb->data + ETH_HLEN);
9539 + ac = DscpToAcMap[(iph->tos & 0xe0) >> 5];
9540 +#elif defined (CONFIG_RAETH_QOS_VPRI_BASED)
9541 + ac= VlanPriToAcMap[skb->priority];
9543 + }else { // IPv6, ARP ...etc
9551 + /* Set Tx Ring no */
9552 + if(gmac_no==1) { //GMAC1
9553 + if(bridge_traffic) { //Bridge Mode
9554 + *ring_no = AcToRing_BridgeMap[ac];
9555 + }else { //GateWay Mode
9556 + *ring_no = AcToRing_GE1Map[lan_traffic][ac];
9559 +#if defined (CONFIG_RALINK_RT3883) && defined (CONFIG_RAETH_GMAC2)
9560 + *ring_no = AcToRing_GE2Map[ac];
9565 + /* Set Port No - PN field in Tx Descriptor*/
9566 +#if defined(CONFIG_RAETH_GMAC2)
9567 + *port_no = gmac_no;
9569 + if(bridge_traffic) {
9572 + if(lan_traffic==1) { //LAN use VP1
9574 + }else { //WAN use VP2
9578 +#endif // CONFIG_RAETH_GMAC2 //
9588 + * Routine Description :
9589 + * Hi/Li Rings and Queues definition for QoS Purpose
9591 + * Related registers: (Detail information refer to pp106 of RT3052_DS_20080226.doc)
9592 + * Priority High/Low Definition - PDMA_FC_CFG, GDMA1_FC_CFG, GDMA2_FC_CFG
9593 + * Bit 28 - Allows high priority Q to share low priority Q's reserved pages
9594 + * Bit 27:24 - Px high priority definition bitmap
9595 + * Weight Configuration - GDMA1_SCH_CFG, GDMA2_SCH_CFG, PDMA_SCH_CFG -> default 3210
9601 +#define PSE_P1_LQ_FULL (1<<2)
9602 +#define PSE_P1_HQ_FULL (1<<3)
9603 +#define PSE_P2_LQ_FULL (1<<4)
9604 +#define PSE_P2_HQ_FULL (1<<5)
9606 +#define HIGH_QUEUE(queue) (1<<(queue))
9607 +#define LOW_QUEUE(queue) (0<<(queue))
9608 +#define PAGES_SHARING (1<<28)
9609 +#define RSEV_PAGE_COUNT_HQ 0x10 /* Reserved page count for high priority Q */
9610 +#define RSEV_PAGE_COUNT_LQ 0x10 /* Reserved page count for low priority Q */
9611 +#define VIQ_FC_ASRT 0x10 /* Virtual input Q FC assertion threshold */
9613 +#define QUEUE_WEIGHT_1 0
9614 +#define QUEUE_WEIGHT_2 1
9615 +#define QUEUE_WEIGHT_4 2
9616 +#define QUEUE_WEIGHT_8 3
9617 +#define QUEUE_WEIGHT_16 4
9619 +#define WRR_SCH 0 /*WRR */
9620 +#define STRICT_PRI_SCH 1 /* Strict Priority */
9621 +#define MIX_SCH 2 /* Mixed : Q3>WRR(Q2,Q1,Q0) */
9624 + * Ring3 Ring2 Ring1 Ring0
9627 + * --------------------------------
9628 + * | WRR Scheduler |
9629 + * --------------------------------
9631 + * ---------------------------------------
9633 + * ---------------------------------------
9634 + * |Q3||Q2||Q1||Q0| |Q3||Q2||Q1||Q0|
9635 + * | || || || | | || || || |
9636 + * ------------------- -------------------
9637 + * | GDMA2 | | GDMA1 |
9638 + * ------------------- -------------------
9640 + * ------------------------------------
9642 + * ------------------------------------
9646 +void set_scheduler_weight(void)
9648 +#if !defined (CONFIG_RALINK_RT5350) && !defined (CONFIG_RALINK_MT7628)
9650 + * STEP1: Queue scheduling configuration
9652 + *(unsigned long *)GDMA1_SCH_CFG = (WRR_SCH << 24) |
9653 + (QUEUE_WEIGHT_16 << 12) | /* queue 3 weight */
9654 + (QUEUE_WEIGHT_8 << 8) | /* queue 2 weight */
9655 + (QUEUE_WEIGHT_4 << 4) | /* queue 1 weight */
9656 + (QUEUE_WEIGHT_2 << 0); /* queue 0 weight */
9658 + *(unsigned long *)GDMA2_SCH_CFG = (WRR_SCH << 24) |
9659 + (QUEUE_WEIGHT_16 << 12) | /* queue 3 weight */
9660 + (QUEUE_WEIGHT_8 << 8) | /* queue 2 weight */
9661 + (QUEUE_WEIGHT_4 << 4) | /* queue 1 weight */
9662 + (QUEUE_WEIGHT_2 << 0); /* queue 0 weight */
9666 + * STEP2: Ring scheduling configuration
9668 +#if defined (CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A) || \
9669 + defined (CONFIG_RALINK_MT7620) || defined(CONFIG_RALINK_MT7621)
9670 + /* MIN_RATE_RATIO0=0, MAX_RATE_ULMT0=1, Weight0=1 */
9671 + *(unsigned long *)SCH_Q01_CFG = (0 << 10) | (1<<14) | (0 << 12);
9672 + /* MIN_RATE_RATIO1=0, MAX_RATE_ULMT1=1, Weight1=4 */
9673 + *(unsigned long *)SCH_Q01_CFG |= (0 << 26) | (1<<30) | (2 << 28);
9675 + /* MIN_RATE_RATIO2=0, MAX_RATE_ULMT2=1, Weight0=1 */
9676 + *(unsigned long *)SCH_Q23_CFG = (0 << 10) | (1<<14) | (0 << 12);
9677 + /* MIN_RATE_RATIO3=0, MAX_RATE_ULMT3=1, Weight1=4 */
9678 + *(unsigned long *)SCH_Q23_CFG |= (0 << 26) | (1<<30) | (2 << 28);
9680 + *(unsigned long *)PDMA_SCH_CFG = (WRR_SCH << 24) |
9681 + (QUEUE_WEIGHT_16 << 12) | /* ring 3 weight */
9682 + (QUEUE_WEIGHT_4 << 8) | /* ring 2 weight */
9683 + (QUEUE_WEIGHT_16 << 4) | /* ring 1 weight */
9684 + (QUEUE_WEIGHT_4 << 0); /* ring 0 weight */
9689 + * Routine Description :
9690 + * Bucket size and related information from ASIC Designer,
9691 + * please check Max Lee to update these values
9693 + * Related Registers
9694 + * FE_GLO_CFG - initialize clock rate for rate limiting
9695 + * PDMA_FC_CFG - Pause mechanism for Rings (Ref to pp116 in datasheet)
9701 + * Bit 29:24 - Q3 flow control pause condition
9702 + * Bit 21:16 - Q2 flow control pause condition
9703 + * Bit 13:8 - Q1 flow control pause condition
9704 + * Bit 5:0 - Q0 flow control pause condition
9707 + * Bit[5] - Pause Qx when PSE p2 HQ full
9708 + * Bit[4] - Pause Qx when PSE p2 LQ full
9709 + * Bit[3] - Pause Qx when PSE p1 HQ full
9710 + * Bit[2] - Pause Qx when PSE p1 LQ full
9711 + * Bit[1] - Pause Qx when PSE p0 HQ full
9712 + * Bit[0] - Pause Qx when PSE p0 LQ full
9714 +void set_schedule_pause_condition(void)
9716 +#if defined (CONFIG_RALINK_MT7620)
9718 +#elif defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_MT7628)
9719 + *(unsigned long *)SDM_TRING = (0xC << 28) | (0x3 << 24) | (0xC << 4) | 0x3;
9722 + * STEP1: Set queue priority is high or low
9724 + * Set queue 3 as high queue in GMAC1/GMAC2
9726 + *(unsigned long *)GDMA1_FC_CFG = ((HIGH_QUEUE(3)|LOW_QUEUE(2) |
9727 + LOW_QUEUE(1)|LOW_QUEUE(0))<<24) |
9728 + (RSEV_PAGE_COUNT_HQ << 16) |
9729 + (RSEV_PAGE_COUNT_LQ <<8) |
9730 + VIQ_FC_ASRT | PAGES_SHARING;
9732 + *(unsigned long *)GDMA2_FC_CFG = ((HIGH_QUEUE(3)|LOW_QUEUE(2) |
9733 + LOW_QUEUE(1)|LOW_QUEUE(0))<<24) |
9734 + (RSEV_PAGE_COUNT_HQ << 16) |
9735 + (RSEV_PAGE_COUNT_LQ <<8) |
9736 + VIQ_FC_ASRT | PAGES_SHARING;
9739 + * STEP2: Set flow control pause condition
9741 + * CPU always use queue 3, and queue3 is high queue.
9742 + * If P2(GMAC2) high queue is full, pause ring3/ring2
9743 + * If P1(GMAC1) high queue is full, pause ring1/ring0
9745 + *(unsigned long *)PDMA_FC_CFG = ( PSE_P2_HQ_FULL << 24 ) | /* queue 3 */
9746 + ( PSE_P2_HQ_FULL << 16 ) | /* queue 2 */
9747 + ( PSE_P1_HQ_FULL << 8 ) | /* queue 1 */
9748 + ( PSE_P1_HQ_FULL << 0 ); /* queue 0 */
9754 +void set_output_shaper(void)
9756 +#define GDMA1_TOKEN_RATE 16 /* unit=64bits/ms */
9757 +#define GDMA2_TOKEN_RATE 16 /* unit=64bits/ms */
9760 + *(unsigned long *)GDMA1_SHPR_CFG = (1 << 24) | /* output shaper enable */
9761 + (128 << 16) | /* bucket size (unit=1KB) */
9762 + (GDMA1_TOKEN_RATE << 0); /* token rate (unit=8B/ms) */
9766 + *(unsigned long *)GDMA2_SHPR_CFG = (1 << 24) | /* output shaper enable */
9767 + (128 << 16) | /* bucket size (unit=1KB) */
9768 + (GDMA2_TOKEN_RATE << 0); /* token rate (unit=8B/ms) */
9771 diff --git a/drivers/net/ethernet/raeth/ra_qos.h b/drivers/net/ethernet/raeth/ra_qos.h
9772 new file mode 100644
9773 index 0000000..7f2a8a1
9775 +++ b/drivers/net/ethernet/raeth/ra_qos.h
9780 +#include "ra2882ethreg.h"
9785 +void get_tx_desc_and_dtx_idx(END_DEVICE* ei_local, int ring_no, unsigned long *tx_dtx_idx, struct PDMA_txdesc **tx_desc);
9786 +int get_tx_ctx_idx(unsigned int ring_no, unsigned long *idx);
9787 +int fe_tx_desc_init(struct net_device *dev, unsigned int ring_no, unsigned int qn, unsigned int pn);
9788 +int fe_qos_packet_send(struct net_device *dev, struct sk_buff* skb, unsigned int ring_no, unsigned int qn, unsigned int pn);
9790 +int pkt_classifier(struct sk_buff *skb,int gmac_no, int *ring_no, int *queue_no, int *port_no);
9791 +void set_schedule_pause_condition(void);
9792 +void set_scheduler_weight(void);
9793 +void set_output_shaper(void);
9795 diff --git a/drivers/net/ethernet/raeth/ra_rfrw.c b/drivers/net/ethernet/raeth/ra_rfrw.c
9796 new file mode 100644
9797 index 0000000..d73db01
9799 +++ b/drivers/net/ethernet/raeth/ra_rfrw.c
9801 +#include <linux/module.h>
9802 +#include <linux/version.h>
9803 +#include <linux/kernel.h>
9804 +#include <linux/sched.h>
9805 +#include <linux/types.h>
9806 +#include <linux/fcntl.h>
9807 +#include <linux/interrupt.h>
9808 +#include <linux/ptrace.h>
9809 +#include <linux/ioport.h>
9810 +#include <linux/in.h>
9811 +#include <linux/slab.h>
9812 +#include <linux/string.h>
9813 +#include <linux/signal.h>
9814 +#include <linux/irq.h>
9815 +#include <linux/netdevice.h>
9816 +#include <linux/etherdevice.h>
9817 +#include <linux/skbuff.h>
9819 +#include "ra2882ethreg.h"
9820 +#include "raether.h"
9821 +#include "ra_mac.h"
9823 +#define RF_CSR_CFG 0xb0180500
9824 +#define RF_CSR_KICK (1<<17)
9825 +int rw_rf_reg(int write, int reg, int *data)
9827 + unsigned long rfcsr, i = 0;
9830 + rfcsr = sysRegRead(RF_CSR_CFG);
9831 + if (! (rfcsr & (u32)RF_CSR_KICK) )
9833 + if (++i > 10000) {
9834 + printk("Warning: Abort rw rf register: too busy\n");
9839 + rfcsr = (u32)(RF_CSR_KICK | ((reg&0x3f) << 8) | (*data & 0xff));
9843 + sysRegRead(RF_CSR_CFG) = cpu_to_le32(rfcsr);
9847 + rfcsr = sysRegRead(RF_CSR_CFG);
9848 + if (! (rfcsr & (u32)RF_CSR_KICK) )
9850 + if (++i > 10000) {
9851 + printk("Warning: still busy\n");
9856 + rfcsr = sysRegRead(RF_CSR_CFG);
9858 + if (((rfcsr&0x1f00) >> 8) != (reg & 0x1f)) {
9859 + printk("Error: rw register failed\n");
9862 + *data = (int)(rfcsr & 0xff);
9867 diff --git a/drivers/net/ethernet/raeth/ra_rfrw.h b/drivers/net/ethernet/raeth/ra_rfrw.h
9868 new file mode 100644
9869 index 0000000..da5a371
9871 +++ b/drivers/net/ethernet/raeth/ra_rfrw.h
9876 +int rw_rf_reg(int write, int reg, int *data);
9879 diff --git a/drivers/net/ethernet/raeth/raether.c b/drivers/net/ethernet/raeth/raether.c
9880 new file mode 100644
9881 index 0000000..328285a
9883 +++ b/drivers/net/ethernet/raeth/raether.c
9885 +#include <linux/module.h>
9886 +#include <linux/version.h>
9887 +#include <linux/kernel.h>
9888 +#include <linux/types.h>
9889 +#include <linux/pci.h>
9890 +#include <linux/init.h>
9891 +#include <linux/skbuff.h>
9892 +#include <linux/if_vlan.h>
9893 +#include <linux/if_ether.h>
9894 +#include <linux/fs.h>
9895 +#include <asm/uaccess.h>
9896 +#include <asm/rt2880/surfboardint.h>
9897 +#include <linux/platform_device.h>
9898 +#if defined (CONFIG_RAETH_TSO)
9899 +#include <linux/tcp.h>
9900 +#include <net/ipv6.h>
9901 +#include <linux/ip.h>
9902 +#include <net/ip.h>
9903 +#include <net/tcp.h>
9904 +#include <linux/in.h>
9905 +#include <linux/ppp_defs.h>
9906 +#include <linux/if_pppox.h>
9908 +#if defined (CONFIG_RAETH_LRO)
9909 +#include <linux/inet_lro.h>
9911 +#include <linux/delay.h>
9912 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
9913 +#include <linux/sched.h>
9916 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0)
9917 +#include <asm/rt2880/rt_mmap.h>
9919 +#include <linux/libata-compat.h>
9922 +#include "ra2882ethreg.h"
9923 +#include "raether.h"
9924 +#include "ra_mac.h"
9925 +#include "ra_ioctl.h"
9926 +#include "ra_rfrw.h"
9927 +#ifdef CONFIG_RAETH_NETLINK
9928 +#include "ra_netlink.h"
9930 +#if defined (CONFIG_RAETH_QOS)
9931 +#include "ra_qos.h"
9934 +#if defined (CONFIG_RA_HW_NAT) || defined (CONFIG_RA_HW_NAT_MODULE)
9935 +#include "../../../net/nat/hw_nat/ra_nat.h"
9937 +#if defined(CONFIG_RAETH_PDMA_DVT)
9938 +#include "dvt/raether_pdma_dvt.h"
9939 +#endif /* CONFIG_RAETH_PDMA_DVT */
9941 +static int fe_irq = 0;
9943 +#if defined (TASKLET_WORKQUEUE_SW)
9945 +int working_schedule;
9948 +#ifdef CONFIG_RAETH_NAPI
9949 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
9950 +static int raeth_clean(struct napi_struct *napi, int budget);
9952 +static int raeth_clean(struct net_device *dev, int *budget);
9955 +static int rt2880_eth_recv(struct net_device* dev, int *work_done, int work_to_do);
9957 +static int rt2880_eth_recv(struct net_device* dev);
9960 +#if !defined(CONFIG_RA_NAT_NONE)
9963 +extern int (*ra_sw_nat_hook_rx)(struct sk_buff *skb);
9964 +extern int (*ra_sw_nat_hook_tx)(struct sk_buff *skb, int gmac_no);
9967 +#if defined(CONFIG_RA_CLASSIFIER)||defined(CONFIG_RA_CLASSIFIER_MODULE)
9970 +#include <asm/mipsregs.h>
9971 +extern int (*ra_classifier_hook_tx)(struct sk_buff *skb, unsigned long cur_cycle);
9972 +extern int (*ra_classifier_hook_rx)(struct sk_buff *skb, unsigned long cur_cycle);
9973 +#endif /* CONFIG_RA_CLASSIFIER */
9975 +#if defined (CONFIG_RALINK_RT3052_MP2)
9976 +int32_t mcast_rx(struct sk_buff * skb);
9977 +int32_t mcast_tx(struct sk_buff * skb);
9980 +int ra_mtd_read_nm(char *name, loff_t from, size_t len, u_char *buf)
9986 +#if defined (CONFIG_RALINK_MT7621) || defined (CONFIG_P5_RGMII_TO_MT7530_MODE) || defined (CONFIG_ARCH_MT7623)
9987 +void setup_internal_gsw(void);
9988 +#if defined (CONFIG_GE1_TRGMII_FORCE_1200)
9989 +void apll_xtal_enable(void);
9990 +#define REGBIT(x, n) (x << n)
9994 +#if defined (CONFIG_MT7623_FPGA)
9995 +void setup_fpga_gsw(void);
9998 +/* gmac driver feature set config */
9999 +#if defined (CONFIG_RAETH_NAPI) || defined (CONFIG_RAETH_QOS)
10002 +#if defined (CONFIG_ARCH_MT7623)
10005 +#define DELAY_INT 1
10009 +//#define CONFIG_UNH_TEST
10010 +/* end of config */
10012 +#if defined (CONFIG_RAETH_JUMBOFRAME)
10013 +#define MAX_RX_LENGTH 4096
10015 +#define MAX_RX_LENGTH 1536
10018 +struct net_device *dev_raether;
10020 +static int rx_dma_owner_idx;
10021 +static int rx_dma_owner_idx0;
10022 +#if defined (CONFIG_RAETH_HW_LRO)
10023 +static int rx_dma_owner_lro1;
10024 +static int rx_dma_owner_lro2;
10025 +static int rx_dma_owner_lro3;
10026 +#elif defined (CONFIG_RAETH_MULTIPLE_RX_RING)
10027 +static int rx_dma_owner_idx1;
10028 +#if defined(CONFIG_ARCH_MT7623)
10029 +static int rx_dma_owner_idx2;
10030 +static int rx_dma_owner_idx3;
10031 +#endif /* CONFIG_ARCH_MT7623 */
10032 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
10036 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
10039 +static int pending_recv;
10040 +static struct PDMA_rxdesc *rx_ring;
10041 +unsigned long tx_ring_full=0;
10043 +#if defined(CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A) || \
10044 + defined(CONFIG_RALINK_MT7620)
10045 +unsigned short p0_rx_good_cnt = 0;
10046 +unsigned short p1_rx_good_cnt = 0;
10047 +unsigned short p2_rx_good_cnt = 0;
10048 +unsigned short p3_rx_good_cnt = 0;
10049 +unsigned short p4_rx_good_cnt = 0;
10050 +unsigned short p5_rx_good_cnt = 0;
10051 +unsigned short p6_rx_good_cnt = 0;
10052 +unsigned short p0_tx_good_cnt = 0;
10053 +unsigned short p1_tx_good_cnt = 0;
10054 +unsigned short p2_tx_good_cnt = 0;
10055 +unsigned short p3_tx_good_cnt = 0;
10056 +unsigned short p4_tx_good_cnt = 0;
10057 +unsigned short p5_tx_good_cnt = 0;
10058 +unsigned short p6_tx_good_cnt = 0;
10060 +unsigned short p0_rx_byte_cnt = 0;
10061 +unsigned short p1_rx_byte_cnt = 0;
10062 +unsigned short p2_rx_byte_cnt = 0;
10063 +unsigned short p3_rx_byte_cnt = 0;
10064 +unsigned short p4_rx_byte_cnt = 0;
10065 +unsigned short p5_rx_byte_cnt = 0;
10066 +unsigned short p6_rx_byte_cnt = 0;
10067 +unsigned short p0_tx_byte_cnt = 0;
10068 +unsigned short p1_tx_byte_cnt = 0;
10069 +unsigned short p2_tx_byte_cnt = 0;
10070 +unsigned short p3_tx_byte_cnt = 0;
10071 +unsigned short p4_tx_byte_cnt = 0;
10072 +unsigned short p5_tx_byte_cnt = 0;
10073 +unsigned short p6_tx_byte_cnt = 0;
10075 +#if defined(CONFIG_RALINK_MT7620)
10076 +unsigned short p7_rx_good_cnt = 0;
10077 +unsigned short p7_tx_good_cnt = 0;
10079 +unsigned short p7_rx_byte_cnt = 0;
10080 +unsigned short p7_tx_byte_cnt = 0;
10087 +#if defined (CONFIG_ETHTOOL) /*&& defined (CONFIG_RAETH_ROUTER)*/
10088 +#include "ra_ethtool.h"
10089 +extern struct ethtool_ops ra_ethtool_ops;
10090 +#ifdef CONFIG_PSEUDO_SUPPORT
10091 +extern struct ethtool_ops ra_virt_ethtool_ops;
10092 +#endif // CONFIG_PSEUDO_SUPPORT //
10093 +#endif // (CONFIG_ETHTOOL //
10095 +#ifdef CONFIG_RALINK_VISTA_BASIC
10096 +int is_switch_175c = 1;
10099 +unsigned int M2Q_table[64] = {0};
10100 +unsigned int lan_wan_separate = 0;
10102 +#if defined(CONFIG_HW_SFQ)
10103 +unsigned int web_sfq_enable = 0;
10104 +EXPORT_SYMBOL(web_sfq_enable);
10107 +EXPORT_SYMBOL(M2Q_table);
10108 +EXPORT_SYMBOL(lan_wan_separate);
10109 +#if defined (CONFIG_RAETH_LRO)
10110 +unsigned int lan_ip;
10111 +struct lro_para_struct lro_para;
10112 +int lro_flush_needed;
10113 +extern char const *nvram_get(int index, char *name);
10116 +#define KSEG1 0xa0000000
10117 +#if defined (CONFIG_MIPS)
10118 +#define PHYS_TO_VIRT(x) ((void *)((x) | KSEG1))
10119 +#define VIRT_TO_PHYS(x) ((unsigned long)(x) & ~KSEG1)
10121 +#define PHYS_TO_VIRT(x) phys_to_virt(x)
10122 +#define VIRT_TO_PHYS(x) virt_to_phys(x)
10125 +extern int fe_dma_init(struct net_device *dev);
10126 +extern int ei_start_xmit(struct sk_buff* skb, struct net_device *dev, int gmac_no);
10127 +extern void ei_xmit_housekeeping(unsigned long unused);
10128 +extern inline int rt2880_eth_send(struct net_device* dev, struct sk_buff *skb, int gmac_no);
10129 +#if defined (CONFIG_RAETH_HW_LRO)
10130 +extern int fe_hw_lro_init(struct net_device *dev);
10131 +#endif /* CONFIG_RAETH_HW_LRO */
10134 +void skb_dump(struct sk_buff* sk) {
10137 + printk("skb_dump: from %s with len %d (%d) headroom=%d tailroom=%d\n",
10138 + sk->dev?sk->dev->name:"ip stack",sk->len,sk->truesize,
10139 + skb_headroom(sk),skb_tailroom(sk));
10141 + //for(i=(unsigned int)sk->head;i<=(unsigned int)sk->tail;i++) {
10142 + for(i=(unsigned int)sk->head;i<=(unsigned int)sk->data+20;i++) {
10143 + if((i % 20) == 0)
10145 + if(i==(unsigned int)sk->data) printk("{");
10146 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,21)
10147 + if(i==(unsigned int)sk->transport_header) printk("#");
10148 + if(i==(unsigned int)sk->network_header) printk("|");
10149 + if(i==(unsigned int)sk->mac_header) printk("*");
10151 + if(i==(unsigned int)sk->h.raw) printk("#");
10152 + if(i==(unsigned int)sk->nh.raw) printk("|");
10153 + if(i==(unsigned int)sk->mac.raw) printk("*");
10155 + printk("%02X-",*((unsigned char*)i));
10156 + if(i==(unsigned int)sk->tail) printk("}");
10164 +#if defined (CONFIG_GIGAPHY) || defined (CONFIG_P5_MAC_TO_PHY_MODE)
10165 +int isICPlusGigaPHY(int ge)
10167 + u32 phy_id0 = 0, phy_id1 = 0;
10169 +#ifdef CONFIG_GE2_RGMII_AN
10171 + if (!mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 2, &phy_id0)) {
10172 + printk("\n Read PhyID 1 is Fail!!\n");
10175 + if (!mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 3, &phy_id1)) {
10176 + printk("\n Read PhyID 1 is Fail!!\n");
10182 +#if defined (CONFIG_GE1_RGMII_AN) || defined (CONFIG_P5_MAC_TO_PHY_MODE)
10184 + if (!mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 2, &phy_id0)) {
10185 + printk("\n Read PhyID 0 is Fail!!\n");
10188 + if (!mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 3, &phy_id1)) {
10189 + printk("\n Read PhyID 0 is Fail!!\n");
10195 + if ((phy_id0 == EV_ICPLUS_PHY_ID0) && ((phy_id1 & 0xfff0) == EV_ICPLUS_PHY_ID1))
10201 +int isMarvellGigaPHY(int ge)
10203 + u32 phy_id0 = 0, phy_id1 = 0;
10205 +#if defined (CONFIG_GE2_RGMII_AN) || defined (CONFIG_P4_MAC_TO_PHY_MODE)
10207 + if (!mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 2, &phy_id0)) {
10208 + printk("\n Read PhyID 1 is Fail!!\n");
10211 + if (!mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 3, &phy_id1)) {
10212 + printk("\n Read PhyID 1 is Fail!!\n");
10218 +#if defined (CONFIG_GE1_RGMII_AN) || defined (CONFIG_P5_MAC_TO_PHY_MODE)
10220 + if (!mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 2, &phy_id0)) {
10221 + printk("\n Read PhyID 0 is Fail!!\n");
10224 + if (!mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 3, &phy_id1)) {
10225 + printk("\n Read PhyID 0 is Fail!!\n");
10231 + if ((phy_id0 == EV_MARVELL_PHY_ID0) && (phy_id1 == EV_MARVELL_PHY_ID1))
10236 +int isVtssGigaPHY(int ge)
10238 + u32 phy_id0 = 0, phy_id1 = 0;
10240 +#if defined (CONFIG_GE2_RGMII_AN) || defined (CONFIG_P4_MAC_TO_PHY_MODE)
10242 + if (!mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 2, &phy_id0)) {
10243 + printk("\n Read PhyID 1 is Fail!!\n");
10246 + if (!mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 3, &phy_id1)) {
10247 + printk("\n Read PhyID 1 is Fail!!\n");
10253 +#if defined (CONFIG_GE1_RGMII_AN) || defined (CONFIG_P5_MAC_TO_PHY_MODE)
10255 + if (!mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 2, &phy_id0)) {
10256 + printk("\n Read PhyID 0 is Fail!!\n");
10259 + if (!mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 3, &phy_id1)) {
10260 + printk("\n Read PhyID 0 is Fail!!\n");
10266 + if ((phy_id0 == EV_VTSS_PHY_ID0) && (phy_id1 == EV_VTSS_PHY_ID1))
10273 + * Set the hardware MAC address.
10275 +static int ei_set_mac_addr(struct net_device *dev, void *p)
10277 + struct sockaddr *addr = p;
10279 + memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
10281 + if(netif_running(dev))
10284 + ra2880MacAddressSet(addr->sa_data);
10288 +#ifdef CONFIG_PSEUDO_SUPPORT
10289 +static int ei_set_mac2_addr(struct net_device *dev, void *p)
10291 + struct sockaddr *addr = p;
10293 + memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
10295 + if(netif_running(dev))
10298 + ra2880Mac2AddressSet(addr->sa_data);
10303 +void set_fe_dma_glo_cfg(void)
10305 + int dma_glo_cfg=0;
10306 +#if defined (CONFIG_RALINK_RT2880) || defined(CONFIG_RALINK_RT2883) || \
10307 + defined (CONFIG_RALINK_RT3052) || defined (CONFIG_RALINK_RT3883)
10308 + int fe_glo_cfg=0;
10311 +#if defined (CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A)
10312 + dma_glo_cfg = (TX_WB_DDONE | RX_DMA_EN | TX_DMA_EN | PDMA_BT_SIZE_32DWORDS);
10313 +#elif defined (CONFIG_RALINK_MT7620) || defined (CONFIG_RALINK_MT7621)
10314 + dma_glo_cfg = (TX_WB_DDONE | RX_DMA_EN | TX_DMA_EN | PDMA_BT_SIZE_16DWORDS);
10315 +#elif defined (CONFIG_ARCH_MT7623)
10316 + dma_glo_cfg = (TX_WB_DDONE | RX_DMA_EN | TX_DMA_EN | PDMA_BT_SIZE_16DWORDS | ADMA_RX_BT_SIZE_32DWORDS);
10318 + dma_glo_cfg = (TX_WB_DDONE | RX_DMA_EN | TX_DMA_EN | PDMA_BT_SIZE_4DWORDS);
10321 +#if defined (CONFIG_RAETH_SCATTER_GATHER_RX_DMA)
10322 + dma_glo_cfg |= (RX_2B_OFFSET);
10325 +#if defined (CONFIG_32B_DESC)
10326 + dma_glo_cfg |= (DESC_32B_EN);
10328 + sysRegWrite(DMA_GLO_CFG, dma_glo_cfg);
10329 +#ifdef CONFIG_RAETH_QDMA
10330 + sysRegWrite(QDMA_GLO_CFG, dma_glo_cfg);
10333 + /* only the following chipset need to set it */
10334 +#if defined (CONFIG_RALINK_RT2880) || defined(CONFIG_RALINK_RT2883) || \
10335 + defined (CONFIG_RALINK_RT3052) || defined (CONFIG_RALINK_RT3883)
10336 + //set 1us timer count in unit of clock cycle
10337 + fe_glo_cfg = sysRegRead(FE_GLO_CFG);
10338 + fe_glo_cfg &= ~(0xff << 8); //clear bit8-bit15
10339 + fe_glo_cfg |= (((get_surfboard_sysclk()/1000000)) << 8);
10340 + sysRegWrite(FE_GLO_CFG, fe_glo_cfg);
10344 +int forward_config(struct net_device *dev)
10347 +#if defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_MT7628)
10349 + /* RT5350: No GDMA, PSE, CDMA, PPE */
10350 + unsigned int sdmVal;
10351 + sdmVal = sysRegRead(SDM_CON);
10353 +#ifdef CONFIG_RAETH_CHECKSUM_OFFLOAD
10354 + sdmVal |= 0x7<<16; // UDPCS, TCPCS, IPCS=1
10355 +#endif // CONFIG_RAETH_CHECKSUM_OFFLOAD //
10357 +#if defined (CONFIG_RAETH_SPECIAL_TAG)
10358 + sdmVal |= 0x1<<20; // TCI_81XX
10359 +#endif // CONFIG_RAETH_SPECIAL_TAG //
10361 + sysRegWrite(SDM_CON, sdmVal);
10363 +#else //Non RT5350 chipset
10365 + unsigned int regVal, regCsg;
10367 +#ifdef CONFIG_PSEUDO_SUPPORT
10368 + unsigned int regVal2;
10371 +#ifdef CONFIG_RAETH_HW_VLAN_TX
10372 +#if defined(CONFIG_RALINK_MT7620)
10373 + /* frame engine will push VLAN tag regarding to VIDX feild in Tx desc. */
10374 + *(unsigned long *)(RALINK_FRAME_ENGINE_BASE + 0x430) = 0x00010000;
10375 + *(unsigned long *)(RALINK_FRAME_ENGINE_BASE + 0x434) = 0x00030002;
10376 + *(unsigned long *)(RALINK_FRAME_ENGINE_BASE + 0x438) = 0x00050004;
10377 + *(unsigned long *)(RALINK_FRAME_ENGINE_BASE + 0x43C) = 0x00070006;
10378 + *(unsigned long *)(RALINK_FRAME_ENGINE_BASE + 0x440) = 0x00090008;
10379 + *(unsigned long *)(RALINK_FRAME_ENGINE_BASE + 0x444) = 0x000b000a;
10380 + *(unsigned long *)(RALINK_FRAME_ENGINE_BASE + 0x448) = 0x000d000c;
10381 + *(unsigned long *)(RALINK_FRAME_ENGINE_BASE + 0x44C) = 0x000f000e;
10384 + * VLAN_IDX 0 = VLAN_ID 0
10386 + * VLAN_IDX 15 = VLAN ID 15
10389 + /* frame engine will push VLAN tag regarding to VIDX feild in Tx desc. */
10390 + *(unsigned long *)(RALINK_FRAME_ENGINE_BASE + 0xa8) = 0x00010000;
10391 + *(unsigned long *)(RALINK_FRAME_ENGINE_BASE + 0xac) = 0x00030002;
10392 + *(unsigned long *)(RALINK_FRAME_ENGINE_BASE + 0xb0) = 0x00050004;
10393 + *(unsigned long *)(RALINK_FRAME_ENGINE_BASE + 0xb4) = 0x00070006;
10394 + *(unsigned long *)(RALINK_FRAME_ENGINE_BASE + 0xb8) = 0x00090008;
10395 + *(unsigned long *)(RALINK_FRAME_ENGINE_BASE + 0xbc) = 0x000b000a;
10396 + *(unsigned long *)(RALINK_FRAME_ENGINE_BASE + 0xc0) = 0x000d000c;
10397 + *(unsigned long *)(RALINK_FRAME_ENGINE_BASE + 0xc4) = 0x000f000e;
10401 + regVal = sysRegRead(GDMA1_FWD_CFG);
10402 + regCsg = sysRegRead(CDMA_CSG_CFG);
10404 +#ifdef CONFIG_PSEUDO_SUPPORT
10405 + regVal2 = sysRegRead(GDMA2_FWD_CFG);
10408 + //set unicast/multicast/broadcast frame to cpu
10409 +#if defined (CONFIG_RALINK_MT7620)
10410 + /* GDMA1 frames destination port is port0 CPU*/
10413 + regVal &= ~0xFFFF;
10414 + regVal |= GDMA1_FWD_PORT;
10418 +#if defined (CONFIG_RAETH_SPECIAL_TAG)
10419 + regVal |= (1 << 24); //GDM1_TCI_81xx
10423 +#ifdef CONFIG_RAETH_HW_VLAN_TX
10424 +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0)
10425 + dev->features |= NETIF_F_HW_VLAN_TX;
10427 + dev->features |= NETIF_F_HW_VLAN_CTAG_TX;
10430 +#ifdef CONFIG_RAETH_HW_VLAN_RX
10431 +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0)
10432 + dev->features |= NETIF_F_HW_VLAN_RX;
10434 + dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
10438 +#ifdef CONFIG_RAETH_CHECKSUM_OFFLOAD
10439 + //enable ipv4 header checksum check
10440 + regVal |= GDM1_ICS_EN;
10441 + regCsg |= ICS_GEN_EN;
10443 + //enable tcp checksum check
10444 + regVal |= GDM1_TCS_EN;
10445 + regCsg |= TCS_GEN_EN;
10447 + //enable udp checksum check
10448 + regVal |= GDM1_UCS_EN;
10449 + regCsg |= UCS_GEN_EN;
10451 +#ifdef CONFIG_PSEUDO_SUPPORT
10452 + regVal2 &= ~0xFFFF;
10453 + regVal2 |= GDMA2_FWD_PORT;
10455 + regVal2 |= GDM1_ICS_EN;
10456 + regVal2 |= GDM1_TCS_EN;
10457 + regVal2 |= GDM1_UCS_EN;
10460 +#if defined (CONFIG_RAETH_HW_LRO)
10461 + dev->features |= NETIF_F_HW_CSUM;
10463 + dev->features |= NETIF_F_IP_CSUM; /* Can checksum TCP/UDP over IPv4 */
10464 +#endif /* CONFIG_RAETH_HW_LRO */
10465 +//#if LINUX_VERSION_CODE > KERNEL_VERSION(3,10,0)
10466 +// dev->vlan_features |= NETIF_F_IP_CSUM;
10469 +#if defined(CONFIG_RALINK_MT7620)
10470 +#if defined (CONFIG_RAETH_TSO)
10471 + if ((sysRegRead(0xB000000C) & 0xf) >= 0x5) {
10472 + dev->features |= NETIF_F_SG;
10473 + dev->features |= NETIF_F_TSO;
10475 +#endif // CONFIG_RAETH_TSO //
10477 +#if defined (CONFIG_RAETH_TSOV6)
10478 + if ((sysRegRead(0xB000000C) & 0xf) >= 0x5) {
10479 + dev->features |= NETIF_F_TSO6;
10480 + dev->features |= NETIF_F_IPV6_CSUM; /* Can checksum TCP/UDP over IPv6 */
10482 +#endif // CONFIG_RAETH_TSOV6 //
10484 +#if defined (CONFIG_RAETH_TSO)
10485 + dev->features |= NETIF_F_SG;
10486 + dev->features |= NETIF_F_TSO;
10487 +#endif // CONFIG_RAETH_TSO //
10489 +#if defined (CONFIG_RAETH_TSOV6)
10490 + dev->features |= NETIF_F_TSO6;
10491 + dev->features |= NETIF_F_IPV6_CSUM; /* Can checksum TCP/UDP over IPv6 */
10492 +#endif // CONFIG_RAETH_TSOV6 //
10493 +#endif // CONFIG_RALINK_MT7620 //
10494 +#else // Checksum offload disabled
10496 + //disable ipv4 header checksum check
10497 + regVal &= ~GDM1_ICS_EN;
10498 + regCsg &= ~ICS_GEN_EN;
10500 + //disable tcp checksum check
10501 + regVal &= ~GDM1_TCS_EN;
10502 + regCsg &= ~TCS_GEN_EN;
10504 + //disable udp checksum check
10505 + regVal &= ~GDM1_UCS_EN;
10506 + regCsg &= ~UCS_GEN_EN;
10508 +#ifdef CONFIG_PSEUDO_SUPPORT
10509 + regVal2 &= ~GDM1_ICS_EN;
10510 + regVal2 &= ~GDM1_TCS_EN;
10511 + regVal2 &= ~GDM1_UCS_EN;
10514 + dev->features &= ~NETIF_F_IP_CSUM; /* disable checksum TCP/UDP over IPv4 */
10515 +#endif // CONFIG_RAETH_CHECKSUM_OFFLOAD //
10517 +#ifdef CONFIG_RAETH_JUMBOFRAME
10518 + regVal |= GDM1_JMB_EN;
10519 +#ifdef CONFIG_PSEUDO_SUPPORT
10520 + regVal2 |= GDM1_JMB_EN;
10524 + sysRegWrite(GDMA1_FWD_CFG, regVal);
10525 + sysRegWrite(CDMA_CSG_CFG, regCsg);
10526 +#ifdef CONFIG_PSEUDO_SUPPORT
10527 + sysRegWrite(GDMA2_FWD_CFG, regVal2);
10530 +#if LINUX_VERSION_CODE > KERNEL_VERSION(3,10,0)
10531 + dev->vlan_features = dev->features;
10535 + * PSE_FQ_CFG register definition -
10537 + * Define max free queue page count in PSE. (31:24)
10538 + * RT2883/RT3883 - 0xff908000 (255 pages)
10539 + * RT3052 - 0x80504000 (128 pages)
10540 + * RT2880 - 0x80504000 (128 pages)
10542 + * In each page, there are 128 bytes in each page.
10544 + * 23:16 - free queue flow control release threshold
10545 + * 15:8 - free queue flow control assertion threshold
10546 + * 7:0 - free queue empty threshold
10548 + * The register affects QOS correctness in frame engine!
10551 +#if defined(CONFIG_RALINK_RT2883) || defined(CONFIG_RALINK_RT3883)
10552 + sysRegWrite(PSE_FQ_CFG, cpu_to_le32(INIT_VALUE_OF_RT2883_PSE_FQ_CFG));
10553 +#elif defined(CONFIG_RALINK_RT3352) || defined(CONFIG_RALINK_RT5350) || \
10554 + defined(CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A) || \
10555 + defined(CONFIG_RALINK_MT7620) || defined(CONFIG_RALINK_MT7621) || \
10556 + defined (CONFIG_RALINK_MT7628) || defined(CONFIG_ARCH_MT7623)
10557 + /*use default value*/
10559 + sysRegWrite(PSE_FQ_CFG, cpu_to_le32(INIT_VALUE_OF_PSE_FQFC_CFG));
10563 + *FE_RST_GLO register definition -
10565 + *Reset PSE after re-programming PSE_FQ_CFG.
10568 + sysRegWrite(FE_RST_GL, regVal);
10569 + sysRegWrite(FE_RST_GL, 0); // update for RSTCTL issue
10571 + regCsg = sysRegRead(CDMA_CSG_CFG);
10572 + printk("CDMA_CSG_CFG = %0X\n",regCsg);
10573 + regVal = sysRegRead(GDMA1_FWD_CFG);
10574 + printk("GDMA1_FWD_CFG = %0X\n",regVal);
10576 +#ifdef CONFIG_PSEUDO_SUPPORT
10577 + regVal = sysRegRead(GDMA2_FWD_CFG);
10578 + printk("GDMA2_FWD_CFG = %0X\n",regVal);
10584 +#ifdef CONFIG_RAETH_LRO
10586 +rt_get_skb_header(struct sk_buff *skb, void **iphdr, void **tcph,
10587 + u64 *hdr_flags, void *priv)
10589 + struct iphdr *iph = NULL;
10590 + int vhdr_len = 0;
10593 + * Make sure that this packet is Ethernet II, is not VLAN
10594 + * tagged, is IPv4, has a valid IP header, and is TCP.
10596 + if (skb->protocol == 0x0081) {
10597 + vhdr_len = VLAN_HLEN;
10600 + iph = (struct iphdr *)(skb->data + vhdr_len);
10601 + if (iph->daddr != lro_para.lan_ip1) {
10605 + if(iph->protocol != IPPROTO_TCP) {
10609 + *tcph = skb->data + (iph->ihl << 2) + vhdr_len;
10610 + *hdr_flags = LRO_IPV4 | LRO_TCP;
10612 + lro_flush_needed = 1;
10616 +#endif // CONFIG_RAETH_LRO //
10618 +#ifdef CONFIG_RAETH_NAPI
10619 +static int rt2880_eth_recv(struct net_device* dev, int *work_done, int work_to_do)
10621 +static int rt2880_eth_recv(struct net_device* dev)
10624 + struct sk_buff *skb, *rx_skb;
10625 + unsigned int length = 0;
10626 + unsigned long RxProcessed;
10628 + int bReschedule = 0;
10629 + END_DEVICE* ei_local = netdev_priv(dev);
10630 +#if defined (CONFIG_RAETH_MULTIPLE_RX_RING) || defined (CONFIG_RAETH_HW_LRO)
10631 + int rx_ring_no=0;
10634 +#if defined (CONFIG_RAETH_SPECIAL_TAG)
10635 + struct vlan_ethhdr *veth=NULL;
10638 +#ifdef CONFIG_PSEUDO_SUPPORT
10639 + PSEUDO_ADAPTER *pAd;
10643 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
10644 + rx_dma_owner_idx0 = (rx_calc_idx0 + 1) % NUM_RX_DESC;
10646 + rx_dma_owner_idx0 = (sysRegRead(RAETH_RX_CALC_IDX0) + 1) % NUM_RX_DESC;
10649 +#if defined (CONFIG_32B_DESC)
10650 + dma_cache_sync(NULL, &ei_local->rx_ring0[rx_dma_owner_idx0], sizeof(struct PDMA_rxdesc), DMA_FROM_DEVICE);
10652 +#if defined (CONFIG_RAETH_HW_LRO)
10653 + rx_dma_owner_lro1 = (sysRegRead(RX_CALC_IDX1) + 1) % NUM_LRO_RX_DESC;
10654 + rx_dma_owner_lro2 = (sysRegRead(RX_CALC_IDX2) + 1) % NUM_LRO_RX_DESC;
10655 + rx_dma_owner_lro3 = (sysRegRead(RX_CALC_IDX3) + 1) % NUM_LRO_RX_DESC;
10656 +#elif defined (CONFIG_RAETH_MULTIPLE_RX_RING)
10657 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
10658 + rx_dma_owner_idx1 = (rx_calc_idx1 + 1) % NUM_RX_DESC;
10660 + rx_dma_owner_idx1 = (sysRegRead(RX_CALC_IDX1) + 1) % NUM_RX_DESC;
10661 +#endif /* CONFIG_RAETH_RW_PDMAPTR_FROM_VAR */
10662 +#if defined(CONFIG_ARCH_MT7623)
10663 + rx_dma_owner_idx2 = (sysRegRead(RX_CALC_IDX2) + 1) % NUM_RX_DESC;
10664 + rx_dma_owner_idx3 = (sysRegRead(RX_CALC_IDX3) + 1) % NUM_RX_DESC;
10666 +#if defined (CONFIG_32B_DESC)
10667 + dma_cache_sync(NULL, &ei_local->rx_ring1[rx_dma_owner_idx1], sizeof(struct PDMA_rxdesc), DMA_FROM_DEVICE);
10673 +#ifdef CONFIG_RAETH_NAPI
10674 + if(*work_done >= work_to_do)
10678 + if (RxProcessed++ > NUM_RX_MAX_PROCESS)
10680 + // need to reschedule rx handle
10687 +#if defined (CONFIG_RAETH_HW_LRO)
10688 + if (ei_local->rx_ring3[rx_dma_owner_lro3].rxd_info2.DDONE_bit == 1) {
10689 + rx_ring = ei_local->rx_ring3;
10690 + rx_dma_owner_idx = rx_dma_owner_lro3;
10691 + // printk("rx_dma_owner_lro3=%x\n",rx_dma_owner_lro3);
10694 + else if (ei_local->rx_ring2[rx_dma_owner_lro2].rxd_info2.DDONE_bit == 1) {
10695 + rx_ring = ei_local->rx_ring2;
10696 + rx_dma_owner_idx = rx_dma_owner_lro2;
10697 + // printk("rx_dma_owner_lro2=%x\n",rx_dma_owner_lro2);
10700 + else if (ei_local->rx_ring1[rx_dma_owner_lro1].rxd_info2.DDONE_bit == 1) {
10701 + rx_ring = ei_local->rx_ring1;
10702 + rx_dma_owner_idx = rx_dma_owner_lro1;
10703 + // printk("rx_dma_owner_lro1=%x\n",rx_dma_owner_lro1);
10706 + else if (ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info2.DDONE_bit == 1) {
10707 + rx_ring = ei_local->rx_ring0;
10708 + rx_dma_owner_idx = rx_dma_owner_idx0;
10709 + // printk("rx_dma_owner_idx0=%x\n",rx_dma_owner_idx0);
10714 + #if defined (CONFIG_RAETH_HW_LRO_DBG)
10715 + HwLroStatsUpdate(rx_ring_no, rx_ring[rx_dma_owner_idx].rxd_info2.LRO_AGG_CNT, \
10716 + (rx_ring[rx_dma_owner_idx].rxd_info2.PLEN1 << 14) | rx_ring[rx_dma_owner_idx].rxd_info2.PLEN0);
10718 + #if defined(CONFIG_RAETH_HW_LRO_REASON_DBG)
10719 + HwLroFlushStatsUpdate(rx_ring_no, rx_ring[rx_dma_owner_idx].rxd_info2.REV);
10721 +#elif defined (CONFIG_RAETH_MULTIPLE_RX_RING)
10722 + if (ei_local->rx_ring1[rx_dma_owner_idx1].rxd_info2.DDONE_bit == 1) {
10723 + rx_ring = ei_local->rx_ring1;
10724 + rx_dma_owner_idx = rx_dma_owner_idx1;
10725 + // printk("rx_dma_owner_idx1=%x\n",rx_dma_owner_idx1);
10728 +#if defined(CONFIG_ARCH_MT7623)
10729 + else if (ei_local->rx_ring2[rx_dma_owner_idx2].rxd_info2.DDONE_bit == 1) {
10730 + rx_ring = ei_local->rx_ring2;
10731 + rx_dma_owner_idx = rx_dma_owner_idx2;
10732 + // printk("rx_dma_owner_idx2=%x\n",rx_dma_owner_idx2);
10735 + else if (ei_local->rx_ring3[rx_dma_owner_idx3].rxd_info2.DDONE_bit == 1) {
10736 + rx_ring = ei_local->rx_ring3;
10737 + rx_dma_owner_idx = rx_dma_owner_idx3;
10738 + // printk("rx_dma_owner_idx3=%x\n",rx_dma_owner_idx3);
10741 +#endif /* CONFIG_ARCH_MT7623 */
10742 + else if (ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info2.DDONE_bit == 1) {
10743 + rx_ring = ei_local->rx_ring0;
10744 + rx_dma_owner_idx = rx_dma_owner_idx0;
10745 + // printk("rx_dma_owner_idx0=%x\n",rx_dma_owner_idx0);
10752 + if (ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info2.DDONE_bit == 1) {
10753 + rx_ring = ei_local->rx_ring0;
10754 + rx_dma_owner_idx = rx_dma_owner_idx0;
10760 +#if defined (CONFIG_32B_DESC)
10761 + prefetch(&rx_ring[(rx_dma_owner_idx + 1) % NUM_RX_DESC]);
10763 + /* skb processing */
10764 +#if defined (CONFIG_RAETH_HW_LRO)
10765 + length = (rx_ring[rx_dma_owner_idx].rxd_info2.PLEN1 << 14) | rx_ring[rx_dma_owner_idx].rxd_info2.PLEN0;
10767 + length = rx_ring[rx_dma_owner_idx].rxd_info2.PLEN0;
10768 +#endif /* CONFIG_RAETH_HW_LRO */
10770 +#if defined (CONFIG_ARCH_MT7623)
10771 + dma_unmap_single(NULL, rx_ring[rx_dma_owner_idx].rxd_info1.PDP0, length, DMA_FROM_DEVICE);
10774 +#if defined (CONFIG_RAETH_HW_LRO)
10775 + if(rx_ring_no==3) {
10776 + rx_skb = ei_local->netrx3_skbuf[rx_dma_owner_idx];
10777 + rx_skb->data = ei_local->netrx3_skbuf[rx_dma_owner_idx]->data;
10779 + else if(rx_ring_no==2) {
10780 + rx_skb = ei_local->netrx2_skbuf[rx_dma_owner_idx];
10781 + rx_skb->data = ei_local->netrx2_skbuf[rx_dma_owner_idx]->data;
10783 + else if(rx_ring_no==1) {
10784 + rx_skb = ei_local->netrx1_skbuf[rx_dma_owner_idx];
10785 + rx_skb->data = ei_local->netrx1_skbuf[rx_dma_owner_idx]->data;
10788 + rx_skb = ei_local->netrx0_skbuf[rx_dma_owner_idx];
10789 + rx_skb->data = ei_local->netrx0_skbuf[rx_dma_owner_idx]->data;
10791 + #if defined(CONFIG_RAETH_PDMA_DVT)
10792 + raeth_pdma_lro_dvt( rx_ring_no, ei_local, rx_dma_owner_idx );
10793 + #endif /* CONFIG_RAETH_PDMA_DVT */
10794 +#elif defined (CONFIG_RAETH_MULTIPLE_RX_RING)
10795 + if(rx_ring_no==1) {
10796 + rx_skb = ei_local->netrx1_skbuf[rx_dma_owner_idx];
10797 + rx_skb->data = ei_local->netrx1_skbuf[rx_dma_owner_idx]->data;
10799 +#if defined(CONFIG_ARCH_MT7623)
10800 + else if(rx_ring_no==2) {
10801 + rx_skb = ei_local->netrx2_skbuf[rx_dma_owner_idx];
10802 + rx_skb->data = ei_local->netrx2_skbuf[rx_dma_owner_idx]->data;
10804 + else if(rx_ring_no==3) {
10805 + rx_skb = ei_local->netrx3_skbuf[rx_dma_owner_idx];
10806 + rx_skb->data = ei_local->netrx3_skbuf[rx_dma_owner_idx]->data;
10808 +#endif /* CONFIG_ARCH_MT7623 */
10810 + rx_skb = ei_local->netrx0_skbuf[rx_dma_owner_idx];
10811 + rx_skb->data = ei_local->netrx0_skbuf[rx_dma_owner_idx]->data;
10813 + #if defined(CONFIG_RAETH_PDMA_DVT)
10814 + raeth_pdma_lro_dvt( rx_ring_no, ei_local, rx_dma_owner_idx );
10815 + #endif /* CONFIG_RAETH_PDMA_DVT */
10817 + rx_skb = ei_local->netrx0_skbuf[rx_dma_owner_idx];
10818 + rx_skb->data = ei_local->netrx0_skbuf[rx_dma_owner_idx]->data;
10819 + #if defined(CONFIG_RAETH_PDMA_DVT)
10820 + raeth_pdma_rx_desc_dvt( ei_local, rx_dma_owner_idx0 );
10821 + #endif /* CONFIG_RAETH_PDMA_DVT */
10823 + rx_skb->len = length;
10825 +#if defined (CONFIG_RAETH_SCATTER_GATHER_RX_DMA)
10826 + rx_skb->data += NET_IP_ALIGN;
10828 + rx_skb->tail = rx_skb->data + length;
10830 +#ifdef CONFIG_PSEUDO_SUPPORT
10831 + if(rx_ring[rx_dma_owner_idx].rxd_info4.SP == 2) {
10832 + if(ei_local->PseudoDev!=NULL) {
10833 + rx_skb->dev = ei_local->PseudoDev;
10834 + rx_skb->protocol = eth_type_trans(rx_skb,ei_local->PseudoDev);
10836 + printk("ERROR: PseudoDev is still not initialize but receive packet from GMAC2\n");
10839 + rx_skb->dev = dev;
10840 + rx_skb->protocol = eth_type_trans(rx_skb,dev);
10843 + rx_skb->dev = dev;
10844 + rx_skb->protocol = eth_type_trans(rx_skb,dev);
10847 +#ifdef CONFIG_RAETH_CHECKSUM_OFFLOAD
10848 +#if defined (CONFIG_PDMA_NEW)
10849 + if(rx_ring[rx_dma_owner_idx].rxd_info4.L4VLD) {
10850 + rx_skb->ip_summed = CHECKSUM_UNNECESSARY;
10852 + rx_skb->ip_summed = CHECKSUM_NONE;
10855 + if(rx_ring[rx_dma_owner_idx].rxd_info4.IPFVLD_bit) {
10856 + rx_skb->ip_summed = CHECKSUM_UNNECESSARY;
10858 + rx_skb->ip_summed = CHECKSUM_NONE;
10862 + rx_skb->ip_summed = CHECKSUM_NONE;
10865 +#if defined(CONFIG_RA_CLASSIFIER)||defined(CONFIG_RA_CLASSIFIER_MODULE)
10868 + if(ra_classifier_hook_rx!= NULL)
10870 +#if defined(CONFIG_RALINK_EXTERNAL_TIMER)
10871 + ra_classifier_hook_rx(rx_skb, (*((volatile u32 *)(0xB0000D08))&0x0FFFF));
10873 + ra_classifier_hook_rx(rx_skb, read_c0_count());
10876 +#endif /* CONFIG_RA_CLASSIFIER */
10878 +#if defined (CONFIG_RA_HW_NAT) || defined (CONFIG_RA_HW_NAT_MODULE)
10879 + if(ra_sw_nat_hook_rx != NULL) {
10880 + FOE_MAGIC_TAG(rx_skb)= FOE_MAGIC_GE;
10881 + *(uint32_t *)(FOE_INFO_START_ADDR(rx_skb)+2) = *(uint32_t *)&rx_ring[rx_dma_owner_idx].rxd_info4;
10882 + FOE_ALG(rx_skb) = 0;
10886 + /* We have to check the free memory size is big enough
10887 + * before pass the packet to cpu*/
10888 +#if defined (CONFIG_RAETH_SKB_RECYCLE_2K)
10889 +#if defined (CONFIG_RAETH_HW_LRO)
10890 + if( rx_ring != ei_local->rx_ring0 )
10891 + skb = __dev_alloc_skb(MAX_LRO_RX_LENGTH + NET_IP_ALIGN, GFP_ATOMIC);
10893 +#endif /* CONFIG_RAETH_HW_LRO */
10894 + skb = skbmgr_dev_alloc_skb2k();
10896 +#if defined (CONFIG_RAETH_HW_LRO)
10897 + if( rx_ring != ei_local->rx_ring0 )
10898 + skb = __dev_alloc_skb(MAX_LRO_RX_LENGTH + NET_IP_ALIGN, GFP_ATOMIC);
10900 +#endif /* CONFIG_RAETH_HW_LRO */
10901 + skb = __dev_alloc_skb(MAX_RX_LENGTH + NET_IP_ALIGN, GFP_ATOMIC);
10904 + if (unlikely(skb == NULL))
10906 + printk(KERN_ERR "skb not available...\n");
10907 +#ifdef CONFIG_PSEUDO_SUPPORT
10908 + if (rx_ring[rx_dma_owner_idx].rxd_info4.SP == 2) {
10909 + if (ei_local->PseudoDev != NULL) {
10910 + pAd = netdev_priv(ei_local->PseudoDev);
10911 + pAd->stat.rx_dropped++;
10915 + ei_local->stat.rx_dropped++;
10919 +#if !defined (CONFIG_RAETH_SCATTER_GATHER_RX_DMA)
10920 + skb_reserve(skb, NET_IP_ALIGN);
10923 +#if defined (CONFIG_RAETH_SPECIAL_TAG)
10924 + // port0: 0x8100 => 0x8100 0001
10925 + // port1: 0x8101 => 0x8100 0002
10926 + // port2: 0x8102 => 0x8100 0003
10927 + // port3: 0x8103 => 0x8100 0004
10928 + // port4: 0x8104 => 0x8100 0005
10929 + // port5: 0x8105 => 0x8100 0006
10930 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,21)
10931 + veth = (struct vlan_ethhdr *)(rx_skb->mac_header);
10933 + veth = (struct vlan_ethhdr *)(rx_skb->mac.raw);
10935 + /*donot check 0x81 due to MT7530 SPEC*/
10936 + //if((veth->h_vlan_proto & 0xFF) == 0x81)
10938 + veth->h_vlan_TCI = htons( (((veth->h_vlan_proto >> 8) & 0xF) + 1) );
10939 + rx_skb->protocol = veth->h_vlan_proto = htons(ETH_P_8021Q);
10943 +/* ra_sw_nat_hook_rx return 1 --> continue
10944 + * ra_sw_nat_hook_rx return 0 --> FWD & without netif_rx
10946 +#if !defined(CONFIG_RA_NAT_NONE)
10947 + if((ra_sw_nat_hook_rx == NULL) ||
10948 + (ra_sw_nat_hook_rx!= NULL && ra_sw_nat_hook_rx(rx_skb)))
10951 +#if defined (CONFIG_RALINK_RT3052_MP2)
10952 + if(mcast_rx(rx_skb)==0) {
10953 + kfree_skb(rx_skb);
10956 +#if defined (CONFIG_RAETH_LRO)
10957 + if (rx_skb->ip_summed == CHECKSUM_UNNECESSARY) {
10958 + lro_receive_skb(&ei_local->lro_mgr, rx_skb, NULL);
10959 + //LroStatsUpdate(&ei_local->lro_mgr,0);
10962 +#ifdef CONFIG_RAETH_NAPI
10963 + netif_receive_skb(rx_skb);
10965 +#ifdef CONFIG_RAETH_HW_VLAN_RX
10966 + if(ei_local->vlgrp && rx_ring[rx_dma_owner_idx].rxd_info2.TAG) {
10967 + vlan_hwaccel_rx(rx_skb, ei_local->vlgrp, rx_ring[rx_dma_owner_idx].rxd_info3.VID);
10969 + netif_rx(rx_skb);
10972 +#ifdef CONFIG_RAETH_CPU_LOOPBACK
10973 + skb_push(rx_skb,ETH_HLEN);
10974 + ei_start_xmit(rx_skb, dev, 1);
10976 + netif_rx(rx_skb);
10982 +#ifdef CONFIG_PSEUDO_SUPPORT
10983 + if (rx_ring[rx_dma_owner_idx].rxd_info4.SP == 2) {
10984 + if (ei_local->PseudoDev != NULL) {
10985 + pAd = netdev_priv(ei_local->PseudoDev);
10986 + pAd->stat.rx_packets++;
10987 + pAd->stat.rx_bytes += length;
10992 + ei_local->stat.rx_packets++;
10993 + ei_local->stat.rx_bytes += length;
10997 +#if defined (CONFIG_RAETH_SCATTER_GATHER_RX_DMA)
10998 +#if defined (CONFIG_RAETH_HW_LRO)
10999 + if( rx_ring != ei_local->rx_ring0 ){
11000 + rx_ring[rx_dma_owner_idx].rxd_info2.PLEN0 = SET_ADMA_RX_LEN0(MAX_LRO_RX_LENGTH);
11001 + rx_ring[rx_dma_owner_idx].rxd_info2.PLEN1 = SET_ADMA_RX_LEN1(MAX_LRO_RX_LENGTH >> 14);
11004 +#endif /* CONFIG_RAETH_HW_LRO */
11005 + rx_ring[rx_dma_owner_idx].rxd_info2.PLEN0 = MAX_RX_LENGTH;
11006 + rx_ring[rx_dma_owner_idx].rxd_info2.LS0 = 0;
11008 + rx_ring[rx_dma_owner_idx].rxd_info2.DDONE_bit = 0;
11009 +#if defined (CONFIG_RAETH_HW_LRO)
11010 + if( rx_ring != ei_local->rx_ring0 )
11011 + rx_ring[rx_dma_owner_idx].rxd_info1.PDP0 = dma_map_single(NULL, skb->data, MAX_LRO_RX_LENGTH, PCI_DMA_FROMDEVICE);
11013 +#endif /* CONFIG_RAETH_HW_LRO */
11014 + rx_ring[rx_dma_owner_idx].rxd_info1.PDP0 = dma_map_single(NULL, skb->data, MAX_RX_LENGTH, PCI_DMA_FROMDEVICE);
11015 +#ifdef CONFIG_32B_DESC
11016 + dma_cache_sync(NULL, &rx_ring[rx_dma_owner_idx], sizeof(struct PDMA_rxdesc), DMA_TO_DEVICE);
11018 + /* Move point to next RXD which wants to alloc*/
11019 +#if defined (CONFIG_RAETH_HW_LRO)
11020 + if(rx_ring_no==3) {
11021 + sysRegWrite(RAETH_RX_CALC_IDX3, rx_dma_owner_idx);
11022 + ei_local->netrx3_skbuf[rx_dma_owner_idx] = skb;
11024 + else if(rx_ring_no==2) {
11025 + sysRegWrite(RAETH_RX_CALC_IDX2, rx_dma_owner_idx);
11026 + ei_local->netrx2_skbuf[rx_dma_owner_idx] = skb;
11028 + else if(rx_ring_no==1) {
11029 + sysRegWrite(RAETH_RX_CALC_IDX1, rx_dma_owner_idx);
11030 + ei_local->netrx1_skbuf[rx_dma_owner_idx] = skb;
11032 + else if(rx_ring_no==0) {
11033 + sysRegWrite(RAETH_RX_CALC_IDX0, rx_dma_owner_idx);
11034 + ei_local->netrx0_skbuf[rx_dma_owner_idx] = skb;
11036 +#elif defined (CONFIG_RAETH_MULTIPLE_RX_RING)
11037 + if(rx_ring_no==0) {
11038 + sysRegWrite(RAETH_RX_CALC_IDX0, rx_dma_owner_idx);
11039 + ei_local->netrx0_skbuf[rx_dma_owner_idx] = skb;
11040 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
11041 + rx_calc_idx0 = rx_dma_owner_idx;
11044 +#if defined(CONFIG_ARCH_MT7623)
11045 + else if(rx_ring_no==3) {
11046 + sysRegWrite(RAETH_RX_CALC_IDX3, rx_dma_owner_idx);
11047 + ei_local->netrx3_skbuf[rx_dma_owner_idx] = skb;
11049 + else if(rx_ring_no==2) {
11050 + sysRegWrite(RAETH_RX_CALC_IDX2, rx_dma_owner_idx);
11051 + ei_local->netrx2_skbuf[rx_dma_owner_idx] = skb;
11053 +#endif /* CONFIG_ARCH_MT7623 */
11055 + sysRegWrite(RAETH_RX_CALC_IDX1, rx_dma_owner_idx);
11056 + ei_local->netrx1_skbuf[rx_dma_owner_idx] = skb;
11057 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
11058 + rx_calc_idx1 = rx_dma_owner_idx;
11062 + sysRegWrite(RAETH_RX_CALC_IDX0, rx_dma_owner_idx);
11063 + ei_local->netrx0_skbuf[rx_dma_owner_idx] = skb;
11064 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
11065 + rx_calc_idx0 = rx_dma_owner_idx;
11070 + /* Update to Next packet point that was received.
11072 +#if defined (CONFIG_RAETH_HW_LRO)
11073 + if(rx_ring_no==3)
11074 + rx_dma_owner_lro3 = (sysRegRead(RAETH_RX_CALC_IDX3) + 1) % NUM_LRO_RX_DESC;
11075 + else if(rx_ring_no==2)
11076 + rx_dma_owner_lro2 = (sysRegRead(RAETH_RX_CALC_IDX2) + 1) % NUM_LRO_RX_DESC;
11077 + else if(rx_ring_no==1)
11078 + rx_dma_owner_lro1 = (sysRegRead(RAETH_RX_CALC_IDX1) + 1) % NUM_LRO_RX_DESC;
11079 + else if(rx_ring_no==0)
11080 + rx_dma_owner_idx0 = (sysRegRead(RAETH_RX_CALC_IDX0) + 1) % NUM_RX_DESC;
11083 +#elif defined (CONFIG_RAETH_MULTIPLE_RX_RING)
11084 + if(rx_ring_no==0) {
11085 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
11086 + rx_dma_owner_idx0 = (rx_dma_owner_idx + 1) % NUM_RX_DESC;
11088 + rx_dma_owner_idx0 = (sysRegRead(RAETH_RX_CALC_IDX0) + 1) % NUM_RX_DESC;
11090 +#if defined(CONFIG_ARCH_MT7623)
11091 + }else if(rx_ring_no==3) {
11092 + rx_dma_owner_idx3 = (sysRegRead(RAETH_RX_CALC_IDX3) + 1) % NUM_RX_DESC;
11093 + }else if(rx_ring_no==2) {
11094 + rx_dma_owner_idx2 = (sysRegRead(RAETH_RX_CALC_IDX2) + 1) % NUM_RX_DESC;
11095 +#endif /* CONFIG_ARCH_MT7623 */
11097 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
11098 + rx_dma_owner_idx1 = (rx_dma_owner_idx + 1) % NUM_RX_DESC;
11100 + rx_dma_owner_idx1 = (sysRegRead(RAETH_RX_CALC_IDX1) + 1) % NUM_RX_DESC;
11104 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
11105 + rx_dma_owner_idx0 = (rx_dma_owner_idx + 1) % NUM_RX_DESC;
11107 + rx_dma_owner_idx0 = (sysRegRead(RAETH_RX_CALC_IDX0) + 1) % NUM_RX_DESC;
11112 +#if defined (CONFIG_RAETH_LRO)
11113 + if (lro_flush_needed) {
11114 + //LroStatsUpdate(&ei_local->lro_mgr,1);
11115 + lro_flush_all(&ei_local->lro_mgr);
11116 + lro_flush_needed = 0;
11119 + return bReschedule;
11123 +///////////////////////////////////////////////////////////////////
11125 +///// ra_get_stats - gather packet information for management plane
11127 +///// Pass net_device_stats to the upper layer.
11130 +///// RETURNS: pointer to net_device_stats
11131 +///////////////////////////////////////////////////////////////////
11133 +struct net_device_stats *ra_get_stats(struct net_device *dev)
11135 + END_DEVICE *ei_local = netdev_priv(dev);
11136 + return &ei_local->stat;
11139 +#if defined (CONFIG_RT_3052_ESW)
11140 +void kill_sig_workq(struct work_struct *work)
11144 + struct task_struct *p = NULL;
11146 + //read udhcpc pid from file, and send signal USR2,USR1 to get a new IP
11147 + fp = filp_open("/var/run/udhcpc.pid", O_RDONLY, 0);
11151 + if (fp->f_op && fp->f_op->read) {
11152 + if (fp->f_op->read(fp, pid, 8, &fp->f_pos) > 0) {
11153 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
11154 + p = pid_task(find_get_pid(simple_strtoul(pid, NULL, 10)), PIDTYPE_PID);
11156 + p = find_task_by_pid(simple_strtoul(pid, NULL, 10));
11160 + send_sig(SIGUSR2, p, 0);
11161 + send_sig(SIGUSR1, p, 0);
11165 + filp_close(fp, NULL);
11171 +///////////////////////////////////////////////////////////////////
11173 +///// ra2880Recv - process the next incoming packet
11175 +///// Handle one incoming packet. The packet is checked for errors and sent
11176 +///// to the upper layer.
11178 +///// RETURNS: OK on success or ERROR.
11179 +///////////////////////////////////////////////////////////////////
11181 +#ifndef CONFIG_RAETH_NAPI
11182 +#if defined WORKQUEUE_BH || defined (TASKLET_WORKQUEUE_SW)
11183 +void ei_receive_workq(struct work_struct *work)
11185 +void ei_receive(unsigned long unused) // device structure
11186 +#endif // WORKQUEUE_BH //
11188 + struct net_device *dev = dev_raether;
11189 + END_DEVICE *ei_local = netdev_priv(dev);
11190 + unsigned long reg_int_mask=0;
11191 + int bReschedule=0;
11194 + if(tx_ring_full==0){
11195 + bReschedule = rt2880_eth_recv(dev);
11198 +#ifdef WORKQUEUE_BH
11199 + schedule_work(&ei_local->rx_wq);
11201 +#if defined (TASKLET_WORKQUEUE_SW)
11202 + if (working_schedule == 1)
11203 + schedule_work(&ei_local->rx_wq);
11206 + tasklet_hi_schedule(&ei_local->rx_tasklet);
11207 +#endif // WORKQUEUE_BH //
11209 + reg_int_mask=sysRegRead(RAETH_FE_INT_ENABLE);
11210 +#if defined(DELAY_INT)
11211 + sysRegWrite(RAETH_FE_INT_ENABLE, reg_int_mask| RX_DLY_INT);
11213 + sysRegWrite(RAETH_FE_INT_ENABLE, (reg_int_mask | RX_DONE_INT0 | RX_DONE_INT1 | RX_DONE_INT2 | RX_DONE_INT3));
11215 +#ifdef CONFIG_RAETH_QDMA
11216 + reg_int_mask=sysRegRead(QFE_INT_ENABLE);
11217 +#if defined(DELAY_INT)
11218 + sysRegWrite(QFE_INT_ENABLE, reg_int_mask| RX_DLY_INT);
11220 + sysRegWrite(QFE_INT_ENABLE, (reg_int_mask | RX_DONE_INT0 | RX_DONE_INT1));
11227 +#ifdef WORKQUEUE_BH
11228 + schedule_work(&ei_local->rx_wq);
11230 +#if defined (TASKLET_WORKQUEUE_SW)
11231 + if (working_schedule == 1)
11232 + schedule_work(&ei_local->rx_wq);
11235 + tasklet_schedule(&ei_local->rx_tasklet);
11236 +#endif // WORKQUEUE_BH //
11241 +#if defined (CONFIG_RAETH_HW_LRO)
11242 +void ei_hw_lro_auto_adj(unsigned int index, END_DEVICE* ei_local)
11244 + unsigned int entry;
11245 + unsigned int pkt_cnt;
11246 + unsigned int tick_cnt;
11247 + unsigned int duration_us;
11248 + unsigned int byte_cnt;
11250 +	/* read packet count statistics of the auto-learn table */
11251 + entry = index + 68;
11252 + sysRegWrite( PDMA_FE_ALT_CF8, entry );
11253 + pkt_cnt = sysRegRead(PDMA_FE_ALT_SGL_CFC) & 0xfff;
11254 + tick_cnt = (sysRegRead(PDMA_FE_ALT_SGL_CFC) >> 16) & 0xffff;
11255 +#if defined (CONFIG_RAETH_HW_LRO_AUTO_ADJ_DBG)
11256 + printk("[HW LRO] ei_hw_lro_auto_adj(): pkt_cnt[%d]=%d, tick_cnt[%d]=%d\n", index, pkt_cnt, index, tick_cnt);
11257 + printk("[HW LRO] ei_hw_lro_auto_adj(): packet_interval[%d]=%d (ticks/pkt)\n", index, tick_cnt/pkt_cnt);
11260 +	/* read byte count statistics of the auto-learn table */
11261 + entry = index + 64;
11262 + sysRegWrite( PDMA_FE_ALT_CF8, entry );
11263 + byte_cnt = sysRegRead(PDMA_FE_ALT_SGL_CFC);
11264 +#if defined (CONFIG_RAETH_HW_LRO_AUTO_ADJ_DBG)
11265 + printk("[HW LRO] ei_hw_lro_auto_adj(): byte_cnt[%d]=%d\n", index, byte_cnt);
11268 + /* calculate the packet interval of the rx flow */
11269 + duration_us = tick_cnt * HW_LRO_TIMER_UNIT;
11270 + ei_local->hw_lro_pkt_interval[index - 1] = (duration_us/pkt_cnt) * ei_local->hw_lro_alpha / 100;
11271 +#if defined (CONFIG_RAETH_HW_LRO_AUTO_ADJ_DBG)
11272 + printk("[HW LRO] ei_hw_lro_auto_adj(): packet_interval[%d]=%d (20us)\n", index, duration_us/pkt_cnt);
11275 + if ( !ei_local->hw_lro_fix_setting ){
11276 + /* adjust age_time, agg_time for the lro ring */
11277 + if(ei_local->hw_lro_pkt_interval[index - 1] > 0){
11278 + SET_PDMA_RXRING_AGE_TIME(index, (ei_local->hw_lro_pkt_interval[index - 1] * HW_LRO_MAX_AGG_CNT));
11279 + SET_PDMA_RXRING_AGG_TIME(index, (ei_local->hw_lro_pkt_interval[index - 1] * HW_LRO_AGG_DELTA));
11282 + SET_PDMA_RXRING_AGE_TIME(index, HW_LRO_MAX_AGG_CNT);
11283 + SET_PDMA_RXRING_AGG_TIME(index, HW_LRO_AGG_DELTA);
11288 +void ei_hw_lro_workq(struct work_struct *work)
11290 + END_DEVICE *ei_local;
11291 + unsigned int reg_int_val;
11292 + unsigned int reg_int_mask;
11294 + ei_local = container_of(work, struct end_device, hw_lro_wq);
11296 + reg_int_val = sysRegRead(RAETH_FE_INT_STATUS);
11297 +#if defined (CONFIG_RAETH_HW_LRO_AUTO_ADJ_DBG)
11298 + printk("[HW LRO] ei_hw_lro_workq(): RAETH_FE_INT_STATUS=0x%x\n", reg_int_val);
11300 + if((reg_int_val & ALT_RPLC_INT3)){
11301 +#if defined (CONFIG_RAETH_HW_LRO_AUTO_ADJ_DBG)
11302 + printk("[HW LRO] ALT_RPLC_INT3 occurred!\n");
11304 + sysRegWrite(RAETH_FE_INT_STATUS, ALT_RPLC_INT3);
11305 + ei_hw_lro_auto_adj(3, ei_local);
11307 + if((reg_int_val & ALT_RPLC_INT2)){
11308 +#if defined (CONFIG_RAETH_HW_LRO_AUTO_ADJ_DBG)
11309 + printk("[HW LRO] ALT_RPLC_INT2 occurred!\n");
11311 + sysRegWrite(RAETH_FE_INT_STATUS, ALT_RPLC_INT2);
11312 + ei_hw_lro_auto_adj(2, ei_local);
11314 + if((reg_int_val & ALT_RPLC_INT1)){
11315 +#if defined (CONFIG_RAETH_HW_LRO_AUTO_ADJ_DBG)
11316 + printk("[HW LRO] ALT_RPLC_INT1 occurred!\n");
11318 + sysRegWrite(RAETH_FE_INT_STATUS, ALT_RPLC_INT1);
11319 + ei_hw_lro_auto_adj(1, ei_local);
11322 +	/* unmask interrupts of rx flow to hw lro rings */
11323 + reg_int_mask = sysRegRead(RAETH_FE_INT_ENABLE);
11324 + sysRegWrite(RAETH_FE_INT_ENABLE, reg_int_mask | ALT_RPLC_INT3 | ALT_RPLC_INT2 | ALT_RPLC_INT1);
11326 +#endif /* CONFIG_RAETH_HW_LRO */
11328 +#ifdef CONFIG_RAETH_NAPI
11330 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
11331 +raeth_clean(struct napi_struct *napi, int budget)
11333 +raeth_clean(struct net_device *netdev, int *budget)
11336 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
11337 + struct net_device *netdev=dev_raether;
11338 + int work_to_do = budget;
11340 + int work_to_do = min(*budget, netdev->quota);
11342 + END_DEVICE *ei_local =netdev_priv(netdev);
11343 + int work_done = 0;
11344 + unsigned long reg_int_mask=0;
11346 + ei_xmit_housekeeping(0);
11348 + rt2880_eth_recv(netdev, &work_done, work_to_do);
11350 + /* this could control when to re-enable interrupt, 0-> mean never enable interrupt*/
11351 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
11352 + *budget -= work_done;
11353 + netdev->quota -= work_done;
11355 + /* if no Tx and not enough Rx work done, exit the polling mode */
11356 + if(( (work_done < work_to_do)) || !netif_running(netdev)) {
11357 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
11358 + napi_complete(&ei_local->napi);
11360 + netif_rx_complete(netdev);
11362 + atomic_dec_and_test(&ei_local->irq_sem);
11364 + sysRegWrite(RAETH_FE_INT_STATUS, RAETH_FE_INT_ALL); // ack all fe interrupts
11365 + reg_int_mask=sysRegRead(RAETH_FE_INT_ENABLE);
11368 + sysRegWrite(RAETH_FE_INT_ENABLE, reg_int_mask |RAETH_FE_INT_DLY_INIT); // init delay interrupt only
11370 + sysRegWrite(RAETH_FE_INT_ENABLE,reg_int_mask | RAETH_FE_INT_SETTING);
11373 +#ifdef CONFIG_RAETH_QDMA
11374 + sysRegWrite(QFE_INT_STATUS, QFE_INT_ALL);
11375 + reg_int_mask=sysRegRead(QFE_INT_ENABLE);
11377 + sysRegWrite(QFE_INT_ENABLE, reg_int_mask |QFE_INT_DLY_INIT); // init delay interrupt only
11379 + sysRegWrite(QFE_INT_ENABLE,reg_int_mask | (RX_DONE_INT0 | RX_DONE_INT1 | RLS_DONE_INT));
11381 +#endif // CONFIG_RAETH_QDMA //
11392 +void gsw_delay_setting(void)
11394 +#if defined (CONFIG_GE_RGMII_INTERNAL_P0_AN) || defined (CONFIG_GE_RGMII_INTERNAL_P4_AN)
11395 + END_DEVICE *ei_local = netdev_priv(dev_raether);
11396 + int reg_int_val = 0;
11397 + int link_speed = 0;
11399 + reg_int_val = sysRegRead(FE_INT_STATUS2);
11400 +#if defined (CONFIG_RALINK_MT7621)
11401 + if( reg_int_val & BIT(25))
11403 + if(sysRegRead(RALINK_ETH_SW_BASE+0x0208) & 0x1) // link up
11405 + link_speed = (sysRegRead(RALINK_ETH_SW_BASE+0x0208)>>2 & 0x3);
11406 + if(link_speed == 1)
11408 + // delay setting for 100M
11409 + if((sysRegRead(0xbe00000c)&0xFFFF)==0x0101)
11410 + mii_mgr_write(31, 0x7b00, 8);
11411 + printk("MT7621 GE2 link rate to 100M\n");
11414 + //delay setting for 10/1000M
11415 + if((sysRegRead(0xbe00000c)&0xFFFF)==0x0101)
11416 + mii_mgr_write(31, 0x7b00, 0x102);
11417 + printk("MT7621 GE2 link rate to 10M/1G\n");
11419 + schedule_work(&ei_local->kill_sig_wq);
11423 + sysRegWrite(FE_INT_STATUS2, reg_int_val);
11428 + * ei_interrupt - handle controller interrupt
11430 + * This routine is called at interrupt level in response to an interrupt from
11431 + * the controller.
11435 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
11436 +static irqreturn_t ei_interrupt(int irq, void *dev_id)
11438 +static irqreturn_t ei_interrupt(int irq, void *dev_id, struct pt_regs * regs)
11441 +#if !defined(CONFIG_RAETH_NAPI)
11442 + unsigned long reg_int_val;
11443 + unsigned long reg_int_mask=0;
11444 + unsigned int recv = 0;
11445 + unsigned int transmit __maybe_unused = 0;
11446 + unsigned long flags;
11449 + struct net_device *dev = (struct net_device *) dev_id;
11450 + END_DEVICE *ei_local = netdev_priv(dev);
11454 + unsigned long old,cur,dcycle;
11455 + static int cnt = 0;
11456 + static unsigned long max_dcycle = 0,tcycle = 0;
11457 + old = read_c0_count();
11461 + printk (KERN_ERR "net_interrupt(): irq %x for unknown device.\n", IRQ_ENET0);
11465 +#ifdef CONFIG_RAETH_NAPI
11466 + gsw_delay_setting();
11467 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
11468 + if(napi_schedule_prep(&ei_local->napi)) {
11470 + if(netif_rx_schedule_prep(dev)) {
11472 + atomic_inc(&ei_local->irq_sem);
11473 + sysRegWrite(RAETH_FE_INT_ENABLE, 0);
11474 +#ifdef CONFIG_RAETH_QDMA
11475 + sysRegWrite(QFE_INT_ENABLE, 0);
11477 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
11478 + __napi_schedule(&ei_local->napi);
11480 + __netif_rx_schedule(dev);
11485 + spin_lock_irqsave(&(ei_local->page_lock), flags);
11486 + reg_int_val = sysRegRead(RAETH_FE_INT_STATUS);
11487 +#ifdef CONFIG_RAETH_QDMA
11488 + reg_int_val |= sysRegRead(QFE_INT_STATUS);
11490 +#if defined (DELAY_INT)
11491 + if((reg_int_val & RX_DLY_INT))
11494 + if (reg_int_val & RAETH_TX_DLY_INT)
11497 +#if defined(CONFIG_RAETH_PDMA_DVT)
11498 + raeth_pdma_lro_dly_int_dvt();
11499 +#endif /* CONFIG_RAETH_PDMA_DVT */
11502 + if((reg_int_val & (RX_DONE_INT0 | RX_DONE_INT3 | RX_DONE_INT2 | RX_DONE_INT1)))
11505 +#if defined (CONFIG_RAETH_MULTIPLE_RX_RING)
11506 +#if defined(CONFIG_ARCH_MT7623)
11507 + if((reg_int_val & RX_DONE_INT3))
11509 + if((reg_int_val & RX_DONE_INT2))
11511 +#endif /* CONFIG_ARCH_MT7623 */
11512 + if((reg_int_val & RX_DONE_INT1))
11516 + if (reg_int_val & RAETH_TX_DONE_INT0)
11517 + transmit |= RAETH_TX_DONE_INT0;
11518 +#if defined (CONFIG_RAETH_QOS)
11519 + if (reg_int_val & TX_DONE_INT1)
11520 + transmit |= TX_DONE_INT1;
11521 + if (reg_int_val & TX_DONE_INT2)
11522 + transmit |= TX_DONE_INT2;
11523 + if (reg_int_val & TX_DONE_INT3)
11524 + transmit |= TX_DONE_INT3;
11525 +#endif //CONFIG_RAETH_QOS
11527 +#endif //DELAY_INT
11529 +#if defined (DELAY_INT)
11530 + sysRegWrite(RAETH_FE_INT_STATUS, RAETH_FE_INT_DLY_INIT);
11532 + sysRegWrite(RAETH_FE_INT_STATUS, RAETH_FE_INT_ALL);
11534 +#ifdef CONFIG_RAETH_QDMA
11535 +#if defined (DELAY_INT)
11536 + sysRegWrite(QFE_INT_STATUS, QFE_INT_DLY_INIT);
11538 + sysRegWrite(QFE_INT_STATUS, QFE_INT_ALL);
11542 +#if defined (CONFIG_RAETH_HW_LRO)
11543 + if( reg_int_val & (ALT_RPLC_INT3 | ALT_RPLC_INT2 | ALT_RPLC_INT1) ){
11544 +		/* mask interrupts of rx flow to hw lro rings */
11545 + reg_int_mask = sysRegRead(RAETH_FE_INT_ENABLE);
11546 + sysRegWrite(RAETH_FE_INT_ENABLE, reg_int_mask & ~(ALT_RPLC_INT3 | ALT_RPLC_INT2 | ALT_RPLC_INT1));
11547 + schedule_work(&ei_local->hw_lro_wq);
11549 +#endif /* CONFIG_RAETH_HW_LRO */
11551 +#if LINUX_VERSION_CODE > KERNEL_VERSION(3,10,0)
11553 + ei_xmit_housekeeping(0);
11555 + ei_xmit_housekeeping(0);
11558 + if (((recv == 1) || (pending_recv ==1)) && (tx_ring_full==0))
11560 + reg_int_mask = sysRegRead(RAETH_FE_INT_ENABLE);
11561 +#if defined (DELAY_INT)
11562 + sysRegWrite(RAETH_FE_INT_ENABLE, reg_int_mask & ~(RX_DLY_INT));
11564 + sysRegWrite(RAETH_FE_INT_ENABLE, reg_int_mask & ~(RX_DONE_INT0 | RX_DONE_INT1 | RX_DONE_INT2 | RX_DONE_INT3));
11565 +#endif //DELAY_INT
11566 +#ifdef CONFIG_RAETH_QDMA
11567 + reg_int_mask = sysRegRead(QFE_INT_ENABLE);
11568 +#if defined (DELAY_INT)
11569 + sysRegWrite(QFE_INT_ENABLE, reg_int_mask & ~(RX_DLY_INT));
11571 + sysRegWrite(QFE_INT_ENABLE, reg_int_mask & ~(RX_DONE_INT0 | RX_DONE_INT1 | RX_DONE_INT2 | RX_DONE_INT3));
11572 +#endif //DELAY_INT
11576 +#ifdef WORKQUEUE_BH
11577 + schedule_work(&ei_local->rx_wq);
11579 +#if defined (TASKLET_WORKQUEUE_SW)
11580 + if (working_schedule == 1)
11581 + schedule_work(&ei_local->rx_wq);
11584 + tasklet_hi_schedule(&ei_local->rx_tasklet);
11585 +#endif // WORKQUEUE_BH //
11587 + else if (recv == 1 && tx_ring_full==1)
11591 + else if((recv == 0) && (transmit == 0))
11593 + gsw_delay_setting();
11595 + spin_unlock_irqrestore(&(ei_local->page_lock), flags);
11598 + return IRQ_HANDLED;
11601 +#if defined (CONFIG_RALINK_RT6855) || defined (CONFIG_RALINK_RT6855A) || \
11602 + defined (CONFIG_RALINK_MT7620)|| defined (CONFIG_RALINK_MT7621)
11603 +static void esw_link_status_changed(int port_no, void *dev_id)
11605 + unsigned int reg_val;
11606 + struct net_device *dev = (struct net_device *) dev_id;
11607 + END_DEVICE *ei_local = netdev_priv(dev);
11609 +#if defined (CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A) || \
11610 + defined (CONFIG_RALINK_MT7620)
11611 + reg_val = *((volatile u32 *)(RALINK_ETH_SW_BASE+ 0x3008 + (port_no*0x100)));
11612 +#elif defined (CONFIG_RALINK_MT7621)
11613 +	mii_mgr_read(31, (0x3008 + (port_no*0x100)), &reg_val);
11615 + if(reg_val & 0x1) {
11616 + printk("ESW: Link Status Changed - Port%d Link UP\n", port_no);
11617 +#if defined (CONFIG_RALINK_MT7621) && defined (CONFIG_RAETH_8023AZ_EEE)
11618 + mii_mgr_write(port_no, 31, 0x52b5);
11619 + mii_mgr_write(port_no, 16, 0xb780);
11620 + mii_mgr_write(port_no, 17, 0x00e0);
11621 + mii_mgr_write(port_no, 16, 0x9780);
11624 +#if defined (CONFIG_WAN_AT_P0)
11626 + schedule_work(&ei_local->kill_sig_wq);
11628 +#elif defined (CONFIG_WAN_AT_P4)
11630 + schedule_work(&ei_local->kill_sig_wq);
11634 + printk("ESW: Link Status Changed - Port%d Link Down\n", port_no);
11635 +#if defined (CONFIG_RALINK_MT7621) && defined (CONFIG_RAETH_8023AZ_EEE)
11636 + mii_mgr_write(port_no, 31, 0x52b5);
11637 + mii_mgr_write(port_no, 16, 0xb780);
11638 + mii_mgr_write(port_no, 17, 0x0000);
11639 + mii_mgr_write(port_no, 16, 0x9780);
11646 +#if defined (CONFIG_RT_3052_ESW) && ! defined(CONFIG_RALINK_MT7621) && ! defined(CONFIG_ARCH_MT7623)
11647 +static irqreturn_t esw_interrupt(int irq, void *dev_id)
11649 + unsigned long flags;
11650 + unsigned long reg_int_val;
11651 +#if defined (CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A) || \
11652 + defined(CONFIG_RALINK_MT7620)
11653 + unsigned long acl_int_val;
11654 + unsigned long mib_int_val;
11656 + static unsigned long stat;
11657 + unsigned long stat_curr;
11660 + struct net_device *dev = (struct net_device *) dev_id;
11661 + END_DEVICE *ei_local = netdev_priv(dev);
11664 + spin_lock_irqsave(&(ei_local->page_lock), flags);
11665 + reg_int_val = (*((volatile u32 *)(ESW_ISR))); //Interrupt Status Register
11667 +#if defined (CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A) || \
11668 + defined(CONFIG_RALINK_MT7620)
11669 + if (reg_int_val & P5_LINK_CH) {
11670 + esw_link_status_changed(5, dev_id);
11672 + if (reg_int_val & P4_LINK_CH) {
11673 + esw_link_status_changed(4, dev_id);
11675 + if (reg_int_val & P3_LINK_CH) {
11676 + esw_link_status_changed(3, dev_id);
11678 + if (reg_int_val & P2_LINK_CH) {
11679 + esw_link_status_changed(2, dev_id);
11681 + if (reg_int_val & P1_LINK_CH) {
11682 + esw_link_status_changed(1, dev_id);
11684 + if (reg_int_val & P0_LINK_CH) {
11685 + esw_link_status_changed(0, dev_id);
11687 + if (reg_int_val & ACL_INT) {
11688 + acl_int_val = sysRegRead(ESW_AISR);
11689 + sysRegWrite(ESW_AISR, acl_int_val);
11691 + if (reg_int_val & MIB_INT) {
11693 + mib_int_val = sysRegRead(ESW_P0_IntSn);
11695 + sysRegWrite(ESW_P0_IntSn, mib_int_val);
11696 + if(mib_int_val & RX_GOOD_CNT)
11697 + p0_rx_good_cnt ++;
11698 + if(mib_int_val & TX_GOOD_CNT)
11699 + p0_tx_good_cnt ++;
11700 + if(mib_int_val & RX_GOCT_CNT)
11701 + p0_rx_byte_cnt ++;
11702 + if(mib_int_val & TX_GOCT_CNT)
11703 + p0_tx_byte_cnt ++;
11706 + mib_int_val = sysRegRead(ESW_P1_IntSn);
11708 + sysRegWrite(ESW_P1_IntSn, mib_int_val);
11709 + if(mib_int_val & RX_GOOD_CNT)
11710 + p1_rx_good_cnt ++;
11711 + if(mib_int_val & TX_GOOD_CNT)
11712 + p1_tx_good_cnt ++;
11713 + if(mib_int_val & RX_GOCT_CNT)
11714 + p1_rx_byte_cnt ++;
11715 + if(mib_int_val & TX_GOCT_CNT)
11716 + p1_tx_byte_cnt ++;
11719 + mib_int_val = sysRegRead(ESW_P2_IntSn);
11721 + sysRegWrite(ESW_P2_IntSn, mib_int_val);
11722 + if(mib_int_val & RX_GOOD_CNT)
11723 + p2_rx_good_cnt ++;
11724 + if(mib_int_val & TX_GOOD_CNT)
11725 + p2_tx_good_cnt ++;
11726 + if(mib_int_val & RX_GOCT_CNT)
11727 + p2_rx_byte_cnt ++;
11728 + if(mib_int_val & TX_GOCT_CNT)
11729 + p2_tx_byte_cnt ++;
11733 + mib_int_val = sysRegRead(ESW_P3_IntSn);
11735 + sysRegWrite(ESW_P3_IntSn, mib_int_val);
11736 + if(mib_int_val & RX_GOOD_CNT)
11737 + p3_rx_good_cnt ++;
11738 + if(mib_int_val & TX_GOOD_CNT)
11739 + p3_tx_good_cnt ++;
11740 + if(mib_int_val & RX_GOCT_CNT)
11741 + p3_rx_byte_cnt ++;
11742 + if(mib_int_val & TX_GOCT_CNT)
11743 + p3_tx_byte_cnt ++;
11746 + mib_int_val = sysRegRead(ESW_P4_IntSn);
11748 + sysRegWrite(ESW_P4_IntSn, mib_int_val);
11749 + if(mib_int_val & RX_GOOD_CNT)
11750 + p4_rx_good_cnt ++;
11751 + if(mib_int_val & TX_GOOD_CNT)
11752 + p4_tx_good_cnt ++;
11753 + if(mib_int_val & RX_GOCT_CNT)
11754 + p4_rx_byte_cnt ++;
11755 + if(mib_int_val & TX_GOCT_CNT)
11756 + p4_tx_byte_cnt ++;
11759 + mib_int_val = sysRegRead(ESW_P5_IntSn);
11761 + sysRegWrite(ESW_P5_IntSn, mib_int_val);
11762 + if(mib_int_val & RX_GOOD_CNT)
11763 + p5_rx_good_cnt ++;
11764 + if(mib_int_val & TX_GOOD_CNT)
11765 + p5_tx_good_cnt ++;
11766 + if(mib_int_val & RX_GOCT_CNT)
11767 + p5_rx_byte_cnt ++;
11768 + if(mib_int_val & TX_GOCT_CNT)
11769 + p5_tx_byte_cnt ++;
11772 + mib_int_val = sysRegRead(ESW_P6_IntSn);
11774 + sysRegWrite(ESW_P6_IntSn, mib_int_val);
11775 + if(mib_int_val & RX_GOOD_CNT)
11776 + p6_rx_good_cnt ++;
11777 + if(mib_int_val & TX_GOOD_CNT)
11778 + p6_tx_good_cnt ++;
11779 + if(mib_int_val & RX_GOCT_CNT)
11780 + p6_rx_byte_cnt ++;
11781 + if(mib_int_val & TX_GOCT_CNT)
11782 + p6_tx_byte_cnt ++;
11784 +#if defined (CONFIG_RALINK_MT7620)
11785 + mib_int_val = sysRegRead(ESW_P7_IntSn);
11787 + sysRegWrite(ESW_P7_IntSn, mib_int_val);
11788 + if(mib_int_val & RX_GOOD_CNT)
11789 + p7_rx_good_cnt ++;
11790 + if(mib_int_val & TX_GOOD_CNT)
11791 + p7_tx_good_cnt ++;
11792 + if(mib_int_val & RX_GOCT_CNT)
11793 + p7_rx_byte_cnt ++;
11794 + if(mib_int_val & TX_GOCT_CNT)
11795 + p7_tx_byte_cnt ++;
11801 +#else // not RT6855
11802 + if (reg_int_val & PORT_ST_CHG) {
11803 + printk("RT305x_ESW: Link Status Changed\n");
11805 + stat_curr = *((volatile u32 *)(RALINK_ETH_SW_BASE+0x80));
11806 +#ifdef CONFIG_WAN_AT_P0
11807 + //link down --> link up : send signal to user application
11808 + //link up --> link down : ignore
11809 + if ((stat & (1<<25)) || !(stat_curr & (1<<25)))
11811 + if ((stat & (1<<29)) || !(stat_curr & (1<<29)))
11815 + schedule_work(&ei_local->kill_sig_wq);
11817 + stat = stat_curr;
11820 +#endif // defined(CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A)//
11822 + sysRegWrite(ESW_ISR, reg_int_val);
11824 + spin_unlock_irqrestore(&(ei_local->page_lock), flags);
11825 + return IRQ_HANDLED;
11830 +#elif defined (CONFIG_RT_3052_ESW) && defined(CONFIG_RALINK_MT7621)
11832 +static irqreturn_t esw_interrupt(int irq, void *dev_id)
11834 + unsigned long flags;
11835 + unsigned int reg_int_val;
11836 + struct net_device *dev = (struct net_device *) dev_id;
11837 + END_DEVICE *ei_local = netdev_priv(dev);
11839 + spin_lock_irqsave(&(ei_local->page_lock), flags);
11840 +	mii_mgr_read(31, 0x700c, &reg_int_val);
11842 + if (reg_int_val & P4_LINK_CH) {
11843 + esw_link_status_changed(4, dev_id);
11846 + if (reg_int_val & P3_LINK_CH) {
11847 + esw_link_status_changed(3, dev_id);
11849 + if (reg_int_val & P2_LINK_CH) {
11850 + esw_link_status_changed(2, dev_id);
11852 + if (reg_int_val & P1_LINK_CH) {
11853 + esw_link_status_changed(1, dev_id);
11855 + if (reg_int_val & P0_LINK_CH) {
11856 + esw_link_status_changed(0, dev_id);
11859 + mii_mgr_write(31, 0x700c, 0x1f); //ack switch link change
11861 + spin_unlock_irqrestore(&(ei_local->page_lock), flags);
11862 + return IRQ_HANDLED;
11868 +static int ei_start_xmit_fake(struct sk_buff* skb, struct net_device *dev)
11870 + return ei_start_xmit(skb, dev, 1);
11874 +#if defined (CONFIG_RALINK_RT3052) || defined (CONFIG_RALINK_RT3352) || defined (CONFIG_RALINK_RT5350)
11875 +void dump_phy_reg(int port_no, int from, int to, int is_local)
11880 + if(is_local==0) {
11881 + printk("Global Register\n");
11882 + printk("===============");
11883 + mii_mgr_write(0, 31, 0); //select global register
11884 + for(i=from;i<=to;i++) {
11888 + mii_mgr_read(port_no,i, &temp);
11889 + printk("%02d: %04X ",i, temp);
11892 + mii_mgr_write(0, 31, 0x8000); //select local register
11893 + printk("\n\nLocal Register Port %d\n",port_no);
11894 + printk("===============");
11895 + for(i=from;i<=to;i++) {
11899 + mii_mgr_read(port_no,i, &temp);
11900 + printk("%02d: %04X ",i, temp);
11906 +void dump_phy_reg(int port_no, int from, int to, int is_local, int page_no)
11914 + if(is_local==0) {
11916 + printk("\n\nGlobal Register Page %d\n",page_no);
11917 + printk("===============");
11918 + r31 |= 0 << 15; //global
11919 + r31 |= ((page_no&0x7) << 12); //page no
11920 + mii_mgr_write(port_no, 31, r31); //select global page x
11921 + for(i=16;i<32;i++) {
11925 + mii_mgr_read(port_no,i, &temp);
11926 + printk("%02d: %04X ",i, temp);
11929 + printk("\n\nLocal Register Port %d Page %d\n",port_no, page_no);
11930 + printk("===============");
11931 + r31 |= 1 << 15; //local
11932 + r31 |= ((page_no&0x7) << 12); //page no
11933 + mii_mgr_write(port_no, 31, r31); //select local page x
11934 + for(i=16;i<32;i++) {
11938 + mii_mgr_read(port_no,i, &temp);
11939 + printk("%02d: %04X ",i, temp);
11947 +int ei_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11949 +#if defined(CONFIG_RT_3052_ESW) || defined(CONFIG_RAETH_QDMA)
11952 +#if defined(CONFIG_RALINK_RT3352) || defined(CONFIG_RALINK_RT5350) || \
11953 + defined (CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A) || \
11954 + defined(CONFIG_RALINK_MT7620) || defined(CONFIG_RALINK_MT7621) || \
11955 + defined (CONFIG_RALINK_MT7628) || defined (CONFIG_ARCH_MT7623)
11956 + esw_rate ratelimit;
11958 +#if defined(CONFIG_RT_3052_ESW)
11959 + unsigned int offset = 0;
11960 + unsigned int value = 0;
11964 + END_DEVICE *ei_local = netdev_priv(dev);
11965 + ra_mii_ioctl_data mii;
11966 + spin_lock_irq(&ei_local->page_lock);
11969 +#if defined(CONFIG_RAETH_QDMA)
11970 +#define _HQOS_REG(x) (*((volatile u32 *)(RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + x)))
11971 + case RAETH_QDMA_REG_READ:
11972 +			copy_from_user(&reg, ifr->ifr_data, sizeof(reg));
11973 + if (reg.off > REG_HQOS_MAX) {
11977 + reg.val = _HQOS_REG(reg.off);
11978 + //printk("read reg off:%x val:%x\n", reg.off, reg.val);
11979 +			copy_to_user(ifr->ifr_data, &reg, sizeof(reg));
11981 + case RAETH_QDMA_REG_WRITE:
11982 +			copy_from_user(&reg, ifr->ifr_data, sizeof(reg));
11983 + if (reg.off > REG_HQOS_MAX) {
11987 + _HQOS_REG(reg.off) = reg.val;
11988 + //printk("write reg off:%x val:%x\n", reg.off, reg.val);
11991 + case RAETH_QDMA_READ_CPU_CLK:
11992 +			copy_from_user(&reg, ifr->ifr_data, sizeof(reg));
11993 + reg.val = get_surfboard_sysclk();
11994 + //printk("read reg off:%x val:%x\n", reg.off, reg.val);
11995 +			copy_to_user(ifr->ifr_data, &reg, sizeof(reg));
11998 + case RAETH_QDMA_QUEUE_MAPPING:
11999 +			copy_from_user(&reg, ifr->ifr_data, sizeof(reg));
12000 + if((reg.off&0x100) == 0x100){
12001 + lan_wan_separate = 1;
12004 + lan_wan_separate = 0;
12006 + M2Q_table[reg.off] = reg.val;
12008 +#if defined(CONFIG_HW_SFQ)
12009 + case RAETH_QDMA_SFQ_WEB_ENABLE:
12010 +			copy_from_user(&reg, ifr->ifr_data, sizeof(reg));
12011 + if((reg.val) == 0x1){
12012 + web_sfq_enable = 1;
12015 + web_sfq_enable = 0;
12022 + case RAETH_MII_READ:
12023 + copy_from_user(&mii, ifr->ifr_data, sizeof(mii));
12024 + mii_mgr_read(mii.phy_id, mii.reg_num, &mii.val_out);
12025 + //printk("phy %d, reg %d, val 0x%x\n", mii.phy_id, mii.reg_num, mii.val_out);
12026 + copy_to_user(ifr->ifr_data, &mii, sizeof(mii));
12029 + case RAETH_MII_WRITE:
12030 + copy_from_user(&mii, ifr->ifr_data, sizeof(mii));
12031 + //printk("phy %d, reg %d, val 0x%x\n", mii.phy_id, mii.reg_num, mii.val_in);
12032 + mii_mgr_write(mii.phy_id, mii.reg_num, mii.val_in);
12034 +#if defined (CONFIG_RALINK_MT7621) || defined (CONFIG_RALINK_MT7620) || defined (CONFIG_ARCH_MT7623)
12035 + case RAETH_MII_READ_CL45:
12036 + copy_from_user(&mii, ifr->ifr_data, sizeof(mii));
12037 + //mii_mgr_cl45_set_address(mii.port_num, mii.dev_addr, mii.reg_addr);
12038 + mii_mgr_read_cl45(mii.port_num, mii.dev_addr, mii.reg_addr, &mii.val_out);
12039 + copy_to_user(ifr->ifr_data, &mii, sizeof(mii));
12041 + case RAETH_MII_WRITE_CL45:
12042 + copy_from_user(&mii, ifr->ifr_data, sizeof(mii));
12043 + //mii_mgr_cl45_set_address(mii.port_num, mii.dev_addr, mii.reg_addr);
12044 + mii_mgr_write_cl45(mii.port_num, mii.dev_addr, mii.reg_addr, mii.val_in);
12048 +#if defined(CONFIG_RT_3052_ESW)
12049 +#define _ESW_REG(x) (*((volatile u32 *)(RALINK_ETH_SW_BASE + x)))
12050 + case RAETH_ESW_REG_READ:
12051 +			copy_from_user(&reg, ifr->ifr_data, sizeof(reg));
12052 + if (reg.off > REG_ESW_MAX) {
12056 + reg.val = _ESW_REG(reg.off);
12057 + //printk("read reg off:%x val:%x\n", reg.off, reg.val);
12058 +			copy_to_user(ifr->ifr_data, &reg, sizeof(reg));
12060 + case RAETH_ESW_REG_WRITE:
12061 +			copy_from_user(&reg, ifr->ifr_data, sizeof(reg));
12062 + if (reg.off > REG_ESW_MAX) {
12066 + _ESW_REG(reg.off) = reg.val;
12067 + //printk("write reg off:%x val:%x\n", reg.off, reg.val);
12069 + case RAETH_ESW_PHY_DUMP:
12070 +			copy_from_user(&reg, ifr->ifr_data, sizeof(reg));
12071 +#if defined (CONFIG_RALINK_RT3052) || defined (CONFIG_RALINK_RT3352) || defined (CONFIG_RALINK_RT5350)
12072 + if (reg.val ==32 ) {//dump all phy register
12073 + /* Global Register 0~31
12074 + * Local Register 0~31
12076 + dump_phy_reg(0, 0, 31, 0); //dump global register
12077 + for(offset=0;offset<5;offset++) {
12078 + dump_phy_reg(offset, 0, 31, 1); //dump local register
12081 + dump_phy_reg(reg.val, 0, 31, 0); //dump global register
12082 + dump_phy_reg(reg.val, 0, 31, 1); //dump local register
12085 + /* SPEC defined Register 0~15
12086 + * Global Register 16~31 for each page
12087 + * Local Register 16~31 for each page
12089 + printk("SPEC defined Register");
12090 + if (reg.val ==32 ) {//dump all phy register
12092 + for(i=0; i<5; i++){
12093 + printk("\n[Port %d]===============",i);
12094 + for(offset=0;offset<16;offset++) {
12095 + if(offset%8==0) {
12098 + mii_mgr_read(i,offset, &value);
12099 + printk("%02d: %04X ",offset, value);
12104 + printk("\n[Port %d]===============",reg.val);
12105 + for(offset=0;offset<16;offset++) {
12106 + if(offset%8==0) {
12109 + mii_mgr_read(reg.val,offset, &value);
12110 + printk("%02d: %04X ",offset, value);
12114 +#if defined (CONFIG_RALINK_MT7628)
12115 + for(offset=0;offset<7;offset++) { //global register page 0~6
12117 + for(offset=0;offset<5;offset++) { //global register page 0~4
12119 + if(reg.val == 32) //dump all phy register
12120 + dump_phy_reg(0, 16, 31, 0, offset);
12122 + dump_phy_reg(reg.val, 16, 31, 0, offset);
12125 + if (reg.val == 32) {//dump all phy register
12126 +#if !defined (CONFIG_RAETH_HAS_PORT4)
12127 + for(offset=0;offset<5;offset++) { //local register port 0-port4
12129 + for(offset=0;offset<4;offset++) { //local register port 0-port3
12131 + dump_phy_reg(offset, 16, 31, 1, 0); //dump local page 0
12132 + dump_phy_reg(offset, 16, 31, 1, 1); //dump local page 1
12133 + dump_phy_reg(offset, 16, 31, 1, 2); //dump local page 2
12134 + dump_phy_reg(offset, 16, 31, 1, 3); //dump local page 3
12137 + dump_phy_reg(reg.val, 16, 31, 1, 0); //dump local page 0
12138 + dump_phy_reg(reg.val, 16, 31, 1, 1); //dump local page 1
12139 + dump_phy_reg(reg.val, 16, 31, 1, 2); //dump local page 2
12140 + dump_phy_reg(reg.val, 16, 31, 1, 3); //dump local page 3
12145 +#if defined (CONFIG_RALINK_RT3352) || defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_MT7628)
12146 +#define _ESW_REG(x) (*((volatile u32 *)(RALINK_ETH_SW_BASE + x)))
12147 + case RAETH_ESW_INGRESS_RATE:
12148 + copy_from_user(&ratelimit, ifr->ifr_data, sizeof(ratelimit));
12149 + offset = 0x11c + (4 * (ratelimit.port / 2));
12150 + value = _ESW_REG(offset);
12152 + if((ratelimit.port % 2) == 0)
12154 + value &= 0xffff0000;
12155 + if(ratelimit.on_off == 1)
12157 + value |= (ratelimit.on_off << 14);
12158 + value |= (0x07 << 10);
12159 + value |= ratelimit.bw;
12162 + else if((ratelimit.port % 2) == 1)
12164 + value &= 0x0000ffff;
12165 + if(ratelimit.on_off == 1)
12167 + value |= (ratelimit.on_off << 30);
12168 + value |= (0x07 << 26);
12169 + value |= (ratelimit.bw << 16);
12172 + printk("offset = 0x%4x value=0x%x\n\r", offset, value);
12174 + _ESW_REG(offset) = value;
12177 + case RAETH_ESW_EGRESS_RATE:
12178 + copy_from_user(&ratelimit, ifr->ifr_data, sizeof(ratelimit));
12179 + offset = 0x140 + (4 * (ratelimit.port / 2));
12180 + value = _ESW_REG(offset);
12182 + if((ratelimit.port % 2) == 0)
12184 + value &= 0xffff0000;
12185 + if(ratelimit.on_off == 1)
12187 + value |= (ratelimit.on_off << 12);
12188 + value |= (0x03 << 10);
12189 + value |= ratelimit.bw;
12192 + else if((ratelimit.port % 2) == 1)
12194 + value &= 0x0000ffff;
12195 + if(ratelimit.on_off == 1)
12197 + value |= (ratelimit.on_off << 28);
12198 + value |= (0x03 << 26);
12199 + value |= (ratelimit.bw << 16);
12202 + printk("offset = 0x%4x value=0x%x\n\r", offset, value);
12203 + _ESW_REG(offset) = value;
12205 +#elif defined (CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A) || \
12206 + defined(CONFIG_RALINK_MT7620) || defined(CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
12207 +#define _ESW_REG(x) (*((volatile u32 *)(RALINK_ETH_SW_BASE + x)))
12208 + case RAETH_ESW_INGRESS_RATE:
12209 + copy_from_user(&ratelimit, ifr->ifr_data, sizeof(ratelimit));
12210 +#if defined(CONFIG_RALINK_RT6855A) || defined(CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
12211 + offset = 0x1800 + (0x100 * ratelimit.port);
12213 + offset = 0x1080 + (0x100 * ratelimit.port);
12215 + value = _ESW_REG(offset);
12217 + value &= 0xffff0000;
12218 + if(ratelimit.on_off == 1)
12220 + value |= (ratelimit.on_off << 15);
12221 + if (ratelimit.bw < 100)
12223 + value |= (0x0 << 8);
12224 + value |= ratelimit.bw;
12225 + }else if(ratelimit.bw < 1000)
12227 + value |= (0x1 << 8);
12228 + value |= ratelimit.bw/10;
12229 + }else if(ratelimit.bw < 10000)
12231 + value |= (0x2 << 8);
12232 + value |= ratelimit.bw/100;
12233 + }else if(ratelimit.bw < 100000)
12235 + value |= (0x3 << 8);
12236 + value |= ratelimit.bw/1000;
12239 + value |= (0x4 << 8);
12240 + value |= ratelimit.bw/10000;
12243 + printk("offset = 0x%4x value=0x%x\n\r", offset, value);
12244 +#if defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
12245 + mii_mgr_write(0x1f, offset, value);
12247 + _ESW_REG(offset) = value;
12251 + case RAETH_ESW_EGRESS_RATE:
12252 + copy_from_user(&ratelimit, ifr->ifr_data, sizeof(ratelimit));
12253 + offset = 0x1040 + (0x100 * ratelimit.port);
12254 + value = _ESW_REG(offset);
12256 + value &= 0xffff0000;
12257 + if(ratelimit.on_off == 1)
12259 + value |= (ratelimit.on_off << 15);
12260 + if (ratelimit.bw < 100)
12262 + value |= (0x0 << 8);
12263 + value |= ratelimit.bw;
12264 + }else if(ratelimit.bw < 1000)
12266 + value |= (0x1 << 8);
12267 + value |= ratelimit.bw/10;
12268 + }else if(ratelimit.bw < 10000)
12270 + value |= (0x2 << 8);
12271 + value |= ratelimit.bw/100;
12272 + }else if(ratelimit.bw < 100000)
12274 + value |= (0x3 << 8);
12275 + value |= ratelimit.bw/1000;
12278 + value |= (0x4 << 8);
12279 + value |= ratelimit.bw/10000;
12282 + printk("offset = 0x%4x value=0x%x\n\r", offset, value);
12283 +#if defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
12284 + mii_mgr_write(0x1f, offset, value);
12286 + _ESW_REG(offset) = value;
12290 +#endif // CONFIG_RT_3052_ESW
12292 + ret = -EOPNOTSUPP;
12297 + spin_unlock_irq(&ei_local->page_lock);
12302 + * Set new MTU size
12303 + * Change the mtu of Raeth Ethernet Device
12305 +static int ei_change_mtu(struct net_device *dev, int new_mtu)
12307 + END_DEVICE *ei_local = netdev_priv(dev); // get priv ei_local pointer from net_dev structure
12309 + if ( ei_local == NULL ) {
12310 + printk(KERN_EMERG "%s: ei_change_mtu passed a non-existent private pointer from net_dev!\n", dev->name);
12315 + if ( (new_mtu > 4096) || (new_mtu < 64)) {
12319 +#ifndef CONFIG_RAETH_JUMBOFRAME
12320 + if ( new_mtu > 1500 ) {
12325 + dev->mtu = new_mtu;
12330 +#ifdef CONFIG_RAETH_HW_VLAN_RX
12331 +static void ei_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
12333 + END_DEVICE *ei_local = netdev_priv(dev);
12335 + ei_local->vlgrp = grp;
12337 + /* enable HW VLAN RX */
12338 + sysRegWrite(CDMP_EG_CTRL, 1);
12343 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
12344 +static const struct net_device_ops ei_netdev_ops = {
12345 + .ndo_init = rather_probe,
12346 + .ndo_open = ei_open,
12347 + .ndo_stop = ei_close,
12348 + .ndo_start_xmit = ei_start_xmit_fake,
12349 + .ndo_get_stats = ra_get_stats,
12350 + .ndo_set_mac_address = eth_mac_addr,
12351 + .ndo_change_mtu = ei_change_mtu,
12352 + .ndo_do_ioctl = ei_ioctl,
12353 + .ndo_validate_addr = eth_validate_addr,
12354 +#ifdef CONFIG_RAETH_HW_VLAN_RX
12355 + .ndo_vlan_rx_register = ei_vlan_rx_register,
12357 +#ifdef CONFIG_NET_POLL_CONTROLLER
12358 + .ndo_poll_controller = raeth_clean,
12360 +// .ndo_tx_timeout = ei_tx_timeout,
12364 +void ra2880_setup_dev_fptable(struct net_device *dev)
12366 + RAETH_PRINT(__FUNCTION__ "is called!\n");
12368 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
12369 + dev->netdev_ops = &ei_netdev_ops;
12371 + dev->open = ei_open;
12372 + dev->stop = ei_close;
12373 + dev->hard_start_xmit = ei_start_xmit_fake;
12374 + dev->get_stats = ra_get_stats;
12375 + dev->set_mac_address = ei_set_mac_addr;
12376 + dev->change_mtu = ei_change_mtu;
12378 + dev->do_ioctl = ei_ioctl;
12379 +// dev->tx_timeout = ei_tx_timeout;
12381 +#ifdef CONFIG_RAETH_NAPI
12382 + dev->poll = &raeth_clean;
12383 +#if defined (CONFIG_RAETH_ROUTER)
12384 + dev->weight = 32;
12385 +#elif defined (CONFIG_RT_3052_ESW)
12386 + dev->weight = 32;
12388 + dev->weight = 128;
12392 +#if defined (CONFIG_ETHTOOL) /*&& defined (CONFIG_RAETH_ROUTER)*/
12393 + dev->ethtool_ops = &ra_ethtool_ops;
12395 +#define TX_TIMEOUT (5*HZ)
12396 + dev->watchdog_timeo = TX_TIMEOUT;
12400 +/* reset frame engine */
12401 +void fe_reset(void)
12403 +#if defined (CONFIG_RALINK_RT6855A)
12408 + //val = *(volatile u32 *)(0x1b000000);
12409 + //printk("0x1b000000 is 0x%x\n", val);
12410 + //val = sysRegRead(0xFB110100);
12412 + //sysRegWrite(0xFB110100, val);
12416 + val = sysRegRead(RSTCTRL);
12418 +// RT5350 need to reset ESW and FE at the same to avoid PDMA panic //
12419 +#if defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_MT7628)
12420 + val = val | RALINK_FE_RST | RALINK_ESW_RST ;
12422 + val = val | RALINK_FE_RST;
12424 + sysRegWrite(RSTCTRL, val);
12425 +#if defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_MT7620) || defined (CONFIG_RALINK_MT7628)
12426 + val = val & ~(RALINK_FE_RST | RALINK_ESW_RST);
12428 + val = val & ~(RALINK_FE_RST);
12431 + sysRegWrite(RSTCTRL, val);
12436 +#if defined (CONFIG_GE1_TRGMII_FORCE_1200) && defined (CONFIG_MT7621_ASIC)
12437 +void trgmii_set_7621(void)
12442 + val = sysRegRead(RSTCTRL);
12443 +// MT7621 need to reset GMAC and FE first //
12444 + val = val | RALINK_FE_RST | RALINK_ETH_RST ;
12445 + sysRegWrite(RSTCTRL, val);
12447 +//set TRGMII clock//
12448 + val_0 = sysRegRead(CLK_CFG_0);
12449 + val_0 &= 0xffffff9f;
12450 + val_0 |= (0x1 << 5);
12451 + sysRegWrite(CLK_CFG_0, val_0);
12453 + val_0 = sysRegRead(CLK_CFG_0);
12454 + printk("set CLK_CFG_0 = 0x%x!!!!!!!!!!!!!!!!!!1\n",val_0);
12455 + val = val & ~(RALINK_FE_RST | RALINK_ETH_RST);
12456 + sysRegWrite(RSTCTRL, val);
12459 +void trgmii_set_7530(void)
12464 + mii_mgr_write(31, 103, 0x0020);
12468 + mii_mgr_write(0, 0x16, 0);
12469 + mii_mgr_write(1, 0x16, 0);
12470 + mii_mgr_write(2, 0x16, 0);
12471 + mii_mgr_write(3, 0x16, 0);
12472 + mii_mgr_write(4, 0x16, 0);
12475 + //PLL reset for E2
12476 + mii_mgr_write(31, 104, 0x0608);
12477 + mii_mgr_write(31, 104, 0x2608);
12479 + mii_mgr_write(31, 0x7808, 0x0);
12481 + mii_mgr_write(31, 0x7804, 0x01017e8f);
12483 + mii_mgr_write(31, 0x7808, 0x1);
12490 + mii_mgr_write(0, 13, 0x1f);
12491 + mii_mgr_write(0, 14, 0x404);
12492 + mii_mgr_write(0, 13, 0x401f);
12493 +	mii_mgr_read(31, 0x7800, &regValue);
12494 + regValue = (regValue >> 9) & 0x3;
12495 + if(regValue == 0x3) { //25Mhz Xtal
12496 + mii_mgr_write(0, 14, 0x0A00);//25Mhz XTAL for 150Mhz CLK
12497 + } else if(regValue == 0x2) { //40Mhz
12498 + mii_mgr_write(0, 14, 0x0780);//40Mhz XTAL for 150Mhz CLK
12500 + //mii_mgr_write(0, 14, 0x0C00);//ori
12503 + mii_mgr_write(0, 13, 0x1f);
12504 + mii_mgr_write(0, 14, 0x409);
12505 + mii_mgr_write(0, 13, 0x401f);
12506 + mii_mgr_write(0, 14, 0x57);
12509 + mii_mgr_write(0, 13, 0x1f);
12510 + mii_mgr_write(0, 14, 0x40a);
12511 + mii_mgr_write(0, 13, 0x401f);
12512 + mii_mgr_write(0, 14, 0x57);
12515 + mii_mgr_write(0, 13, 0x1f);
12516 + mii_mgr_write(0, 14, 0x403);
12517 + mii_mgr_write(0, 13, 0x401f);
12518 + mii_mgr_write(0, 14, 0x1800);
12522 + mii_mgr_write(0, 13, 0x1f);
12523 + mii_mgr_write(0, 14, 0x403);
12524 + mii_mgr_write(0, 13, 0x401f);
12525 + mii_mgr_write(0, 14, 0x1c00);
12528 + mii_mgr_write(0, 13, 0x1f);
12529 + mii_mgr_write(0, 14, 0x401);
12530 + mii_mgr_write(0, 13, 0x401f);
12531 + mii_mgr_write(0, 14, 0xc020);
12534 + mii_mgr_write(0, 13, 0x1f);
12535 + mii_mgr_write(0, 14, 0x406);
12536 + mii_mgr_write(0, 13, 0x401f);
12537 + mii_mgr_write(0, 14, 0xa030);
12541 + mii_mgr_write(0, 13, 0x1f);
12542 + mii_mgr_write(0, 14, 0x410);
12543 + mii_mgr_write(0, 13, 0x401f);
12544 + mii_mgr_write(0, 14, 0x0003);
12547 + mii_mgr_write(31, 0x3600, 0x5e33b);
12550 + mii_mgr_write(31, 0x7830, 0x1);
12556 +void ei_reset_task(struct work_struct *work)
12558 + struct net_device *dev = dev_raether;
12566 +void ei_tx_timeout(struct net_device *dev)
12568 + END_DEVICE *ei_local = netdev_priv(dev);
12570 + schedule_work(&ei_local->reset_task);
12573 +void setup_statistics(END_DEVICE* ei_local)
12575 + ei_local->stat.tx_packets = 0;
12576 + ei_local->stat.tx_bytes = 0;
12577 + ei_local->stat.tx_dropped = 0;
12578 + ei_local->stat.tx_errors = 0;
12579 + ei_local->stat.tx_aborted_errors= 0;
12580 + ei_local->stat.tx_carrier_errors= 0;
12581 + ei_local->stat.tx_fifo_errors = 0;
12582 + ei_local->stat.tx_heartbeat_errors = 0;
12583 + ei_local->stat.tx_window_errors = 0;
12585 + ei_local->stat.rx_packets = 0;
12586 + ei_local->stat.rx_bytes = 0;
12587 + ei_local->stat.rx_dropped = 0;
12588 + ei_local->stat.rx_errors = 0;
12589 + ei_local->stat.rx_length_errors = 0;
12590 + ei_local->stat.rx_over_errors = 0;
12591 + ei_local->stat.rx_crc_errors = 0;
12592 + ei_local->stat.rx_frame_errors = 0;
12593 + ei_local->stat.rx_fifo_errors = 0;
12594 + ei_local->stat.rx_missed_errors = 0;
12596 + ei_local->stat.collisions = 0;
12597 +#if defined (CONFIG_RAETH_QOS)
12598 + ei_local->tx3_full = 0;
12599 + ei_local->tx2_full = 0;
12600 + ei_local->tx1_full = 0;
12601 + ei_local->tx0_full = 0;
12603 + ei_local->tx_full = 0;
12605 +#ifdef CONFIG_RAETH_NAPI
12606 + atomic_set(&ei_local->irq_sem, 1);
12612 + * rather_probe - pick up ethernet port at boot time
12613 + * @dev: network device to probe
12615 + * This routine probe the ethernet port at boot time.
12620 +int __init rather_probe(struct net_device *dev)
12623 + END_DEVICE *ei_local = netdev_priv(dev);
12624 + struct sockaddr addr;
12625 + unsigned char zero1[6]={0xFF,0xFF,0xFF,0xFF,0xFF,0xFF};
12626 + unsigned char zero2[6]={0x00,0x00,0x00,0x00,0x00,0x00};
12630 + //Get mac0 address from flash
12631 +#ifdef RA_MTD_RW_BY_NUM
12632 + i = ra_mtd_read(2, GMAC0_OFFSET, 6, addr.sa_data);
12634 + i = ra_mtd_read_nm("Factory", GMAC0_OFFSET, 6, addr.sa_data);
12636 + //If reading mtd failed or mac0 is empty, generate a mac address
12637 + if (i < 0 || ((memcmp(addr.sa_data, zero1, 6) == 0) || (addr.sa_data[0] & 0x1)) ||
12638 + (memcmp(addr.sa_data, zero2, 6) == 0)) {
12639 + unsigned char mac_addr01234[5] = {0x00, 0x0C, 0x43, 0x28, 0x80};
12640 + // net_srandom(jiffies);
12641 + memcpy(addr.sa_data, mac_addr01234, 5);
12642 + // addr.sa_data[5] = net_random()&0xFF;
12645 +#ifdef CONFIG_RAETH_NAPI
12646 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
12647 + netif_napi_add(dev, &ei_local->napi, raeth_clean, 128);
12650 + ei_set_mac_addr(dev, &addr);
12651 + spin_lock_init(&ei_local->page_lock);
12652 + ether_setup(dev);
12654 +#ifdef CONFIG_RAETH_LRO
12655 + ei_local->lro_mgr.dev = dev;
12656 + memset(&ei_local->lro_mgr.stats, 0, sizeof(ei_local->lro_mgr.stats));
12657 + ei_local->lro_mgr.features = LRO_F_NAPI;
12658 + ei_local->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
12659 + ei_local->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
12660 + ei_local->lro_mgr.max_desc = ARRAY_SIZE(ei_local->lro_arr);
12661 + ei_local->lro_mgr.max_aggr = 64;
12662 + ei_local->lro_mgr.frag_align_pad = 0;
12663 + ei_local->lro_mgr.lro_arr = ei_local->lro_arr;
12664 + ei_local->lro_mgr.get_skb_header = rt_get_skb_header;
12667 + setup_statistics(ei_local);
12672 +#ifdef CONFIG_PSEUDO_SUPPORT
12673 +int VirtualIF_ioctl(struct net_device * net_dev,
12674 + struct ifreq * ifr, int cmd)
12676 + ra_mii_ioctl_data mii;
12679 + case RAETH_MII_READ:
12680 + copy_from_user(&mii, ifr->ifr_data, sizeof(mii));
12681 + mii_mgr_read(mii.phy_id, mii.reg_num, &mii.val_out);
12682 + //printk("phy %d, reg %d, val 0x%x\n", mii.phy_id, mii.reg_num, mii.val_out);
12683 + copy_to_user(ifr->ifr_data, &mii, sizeof(mii));
12686 + case RAETH_MII_WRITE:
12687 + copy_from_user(&mii, ifr->ifr_data, sizeof(mii));
12688 + //printk("phy %d, reg %d, val 0x%x\n", mii.phy_id, mii.reg_num, mii.val_in);
12689 + mii_mgr_write(mii.phy_id, mii.reg_num, mii.val_in);
12692 + return -EOPNOTSUPP;
12698 +struct net_device_stats *VirtualIF_get_stats(struct net_device *dev)
12700 + PSEUDO_ADAPTER *pAd = netdev_priv(dev);
12701 + return &pAd->stat;
12704 +int VirtualIF_open(struct net_device * dev)
12706 + PSEUDO_ADAPTER *pPesueoAd = netdev_priv(dev);
12708 + printk("%s: ===> VirtualIF_open\n", dev->name);
12710 +#if defined (CONFIG_GE_RGMII_INTERNAL_P0_AN) || defined (CONFIG_GE_RGMII_INTERNAL_P4_AN)
12711 + *((volatile u32 *)(FE_INT_ENABLE2)) |= (1<<25); //enable GE2 link change intr for MT7530 delay setting
12714 + netif_start_queue(pPesueoAd->PseudoDev);
12719 +int VirtualIF_close(struct net_device * dev)
12721 + PSEUDO_ADAPTER *pPesueoAd = netdev_priv(dev);
12723 + printk("%s: ===> VirtualIF_close\n", dev->name);
12725 + netif_stop_queue(pPesueoAd->PseudoDev);
12730 +int VirtualIFSendPackets(struct sk_buff * pSkb,
12731 + struct net_device * dev)
12733 + PSEUDO_ADAPTER *pPesueoAd = netdev_priv(dev);
12734 + END_DEVICE *ei_local __maybe_unused;
12737 + //printk("VirtualIFSendPackets --->\n");
12739 + ei_local = netdev_priv(dev);
12740 + if (!(pPesueoAd->RaethDev->flags & IFF_UP)) {
12741 + dev_kfree_skb_any(pSkb);
12744 + //pSkb->cb[40]=0x5a;
12745 + pSkb->dev = pPesueoAd->RaethDev;
12746 + ei_start_xmit(pSkb, pPesueoAd->RaethDev, 2);
12750 +void virtif_setup_statistics(PSEUDO_ADAPTER* pAd)
12752 + pAd->stat.tx_packets = 0;
12753 + pAd->stat.tx_bytes = 0;
12754 + pAd->stat.tx_dropped = 0;
12755 + pAd->stat.tx_errors = 0;
12756 + pAd->stat.tx_aborted_errors= 0;
12757 + pAd->stat.tx_carrier_errors= 0;
12758 + pAd->stat.tx_fifo_errors = 0;
12759 + pAd->stat.tx_heartbeat_errors = 0;
12760 + pAd->stat.tx_window_errors = 0;
12762 + pAd->stat.rx_packets = 0;
12763 + pAd->stat.rx_bytes = 0;
12764 + pAd->stat.rx_dropped = 0;
12765 + pAd->stat.rx_errors = 0;
12766 + pAd->stat.rx_length_errors = 0;
12767 + pAd->stat.rx_over_errors = 0;
12768 + pAd->stat.rx_crc_errors = 0;
12769 + pAd->stat.rx_frame_errors = 0;
12770 + pAd->stat.rx_fifo_errors = 0;
12771 + pAd->stat.rx_missed_errors = 0;
12773 + pAd->stat.collisions = 0;
12776 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
12777 +static const struct net_device_ops VirtualIF_netdev_ops = {
12778 + .ndo_open = VirtualIF_open,
12779 + .ndo_stop = VirtualIF_close,
12780 + .ndo_start_xmit = VirtualIFSendPackets,
12781 + .ndo_get_stats = VirtualIF_get_stats,
12782 + .ndo_set_mac_address = ei_set_mac2_addr,
12783 + .ndo_change_mtu = ei_change_mtu,
12784 + .ndo_do_ioctl = VirtualIF_ioctl,
12785 + .ndo_validate_addr = eth_validate_addr,
12788 +// Register pseudo interface
12789 +void RAETH_Init_PSEUDO(pEND_DEVICE pAd, struct net_device *net_dev)
12792 + struct net_device *dev;
12793 + PSEUDO_ADAPTER *pPseudoAd;
12795 + struct sockaddr addr;
12796 + unsigned char zero1[6]={0xFF,0xFF,0xFF,0xFF,0xFF,0xFF};
12797 + unsigned char zero2[6]={0x00,0x00,0x00,0x00,0x00,0x00};
12799 + for (index = 0; index < MAX_PSEUDO_ENTRY; index++) {
12801 + dev = alloc_etherdev(sizeof(PSEUDO_ADAPTER));
12804 + printk(" alloc_etherdev for PSEUDO_ADAPTER failed.\n");
12807 + strcpy(dev->name, DEV2_NAME);
12809 + //Get mac2 address from flash
12810 +#ifdef RA_MTD_RW_BY_NUM
12811 + i = ra_mtd_read(2, GMAC2_OFFSET, 6, addr.sa_data);
12813 + i = ra_mtd_read_nm("Factory", GMAC2_OFFSET, 6, addr.sa_data);
12816 + //If reading mtd failed or mac0 is empty, generate a mac address
12817 + if (i < 0 || ((memcmp(addr.sa_data, zero1, 6) == 0) || (addr.sa_data[0] & 0x1)) ||
12818 + (memcmp(addr.sa_data, zero2, 6) == 0)) {
12819 + unsigned char mac_addr01234[5] = {0x00, 0x0C, 0x43, 0x28, 0x80};
12820 + // net_srandom(jiffies);
12821 + memcpy(addr.sa_data, mac_addr01234, 5);
12822 + // addr.sa_data[5] = net_random()&0xFF;
12825 + ei_set_mac2_addr(dev, &addr);
12826 + ether_setup(dev);
12827 + pPseudoAd = netdev_priv(dev);
12829 + pPseudoAd->PseudoDev = dev;
12830 + pPseudoAd->RaethDev = net_dev;
12831 + virtif_setup_statistics(pPseudoAd);
12832 + pAd->PseudoDev = dev;
12834 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
12835 + dev->netdev_ops = &VirtualIF_netdev_ops;
12837 + dev->hard_start_xmit = VirtualIFSendPackets;
12838 + dev->stop = VirtualIF_close;
12839 + dev->open = VirtualIF_open;
12840 + dev->do_ioctl = VirtualIF_ioctl;
12841 + dev->set_mac_address = ei_set_mac2_addr;
12842 + dev->get_stats = VirtualIF_get_stats;
12843 + dev->change_mtu = ei_change_mtu;
12847 +#if defined (CONFIG_RAETH_HW_LRO)
12848 + dev->features |= NETIF_F_HW_CSUM;
12850 + dev->features |= NETIF_F_IP_CSUM; /* Can checksum TCP/UDP over IPv4 */
12851 +#endif /* CONFIG_RAETH_HW_LRO */
12853 +#if defined(CONFIG_RALINK_MT7620)
12854 +#if defined (CONFIG_RAETH_TSO)
12855 + if ((sysRegRead(0xB000000C) & 0xf) >= 0x5) {
12856 + dev->features |= NETIF_F_SG;
12857 + dev->features |= NETIF_F_TSO;
12859 +#endif // CONFIG_RAETH_TSO //
12861 +#if defined (CONFIG_RAETH_TSOV6)
12862 + if ((sysRegRead(0xB000000C) & 0xf) >= 0x5) {
12863 + dev->features |= NETIF_F_TSO6;
12864 + dev->features |= NETIF_F_IPV6_CSUM; /* Can checksum TCP/UDP over IPv6 */
12868 +#if defined (CONFIG_RAETH_TSO)
12869 + dev->features |= NETIF_F_SG;
12870 + dev->features |= NETIF_F_TSO;
12871 +#endif // CONFIG_RAETH_TSO //
12873 +#if defined (CONFIG_RAETH_TSOV6)
12874 + dev->features |= NETIF_F_TSO6;
12875 + dev->features |= NETIF_F_IPV6_CSUM; /* Can checksum TCP/UDP over IPv6 */
12877 +#endif // CONFIG_RALINK_MT7620 //
12879 +#if LINUX_VERSION_CODE > KERNEL_VERSION(3,10,0)
12880 + dev->vlan_features = dev->features;
12884 +#if defined (CONFIG_ETHTOOL) /*&& defined (CONFIG_RAETH_ROUTER)*/
12885 + dev->ethtool_ops = &ra_virt_ethtool_ops;
12886 + // init mii structure
12887 + pPseudoAd->mii_info.dev = dev;
12888 + pPseudoAd->mii_info.mdio_read = mdio_virt_read;
12889 + pPseudoAd->mii_info.mdio_write = mdio_virt_write;
12890 + pPseudoAd->mii_info.phy_id_mask = 0x1f;
12891 + pPseudoAd->mii_info.reg_num_mask = 0x1f;
12892 + pPseudoAd->mii_info.phy_id = 0x1e;
12893 + pPseudoAd->mii_info.supports_gmii = mii_check_gmii_support(&pPseudoAd->mii_info);
12896 + // Register this device
12897 + register_netdevice(dev);
12903 + * ei_open - Open/Initialize the ethernet port.
12904 + * @dev: network device to initialize
12906 + * This routine goes all-out, setting everything
12907 + * up a new at each open, even though many of these registers should only need to be set once at boot.
12909 +int ei_open(struct net_device *dev)
12912 +#if !defined (CONFIG_MT7623_FPGA)
12913 + unsigned long flags;
12915 + END_DEVICE *ei_local;
12917 +#ifdef CONFIG_RAETH_LRO
12918 + const char *lan_ip_tmp;
12919 +#ifdef CONFIG_DUAL_IMAGE
12920 +#define RT2860_NVRAM 1
12922 +#define RT2860_NVRAM 0
12924 +#endif // CONFIG_RAETH_LRO //
12926 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
12927 + if (!try_module_get(THIS_MODULE))
12929 + printk("%s: Cannot reserve module\n", __FUNCTION__);
12933 + MOD_INC_USE_COUNT;
12936 + printk("Raeth %s (",RAETH_VERSION);
12937 +#if defined (CONFIG_RAETH_NAPI)
12938 + printk("NAPI\n");
12939 +#elif defined (CONFIG_RA_NETWORK_TASKLET_BH)
12940 + printk("Tasklet");
12941 +#elif defined (CONFIG_RA_NETWORK_WORKQUEUE_BH)
12942 + printk("Workqueue");
12945 +#if defined (CONFIG_RAETH_SKB_RECYCLE_2K)
12946 + printk(",SkbRecycle");
12951 + ei_local = netdev_priv(dev); // get device pointer from System
12952 + // unsigned int flags;
12954 + if (ei_local == NULL)
12956 + printk(KERN_EMERG "%s: ei_open passed a non-existent device!\n", dev->name);
12960 + /* receiving packet buffer allocation - NUM_RX_DESC x MAX_RX_LENGTH */
12961 + for ( i = 0; i < NUM_RX_DESC; i++)
12963 +#if defined (CONFIG_RAETH_SKB_RECYCLE_2K)
12964 + ei_local->netrx0_skbuf[i] = skbmgr_dev_alloc_skb2k();
12966 + ei_local->netrx0_skbuf[i] = dev_alloc_skb(MAX_RX_LENGTH + NET_IP_ALIGN);
12968 + if (ei_local->netrx0_skbuf[i] == NULL ) {
12969 + printk("rx skbuff buffer allocation failed!");
12971 +#if !defined (CONFIG_RAETH_SCATTER_GATHER_RX_DMA)
12972 + skb_reserve(ei_local->netrx0_skbuf[i], NET_IP_ALIGN);
12977 +#if defined (CONFIG_RAETH_HW_LRO)
12978 + ei_local->netrx3_skbuf[i] = dev_alloc_skb(MAX_LRO_RX_LENGTH + NET_IP_ALIGN);
12979 + if (ei_local->netrx3_skbuf[i] == NULL ) {
12980 + printk("rx3 skbuff buffer allocation failed!");
12982 +#if !defined (CONFIG_RAETH_SCATTER_GATHER_RX_DMA)
12983 + skb_reserve(ei_local->netrx3_skbuf[i], NET_IP_ALIGN);
12986 + ei_local->netrx2_skbuf[i] = dev_alloc_skb(MAX_LRO_RX_LENGTH + NET_IP_ALIGN);
12987 + if (ei_local->netrx2_skbuf[i] == NULL ) {
12988 + printk("rx2 skbuff buffer allocation failed!");
12990 +#if !defined (CONFIG_RAETH_SCATTER_GATHER_RX_DMA)
12991 + skb_reserve(ei_local->netrx2_skbuf[i], NET_IP_ALIGN);
12994 + ei_local->netrx1_skbuf[i] = dev_alloc_skb(MAX_LRO_RX_LENGTH + NET_IP_ALIGN);
12995 + if (ei_local->netrx1_skbuf[i] == NULL ) {
12996 + printk("rx1 skbuff buffer allocation failed!");
12998 +#if !defined (CONFIG_RAETH_SCATTER_GATHER_RX_DMA)
12999 + skb_reserve(ei_local->netrx1_skbuf[i], NET_IP_ALIGN);
13002 +#elif defined (CONFIG_RAETH_MULTIPLE_RX_RING)
13003 +#if defined(CONFIG_ARCH_MT7623)
13004 + ei_local->netrx3_skbuf[i] = dev_alloc_skb(MAX_RX_LENGTH + NET_IP_ALIGN);
13005 + if (ei_local->netrx3_skbuf[i] == NULL ) {
13006 + printk("rx3 skbuff buffer allocation failed!");
13008 +#if !defined (CONFIG_RAETH_SCATTER_GATHER_RX_DMA)
13009 + skb_reserve(ei_local->netrx3_skbuf[i], NET_IP_ALIGN);
13012 + ei_local->netrx2_skbuf[i] = dev_alloc_skb(MAX_RX_LENGTH + NET_IP_ALIGN);
13013 + if (ei_local->netrx2_skbuf[i] == NULL ) {
13014 + printk("rx2 skbuff buffer allocation failed!");
13016 +#if !defined (CONFIG_RAETH_SCATTER_GATHER_RX_DMA)
13017 + skb_reserve(ei_local->netrx2_skbuf[i], NET_IP_ALIGN);
13020 +#endif /* CONFIG_ARCH_MT7623 */
13021 + ei_local->netrx1_skbuf[i] = dev_alloc_skb(MAX_RX_LENGTH + NET_IP_ALIGN);
13022 + if (ei_local->netrx1_skbuf[i] == NULL ) {
13023 + printk("rx1 skbuff buffer allocation failed!");
13025 +#if !defined (CONFIG_RAETH_SCATTER_GATHER_RX_DMA)
13026 + skb_reserve(ei_local->netrx1_skbuf[i], NET_IP_ALIGN);
13031 +#if defined (CONFIG_GE1_TRGMII_FORCE_1200) && defined (CONFIG_MT7621_ASIC)
13032 + trgmii_set_7621(); //reset FE/GMAC in this function
13035 + fe_dma_init(dev);
13037 +#if defined (CONFIG_RAETH_HW_LRO)
13038 + fe_hw_lro_init(dev);
13039 +#endif /* CONFIG_RAETH_HW_LRO */
13041 + fe_sw_init(); //initialize fe and switch register
13042 +#if defined (CONFIG_MIPS)
13043 + err = request_irq( dev->irq, ei_interrupt, IRQF_DISABLED, dev->name, dev); // try to fix irq in open
13045 + err = request_irq(dev->irq, ei_interrupt, /*IRQF_TRIGGER_LOW*/ 0, dev->name, dev); // try to fix irq in open
13050 + if ( dev->dev_addr != NULL) {
13051 + ra2880MacAddressSet((void *)(dev->dev_addr));
13053 + printk("dev->dev_addr is empty !\n");
13055 +/*TODO: MT7623 MCM INT */
13056 +#if defined (CONFIG_RT_3052_ESW) && !defined(CONFIG_ARCH_MT7623)
13057 + err = request_irq(SURFBOARDINT_ESW, esw_interrupt, IRQF_DISABLED, "Ralink_ESW", dev);
13060 + INIT_WORK(&ei_local->kill_sig_wq, kill_sig_workq);
13061 +#if defined (CONFIG_RALINK_MT7621)
13062 + mii_mgr_write(31, 0x7008, 0x1f); //enable switch link change intr
13065 + *((volatile u32 *)(RALINK_INTCL_BASE + 0x34)) = (1<<17);
13066 + *((volatile u32 *)(ESW_IMR)) &= ~(ESW_INT_ALL);
13068 +#if defined (CONFIG_RALINK_RT6855) || defined (CONFIG_RALINK_RT6855A) || \
13069 + defined (CONFIG_RALINK_MT7620)
13070 + *((volatile u32 *)(ESW_P0_IntMn)) &= ~(MSK_CNT_INT_ALL);
13071 + *((volatile u32 *)(ESW_P1_IntMn)) &= ~(MSK_CNT_INT_ALL);
13072 + *((volatile u32 *)(ESW_P2_IntMn)) &= ~(MSK_CNT_INT_ALL);
13073 + *((volatile u32 *)(ESW_P3_IntMn)) &= ~(MSK_CNT_INT_ALL);
13074 + *((volatile u32 *)(ESW_P4_IntMn)) &= ~(MSK_CNT_INT_ALL);
13075 + *((volatile u32 *)(ESW_P5_IntMn)) &= ~(MSK_CNT_INT_ALL);
13076 + *((volatile u32 *)(ESW_P6_IntMn)) &= ~(MSK_CNT_INT_ALL);
13078 +#if defined(CONFIG_RALINK_MT7620)
13079 + *((volatile u32 *)(ESW_P7_IntMn)) &= ~(MSK_CNT_INT_ALL);
13083 +#endif // CONFIG_RT_3052_ESW //
13086 +#if !defined (CONFIG_MT7623_FPGA)
13087 + spin_lock_irqsave(&(ei_local->page_lock), flags);
13092 + sysRegWrite(RAETH_DLY_INT_CFG, DELAY_INT_INIT);
13093 + sysRegWrite(RAETH_FE_INT_ENABLE, RAETH_FE_INT_DLY_INIT);
13094 + #if defined (CONFIG_RAETH_HW_LRO)
13095 + sysRegWrite(RAETH_FE_INT_ENABLE, RAETH_FE_INT_DLY_INIT | ALT_RPLC_INT3 | ALT_RPLC_INT2 | ALT_RPLC_INT1);
13096 + #endif /* CONFIG_RAETH_HW_LRO */
13098 + sysRegWrite(RAETH_FE_INT_ENABLE, RAETH_FE_INT_ALL);
13099 + #if defined (CONFIG_RAETH_HW_LRO)
13100 + sysRegWrite(RAETH_FE_INT_ENABLE, RAETH_FE_INT_ALL | ALT_RPLC_INT3 | ALT_RPLC_INT2 | ALT_RPLC_INT1);
13101 + #endif /* CONFIG_RAETH_HW_LRO */
13104 +#ifdef CONFIG_RAETH_QDMA
13106 + sysRegWrite(QDMA_DELAY_INT, DELAY_INT_INIT);
13107 + sysRegWrite(QFE_INT_ENABLE, QFE_INT_DLY_INIT);
13109 + sysRegWrite(QFE_INT_ENABLE, QFE_INT_ALL);
13114 + INIT_WORK(&ei_local->reset_task, ei_reset_task);
13116 +#ifdef WORKQUEUE_BH
13117 +#ifndef CONFIG_RAETH_NAPI
13118 + INIT_WORK(&ei_local->rx_wq, ei_receive_workq);
13119 +#endif // CONFIG_RAETH_NAPI //
13121 +#ifndef CONFIG_RAETH_NAPI
13122 +#if defined (TASKLET_WORKQUEUE_SW)
13123 + working_schedule = init_schedule;
13124 + INIT_WORK(&ei_local->rx_wq, ei_receive_workq);
13125 + tasklet_init(&ei_local->rx_tasklet, ei_receive_workq, 0);
13127 + tasklet_init(&ei_local->rx_tasklet, ei_receive, 0);
13129 +#endif // CONFIG_RAETH_NAPI //
13130 +#endif // WORKQUEUE_BH //
13132 + netif_start_queue(dev);
13134 +#ifdef CONFIG_RAETH_NAPI
13135 + atomic_dec(&ei_local->irq_sem);
13136 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
13137 + napi_enable(&ei_local->napi);
13139 + netif_poll_enable(dev);
13143 +#if !defined (CONFIG_MT7623_FPGA)
13144 + spin_unlock_irqrestore(&(ei_local->page_lock), flags);
13147 +#ifdef CONFIG_PSEUDO_SUPPORT
13148 + if(ei_local->PseudoDev == NULL) {
13149 + RAETH_Init_PSEUDO(ei_local, dev);
13152 + if(ei_local->PseudoDev == NULL)
13153 + printk("Open PseudoDev failed.\n");
13155 + VirtualIF_open(ei_local->PseudoDev);
13159 +#ifdef CONFIG_RAETH_LRO
13160 + lan_ip_tmp = nvram_get(RT2860_NVRAM, "lan_ipaddr");
13161 + str_to_ip(&lan_ip, lan_ip_tmp);
13162 + lro_para.lan_ip1 = lan_ip = htonl(lan_ip);
13163 +#endif // CONFIG_RAETH_LRO //
13165 +#if defined (CONFIG_RAETH_HW_LRO)
13166 + INIT_WORK(&ei_local->hw_lro_wq, ei_hw_lro_workq);
13167 +#endif /* CONFIG_RAETH_HW_LRO */
13169 + forward_config(dev);
13174 + * ei_close - shut down network device
13175 + * @dev: network device to clear
13177 + * This routine shut down network device.
13181 +int ei_close(struct net_device *dev)
13184 + END_DEVICE *ei_local = netdev_priv(dev); // device pointer
13186 + netif_stop_queue(dev);
13187 + ra2880stop(ei_local);
13189 + free_irq(dev->irq, dev);
13191 +/*TODO: MT7623 MCM INT */
13192 +#if defined (CONFIG_RT_3052_ESW) && !defined(CONFIG_ARCH_MT7623)
13193 + free_irq(SURFBOARDINT_ESW, dev);
13195 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
13196 + cancel_work_sync(&ei_local->reset_task);
13199 +#ifdef CONFIG_PSEUDO_SUPPORT
13200 + VirtualIF_close(ei_local->PseudoDev);
13204 +#ifdef WORKQUEUE_BH
13205 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
13206 + cancel_work_sync(&ei_local->rx_wq);
13209 +#if defined (TASKLET_WORKQUEUE_SW)
13210 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
13211 + cancel_work_sync(&ei_local->rx_wq);
13214 + tasklet_kill(&ei_local->tx_tasklet);
13215 + tasklet_kill(&ei_local->rx_tasklet);
13216 +#endif // WORKQUEUE_BH //
13218 +#ifdef CONFIG_RAETH_NAPI
13219 + atomic_inc(&ei_local->irq_sem);
13220 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
13221 + napi_disable(&ei_local->napi);
13223 + netif_poll_disable(dev);
13228 +#if defined (CONFIG_RAETH_HW_LRO)
13229 + cancel_work_sync(&ei_local->hw_lro_wq);
13230 +#endif /* CONFIG_RAETH_HW_LRO */
13232 + for ( i = 0; i < NUM_RX_DESC; i++)
13234 + if (ei_local->netrx0_skbuf[i] != NULL) {
13235 + dev_kfree_skb(ei_local->netrx0_skbuf[i]);
13236 + ei_local->netrx0_skbuf[i] = NULL;
13238 +#if defined (CONFIG_RAETH_HW_LRO)
13239 + if (ei_local->netrx3_skbuf[i] != NULL) {
13240 + dev_kfree_skb(ei_local->netrx3_skbuf[i]);
13241 + ei_local->netrx3_skbuf[i] = NULL;
13243 + if (ei_local->netrx2_skbuf[i] != NULL) {
13244 + dev_kfree_skb(ei_local->netrx2_skbuf[i]);
13245 + ei_local->netrx2_skbuf[i] = NULL;
13247 + if (ei_local->netrx1_skbuf[i] != NULL) {
13248 + dev_kfree_skb(ei_local->netrx1_skbuf[i]);
13249 + ei_local->netrx1_skbuf[i] = NULL;
13251 +#elif defined (CONFIG_RAETH_MULTIPLE_RX_RING)
13252 +#if defined(CONFIG_ARCH_MT7623)
13253 + if (ei_local->netrx3_skbuf[i] != NULL) {
13254 + dev_kfree_skb(ei_local->netrx3_skbuf[i]);
13255 + ei_local->netrx3_skbuf[i] = NULL;
13257 + if (ei_local->netrx2_skbuf[i] != NULL) {
13258 + dev_kfree_skb(ei_local->netrx2_skbuf[i]);
13259 + ei_local->netrx2_skbuf[i] = NULL;
13261 +#endif /* CONFIG_ARCH_MT7623 */
13262 + if (ei_local->netrx1_skbuf[i] != NULL) {
13263 + dev_kfree_skb(ei_local->netrx1_skbuf[i]);
13264 + ei_local->netrx1_skbuf[i] = NULL;
13269 + for ( i = 0; i < NUM_TX_DESC; i++)
13271 + if((ei_local->skb_free[i]!=(struct sk_buff *)0xFFFFFFFF) && (ei_local->skb_free[i]!= 0))
13273 + dev_kfree_skb_any(ei_local->skb_free[i]);
13278 +#ifdef CONFIG_RAETH_QDMA
13279 + if (ei_local->txd_pool != NULL) {
13280 + pci_free_consistent(NULL, NUM_TX_DESC*sizeof(struct QDMA_txdesc), ei_local->txd_pool, ei_local->phy_txd_pool);
13282 + if (ei_local->free_head != NULL){
13283 + pci_free_consistent(NULL, NUM_QDMA_PAGE * sizeof(struct QDMA_txdesc), ei_local->free_head, ei_local->phy_free_head);
13285 + if (ei_local->free_page_head != NULL){
13286 + pci_free_consistent(NULL, NUM_QDMA_PAGE * QDMA_PAGE_SIZE, ei_local->free_page_head, ei_local->phy_free_page_head);
13289 + if (ei_local->tx_ring0 != NULL) {
13290 + pci_free_consistent(NULL, NUM_TX_DESC*sizeof(struct PDMA_txdesc), ei_local->tx_ring0, ei_local->phy_tx_ring0);
13294 +#if defined (CONFIG_RAETH_QOS)
13295 + if (ei_local->tx_ring1 != NULL) {
13296 + pci_free_consistent(NULL, NUM_TX_DESC*sizeof(struct PDMA_txdesc), ei_local->tx_ring1, ei_local->phy_tx_ring1);
13299 +#if !defined (CONFIG_RALINK_RT2880)
13300 + if (ei_local->tx_ring2 != NULL) {
13301 + pci_free_consistent(NULL, NUM_TX_DESC*sizeof(struct PDMA_txdesc), ei_local->tx_ring2, ei_local->phy_tx_ring2);
13304 + if (ei_local->tx_ring3 != NULL) {
13305 + pci_free_consistent(NULL, NUM_TX_DESC*sizeof(struct PDMA_txdesc), ei_local->tx_ring3, ei_local->phy_tx_ring3);
13310 +#ifdef CONFIG_32B_DESC
13311 + kfree(ei_local->rx_ring0);
13313 + pci_free_consistent(NULL, NUM_RX_DESC*sizeof(struct PDMA_rxdesc), ei_local->rx_ring0, ei_local->phy_rx_ring0);
13315 +#if defined CONFIG_RAETH_QDMA && !defined(CONFIG_RAETH_QDMATX_QDMARX)
13316 +#ifdef CONFIG_32B_DESC
13317 + kfree(ei_local->qrx_ring);
13319 + pci_free_consistent(NULL, NUM_QRX_DESC*sizeof(struct PDMA_rxdesc), ei_local->qrx_ring, ei_local->phy_qrx_ring);
13322 +#if defined (CONFIG_RAETH_HW_LRO)
13323 + pci_free_consistent(NULL, NUM_LRO_RX_DESC*sizeof(struct PDMA_rxdesc), ei_local->rx_ring3, ei_local->phy_rx_ring3);
13324 + pci_free_consistent(NULL, NUM_LRO_RX_DESC*sizeof(struct PDMA_rxdesc), ei_local->rx_ring2, ei_local->phy_rx_ring2);
13325 + pci_free_consistent(NULL, NUM_LRO_RX_DESC*sizeof(struct PDMA_rxdesc), ei_local->rx_ring1, ei_local->phy_rx_ring1);
13326 +#elif defined (CONFIG_RAETH_MULTIPLE_RX_RING)
13327 +#ifdef CONFIG_32B_DESC
13328 + kfree(ei_local->rx_ring1);
13330 +#if defined(CONFIG_ARCH_MT7623)
13331 + pci_free_consistent(NULL, NUM_RX_DESC*sizeof(struct PDMA_rxdesc), ei_local->rx_ring3, ei_local->phy_rx_ring3);
13332 + pci_free_consistent(NULL, NUM_RX_DESC*sizeof(struct PDMA_rxdesc), ei_local->rx_ring2, ei_local->phy_rx_ring2);
13333 +#endif /* CONFIG_ARCH_MT7623 */
13334 + pci_free_consistent(NULL, NUM_RX_DESC*sizeof(struct PDMA_rxdesc), ei_local->rx_ring1, ei_local->phy_rx_ring1);
13338 + printk("Free TX/RX Ring Memory!\n");
13342 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
13343 + module_put(THIS_MODULE);
13345 + MOD_DEC_USE_COUNT;
13350 +#if defined (CONFIG_RT6855A_FPGA)
13351 +void rt6855A_eth_gpio_reset(void)
13353 + u8 ether_gpio = 12;
13355 + /* Load the ethernet gpio value to reset Ethernet PHY */
13356 + *(unsigned long *)(RALINK_PIO_BASE + 0x00) |= 1<<(ether_gpio<<1);
13357 + *(unsigned long *)(RALINK_PIO_BASE + 0x14) |= 1<<(ether_gpio);
13358 + *(unsigned long *)(RALINK_PIO_BASE + 0x04) &= ~(1<<ether_gpio);
13362 + *(unsigned long *)(RALINK_PIO_BASE + 0x04) |= (1<<ether_gpio);
13364 + /* must wait for 0.6 seconds after reset*/
13369 +#if defined(CONFIG_RALINK_RT6855A)
13370 +void rt6855A_gsw_init(void)
13375 +#if defined (CONFIG_RT6855A_FPGA)
13376 +	/*keep dumb switch mode */
13377 + rt6855A_eth_gpio_reset();
13379 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x3000) = 0x5e353;//(P0,Force mode,Link Up,100Mbps,Full-Duplex,FC ON)
13380 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x3100) = 0x5e353;//(P1,Force mode,Link Up,100Mbps,Full-Duplex,FC ON)
13381 + //*(unsigned long *)(RALINK_ETH_SW_BASE+0x3000) = 0x5e333;//(P0,Force mode,Link Up,10Mbps,Full-Duplex,FC ON)
13382 + //*(unsigned long *)(RALINK_ETH_SW_BASE+0x3100) = 0x5e333;//(P1,Force mode,Link Up,10Mbps,Full-Duplex,FC ON)
13383 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x3200) = 0x8000;//link down
13384 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x3300) = 0x8000;//link down
13385 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x3400) = 0x8000;//link down
13386 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x3500) = 0x8000;//link down
13387 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x3600) = 0x5e33b;//CPU Port6 Force Link 1G, FC ON
13389 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x0010) = 0xffffffe0;//Set Port6 CPU Port
13391 +	/* In order to use 10M/Full on the FPGA board, we configure the phy as capable of
13392 +	 * 10M Full/Half duplex, so we can use auto-negotiation on the PC side */
13393 + for(i=6;i<8;i++){
13394 + mii_mgr_write(i, 4, 0x07e1); //Capable of 10M&100M Full/Half Duplex, flow control on/off
13395 + //mii_mgr_write(i, 4, 0x0461); //Capable of 10M Full/Half Duplex, flow control on/off
13396 + mii_mgr_write(i, 0, 0xB100); //reset all digital logic, except phy_reg
13397 + mii_mgr_read(i, 9, &phy_val);
13398 + phy_val &= ~(3<<8); //turn off 1000Base-T Advertisement (9.9=1000Full, 9.8=1000Half)
13399 + mii_mgr_write(i, 9, phy_val);
13401 +#elif defined (CONFIG_RT6855A_ASIC)
13402 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x3600) = 0x5e33b;//CPU Port6 Force Link 1G, FC ON
13403 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x0010) = 0xffffffe0;//Set Port6 CPU Port
13405 + *(unsigned long *)(RALINK_FRAME_ENGINE_BASE+0x1ec) = 0x0fffffff;//Set PSE should pause 4 tx ring as default
13406 + *(unsigned long *)(RALINK_FRAME_ENGINE_BASE+0x1f0) = 0x0fffffff;//switch IOT more stable
13408 + *(unsigned long *)(CKGCR) &= ~(0x3 << 4); //keep rx/tx port clock ticking, disable internal clock-gating to avoid switch stuck
13411 + *Reg 31: Page Control
13412 + * Bit 15 => PortPageSel, 1=local, 0=global
13413 + * Bit 14:12 => PageSel, local:0~3, global:0~4
13415 + *Reg16~30:Local/Global registers
13418 + /*correct PHY setting J8.0*/
13419 + mii_mgr_read(0, 31, &rev);
13422 + mii_mgr_write(1, 31, 0x4000); //global, page 4
13424 + mii_mgr_write(1, 16, 0xd4cc);
13425 + mii_mgr_write(1, 17, 0x7444);
13426 + mii_mgr_write(1, 19, 0x0112);
13427 + mii_mgr_write(1, 21, 0x7160);
13428 + mii_mgr_write(1, 22, 0x10cf);
13429 + mii_mgr_write(1, 26, 0x0777);
13432 + mii_mgr_write(1, 25, 0x0102);
13433 + mii_mgr_write(1, 29, 0x8641);
13436 + mii_mgr_write(1, 25, 0x0212);
13437 + mii_mgr_write(1, 29, 0x4640);
13440 + mii_mgr_write(1, 31, 0x2000); //global, page 2
13441 + mii_mgr_write(1, 21, 0x0655);
13442 + mii_mgr_write(1, 22, 0x0fd3);
13443 + mii_mgr_write(1, 23, 0x003d);
13444 + mii_mgr_write(1, 24, 0x096e);
13445 + mii_mgr_write(1, 25, 0x0fed);
13446 + mii_mgr_write(1, 26, 0x0fc4);
13448 + mii_mgr_write(1, 31, 0x1000); //global, page 1
13449 + mii_mgr_write(1, 17, 0xe7f8);
13452 + mii_mgr_write(1, 31, 0xa000); //local, page 2
13454 + mii_mgr_write(0, 16, 0x0e0e);
13455 + mii_mgr_write(1, 16, 0x0c0c);
13456 + mii_mgr_write(2, 16, 0x0f0f);
13457 + mii_mgr_write(3, 16, 0x1010);
13458 + mii_mgr_write(4, 16, 0x0909);
13460 + mii_mgr_write(0, 17, 0x0000);
13461 + mii_mgr_write(1, 17, 0x0000);
13462 + mii_mgr_write(2, 17, 0x0000);
13463 + mii_mgr_write(3, 17, 0x0000);
13464 + mii_mgr_write(4, 17, 0x0000);
13467 +#if defined (CONFIG_RT6855A_ASIC)
13469 +#if defined (CONFIG_P5_RGMII_TO_MAC_MODE)
13470 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x3500) = 0x5e33b;//(P5, Force mode, Link Up, 1000Mbps, Full-Duplex, FC ON)
13471 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x7014) = 0x1f0c000c;//disable port0-port4 internal phy, set phy base address to 12
13472 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x250c) = 0x000fff10;//disable port5 mac learning
13473 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x260c) = 0x000fff10;//disable port6 mac learning
13475 +#elif defined (CONFIG_P5_MII_TO_MAC_MODE)
13476 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x3500) = 0x5e337;//(P5, Force mode, Link Up, 100Mbps, Full-Duplex, FC ON)
13477 +#elif defined (CONFIG_P5_MAC_TO_PHY_MODE)
13478 + //rt6855/6 need to modify TX/RX phase
13479 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x7014) = 0xc;//TX/RX CLOCK Phase select
13481 + enable_auto_negotiate(1);
13483 + if (isICPlusGigaPHY(1)) {
13484 + mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 4, &phy_val);
13485 + phy_val |= 1<<10; //enable pause ability
13486 + mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 4, phy_val);
13488 + mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 0, &phy_val);
13489 + phy_val |= 1<<9; //restart AN
13490 + mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 0, phy_val);
13493 + if (isMarvellGigaPHY(1)) {
13494 + printk("Reset MARVELL phy1\n");
13495 + mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 20, &phy_val);
13496 + phy_val |= 1<<7; //Add delay to RX_CLK for RXD Outputs
13497 + mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 20, phy_val);
13499 + mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 0, &phy_val);
13500 + phy_val |= 1<<15; //PHY Software Reset
13501 + mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 0, phy_val);
13503 + if (isVtssGigaPHY(1)) {
13504 + mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 31, 0x0001); //extended page
13505 + mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 28, &phy_val);
13506 + printk("Vitesse phy skew: %x --> ", phy_val);
13507 + phy_val |= (0x3<<12); // RGMII RX skew compensation= 2.0 ns
13508 + phy_val &= ~(0x3<<14);// RGMII TX skew compensation= 0 ns
13509 + printk("%x\n", phy_val);
13510 + mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 28, phy_val);
13511 + mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 31, 0x0000); //main registers
13513 +#elif defined (CONFIG_P5_RMII_TO_MAC_MODE)
13514 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x3500) = 0x5e337;//(P5, Force mode, Link Up, 100Mbps, Full-Duplex, FC ON)
13515 +#else // Port 5 Disabled //
13516 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x3500) = 0x8000;//link down
13525 +#if defined (CONFIG_MT7623_FPGA)
13526 +void setup_fpga_gsw(void)
13531 + /* reduce RGMII2 PAD driving strength */
13532 + *(volatile u_long *)(PAD_RGMII2_MDIO_CFG) &= ~(0x3 << 4);
13534 + //RGMII1=Normal mode
13535 + *(volatile u_long *)(RALINK_SYSCTL_BASE + 0x60) &= ~(0x1 << 14);
13537 + //GMAC1= RGMII mode
13538 + *(volatile u_long *)(SYSCFG1) &= ~(0x3 << 12);
13540 + //enable MDIO to control MT7530
13541 + regValue = le32_to_cpu(*(volatile u_long *)(RALINK_SYSCTL_BASE + 0x60));
13542 + regValue &= ~(0x3 << 12);
13543 + *(volatile u_long *)(RALINK_SYSCTL_BASE + 0x60) = regValue;
13545 + for(i=0;i<=4;i++)
13548 + mii_mgr_read(i, 0x0 ,®Value);
13549 + regValue |= (0x1<<11);
13550 + mii_mgr_write(i, 0x0, regValue);
13552 + mii_mgr_write(31, 0x7000, 0x3); //reset switch
13554 + sysRegWrite(RALINK_ETH_SW_BASE+0x100, 0x2105e337);//(GE1, Force 100M/FD, FC ON)
13555 + mii_mgr_write(31, 0x3600, 0x5e337);
13557 + sysRegWrite(RALINK_ETH_SW_BASE+0x200, 0x00008000);//(GE2, Link down)
13558 + mii_mgr_write(31, 0x3500, 0x8000);
13560 +#if defined (CONFIG_GE1_RGMII_FORCE_1000) || defined (CONFIG_GE1_TRGMII_FORCE_1200) || defined (CONFIG_GE1_TRGMII_FORCE_2000) || defined (CONFIG_GE1_TRGMII_FORCE_2600)
13561 + //regValue = 0x117ccf; //Enable Port 6, P5 as GMAC5, P5 disable*/
13562 + mii_mgr_read(31, 0x7804 ,®Value);
13563 + regValue &= ~(1<<8); //Enable Port 6
13564 + regValue |= (1<<6); //Disable Port 5
13565 + regValue |= (1<<13); //Port 5 as GMAC, no Internal PHY
13567 +#if defined (CONFIG_RAETH_GMAC2)
13568 + //RGMII2=Normal mode
13569 + *(volatile u_long *)(RALINK_SYSCTL_BASE + 0x60) &= ~(0x1 << 15);
13571 + //GMAC2= RGMII mode
13572 + *(volatile u_long *)(SYSCFG1) &= ~(0x3 << 14);
13574 + mii_mgr_write(31, 0x3500, 0x56300); //MT7530 P5 AN, we can ignore this setting??????
13575 + sysRegWrite(RALINK_ETH_SW_BASE+0x200, 0x21056300);//(GE2, auto-polling)
13577 + enable_auto_negotiate(0);//set polling address
13578 + /* set MT7530 Port 5 to PHY 0/4 mode */
13579 +#if defined (CONFIG_GE_RGMII_INTERNAL_P0_AN)
13580 + regValue &= ~((1<<13)|(1<<6));
13581 + regValue |= ((1<<7)|(1<<16)|(1<<20));
13582 +#elif defined (CONFIG_GE_RGMII_INTERNAL_P4_AN)
13583 + regValue &= ~((1<<13)|(1<<6)|((1<<20)));
13584 + regValue |= ((1<<7)|(1<<16));
13587 + //sysRegWrite(GDMA2_FWD_CFG, 0x20710000);
13589 + regValue |= (1<<16);//change HW-TRAP
13590 + printk("change HW-TRAP to 0x%x\n",regValue);
13591 + mii_mgr_write(31, 0x7804 ,regValue);
13593 + mii_mgr_write(0, 14, 0x1); /*RGMII*/
13594 +/* set MT7530 central align */
13595 + mii_mgr_read(31, 0x7830, ®Value);
13597 + regValue |= 1<<1;
13598 + mii_mgr_write(31, 0x7830, regValue);
13600 + mii_mgr_read(31, 0x7a40, ®Value);
13601 + regValue &= ~(1<<30);
13602 + mii_mgr_write(31, 0x7a40, regValue);
13604 + regValue = 0x855;
13605 + mii_mgr_write(31, 0x7a78, regValue);
13608 + mii_mgr_write(31, 0x7b00, 0x102); //delay setting for 10/1000M
13609 + mii_mgr_write(31, 0x7b04, 0x14); //delay setting for 10/1000M
13611 + for(i=0;i<=4;i++) {
13612 + mii_mgr_read(i, 4, ®Value);
13613 + regValue |= (3<<7); //turn on 100Base-T Advertisement
13614 + //regValue &= ~(3<<7); //turn off 100Base-T Advertisement
13615 + mii_mgr_write(i, 4, regValue);
13617 + mii_mgr_read(i, 9, ®Value);
13618 + //regValue |= (3<<8); //turn on 1000Base-T Advertisement
13619 + regValue &= ~(3<<8); //turn off 1000Base-T Advertisement
13620 + mii_mgr_write(i, 9, regValue);
13623 + mii_mgr_read(i, 0, ®Value);
13624 + regValue |= (1 << 9);
13625 + mii_mgr_write(i, 0, regValue);
13629 + mii_mgr_write(31, 0x7a54, 0x44); //lower driving
13630 + mii_mgr_write(31, 0x7a5c, 0x44); //lower driving
13631 + mii_mgr_write(31, 0x7a64, 0x44); //lower driving
13632 + mii_mgr_write(31, 0x7a6c, 0x44); //lower driving
13633 + mii_mgr_write(31, 0x7a74, 0x44); //lower driving
13634 + mii_mgr_write(31, 0x7a7c, 0x44); //lower driving
13636 + for(i=0;i<=4;i++)
13639 + mii_mgr_read(i, 0x0 ,®Value);
13640 + regValue &= ~(0x1<<11);
13641 + mii_mgr_write(i, 0x0, regValue);
13647 +#if defined (CONFIG_RALINK_MT7621)
13650 +void setup_external_gsw(void)
13654 + /* reduce RGMII2 PAD driving strength */
13655 + *(volatile u_long *)(PAD_RGMII2_MDIO_CFG) &= ~(0x3 << 4);
13657 + regValue = le32_to_cpu(*(volatile u_long *)(RALINK_SYSCTL_BASE + 0x60));
13658 + regValue &= ~(0x3 << 12);
13659 + *(volatile u_long *)(RALINK_SYSCTL_BASE + 0x60) = regValue;
13661 + //RGMII1=Normal mode
13662 + *(volatile u_long *)(RALINK_SYSCTL_BASE + 0x60) &= ~(0x1 << 14);
13663 + //GMAC1= RGMII mode
13664 + *(volatile u_long *)(SYSCFG1) &= ~(0x3 << 12);
13666 + sysRegWrite(RALINK_ETH_SW_BASE+0x100, 0x00008000);//(GE1, Link down)
13668 + //RGMII2=Normal mode
13669 + *(volatile u_long *)(RALINK_SYSCTL_BASE + 0x60) &= ~(0x1 << 15);
13670 + //GMAC2= RGMII mode
13671 + *(volatile u_long *)(SYSCFG1) &= ~(0x3 << 14);
13673 + sysRegWrite(RALINK_ETH_SW_BASE+0x200, 0x2105e33b);//(GE2, Force 1000M/FD, FC ON)
13686 +void IsSwitchVlanTableBusy(void)
13689 + unsigned int value = 0;
13691 + for (j = 0; j < 20; j++) {
13692 + mii_mgr_read(31, 0x90, &value);
13693 + if ((value & 0x80000000) == 0 ){ //table busy
13699 + printk("set vlan timeout value=0x%x.\n", value);
13702 +void LANWANPartition(void)
13705 +#ifdef CONFIG_WAN_AT_P0
13706 + printk("set LAN/WAN WLLLL\n");
13707 + //WLLLL, wan at P0
13708 + //LAN/WAN ports as security mode
13709 + mii_mgr_write(31, 0x2004, 0xff0003);//port0
13710 + mii_mgr_write(31, 0x2104, 0xff0003);//port1
13711 + mii_mgr_write(31, 0x2204, 0xff0003);//port2
13712 + mii_mgr_write(31, 0x2304, 0xff0003);//port3
13713 + mii_mgr_write(31, 0x2404, 0xff0003);//port4
13716 + mii_mgr_write(31, 0x2014, 0x10002);//port0
13717 + mii_mgr_write(31, 0x2114, 0x10001);//port1
13718 + mii_mgr_write(31, 0x2214, 0x10001);//port2
13719 + mii_mgr_write(31, 0x2314, 0x10001);//port3
13720 + mii_mgr_write(31, 0x2414, 0x10001);//port4
13723 + IsSwitchVlanTableBusy();
13724 + mii_mgr_write(31, 0x94, 0x407e0001);//VAWD1
13725 + mii_mgr_write(31, 0x90, 0x80001001);//VTCR, VID=1
13726 + IsSwitchVlanTableBusy();
13728 + mii_mgr_write(31, 0x94, 0x40610001);//VAWD1
13729 + mii_mgr_write(31, 0x90, 0x80001002);//VTCR, VID=2
13730 + IsSwitchVlanTableBusy();
13732 +#ifdef CONFIG_WAN_AT_P4
13733 + printk("set LAN/WAN LLLLW\n");
13734 + //LLLLW, wan at P4
13735 + //LAN/WAN ports as security mode
13736 + mii_mgr_write(31, 0x2004, 0xff0003);//port0
13737 + mii_mgr_write(31, 0x2104, 0xff0003);//port1
13738 + mii_mgr_write(31, 0x2204, 0xff0003);//port2
13739 + mii_mgr_write(31, 0x2304, 0xff0003);//port3
13740 + mii_mgr_write(31, 0x2404, 0xff0003);//port4
13743 + mii_mgr_write(31, 0x2014, 0x10001);//port0
13744 + mii_mgr_write(31, 0x2114, 0x10001);//port1
13745 + mii_mgr_write(31, 0x2214, 0x10001);//port2
13746 + mii_mgr_write(31, 0x2314, 0x10001);//port3
13747 + mii_mgr_write(31, 0x2414, 0x10002);//port4
13750 + IsSwitchVlanTableBusy();
13751 + mii_mgr_write(31, 0x94, 0x404f0001);//VAWD1
13752 + mii_mgr_write(31, 0x90, 0x80001001);//VTCR, VID=1
13753 + IsSwitchVlanTableBusy();
13754 + mii_mgr_write(31, 0x94, 0x40500001);//VAWD1
13755 + mii_mgr_write(31, 0x90, 0x80001002);//VTCR, VID=2
13756 + IsSwitchVlanTableBusy();
13760 +#if defined (CONFIG_RAETH_8023AZ_EEE) && defined (CONFIG_RALINK_MT7621)
13761 +void mt7621_eee_patch(void)
13768 + mii_mgr_write(i, 13, 0x07);
13769 + mii_mgr_write(i, 14, 0x3c);
13770 + mii_mgr_write(i, 13, 0x4007);
13771 + mii_mgr_write(i, 14, 0x6);
13773 + /* Forced Slave mode */
13774 + mii_mgr_write(i, 31, 0x0);
13775 + mii_mgr_write(i, 9, 0x1600);
13776 + /* Increase SlvDPSready time */
13777 + mii_mgr_write(i, 31, 0x52b5);
13778 + mii_mgr_write(i, 16, 0xafae);
13779 + mii_mgr_write(i, 18, 0x2f);
13780 + mii_mgr_write(i, 16, 0x8fae);
13781 +	/* Increase post_update_timer */
13782 + mii_mgr_write(i, 31, 0x3);
13783 + mii_mgr_write(i, 17, 0x4b);
13784 + /* Adjust 100_mse_threshold */
13785 + mii_mgr_write(i, 13, 0x1e);
13786 + mii_mgr_write(i, 14, 0x123);
13787 + mii_mgr_write(i, 13, 0x401e);
13788 + mii_mgr_write(i, 14, 0xffff);
13790 + mii_mgr_write(i, 13, 0x1e);
13791 + mii_mgr_write(i, 14, 0xa6);
13792 + mii_mgr_write(i, 13, 0x401e);
13793 + mii_mgr_write(i, 14, 0x300);
13801 +#if defined (CONFIG_RALINK_MT7621)
13802 +void setup_internal_gsw(void)
13807 +#if defined (CONFIG_GE1_RGMII_FORCE_1000) || defined (CONFIG_GE1_TRGMII_FORCE_1200) || defined (CONFIG_GE1_TRGMII_FORCE_2000) || defined (CONFIG_GE1_TRGMII_FORCE_2600)
13808 + /*Hardware reset Switch*/
13809 + *(volatile u_long *)(RALINK_SYSCTL_BASE + 0x34) |= (0x1 << 2);
13811 + *(volatile u_long *)(RALINK_SYSCTL_BASE + 0x34) &= ~(0x1 << 2);
13814 + /* reduce RGMII2 PAD driving strength */
13815 + *(volatile u_long *)(PAD_RGMII2_MDIO_CFG) &= ~(0x3 << 4);
13817 + //RGMII1=Normal mode
13818 + *(volatile u_long *)(RALINK_SYSCTL_BASE + 0x60) &= ~(0x1 << 14);
13820 + //GMAC1= RGMII mode
13821 + *(volatile u_long *)(SYSCFG1) &= ~(0x3 << 12);
13823 + //enable MDIO to control MT7530
13824 + regValue = le32_to_cpu(*(volatile u_long *)(RALINK_SYSCTL_BASE + 0x60));
13825 + regValue &= ~(0x3 << 12);
13826 + *(volatile u_long *)(RALINK_SYSCTL_BASE + 0x60) = regValue;
13828 + for(i=0;i<=4;i++)
13831 + mii_mgr_read(i, 0x0 ,®Value);
13832 + regValue |= (0x1<<11);
13833 + mii_mgr_write(i, 0x0, regValue);
13835 + mii_mgr_write(31, 0x7000, 0x3); //reset switch
13839 +#if defined (CONFIG_GE1_TRGMII_FORCE_1200) && defined (CONFIG_MT7621_ASIC)
13840 + trgmii_set_7530(); //reset FE, config MDIO again
13842 + //enable MDIO to control MT7530
13843 + regValue = le32_to_cpu(*(volatile u_long *)(RALINK_SYSCTL_BASE + 0x60));
13844 + regValue &= ~(0x3 << 12);
13845 + *(volatile u_long *)(RALINK_SYSCTL_BASE + 0x60) = regValue;
13847 + // switch to APLL if TRGMII and DDR2
13848 + if ((sysRegRead(0xBE000010)>>4)&0x1)
13850 + apll_xtal_enable();
13854 +#if defined (CONFIG_MT7621_ASIC)
13855 + if((sysRegRead(0xbe00000c)&0xFFFF)==0x0101) {
13856 + sysRegWrite(RALINK_ETH_SW_BASE+0x100, 0x2105e30b);//(GE1, Force 1000M/FD, FC ON)
13857 + mii_mgr_write(31, 0x3600, 0x5e30b);
13859 + sysRegWrite(RALINK_ETH_SW_BASE+0x100, 0x2105e33b);//(GE1, Force 1000M/FD, FC ON)
13860 + mii_mgr_write(31, 0x3600, 0x5e33b);
13862 +#elif defined (CONFIG_MT7621_FPGA)
13863 + sysRegWrite(RALINK_ETH_SW_BASE+0x100, 0x2105e337);//(GE1, Force 100M/FD, FC ON)
13864 + mii_mgr_write(31, 0x3600, 0x5e337);
13867 + sysRegWrite(RALINK_ETH_SW_BASE+0x200, 0x00008000);//(GE2, Link down)
13870 +#if defined (CONFIG_GE1_RGMII_FORCE_1000) || defined (CONFIG_GE1_TRGMII_FORCE_1200) || defined (CONFIG_GE1_TRGMII_FORCE_2000) || defined (CONFIG_GE1_TRGMII_FORCE_2600)
13871 + //regValue = 0x117ccf; //Enable Port 6, P5 as GMAC5, P5 disable*/
13872 + mii_mgr_read(31, 0x7804 ,®Value);
13873 + regValue &= ~(1<<8); //Enable Port 6
13874 + regValue |= (1<<6); //Disable Port 5
13875 + regValue |= (1<<13); //Port 5 as GMAC, no Internal PHY
13877 +#if defined (CONFIG_RAETH_GMAC2)
13878 + //RGMII2=Normal mode
13879 + *(volatile u_long *)(RALINK_SYSCTL_BASE + 0x60) &= ~(0x1 << 15);
13881 + //GMAC2= RGMII mode
13882 + *(volatile u_long *)(SYSCFG1) &= ~(0x3 << 14);
13883 +#if !defined (CONFIG_RAETH_8023AZ_EEE)
13884 + mii_mgr_write(31, 0x3500, 0x56300); //MT7530 P5 AN, we can ignore this setting??????
13885 + sysRegWrite(RALINK_ETH_SW_BASE+0x200, 0x21056300);//(GE2, auto-polling)
13887 + enable_auto_negotiate(0);//set polling address
13889 +#if defined (CONFIG_RAETH_8023AZ_EEE)
13890 + mii_mgr_write(31, 0x3500, 0x5e33b); //MT7530 P5 Force 1000, we can ignore this setting??????
13891 + sysRegWrite(RALINK_ETH_SW_BASE+0x200, 0x2105e33b);//(GE2, Force 1000)
13896 + /* set MT7530 Port 5 to PHY 0/4 mode */
13897 +#if defined (CONFIG_GE_RGMII_INTERNAL_P0_AN)
13898 + regValue &= ~((1<<13)|(1<<6));
13899 + regValue |= ((1<<7)|(1<<16)|(1<<20));
13900 +#elif defined (CONFIG_GE_RGMII_INTERNAL_P4_AN)
13901 + regValue &= ~((1<<13)|(1<<6)|(1<<20));
13902 + regValue |= ((1<<7)|(1<<16));
13905 +#if defined (CONFIG_RAETH_8023AZ_EEE)
13906 + regValue |= (1<<13); //Port 5 as GMAC, no Internal PHY
13908 + //sysRegWrite(GDMA2_FWD_CFG, 0x20710000);
13910 + regValue |= (1<<16);//change HW-TRAP
13911 + //printk("change HW-TRAP to 0x%x\n",regValue);
13912 + mii_mgr_write(31, 0x7804 ,regValue);
13914 + mii_mgr_read(31, 0x7800, ®Value);
13915 + regValue = (regValue >> 9) & 0x3;
13916 + if(regValue == 0x3) { //25Mhz Xtal
13918 + } else if(regValue == 0x2) { //40Mhz
13920 + mii_mgr_write(0, 13, 0x1f); // disable MT7530 core clock
13921 + mii_mgr_write(0, 14, 0x410);
13922 + mii_mgr_write(0, 13, 0x401f);
13923 + mii_mgr_write(0, 14, 0x0);
13925 + mii_mgr_write(0, 13, 0x1f); // disable MT7530 PLL
13926 + mii_mgr_write(0, 14, 0x40d);
13927 + mii_mgr_write(0, 13, 0x401f);
13928 + mii_mgr_write(0, 14, 0x2020);
13930 + mii_mgr_write(0, 13, 0x1f); // for MT7530 core clock = 500Mhz
13931 + mii_mgr_write(0, 14, 0x40e);
13932 + mii_mgr_write(0, 13, 0x401f);
13933 + mii_mgr_write(0, 14, 0x119);
13935 + mii_mgr_write(0, 13, 0x1f); // enable MT7530 PLL
13936 + mii_mgr_write(0, 14, 0x40d);
13937 + mii_mgr_write(0, 13, 0x401f);
13938 + mii_mgr_write(0, 14, 0x2820);
13940 + udelay(20); //suggest by CD
13942 + mii_mgr_write(0, 13, 0x1f); // enable MT7530 core clock
13943 + mii_mgr_write(0, 14, 0x410);
13944 + mii_mgr_write(0, 13, 0x401f);
13945 + }else { //20Mhz Xtal
13950 +#if defined (CONFIG_GE1_TRGMII_FORCE_1200) && defined (CONFIG_MT7621_ASIC)
13951 + mii_mgr_write(0, 14, 0x3); /*TRGMII*/
13953 + mii_mgr_write(0, 14, 0x1); /*RGMII*/
13954 +/* set MT7530 central align */
13955 + mii_mgr_read(31, 0x7830, ®Value);
13957 + regValue |= 1<<1;
13958 + mii_mgr_write(31, 0x7830, regValue);
13960 + mii_mgr_read(31, 0x7a40, ®Value);
13961 + regValue &= ~(1<<30);
13962 + mii_mgr_write(31, 0x7a40, regValue);
13964 + regValue = 0x855;
13965 + mii_mgr_write(31, 0x7a78, regValue);
13968 +#if !defined (CONFIG_RAETH_8023AZ_EEE)
13969 + mii_mgr_write(31, 0x7b00, 0x102); //delay setting for 10/1000M
13970 + mii_mgr_write(31, 0x7b04, 0x14); //delay setting for 10/1000M
13973 + for(i=0;i<=4;i++) {
13974 + mii_mgr_read(i, 4, ®Value);
13975 + regValue |= (3<<7); //turn on 100Base-T Advertisement
13976 + //regValue &= ~(3<<7); //turn off 100Base-T Advertisement
13977 + mii_mgr_write(i, 4, regValue);
13979 + mii_mgr_read(i, 9, ®Value);
13980 + regValue |= (3<<8); //turn on 1000Base-T Advertisement
13981 + //regValue &= ~(3<<8); //turn off 1000Base-T Advertisement
13982 + mii_mgr_write(i, 9, regValue);
13985 + mii_mgr_read(i, 0, ®Value);
13986 + regValue |= (1 << 9);
13987 + mii_mgr_write(i, 0, regValue);
13992 + mii_mgr_write(31, 0x7a54, 0x44); //lower driving
13993 + mii_mgr_write(31, 0x7a5c, 0x44); //lower driving
13994 + mii_mgr_write(31, 0x7a64, 0x44); //lower driving
13995 + mii_mgr_write(31, 0x7a6c, 0x44); //lower driving
13996 + mii_mgr_write(31, 0x7a74, 0x44); //lower driving
13997 + mii_mgr_write(31, 0x7a7c, 0x44); //lower driving
14000 + LANWANPartition();
14002 +#if !defined (CONFIG_RAETH_8023AZ_EEE)
14004 + for(i=0;i<=4;i++)
14006 + mii_mgr_write(i, 13, 0x7);
14007 + mii_mgr_write(i, 14, 0x3C);
14008 + mii_mgr_write(i, 13, 0x4007);
14009 + mii_mgr_write(i, 14, 0x0);
14012 + //Disable EEE 10Base-Te:
14013 + for(i=0;i<=4;i++)
14015 + mii_mgr_write(i, 13, 0x1f);
14016 + mii_mgr_write(i, 14, 0x027b);
14017 + mii_mgr_write(i, 13, 0x401f);
14018 + mii_mgr_write(i, 14, 0x1177);
14022 + for(i=0;i<=4;i++)
14025 + mii_mgr_read(i, 0x0 ,®Value);
14026 + regValue &= ~(0x1<<11);
14027 + mii_mgr_write(i, 0x0, regValue);
14030 + mii_mgr_read(31, 0x7808 ,®Value);
14031 + regValue |= (3<<16); //Enable INTR
14032 + mii_mgr_write(31, 0x7808 ,regValue);
14033 +#if defined (CONFIG_RAETH_8023AZ_EEE) && defined (CONFIG_RALINK_MT7621)
14034 + mt7621_eee_patch();
14039 +#if defined (CONFIG_GE1_TRGMII_FORCE_1200)
14040 +void apll_xtal_enable(void)
14042 + unsigned long data = 0;
14043 + unsigned long regValue = 0;
14045 + /* Firstly, reset all required register to default value */
14046 + sysRegWrite(RALINK_ANA_CTRL_BASE, 0x00008000);
14047 + sysRegWrite(RALINK_ANA_CTRL_BASE+0x0014, 0x01401d61);
14048 + sysRegWrite(RALINK_ANA_CTRL_BASE+0x0018, 0x38233d0e);
14049 + sysRegWrite(RALINK_ANA_CTRL_BASE+0x001c, 0x80120004);
14050 + sysRegWrite(RALINK_ANA_CTRL_BASE+0x0020, 0x1c7dbf48);
14052 + /* toggle RG_XPTL_CHG */
14053 + sysRegWrite(RALINK_ANA_CTRL_BASE, 0x00008800);
14054 + sysRegWrite(RALINK_ANA_CTRL_BASE, 0x00008c00);
14056 + data = sysRegRead(RALINK_ANA_CTRL_BASE+0x0014);
14057 + data &= ~(0x0000ffc0);
14059 + regValue = *(volatile u_long *)(RALINK_SYSCTL_BASE + 0x10);
14060 + regValue = (regValue >> 6) & 0x7;
14061 + if(regValue < 6) { //20/40Mhz Xtal
14062 + data |= REGBIT(0x1d, 8);
14064 + data |= REGBIT(0x17, 8);
14067 + if(regValue < 6) { //20/40Mhz Xtal
14068 + data |= REGBIT(0x1, 6);
14071 + sysRegWrite(RALINK_ANA_CTRL_BASE+0x0014, data);
14073 + data = sysRegRead(RALINK_ANA_CTRL_BASE+0x0018);
14074 + data &= ~(0xf0773f00);
14075 + data |= REGBIT(0x3, 28);
14076 + data |= REGBIT(0x2, 20);
14077 + if(regValue < 6) { //20/40Mhz Xtal
14078 + data |= REGBIT(0x3, 16);
14080 + data |= REGBIT(0x2, 16);
14082 + data |= REGBIT(0x3, 12);
14084 + if(regValue < 6) { //20/40Mhz Xtal
14085 + data |= REGBIT(0xd, 8);
14087 + data |= REGBIT(0x7, 8);
14089 + sysRegWrite(RALINK_ANA_CTRL_BASE+0x0018, data);
14091 + if(regValue < 6) { //20/40Mhz Xtal
14092 + sysRegWrite(RALINK_ANA_CTRL_BASE+0x0020, 0x1c7dbf48);
14094 + sysRegWrite(RALINK_ANA_CTRL_BASE+0x0020, 0x1697cc39);
14096 +	/* Common setting - Set PLLGP_CTRL_4 */
14097 +	/* 1. Bit 31 */
14098 + data = sysRegRead(RALINK_ANA_CTRL_BASE+0x001c);
14099 + data &= ~(REGBIT(0x1, 31));
14100 + sysRegWrite(RALINK_ANA_CTRL_BASE+0x001c, data);
14103 + data = sysRegRead(RALINK_ANA_CTRL_BASE+0x001c);
14104 + data |= REGBIT(0x1, 0);
14105 + sysRegWrite(RALINK_ANA_CTRL_BASE+0x001c, data);
14108 + data = sysRegRead(RALINK_ANA_CTRL_BASE+0x001c);
14109 + data |= REGBIT(0x1, 3);
14110 + sysRegWrite(RALINK_ANA_CTRL_BASE+0x001c, data);
14113 + data = sysRegRead(RALINK_ANA_CTRL_BASE+0x001c);
14114 + data |= REGBIT(0x1, 8);
14115 + sysRegWrite(RALINK_ANA_CTRL_BASE+0x001c, data);
14118 + data = sysRegRead(RALINK_ANA_CTRL_BASE+0x001c);
14119 + data |= REGBIT(0x1, 6);
14120 + sysRegWrite(RALINK_ANA_CTRL_BASE+0x001c, data);
14123 + data = sysRegRead(RALINK_ANA_CTRL_BASE+0x001c);
14124 + data |= REGBIT(0x1, 5);
14125 + data |= REGBIT(0x1, 7);
14126 + sysRegWrite(RALINK_ANA_CTRL_BASE+0x001c, data);
14129 + data = sysRegRead(RALINK_ANA_CTRL_BASE+0x001c);
14130 + data &= ~REGBIT(0x1, 17);
14131 + sysRegWrite(RALINK_ANA_CTRL_BASE+0x001c, data);
14133 + /* 8. TRGMII TX CLK SEL APLL */
14134 + data = sysRegRead(0xbe00002c);
14135 + data &= 0xffffff9f;
14137 + sysRegWrite(0xbe00002c, data);
14143 +#if defined(CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_MT7620)
14144 +void rt_gsw_init(void)
14146 +#if defined (CONFIG_P4_MAC_TO_PHY_MODE) || defined (CONFIG_P5_MAC_TO_PHY_MODE)
14149 +#if defined (CONFIG_RT6855_FPGA) || defined (CONFIG_MT7620_FPGA)
14151 +#elif defined (CONFIG_MT7620_ASIC)
14154 +#if defined (CONFIG_P5_RGMII_TO_MT7530_MODE)
14155 + unsigned int regValue = 0;
14157 +#if defined (CONFIG_RT6855_FPGA) || defined (CONFIG_MT7620_FPGA)
14158 +	/*keep dumb switch mode */
14159 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x3000) = 0x5e333;//(P0, Force mode, Link Up, 10Mbps, Full-Duplex, FC ON)
14160 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x3100) = 0x5e333;//(P1, Force mode, Link Up, 10Mbps, Full-Duplex, FC ON)
14161 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x3200) = 0x5e333;//(P2, Force mode, Link Up, 10Mbps, Full-Duplex, FC ON)
14162 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x3300) = 0x5e333;//(P3, Force mode, Link Up, 10Mbps, Full-Duplex, FC ON)
14163 +#if defined (CONFIG_RAETH_HAS_PORT4)
14164 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x3400) = 0x5e337;//(P4, Force mode, Link Up, 100Mbps, Full-Duplex, FC ON)
14166 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x3400) = 0x5e333;//(P4, Force mode, Link Up, 10Mbps, Full-Duplex, FC ON)
14168 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x3500) = 0x5e337;//(P5, Force mode, Link Up, 100Mbps, Full-Duplex, FC ON)
14170 +	/* In order to use 10M/Full on the FPGA board, we configure the phy as capable of
14171 +	 * 10M Full/Half duplex, so we can use auto-negotiation on the PC side */
14172 +#if defined (CONFIG_RAETH_HAS_PORT4)
14173 + for(i=0;i<4;i++){
14175 + for(i=0;i<5;i++){
14177 + mii_mgr_write(i, 4, 0x0461); //Capable of 10M Full/Half Duplex, flow control on/off
14178 + mii_mgr_write(i, 0, 0xB100); //reset all digital logic, except phy_reg
14183 +#if defined (CONFIG_PDMA_NEW)
14184 + *(unsigned long *)(SYSCFG1) |= (0x1 << 8); //PCIE_RC_MODE=1
14188 +#if defined (CONFIG_MT7620_ASIC) && !defined (CONFIG_P5_RGMII_TO_MT7530_MODE)
14189 + is_BGA = (sysRegRead(RALINK_SYSCTL_BASE + 0xc) >> 16) & 0x1;
14191 + * Reg 31: Page Control
14192 + * Bit 15 => PortPageSel, 1=local, 0=global
14193 + * Bit 14:12 => PageSel, local:0~3, global:0~4
14195 + * Reg16~30:Local/Global registers
14198 + /*correct PHY setting L3.0 BGA*/
14199 + mii_mgr_write(1, 31, 0x4000); //global, page 4
14201 + mii_mgr_write(1, 17, 0x7444);
14203 + mii_mgr_write(1, 19, 0x0114);
14205 + mii_mgr_write(1, 19, 0x0117);
14208 + mii_mgr_write(1, 22, 0x10cf);
14209 + mii_mgr_write(1, 25, 0x6212);
14210 + mii_mgr_write(1, 26, 0x0777);
14211 + mii_mgr_write(1, 29, 0x4000);
14212 + mii_mgr_write(1, 28, 0xc077);
14213 + mii_mgr_write(1, 24, 0x0000);
14215 + mii_mgr_write(1, 31, 0x3000); //global, page 3
14216 + mii_mgr_write(1, 17, 0x4838);
14218 + mii_mgr_write(1, 31, 0x2000); //global, page 2
14220 + mii_mgr_write(1, 21, 0x0515);
14221 + mii_mgr_write(1, 22, 0x0053);
14222 + mii_mgr_write(1, 23, 0x00bf);
14223 + mii_mgr_write(1, 24, 0x0aaf);
14224 + mii_mgr_write(1, 25, 0x0fad);
14225 + mii_mgr_write(1, 26, 0x0fc1);
14227 + mii_mgr_write(1, 21, 0x0517);
14228 + mii_mgr_write(1, 22, 0x0fd2);
14229 + mii_mgr_write(1, 23, 0x00bf);
14230 + mii_mgr_write(1, 24, 0x0aab);
14231 + mii_mgr_write(1, 25, 0x00ae);
14232 + mii_mgr_write(1, 26, 0x0fff);
14234 + mii_mgr_write(1, 31, 0x1000); //global, page 1
14235 + mii_mgr_write(1, 17, 0xe7f8);
14237 + mii_mgr_write(1, 31, 0x8000); //local, page 0
14238 + mii_mgr_write(0, 30, 0xa000);
14239 + mii_mgr_write(1, 30, 0xa000);
14240 + mii_mgr_write(2, 30, 0xa000);
14241 + mii_mgr_write(3, 30, 0xa000);
14242 +#if !defined (CONFIG_RAETH_HAS_PORT4)
14243 + mii_mgr_write(4, 30, 0xa000);
14246 + mii_mgr_write(0, 4, 0x05e1);
14247 + mii_mgr_write(1, 4, 0x05e1);
14248 + mii_mgr_write(2, 4, 0x05e1);
14249 + mii_mgr_write(3, 4, 0x05e1);
14250 +#if !defined (CONFIG_RAETH_HAS_PORT4)
14251 + mii_mgr_write(4, 4, 0x05e1);
14254 + mii_mgr_write(1, 31, 0xa000); //local, page 2
14255 + mii_mgr_write(0, 16, 0x1111);
14256 + mii_mgr_write(1, 16, 0x1010);
14257 + mii_mgr_write(2, 16, 0x1515);
14258 + mii_mgr_write(3, 16, 0x0f0f);
14259 +#if !defined (CONFIG_RAETH_HAS_PORT4)
14260 + mii_mgr_write(4, 16, 0x1313);
14263 +#if !defined (CONFIG_RAETH_8023AZ_EEE)
14264 + mii_mgr_write(1, 31, 0xb000); //local, page 3
14265 + mii_mgr_write(0, 17, 0x0);
14266 + mii_mgr_write(1, 17, 0x0);
14267 + mii_mgr_write(2, 17, 0x0);
14268 + mii_mgr_write(3, 17, 0x0);
14269 +#if !defined (CONFIG_RAETH_HAS_PORT4)
14270 + mii_mgr_write(4, 17, 0x0);
14277 + // for ethernet extended mode
14278 + mii_mgr_write(1, 31, 0x3000);
14279 + mii_mgr_write(1, 19, 0x122);
14280 + mii_mgr_write(1, 20, 0x0044);
14281 + mii_mgr_write(1, 23, 0xa80c);
14282 + mii_mgr_write(1, 24, 0x129d);
14283 +	mii_mgr_write(1, 31, 0x9000); //local, page 1
14284 + mii_mgr_write(0, 18, 0x140c);
14285 + mii_mgr_write(1, 18, 0x140c);
14286 + mii_mgr_write(2, 18, 0x140c);
14287 + mii_mgr_write(3, 18, 0x140c);
14288 + mii_mgr_write(0, 0, 0x3300);
14289 + mii_mgr_write(1, 0, 0x3300);
14290 + mii_mgr_write(2, 0, 0x3300);
14291 + mii_mgr_write(3, 0, 0x3300);
14292 +#if !defined (CONFIG_RAETH_HAS_PORT4)
14293 + mii_mgr_write(4, 18, 0x140c);
14294 + mii_mgr_write(4, 0, 0x3300);
14300 +#if defined(CONFIG_RALINK_MT7620)
14301 + if ((sysRegRead(0xB000000C) & 0xf) >= 0x5) {
14302 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x701c) = 0x800000c; //enlarge FE2SW_IPG
14304 +#endif // CONFIG_RALINK_MT7620 //
14308 +#if defined (CONFIG_MT7620_FPGA)|| defined (CONFIG_MT7620_ASIC)
14309 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x3600) = 0x5e33b;//CPU Port6 Force Link 1G, FC ON
14310 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x0010) = 0x7f7f7fe0;//Set Port6 CPU Port
14312 +#if defined (CONFIG_P5_RGMII_TO_MAC_MODE) || defined (CONFIG_P5_RGMII_TO_MT7530_MODE)
14313 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x3500) = 0x5e33b;//(P5, Force mode, Link Up, 1000Mbps, Full-Duplex, FC ON)
14314 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x7014) = 0x1f0c000c; //disable port 0 ~ 4 internal phy, set phy base address to 12
14315 + /*MT7620 need mac learning for PPE*/
14316 + //*(unsigned long *)(RALINK_ETH_SW_BASE+0x250c) = 0x000fff10;//disable port5 mac learning
14317 + //*(unsigned long *)(RALINK_ETH_SW_BASE+0x260c) = 0x000fff10;//disable port6 mac learning
14318 + *(unsigned long *)(0xb0000060) &= ~(1 << 9); //set RGMII to Normal mode
14319 + //rxclk_skew, txclk_skew = 0
14320 + *(unsigned long *)(SYSCFG1) &= ~(0x3 << 12); //GE1_MODE=RGMii Mode
14321 +#if defined (CONFIG_P5_RGMII_TO_MT7530_MODE)
14323 + *(unsigned long *)(0xb0000060) &= ~(3 << 7); //set MDIO to Normal mode
14325 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x3400) = 0x56330;//(P4, AN)
14326 + *(unsigned long *)(0xb0000060) &= ~(1 << 10); //set GE2 to Normal mode
14327 + //rxclk_skew, txclk_skew = 0
14328 + *(unsigned long *)(SYSCFG1) &= ~(0x3 << 14); //GE2_MODE=RGMii Mode
14331 + /* set MT7530 Port 0 to PHY mode */
14332 +	mii_mgr_read(31, 0x7804 ,&regValue);
14333 +#if defined (CONFIG_GE_RGMII_MT7530_P0_AN)
14334 + regValue &= ~((1<<13)|(1<<6)|(1<<5)|(1<<15));
14335 + regValue |= ((1<<7)|(1<<16)|(1<<20)|(1<<24));
14336 + //mii_mgr_write(31, 0x7804 ,0x115c8f);
14337 +#elif defined (CONFIG_GE_RGMII_MT7530_P4_AN)
14338 + regValue &= ~((1<<13)|(1<<6)|(1<<20)|(1<<5)|(1<<15));
14339 + regValue |= ((1<<7)|(1<<16)|(1<<24));
14341 + regValue &= ~(1<<8); //Enable Port 6
14342 + mii_mgr_write(31, 0x7804 ,regValue); //bit 24 standalone switch
14344 +/* set MT7530 central align */
14345 +	mii_mgr_read(31, 0x7830, &regValue);
14347 + regValue |= 1<<1;
14348 + mii_mgr_write(31, 0x7830, regValue);
14350 +	mii_mgr_read(31, 0x7a40, &regValue);
14351 + regValue &= ~(1<<30);
14352 + mii_mgr_write(31, 0x7a40, regValue);
14354 + regValue = 0x855;
14355 + mii_mgr_write(31, 0x7a78, regValue);
14357 + /*AN should be set after MT7530 HWSTRAP*/
14358 +#if defined (CONFIG_GE_RGMII_MT7530_P0_AN)
14359 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x7000) = 0xc5000100;//(P0, AN polling)
14360 +#elif defined (CONFIG_GE_RGMII_MT7530_P4_AN)
14361 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x7000) = 0xc5000504;//(P4, AN polling)
14365 +#elif defined (CONFIG_P5_MII_TO_MAC_MODE)
14366 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x3500) = 0x5e337;//(P5, Force mode, Link Up, 100Mbps, Full-Duplex, FC ON)
14367 + *(unsigned long *)(0xb0000060) &= ~(1 << 9); //set RGMII to Normal mode
14368 + *(unsigned long *)(SYSCFG1) &= ~(0x3 << 12); //GE1_MODE=Mii Mode
14369 + *(unsigned long *)(SYSCFG1) |= (0x1 << 12);
14371 +#elif defined (CONFIG_P5_MAC_TO_PHY_MODE)
14372 + *(unsigned long *)(0xb0000060) &= ~(1 << 9); //set RGMII to Normal mode
14373 + *(unsigned long *)(0xb0000060) &= ~(3 << 7); //set MDIO to Normal mode
14374 + *(unsigned long *)(SYSCFG1) &= ~(0x3 << 12); //GE1_MODE=RGMii Mode
14376 + enable_auto_negotiate(1);
14378 + if (isICPlusGigaPHY(1)) {
14379 + mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 4, &phy_val);
14380 + phy_val |= 1<<10; //enable pause ability
14381 + mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 4, phy_val);
14383 + mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 0, &phy_val);
14384 + phy_val |= 1<<9; //restart AN
14385 + mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 0, phy_val);
14386 + }else if (isMarvellGigaPHY(1)) {
14387 +#if defined (CONFIG_MT7620_FPGA)
14388 + mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 9, &phy_val);
14389 + phy_val &= ~(3<<8); //turn off 1000Base-T Advertisement (9.9=1000Full, 9.8=1000Half)
14390 + mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 9, phy_val);
14392 + printk("Reset MARVELL phy1\n");
14393 + mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 20, &phy_val);
14394 + phy_val |= 1<<7; //Add delay to RX_CLK for RXD Outputs
14395 + mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 20, phy_val);
14397 + mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 0, &phy_val);
14398 + phy_val |= 1<<15; //PHY Software Reset
14399 + mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 0, phy_val);
14400 + }else if (isVtssGigaPHY(1)) {
14401 + mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 31, 0x0001); //extended page
14402 + mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 28, &phy_val);
14403 + printk("Vitesse phy skew: %x --> ", phy_val);
14404 + phy_val |= (0x3<<12); // RGMII RX skew compensation= 2.0 ns
14405 + phy_val &= ~(0x3<<14);// RGMII TX skew compensation= 0 ns
14406 + printk("%x\n", phy_val);
14407 + mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 28, phy_val);
14408 + mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 31, 0x0000); //main registers
14412 +#elif defined (CONFIG_P5_RMII_TO_MAC_MODE)
14413 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x3500) = 0x5e337;//(P5, Force mode, Link Up, 100Mbps, Full-Duplex, FC ON)
14414 + *(unsigned long *)(0xb0000060) &= ~(1 << 9); //set RGMII to Normal mode
14415 + *(unsigned long *)(SYSCFG1) &= ~(0x3 << 12); //GE1_MODE=RvMii Mode
14416 + *(unsigned long *)(SYSCFG1) |= (0x2 << 12);
14418 +#else // Port 5 Disabled //
14419 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x3500) = 0x8000;//link down
14420 + *(unsigned long *)(0xb0000060) |= (1 << 9); //set RGMII to GPIO mode
14424 +#if defined (CONFIG_P4_RGMII_TO_MAC_MODE)
14425 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x3400) = 0x5e33b;//(P4, Force mode, Link Up, 1000Mbps, Full-Duplex, FC ON)
14426 + *(unsigned long *)(0xb0000060) &= ~(1 << 10); //set GE2 to Normal mode
14427 + //rxclk_skew, txclk_skew = 0
14428 + *(unsigned long *)(SYSCFG1) &= ~(0x3 << 14); //GE2_MODE=RGMii Mode
14430 +#elif defined (CONFIG_P4_MII_TO_MAC_MODE)
14431 + *(unsigned long *)(0xb0000060) &= ~(1 << 10); //set GE2 to Normal mode
14432 + *(unsigned long *)(SYSCFG1) &= ~(0x3 << 14); //GE2_MODE=Mii Mode
14433 + *(unsigned long *)(SYSCFG1) |= (0x1 << 14);
14435 +#elif defined (CONFIG_P4_MAC_TO_PHY_MODE)
14436 + *(unsigned long *)(0xb0000060) &= ~(1 << 10); //set GE2 to Normal mode
14437 + *(unsigned long *)(0xb0000060) &= ~(3 << 7); //set MDIO to Normal mode
14438 + *(unsigned long *)(SYSCFG1) &= ~(0x3 << 14); //GE2_MODE=RGMii Mode
14440 + enable_auto_negotiate(1);
14442 + if (isICPlusGigaPHY(2)) {
14443 + mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 4, &phy_val);
14444 + phy_val |= 1<<10; //enable pause ability
14445 + mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 4, phy_val);
14447 + mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 0, &phy_val);
14448 + phy_val |= 1<<9; //restart AN
14449 + mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 0, phy_val);
14450 + }else if (isMarvellGigaPHY(2)) {
14451 +#if defined (CONFIG_MT7620_FPGA)
14452 + mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 9, &phy_val);
14453 + phy_val &= ~(3<<8); //turn off 1000Base-T Advertisement (9.9=1000Full, 9.8=1000Half)
14454 + mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 9, phy_val);
14456 + printk("Reset MARVELL phy2\n");
14457 + mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 20, &phy_val);
14458 + phy_val |= 1<<7; //Add delay to RX_CLK for RXD Outputs
14459 + mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 20, phy_val);
14461 + mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 0, &phy_val);
14462 + phy_val |= 1<<15; //PHY Software Reset
14463 + mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 0, phy_val);
14464 + }else if (isVtssGigaPHY(2)) {
14465 + mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 31, 0x0001); //extended page
14466 + mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 28, &phy_val);
14467 + printk("Vitesse phy skew: %x --> ", phy_val);
14468 + phy_val |= (0x3<<12); // RGMII RX skew compensation= 2.0 ns
14469 + phy_val &= ~(0x3<<14);// RGMII TX skew compensation= 0 ns
14470 + printk("%x\n", phy_val);
14471 + mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 28, phy_val);
14472 + mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 31, 0x0000); //main registers
14475 +#elif defined (CONFIG_P4_RMII_TO_MAC_MODE)
14476 +	*(unsigned long *)(RALINK_ETH_SW_BASE+0x3400) = 0x5e337;//(P4, Force mode, Link Up, 100Mbps, Full-Duplex, FC ON)
14477 + *(unsigned long *)(0xb0000060) &= ~(1 << 10); //set GE2 to Normal mode
14478 +	*(unsigned long *)(SYSCFG1) &= ~(0x3 << 14);  //GE2_MODE=RvMii Mode
14479 + *(unsigned long *)(SYSCFG1) |= (0x2 << 14);
14480 +#elif defined (CONFIG_GE_RGMII_MT7530_P0_AN) || defined (CONFIG_GE_RGMII_MT7530_P4_AN)
14481 +#else // Port 4 Disabled //
14482 + *(unsigned long *)(SYSCFG1) |= (0x3 << 14); //GE2_MODE=RJ45 Mode
14483 + *(unsigned long *)(0xb0000060) |= (1 << 10); //set RGMII2 to GPIO mode
14489 +#if defined (CONFIG_RALINK_MT7628)
14491 +void mt7628_ephy_init(void)
14495 + mii_mgr_write(0, 31, 0x2000); //change G2 page
14496 + mii_mgr_write(0, 26, 0x0000);
14498 + for(i=0; i<5; i++){
14499 + mii_mgr_write(i, 31, 0x8000); //change L0 page
14500 + mii_mgr_write(i, 0, 0x3100);
14502 +#if defined (CONFIG_RAETH_8023AZ_EEE)
14503 + mii_mgr_read(i, 26, &phy_val);// EEE setting
14504 + phy_val |= (1 << 5);
14505 + mii_mgr_write(i, 26, phy_val);
14508 + mii_mgr_write(i, 13, 0x7);
14509 + mii_mgr_write(i, 14, 0x3C);
14510 + mii_mgr_write(i, 13, 0x4007);
14511 + mii_mgr_write(i, 14, 0x0);
14513 + mii_mgr_write(i, 30, 0xa000);
14514 + mii_mgr_write(i, 31, 0xa000); // change L2 page
14515 + mii_mgr_write(i, 16, 0x0606);
14516 + mii_mgr_write(i, 23, 0x0f0e);
14517 + mii_mgr_write(i, 24, 0x1610);
14518 + mii_mgr_write(i, 30, 0x1f15);
14519 + mii_mgr_write(i, 28, 0x6111);
14521 + mii_mgr_read(i, 4, &phy_val);
14522 + phy_val |= (1 << 10);
14523 + mii_mgr_write(i, 4, phy_val);
14526 + //100Base AOI setting
14527 + mii_mgr_write(0, 31, 0x5000); //change G5 page
14528 + mii_mgr_write(0, 19, 0x004a);
14529 + mii_mgr_write(0, 20, 0x015a);
14530 + mii_mgr_write(0, 21, 0x00ee);
14531 + mii_mgr_write(0, 22, 0x0033);
14532 + mii_mgr_write(0, 23, 0x020a);
14533 + mii_mgr_write(0, 24, 0x0000);
14534 + mii_mgr_write(0, 25, 0x024a);
14535 + mii_mgr_write(0, 26, 0x035a);
14536 + mii_mgr_write(0, 27, 0x02ee);
14537 + mii_mgr_write(0, 28, 0x0233);
14538 + mii_mgr_write(0, 29, 0x000a);
14539 + mii_mgr_write(0, 30, 0x0000);
14540 + /* Fix EPHY idle state abnormal behavior */
14541 + mii_mgr_write(0, 31, 0x4000); //change G4 page
14542 + mii_mgr_write(0, 29, 0x000d);
14543 + mii_mgr_write(0, 30, 0x0500);
14550 +#if defined (CONFIG_RALINK_RT3052) || defined (CONFIG_RALINK_RT3352) || defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_MT7628)
14551 +void rt305x_esw_init(void)
14554 + u32 phy_val=0, val=0;
14555 +#if defined (CONFIG_RT3052_ASIC)
14559 +#if defined (CONFIG_RT5350_ASIC)
14560 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x0168) = 0x17;
14564 + * FC_RLS_TH=200, FC_SET_TH=160
14565 + * DROP_RLS=120, DROP_SET_TH=80
14567 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x0008) = 0xC8A07850;
14568 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x00E4) = 0x00000000;
14569 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x0014) = 0x00405555;
14570 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x0050) = 0x00002001;
14571 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x0090) = 0x00007f7f;
14572 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x0098) = 0x00007f3f; //disable VLAN
14573 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x00CC) = 0x0002500c;
14574 +#ifndef CONFIG_UNH_TEST
14575 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x009C) = 0x0008a301; //hashing algorithm=XOR48, aging interval=300sec
14578 + * bit[30]:1 Backoff Algorithm Option: The latest one to pass UNH test
14579 + * bit[29]:1 Length of Received Frame Check Enable
14580 + * bit[8]:0 Enable collision 16 packet abort and late collision abort
14581 + * bit[7:6]:01 Maximum Packet Length: 1518
14583 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x009C) = 0x6008a241;
14585 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x008C) = 0x02404040;
14586 +#if defined (CONFIG_RT3052_ASIC) || defined (CONFIG_RT3352_ASIC) || defined (CONFIG_RT5350_ASIC) || defined (CONFIG_MT7628_ASIC)
14587 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x00C8) = 0x3f502b28; //Change polling Ext PHY Addr=0x1F
14588 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x0084) = 0x00000000;
14589 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x0110) = 0x7d000000; //1us cycle number=125 (FE's clock=125Mhz)
14590 +#elif defined (CONFIG_RT3052_FPGA) || defined (CONFIG_RT3352_FPGA) || defined (CONFIG_RT5350_FPGA) || defined (CONFIG_MT7628_FPGA)
14591 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x00C8) = 0x00f03ff9; //polling Ext PHY Addr=0x0, force port5 as 100F/D (disable auto-polling)
14592 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x0084) = 0xffdf1f00;
14593 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x0110) = 0x0d000000; //1us cycle number=13 (FE's clock=12.5Mhz)
14595 + /* In order to use 10M/Full on FPGA board. We configure phy capable to
14596 + * 10M Full/Half duplex, so we can use auto-negotiation on PC side */
14597 + for(i=0;i<5;i++){
14598 + mii_mgr_write(i, 4, 0x0461); //Capable of 10M Full/Half Duplex, flow control on/off
14599 + mii_mgr_write(i, 0, 0xB100); //reset all digital logic, except phy_reg
14604 + * set port 5 force to 1000M/Full when connecting to switch or iNIC
14606 +#if defined (CONFIG_P5_RGMII_TO_MAC_MODE)
14607 + *(unsigned long *)(0xb0000060) &= ~(1 << 9); //set RGMII to Normal mode
14608 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x00C8) &= ~(1<<29); //disable port 5 auto-polling
14609 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x00C8) |= 0x3fff; //force 1000M full duplex
14610 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x00C8) &= ~(0xf<<20); //rxclk_skew, txclk_skew = 0
14611 +#elif defined (CONFIG_P5_MII_TO_MAC_MODE)
14612 + *(unsigned long *)(0xb0000060) &= ~(1 << 9); //set RGMII to Normal mode
14613 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x00C8) &= ~(1<<29); //disable port 5 auto-polling
14614 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x00C8) &= ~(0x3fff);
14615 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x00C8) |= 0x3ffd; //force 100M full duplex
14617 +#if defined (CONFIG_RALINK_RT3352)
14618 + *(unsigned long *)(SYSCFG1) &= ~(0x3 << 12); //GE1_MODE=Mii Mode
14619 + *(unsigned long *)(SYSCFG1) |= (0x1 << 12);
14622 +#elif defined (CONFIG_P5_MAC_TO_PHY_MODE)
14623 + *(unsigned long *)(0xb0000060) &= ~(1 << 9); //set RGMII to Normal mode
14624 + *(unsigned long *)(0xb0000060) &= ~(1 << 7); //set MDIO to Normal mode
14625 +#if defined (CONFIG_RT3052_ASIC) || defined (CONFIG_RT3352_ASIC)
14626 + enable_auto_negotiate(1);
14628 + if (isMarvellGigaPHY(1)) {
14629 +#if defined (CONFIG_RT3052_FPGA) || defined (CONFIG_RT3352_FPGA)
14630 + mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 9, &phy_val);
14631 + phy_val &= ~(3<<8); //turn off 1000Base-T Advertisement (9.9=1000Full, 9.8=1000Half)
14632 + mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 9, phy_val);
14634 + printk("\n Reset MARVELL phy\n");
14635 + mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 20, &phy_val);
14636 + phy_val |= 1<<7; //Add delay to RX_CLK for RXD Outputs
14637 + mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 20, phy_val);
14639 + mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 0, &phy_val);
14640 + phy_val |= 1<<15; //PHY Software Reset
14641 + mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 0, phy_val);
14643 + if (isVtssGigaPHY(1)) {
14644 + mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 31, 0x0001); //extended page
14645 + mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 28, &phy_val);
14646 + printk("Vitesse phy skew: %x --> ", phy_val);
14647 + phy_val |= (0x3<<12); // RGMII RX skew compensation= 2.0 ns
14648 + phy_val &= ~(0x3<<14);// RGMII TX skew compensation= 0 ns
14649 + printk("%x\n", phy_val);
14650 + mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 28, phy_val);
14651 + mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 31, 0x0000); //main registers
14654 +#elif defined (CONFIG_P5_RMII_TO_MAC_MODE)
14655 + *(unsigned long *)(0xb0000060) &= ~(1 << 9); //set RGMII to Normal mode
14656 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x00C8) &= ~(1<<29); //disable port 5 auto-polling
14657 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x00C8) &= ~(0x3fff);
14658 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x00C8) |= 0x3ffd; //force 100M full duplex
14660 +#if defined (CONFIG_RALINK_RT3352)
14661 + *(unsigned long *)(SYSCFG1) &= ~(0x3 << 12); //GE1_MODE=RvMii Mode
14662 + *(unsigned long *)(SYSCFG1) |= (0x2 << 12);
14664 +#else // Port 5 Disabled //
14666 +#if defined (CONFIG_RALINK_RT3052)
14667 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x00C8) &= ~(1 << 29); //port5 auto polling disable
14668 + *(unsigned long *)(0xb0000060) |= (1 << 7); //set MDIO to GPIO mode (GPIO22-GPIO23)
14669 + *(unsigned long *)(0xb0000060) |= (1 << 9); //set RGMII to GPIO mode (GPIO41-GPIO50)
14670 + *(unsigned long *)(0xb0000674) = 0xFFF; //GPIO41-GPIO50 output mode
14671 + *(unsigned long *)(0xb000067C) = 0x0; //GPIO41-GPIO50 output low
14672 +#elif defined (CONFIG_RALINK_RT3352)
14673 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x00C8) &= ~(1 << 29); //port5 auto polling disable
14674 + *(unsigned long *)(0xb0000060) |= (1 << 7); //set MDIO to GPIO mode (GPIO22-GPIO23)
14675 + *(unsigned long *)(0xb0000624) = 0xC0000000; //GPIO22-GPIO23 output mode
14676 + *(unsigned long *)(0xb000062C) = 0xC0000000; //GPIO22-GPIO23 output high
14678 + *(unsigned long *)(0xb0000060) |= (1 << 9); //set RGMII to GPIO mode (GPIO24-GPIO35)
14679 + *(unsigned long *)(0xb000064C) = 0xFFF; //GPIO24-GPIO35 output mode
14680 + *(unsigned long *)(0xb0000654) = 0xFFF; //GPIO24-GPIO35 output high
14681 +#elif defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_MT7628)
14684 +#endif // CONFIG_P5_RGMII_TO_MAC_MODE //
14687 +#if defined (CONFIG_RT3052_ASIC)
14688 + rw_rf_reg(0, 0, &phy_val);
14689 + phy_val = phy_val >> 4;
14691 + if(phy_val > 0x5) {
14693 + rw_rf_reg(0, 26, &phy_val);
14694 + phy_val2 = (phy_val | (0x3 << 5));
14695 + rw_rf_reg(1, 26, &phy_val2);
14698 + val = sysRegRead(RSTCTRL);
14699 + val = val | RALINK_EPHY_RST;
14700 + sysRegWrite(RSTCTRL, val);
14701 + val = val & ~(RALINK_EPHY_RST);
14702 + sysRegWrite(RSTCTRL, val);
14704 + rw_rf_reg(1, 26, &phy_val);
14706 + //select local register
14707 + mii_mgr_write(0, 31, 0x8000);
14708 + for(i=0;i<5;i++){
14709 + mii_mgr_write(i, 26, 0x1600); //TX10 waveform coefficient //LSB=0 disable PHY
14710 + mii_mgr_write(i, 29, 0x7058); //TX100/TX10 AD/DA current bias
14711 + mii_mgr_write(i, 30, 0x0018); //TX100 slew rate control
14714 + //select global register
14715 + mii_mgr_write(0, 31, 0x0);
14716 + mii_mgr_write(0, 1, 0x4a40); //enlarge agcsel threshold 3 and threshold 2
14717 + mii_mgr_write(0, 2, 0x6254); //enlarge agcsel threshold 5 and threshold 4
14718 + mii_mgr_write(0, 3, 0xa17f); //enlarge agcsel threshold 6
14719 +//#define ENABLE_LDPS
14720 +#if defined (ENABLE_LDPS)
14721 + mii_mgr_write(0, 12, 0x7eaa);
14722 + mii_mgr_write(0, 22, 0x252f); //tune TP_IDL tail and head waveform, enable power down slew rate control
14724 + mii_mgr_write(0, 12, 0x0);
14725 + mii_mgr_write(0, 22, 0x052f);
14727 + mii_mgr_write(0, 14, 0x65); //longer TP_IDL tail length
14728 + mii_mgr_write(0, 16, 0x0684); //increased squelch pulse count threshold.
14729 + mii_mgr_write(0, 17, 0x0fe0); //set TX10 signal amplitude threshold to minimum
14730 + mii_mgr_write(0, 18, 0x40ba); //set squelch amplitude to higher threshold
14731 + mii_mgr_write(0, 27, 0x2fce); //set PLL/Receive bias current are calibrated
14732 + mii_mgr_write(0, 28, 0xc410); //change PLL/Receive bias current to internal(RT3350)
14733 + mii_mgr_write(0, 29, 0x598b); //change PLL bias current to internal(RT3052_MP3)
14734 + mii_mgr_write(0, 31, 0x8000); //select local register
14736 + for(i=0;i<5;i++){
14737 + //LSB=1 enable PHY
14738 + mii_mgr_read(i, 26, &phy_val);
14739 + phy_val |= 0x0001;
14740 + mii_mgr_write(i, 26, phy_val);
14743 + //select local register
14744 + mii_mgr_write(0, 31, 0x8000);
14745 + for(i=0;i<5;i++){
14746 + mii_mgr_write(i, 26, 0x1600); //TX10 waveform coefficient //LSB=0 disable PHY
14747 + mii_mgr_write(i, 29, 0x7058); //TX100/TX10 AD/DA current bias
14748 + mii_mgr_write(i, 30, 0x0018); //TX100 slew rate control
14751 + //select global register
14752 + mii_mgr_write(0, 31, 0x0);
14753 + mii_mgr_write(0, 1, 0x4a40); //enlarge agcsel threshold 3 and threshold 2
14754 + mii_mgr_write(0, 2, 0x6254); //enlarge agcsel threshold 5 and threshold 4
14755 + mii_mgr_write(0, 3, 0xa17f); //enlarge agcsel threshold 6
14756 + mii_mgr_write(0, 14, 0x65); //longer TP_IDL tail length
14757 + mii_mgr_write(0, 16, 0x0684); //increased squelch pulse count threshold.
14758 + mii_mgr_write(0, 17, 0x0fe0); //set TX10 signal amplitude threshold to minimum
14759 + mii_mgr_write(0, 18, 0x40ba); //set squelch amplitude to higher threshold
14760 + mii_mgr_write(0, 22, 0x052f); //tune TP_IDL tail and head waveform
14761 + mii_mgr_write(0, 27, 0x2fce); //set PLL/Receive bias current are calibrated
14762 + mii_mgr_write(0, 28, 0xc410); //change PLL/Receive bias current to internal(RT3350)
14763 + mii_mgr_write(0, 29, 0x598b); //change PLL bias current to internal(RT3052_MP3)
14764 + mii_mgr_write(0, 31, 0x8000); //select local register
14766 + for(i=0;i<5;i++){
14767 + //LSB=1 enable PHY
14768 + mii_mgr_read(i, 26, &phy_val);
14769 + phy_val |= 0x0001;
14770 + mii_mgr_write(i, 26, phy_val);
14773 +#elif defined (CONFIG_RT3352_ASIC)
14776 + val = sysRegRead(RSTCTRL);
14777 + val = val | RALINK_EPHY_RST;
14778 + sysRegWrite(RSTCTRL, val);
14779 + val = val & ~(RALINK_EPHY_RST);
14780 + sysRegWrite(RSTCTRL, val);
14782 + //select local register
14783 + mii_mgr_write(0, 31, 0x8000);
14784 + for(i=0;i<5;i++){
14785 + mii_mgr_write(i, 26, 0x1600); //TX10 waveform coefficient //LSB=0 disable PHY
14786 + mii_mgr_write(i, 29, 0x7016); //TX100/TX10 AD/DA current bias
14787 + mii_mgr_write(i, 30, 0x0038); //TX100 slew rate control
14790 + //select global register
14791 + mii_mgr_write(0, 31, 0x0);
14792 + mii_mgr_write(0, 1, 0x4a40); //enlarge agcsel threshold 3 and threshold 2
14793 + mii_mgr_write(0, 2, 0x6254); //enlarge agcsel threshold 5 and threshold 4
14794 + mii_mgr_write(0, 3, 0xa17f); //enlarge agcsel threshold 6
14795 + mii_mgr_write(0, 12, 0x7eaa);
14796 + mii_mgr_write(0, 14, 0x65); //longer TP_IDL tail length
14797 + mii_mgr_write(0, 16, 0x0684); //increased squelch pulse count threshold.
14798 + mii_mgr_write(0, 17, 0x0fe0); //set TX10 signal amplitude threshold to minimum
14799 + mii_mgr_write(0, 18, 0x40ba); //set squelch amplitude to higher threshold
14800 + mii_mgr_write(0, 22, 0x253f); //tune TP_IDL tail and head waveform, enable power down slew rate control
14801 + mii_mgr_write(0, 27, 0x2fda); //set PLL/Receive bias current are calibrated
14802 + mii_mgr_write(0, 28, 0xc410); //change PLL/Receive bias current to internal(RT3350)
14803 + mii_mgr_write(0, 29, 0x598b); //change PLL bias current to internal(RT3052_MP3)
14804 + mii_mgr_write(0, 31, 0x8000); //select local register
14806 + for(i=0;i<5;i++){
14807 + //LSB=1 enable PHY
14808 + mii_mgr_read(i, 26, &phy_val);
14809 + phy_val |= 0x0001;
14810 + mii_mgr_write(i, 26, phy_val);
14813 +#elif defined (CONFIG_RT5350_ASIC)
14816 + val = sysRegRead(RSTCTRL);
14817 + val = val | RALINK_EPHY_RST;
14818 + sysRegWrite(RSTCTRL, val);
14819 + val = val & ~(RALINK_EPHY_RST);
14820 + sysRegWrite(RSTCTRL, val);
14822 + //select local register
14823 + mii_mgr_write(0, 31, 0x8000);
14824 + for(i=0;i<5;i++){
14825 + mii_mgr_write(i, 26, 0x1600); //TX10 waveform coefficient //LSB=0 disable PHY
14826 + mii_mgr_write(i, 29, 0x7015); //TX100/TX10 AD/DA current bias
14827 + mii_mgr_write(i, 30, 0x0038); //TX100 slew rate control
14830 + //select global register
14831 + mii_mgr_write(0, 31, 0x0);
14832 + mii_mgr_write(0, 1, 0x4a40); //enlarge agcsel threshold 3 and threshold 2
14833 + mii_mgr_write(0, 2, 0x6254); //enlarge agcsel threshold 5 and threshold 4
14834 + mii_mgr_write(0, 3, 0xa17f); //enlarge agcsel threshold 6
14835 + mii_mgr_write(0, 12, 0x7eaa);
14836 + mii_mgr_write(0, 14, 0x65); //longer TP_IDL tail length
14837 + mii_mgr_write(0, 16, 0x0684); //increased squelch pulse count threshold.
14838 + mii_mgr_write(0, 17, 0x0fe0); //set TX10 signal amplitude threshold to minimum
14839 + mii_mgr_write(0, 18, 0x40ba); //set squelch amplitude to higher threshold
14840 + mii_mgr_write(0, 22, 0x253f); //tune TP_IDL tail and head waveform, enable power down slew rate control
14841 + mii_mgr_write(0, 27, 0x2fda); //set PLL/Receive bias current are calibrated
14842 + mii_mgr_write(0, 28, 0xc410); //change PLL/Receive bias current to internal(RT3350)
14843 + mii_mgr_write(0, 29, 0x598b); //change PLL bias current to internal(RT3052_MP3)
14844 + mii_mgr_write(0, 31, 0x8000); //select local register
14846 + for(i=0;i<5;i++){
14847 + //LSB=1 enable PHY
14848 + mii_mgr_read(i, 26, &phy_val);
14849 + phy_val |= 0x0001;
14850 + mii_mgr_write(i, 26, phy_val);
14852 +#elif defined (CONFIG_MT7628_ASIC)
14853 +/*INIT MT7628 PHY HERE*/
14854 + val = sysRegRead(RT2880_AGPIOCFG_REG);
14855 +#if defined (CONFIG_ETH_ONE_PORT_ONLY)
14856 + val |= (MT7628_P0_EPHY_AIO_EN | MT7628_P1_EPHY_AIO_EN | MT7628_P2_EPHY_AIO_EN | MT7628_P3_EPHY_AIO_EN | MT7628_P4_EPHY_AIO_EN);
14857 + val = val & ~(MT7628_P0_EPHY_AIO_EN);
14859 + val = val & ~(MT7628_P0_EPHY_AIO_EN | MT7628_P1_EPHY_AIO_EN | MT7628_P2_EPHY_AIO_EN | MT7628_P3_EPHY_AIO_EN | MT7628_P4_EPHY_AIO_EN);
14861 + if ((*((volatile u32 *)(RALINK_SYSCTL_BASE + 0x8))) & 0x10000)
14862 + val &= ~0x1f0000;
14863 + sysRegWrite(RT2880_AGPIOCFG_REG, val);
14865 + val = sysRegRead(RSTCTRL);
14866 + val = val | RALINK_EPHY_RST;
14867 + sysRegWrite(RSTCTRL, val);
14868 + val = val & ~(RALINK_EPHY_RST);
14869 + sysRegWrite(RSTCTRL, val);
14872 + val = sysRegRead(RALINK_SYSCTL_BASE + 0x64);
14873 +#if defined (CONFIG_ETH_ONE_PORT_ONLY)
14874 + val &= 0xf003f003;
14875 + val |= 0x05540554;
14876 + sysRegWrite(RALINK_SYSCTL_BASE + 0x64, val); // set P0 EPHY LED mode
14878 + val &= 0xf003f003;
14879 + sysRegWrite(RALINK_SYSCTL_BASE + 0x64, val); // set P0~P4 EPHY LED mode
14883 + mt7628_ephy_init();
14889 +#if defined (CONFIG_ARCH_MT7623) /* TODO: just for bring up, should be removed!!! */
14890 +void mt7623_pinmux_set(void)
14892 + unsigned long regValue;
14894 + //printk("[mt7623_pinmux_set]start\n");
14895 + /* Pin277: ESW_RST (1) */
14896 + regValue = le32_to_cpu(*(volatile u_long *)(0xf0005ad0));
14897 + regValue &= ~(BITS(6,8));
14898 + regValue |= BIT(6);
14899 + *(volatile u_long *)(0xf0005ad0) = regValue;
14901 + /* Pin262: G2_TXEN (1) */
14902 + regValue = le32_to_cpu(*(volatile u_long *)(0xf0005aa0));
14903 + regValue &= ~(BITS(6,8));
14904 + regValue |= BIT(6);
14905 + *(volatile u_long *)(0xf0005aa0) = regValue;
14906 + /* Pin263: G2_TXD3 (1) */
14907 + regValue = le32_to_cpu(*(volatile u_long *)(0xf0005aa0));
14908 + regValue &= ~(BITS(9,11));
14909 + regValue |= BIT(9);
14910 + *(volatile u_long *)(0xf0005aa0) = regValue;
14911 + /* Pin264: G2_TXD2 (1) */
14912 + regValue = le32_to_cpu(*(volatile u_long *)(0xf0005aa0));
14913 + regValue &= ~(BITS(12,14));
14914 + regValue |= BIT(12);
14915 + *(volatile u_long *)(0xf0005aa0) = regValue;
14916 + /* Pin265: G2_TXD1 (1) */
14917 + regValue = le32_to_cpu(*(volatile u_long *)(0xf0005ab0));
14918 + regValue &= ~(BITS(0,2));
14919 + regValue |= BIT(0);
14920 + *(volatile u_long *)(0xf0005ab0) = regValue;
14921 + /* Pin266: G2_TXD0 (1) */
14922 + regValue = le32_to_cpu(*(volatile u_long *)(0xf0005ab0));
14923 + regValue &= ~(BITS(3,5));
14924 + regValue |= BIT(3);
14925 + *(volatile u_long *)(0xf0005ab0) = regValue;
14926 + /* Pin267: G2_TXC (1) */
14927 + regValue = le32_to_cpu(*(volatile u_long *)(0xf0005ab0));
14928 + regValue &= ~(BITS(6,8));
14929 + regValue |= BIT(6);
14930 + *(volatile u_long *)(0xf0005ab0) = regValue;
14931 + /* Pin268: G2_RXC (1) */
14932 + regValue = le32_to_cpu(*(volatile u_long *)(0xf0005ab0));
14933 + regValue &= ~(BITS(9,11));
14934 + regValue |= BIT(9);
14935 + *(volatile u_long *)(0xf0005ab0) = regValue;
14936 + /* Pin269: G2_RXD0 (1) */
14937 + regValue = le32_to_cpu(*(volatile u_long *)(0xf0005ab0));
14938 + regValue &= ~(BITS(12,14));
14939 + regValue |= BIT(12);
14940 + *(volatile u_long *)(0xf0005ab0) = regValue;
14941 + /* Pin270: G2_RXD1 (1) */
14942 + regValue = le32_to_cpu(*(volatile u_long *)(0xf0005ac0));
14943 + regValue &= ~(BITS(0,2));
14944 + regValue |= BIT(0);
14945 + *(volatile u_long *)(0xf0005ac0) = regValue;
14946 + /* Pin271: G2_RXD2 (1) */
14947 + regValue = le32_to_cpu(*(volatile u_long *)(0xf0005ac0));
14948 + regValue &= ~(BITS(3,5));
14949 + regValue |= BIT(3);
14950 + *(volatile u_long *)(0xf0005ac0) = regValue;
14951 + /* Pin272: G2_RXD3 (1) */
14952 + regValue = le32_to_cpu(*(volatile u_long *)(0xf0005ac0));
14953 + regValue &= ~(BITS(6,8));
14954 + regValue |= BIT(6);
14955 + *(volatile u_long *)(0xf0005ac0) = regValue;
14956 + /* Pin274: G2_RXDV (1) */
14957 + regValue = le32_to_cpu(*(volatile u_long *)(0xf0005ac0));
14958 + regValue &= ~(BITS(12,14));
14959 + regValue |= BIT(12);
14960 + *(volatile u_long *)(0xf0005ac0) = regValue;
14962 + /* Pin275: MDC (1) */
14963 + regValue = le32_to_cpu(*(volatile u_long *)(0xf0005ad0));
14964 + regValue &= ~(BITS(0,2));
14965 + regValue |= BIT(0);
14966 + *(volatile u_long *)(0xf0005ad0) = regValue;
14967 + /* Pin276: MDIO (1) */
14968 + regValue = le32_to_cpu(*(volatile u_long *)(0xf0005ad0));
14969 + regValue &= ~(BITS(3,5));
14970 + regValue |= BIT(3);
14971 + *(volatile u_long *)(0xf0005ad0) = regValue;
14972 + //printk("[mt7623_pinmux_set]end\n");
14975 +void wait_loop(void) {
14980 + for(i = 0; i<32; i = i+1){
14981 + read_data = *(volatile u_long *)(0xFB110610);
14987 +void trgmii_calibration_7623(void) {
14989 + unsigned int tap_a[5]; // minimum delay for all correct
14990 + unsigned int tap_b[5]; // maximum delay for all correct
14991 + unsigned int final_tap[5];
14992 + unsigned int bslip_en;
14993 + unsigned int rxc_step_size;
14994 + unsigned int rxd_step_size;
14995 + unsigned int read_data;
14996 + unsigned int tmp;
14997 + unsigned int rd_wd;
14999 + unsigned int err_cnt[5];
15000 + unsigned int init_toggle_data;
15001 + unsigned int err_flag[5];
15002 + unsigned int err_total_flag;
15003 + unsigned int training_word;
15004 + unsigned int rd_tap;
15006 + u32 TRGMII_7623_base;
15007 + u32 TRGMII_7623_RD_0;
15011 + u32 TRGMII_RXCTL;
15012 + u32 TRGMII_RCK_CTRL;
15013 + u32 TRGMII_7530_base;
15014 + TRGMII_7623_base = 0xFB110300;
15015 + TRGMII_7623_RD_0 = TRGMII_7623_base + 0x10;
15016 + TRGMII_RCK_CTRL = TRGMII_7623_base;
15017 + rxd_step_size =0x1;
15018 + rxc_step_size =0x4;
15019 + init_toggle_data = 0x00000055;
15020 + training_word = 0x000000AC;
15022 + //printk("Calibration begin ........");
15023 + *(volatile u_long *)(TRGMII_7623_base +0x04) &= 0x3fffffff; // RX clock gating in MT7623
15024 + *(volatile u_long *)(TRGMII_7623_base +0x00) |= 0x80000000; // Assert RX reset in MT7623
15025 + *(volatile u_long *)(TRGMII_7623_base +0x78) |= 0x00002000; // Set TX OE edge in MT7623
15026 + *(volatile u_long *)(TRGMII_7623_base +0x04) |= 0xC0000000; // Disable RX clock gating in MT7623
15027 + *(volatile u_long *)(TRGMII_7623_base ) &= 0x7fffffff; // Release RX reset in MT7623
15028 + //printk("Check Point 1 .....\n");
15029 + for (i = 0 ; i<5 ; i++) {
15030 + *(volatile u_long *)(TRGMII_7623_RD_0 + i*8) |= 0x80000000; // Set bslip_en = 1
15033 + //printk("Enable Training Mode in MT7530\n");
15034 + mii_mgr_read(0x1F,0x7A40,&read_data);
15035 + read_data |= 0xc0000000;
15036 + mii_mgr_write(0x1F,0x7A40,read_data); //Enable Training Mode in MT7530
15037 + err_total_flag = 0;
15038 + //printk("Adjust RXC delay in MT7623\n");
15040 + while (err_total_flag == 0 && read_data != 0x68) {
15041 + //printk("2nd Enable EDGE CHK in MT7623\n");
15042 + /* Enable EDGE CHK in MT7623*/
15043 + for (i = 0 ; i<5 ; i++) {
15044 + tmp = *(volatile u_long *)(TRGMII_7623_RD_0 + i*8);
15045 + tmp |= 0x40000000;
15046 + *(volatile u_long *)(TRGMII_7623_RD_0 + i*8) = tmp & 0x4fffffff;
15049 + err_total_flag = 1;
15050 + for (i = 0 ; i<5 ; i++) {
15051 + err_cnt[i] = ((*(volatile u_long *)(TRGMII_7623_RD_0 + i*8)) >> 8) & 0x0000000f;
15052 + rd_wd = ((*(volatile u_long *)(TRGMII_7623_RD_0 + i*8)) >> 16) & 0x000000ff;
15053 + //printk("ERR_CNT = %d, RD_WD =%x\n",err_cnt[i],rd_wd);
15054 + if ( err_cnt[i] !=0 ) {
15057 + else if (rd_wd != 0x55) {
15063 + err_total_flag = err_flag[i] & err_total_flag;
15066 + //printk("2nd Disable EDGE CHK in MT7623\n");
15067 + /* Disable EDGE CHK in MT7623*/
15068 + for (i = 0 ; i<5 ; i++) {
15069 + tmp = *(volatile u_long *)(TRGMII_7623_RD_0 + i*8);
15070 + tmp |= 0x40000000;
15071 + *(volatile u_long *)(TRGMII_7623_RD_0 + i*8) = tmp & 0x4fffffff;
15074 + //printk("2nd Disable EDGE CHK in MT7623\n");
15075 + /* Adjust RXC delay */
15076 + *(volatile u_long *)(TRGMII_7623_base +0x00) |= 0x80000000; // Assert RX reset in MT7623
15077 + *(volatile u_long *)(TRGMII_7623_base +0x04) &= 0x3fffffff; // RX clock gating in MT7623
15078 + read_data = *(volatile u_long *)(TRGMII_7623_base);
15079 + if (err_total_flag == 0) {
15080 + tmp = (read_data & 0x0000007f) + rxc_step_size;
15081 + //printk(" RXC delay = %d\n", tmp);
15083 + read_data &= 0xffffff80;
15084 + read_data |= tmp;
15086 + read_data &= 0xffffff80;
15088 + *(volatile u_long *)(TRGMII_7623_base) = read_data;
15090 + read_data &=0x000000ff;
15091 + *(volatile u_long *)(TRGMII_7623_base ) &= 0x7fffffff; // Release RX reset in MT7623
15092 + *(volatile u_long *)(TRGMII_7623_base +0x04) |= 0xC0000000; // Disable RX clock gating in MT7623
15093 + for (i = 0 ; i<5 ; i++) {
15094 + *(volatile u_long *)(TRGMII_7623_RD_0 + i*8) = (*(volatile u_long *)(TRGMII_7623_RD_0 + i*8)) | 0x80000000; // Set bslip_en = ~bit_slip_en
15097 + //printk("Finish RXC Adjustment while loop\n");
15098 + //printk("Read RD_WD MT7623\n");
15099 + /* Read RD_WD MT7623*/
15100 + for (i = 0 ; i<5 ; i++) {
15102 + while (err_flag[i] != 0) {
15103 + /* Enable EDGE CHK in MT7623*/
15104 + tmp = *(volatile u_long *)(TRGMII_7623_RD_0 + i*8);
15105 + tmp |= 0x40000000;
15106 + *(volatile u_long *)(TRGMII_7623_RD_0 + i*8) = tmp & 0x4fffffff;
15108 + read_data = *(volatile u_long *)(TRGMII_7623_RD_0 + i*8);
15109 + err_cnt[i] = (read_data >> 8) & 0x0000000f; // Read MT7623 Errcnt
15110 + rd_wd = (read_data >> 16) & 0x000000ff;
15111 + if (err_cnt[i] != 0 || rd_wd !=0x55){
15112 + err_flag [i] = 1;
15117 + /* Disable EDGE CHK in MT7623*/
15118 + *(volatile u_long *)(TRGMII_7623_RD_0 + i*8) &= 0x4fffffff;
15119 + tmp |= 0x40000000;
15120 + *(volatile u_long *)(TRGMII_7623_RD_0 + i*8) = tmp & 0x4fffffff;
15122 + //err_cnt[i] = ((read_data) >> 8) & 0x0000000f; // Read MT7623 Errcnt
15123 + if (err_flag[i] !=0) {
15124 + rd_tap = (read_data & 0x0000007f) + rxd_step_size; // Add RXD delay in MT7623
15125 + read_data = (read_data & 0xffffff80) | rd_tap;
15126 + *(volatile u_long *)(TRGMII_7623_RD_0 + i*8) = read_data;
15127 + tap_a[i] = rd_tap;
15129 + rd_tap = (read_data & 0x0000007f) + 4;
15130 + read_data = (read_data & 0xffffff80) | rd_tap;
15131 + *(volatile u_long *)(TRGMII_7623_RD_0 + i*8) = read_data;
15133 + //err_cnt[i] = (*(volatile u_long *)(TRGMII_7623_RD_0 + i*8) >> 8) & 0x0000000f; // Read MT7623 Errcnt
15136 + //printk("%dth bit Tap_a = %d\n", i, tap_a[i]);
15138 + //printk("Last While Loop\n");
15139 + for (i = 0 ; i<5 ; i++) {
15140 + //printk(" Bit%d\n", i);
15142 + while ((err_cnt[i] == 0) && (rd_tap !=128)) {
15143 + read_data = *(volatile u_long *)(TRGMII_7623_RD_0 + i*8);
15144 + rd_tap = (read_data & 0x0000007f) + rxd_step_size; // Add RXD delay in MT7623
15145 + read_data = (read_data & 0xffffff80) | rd_tap;
15146 + *(volatile u_long *)(TRGMII_7623_RD_0 + i*8) = read_data;
15147 + /* Enable EDGE CHK in MT7623*/
15148 + tmp = *(volatile u_long *)(TRGMII_7623_RD_0 + i*8);
15149 + tmp |= 0x40000000;
15150 + *(volatile u_long *)(TRGMII_7623_RD_0 + i*8) = tmp & 0x4fffffff;
15152 + err_cnt[i] = ((*(volatile u_long *)(TRGMII_7623_RD_0 + i*8)) >> 8) & 0x0000000f; // Read MT7623 Errcnt
15153 + /* Disable EDGE CHK in MT7623*/
15154 + tmp = *(volatile u_long *)(TRGMII_7623_RD_0 + i*8);
15155 + tmp |= 0x40000000;
15156 + *(volatile u_long *)(TRGMII_7623_RD_0 + i*8) = tmp & 0x4fffffff;
15158 + //err_cnt[i] = ((*(volatile u_long *)(TRGMII_7623_RD_0 + i*8)) >> 8) & 0x0000000f; // Read MT7623 Errcnt
15161 + tap_b[i] = rd_tap;// -rxd_step_size; // Record the max delay TAP_B
15162 + //printk("tap_b[%d] is %d \n", i,tap_b[i]);
15163 + final_tap[i] = (tap_a[i]+tap_b[i])/2; // Calculate RXD delay = (TAP_A + TAP_B)/2
15164 + //printk("%dth bit Final Tap = %d\n", i, final_tap[i]);
15165 + read_data = (read_data & 0xffffff80) | final_tap[i];
15166 + *(volatile u_long *)(TRGMII_7623_RD_0 + i*8) = read_data;
15168 +// /*word alignment*/
15169 +// mii_mgr_read(0x1F,0x7A50,&read_data);
15170 +// read_data &= ~(0xff);
15171 +// read_data |= 0xac;
15172 +// mii_mgr_write(0x1F,0x7A50,read_data);
15173 +// while (i <10) {
15177 +// /* Enable EDGE CHK in MT7623*/
15178 +// for (i=0; i<5; i++) {
15179 +// tmp = *(volatile u_long *)(TRGMII_7623_RD_0 + i*8);
15180 +// tmp |= 0x40000000;
15181 +// *(volatile u_long *)(TRGMII_7623_RD_0 + i*8) = tmp & 0x4fffffff;
15183 +// /* Disable EDGE CHK in MT7623*/
15184 +// tmp = *(volatile u_long *)(TRGMII_7623_RD_0 + i*8);
15185 +// tmp |= 0x40000000;
15186 +// *(volatile u_long *)(TRGMII_7623_RD_0 + i*8) = tmp & 0x4fffffff;
15188 +// read_data = *(volatile u_long *)(TRGMII_7623_RD_0+i*8);
15189 +// printk(" MT7623 training word = %x\n", read_data);
15193 + mii_mgr_read(0x1F,0x7A40,&read_data);
15194 + //printk(" MT7530 0x7A40 = %x\n", read_data);
15195 + read_data &=0x3fffffff;
15196 + mii_mgr_write(0x1F,0x7A40,read_data);
15200 +void trgmii_calibration_7530(void){
15202 + unsigned int tap_a[5];
15203 + unsigned int tap_b[5];
15204 + unsigned int final_tap[5];
15205 + unsigned int bslip_en;
15206 + unsigned int rxc_step_size;
15207 + unsigned int rxd_step_size;
15208 + unsigned int read_data;
15209 + unsigned int tmp;
15211 + unsigned int err_cnt[5];
15212 + unsigned int rd_wd;
15213 + unsigned int init_toggle_data;
15214 + unsigned int err_flag[5];
15215 + unsigned int err_total_flag;
15216 + unsigned int training_word;
15217 + unsigned int rd_tap;
15219 + u32 TRGMII_7623_base;
15220 + u32 TRGMII_7530_RD_0;
15224 + u32 TRGMII_RXCTL;
15225 + u32 TRGMII_RCK_CTRL;
15226 + u32 TRGMII_7530_base;
15227 + u32 TRGMII_7530_TX_base;
15228 + TRGMII_7623_base = 0xFB110300;
15229 + TRGMII_7530_base = 0x7A00;
15230 + TRGMII_7530_RD_0 = TRGMII_7530_base + 0x10;
15231 + TRGMII_RCK_CTRL = TRGMII_7623_base;
15232 + rxd_step_size = 0x1;
15233 + rxc_step_size = 0x8;
15234 + init_toggle_data = 0x00000055;
15235 + training_word = 0x000000AC;
15237 + TRGMII_7530_TX_base = TRGMII_7530_base + 0x50;
15239 + //printk("Calibration begin ........\n");
15240 + *(volatile u_long *)(TRGMII_7623_base + 0x40) |= 0x80000000;
15241 + mii_mgr_read(0x1F, 0x7a10, &read_data);
15242 + //printk("TRGMII_7530_RD_0 is %x\n", read_data);
15244 + mii_mgr_read(0x1F,TRGMII_7530_base+0x04,&read_data);
15245 + read_data &= 0x3fffffff;
15246 + mii_mgr_write(0x1F,TRGMII_7530_base+0x04,read_data); // RX clock gating in MT7530
15248 + mii_mgr_read(0x1F,TRGMII_7530_base+0x78,&read_data);
15249 + read_data |= 0x00002000;
15250 + mii_mgr_write(0x1F,TRGMII_7530_base+0x78,read_data); // Set TX OE edge in MT7530
15252 + mii_mgr_read(0x1F,TRGMII_7530_base,&read_data);
15253 + read_data |= 0x80000000;
15254 + mii_mgr_write(0x1F,TRGMII_7530_base,read_data); // Assert RX reset in MT7530
15257 + mii_mgr_read(0x1F,TRGMII_7530_base,&read_data);
15258 + read_data &= 0x7fffffff;
15259 + mii_mgr_write(0x1F,TRGMII_7530_base,read_data); // Release RX reset in MT7530
15261 + mii_mgr_read(0x1F,TRGMII_7530_base+0x04,&read_data);
15262 + read_data |= 0xC0000000;
15263 + mii_mgr_write(0x1F,TRGMII_7530_base+0x04,read_data); // Disable RX clock gating in MT7530
15265 + //printk("Enable Training Mode in MT7623\n");
15266 + /*Enable Training Mode in MT7623*/
15267 + *(volatile u_long *)(TRGMII_7623_base + 0x40) &= 0xbfffffff;
15268 + *(volatile u_long *)(TRGMII_7623_base + 0x40) |= 0x80000000;
15269 + *(volatile u_long *)(TRGMII_7623_base + 0x78) &= 0xfffff0ff;
15270 + *(volatile u_long *)(TRGMII_7623_base + 0x78) |= 0x00000400;
15272 + err_total_flag =0;
15273 + //printk("Adjust RXC delay in MT7530\n");
15275 + while (err_total_flag == 0 && (read_data != 0x68)) {
15276 + //printk("2nd Enable EDGE CHK in MT7530\n");
15277 + /* Enable EDGE CHK in MT7530*/
15278 + for (i = 0 ; i<5 ; i++) {
15279 + mii_mgr_read(0x1F,TRGMII_7530_RD_0+i*8,&read_data);
15280 + read_data |= 0x40000000;
15281 + read_data &= 0x4fffffff;
15282 + mii_mgr_write(0x1F,TRGMII_7530_RD_0+i*8,read_data);
15284 + //printk("2nd Disable EDGE CHK in MT7530\n");
15285 + mii_mgr_read(0x1F,TRGMII_7530_RD_0+i*8,&err_cnt[i]);
15286 + //printk("***** MT7530 %dth bit ERR_CNT =%x\n",i, err_cnt[i]);
15287 + //printk("MT7530 %dth bit ERR_CNT =%x\n",i, err_cnt[i]);
15288 + err_cnt[i] >>= 8;
15289 + err_cnt[i] &= 0x0000ff0f;
15290 + rd_wd = err_cnt[i] >> 8;
15291 + rd_wd &= 0x000000ff;
15292 + err_cnt[i] &= 0x0000000f;
15293 + //mii_mgr_read(0x1F,0x7a10,&read_data);
15294 + if ( err_cnt[i] !=0 ) {
15297 + else if (rd_wd != 0x55) {
15303 + err_total_flag = err_flag[i];
15305 + err_total_flag = err_flag[i] & err_total_flag;
15307 + /* Disable EDGE CHK in MT7530*/
15308 + mii_mgr_read(0x1F,TRGMII_7530_RD_0+i*8,&read_data);
15309 + read_data |= 0x40000000;
15310 + read_data &= 0x4fffffff;
15311 + mii_mgr_write(0x1F,TRGMII_7530_RD_0+i*8,read_data);
15314 + /*Adjust RXC delay*/
15315 + if (err_total_flag ==0) {
15316 + mii_mgr_read(0x1F,TRGMII_7530_base,&read_data);
15317 + read_data |= 0x80000000;
15318 + mii_mgr_write(0x1F,TRGMII_7530_base,read_data); // Assert RX reset in MT7530
15320 + mii_mgr_read(0x1F,TRGMII_7530_base+0x04,&read_data);
15321 + read_data &= 0x3fffffff;
15322 + mii_mgr_write(0x1F,TRGMII_7530_base+0x04,read_data); // RX clock gating in MT7530
15324 + mii_mgr_read(0x1F,TRGMII_7530_base,&read_data);
15326 + tmp &= 0x0000007f;
15327 + tmp += rxc_step_size;
15328 + //printk("Current rxc delay = %d\n", tmp);
15329 + read_data &= 0xffffff80;
15330 + read_data |= tmp;
15331 + mii_mgr_write (0x1F,TRGMII_7530_base,read_data);
15332 + mii_mgr_read(0x1F,TRGMII_7530_base,&read_data);
15333 + //printk("Current RXC delay = %x\n", read_data);
15335 + mii_mgr_read(0x1F,TRGMII_7530_base,&read_data);
15336 + read_data &= 0x7fffffff;
15337 + mii_mgr_write(0x1F,TRGMII_7530_base,read_data); // Release RX reset in MT7530
15339 + mii_mgr_read(0x1F,TRGMII_7530_base+0x04,&read_data);
15340 + read_data |= 0xc0000000;
15341 + mii_mgr_write(0x1F,TRGMII_7530_base+0x04,read_data); // Disable RX clock gating in MT7530
15345 + //printk("RXC delay is %d\n", tmp);
15346 + //printk("Finish RXC Adjustment while loop\n");
15348 + //printk("Read RD_WD MT7530\n");
15349 + /* Read RD_WD MT7530*/
15350 + for (i = 0 ; i<5 ; i++) {
15352 + while (err_flag[i] != 0) {
15353 + /* Enable EDGE CHK in MT7530*/
15354 + mii_mgr_read(0x1F,TRGMII_7530_RD_0+i*8,&read_data);
15355 + read_data |= 0x40000000;
15356 + read_data &= 0x4fffffff;
15357 + mii_mgr_write(0x1F,TRGMII_7530_RD_0+i*8,read_data);
15359 + err_cnt[i] = (read_data >> 8) & 0x0000000f;
15360 + rd_wd = (read_data >> 16) & 0x000000ff;
15361 + //printk("##### %dth bit ERR_CNT = %x RD_WD =%x ######\n", i, err_cnt[i],rd_wd);
15362 + if (err_cnt[i] != 0 || rd_wd !=0x55){
15363 + err_flag [i] = 1;
15368 + if (err_flag[i] !=0 ) {
15369 + rd_tap = (read_data & 0x0000007f) + rxd_step_size; // Add RXD delay in MT7530
15370 + read_data = (read_data & 0xffffff80) | rd_tap;
15371 + mii_mgr_write(0x1F,TRGMII_7530_RD_0+i*8,read_data);
15372 + tap_a[i] = rd_tap;
15374 + tap_a[i] = (read_data & 0x0000007f); // Record the min delay TAP_A
15375 + rd_tap = tap_a[i] + 0x4;
15376 + read_data = (read_data & 0xffffff80) | rd_tap ;
15377 + mii_mgr_write(0x1F,TRGMII_7530_RD_0+i*8,read_data);
15380 + /* Disable EDGE CHK in MT7530*/
15381 + mii_mgr_read(0x1F,TRGMII_7530_RD_0+i*8,&read_data);
15382 + read_data |= 0x40000000;
15383 + read_data &= 0x4fffffff;
15384 + mii_mgr_write(0x1F,TRGMII_7530_RD_0+i*8,read_data);
15388 + //printk("%dth bit Tap_a = %d\n", i, tap_a[i]);
15390 + //printk("Last While Loop\n");
15391 + for (i = 0 ; i<5 ; i++) {
15393 + while (err_cnt[i] == 0 && (rd_tap!=128)) {
15394 + /* Enable EDGE CHK in MT7530*/
15395 + mii_mgr_read(0x1F,TRGMII_7530_RD_0+i*8,&read_data);
15396 + read_data |= 0x40000000;
15397 + read_data &= 0x4fffffff;
15398 + mii_mgr_write(0x1F,TRGMII_7530_RD_0+i*8,read_data);
15400 + err_cnt[i] = (read_data >> 8) & 0x0000000f;
15401 + //rd_tap = (read_data & 0x0000007f) + 0x4; // Add RXD delay in MT7530
15402 + if (err_cnt[i] == 0 && (rd_tap!=128)) {
15403 + rd_tap = (read_data & 0x0000007f) + rxd_step_size; // Add RXD delay in MT7530
15404 + read_data = (read_data & 0xffffff80) | rd_tap;
15405 + mii_mgr_write(0x1F,TRGMII_7530_RD_0+i*8,read_data);
15407 + /* Disable EDGE CHK in MT7530*/
15408 + mii_mgr_read(0x1F,TRGMII_7530_RD_0+i*8,&read_data);
15409 + read_data |= 0x40000000;
15410 + read_data &= 0x4fffffff;
15411 + mii_mgr_write(0x1F,TRGMII_7530_RD_0+i*8,read_data);
15414 + tap_b[i] = rd_tap;// - rxd_step_size; // Record the max delay TAP_B
15415 + //printk("%dth bit Tap_b = %d, ERR_CNT=%d\n", i, tap_b[i],err_cnt[i]);
15416 + final_tap[i] = (tap_a[i]+tap_b[i])/2; // Calculate RXD delay = (TAP_A + TAP_B)/2
15417 + //printk("%dth bit Final Tap = %d\n", i, final_tap[i]);
15419 + read_data = ( read_data & 0xffffff80) | final_tap[i];
15420 + mii_mgr_write(0x1F,TRGMII_7530_RD_0+i*8,read_data);
15422 + *(volatile u_long *)(TRGMII_7623_base + 0x40) &=0x3fffffff;
15426 +void set_trgmii_325_delay_setting(void)
15429 + *(volatile u_long *)(0xfb110300) = 0x80020050;
15430 + *(volatile u_long *)(0xfb110304) = 0x00980000;
15431 + *(volatile u_long *)(0xfb110300) = 0x40020050;
15432 + *(volatile u_long *)(0xfb110304) = 0xc0980000;
15433 + *(volatile u_long *)(0xfb110310) = 0x00000028;
15434 + *(volatile u_long *)(0xfb110318) = 0x0000002e;
15435 + *(volatile u_long *)(0xfb110320) = 0x0000002d;
15436 + *(volatile u_long *)(0xfb110328) = 0x0000002b;
15437 + *(volatile u_long *)(0xfb110330) = 0x0000002a;
15438 + *(volatile u_long *)(0xfb110340) = 0x00020000;
15440 + mii_mgr_write(31, 0x7a00, 0x10);
15441 + mii_mgr_write(31, 0x7a10, 0x23);
15442 + mii_mgr_write(31, 0x7a18, 0x27);
15443 + mii_mgr_write(31, 0x7a20, 0x24);
15444 + mii_mgr_write(31, 0x7a28, 0x29);
15445 + mii_mgr_write(31, 0x7a30, 0x24);
15450 +void setup_internal_gsw(void)
15456 + mt7623_pinmux_set(); /* TODO: just for bring up, should be removed!!! */
15459 + /* GE1: RGMII mode setting */
15460 + *(volatile u_long *)(0xfb110300) = 0x80020000;
15461 + *(volatile u_long *)(0xfb110304) = 0x00980000;
15462 + *(volatile u_long *)(0xfb110300) = 0x40020000;
15463 + *(volatile u_long *)(0xfb110304) = 0xc0980000;
15464 + *(volatile u_long *)(0xfb110310) = 0x00000041;
15465 + *(volatile u_long *)(0xfb110318) = 0x00000044;
15466 + *(volatile u_long *)(0xfb110320) = 0x00000043;
15467 + *(volatile u_long *)(0xfb110328) = 0x00000042;
15468 + *(volatile u_long *)(0xfb110330) = 0x00000042;
15469 + *(volatile u_long *)(0xfb110340) = 0x00020000;
15470 + *(volatile u_long *)(0xfb110390) &= 0xfffffff8; //RGMII mode
15472 + /* GE1: TRGMII mode setting */
15473 + *(volatile u_long *)(0xfb110390) |= 0x00000002; //TRGMII mode
15476 + /*Todo: Hardware reset Switch*/
15477 + /*Hardware reset Switch*/
15478 +#if defined(CONFIG_ARCH_MT7623)
15479 + regValue = *(volatile u_long *)(0xfb00000c);
15480 + /*MT7530 Reset. Flows for MT7623 and MT7683 are both executed.*/
15481 + /* Should Modify this section if EFUSE is ready*/
15482 + /*For MT7683 reset MT7530*/
15483 + if(!(regValue & (1<<16)))
15485 + *(volatile u_long *)(0xf0005520) &= ~(1<<1);
15487 + *(volatile u_long *)(0xf0005520) |= (1<<1);
15490 + //printk("Assert MT7623 RXC reset\n");
15491 + *(volatile u_long *)(0xfb110300) |= 0x80000000; // Assert MT7623 RXC reset
15492 + /*For MT7623 reset MT7530*/
15493 + *(volatile u_long *)(RALINK_SYSCTL_BASE + 0x34) |= (0x1 << 2);
15495 + *(volatile u_long *)(RALINK_SYSCTL_BASE + 0x34) &= ~(0x1 << 2);
15499 +#if defined (CONFIG_GE1_RGMII_FORCE_1000) || defined (CONFIG_GE1_TRGMII_FORCE_1200) || defined (CONFIG_GE1_TRGMII_FORCE_2000) || defined (CONFIG_GE1_TRGMII_FORCE_2600)
15500 + for(i=0;i<=4;i++)
15503 + mii_mgr_read(i, 0x0 ,®Value);
15504 + regValue |= (0x1<<11);
15505 + mii_mgr_write(i, 0x0, regValue);
15507 + mii_mgr_write(31, 0x7000, 0x3); //reset switch
15510 +#if defined (CONFIG_MT7621_ASIC) || defined (CONFIG_ARCH_MT7623)
15512 + if((sysRegRead(0xbe00000c)&0xFFFF)==0x0101) {
15513 + sysRegWrite(RALINK_ETH_SW_BASE+0x100, 0x2105e30b);//(GE1, Force 1000M/FD, FC ON)
15514 + mii_mgr_write(31, 0x3600, 0x5e30b);
15518 + sysRegWrite(RALINK_ETH_SW_BASE+0x100, 0x2105e33b);//(GE1, Force 1000M/FD, FC ON)
15519 + mii_mgr_write(31, 0x3600, 0x5e33b);
15520 + mii_mgr_read(31, 0x3600 ,®Value);
15523 + sysRegWrite(RALINK_ETH_SW_BASE+0x200, 0x00008000);//(GE2, Link down)
15526 +#if defined (CONFIG_GE1_RGMII_FORCE_1000) || defined (CONFIG_GE1_TRGMII_FORCE_1200) || defined (CONFIG_GE1_TRGMII_FORCE_2000) || defined (CONFIG_GE1_TRGMII_FORCE_2600)
15527 + //regValue = 0x117ccf; //Enable Port 6, P5 as GMAC5, P5 disable*/
15528 + mii_mgr_read(31, 0x7804 ,®Value);
15529 + regValue &= ~(1<<8); //Enable Port 6
15530 + regValue |= (1<<6); //Disable Port 5
15531 + regValue |= (1<<13); //Port 5 as GMAC, no Internal PHY
15533 +#if defined (CONFIG_RAETH_GMAC2)
15534 + //RGMII2=Normal mode
15535 + *(volatile u_long *)(RALINK_SYSCTL_BASE + 0x60) &= ~(0x1 << 15);
15537 + //GMAC2= RGMII mode
15538 + *(volatile u_long *)(SYSCFG1) &= ~(0x3 << 14);
15539 + mii_mgr_write(31, 0x3500, 0x56300); //MT7530 P5 AN, we can ignore this setting??????
15540 + sysRegWrite(RALINK_ETH_SW_BASE+0x200, 0x21056300);//(GE2, auto-polling)
15541 + enable_auto_negotiate(0);//set polling address
15543 + /* set MT7530 Port 5 to PHY 0/4 mode */
15544 +#if defined (CONFIG_GE_RGMII_INTERNAL_P0_AN)
15545 + regValue &= ~((1<<13)|(1<<6));
15546 + regValue |= ((1<<7)|(1<<16)|(1<<20));
15547 +#elif defined (CONFIG_GE_RGMII_INTERNAL_P4_AN)
15548 + regValue &= ~((1<<13)|(1<<6)|(1<<20));
15549 + regValue |= ((1<<7)|(1<<16));
15551 + /*Set MT7530 phy direct access mode**/
15552 + regValue &= ~(1<<5);
15554 + //sysRegWrite(GDMA2_FWD_CFG, 0x20710000);
15556 + regValue |= (1<<16);//change HW-TRAP
15557 + printk("change HW-TRAP to 0x%x\n",regValue);
15558 + mii_mgr_write(31, 0x7804 ,regValue);
15560 + mii_mgr_read(31, 0x7800, ®Value);
15561 + regValue = (regValue >> 9) & 0x3;
15562 + if(regValue == 0x3)//25Mhz Xtal
15564 + else if(regValue == 0x2) //40Mhz
15569 + if(xtal_mode == 1) { //25Mhz Xtal
15571 + } else if(xtal_mode == 2) { //40Mhz
15572 + mii_mgr_write(0, 13, 0x1f); // disable MT7530 core clock
15573 + mii_mgr_write(0, 14, 0x410);
15574 + mii_mgr_write(0, 13, 0x401f);
15575 + mii_mgr_write(0, 14, 0x0);
15577 + mii_mgr_write(0, 13, 0x1f); // disable MT7530 PLL
15578 + mii_mgr_write(0, 14, 0x40d);
15579 + mii_mgr_write(0, 13, 0x401f);
15580 + mii_mgr_write(0, 14, 0x2020);
15582 + mii_mgr_write(0, 13, 0x1f); // for MT7530 core clock = 500Mhz
15583 + mii_mgr_write(0, 14, 0x40e);
15584 + mii_mgr_write(0, 13, 0x401f);
15585 + mii_mgr_write(0, 14, 0x119);
15587 + mii_mgr_write(0, 13, 0x1f); // enable MT7530 PLL
15588 + mii_mgr_write(0, 14, 0x40d);
15589 + mii_mgr_write(0, 13, 0x401f);
15590 + mii_mgr_write(0, 14, 0x2820);
15592 + udelay(20); //suggest by CD
15594 + mii_mgr_write(0, 13, 0x1f); // enable MT7530 core clock
15595 + mii_mgr_write(0, 14, 0x410);
15596 + mii_mgr_write(0, 13, 0x401f);
15601 +#if defined (CONFIG_GE1_TRGMII_FORCE_1200) && defined (CONFIG_MT7621_ASIC)
15602 + mii_mgr_write(0, 14, 0x3); /*TRGMII*/
15604 + mii_mgr_write(0, 14, 0x1); /*RGMII*/
15605 +/* set MT7530 central align */
15606 + mii_mgr_read(31, 0x7830, ®Value);
15608 + regValue |= 1<<1;
15609 + mii_mgr_write(31, 0x7830, regValue);
15611 + mii_mgr_read(31, 0x7a40, ®Value);
15612 + regValue &= ~(1<<30);
15613 + mii_mgr_write(31, 0x7a40, regValue);
15615 + regValue = 0x855;
15616 + mii_mgr_write(31, 0x7a78, regValue);
15619 + mii_mgr_write(31, 0x7b00, 0x104); //delay setting for 10/1000M
15620 + mii_mgr_write(31, 0x7b04, 0x10); //delay setting for 10/1000M
15623 + mii_mgr_write(31, 0x7a54, 0x88); //lower GE1 driving
15624 + mii_mgr_write(31, 0x7a5c, 0x88); //lower GE1 driving
15625 + mii_mgr_write(31, 0x7a64, 0x88); //lower GE1 driving
15626 + mii_mgr_write(31, 0x7a6c, 0x88); //lower GE1 driving
15627 + mii_mgr_write(31, 0x7a74, 0x88); //lower GE1 driving
15628 + mii_mgr_write(31, 0x7a7c, 0x88); //lower GE1 driving
15629 + mii_mgr_write(31, 0x7810, 0x11); //lower GE2 driving
15630 + /*Set MT7623/MT7683 TX Driving*/
15631 + *(volatile u_long *)(0xfb110354) = 0x88;
15632 + *(volatile u_long *)(0xfb11035c) = 0x88;
15633 + *(volatile u_long *)(0xfb110364) = 0x88;
15634 + *(volatile u_long *)(0xfb11036c) = 0x88;
15635 + *(volatile u_long *)(0xfb110374) = 0x88;
15636 + *(volatile u_long *)(0xfb11037c) = 0x88;
15637 +#if defined (CONFIG_GE2_RGMII_AN)
15638 + *(volatile u_long *)(0xf0005f00) = 0xe00; //Set GE2 driving and slew rate
15640 + *(volatile u_long *)(0xf0005f00) = 0xa00; //Set GE2 driving and slew rate
15642 + *(volatile u_long *)(0xf00054c0) = 0x5; //set GE2 TDSEL
15643 + *(volatile u_long *)(0xf0005ed0) = 0; //set GE2 TUNE
15645 + /* TRGMII Clock */
15646 +// printk("Set TRGMII mode clock stage 1\n");
15647 + mii_mgr_write(0, 13, 0x1f);
15648 + mii_mgr_write(0, 14, 0x404);
15649 + mii_mgr_write(0, 13, 0x401f);
15650 + if (xtal_mode == 1){ //25MHz
15651 +#if defined (CONFIG_GE1_TRGMII_FORCE_2900)
15652 + mii_mgr_write(0, 14, 0x1d00); // 362.5MHz
15653 +#elif defined (CONFIG_GE1_TRGMII_FORCE_2600)
15654 + mii_mgr_write(0, 14, 0x1a00); // 325MHz
15655 +#elif defined (CONFIG_GE1_TRGMII_FORCE_2000)
15656 + mii_mgr_write(0, 14, 0x1400); //250MHz
15657 +#elif defined (CONFIG_GE1_RGMII_FORCE_1000)
15658 + mii_mgr_write(0, 14, 0x00a0); //125MHz
15660 + }else if(xtal_mode == 2){//40MHz
15661 +#if defined (CONFIG_GE1_TRGMII_FORCE_2900)
15662 + mii_mgr_write(0, 14, 0x1220); // 362.5MHz
15663 +#elif defined (CONFIG_GE1_TRGMII_FORCE_2600)
15664 + mii_mgr_write(0, 14, 0x1040); // 325MHz
15665 +#elif defined (CONFIG_GE1_TRGMII_FORCE_2000)
15666 + mii_mgr_write(0, 14, 0x0c80); //250MHz
15667 +#elif defined (CONFIG_GE1_RGMII_FORCE_1000)
15668 + mii_mgr_write(0, 14, 0x0640); //125MHz
15671 +// printk("Set TRGMII mode clock stage 2\n");
15672 + mii_mgr_write(0, 13, 0x1f);
15673 + mii_mgr_write(0, 14, 0x405);
15674 + mii_mgr_write(0, 13, 0x401f);
15675 + mii_mgr_write(0, 14, 0x0);
15677 +// printk("Set TRGMII mode clock stage 3\n");
15678 + mii_mgr_write(0, 13, 0x1f);
15679 + mii_mgr_write(0, 14, 0x409);
15680 + mii_mgr_write(0, 13, 0x401f);
15681 + mii_mgr_write(0, 14, 0x0087);
15683 +// printk("Set TRGMII mode clock stage 4\n");
15684 + mii_mgr_write(0, 13, 0x1f);
15685 + mii_mgr_write(0, 14, 0x40a);
15686 + mii_mgr_write(0, 13, 0x401f);
15687 + mii_mgr_write(0, 14, 0x0087);
15689 +// printk("Set TRGMII mode clock stage 5\n");
15690 + mii_mgr_write(0, 13, 0x1f);
15691 + mii_mgr_write(0, 14, 0x403);
15692 + mii_mgr_write(0, 13, 0x401f);
15693 + mii_mgr_write(0, 14, 0x1800);
15695 +// printk("Set TRGMII mode clock stage 6\n");
15696 + mii_mgr_write(0, 13, 0x1f);
15697 + mii_mgr_write(0, 14, 0x403);
15698 + mii_mgr_write(0, 13, 0x401f);
15699 + mii_mgr_write(0, 14, 0x1c00);
15701 +// printk("Set TRGMII mode clock stage 7\n");
15702 + mii_mgr_write(0, 13, 0x1f);
15703 + mii_mgr_write(0, 14, 0x401);
15704 + mii_mgr_write(0, 13, 0x401f);
15705 + mii_mgr_write(0, 14, 0xc020);
15707 +// printk("Set TRGMII mode clock stage 8\n");
15708 + mii_mgr_write(0, 13, 0x1f);
15709 + mii_mgr_write(0, 14, 0x406);
15710 + mii_mgr_write(0, 13, 0x401f);
15711 + mii_mgr_write(0, 14, 0xa030);
15713 +// printk("Set TRGMII mode clock stage 9\n");
15714 + mii_mgr_write(0, 13, 0x1f);
15715 + mii_mgr_write(0, 14, 0x406);
15716 + mii_mgr_write(0, 13, 0x401f);
15717 + mii_mgr_write(0, 14, 0xa038);
15719 + udelay(120); // for MT7623 bring up test
15721 +// printk("Set TRGMII mode clock stage 10\n");
15722 + mii_mgr_write(0, 13, 0x1f);
15723 + mii_mgr_write(0, 14, 0x410);
15724 + mii_mgr_write(0, 13, 0x401f);
15725 + mii_mgr_write(0, 14, 0x3);
15727 +// printk("Set TRGMII mode clock stage 11\n");
15729 + mii_mgr_read(31, 0x7830 ,®Value);
15730 + regValue &=0xFFFFFFFC;
15731 + regValue |=0x00000001;
15732 + mii_mgr_write(31, 0x7830, regValue);
15734 +// printk("Set TRGMII mode clock stage 12\n");
15735 + mii_mgr_read(31, 0x7a40 ,®Value);
15736 + regValue &= ~(0x1<<30);
15737 + regValue &= ~(0x1<<28);
15738 + mii_mgr_write(31, 0x7a40, regValue);
15740 + //mii_mgr_write(31, 0x7a78, 0x855);
15741 + mii_mgr_write(31, 0x7a78, 0x55);
15742 +// printk(" Adjust MT7530 TXC delay\n");
15743 + udelay(100); // for mt7623 bring up test
15745 +// printk(" Release MT7623 RXC Reset\n");
15746 + *(volatile u_long *)(0xfb110300) &= 0x7fffffff; // Release MT7623 RXC reset
15748 + for(i=0;i<=4;i++)
15750 + mii_mgr_write(i, 13, 0x7);
15751 + mii_mgr_write(i, 14, 0x3C);
15752 + mii_mgr_write(i, 13, 0x4007);
15753 + mii_mgr_write(i, 14, 0x0);
15756 + //Disable EEE 10Base-Te:
15757 + for(i=0;i<=4;i++)
15759 + mii_mgr_write(i, 13, 0x1f);
15760 + mii_mgr_write(i, 14, 0x027b);
15761 + mii_mgr_write(i, 13, 0x401f);
15762 + mii_mgr_write(i, 14, 0x1177);
15765 + for(i=0;i<=4;i++)
15768 + mii_mgr_read(i, 0x0 ,®Value);
15769 + regValue &= ~(0x1<<11);
15770 + mii_mgr_write(i, 0x0, regValue);
15773 + for(i=0;i<=4;i++) {
15774 + mii_mgr_read(i, 4, ®Value);
15775 + regValue |= (3<<7); //turn on 100Base-T Advertisement
15776 + mii_mgr_write(i, 4, regValue);
15778 + mii_mgr_read(i, 9, ®Value);
15779 + regValue |= (3<<8); //turn on 1000Base-T Advertisement
15780 + mii_mgr_write(i, 9, regValue);
15783 + mii_mgr_read(i, 0, ®Value);
15784 + regValue |= (1 << 9);
15785 + mii_mgr_write(i, 0, regValue);
15788 + mii_mgr_read(31, 0x7808 ,®Value);
15789 + regValue |= (3<<16); //Enable INTR
15790 + mii_mgr_write(31, 0x7808 ,regValue);
15793 +void mt7623_ethifsys_init(void)
15795 +#define TRGPLL_CON0 (0xF0209280)
15796 +#define TRGPLL_CON1 (0xF0209284)
15797 +#define TRGPLL_CON2 (0xF0209288)
15798 +#define TRGPLL_PWR_CON0 (0xF020928C)
15799 +#define ETHPLL_CON0 (0xF0209290)
15800 +#define ETHPLL_CON1 (0xF0209294)
15801 +#define ETHPLL_CON2 (0xF0209298)
15802 +#define ETHPLL_PWR_CON0 (0xF020929C)
15803 +#define ETH_PWR_CON (0xF00062A0)
15804 +#define HIF_PWR_CON (0xF00062A4)
15806 + u32 temp, pwr_ack_status;
15807 + /*=========================================================================*/
15808 + /* Enable ETHPLL & TRGPLL*/
15809 + /*=========================================================================*/
15811 + temp = sysRegRead(ETHPLL_PWR_CON0);
15812 + sysRegWrite(ETHPLL_PWR_CON0, temp | 0x1);
15814 + temp = sysRegRead(TRGPLL_PWR_CON0);
15815 + sysRegWrite(TRGPLL_PWR_CON0, temp | 0x1);
15817 + udelay(5); /* wait for xPLL_PWR_ON ready (min delay is 1us)*/
15819 + /* xPLL ISO Disable*/
15820 + temp = sysRegRead(ETHPLL_PWR_CON0);
15821 + sysRegWrite(ETHPLL_PWR_CON0, temp & ~0x2);
15823 + temp = sysRegRead(TRGPLL_PWR_CON0);
15824 + sysRegWrite(TRGPLL_PWR_CON0, temp & ~0x2);
15826 + /* xPLL Frequency Set*/
15827 + temp = sysRegRead(ETHPLL_CON0);
15828 + sysRegWrite(ETHPLL_CON0, temp | 0x1);
15829 +#if defined (CONFIG_GE1_TRGMII_FORCE_2900)
15830 + temp = sysRegRead(TRGPLL_CON0);
15831 + sysRegWrite(TRGPLL_CON0, temp | 0x1);
15832 +#elif defined (CONFIG_GE1_TRGMII_FORCE_2600)
15833 + sysRegWrite(TRGPLL_CON1, 0xB2000000);
15834 + temp = sysRegRead(TRGPLL_CON0);
15835 + sysRegWrite(TRGPLL_CON0, temp | 0x1);
15836 +#elif defined (CONFIG_GE1_TRGMII_FORCE_2000)
15837 + sysRegWrite(TRGPLL_CON1, 0xCCEC4EC5);
15838 + sysRegWrite(TRGPLL_CON0, 0x121);
15840 + udelay(40); /* wait for PLL stable (min delay is 20us)*/
15843 + /*=========================================================================*/
15844 + /* Power on ETHDMASYS and HIFSYS*/
15845 + /*=========================================================================*/
15846 + /* Power on ETHDMASYS*/
15847 + sysRegWrite(0xF0006000, 0x0b160001);
15848 + pwr_ack_status = (sysRegRead(ETH_PWR_CON) & 0x0000f000) >> 12;
15850 + if(pwr_ack_status == 0x0) {
15851 + printk("ETH already turn on and power on flow will be skipped...\n");
15853 + temp = sysRegRead(ETH_PWR_CON) ;
15854 + sysRegWrite(ETH_PWR_CON, temp | 0x4); /* PWR_ON*/
15855 + temp = sysRegRead(ETH_PWR_CON) ;
15856 + sysRegWrite(ETH_PWR_CON, temp | 0x8); /* PWR_ON_S*/
15858 + udelay(5); /* wait power settle time (min delay is 1us)*/
15860 + temp = sysRegRead(ETH_PWR_CON) ;
15861 + sysRegWrite(ETH_PWR_CON, temp & ~0x10); /* PWR_CLK_DIS*/
15862 + temp = sysRegRead(ETH_PWR_CON) ;
15863 + sysRegWrite(ETH_PWR_CON, temp & ~0x2); /* PWR_ISO*/
15864 + temp = sysRegRead(ETH_PWR_CON) ;
15865 + sysRegWrite(ETH_PWR_CON, temp & ~0x100); /* SRAM_PDN 0*/
15866 + temp = sysRegRead(ETH_PWR_CON) ;
15867 + sysRegWrite(ETH_PWR_CON, temp & ~0x200); /* SRAM_PDN 1*/
15868 + temp = sysRegRead(ETH_PWR_CON) ;
15869 + sysRegWrite(ETH_PWR_CON, temp & ~0x400); /* SRAM_PDN 2*/
15870 + temp = sysRegRead(ETH_PWR_CON) ;
15871 + sysRegWrite(ETH_PWR_CON, temp & ~0x800); /* SRAM_PDN 3*/
15873 + udelay(5); /* wait SRAM settle time (min delay is 1Us)*/
15875 + temp = sysRegRead(ETH_PWR_CON) ;
15876 + sysRegWrite(ETH_PWR_CON, temp | 0x1); /* PWR_RST_B*/
15879 + /* Power on HIFSYS*/
15880 + pwr_ack_status = (sysRegRead(HIF_PWR_CON) & 0x0000f000) >> 12;
15881 + if(pwr_ack_status == 0x0) {
15882 + printk("HIF already turn on and power on flow will be skipped...\n");
15885 + temp = sysRegRead(HIF_PWR_CON) ;
15886 + sysRegWrite(HIF_PWR_CON, temp | 0x4); /* PWR_ON*/
15887 + temp = sysRegRead(HIF_PWR_CON) ;
15888 + sysRegWrite(HIF_PWR_CON, temp | 0x8); /* PWR_ON_S*/
15890 + udelay(5); /* wait power settle time (min delay is 1us)*/
15892 + temp = sysRegRead(HIF_PWR_CON) ;
15893 + sysRegWrite(HIF_PWR_CON, temp & ~0x10); /* PWR_CLK_DIS*/
15894 + temp = sysRegRead(HIF_PWR_CON) ;
15895 + sysRegWrite(HIF_PWR_CON, temp & ~0x2); /* PWR_ISO*/
15896 + temp = sysRegRead(HIF_PWR_CON) ;
15897 + sysRegWrite(HIF_PWR_CON, temp & ~0x100); /* SRAM_PDN 0*/
15898 + temp = sysRegRead(HIF_PWR_CON) ;
15899 + sysRegWrite(HIF_PWR_CON, temp & ~0x200); /* SRAM_PDN 1*/
15900 + temp = sysRegRead(HIF_PWR_CON) ;
15901 + sysRegWrite(HIF_PWR_CON, temp & ~0x400); /* SRAM_PDN 2*/
15902 + temp = sysRegRead(HIF_PWR_CON) ;
15903 + sysRegWrite(HIF_PWR_CON, temp & ~0x800); /* SRAM_PDN 3*/
15905 + udelay(5); /* wait SRAM settle time (min delay is 1Us)*/
15907 + temp = sysRegRead(HIF_PWR_CON) ;
15908 + sysRegWrite(HIF_PWR_CON, temp | 0x1); /* PWR_RST_B*/
15911 + /* Release mt7530 reset */
15912 + temp = le32_to_cpu(*(volatile u_long *)(0xfb000034));
15913 + temp &= ~(BIT(2));
15914 + *(volatile u_long *)(0xfb000034) = temp;
15919 + * ra2882eth_init - Module Init code
15921 + * Called by kernel to register net_device
15925 +static int fe_probe(struct platform_device *pdev)
15928 + struct net_device *dev = alloc_etherdev(sizeof(END_DEVICE));
15930 + fe_irq = platform_get_irq(pdev, 0);
15932 +#ifdef CONFIG_RALINK_VISTA_BASIC
15934 + mii_mgr_read(29, 31, &sw_id);
15935 + is_switch_175c = (sw_id == 0x175c) ? 1:0;
15941 + strcpy(dev->name, DEV_NAME);
15942 + printk("%s:%s[%d]%d\n", __FILE__, __func__, __LINE__, fe_irq);
15943 + dev->irq = fe_irq;
15944 + dev->addr_len = 6;
15945 + dev->base_addr = RALINK_FRAME_ENGINE_BASE;
15947 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
15948 + rather_probe(dev);
15950 + dev->init = rather_probe;
15952 + ra2880_setup_dev_fptable(dev);
15954 + /* net_device structure Init */
15955 + ethtool_init(dev);
15956 + printk("Ralink APSoC Ethernet Driver Initilization. %s %d rx/tx descriptors allocated, mtu = %d!\n", RAETH_VERSION, NUM_RX_DESC, dev->mtu);
15957 +#ifdef CONFIG_RAETH_NAPI
15958 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
15959 + printk("NAPI enable, Tx Ring = %d, Rx Ring = %d\n", NUM_TX_DESC, NUM_RX_DESC);
15961 + printk("NAPI enable, weight = %d, Tx Ring = %d, Rx Ring = %d\n", dev->weight, NUM_TX_DESC, NUM_RX_DESC);
15965 + /* Register net device for the driver */
15966 + if ( register_netdev(dev) != 0) {
15967 + printk(KERN_WARNING " " __FILE__ ": No ethernet port found.\n");
15972 +#ifdef CONFIG_RAETH_NETLINK
15973 + csr_netlink_init();
15975 + ret = debug_proc_init();
15977 + dev_raether = dev;
15978 +#ifdef CONFIG_ARCH_MT7623
15979 + mt7623_ethifsys_init();
15990 +void fe_sw_init(void)
15992 +#if defined (CONFIG_GIGAPHY) || defined (CONFIG_RAETH_ROUTER) || defined (CONFIG_100PHY)
15993 + unsigned int regValue = 0;
15996 + // Case1: RT288x/RT3883/MT7621 GE1 + GigaPhy
15997 +#if defined (CONFIG_GE1_RGMII_AN)
15998 + enable_auto_negotiate(1);
15999 + if (isMarvellGigaPHY(1)) {
16000 +#if defined (CONFIG_RT3883_FPGA)
16001 +		mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 9, &regValue);
16002 + regValue &= ~(3<<8); //turn off 1000Base-T Advertisement (9.9=1000Full, 9.8=1000Half)
16003 + mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 9, regValue);
16005 + printk("\n Reset MARVELL phy\n");
16006 +		mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 20, &regValue);
16007 + regValue |= 1<<7; //Add delay to RX_CLK for RXD Outputs
16008 + mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 20, regValue);
16010 +		mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 0, &regValue);
16011 + regValue |= 1<<15; //PHY Software Reset
16012 + mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 0, regValue);
16013 +#elif defined (CONFIG_MT7621_FPGA) || defined (CONFIG_MT7623_FPGA)
16014 +		mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 9, &regValue);
16015 + regValue &= ~(3<<8); //turn off 1000Base-T Advertisement (9.9=1000Full, 9.8=1000Half)
16016 + mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 9, regValue);
16018 + /*10Mbps, debug*/
16019 + mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 4, 0x461);
16021 +		mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 0, &regValue);
16022 + regValue |= 1<<9; //restart AN
16023 + mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 0, regValue);
16027 + if (isVtssGigaPHY(1)) {
16028 + mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 31, 1);
16029 +		mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 28, &regValue);
16030 + printk("Vitesse phy skew: %x --> ", regValue);
16031 + regValue |= (0x3<<12);
16032 + regValue &= ~(0x3<<14);
16033 + printk("%x\n", regValue);
16034 + mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 28, regValue);
16035 + mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 31, 0);
16037 +#if defined (CONFIG_RALINK_MT7621)
16038 + sysRegWrite(RALINK_ETH_SW_BASE+0x100, 0x21056300);//(P0, Auto mode)
16040 +#endif // CONFIG_GE1_RGMII_AN //
16042 + // Case2: RT3883/MT7621 GE2 + GigaPhy
16043 +#if defined (CONFIG_GE2_RGMII_AN)
16044 + enable_auto_negotiate(2);
16045 + if (isMarvellGigaPHY(2)) {
16046 +#if defined (CONFIG_RT3883_FPGA)
16047 +		mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 9, &regValue);
16048 + regValue &= ~(3<<8); //turn off 1000Base-T Advertisement (9.9=1000Full, 9.8=1000Half)
16049 + mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 9, regValue);
16051 +		mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 20, &regValue);
16052 + regValue |= 1<<7; //Add delay to RX_CLK for RXD Outputs
16053 + mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 20, regValue);
16055 +		mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 0, &regValue);
16056 + regValue |= 1<<15; //PHY Software Reset
16057 + mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 0, regValue);
16058 +#elif defined (CONFIG_MT7621_FPGA) || defined (CONFIG_MT7623_FPGA)
16059 +		mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 9, &regValue);
16060 + regValue &= ~(3<<8); //turn off 1000Base-T Advertisement (9.9=1000Full, 9.8=1000Half)
16061 + mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 9, regValue);
16063 + /*10Mbps, debug*/
16064 + mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 4, 0x461);
16067 +		mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 0, &regValue);
16068 + regValue |= 1<<9; //restart AN
16069 + mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 0, regValue);
16073 + if (isVtssGigaPHY(2)) {
16074 + mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 31, 1);
16075 +		mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 28, &regValue);
16076 + printk("Vitesse phy skew: %x --> ", regValue);
16077 + regValue |= (0x3<<12);
16078 + regValue &= ~(0x3<<14);
16079 + printk("%x\n", regValue);
16080 + mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 28, regValue);
16081 + mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 31, 0);
16083 +#if defined (CONFIG_RALINK_MT7621)
16084 + //RGMII2=Normal mode
16085 + *(volatile u_long *)(RALINK_SYSCTL_BASE + 0x60) &= ~(0x1 << 15);
16086 + //GMAC2= RGMII mode
16087 + *(volatile u_long *)(SYSCFG1) &= ~(0x3 << 14);
16089 + sysRegWrite(RALINK_ETH_SW_BASE+0x200, 0x21056300);//(P1, Auto mode)
16091 +#endif // CONFIG_GE2_RGMII_AN //
16093 + // Case3: RT305x/RT335x/RT6855/RT6855A/MT7620 + EmbeddedSW
16094 +#if defined (CONFIG_RT_3052_ESW) && !defined(CONFIG_RALINK_MT7621) && !defined(CONFIG_ARCH_MT7623)
16095 +#if defined (CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_MT7620)
16097 +#elif defined(CONFIG_RALINK_RT6855A)
16098 + rt6855A_gsw_init();
16100 + rt305x_esw_init();
16103 + // Case4: RT288x/RT388x/MT7621 GE1 + Internal GigaSW
16104 +#if defined (CONFIG_GE1_RGMII_FORCE_1000) || defined (CONFIG_GE1_TRGMII_FORCE_1200) || defined (CONFIG_GE1_TRGMII_FORCE_2000) || defined (CONFIG_GE1_TRGMII_FORCE_2600)
16105 +#if defined (CONFIG_RALINK_MT7621)
16106 + setup_internal_gsw();
16108 +#elif defined (CONFIG_ARCH_MT7623)
16109 +#if defined (CONFIG_GE1_TRGMII_FORCE_2000) || defined (CONFIG_GE1_TRGMII_FORCE_2600)
16110 + *(volatile u_long *)(0xfb00002c) |= (1<<11);
16112 + *(volatile u_long *)(0xfb00002c) &= ~(1<<11);
16114 + setup_internal_gsw();
16115 + trgmii_calibration_7623();
16116 + trgmii_calibration_7530();
16117 + //*(volatile u_long *)(0xfb110300) |= (0x1f << 24); //Just only for 312.5/325MHz
16118 + *(volatile u_long *)(0xfb110340) = 0x00020000;
16119 + *(volatile u_long *)(0xfb110304) &= 0x3fffffff; // RX clock gating in MT7623
16120 + *(volatile u_long *)(0xfb110300) |= 0x80000000; // Assert RX reset in MT7623
16121 + *(volatile u_long *)(0xfb110300 ) &= 0x7fffffff; // Release RX reset in MT7623
16122 + *(volatile u_long *)(0xfb110300 +0x04) |= 0xC0000000; // Disable RX clock gating in MT7623
16123 +/*GE1@125MHz(RGMII mode) TX delay adjustment*/
16124 +#if defined (CONFIG_GE1_RGMII_FORCE_1000)
16125 + *(volatile u_long *)(0xfb110350) = 0x55;
16126 + *(volatile u_long *)(0xfb110358) = 0x55;
16127 + *(volatile u_long *)(0xfb110360) = 0x55;
16128 + *(volatile u_long *)(0xfb110368) = 0x55;
16129 + *(volatile u_long *)(0xfb110370) = 0x55;
16130 + *(volatile u_long *)(0xfb110378) = 0x855;
16134 +#elif defined (CONFIG_MT7623_FPGA) /* Nelson: remove for bring up, should be added!!! */
16135 + setup_fpga_gsw();
16137 + sysRegWrite(MDIO_CFG, INIT_VALUE_OF_FORCE_1000_FD);
16141 + // Case5: RT388x/MT7621 GE2 + GigaSW
16142 +#if defined (CONFIG_GE2_RGMII_FORCE_1000)
16143 +#if defined (CONFIG_RALINK_MT7621)
16144 + setup_external_gsw();
16146 + sysRegWrite(MDIO_CFG2, INIT_VALUE_OF_FORCE_1000_FD);
16150 + // Case6: RT288x GE1 /RT388x,MT7621 GE1/GE2 + (10/100 Switch or 100PHY)
16151 +#if defined (CONFIG_RAETH_ROUTER) || defined (CONFIG_100PHY)
16153 + //set GMAC to MII or RvMII mode
16154 +#if defined (CONFIG_RALINK_RT3883)
16155 + regValue = sysRegRead(SYSCFG1);
16156 +#if defined (CONFIG_GE1_MII_FORCE_100) || defined (CONFIG_GE1_MII_AN)
16157 + regValue &= ~(0x3 << 12);
16158 + regValue |= 0x1 << 12; // GE1 MII Mode
16159 +#elif defined (CONFIG_GE1_RVMII_FORCE_100)
16160 + regValue &= ~(0x3 << 12);
16161 + regValue |= 0x2 << 12; // GE1 RvMII Mode
16164 +#if defined (CONFIG_GE2_MII_FORCE_100) || defined (CONFIG_GE2_MII_AN)
16165 + regValue &= ~(0x3 << 14);
16166 + regValue |= 0x1 << 14; // GE2 MII Mode
16167 +#elif defined (CONFIG_GE2_RVMII_FORCE_100)
16168 + regValue &= ~(0x3 << 14);
16169 + regValue |= 0x2 << 14; // GE2 RvMII Mode
16171 + sysRegWrite(SYSCFG1, regValue);
16172 +#endif // CONFIG_RALINK_RT3883 //
16174 +#elif defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
16176 +#if defined (CONFIG_GE1_MII_FORCE_100)
16177 + sysRegWrite(RALINK_ETH_SW_BASE+0x100, 0x5e337);//(P0, Force mode, Link Up, 100Mbps, Full-Duplex, FC ON)
16179 +#if defined (CONFIG_GE2_MII_FORCE_100)
16180 + sysRegWrite(RALINK_ETH_SW_BASE+0x200, 0x5e337);//(P1, Force mode, Link Up, 100Mbps, Full-Duplex, FC ON)
16182 +#if defined (CONFIG_GE1_MII_AN) || defined (CONFIG_GE1_RGMII_AN)
16183 + enable_auto_negotiate(1);
16184 +#if defined (CONFIG_RALINK_MT7621)
16185 + sysRegWrite(RALINK_ETH_SW_BASE+0x100, 0x21056300);//(P0, Auto mode)
16188 +#if defined (CONFIG_GE2_MII_AN) || defined (CONFIG_GE1_RGMII_AN)
16189 + enable_auto_negotiate(2);
16190 +#if defined (CONFIG_RALINK_MT7621)
16191 + sysRegWrite(RALINK_ETH_SW_BASE+0x200, 0x21056300);//(P1, Auto mode)
16196 +#if defined (CONFIG_GE1_MII_FORCE_100)
16197 +#if defined (CONFIG_RALINK_MT7621)
16199 + sysRegWrite(MDIO_CFG, INIT_VALUE_OF_FORCE_100_FD);
16202 +#if defined (CONFIG_GE2_MII_FORCE_100)
16203 +#if defined (CONFIG_RALINK_MT7621)
16205 + sysRegWrite(MDIO_CFG2, INIT_VALUE_OF_FORCE_100_FD);
16208 + //add switch configuration here for other switch chips.
16209 +#if defined (CONFIG_GE1_MII_FORCE_100) || defined (CONFIG_GE2_MII_FORCE_100)
16210 + // IC+ 175x: force IC+ switch cpu port is 100/FD
16211 + mii_mgr_write(29, 22, 0x8420);
16215 +#endif // defined (CONFIG_RAETH_ROUTER) || defined (CONFIG_100PHY) //
16221 + * ra2882eth_cleanup_module - Module Exit code
16223 + * Cmd 'rmmod' will invode the routine to exit the module
16227 + void ra2882eth_cleanup_module(void)
16229 + struct net_device *dev = dev_raether;
16230 + END_DEVICE *ei_local;
16232 + ei_local = netdev_priv(dev);
16234 +#ifdef CONFIG_PSEUDO_SUPPORT
16235 + unregister_netdev(ei_local->PseudoDev);
16236 + free_netdev(ei_local->PseudoDev);
16238 + unregister_netdev(dev);
16239 + RAETH_PRINT("Free ei_local and unregister netdev...\n");
16241 + free_netdev(dev);
16242 + debug_proc_exit();
16243 +#ifdef CONFIG_RAETH_NETLINK
16244 + csr_netlink_end();
16248 +EXPORT_SYMBOL(set_fe_dma_glo_cfg);
16249 +//module_init(ra2882eth_init);
16250 +//module_exit(ra2882eth_cleanup_module);
16252 +const struct of_device_id of_fe_match[] = {
16253 + { .compatible = "mediatek,mt7623-net", },
16257 +MODULE_DEVICE_TABLE(of, of_fe_match);
16259 +static struct platform_driver fe_driver = {
16260 + .probe = fe_probe,
16261 +// .remove = ra2882eth_cleanup_module,
16263 + .name = "ralink_soc_eth",
16264 + .owner = THIS_MODULE,
16265 + .of_match_table = of_fe_match,
16269 +static int __init init_rtfe(void)
16272 + ret = platform_driver_register(&fe_driver);
16276 +static void __exit exit_rtfe(void)
16278 + platform_driver_unregister(&fe_driver);
16281 +module_init(init_rtfe);
16282 +module_exit(exit_rtfe);
16285 +MODULE_LICENSE("GPL");
16286 diff --git a/drivers/net/ethernet/raeth/raether.h b/drivers/net/ethernet/raeth/raether.h
16287 new file mode 100644
16288 index 0000000..7a97109
16290 +++ b/drivers/net/ethernet/raeth/raether.h
16292 +#ifndef RA2882ETHEND_H
16293 +#define RA2882ETHEND_H
16295 +#ifdef DSP_VIA_NONCACHEABLE
16296 +#define ESRAM_BASE 0xa0800000 /* 0x0080-0000 ~ 0x00807FFF */
16298 +#define ESRAM_BASE 0x80800000 /* 0x0080-0000 ~ 0x00807FFF */
16301 +#define RX_RING_BASE ((int)(ESRAM_BASE + 0x7000))
16302 +#define TX_RING_BASE ((int)(ESRAM_BASE + 0x7800))
16304 +#if defined(CONFIG_RALINK_RT2880)
16305 +#define NUM_TX_RINGS 1
16307 +#define NUM_TX_RINGS 4
16309 +#ifdef MEMORY_OPTIMIZATION
16310 +#ifdef CONFIG_RAETH_ROUTER
16311 +#define NUM_RX_DESC 32 //128
16312 +#define NUM_TX_DESC 32 //128
16313 +#elif CONFIG_RT_3052_ESW
16314 +#define NUM_RX_DESC 16 //64
16315 +#define NUM_TX_DESC 16 //64
16317 +#define NUM_RX_DESC 32 //128
16318 +#define NUM_TX_DESC 32 //128
16320 +//#define NUM_RX_MAX_PROCESS 32
16321 +#define NUM_RX_MAX_PROCESS 32
16323 +#if defined (CONFIG_RAETH_ROUTER)
16324 +#define NUM_RX_DESC 256
16325 +#define NUM_TX_DESC 256
16326 +#elif defined (CONFIG_RT_3052_ESW)
16327 +#if defined (CONFIG_RALINK_MT7621)
16328 +#define NUM_RX_DESC 512
16329 +#define NUM_QRX_DESC 16
16330 +#define NUM_TX_DESC 512
16332 +#define NUM_RX_DESC 256
16333 +#define NUM_QRX_DESC NUM_RX_DESC
16334 +#define NUM_TX_DESC 256
16337 +#define NUM_RX_DESC 256
16338 +#define NUM_QRX_DESC NUM_RX_DESC
16339 +#define NUM_TX_DESC 256
16341 +#if defined(CONFIG_RALINK_RT3883) || defined(CONFIG_RALINK_MT7620)
16342 +#define NUM_RX_MAX_PROCESS 2
16344 +#define NUM_RX_MAX_PROCESS 16
16347 +#define NUM_LRO_RX_DESC 16
16349 +#if defined (CONFIG_SUPPORT_OPENWRT)
16350 +#define DEV_NAME "eth0"
16351 +#define DEV2_NAME "eth1"
16353 +#define DEV_NAME "eth2"
16354 +#define DEV2_NAME "eth3"
16357 +#if defined (CONFIG_RALINK_RT6855A) || defined (CONFIG_RALINK_MT7621)
16358 +#define GMAC0_OFFSET 0xE000
16359 +#define GMAC2_OFFSET 0xE006
16361 +#define GMAC0_OFFSET 0x28
16362 +#define GMAC2_OFFSET 0x22
16365 +#if defined(CONFIG_RALINK_RT6855A)
16366 +#define IRQ_ENET0 22
16367 +#elif defined(CONFIG_ARCH_MT7623)
16368 +#define IRQ_ENET0 232
16370 +#define IRQ_ENET0 3 /* hardware interrupt #3, defined in RT2880 Soc Design Spec Rev 0.03, pp43 */
16373 +#if defined (CONFIG_RAETH_HW_LRO)
16374 +#define HW_LRO_TIMER_UNIT 1
16375 +#define HW_LRO_REFRESH_TIME 50000
16376 +#define HW_LRO_MAX_AGG_CNT 64
16377 +#define HW_LRO_AGG_DELTA 1
16378 +#if defined(CONFIG_RAETH_PDMA_DVT)
16379 +#define MAX_LRO_RX_LENGTH 10240
16381 +#define MAX_LRO_RX_LENGTH (PAGE_SIZE - SKB_DATA_ALIGN(NET_SKB_PAD + sizeof(struct skb_shared_info)))
16383 +#define HW_LRO_AGG_TIME 10 /* 200us */
16384 +#define HW_LRO_AGE_TIME 50
16385 +#define HW_LRO_BW_THRE 3000
16386 +#define HW_LRO_PKT_INT_ALPHA 100
16387 +#endif /* CONFIG_RAETH_HW_LRO */
16388 +#define FE_INT_STATUS_REG (*(volatile unsigned long *)(FE_INT_STATUS))
16389 +#define FE_INT_STATUS_CLEAN(reg) (*(volatile unsigned long *)(FE_INT_STATUS)) = reg
16391 +//#define RAETH_DEBUG
16392 +#ifdef RAETH_DEBUG
16393 +#define RAETH_PRINT(fmt, args...) printk(KERN_INFO fmt, ## args)
16395 +#define RAETH_PRINT(fmt, args...) { }
16398 +struct net_device_stats *ra_get_stats(struct net_device *dev);
16400 +void ei_tx_timeout(struct net_device *dev);
16401 +int rather_probe(struct net_device *dev);
16402 +int ei_open(struct net_device *dev);
16403 +int ei_close(struct net_device *dev);
16405 +int ra2882eth_init(void);
16406 +void ra2882eth_cleanup_module(void);
16408 +void ei_xmit_housekeeping(unsigned long data);
16410 +u32 mii_mgr_read(u32 phy_addr, u32 phy_register, u32 *read_data);
16411 +u32 mii_mgr_write(u32 phy_addr, u32 phy_register, u32 write_data);
16412 +u32 mii_mgr_cl45_set_address(u32 port_num, u32 dev_addr, u32 reg_addr);
16413 +u32 mii_mgr_read_cl45(u32 port_num, u32 dev_addr, u32 reg_addr, u32 *read_data);
16414 +u32 mii_mgr_write_cl45(u32 port_num, u32 dev_addr, u32 reg_addr, u32 write_data);
16415 +void fe_sw_init(void);
16418 diff --git a/drivers/net/ethernet/raeth/raether_hwlro.c b/drivers/net/ethernet/raeth/raether_hwlro.c
16419 new file mode 100755
16420 index 0000000..5fc4f36
16422 +++ b/drivers/net/ethernet/raeth/raether_hwlro.c
16424 +#include <linux/module.h>
16425 +#include <linux/version.h>
16426 +#include <linux/kernel.h>
16427 +#include <linux/types.h>
16428 +#include <linux/pci.h>
16429 +#include <linux/init.h>
16430 +#include <linux/skbuff.h>
16431 +#include <linux/if_vlan.h>
16432 +#include <linux/if_ether.h>
16433 +#include <linux/fs.h>
16434 +#include <asm/uaccess.h>
16435 +#include <asm/rt2880/surfboardint.h>
16436 +#include <linux/delay.h>
16437 +#include <linux/sched.h>
16438 +#include <asm/rt2880/rt_mmap.h>
16439 +#include "ra2882ethreg.h"
16440 +#include "raether.h"
16441 +#include "ra_mac.h"
16442 +#include "ra_ioctl.h"
16443 +#include "ra_rfrw.h"
16445 +#if defined(CONFIG_RAETH_HW_LRO_FORCE)
16446 +int set_fe_lro_ring1_cfg(struct net_device *dev)
16450 + netdev_printk(KERN_CRIT, dev, "set_fe_lro_ring1_cfg()\n");
16452 + /* 1. Set RX ring mode to force port */
16453 + SET_PDMA_RXRING_MODE(ADMA_RX_RING1, PDMA_RX_FORCE_PORT);
16455 + /* 2. Configure lro ring */
16456 + /* 2.1 set src/destination TCP ports */
16457 + SET_PDMA_RXRING_TCP_SRC_PORT(ADMA_RX_RING1, 1122);
16458 + SET_PDMA_RXRING_TCP_DEST_PORT(ADMA_RX_RING1, 3344);
16459 + /* 2.2 set src/destination IPs */
16460 + str_to_ip(&ip, "10.10.10.3");
16461 + sysRegWrite(LRO_RX_RING1_SIP_DW0, ip);
16462 + str_to_ip(&ip, "10.10.10.254");
16463 + sysRegWrite(LRO_RX_RING1_DIP_DW0, ip);
16464 + /* 2.3 IPv4 force port mode */
16465 + SET_PDMA_RXRING_IPV4_FORCE_MODE(ADMA_RX_RING1, 1);
16466 + /* 2.4 IPv6 force port mode */
16467 + SET_PDMA_RXRING_IPV6_FORCE_MODE(ADMA_RX_RING1, 1);
16469 + /* 3. Set Age timer: 10 msec. */
16470 + SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING1, HW_LRO_AGE_TIME);
16472 + /* 4. Valid LRO ring */
16473 + SET_PDMA_RXRING_VALID(ADMA_RX_RING1, 1);
16478 +int set_fe_lro_ring2_cfg(struct net_device *dev)
16482 + netdev_printk(KERN_CRIT, dev, "set_fe_lro_ring2_cfg()\n");
16484 + /* 1. Set RX ring mode to force port */
16485 + SET_PDMA_RXRING2_MODE(PDMA_RX_FORCE_PORT);
16487 + /* 2. Configure lro ring */
16488 + /* 2.1 set src/destination TCP ports */
16489 + SET_PDMA_RXRING_TCP_SRC_PORT(ADMA_RX_RING2, 5566);
16490 + SET_PDMA_RXRING_TCP_DEST_PORT(ADMA_RX_RING2, 7788);
16491 + /* 2.2 set src/destination IPs */
16492 + str_to_ip(&ip, "10.10.10.3");
16493 + sysRegWrite(LRO_RX_RING2_SIP_DW0, ip);
16494 + str_to_ip(&ip, "10.10.10.254");
16495 + sysRegWrite(LRO_RX_RING2_DIP_DW0, ip);
16496 + /* 2.3 IPv4 force port mode */
16497 + SET_PDMA_RXRING_IPV4_FORCE_MODE(ADMA_RX_RING2, 1);
16498 + /* 2.4 IPv6 force port mode */
16499 + SET_PDMA_RXRING_IPV6_FORCE_MODE(ADMA_RX_RING2, 1);
16501 + /* 3. Set Age timer: 10 msec. */
16502 + SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING2, HW_LRO_AGE_TIME);
16504 + /* 4. Valid LRO ring */
16505 + SET_PDMA_RXRING_VALID(ADMA_RX_RING2, 1);
16510 +int set_fe_lro_ring3_cfg(struct net_device *dev)
16514 + netdev_printk(KERN_CRIT, dev, "set_fe_lro_ring3_cfg()\n");
16516 + /* 1. Set RX ring mode to force port */
16517 + SET_PDMA_RXRING3_MODE(PDMA_RX_FORCE_PORT);
16519 + /* 2. Configure lro ring */
16520 + /* 2.1 set src/destination TCP ports */
16521 + SET_PDMA_RXRING_TCP_SRC_PORT(ADMA_RX_RING3, 9900);
16522 + SET_PDMA_RXRING_TCP_DEST_PORT(ADMA_RX_RING3, 99);
16523 + /* 2.2 set src/destination IPs */
16524 + str_to_ip(&ip, "10.10.10.3");
16525 + sysRegWrite(LRO_RX_RING3_SIP_DW0, ip);
16526 + str_to_ip(&ip, "10.10.10.254");
16527 + sysRegWrite(LRO_RX_RING3_DIP_DW0, ip);
16528 + /* 2.3 IPv4 force port mode */
16529 + SET_PDMA_RXRING_IPV4_FORCE_MODE(ADMA_RX_RING3, 1);
16530 + /* 2.4 IPv6 force port mode */
16531 + SET_PDMA_RXRING_IPV6_FORCE_MODE(ADMA_RX_RING3, 1);
16533 + /* 3. Set Age timer: 10 msec. */
16534 + SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING3, HW_LRO_AGE_TIME);
16536 + /* 4. Valid LRO ring */
16537 + SET_PDMA_RXRING3_VALID(1);
16542 +int set_fe_lro_glo_cfg(struct net_device *dev)
16544 + unsigned int regVal = 0;
16546 + netdev_printk(KERN_CRIT, dev, "set_fe_lro_glo_cfg()\n");
16548 + /* 1 Set max AGG timer: 10 msec. */
16549 + SET_PDMA_LRO_MAX_AGG_TIME(HW_LRO_AGG_TIME);
16551 + /* 2. Set max LRO agg count */
16552 + SET_PDMA_LRO_MAX_AGG_CNT(HW_LRO_MAX_AGG_CNT);
16554 + /* PDMA prefetch enable setting */
16555 + SET_PDMA_LRO_RXD_PREFETCH_EN(0x3);
16557 + /* 2.1 IPv4 checksum update enable */
16558 + SET_PDMA_LRO_IPV4_CSUM_UPDATE_EN(1);
16560 + /* 3. Polling relinguish */
16561 + while (sysRegRead(ADMA_LRO_CTRL_DW0) & PDMA_LRO_RELINGUISH)
16564 + /* 4. Enable LRO */
16565 + regVal = sysRegRead(ADMA_LRO_CTRL_DW0);
16566 + regVal |= PDMA_LRO_EN;
16567 + sysRegWrite(ADMA_LRO_CTRL_DW0, regVal);
16572 +int set_fe_lro_auto_cfg(struct net_device *dev)
16574 + unsigned int regVal = 0;
16577 + netdev_printk(KERN_CRIT, dev, "set_fe_lro_auto_cfg()\n");
16579 + /* 1.1 Set my IP_1 */
16580 + str_to_ip(&ip, "10.10.10.254");
16581 + sysRegWrite(LRO_RX_RING0_DIP_DW0, ip);
16582 + sysRegWrite(LRO_RX_RING0_DIP_DW1, 0);
16583 + sysRegWrite(LRO_RX_RING0_DIP_DW2, 0);
16584 + sysRegWrite(LRO_RX_RING0_DIP_DW3, 0);
16585 + SET_PDMA_RXRING_MYIP_VALID(ADMA_RX_RING0, 1);
16587 + /* 1.2 Set my IP_2 */
16588 + str_to_ip(&ip, "10.10.20.254");
16589 + sysRegWrite(LRO_RX_RING1_DIP_DW0, ip);
16590 + sysRegWrite(LRO_RX_RING1_DIP_DW1, 0);
16591 + sysRegWrite(LRO_RX_RING1_DIP_DW2, 0);
16592 + sysRegWrite(LRO_RX_RING1_DIP_DW3, 0);
16593 + SET_PDMA_RXRING_MYIP_VALID(ADMA_RX_RING1, 1);
16595 + /* 1.3 Set my IP_3 */
16596 + sysRegWrite(LRO_RX_RING2_DIP_DW3, 0x20010238);
16597 + sysRegWrite(LRO_RX_RING2_DIP_DW2, 0x08000000);
16598 + sysRegWrite(LRO_RX_RING2_DIP_DW1, 0x00000000);
16599 + sysRegWrite(LRO_RX_RING2_DIP_DW0, 0x00000254);
16600 + SET_PDMA_RXRING_MYIP_VALID(ADMA_RX_RING2, 1);
16602 + /* 1.4 Set my IP_4 */
16603 + sysRegWrite(LRO_RX_RING3_DIP_DW3, 0x20010238);
16604 + sysRegWrite(LRO_RX_RING3_DIP_DW2, 0x08010000);
16605 + sysRegWrite(LRO_RX_RING3_DIP_DW1, 0x00000000);
16606 + sysRegWrite(LRO_RX_RING3_DIP_DW0, 0x00000254);
16607 + SET_PDMA_RXRING_MYIP_VALID(ADMA_RX_RING3, 1);
16609 + /* 2.1 Set RX ring1~3 to auto-learn modes */
16610 + SET_PDMA_RXRING_MODE(ADMA_RX_RING1, PDMA_RX_AUTO_LEARN);
16611 + SET_PDMA_RXRING_MODE(ADMA_RX_RING2, PDMA_RX_AUTO_LEARN);
16612 + SET_PDMA_RXRING_MODE(ADMA_RX_RING3, PDMA_RX_AUTO_LEARN);
16614 + /* 2.2 Valid LRO ring */
16615 + SET_PDMA_RXRING_VALID(ADMA_RX_RING0, 1);
16616 + SET_PDMA_RXRING_VALID(ADMA_RX_RING1, 1);
16617 + SET_PDMA_RXRING_VALID(ADMA_RX_RING2, 1);
16618 + SET_PDMA_RXRING_VALID(ADMA_RX_RING3, 1);
16620 + /* 2.3 Set AGE timer */
16621 + SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING1, HW_LRO_AGE_TIME);
16622 + SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING2, HW_LRO_AGE_TIME);
16623 + SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING3, HW_LRO_AGE_TIME);
16625 + /* 2.4 Set max AGG timer */
16626 + SET_PDMA_RXRING_AGG_TIME(ADMA_RX_RING1, HW_LRO_AGG_TIME);
16627 + SET_PDMA_RXRING_AGG_TIME(ADMA_RX_RING2, HW_LRO_AGG_TIME);
16628 + SET_PDMA_RXRING_AGG_TIME(ADMA_RX_RING3, HW_LRO_AGG_TIME);
16630 + /* 2.5 Set max LRO agg count */
16631 + SET_PDMA_RXRING_MAX_AGG_CNT(ADMA_RX_RING1, HW_LRO_MAX_AGG_CNT);
16632 + SET_PDMA_RXRING_MAX_AGG_CNT(ADMA_RX_RING2, HW_LRO_MAX_AGG_CNT);
16633 + SET_PDMA_RXRING_MAX_AGG_CNT(ADMA_RX_RING3, HW_LRO_MAX_AGG_CNT);
16635 + /* 3.0 IPv6 LRO enable */
16636 + SET_PDMA_LRO_IPV6_EN(1);
16638 + /* 3.1 IPv4 checksum update enable */
16639 + SET_PDMA_LRO_IPV4_CSUM_UPDATE_EN(1);
16641 + /* 3.2 TCP push option check disable */
16642 + //SET_PDMA_LRO_IPV4_CTRL_PUSH_EN(0);
16644 + /* PDMA prefetch enable setting */
16645 + SET_PDMA_LRO_RXD_PREFETCH_EN(0x3);
16647 + /* 3.2 switch priority comparison to byte count mode */
16648 +/* SET_PDMA_LRO_ALT_SCORE_MODE(PDMA_LRO_ALT_BYTE_CNT_MODE); */
16649 + SET_PDMA_LRO_ALT_SCORE_MODE(PDMA_LRO_ALT_PKT_CNT_MODE);
16651 + /* 3.3 bandwidth threshold setting */
16652 + SET_PDMA_LRO_BW_THRESHOLD(HW_LRO_BW_THRE);
16654 + /* 3.4 auto-learn score delta setting */
16655 + sysRegWrite(LRO_ALT_SCORE_DELTA, 0);
16657 + /* 3.5 Set ALT timer to 20us: (unit: 20us) */
16658 + SET_PDMA_LRO_ALT_REFRESH_TIMER_UNIT(HW_LRO_TIMER_UNIT);
16659 + /* 3.6 Set ALT refresh timer to 1 sec. (unit: 20us) */
16660 + SET_PDMA_LRO_ALT_REFRESH_TIMER(HW_LRO_REFRESH_TIME);
16662 + /* 3.7 the least remaining room of SDL0 in RXD for lro aggregation */
16663 + SET_PDMA_LRO_MIN_RXD_SDL(1522);
16665 + /* 4. Polling relinguish */
16666 + while (sysRegRead(ADMA_LRO_CTRL_DW0) & PDMA_LRO_RELINGUISH)
16669 + /* 5. Enable LRO */
16670 + regVal = sysRegRead(ADMA_LRO_CTRL_DW0);
16671 + regVal |= PDMA_LRO_EN;
16672 + sysRegWrite(ADMA_LRO_CTRL_DW0, regVal);
16676 +#endif /* CONFIG_RAETH_HW_LRO_FORCE */
16678 +int fe_hw_lro_init(struct net_device *dev)
16681 + END_DEVICE *ei_local = netdev_priv(dev);
16683 + /* Initial RX Ring 3 */
16684 + ei_local->rx_ring3 =
16685 + pci_alloc_consistent(NULL, NUM_LRO_RX_DESC * sizeof(struct PDMA_rxdesc),
16686 + &ei_local->phy_rx_ring3);
16687 + for (i = 0; i < NUM_LRO_RX_DESC; i++) {
16688 + memset(&ei_local->rx_ring3[i], 0, sizeof(struct PDMA_rxdesc));
16689 + ei_local->rx_ring3[i].rxd_info2.DDONE_bit = 0;
16690 + ei_local->rx_ring3[i].rxd_info2.LS0 = 0;
16691 + ei_local->rx_ring3[i].rxd_info2.PLEN0 =
16692 + SET_ADMA_RX_LEN0(MAX_LRO_RX_LENGTH);
16693 + ei_local->rx_ring3[i].rxd_info2.PLEN1 =
16694 + SET_ADMA_RX_LEN1(MAX_LRO_RX_LENGTH >> 14);
16695 + ei_local->rx_ring3[i].rxd_info1.PDP0 =
16696 + dma_map_single(NULL, ei_local->netrx3_skbuf[i]->data,
16697 + MAX_LRO_RX_LENGTH, PCI_DMA_FROMDEVICE);
16699 + netdev_printk(KERN_CRIT, dev,
16700 + "\nphy_rx_ring3 = 0x%08x, rx_ring3 = 0x%p\n",
16701 + ei_local->phy_rx_ring3, ei_local->rx_ring3);
16702 + /* Initial RX Ring 2 */
16703 + ei_local->rx_ring2 =
16704 + pci_alloc_consistent(NULL, NUM_LRO_RX_DESC * sizeof(struct PDMA_rxdesc),
16705 + &ei_local->phy_rx_ring2);
16706 + for (i = 0; i < NUM_LRO_RX_DESC; i++) {
16707 + memset(&ei_local->rx_ring2[i], 0, sizeof(struct PDMA_rxdesc));
16708 + ei_local->rx_ring2[i].rxd_info2.DDONE_bit = 0;
16709 + ei_local->rx_ring2[i].rxd_info2.LS0 = 0;
16710 + ei_local->rx_ring2[i].rxd_info2.PLEN0 =
16711 + SET_ADMA_RX_LEN0(MAX_LRO_RX_LENGTH);
16712 + ei_local->rx_ring2[i].rxd_info2.PLEN1 =
16713 + SET_ADMA_RX_LEN1(MAX_LRO_RX_LENGTH >> 14);
16714 + ei_local->rx_ring2[i].rxd_info1.PDP0 =
16715 + dma_map_single(NULL, ei_local->netrx2_skbuf[i]->data,
16716 + MAX_LRO_RX_LENGTH, PCI_DMA_FROMDEVICE);
16718 + netdev_printk(KERN_CRIT, dev,
16719 + "\nphy_rx_ring2 = 0x%08x, rx_ring2 = 0x%p\n",
16720 + ei_local->phy_rx_ring2, ei_local->rx_ring2);
16721 + /* Initial RX Ring 1 */
16722 + ei_local->rx_ring1 =
16723 + pci_alloc_consistent(NULL, NUM_LRO_RX_DESC * sizeof(struct PDMA_rxdesc),
16724 + &ei_local->phy_rx_ring1);
16725 + for (i = 0; i < NUM_LRO_RX_DESC; i++) {
16726 + memset(&ei_local->rx_ring1[i], 0, sizeof(struct PDMA_rxdesc));
16727 + ei_local->rx_ring1[i].rxd_info2.DDONE_bit = 0;
16728 + ei_local->rx_ring1[i].rxd_info2.LS0 = 0;
16729 + ei_local->rx_ring1[i].rxd_info2.PLEN0 =
16730 + SET_ADMA_RX_LEN0(MAX_LRO_RX_LENGTH);
16731 + ei_local->rx_ring1[i].rxd_info2.PLEN1 =
16732 + SET_ADMA_RX_LEN1(MAX_LRO_RX_LENGTH >> 14);
16733 + ei_local->rx_ring1[i].rxd_info1.PDP0 =
16734 + dma_map_single(NULL, ei_local->netrx1_skbuf[i]->data,
16735 + MAX_LRO_RX_LENGTH, PCI_DMA_FROMDEVICE);
16737 + netdev_printk(KERN_CRIT, dev,
16738 + "\nphy_rx_ring1 = 0x%08x, rx_ring1 = 0x%p\n",
16739 + ei_local->phy_rx_ring1, ei_local->rx_ring1);
16741 + sysRegWrite(RX_BASE_PTR3, phys_to_bus((u32) ei_local->phy_rx_ring3));
16742 + sysRegWrite(RX_MAX_CNT3, cpu_to_le32((u32) NUM_LRO_RX_DESC));
16743 + sysRegWrite(RX_CALC_IDX3, cpu_to_le32((u32) (NUM_LRO_RX_DESC - 1)));
16744 + sysRegWrite(PDMA_RST_CFG, PST_DRX_IDX3);
16745 + sysRegWrite(RX_BASE_PTR2, phys_to_bus((u32) ei_local->phy_rx_ring2));
16746 + sysRegWrite(RX_MAX_CNT2, cpu_to_le32((u32) NUM_LRO_RX_DESC));
16747 + sysRegWrite(RX_CALC_IDX2, cpu_to_le32((u32) (NUM_LRO_RX_DESC - 1)));
16748 + sysRegWrite(PDMA_RST_CFG, PST_DRX_IDX2);
16749 + sysRegWrite(RX_BASE_PTR1, phys_to_bus((u32) ei_local->phy_rx_ring1));
16750 + sysRegWrite(RX_MAX_CNT1, cpu_to_le32((u32) NUM_LRO_RX_DESC));
16751 + sysRegWrite(RX_CALC_IDX1, cpu_to_le32((u32) (NUM_LRO_RX_DESC - 1)));
16752 + sysRegWrite(PDMA_RST_CFG, PST_DRX_IDX1);
16754 +#if defined(CONFIG_RAETH_HW_LRO_FORCE)
16755 + set_fe_lro_ring1_cfg(dev);
16756 + set_fe_lro_ring2_cfg(dev);
16757 + set_fe_lro_ring3_cfg(dev);
16758 + set_fe_lro_glo_cfg(dev);
16760 + set_fe_lro_auto_cfg(dev);
16761 +#endif /* CONFIG_RAETH_HW_LRO_FORCE */
16763 + /* HW LRO parameter settings */
16764 + ei_local->hw_lro_alpha = HW_LRO_PKT_INT_ALPHA;
16765 + ei_local->hw_lro_fix_setting = 1;
16769 +EXPORT_SYMBOL(fe_hw_lro_init);
16771 diff --git a/drivers/net/ethernet/raeth/raether_pdma.c b/drivers/net/ethernet/raeth/raether_pdma.c
16772 new file mode 100755
16773 index 0000000..4d47ee2
16775 +++ b/drivers/net/ethernet/raeth/raether_pdma.c
16777 +#include <linux/module.h>
16778 +#include <linux/version.h>
16779 +#include <linux/kernel.h>
16780 +#include <linux/types.h>
16781 +#include <linux/pci.h>
16782 +#include <linux/init.h>
16783 +#include <linux/skbuff.h>
16784 +#include <linux/if_vlan.h>
16785 +#include <linux/if_ether.h>
16786 +#include <linux/fs.h>
16787 +#include <asm/uaccess.h>
16788 +#include <asm/rt2880/surfboardint.h>
16789 +#if defined (CONFIG_RAETH_TSO)
16790 +#include <linux/tcp.h>
16791 +#include <net/ipv6.h>
16792 +#include <linux/ip.h>
16793 +#include <net/ip.h>
16794 +#include <net/tcp.h>
16795 +#include <linux/in.h>
16796 +#include <linux/ppp_defs.h>
16797 +#include <linux/if_pppox.h>
16799 +#if defined (CONFIG_RAETH_LRO)
16800 +#include <linux/inet_lro.h>
16802 +#include <linux/delay.h>
16803 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
16804 +#include <linux/sched.h>
16807 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0)
16808 +#include <asm/rt2880/rt_mmap.h>
16810 +#include <linux/libata-compat.h>
16813 +#include "ra2882ethreg.h"
16814 +#include "raether.h"
16815 +#include "ra_mac.h"
16816 +#include "ra_ioctl.h"
16817 +#include "ra_rfrw.h"
16818 +#ifdef CONFIG_RAETH_NETLINK
16819 +#include "ra_netlink.h"
16821 +#if defined (CONFIG_RAETH_QOS)
16822 +#include "ra_qos.h"
16825 +#if defined (CONFIG_RA_HW_NAT) || defined (CONFIG_RA_HW_NAT_MODULE)
16826 +#include "../../../net/nat/hw_nat/ra_nat.h"
16828 +#if defined(CONFIG_RAETH_PDMA_DVT)
16829 +#include "dvt/raether_pdma_dvt.h"
16830 +#endif /* CONFIG_RAETH_PDMA_DVT */
16832 +#if !defined(CONFIG_RA_NAT_NONE)
16835 +extern int (*ra_sw_nat_hook_rx)(struct sk_buff *skb);
16836 +extern int (*ra_sw_nat_hook_tx)(struct sk_buff *skb, int gmac_no);
16839 +#if defined(CONFIG_RA_CLASSIFIER)||defined(CONFIG_RA_CLASSIFIER_MODULE)
16842 +#include <asm/mipsregs.h>
16843 +extern int (*ra_classifier_hook_tx)(struct sk_buff *skb, unsigned long cur_cycle);
16844 +extern int (*ra_classifier_hook_rx)(struct sk_buff *skb, unsigned long cur_cycle);
16845 +#endif /* CONFIG_RA_CLASSIFIER */
16847 +#if defined (CONFIG_RALINK_RT3052_MP2)
16848 +int32_t mcast_rx(struct sk_buff * skb);
16849 +int32_t mcast_tx(struct sk_buff * skb);
16853 +#ifdef RA_MTD_RW_BY_NUM
16854 +int ra_mtd_read(int num, loff_t from, size_t len, u_char *buf);
16856 +int ra_mtd_read_nm(char *name, loff_t from, size_t len, u_char *buf);
16859 +/* gmac driver feature set config */
16860 +#if defined (CONFIG_RAETH_NAPI) || defined (CONFIG_RAETH_QOS)
16863 +#if defined (CONFIG_ARCH_MT7623)
16866 +#define DELAY_INT 1
16870 +//#define CONFIG_UNH_TEST
16871 +/* end of config */
16873 +#if defined (CONFIG_RAETH_JUMBOFRAME)
16874 +#define MAX_RX_LENGTH 4096
16876 +#define MAX_RX_LENGTH 1536
16879 +extern struct net_device *dev_raether;
16882 +#if defined (CONFIG_RAETH_MULTIPLE_RX_RING)
16883 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
16884 +extern int rx_calc_idx1;
16887 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
16888 +extern int rx_calc_idx0;
16889 +static unsigned long tx_cpu_owner_idx0=0;
16891 +extern unsigned long tx_ring_full;
16893 +#if defined (CONFIG_ETHTOOL) /*&& defined (CONFIG_RAETH_ROUTER)*/
16894 +#include "ra_ethtool.h"
16895 +extern struct ethtool_ops ra_ethtool_ops;
16896 +#ifdef CONFIG_PSEUDO_SUPPORT
16897 +extern struct ethtool_ops ra_virt_ethtool_ops; /* ethtool ops for the pseudo (second GMAC) device */
16898 +#endif // CONFIG_PSEUDO_SUPPORT //
16899 +#endif // CONFIG_ETHTOOL //
16901 +#ifdef CONFIG_RALINK_VISTA_BASIC
16902 +int is_switch_175c = 1;
16905 +#ifdef CONFIG_RAETH_PDMATX_QDMARX /* QDMA RX */
16906 +struct QDMA_txdesc *free_head = NULL;
16909 +//#if defined (CONFIG_RAETH_LRO)
16911 +unsigned int lan_ip;
16912 +struct lro_para_struct lro_para;
16913 +int lro_flush_needed;
16914 +extern char const *nvram_get(int index, char *name);
16917 +#define KSEG1 0xa0000000
16918 +#define PHYS_TO_VIRT(x) ((void *)((x) | KSEG1))
16919 +#define VIRT_TO_PHYS(x) ((unsigned long)(x) & ~KSEG1)
16921 +extern void set_fe_dma_glo_cfg(void);
16924 + * @brief cal txd number for a page
16928 + * @return frag_txd_num
16931 +unsigned int cal_frag_txd_num(unsigned int size) /* number of TX descriptors needed to carry a 'size'-byte fragment, MAX_TXD_LEN bytes per TXD */
16933 +	unsigned int frag_txd_num = 0; /* running TXD count; presumably incremented once per chunk — the increment lines are elided from this hunk view */
16937 +	if(size > MAX_TXD_LEN){ /* consume the fragment in MAX_TXD_LEN-sized chunks */
16939 +		size -= MAX_TXD_LEN;
16945 +	return frag_txd_num;
16949 +#ifdef CONFIG_RAETH_PDMATX_QDMARX /* QDMA RX */
16950 +bool fq_qdma_init(struct net_device *dev) /* builds the QDMA free-queue: a ring of QDMA_txdesc linked via NDP, each pointing at one QDMA_PAGE_SIZE page, then programs QDMA_FQ_* registers */
16952 +	END_DEVICE* ei_local = netdev_priv(dev);
16953 +	unsigned int phy_free_head; /* DMA (bus) address of the descriptor ring */
16954 +	unsigned int phy_free_tail;
16955 +	unsigned int *free_page_head = NULL; /* CPU address of the backing page pool */
16956 +	unsigned int phy_free_page_head; /* DMA (bus) address of the page pool */
16959 +	free_head = pci_alloc_consistent(NULL, NUM_QDMA_PAGE * sizeof(struct QDMA_txdesc), &phy_free_head); /* NOTE(review): NULL pdev passed to pci_alloc_consistent — SDK convention; confirm platform allows it */
16960 +	if (unlikely(free_head == NULL)){
16961 +		printk(KERN_ERR "QDMA FQ decriptor not available...\n"); /* NOTE(review): "decriptor" typo lives in a runtime string; left untouched here */
16964 +	memset(free_head, 0x0, sizeof(struct QDMA_txdesc) * NUM_QDMA_PAGE);
16966 +	free_page_head = pci_alloc_consistent(NULL, NUM_QDMA_PAGE * QDMA_PAGE_SIZE, &phy_free_page_head);
16967 +	if (unlikely(free_page_head == NULL)){
16968 +		printk(KERN_ERR "QDMA FQ page not available...\n");
16971 +	for (i=0; i < NUM_QDMA_PAGE; i++) {
16972 +		free_head[i].txd_info1.SDP = (phy_free_page_head + (i * QDMA_PAGE_SIZE)); /* each descriptor owns one page of the pool */
16973 +		if(i < (NUM_QDMA_PAGE-1)){
16974 +			free_head[i].txd_info2.NDP = (phy_free_head + ((i+1) * sizeof(struct QDMA_txdesc))); /* link to next descriptor; last entry's NDP handling is in lines elided from this view */
16978 +			printk("free_head_phy[%d] is 0x%x!!!\n",i, VIRT_TO_PHYS(&free_head[i]) );
16979 +			printk("free_head[%d] is 0x%x!!!\n",i, &free_head[i] ); /* NOTE(review): pointer printed with %x — should be %p; confirm before changing */
16980 +			printk("free_head[%d].txd_info1.SDP is 0x%x!!!\n",i, free_head[i].txd_info1.SDP );
16981 +			printk("free_head[%d].txd_info2.NDP is 0x%x!!!\n",i, free_head[i].txd_info2.NDP );
16984 +		free_head[i].txd_info3.SDL = QDMA_PAGE_SIZE; /* buffer length per descriptor */
16987 +	phy_free_tail = (phy_free_head + (u32)((NUM_QDMA_PAGE-1) * sizeof(struct QDMA_txdesc)));
16989 +	printk("phy_free_head is 0x%x!!!\n", phy_free_head);
16990 +	printk("phy_free_tail_phy is 0x%x!!!\n", phy_free_tail);
16991 +	sysRegWrite(QDMA_FQ_HEAD, (u32)phy_free_head);
16992 +	sysRegWrite(QDMA_FQ_TAIL, (u32)phy_free_tail);
16993 +	sysRegWrite(QDMA_FQ_CNT, ((NUM_TX_DESC << 16) | NUM_QDMA_PAGE));
16994 +	sysRegWrite(QDMA_FQ_BLEN, QDMA_PAGE_SIZE << 16);
16996 +	ei_local->free_head = free_head; /* stash CPU/DMA addresses so the teardown path can free them */
16997 +	ei_local->phy_free_head = phy_free_head;
16998 +	ei_local->free_page_head = free_page_head;
16999 +	ei_local->phy_free_page_head = phy_free_page_head;
17004 +int fe_dma_init(struct net_device *dev) /* allocates and programs all PDMA/QDMA TX and RX descriptor rings, then applies global DMA config; several interior lines are elided from this hunk view */
17008 +	unsigned int regVal;
17009 +	END_DEVICE* ei_local = netdev_priv(dev);
17010 +#if defined (CONFIG_RAETH_QOS)
17016 +	regVal = sysRegRead(PDMA_GLO_CFG); /* warn (and presumably wait — loop body elided) while PDMA engines are still busy */
17017 +	if((regVal & RX_DMA_BUSY))
17019 +		printk("\n  RX_DMA_BUSY !!! ");
17022 +	if((regVal & TX_DMA_BUSY))
17024 +		printk("\n  TX_DMA_BUSY !!! ");
17030 +#if defined(CONFIG_RAETH_PDMA_DVT)
17031 +	pdma_dvt_set_dma_mode();
17032 +#endif	/* CONFIG_RAETH_PDMA_DVT */
17034 +#if defined (CONFIG_RAETH_QOS)
17035 +	for (i=0;i<NUM_TX_RINGS;i++){ /* QOS mode: per-ring skb bookkeeping tables */
17036 +		for (j=0;j<NUM_TX_DESC;j++){
17037 +			ei_local->skb_free[i][j]=0;
17039 +		ei_local->free_idx[i]=0;
17042 +	 * RT2880: 2 x TX_Ring, 1 x Rx_Ring
17043 +	 * RT2883: 4 x TX_Ring, 1 x Rx_Ring
17044 +	 * RT3883: 4 x TX_Ring, 1 x Rx_Ring
17045 +	 * RT3052: 4 x TX_Ring, 1 x Rx_Ring
17047 +	fe_tx_desc_init(dev, 0, 3, 1);
17048 +	if (ei_local->tx_ring0 == NULL) {
17049 +		printk("RAETH: tx ring0 allocation failed\n");
17053 +	fe_tx_desc_init(dev, 1, 3, 1);
17054 +	if (ei_local->tx_ring1 == NULL) {
17055 +		printk("RAETH: tx ring1 allocation failed\n");
17059 +	printk("\nphy_tx_ring0 = %08x, tx_ring0 = %p, size: %d bytes\n", ei_local->phy_tx_ring0, ei_local->tx_ring0, sizeof(struct PDMA_txdesc));
17061 +	printk("\nphy_tx_ring1 = %08x, tx_ring1 = %p, size: %d bytes\n", ei_local->phy_tx_ring1, ei_local->tx_ring1, sizeof(struct PDMA_txdesc));
17063 +#if ! defined (CONFIG_RALINK_RT2880)
17064 +	fe_tx_desc_init(dev, 2, 3, 1);
17065 +	if (ei_local->tx_ring2 == NULL) {
17066 +		printk("RAETH: tx ring2 allocation failed\n");
17070 +	fe_tx_desc_init(dev, 3, 3, 1);
17071 +	if (ei_local->tx_ring3 == NULL) {
17072 +		printk("RAETH: tx ring3 allocation failed\n");
17076 +	printk("\nphy_tx_ring2 = %08x, tx_ring2 = %p, size: %d bytes\n", ei_local->phy_tx_ring2, ei_local->tx_ring2, sizeof(struct PDMA_txdesc));
17078 +	printk("\nphy_tx_ring3 = %08x, tx_ring3 = %p, size: %d bytes\n", ei_local->phy_tx_ring3, ei_local->tx_ring3, sizeof(struct PDMA_txdesc));
17080 +#endif // CONFIG_RALINK_RT2880 //
17082 +	for (i=0;i<NUM_TX_DESC;i++){ /* non-QOS mode: single flat skb_free[] table */
17083 +		ei_local->skb_free[i]=0;
17085 +	ei_local->free_idx =0;
17086 +#if defined (CONFIG_MIPS)
17087 +	ei_local->tx_ring0 = pci_alloc_consistent(NULL, NUM_TX_DESC * sizeof(struct PDMA_txdesc), &ei_local->phy_tx_ring0);
17089 +	ei_local->tx_ring0 = dma_alloc_coherent(NULL, NUM_TX_DESC * sizeof(struct PDMA_txdesc), &ei_local->phy_tx_ring0, GFP_KERNEL);
17091 +	printk("\nphy_tx_ring = 0x%08x, tx_ring = 0x%p\n", ei_local->phy_tx_ring0, ei_local->tx_ring0);
17093 +	for (i=0; i < NUM_TX_DESC; i++) {
17094 +		memset(&ei_local->tx_ring0[i],0,sizeof(struct PDMA_txdesc));
17095 +		ei_local->tx_ring0[i].txd_info2.LS0_bit = 1;
17096 +		ei_local->tx_ring0[i].txd_info2.DDONE_bit = 1; /* DDONE=1 marks the descriptor as free for the CPU to fill */
17099 +#endif // CONFIG_RAETH_QOS
17101 +#ifdef CONFIG_RAETH_PDMATX_QDMARX /* QDMA RX */
17103 +	fq_qdma_init(dev); /* NOTE(review): bool return value ignored — allocation failure inside goes unnoticed here; confirm intended */
17107 +	regVal = sysRegRead(QDMA_GLO_CFG);
17108 +	if((regVal & RX_DMA_BUSY))
17110 +		printk("\n  RX_DMA_BUSY !!! ");
17113 +	if((regVal & TX_DMA_BUSY))
17115 +		printk("\n  TX_DMA_BUSY !!! ");
17121 +	/* Initial RX Ring 0*/
17123 +#ifdef CONFIG_32B_DESC
17124 +	ei_local->qrx_ring = kmalloc(NUM_QRX_DESC * sizeof(struct PDMA_rxdesc), GFP_KERNEL);
17125 +	ei_local->phy_qrx_ring = virt_to_phys(ei_local->qrx_ring);
17127 +	ei_local->qrx_ring = pci_alloc_consistent(NULL, NUM_QRX_DESC * sizeof(struct PDMA_rxdesc), &ei_local->phy_qrx_ring);
17129 +	for (i = 0; i < NUM_QRX_DESC; i++) {
17130 +		memset(&ei_local->qrx_ring[i],0,sizeof(struct PDMA_rxdesc));
17131 +		ei_local->qrx_ring[i].rxd_info2.DDONE_bit = 0; /* DDONE=0 hands the RX descriptor to the DMA engine */
17132 +#if defined (CONFIG_RAETH_SCATTER_GATHER_RX_DMA)
17133 +		ei_local->qrx_ring[i].rxd_info2.LS0 = 0;
17134 +		ei_local->qrx_ring[i].rxd_info2.PLEN0 = MAX_RX_LENGTH;
17136 +		ei_local->qrx_ring[i].rxd_info2.LS0 = 1;
17138 +		ei_local->qrx_ring[i].rxd_info1.PDP0 = dma_map_single(NULL, ei_local->netrx0_skbuf[i]->data, MAX_RX_LENGTH, PCI_DMA_FROMDEVICE);
17140 +	printk("\nphy_qrx_ring = 0x%08x, qrx_ring = 0x%p\n",ei_local->phy_qrx_ring,ei_local->qrx_ring);
17142 +	regVal = sysRegRead(QDMA_GLO_CFG);
17143 +	regVal &= 0x000000FF; /* clear the high configuration bits, keep low byte */
17145 +        sysRegWrite(QDMA_GLO_CFG, regVal);
17146 +	regVal=sysRegRead(QDMA_GLO_CFG);
17148 +	/* Tell the adapter where the TX/RX rings are located. */
17150 +	sysRegWrite(QRX_BASE_PTR_0, phys_to_bus((u32) ei_local->phy_qrx_ring));
17151 +	sysRegWrite(QRX_MAX_CNT_0,  cpu_to_le32((u32) NUM_QRX_DESC));
17152 +	sysRegWrite(QRX_CRX_IDX_0, cpu_to_le32((u32) (NUM_QRX_DESC - 1)));
17153 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
17154 +	rx_calc_idx0 = rx_dma_owner_idx0 =  sysRegRead(QRX_CRX_IDX_0);
17156 +	sysRegWrite(QDMA_RST_CFG, PST_DRX_IDX0);
17158 +	ei_local->rx_ring0 = ei_local->qrx_ring; /* alias so the common RX path works unchanged in QDMA-RX mode */
17160 +#else /* PDMA RX */
17162 +	/* Initial RX Ring 0*/
17163 +#ifdef CONFIG_32B_DESC
17164 +	ei_local->rx_ring0 = kmalloc(NUM_RX_DESC * sizeof(struct PDMA_rxdesc), GFP_KERNEL);
17165 +	ei_local->phy_rx_ring0 = virt_to_phys(ei_local->rx_ring0);
17167 +#if defined (CONFIG_MIPS)
17168 +	ei_local->rx_ring0 = pci_alloc_consistent(NULL, NUM_RX_DESC * sizeof(struct PDMA_rxdesc), &ei_local->phy_rx_ring0);
17170 +	ei_local->rx_ring0 = dma_alloc_coherent(NULL, NUM_RX_DESC * sizeof(struct PDMA_rxdesc), &ei_local->phy_rx_ring0, GFP_KERNEL);
17173 +	for (i = 0; i < NUM_RX_DESC; i++) {
17174 +		memset(&ei_local->rx_ring0[i],0,sizeof(struct PDMA_rxdesc));
17175 +		ei_local->rx_ring0[i].rxd_info2.DDONE_bit = 0;
17176 +#if defined (CONFIG_RAETH_SCATTER_GATHER_RX_DMA)
17177 +		ei_local->rx_ring0[i].rxd_info2.LS0 = 0;
17178 +		ei_local->rx_ring0[i].rxd_info2.PLEN0 = MAX_RX_LENGTH;
17180 +		ei_local->rx_ring0[i].rxd_info2.LS0 = 1;
17182 +		ei_local->rx_ring0[i].rxd_info1.PDP0 = dma_map_single(NULL, ei_local->netrx0_skbuf[i]->data, MAX_RX_LENGTH, PCI_DMA_FROMDEVICE);
17184 +	printk("\nphy_rx_ring0 = 0x%08x, rx_ring0 = 0x%p\n",ei_local->phy_rx_ring0,ei_local->rx_ring0);
17186 +#if defined (CONFIG_RAETH_MULTIPLE_RX_RING)
17187 +	/* Initial RX Ring 1*/
17188 +#ifdef CONFIG_32B_DESC
17189 +	ei_local->rx_ring1 = kmalloc(NUM_RX_DESC * sizeof(struct PDMA_rxdesc), GFP_KERNEL);
17190 +	ei_local->phy_rx_ring1 = virt_to_phys(ei_local->rx_ring1);
17192 +#if defined (CONFIG_MIPS)
17193 +	ei_local->rx_ring1 = pci_alloc_consistent(NULL, NUM_RX_DESC * sizeof(struct PDMA_rxdesc), &ei_local->phy_rx_ring1);
17195 +	ei_local->rx_ring1 = dma_alloc_coherent(NULL, NUM_RX_DESC * sizeof(struct PDMA_rxdesc), &ei_local->phy_rx_ring1, GFP_KERNEL);
17199 +	for (i = 0; i < NUM_RX_DESC; i++) {
17200 +		memset(&ei_local->rx_ring1[i],0,sizeof(struct PDMA_rxdesc));
17201 +		ei_local->rx_ring1[i].rxd_info2.DDONE_bit = 0;
17202 +#if defined (CONFIG_RAETH_SCATTER_GATHER_RX_DMA)
17203 +		ei_local->rx_ring1[i].rxd_info2.LS0 = 0;
17204 +		ei_local->rx_ring1[i].rxd_info2.PLEN0 = MAX_RX_LENGTH;
17206 +		ei_local->rx_ring1[i].rxd_info2.LS0 = 1;
17208 +		ei_local->rx_ring1[i].rxd_info1.PDP0 = dma_map_single(NULL, ei_local->netrx1_skbuf[i]->data, MAX_RX_LENGTH, PCI_DMA_FROMDEVICE);
17210 +	printk("\nphy_rx_ring1 = 0x%08x, rx_ring1 = 0x%p\n",ei_local->phy_rx_ring1,ei_local->rx_ring1);
17211 +#if defined(CONFIG_ARCH_MT7623)
17212 +	/* Initial RX Ring 2*/
17213 +	ei_local->rx_ring2 = pci_alloc_consistent(NULL, NUM_RX_DESC * sizeof(struct PDMA_rxdesc), &ei_local->phy_rx_ring2); /* NOTE(review): result not NULL-checked, unlike the tx ring paths above */
17214 +	for (i = 0; i < NUM_RX_DESC; i++) {
17215 +		memset(&ei_local->rx_ring2[i],0,sizeof(struct PDMA_rxdesc));
17216 +		ei_local->rx_ring2[i].rxd_info2.DDONE_bit = 0;
17217 +		ei_local->rx_ring2[i].rxd_info2.LS0 = 0;
17218 +		ei_local->rx_ring2[i].rxd_info2.PLEN0 = SET_ADMA_RX_LEN0(MAX_RX_LENGTH);
17219 +		ei_local->rx_ring2[i].rxd_info2.PLEN1 = SET_ADMA_RX_LEN1(MAX_RX_LENGTH >> 14);
17220 +		ei_local->rx_ring2[i].rxd_info1.PDP0 = dma_map_single(NULL, ei_local->netrx2_skbuf[i]->data, MAX_RX_LENGTH, PCI_DMA_FROMDEVICE);
17222 +	printk("\nphy_rx_ring2 = 0x%08x, rx_ring2 = 0x%p\n",ei_local->phy_rx_ring2,ei_local->rx_ring2);
17223 +	/* Initial RX Ring 3*/
17224 +	ei_local->rx_ring3 = pci_alloc_consistent(NULL, NUM_RX_DESC * sizeof(struct PDMA_rxdesc), &ei_local->phy_rx_ring3); /* NOTE(review): result not NULL-checked */
17225 +	for (i = 0; i < NUM_RX_DESC; i++) {
17226 +		memset(&ei_local->rx_ring3[i],0,sizeof(struct PDMA_rxdesc));
17227 +		ei_local->rx_ring3[i].rxd_info2.DDONE_bit = 0;
17228 +		ei_local->rx_ring3[i].rxd_info2.LS0 = 0;
17229 +		ei_local->rx_ring3[i].rxd_info2.PLEN0 = SET_ADMA_RX_LEN0(MAX_RX_LENGTH);
17230 +		ei_local->rx_ring3[i].rxd_info2.PLEN1 = SET_ADMA_RX_LEN1(MAX_RX_LENGTH >> 14);
17231 +		ei_local->rx_ring3[i].rxd_info1.PDP0 = dma_map_single(NULL, ei_local->netrx3_skbuf[i]->data, MAX_RX_LENGTH, PCI_DMA_FROMDEVICE);
17233 +	printk("\nphy_rx_ring3 = 0x%08x, rx_ring3 = 0x%p\n",ei_local->phy_rx_ring3,ei_local->rx_ring3);
17234 +#endif  /* CONFIG_ARCH_MT7623 */
17239 +	regVal = sysRegRead(PDMA_GLO_CFG);
17240 +	regVal &= 0x000000FF;
17241 +        sysRegWrite(PDMA_GLO_CFG, regVal);
17242 +	regVal=sysRegRead(PDMA_GLO_CFG);
17244 +	/* Tell the adapter where the TX/RX rings are located. */
17245 +#if !defined (CONFIG_RAETH_QOS)
17246 +	sysRegWrite(TX_BASE_PTR0, phys_to_bus((u32) ei_local->phy_tx_ring0));
17247 +	sysRegWrite(TX_MAX_CNT0, cpu_to_le32((u32) NUM_TX_DESC));
17248 +	sysRegWrite(TX_CTX_IDX0, 0);
17249 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
17250 +	tx_cpu_owner_idx0 = 0;
17252 +	sysRegWrite(PDMA_RST_CFG, PST_DTX_IDX0); /* reset the DMA-side TX index so HW and SW start in sync */
17255 +#ifdef CONFIG_RAETH_PDMATX_QDMARX /* QDMA RX */
17256 +	sysRegWrite(QRX_BASE_PTR_0, phys_to_bus((u32) ei_local->phy_qrx_ring));
17257 +	sysRegWrite(QRX_MAX_CNT_0,  cpu_to_le32((u32) NUM_QRX_DESC));
17258 +	sysRegWrite(QRX_CRX_IDX_0, cpu_to_le32((u32) (NUM_QRX_DESC - 1)));
17259 +#else /* PDMA RX */
17260 +	sysRegWrite(RX_BASE_PTR0, phys_to_bus((u32) ei_local->phy_rx_ring0));
17261 +	sysRegWrite(RX_MAX_CNT0,  cpu_to_le32((u32) NUM_RX_DESC));
17262 +	sysRegWrite(RX_CALC_IDX0, cpu_to_le32((u32) (NUM_RX_DESC - 1)));
17265 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
17266 +	rx_calc_idx0 =  sysRegRead(RX_CALC_IDX0);
17268 +	sysRegWrite(PDMA_RST_CFG, PST_DRX_IDX0);
17269 +#if defined (CONFIG_RAETH_MULTIPLE_RX_RING)
17270 +	sysRegWrite(RX_BASE_PTR1, phys_to_bus((u32) ei_local->phy_rx_ring1));
17271 +	sysRegWrite(RX_MAX_CNT1,  cpu_to_le32((u32) NUM_RX_DESC));
17272 +	sysRegWrite(RX_CALC_IDX1, cpu_to_le32((u32) (NUM_RX_DESC - 1)));
17273 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
17274 +	rx_calc_idx1 =  sysRegRead(RX_CALC_IDX1);
17276 +	sysRegWrite(PDMA_RST_CFG, PST_DRX_IDX1);
17277 +#if defined(CONFIG_ARCH_MT7623)
17278 +	sysRegWrite(RX_BASE_PTR2, phys_to_bus((u32) ei_local->phy_rx_ring2));
17279 +	sysRegWrite(RX_MAX_CNT2,  cpu_to_le32((u32) NUM_RX_DESC));
17280 +	sysRegWrite(RX_CALC_IDX2, cpu_to_le32((u32) (NUM_RX_DESC - 1)));
17281 +	sysRegWrite(PDMA_RST_CFG, PST_DRX_IDX2);
17282 +	sysRegWrite(RX_BASE_PTR3, phys_to_bus((u32) ei_local->phy_rx_ring3));
17283 +	sysRegWrite(RX_MAX_CNT3,  cpu_to_le32((u32) NUM_RX_DESC));
17284 +	sysRegWrite(RX_CALC_IDX3, cpu_to_le32((u32) (NUM_RX_DESC - 1)));
17285 +	sysRegWrite(PDMA_RST_CFG, PST_DRX_IDX3);
17286 +#endif  /* CONFIG_ARCH_MT7623 */
17288 +#if defined (CONFIG_RALINK_RT6855A)
17289 +	regVal = sysRegRead(RX_DRX_IDX0); /* RT6855A quirk: derive CPU indices from the HW's current DMA indices instead of resetting */
17290 +	regVal = (regVal == 0)? (NUM_RX_DESC - 1) : (regVal - 1);
17291 +	sysRegWrite(RX_CALC_IDX0, cpu_to_le32(regVal));
17292 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
17293 +	rx_calc_idx0 = sysRegRead(RX_CALC_IDX0);
17295 +	regVal = sysRegRead(TX_DTX_IDX0);
17296 +	sysRegWrite(TX_CTX_IDX0, cpu_to_le32(regVal));
17297 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
17298 +	tx_cpu_owner_idx0 = regVal;
17300 +	ei_local->free_idx = regVal;
17303 +#if defined (CONFIG_RAETH_QOS)
17304 +	set_scheduler_weight();
17305 +	set_schedule_pause_condition();
17306 +	set_output_shaper();
17309 +	set_fe_dma_glo_cfg();
17314 +inline int rt2880_eth_send(struct net_device* dev, struct sk_buff *skb, int gmac_no) /* fills PDMA TX descriptors for one skb (linear part + SG fragments under TSO) and kicks TX_CTX_IDX0; caller holds page_lock */
17316 +	unsigned int length=skb->len;
17317 +	END_DEVICE* ei_local = netdev_priv(dev);
17318 +#ifndef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
17319 +	unsigned long tx_cpu_owner_idx0 = sysRegRead(TX_CTX_IDX0);
17321 +#if defined (CONFIG_RAETH_TSO)
17322 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
17323 +	unsigned long ctx_idx_start_addr = tx_cpu_owner_idx0; /* remember the first TXD so TSO flags can be set on it after the fragment loop */
17325 +	struct iphdr *iph = NULL;
17326 +	struct tcphdr *th = NULL;
17327 +	struct skb_frag_struct *frag;
17328 +	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
17330 +	unsigned int len, size, offset, frag_txd_num, skb_txd_num ;
17331 +#endif // CONFIG_RAETH_TSO //
17333 +#if defined (CONFIG_RAETH_TSOV6)
17334 +	struct ipv6hdr *ip6h = NULL;
17337 +#ifdef CONFIG_PSEUDO_SUPPORT
17338 +	PSEUDO_ADAPTER *pAd;
17341 +	while(ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info2.DDONE_bit == 0) /* busy-wait until the HW releases this descriptor */
17343 +#ifdef CONFIG_PSEUDO_SUPPORT
17344 +		if (gmac_no == 2) {
17345 +			if (ei_local->PseudoDev != NULL) {
17346 +				pAd = netdev_priv(ei_local->PseudoDev);
17347 +				pAd->stat.tx_errors++;
17351 +			ei_local->stat.tx_errors++;
17354 +#if !defined (CONFIG_RAETH_TSO)
17355 +	ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info1.SDP0 = virt_to_phys(skb->data);
17356 +	ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info2.SDL0 = length;
17357 +#if defined (CONFIG_RALINK_MT7620)
17358 +	ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.FP_BMAP = 0;
17359 +#elif defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
17360 +	if (gmac_no == 1) {
17361 +		ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.FPORT = 1;
17363 +		ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.FPORT = 2;
17366 +	ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.PN = gmac_no;
17367 +	ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.QN = 3;
17370 +#if defined (CONFIG_RAETH_CHECKSUM_OFFLOAD) && ! defined(CONFIG_RALINK_RT5350) && !defined(CONFIG_RALINK_MT7628)
17371 +	if (skb->ip_summed == CHECKSUM_PARTIAL){
17372 +		ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.TUI_CO = 7; /* enable TCP/UDP/IP checksum generation */
17374 +		ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.TUI_CO = 0;
17378 +#ifdef CONFIG_RAETH_HW_VLAN_TX
17379 +	if(vlan_tx_tag_present(skb)) {
17380 +#if defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
17381 +	    ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.VLAN_TAG = 0x10000 | vlan_tx_tag_get(skb);
17383 +	    ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.VPRI_VIDX = 0x80 | (vlan_tx_tag_get(skb) >> 13) << 4 | (vlan_tx_tag_get(skb) & 0xF);
17386 +#if defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
17387 +	    ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.VLAN_TAG = 0;
17389 +	    ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.VPRI_VIDX = 0;
17394 +#if defined(CONFIG_RAETH_PDMA_DVT)
17395 +	raeth_pdma_tx_vlan_dvt( ei_local, tx_cpu_owner_idx0 );
17396 +#endif	/* CONFIG_RAETH_PDMA_DVT */
17398 +#if defined (CONFIG_RA_HW_NAT) || defined (CONFIG_RA_HW_NAT_MODULE)
17399 +	if(FOE_MAGIC_TAG(skb) == FOE_MAGIC_PPE) {
17400 +	    if(ra_sw_nat_hook_rx!= NULL){
17401 +#if defined (CONFIG_RALINK_MT7620)
17402 +		ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.FP_BMAP = (1 << 7); /* PPE */
17403 +#elif defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
17404 +		ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.FPORT = 4; /* PPE */
17406 +		ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.PN = 6; /* PPE */
17408 +		FOE_MAGIC_TAG(skb) = 0;
17413 +#if defined(CONFIG_RAETH_PDMA_DVT)
17414 +	raeth_pdma_tx_desc_dvt( ei_local, tx_cpu_owner_idx0 );
17415 +#endif	/* CONFIG_RAETH_PDMA_DVT */
17417 +	ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info2.DDONE_bit = 0; /* hand the descriptor to the DMA engine */
17420 +	printk("---------------\n");
17421 +	printk("tx_info1=%x\n",ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info1);
17422 +	printk("tx_info2=%x\n",ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info2);
17423 +	printk("tx_info3=%x\n",ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info3);
17424 +	printk("tx_info4=%x\n",ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4);
17428 +	ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info1.SDP0 = virt_to_phys(skb->data);
17429 +	ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info2.SDL0 = (length - skb->data_len); /* linear part only; fragments follow below */
17430 +	ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info2.LS0_bit = nr_frags ? 0:1;
17431 +#if defined (CONFIG_RALINK_MT7620)
17432 +	ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.FP_BMAP = 0;
17433 +#elif defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
17434 +	if (gmac_no == 1) {
17435 +		ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.FPORT = 1;
17437 +		ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.FPORT = 2;
17440 +	ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.PN = gmac_no;
17441 +	ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.QN = 3;
17443 +	ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.TSO = 0;
17445 +#if defined (CONFIG_RAETH_CHECKSUM_OFFLOAD) && ! defined(CONFIG_RALINK_RT5350) && !defined(CONFIG_RALINK_MT7628)
17446 +	if (skb->ip_summed == CHECKSUM_PARTIAL){
17447 +	    ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.TUI_CO = 7;
17449 +	    ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.TUI_CO = 0;
17453 +#ifdef CONFIG_RAETH_HW_VLAN_TX
17454 +	if(vlan_tx_tag_present(skb)) {
17455 +#if defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
17456 +	    ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.VLAN_TAG = 0x10000 | vlan_tx_tag_get(skb);
17458 +	    ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.VPRI_VIDX = 0x80 | (vlan_tx_tag_get(skb) >> 13) << 4 | (vlan_tx_tag_get(skb) & 0xF);
17461 +#if defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
17462 +	    ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.VLAN_TAG = 0;
17464 +	    ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.VPRI_VIDX = 0;
17469 +#if defined(CONFIG_RAETH_PDMA_DVT)
17470 +	raeth_pdma_tx_vlan_dvt( ei_local, tx_cpu_owner_idx0 );
17471 +#endif	/* CONFIG_RAETH_PDMA_DVT */
17473 +#if defined (CONFIG_RA_HW_NAT) || defined (CONFIG_RA_HW_NAT_MODULE)
17474 +	if(FOE_MAGIC_TAG(skb) == FOE_MAGIC_PPE) {
17475 +	    if(ra_sw_nat_hook_rx!= NULL){
17476 +#if defined (CONFIG_RALINK_MT7620)
17477 +		ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.FP_BMAP = (1 << 7); /* PPE */
17478 +#elif defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
17479 +		ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.FPORT = 4; /* PPE */
17481 +		ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.PN = 6; /* PPE */
17483 +		FOE_MAGIC_TAG(skb) = 0;
17490 +	if(nr_frags > 0) {
17492 +		for(i=0;i<nr_frags;i++) {
17493 +			frag = &skb_shinfo(skb)->frags[i];
17494 +			offset = frag->page_offset;
17495 +			len = frag->size;
17496 +			frag_txd_num = cal_frag_txd_num(len); /* descriptors needed for this fragment (MAX_TXD_LEN per TXD slot) */
17498 +			while(frag_txd_num > 0){
17499 +				if(len < MAX_TXD_LEN)
17502 +					size = MAX_TXD_LEN;
17503 +				if(skb_txd_num%2 == 0) { /* even chunks go into SDP0/SDL0 of a fresh descriptor, odd chunks into SDP1/SDL1 of the same one */
17504 +					tx_cpu_owner_idx0 = (tx_cpu_owner_idx0+1) % NUM_TX_DESC;
17506 +					while(ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info2.DDONE_bit == 0)
17508 +#ifdef CONFIG_PSEUDO_SUPPORT /* FIX: was case-mangled 'config_pseudo_support' — never defined, so this error accounting was dead code (and would not compile if enabled); restored to match lines 17343-17347 */
17509 +						if (gmac_no == 2) {
17510 +							if (ei_local->PseudoDev != NULL) {
17511 +								pAd = netdev_priv(ei_local->PseudoDev);
17512 +								pAd->stat.tx_errors++;
17516 +							ei_local->stat.tx_errors++;
17519 +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0)
17520 +					ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info1.SDP0 = pci_map_page(NULL, frag->page, offset, size, PCI_DMA_TODEVICE);
17522 +					ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info1.SDP0 = pci_map_page(NULL, frag->page.p, offset, size, PCI_DMA_TODEVICE);
17524 +					ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info2.SDL0 = size;
17526 +					if( (i==(nr_frags-1)) && (frag_txd_num == 1))
17527 +						ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info2.LS0_bit = 1; /* last chunk of last fragment ends the packet */
17529 +						ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info2.LS0_bit = 0;
17530 +					ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info2.DDONE_bit = 0;
17532 +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0)
17533 +					ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info3.SDP1 = pci_map_page(NULL, frag->page, offset, size, PCI_DMA_TODEVICE);
17535 +					ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info3.SDP1 = pci_map_page(NULL, frag->page.p, offset, size, PCI_DMA_TODEVICE);
17538 +					ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info2.SDL1 = size;
17539 +					if( (i==(nr_frags-1)) && (frag_txd_num == 1))
17540 +						ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info2.LS1_bit = 1;
17542 +						ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info2.LS1_bit = 0;
17552 +#if defined(CONFIG_RAETH_PDMA_DVT)
17553 +		if( (pdma_dvt_get_debug_test_config() & PDMA_TEST_TSO_DEBUG) ){
17554 +			printk("skb_shinfo(skb)->gso_segs = %d\n", skb_shinfo(skb)->gso_segs);
17556 +#endif	/* CONFIG_RAETH_PDMA_DVT */
17557 +		/* fill in MSS info in tcp checksum field */
17558 +		if(skb_shinfo(skb)->gso_segs > 1) {
17560 +//			TsoLenUpdate(skb->len);
17562 +			/* TCP over IPv4 */
17563 +			iph = (struct iphdr *)skb_network_header(skb);
17564 +#if defined (CONFIG_RAETH_TSOV6)
17565 +			/* TCP over IPv6 */
17566 +			ip6h = (struct ipv6hdr *)skb_network_header(skb);
17568 +			if((iph->version == 4) && (iph->protocol == IPPROTO_TCP)) {
17569 +				th = (struct tcphdr *)skb_transport_header(skb);
17570 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
17571 +				ei_local->tx_ring0[ctx_idx_start_addr].txd_info4.TSO = 1;
17573 +				ei_local->tx_ring0[sysRegRead(TX_CTX_IDX0)].txd_info4.TSO = 1;
17575 +				th->check = htons(skb_shinfo(skb)->gso_size); /* HW contract: MSS is passed via the TCP checksum field */
17576 +#if defined (CONFIG_MIPS)
17577 +				dma_cache_sync(NULL, th, sizeof(struct tcphdr), DMA_TO_DEVICE);
17579 +				dma_sync_single_for_device(NULL, virt_to_phys(th), sizeof(struct tcphdr), DMA_TO_DEVICE);
17583 +#if defined (CONFIG_RAETH_TSOV6)
17584 +			/* TCP over IPv6 */
17585 +			else if ((ip6h->version == 6) && (ip6h->nexthdr == NEXTHDR_TCP)) {
17586 +				th = (struct tcphdr *)skb_transport_header(skb);
17587 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
17588 +				ei_local->tx_ring0[ctx_idx_start_addr].txd_info4.TSO = 1;
17590 +				ei_local->tx_ring0[sysRegRead(TX_CTX_IDX0)].txd_info4.TSO = 1;
17592 +				th->check = htons(skb_shinfo(skb)->gso_size);
17593 +#if defined (CONFIG_MIPS)
17594 +				dma_cache_sync(NULL, th, sizeof(struct tcphdr), DMA_TO_DEVICE);
17596 +				dma_sync_single_for_device(NULL, virt_to_phys(th), sizeof(struct tcphdr), DMA_TO_DEVICE);
17599 +#endif // CONFIG_RAETH_TSOV6 //
17602 +#if defined(CONFIG_RAETH_PDMA_DVT)
17603 +	raeth_pdma_tx_desc_dvt( ei_local, tx_cpu_owner_idx0 );
17604 +#endif	/* CONFIG_RAETH_PDMA_DVT */
17606 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
17607 +	ei_local->tx_ring0[ctx_idx_start_addr].txd_info2.DDONE_bit = 0; /* release the first descriptor last so HW sees a complete chain */
17609 +	ei_local->tx_ring0[sysRegRead(TX_CTX_IDX0)].txd_info2.DDONE_bit = 0;
17611 +#endif // CONFIG_RAETH_TSO //
17613 +	tx_cpu_owner_idx0 = (tx_cpu_owner_idx0+1) % NUM_TX_DESC;
17614 +	while(ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info2.DDONE_bit == 0)
17616 +//		printk(KERN_ERR "%s: TXD=%lu TX DMA is Busy !!\n", dev->name, tx_cpu_owner_idx0);
17617 +#ifdef CONFIG_PSEUDO_SUPPORT
17618 +		if (gmac_no == 2) {
17619 +			if (ei_local->PseudoDev != NULL) {
17620 +				pAd = netdev_priv(ei_local->PseudoDev);
17621 +				pAd->stat.tx_errors++;
17625 +			ei_local->stat.tx_errors++;
17627 +	sysRegWrite(TX_CTX_IDX0, cpu_to_le32((u32)tx_cpu_owner_idx0)); /* doorbell: publish new CPU index to the DMA engine */
17629 +#ifdef CONFIG_PSEUDO_SUPPORT
17630 +	if (gmac_no == 2) {
17631 +		if (ei_local->PseudoDev != NULL) {
17632 +			pAd = netdev_priv(ei_local->PseudoDev);
17633 +			pAd->stat.tx_packets++;
17634 +			pAd->stat.tx_bytes += length;
17639 +		ei_local->stat.tx_packets++;
17640 +		ei_local->stat.tx_bytes += length;
17642 +#ifdef CONFIG_RAETH_NAPI
17643 +	if ( ei_local->tx_full == 1) {
17644 +		ei_local->tx_full = 0;
17645 +		netif_wake_queue(dev);
17652 +int ei_start_xmit(struct sk_buff* skb, struct net_device *dev, int gmac_no) /* driver TX entry: runs HW-NAT/classifier hooks, pads short frames, reserves descriptor slots, then hands the skb to rt2880_eth_send() (or the QOS path) under page_lock */
17654 +	END_DEVICE *ei_local = netdev_priv(dev);
17655 +	unsigned long flags;
17656 +	unsigned long tx_cpu_owner_idx;
17657 +	unsigned int tx_cpu_owner_idx_next;
17658 +	unsigned int num_of_txd = 0;
17659 +#if defined (CONFIG_RAETH_TSO)
17660 +	unsigned int nr_frags = skb_shinfo(skb)->nr_frags, i;
17661 +	struct skb_frag_struct *frag;
17663 +#if !defined(CONFIG_RAETH_QOS)
17664 +	unsigned int tx_cpu_owner_idx_next2;
17666 +	int ring_no, queue_no, port_no;
17668 +#ifdef CONFIG_RALINK_VISTA_BASIC
17669 +	struct vlan_ethhdr *veth;
17671 +#ifdef CONFIG_PSEUDO_SUPPORT
17672 +	PSEUDO_ADAPTER *pAd;
17675 +#if !defined(CONFIG_RA_NAT_NONE)
17676 +	if(ra_sw_nat_hook_tx!= NULL)
17678 +#if defined (CONFIG_RA_HW_NAT) || defined (CONFIG_RA_HW_NAT_MODULE)
17679 +	  if(FOE_MAGIC_TAG(skb) != FOE_MAGIC_PPE) /* skbs already tagged for the PPE bypass the SW NAT hook */
17682 +	    //spin_lock_irqsave(&ei_local->page_lock, flags);
17683 +	    if(ra_sw_nat_hook_tx(skb, gmac_no)==1){ /* hook may consume/redirect the skb; return path for that case is in elided lines */
17684 +	    	//spin_unlock_irqrestore(&ei_local->page_lock, flags);
17687 +		//spin_unlock_irqrestore(&ei_local->page_lock, flags);
17693 +#if defined(CONFIG_RA_CLASSIFIER)||defined(CONFIG_RA_CLASSIFIER_MODULE)
17696 +	if(ra_classifier_hook_tx!= NULL)
17698 +#if defined(CONFIG_RALINK_EXTERNAL_TIMER)
17699 +		ra_classifier_hook_tx(skb, (*((volatile u32 *)(0xB0000D08))&0x0FFFF)); /* timestamp from external timer register */
17701 +		ra_classifier_hook_tx(skb, read_c0_count());
17704 +#endif /* CONFIG_RA_CLASSIFIER */
17706 +#if defined (CONFIG_RALINK_RT3052_MP2)
17710 +#if !defined (CONFIG_RALINK_RT6855) && !defined (CONFIG_RALINK_RT6855A) && \
17711 +    !defined(CONFIG_RALINK_MT7621) && !defined (CONFIG_ARCH_MT7623)
17713 +#define MIN_PKT_LEN  60
17714 +	 if (skb->len < MIN_PKT_LEN) {
17715 +	     if (skb_padto(skb, MIN_PKT_LEN)) { /* pad runt frames to the 60-byte ethernet minimum; older SoCs lack HW padding */
17716 +	         printk("raeth: skb_padto failed\n");
17719 +	     skb_put(skb, MIN_PKT_LEN - skb->len);
17723 +	dev->trans_start = jiffies;	/* save the timestamp */
17724 +	spin_lock_irqsave(&ei_local->page_lock, flags); /* serialize descriptor-ring access against the housekeeping path */
17725 +#if defined (CONFIG_MIPS)
17726 +	dma_cache_sync(NULL, skb->data, skb->len, DMA_TO_DEVICE);
17728 +	dma_sync_single_for_device(NULL, virt_to_phys(skb->data), skb->len, DMA_TO_DEVICE);
17732 +#ifdef CONFIG_RALINK_VISTA_BASIC
17733 +	veth = (struct vlan_ethhdr *)(skb->data);
17734 +	if (is_switch_175c && veth->h_vlan_proto == __constant_htons(ETH_P_8021Q)) { /* 175C switch quirk: give VID-0 tagged frames the device's VLAN id */
17735 +		if ((veth->h_vlan_TCI & __constant_htons(VLAN_VID_MASK)) == 0) {
17736 +			veth->h_vlan_TCI |= htons(VLAN_DEV_INFO(dev)->vlan_id);
17741 +#if defined (CONFIG_RAETH_QOS)
17742 +	if(pkt_classifier(skb, gmac_no, &ring_no, &queue_no, &port_no)) {
17743 +		get_tx_ctx_idx(ring_no, &tx_cpu_owner_idx);
17744 +		tx_cpu_owner_idx_next = (tx_cpu_owner_idx + 1) % NUM_TX_DESC;
17745 +		if(((ei_local->skb_free[ring_no][tx_cpu_owner_idx]) ==0) && (ei_local->skb_free[ring_no][tx_cpu_owner_idx_next]==0)){ /* need two free slots before enqueueing */
17746 +			fe_qos_packet_send(dev, skb, ring_no, queue_no, port_no);
17748 +			ei_local->stat.tx_dropped++;
17750 +			spin_unlock_irqrestore(&ei_local->page_lock, flags);
17755 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
17756 +	tx_cpu_owner_idx = tx_cpu_owner_idx0;
17758 +	tx_cpu_owner_idx = sysRegRead(TX_CTX_IDX0);
17760 +#if defined (CONFIG_RAETH_TSO)
17761 +//	num_of_txd = (nr_frags==0) ? 1 : ((nr_frags>>1) + 1);
17762 +//	NumOfTxdUpdate(num_of_txd);
17763 +	if(nr_frags != 0){
17764 +		for(i=0;i<nr_frags;i++) { /* two fragment chunks share one descriptor (SDP0/SDP1), hence the halving below */
17765 +			frag = &skb_shinfo(skb)->frags[i];
17766 +			num_of_txd  += cal_frag_txd_num(frag->size);
17768 +		num_of_txd = (num_of_txd >> 1) + 1;
17775 +	tx_cpu_owner_idx_next = (tx_cpu_owner_idx + num_of_txd) % NUM_TX_DESC;
17777 +	if(((ei_local->skb_free[tx_cpu_owner_idx]) ==0) && (ei_local->skb_free[tx_cpu_owner_idx_next]==0)){ /* enough contiguous slots: send */
17778 +		rt2880_eth_send(dev, skb, gmac_no);
17780 +		tx_cpu_owner_idx_next2 = (tx_cpu_owner_idx_next + 1) % NUM_TX_DESC;
17782 +		if(ei_local->skb_free[tx_cpu_owner_idx_next2]!=0){ /* ring nearly full after this send */
17783 +#if defined (CONFIG_RAETH_SW_FC)
17784 +			netif_stop_queue(dev);
17785 +#ifdef CONFIG_PSEUDO_SUPPORT
17786 +			netif_stop_queue(ei_local->PseudoDev);
17792 +#ifdef CONFIG_PSEUDO_SUPPORT
17793 +		if (gmac_no == 2) {
17794 +			if (ei_local->PseudoDev != NULL) {
17795 +				pAd = netdev_priv(ei_local->PseudoDev);
17796 +				pAd->stat.tx_dropped++;
17800 +			ei_local->stat.tx_dropped++;
17801 +#if defined (CONFIG_RAETH_SW_FC)
17802 +		printk("tx_ring_full, drop packet\n");
17805 +		spin_unlock_irqrestore(&ei_local->page_lock, flags);
17809 +#if defined (CONFIG_RAETH_TSO)
17810 +	/* SG: use multiple TXD to send the packet (only have one skb) */
17811 +	ei_local->skb_free[(tx_cpu_owner_idx + num_of_txd - 1) % NUM_TX_DESC] = skb; /* real skb pointer goes in the LAST slot so housekeeping frees it only when the whole chain is done */
17812 +	while(--num_of_txd) {
17813 +		ei_local->skb_free[(tx_cpu_owner_idx + num_of_txd -1) % NUM_TX_DESC] = (struct  sk_buff *)0xFFFFFFFF; //MAGIC ID
17816 +	ei_local->skb_free[tx_cpu_owner_idx] = skb;
17819 +	spin_unlock_irqrestore(&ei_local->page_lock, flags);
/*
 * ei_xmit_housekeeping - reclaim transmitted skbs from the TX ring(s).
 * Frees every skb whose descriptor the DMA engine has completed
 * (txd_info2.DDONE_bit == 1), advances the software free index, wakes the
 * netdev queue(s), and (non-NAPI builds) re-enables the TX interrupt mask.
 * NOTE(review): this span is a unified-diff fragment; interior lines are
 * elided (diff numbering is non-contiguous), so closing braces and some
 * #else branches are not visible here.
 */
17823 +void ei_xmit_housekeeping(unsigned long unused)
17825 + struct net_device *dev = dev_raether;
17826 + END_DEVICE *ei_local = netdev_priv(dev);
17827 + struct PDMA_txdesc *tx_desc;
17828 + unsigned long skb_free_idx;
17829 + unsigned long tx_dtx_idx __maybe_unused;
17830 +#ifndef CONFIG_RAETH_NAPI
17831 + unsigned long reg_int_mask=0;
/* QoS build: one free-list per TX ring; skip a ring whose next slot holds no skb. */
17834 +#ifdef CONFIG_RAETH_QOS
17836 + for (i=0;i<NUM_TX_RINGS;i++){
17837 + skb_free_idx = ei_local->free_idx[i];
17838 + if((ei_local->skb_free[i][skb_free_idx])==0){
17842 + get_tx_desc_and_dtx_idx(ei_local, i, &tx_dtx_idx, &tx_desc);
17844 + while(tx_desc[skb_free_idx].txd_info2.DDONE_bit==1 && (ei_local->skb_free[i][skb_free_idx])!=0 ){
17845 + dev_kfree_skb_any((ei_local->skb_free[i][skb_free_idx]));
17847 + ei_local->skb_free[i][skb_free_idx]=0;
17848 + skb_free_idx = (skb_free_idx +1) % NUM_TX_DESC;
17850 + ei_local->free_idx[i] = skb_free_idx;
/* Non-QoS build: single ring0 free list driven by TX_DTX_IDX0 hardware index. */
17853 + tx_dtx_idx = sysRegRead(TX_DTX_IDX0);
17854 + tx_desc = ei_local->tx_ring0;
17855 + skb_free_idx = ei_local->free_idx;
17856 + if ((ei_local->skb_free[skb_free_idx]) != 0 && tx_desc[skb_free_idx].txd_info2.DDONE_bit==1) {
17857 + while(tx_desc[skb_free_idx].txd_info2.DDONE_bit==1 && (ei_local->skb_free[skb_free_idx])!=0 ){
17858 +#if defined (CONFIG_RAETH_TSO)
/* 0xFFFFFFFF is a sentinel marking a continuation descriptor of a
 * multi-TXD (SG/TSO) packet: only the real skb pointer is freed. */
17859 + if(ei_local->skb_free[skb_free_idx]!=(struct sk_buff *)0xFFFFFFFF) {
17860 + dev_kfree_skb_any(ei_local->skb_free[skb_free_idx]);
17863 + dev_kfree_skb_any(ei_local->skb_free[skb_free_idx]);
17865 + ei_local->skb_free[skb_free_idx]=0;
17866 + skb_free_idx = (skb_free_idx +1) % NUM_TX_DESC;
/* Descriptors were reclaimed, so the queue(s) stopped by the xmit path can run again. */
17869 + netif_wake_queue(dev);
17870 +#ifdef CONFIG_PSEUDO_SUPPORT
17871 + netif_wake_queue(ei_local->PseudoDev);
17874 + ei_local->free_idx = skb_free_idx;
17875 + } /* if skb_free != 0 */
/* Non-NAPI: unmask the TX-done (or delayed) interrupt that was disabled while polling. */
17878 +#ifndef CONFIG_RAETH_NAPI
17879 + reg_int_mask=sysRegRead(FE_INT_ENABLE);
17880 +#if defined (DELAY_INT)
17881 + sysRegWrite(FE_INT_ENABLE, reg_int_mask| TX_DLY_INT);
17884 + sysRegWrite(FE_INT_ENABLE, reg_int_mask | TX_DONE_INT0 \
17889 +#endif //CONFIG_RAETH_NAPI//
17894 +EXPORT_SYMBOL(ei_start_xmit);
17895 +EXPORT_SYMBOL(ei_xmit_housekeeping);
17896 +EXPORT_SYMBOL(fe_dma_init);
17897 +EXPORT_SYMBOL(rt2880_eth_send);
17898 diff --git a/drivers/net/ethernet/raeth/raether_qdma.c b/drivers/net/ethernet/raeth/raether_qdma.c
17899 new file mode 100644
17900 index 0000000..acf8bfe
17902 +++ b/drivers/net/ethernet/raeth/raether_qdma.c
17904 +#include <linux/module.h>
17905 +#include <linux/version.h>
17906 +#include <linux/kernel.h>
17907 +#include <linux/types.h>
17908 +#include <linux/pci.h>
17909 +#include <linux/init.h>
17910 +#include <linux/skbuff.h>
17911 +#include <linux/if_vlan.h>
17912 +#include <linux/if_ether.h>
17913 +#include <linux/fs.h>
17914 +#include <asm/uaccess.h>
17915 +#include <asm/rt2880/surfboardint.h>
17916 +#if defined (CONFIG_RAETH_TSO)
17917 +#include <linux/tcp.h>
17918 +#include <net/ipv6.h>
17919 +#include <linux/ip.h>
17920 +#include <net/ip.h>
17921 +#include <net/tcp.h>
17922 +#include <linux/in.h>
17923 +#include <linux/ppp_defs.h>
17924 +#include <linux/if_pppox.h>
17926 +#include <linux/delay.h>
17927 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
17928 +#include <linux/sched.h>
17930 +#if defined (CONFIG_HW_SFQ)
17931 +#include <linux/if_vlan.h>
17932 +#include <net/ipv6.h>
17933 +#include <net/ip.h>
17934 +#include <linux/if_pppox.h>
17935 +#include <linux/ppp_defs.h>
17938 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0)
17939 +#include <asm/rt2880/rt_mmap.h>
17941 +#include <linux/libata-compat.h>
17944 +#include "ra2882ethreg.h"
17945 +#include "raether.h"
17946 +#include "ra_mac.h"
17947 +#include "ra_ioctl.h"
17948 +#include "ra_rfrw.h"
17949 +#ifdef CONFIG_RAETH_NETLINK
17950 +#include "ra_netlink.h"
17952 +#if defined (CONFIG_RAETH_QOS)
17953 +#include "ra_qos.h"
17956 +#if defined (CONFIG_RA_HW_NAT) || defined (CONFIG_RA_HW_NAT_MODULE)
17957 +#include "../../../net/nat/hw_nat/ra_nat.h"
17961 +#if !defined(CONFIG_RA_NAT_NONE)
17964 +extern int (*ra_sw_nat_hook_rx)(struct sk_buff *skb);
17965 +extern int (*ra_sw_nat_hook_tx)(struct sk_buff *skb, int gmac_no);
17968 +#if defined(CONFIG_RA_CLASSIFIER)||defined(CONFIG_RA_CLASSIFIER_MODULE)
17971 +#include <asm/mipsregs.h>
17972 +extern int (*ra_classifier_hook_tx)(struct sk_buff *skb, unsigned long cur_cycle);
17973 +extern int (*ra_classifier_hook_rx)(struct sk_buff *skb, unsigned long cur_cycle);
17974 +#endif /* CONFIG_RA_CLASSIFIER */
17976 +#if defined (CONFIG_RALINK_RT3052_MP2)
17977 +int32_t mcast_rx(struct sk_buff * skb);
17978 +int32_t mcast_tx(struct sk_buff * skb);
17981 +#ifdef RA_MTD_RW_BY_NUM
17982 +int ra_mtd_read(int num, loff_t from, size_t len, u_char *buf);
17984 +int ra_mtd_read_nm(char *name, loff_t from, size_t len, u_char *buf);
17987 +/* gmac driver feature set config */
17988 +#if defined (CONFIG_RAETH_NAPI) || defined (CONFIG_RAETH_QOS)
17991 +#if defined (CONFIG_ARCH_MT7623)
17994 +#define DELAY_INT 1
17998 +//#define CONFIG_UNH_TEST
17999 +/* end of config */
18001 +#if defined (CONFIG_RAETH_JUMBOFRAME)
18002 +#define MAX_RX_LENGTH 4096
18004 +#define MAX_RX_LENGTH 1536
18007 +extern struct net_device *dev_raether;
18009 +#if defined (CONFIG_RAETH_MULTIPLE_RX_RING)
18010 +static int rx_dma_owner_idx1;
18011 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
18012 +static int rx_calc_idx1;
18015 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
18016 +static int rx_calc_idx0;
18017 +static unsigned long tx_cpu_owner_idx0=0;
18019 +extern unsigned long tx_ring_full;
18021 +#if defined (CONFIG_ETHTOOL) && defined (CONFIG_RAETH_ROUTER)
18022 +#include "ra_ethtool.h"
18023 +extern struct ethtool_ops ra_ethtool_ops;
18024 +#ifdef CONFIG_PSEUDO_SUPPORT
18025 +extern struct ethtool_ops ra_virt_ethtool_ops;
18026 +#endif // CONFIG_PSEUDO_SUPPORT //
18027 +#endif // (CONFIG_ETHTOOL //
18029 +#ifdef CONFIG_RALINK_VISTA_BASIC
18030 +int is_switch_175c = 1;
18033 +//skb->mark to queue mapping table
18034 +extern unsigned int M2Q_table[64];
18035 +struct QDMA_txdesc *free_head = NULL;
18036 +extern unsigned int lan_wan_separate;
18037 +#if defined (CONFIG_HW_SFQ)
18038 +extern unsigned int web_sfq_enable;
18039 +#define HwSfqQUp 3
18040 +#define HwSfqQDl 1
18042 +int dbg =0;//debug used
18043 +#if defined (CONFIG_HW_SFQ)
18044 +struct SFQ_table *sfq0;
18045 +struct SFQ_table *sfq1;
18046 +struct SFQ_table *sfq2;
18047 +struct SFQ_table *sfq3;
18050 +#define KSEG1 0xa0000000
18051 +#if defined (CONFIG_MIPS)
18052 +#define PHYS_TO_VIRT(x) ((void *)((x) | KSEG1))
18053 +#define VIRT_TO_PHYS(x) ((unsigned long)(x) & ~KSEG1)
18055 +#define PHYS_TO_VIRT(x) phys_to_virt(x)
18056 +#define VIRT_TO_PHYS(x) virt_to_phys(x)
18059 +extern void set_fe_dma_glo_cfg(void);
18061 +#if defined (CONFIG_HW_SFQ)
18062 +ParseResult SfqParseResult;
18067 + * @brief: get the TXD index from its address
18069 + * @param: cpu_ptr
18071 + * @return: TXD index
/*
 * Maps a QDMA TX descriptor pointer (physical-address form, per the
 * phy_txd_pool base used below) back to its index in the TXD pool.
 */
18074 +static unsigned int GET_TXD_OFFSET(struct QDMA_txdesc **cpu_ptr)
18076 + struct net_device *dev = dev_raether;
18077 + END_DEVICE *ei_local = netdev_priv(dev);
18079 + //ctx_offset = (((((u32)*cpu_ptr) <<8)>>8) - ((((u32)ei_local->txd_pool)<<8)>>8))/ sizeof(struct QDMA_txdesc);
18080 + //ctx_offset = (*cpu_ptr - ei_local->txd_pool);
/* The <<8>>8 masks off the top 8 address bits before subtracting —
 * presumably so segment/mapping bits don't affect the offset (32-bit
 * only; NOTE(review): u32 casts are not 64-bit safe — confirm target). */
18081 + ctx_offset = (((((u32)*cpu_ptr) <<8)>>8) - ((((u32)ei_local->phy_txd_pool)<<8)>>8))/ sizeof(struct QDMA_txdesc);
18083 + return ctx_offset;
18090 + * @brief cal txd number for a page
18094 + * @return frag_txd_num
/*
 * Computes how many TX descriptors a buffer of `size` bytes needs, given
 * that one TXD carries at most MAX_TXD_LEN bytes (effectively a ceiling
 * division; the loop subtracts MAX_TXD_LEN per descriptor).
 * NOTE(review): several interior lines of this diff fragment are elided,
 * including the loop header and increment of frag_txd_num.
 */
18097 +unsigned int cal_frag_txd_num(unsigned int size)
18099 + unsigned int frag_txd_num = 0;
18103 + if(size > MAX_TXD_LEN){
18105 + size -= MAX_TXD_LEN;
18111 + return frag_txd_num;
18116 + * @brief get free TXD from TXD queue
18118 + * @param free_txd
/*
 * Pops one descriptor off the software free list.  On success, writes the
 * descriptor's PHYSICAL address into *free_txd and (per the elided branch)
 * returns its pool index; returns NUM_TX_DESC when the list is empty —
 * callers treat that value as the failure sentinel.
 */
18122 +static int get_free_txd(struct QDMA_txdesc **free_txd)
18124 + struct net_device *dev = dev_raether;
18125 + END_DEVICE *ei_local = netdev_priv(dev);
18126 + unsigned int tmp_idx;
18128 + if(ei_local->free_txd_num > 0){
/* txd_pool_info[] chains free slots: entry i holds the index of the next free TXD. */
18129 + tmp_idx = ei_local->free_txd_head;
18130 + ei_local->free_txd_head = ei_local->txd_pool_info[tmp_idx];
18131 + ei_local->free_txd_num -= 1;
18132 + //*free_txd = &ei_local->txd_pool[tmp_idx];
/* Hands out the physical address (phy_txd_pool base), not the virtual one. */
18133 + *free_txd = ei_local->phy_txd_pool + (sizeof(struct QDMA_txdesc) * tmp_idx);
18136 + return NUM_TX_DESC;
18141 + * @brief add free TXD into TXD queue
18143 + * @param free_txd
/*
 * Pushes descriptor index free_txd_idx back onto the tail of the software
 * free list (inverse of get_free_txd).  The new tail's next-link is set to
 * NUM_TX_DESC, the end-of-list sentinel.
 */
18147 +int put_free_txd(int free_txd_idx)
18149 + struct net_device *dev = dev_raether;
18150 + END_DEVICE *ei_local = netdev_priv(dev);
18151 + ei_local->txd_pool_info[ei_local->free_txd_tail] = free_txd_idx;
18152 + ei_local->free_txd_tail = free_txd_idx;
18153 + ei_local->txd_pool_info[free_txd_idx] = NUM_TX_DESC;
18154 + ei_local->free_txd_num += 1;
18158 +/*define qdma initial alloc*/
18164 + * @return 0: fail
/*
 * Allocates the DMA-coherent QDMA TX descriptor pool, builds the software
 * free list over it, then pulls two descriptors to seed the hardware with
 * "null" TXDs: one pair for the transmit side (QTX_CTX/DTX_PTR) and one
 * for the release side (QTX_CRX/DRX_PTR).
 */
18167 +bool qdma_tx_desc_alloc(void)
18169 + struct net_device *dev = dev_raether;
18170 + END_DEVICE *ei_local = netdev_priv(dev);
18171 + struct QDMA_txdesc *free_txd = NULL;
18172 + unsigned int txd_idx;
18176 + ei_local->txd_pool = pci_alloc_consistent(NULL, sizeof(struct QDMA_txdesc) * NUM_TX_DESC, &ei_local->phy_txd_pool);
18177 + printk("txd_pool=%p phy_txd_pool=%08X\n", ei_local->txd_pool , ei_local->phy_txd_pool);
18179 + if (ei_local->txd_pool == NULL) {
18180 + printk("adapter->txd_pool allocation failed!\n");
18183 + printk("ei_local->skb_free start address is 0x%p.\n", ei_local->skb_free);
18184 + //set all txd_pool_info to 0.
/* Each slot's next-link points at slot i+1; LS/OWN bits start set so HW
 * treats every unused descriptor as a completed last-segment. */
18185 + for ( i = 0; i < NUM_TX_DESC; i++)
18187 + ei_local->skb_free[i]= 0;
18188 + ei_local->txd_pool_info[i] = i + 1;
18189 + ei_local->txd_pool[i].txd_info3.LS_bit = 1;
18190 + ei_local->txd_pool[i].txd_info3.OWN_bit = 1;
18193 + ei_local->free_txd_head = 0;
18194 + ei_local->free_txd_tail = NUM_TX_DESC - 1;
18195 + ei_local->free_txd_num = NUM_TX_DESC;
18198 + //get free txd from txd pool
18199 + txd_idx = get_free_txd(&free_txd);
/* NOTE(review): on this failure path the elided lines presumably just
 * return; the txd_pool allocation above is not visibly freed — confirm. */
18200 + if( txd_idx == NUM_TX_DESC) {
18201 + printk("get_free_txd fail\n");
18205 + //add null TXD for transmit
18206 + //ei_local->tx_dma_ptr = VIRT_TO_PHYS(free_txd);
18207 + //ei_local->tx_cpu_ptr = VIRT_TO_PHYS(free_txd);
/* free_txd already holds a physical address (see get_free_txd). */
18208 + ei_local->tx_dma_ptr = free_txd;
18209 + ei_local->tx_cpu_ptr = free_txd;
18210 + sysRegWrite(QTX_CTX_PTR, ei_local->tx_cpu_ptr);
18211 + sysRegWrite(QTX_DTX_PTR, ei_local->tx_dma_ptr);
18213 + //get free txd from txd pool
18215 + txd_idx = get_free_txd(&free_txd);
18216 + if( txd_idx == NUM_TX_DESC) {
18217 + printk("get_free_txd fail\n");
18220 + // add null TXD for release
18221 + //sysRegWrite(QTX_CRX_PTR, VIRT_TO_PHYS(free_txd));
18222 + //sysRegWrite(QTX_DRX_PTR, VIRT_TO_PHYS(free_txd));
18223 + sysRegWrite(QTX_CRX_PTR, free_txd);
18224 + sysRegWrite(QTX_DRX_PTR, free_txd);
18225 + printk("free_txd: %p, ei_local->cpu_ptr: %08X\n", free_txd, ei_local->tx_cpu_ptr);
18227 + printk(" POOL HEAD_PTR | DMA_PTR | CPU_PTR \n");
18228 + printk("----------------+---------+--------\n");
18229 + printk(" 0x%p 0x%08X 0x%08X\n",ei_local->txd_pool, ei_local->tx_dma_ptr, ei_local->tx_cpu_ptr);
18232 +#if defined (CONFIG_HW_SFQ)
/*
 * Hardware-SFQ setup: programs the virtual-queue TX registers, zeroes the
 * RED/flow-control thresholds, then allocates four 256-entry SFQ hash
 * tables in DMA-coherent memory, poisons their head/tail pointers with
 * 0xdeadbeef (empty-queue marker, presumably recognized by HW — confirm),
 * and points VQTX_TB_BASE0..3 at their physical addresses.
 * NOTE(review): none of the four pci_alloc_consistent results is checked
 * for NULL before memset; pointers are printed with %x instead of %p.
 * These four local sfq0..3 shadow the file-scope sfq0..3 declared above.
 */
18233 +bool sfq_init(void)
18235 + unsigned int regVal;
18237 + unsigned int sfq_phy0;
18238 + unsigned int sfq_phy1;
18239 + unsigned int sfq_phy2;
18240 + unsigned int sfq_phy3;
18241 + struct SFQ_table *sfq0;
18242 + struct SFQ_table *sfq1;
18243 + struct SFQ_table *sfq2;
18244 + struct SFQ_table *sfq3;
18246 + regVal = sysRegRead(VQTX_GLO);
18247 + regVal = regVal | VQTX_MIB_EN |(1<<16) ;
18248 + sysRegWrite(VQTX_GLO, regVal);// Virtual table extends to 32bytes
18249 + regVal = sysRegRead(VQTX_GLO);
18250 + sysRegWrite(VQTX_NUM, (VQTX_NUM_0) | (VQTX_NUM_1) | (VQTX_NUM_2) | (VQTX_NUM_3));
18251 + sysRegWrite(VQTX_HASH_CFG, 0xF002710); //10 s change hash algorithm
18252 + sysRegWrite(VQTX_VLD_CFG, 0x00);
18253 + sysRegWrite(VQTX_HASH_SD, 0x0D);
18254 + sysRegWrite(QDMA_FC_THRES, 0x9b9b4444);
18255 + sysRegWrite(QDMA_HRED1, 0);
18256 + sysRegWrite(QDMA_HRED2, 0);
18257 + sysRegWrite(QDMA_SRED1, 0);
18258 + sysRegWrite(QDMA_SRED2, 0);
18259 + sfq0 = pci_alloc_consistent(NULL, 256*sizeof(struct SFQ_table), &sfq_phy0);
18260 + memset(sfq0, 0x0, 256*sizeof(struct SFQ_table) );
18261 + for (i=0; i < 256; i++) {
18262 + sfq0[i].sfq_info1.VQHPTR = 0xdeadbeef;
18263 + sfq0[i].sfq_info2.VQTPTR = 0xdeadbeef;
18266 + sfq1 = pci_alloc_consistent(NULL, 256*sizeof(struct SFQ_table), &sfq_phy1);
18268 + memset(sfq1, 0x0, 256*sizeof(struct SFQ_table) );
18269 + for (i=0; i < 256; i++) {
18270 + sfq1[i].sfq_info1.VQHPTR = 0xdeadbeef;
18271 + sfq1[i].sfq_info2.VQTPTR = 0xdeadbeef;
18274 + sfq2 = pci_alloc_consistent(NULL, 256*sizeof(struct SFQ_table), &sfq_phy2);
18275 + memset(sfq2, 0x0, 256*sizeof(struct SFQ_table) );
18276 + for (i=0; i < 256; i++) {
18277 + sfq2[i].sfq_info1.VQHPTR = 0xdeadbeef;
18278 + sfq2[i].sfq_info2.VQTPTR = 0xdeadbeef;
18281 + sfq3 = pci_alloc_consistent(NULL, 256*sizeof(struct SFQ_table), &sfq_phy3);
18282 + memset(sfq3, 0x0, 256*sizeof(struct SFQ_table) );
18283 + for (i=0; i < 256; i++) {
18284 + sfq3[i].sfq_info1.VQHPTR = 0xdeadbeef;
18285 + sfq3[i].sfq_info2.VQTPTR = 0xdeadbeef;
18289 + printk("*****sfq_phy0 is 0x%x!!!*******\n", sfq_phy0);
18290 + printk("*****sfq_phy1 is 0x%x!!!*******\n", sfq_phy1);
18291 + printk("*****sfq_phy2 is 0x%x!!!*******\n", sfq_phy2);
18292 + printk("*****sfq_phy3 is 0x%x!!!*******\n", sfq_phy3);
18293 + printk("*****sfq_virt0 is 0x%x!!!*******\n", sfq0);
18294 + printk("*****sfq_virt1 is 0x%x!!!*******\n", sfq1);
18295 + printk("*****sfq_virt2 is 0x%x!!!*******\n", sfq2);
18296 + printk("*****sfq_virt3 is 0x%x!!!*******\n", sfq3);
18297 + printk("*****sfq_virt0 is 0x%x!!!*******\n", sfq0);
18298 + sysRegWrite(VQTX_TB_BASE0, (u32)sfq_phy0);
18299 + sysRegWrite(VQTX_TB_BASE1, (u32)sfq_phy1);
18300 + sysRegWrite(VQTX_TB_BASE2, (u32)sfq_phy2);
18301 + sysRegWrite(VQTX_TB_BASE3, (u32)sfq_phy3);
/*
 * QDMA free-queue (FQ) init: allocates NUM_QDMA_PAGE descriptors plus one
 * data page per descriptor, chains them (each SDP points at its page, each
 * NDP at the next descriptor's physical address), then programs the FQ
 * head/tail/count/buffer-length registers and stashes the virtual and
 * physical bases in ei_local for later teardown.
 * NOTE(review): writes the file-scope `free_head` global, not a local.
 * On the second allocation failing, the first is not visibly freed —
 * confirm against the elided lines.
 */
18306 +bool fq_qdma_init(struct net_device *dev)
18308 + END_DEVICE* ei_local = netdev_priv(dev);
18309 + //struct QDMA_txdesc *free_head = NULL;
18310 + unsigned int phy_free_head;
18311 + unsigned int phy_free_tail;
18312 + unsigned int *free_page_head = NULL;
18313 + unsigned int phy_free_page_head;
18316 + free_head = pci_alloc_consistent(NULL, NUM_QDMA_PAGE * sizeof(struct QDMA_txdesc), &phy_free_head);
18317 + if (unlikely(free_head == NULL)){
18318 + printk(KERN_ERR "QDMA FQ decriptor not available...\n");
18321 + memset(free_head, 0x0, sizeof(struct QDMA_txdesc) * NUM_QDMA_PAGE);
18323 + free_page_head = pci_alloc_consistent(NULL, NUM_QDMA_PAGE * QDMA_PAGE_SIZE, &phy_free_page_head);
18324 + if (unlikely(free_page_head == NULL)){
18325 + printk(KERN_ERR "QDMA FQ page not available...\n");
18328 + for (i=0; i < NUM_QDMA_PAGE; i++) {
/* SDP = physical address of this descriptor's data page. */
18329 + free_head[i].txd_info1.SDP = (phy_free_page_head + (i * QDMA_PAGE_SIZE));
/* NDP links to the next descriptor; the last one is left unlinked (elided else). */
18330 + if(i < (NUM_QDMA_PAGE-1)){
18331 + free_head[i].txd_info2.NDP = (phy_free_head + ((i+1) * sizeof(struct QDMA_txdesc)));
18335 + printk("free_head_phy[%d] is 0x%x!!!\n",i, VIRT_TO_PHYS(&free_head[i]) );
18336 + printk("free_head[%d] is 0x%x!!!\n",i, &free_head[i] );
18337 + printk("free_head[%d].txd_info1.SDP is 0x%x!!!\n",i, free_head[i].txd_info1.SDP );
18338 + printk("free_head[%d].txd_info2.NDP is 0x%x!!!\n",i, free_head[i].txd_info2.NDP );
18341 + free_head[i].txd_info3.SDL = QDMA_PAGE_SIZE;
18344 + phy_free_tail = (phy_free_head + (u32)((NUM_QDMA_PAGE-1) * sizeof(struct QDMA_txdesc)));
18346 + printk("phy_free_head is 0x%x!!!\n", phy_free_head);
18347 + printk("phy_free_tail_phy is 0x%x!!!\n", phy_free_tail);
18348 + sysRegWrite(QDMA_FQ_HEAD, (u32)phy_free_head);
18349 + sysRegWrite(QDMA_FQ_TAIL, (u32)phy_free_tail);
18350 + sysRegWrite(QDMA_FQ_CNT, ((NUM_TX_DESC << 16) | NUM_QDMA_PAGE));
18351 + sysRegWrite(QDMA_FQ_BLEN, QDMA_PAGE_SIZE << 16);
18353 + ei_local->free_head = free_head;
18354 + ei_local->phy_free_head = phy_free_head;
18355 + ei_local->free_page_head = free_page_head;
18356 + ei_local->phy_free_page_head = phy_free_page_head;
/*
 * fe_dma_init - top-level QDMA/PDMA bring-up for this driver:
 *   1. optional HW-SFQ table setup (elided call to sfq_init presumed in
 *      the missing lines) and FQ chain via fq_qdma_init();
 *   2. warn if the QDMA engine still reports RX/TX busy;
 *   3. TXD pool + null TXD via qdma_tx_desc_alloc();
 *   4. QDMA RX ring 0, and (unless CONFIG_RAETH_QDMATX_QDMARX) a separate
 *      PDMA RX ring 0, each descriptor mapped from netrx0_skbuf[];
 *   5. flow-control thresholds, global DMA config, and the MT7623 NDP
 *      coherence re-read workaround (bit 0x400 of QDMA_GLO_CFG).
 * NOTE(review): return value lines are elided from this diff fragment.
 */
18360 +int fe_dma_init(struct net_device *dev)
18364 + unsigned int regVal;
18365 + END_DEVICE* ei_local = netdev_priv(dev);
18368 + #if defined (CONFIG_HW_SFQ)
18371 + fq_qdma_init(dev);
18375 + regVal = sysRegRead(QDMA_GLO_CFG);
18376 + if((regVal & RX_DMA_BUSY))
18378 + printk("\n RX_DMA_BUSY !!! ");
18381 + if((regVal & TX_DMA_BUSY))
18383 + printk("\n TX_DMA_BUSY !!! ");
18388 + /*tx desc alloc, add a NULL TXD to HW*/
18390 + qdma_tx_desc_alloc();
18392 + /* Initial RX Ring 0*/
18394 +#ifdef CONFIG_32B_DESC
18395 + ei_local->qrx_ring = kmalloc(NUM_QRX_DESC * sizeof(struct PDMA_rxdesc), GFP_KERNEL);
18396 + ei_local->phy_qrx_ring = virt_to_phys(ei_local->qrx_ring);
18398 + ei_local->qrx_ring = pci_alloc_consistent(NULL, NUM_QRX_DESC * sizeof(struct PDMA_rxdesc), &ei_local->phy_qrx_ring);
18400 + for (i = 0; i < NUM_QRX_DESC; i++) {
18401 + memset(&ei_local->qrx_ring[i],0,sizeof(struct PDMA_rxdesc));
18402 + ei_local->qrx_ring[i].rxd_info2.DDONE_bit = 0;
/* SG-RX: LS0=0 lets HW chain segments; PLEN0 caps each buffer. */
18403 +#if defined (CONFIG_RAETH_SCATTER_GATHER_RX_DMA)
18404 + ei_local->qrx_ring[i].rxd_info2.LS0 = 0;
18405 + ei_local->qrx_ring[i].rxd_info2.PLEN0 = MAX_RX_LENGTH;
18407 + ei_local->qrx_ring[i].rxd_info2.LS0 = 1;
18409 + ei_local->qrx_ring[i].rxd_info1.PDP0 = dma_map_single(NULL, ei_local->netrx0_skbuf[i]->data, MAX_RX_LENGTH, PCI_DMA_FROMDEVICE);
18411 + printk("\nphy_qrx_ring = 0x%08x, qrx_ring = 0x%p\n",ei_local->phy_qrx_ring,ei_local->qrx_ring);
/* Keep only the low config byte, then re-program ring pointers. */
18413 + regVal = sysRegRead(QDMA_GLO_CFG);
18414 + regVal &= 0x000000FF;
18416 + sysRegWrite(QDMA_GLO_CFG, regVal);
18417 + regVal=sysRegRead(QDMA_GLO_CFG);
18419 + /* Tell the adapter where the TX/RX rings are located. */
18421 + sysRegWrite(QRX_BASE_PTR_0, phys_to_bus((u32) ei_local->phy_qrx_ring));
18422 + sysRegWrite(QRX_MAX_CNT_0, cpu_to_le32((u32) NUM_QRX_DESC));
18423 + sysRegWrite(QRX_CRX_IDX_0, cpu_to_le32((u32) (NUM_QRX_DESC - 1)));
18424 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
18425 + rx_calc_idx0 = rx_dma_owner_idx0 = sysRegRead(QRX_CRX_IDX_0);
18427 + sysRegWrite(QDMA_RST_CFG, PST_DRX_IDX0);
18429 + ei_local->rx_ring0 = ei_local->qrx_ring;
18430 +#if !defined (CONFIG_RAETH_QDMATX_QDMARX)
18431 + /* Initial PDMA RX Ring 0*/
18432 +#ifdef CONFIG_32B_DESC
18433 + ei_local->rx_ring0 = kmalloc(NUM_RX_DESC * sizeof(struct PDMA_rxdesc), GFP_KERNEL);
18434 + ei_local->phy_rx_ring0 = virt_to_phys(ei_local->rx_ring0);
18436 + ei_local->rx_ring0 = pci_alloc_consistent(NULL, NUM_RX_DESC * sizeof(struct PDMA_rxdesc), &ei_local->phy_rx_ring0);
18438 + for (i = 0; i < NUM_RX_DESC; i++) {
18439 + memset(&ei_local->rx_ring0[i],0,sizeof(struct PDMA_rxdesc));
18440 + ei_local->rx_ring0[i].rxd_info2.DDONE_bit = 0;
18441 +#if defined (CONFIG_RAETH_SCATTER_GATHER_RX_DMA)
18442 + ei_local->rx_ring0[i].rxd_info2.LS0 = 0;
18443 + ei_local->rx_ring0[i].rxd_info2.PLEN0 = MAX_RX_LENGTH;
18445 + ei_local->rx_ring0[i].rxd_info2.LS0 = 1;
/* NOTE(review): netrx0_skbuf[i] feeds BOTH this PDMA ring and the QDMA
 * ring above in this fragment — confirm intended against missing lines. */
18447 + ei_local->rx_ring0[i].rxd_info1.PDP0 = dma_map_single(NULL, ei_local->netrx0_skbuf[i]->data, MAX_RX_LENGTH, PCI_DMA_FROMDEVICE);
18449 + printk("\nphy_rx_ring0 = 0x%08x, rx_ring0 = 0x%p\n",ei_local->phy_rx_ring0,ei_local->rx_ring0);
18451 + regVal = sysRegRead(PDMA_GLO_CFG);
18452 + regVal &= 0x000000FF;
18453 + sysRegWrite(PDMA_GLO_CFG, regVal);
18454 + regVal=sysRegRead(PDMA_GLO_CFG);
18456 + sysRegWrite(RX_BASE_PTR0, phys_to_bus((u32) ei_local->phy_rx_ring0));
18457 + sysRegWrite(RX_MAX_CNT0, cpu_to_le32((u32) NUM_RX_DESC));
18458 + sysRegWrite(RX_CALC_IDX0, cpu_to_le32((u32) (NUM_RX_DESC - 1)));
18459 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
18460 + rx_calc_idx0 = sysRegRead(RX_CALC_IDX0);
18462 + sysRegWrite(PDMA_RST_CFG, PST_DRX_IDX0);
/* Without HW-SFQ, sfq_init() never programmed thresholds; do it here. */
18464 +#if !defined (CONFIG_HW_SFQ)
18465 + /* Enable randon early drop and set drop threshold automatically */
18466 + sysRegWrite(QDMA_FC_THRES, 0x174444);
18468 + sysRegWrite(QDMA_HRED2, 0x0);
18469 + set_fe_dma_glo_cfg();
18470 +#if defined (CONFIG_ARCH_MT7623)
18471 + printk("Enable QDMA TX NDP coherence check and re-read mechanism\n");
18472 + regVal=sysRegRead(QDMA_GLO_CFG);
18473 + regVal = regVal | 0x400;
18474 + sysRegWrite(QDMA_GLO_CFG, regVal);
18475 + printk("***********QDMA_GLO_CFG=%x\n", sysRegRead(QDMA_GLO_CFG));
18481 +#if defined (CONFIG_HW_SFQ)
/* File-scope parse state shared with rt2880_eth_send below; only one of
 * the two ports is nonzero after a parse (the other is reset to 0).
 * NOTE(review): globals + the global SfqParseResult make this parser
 * non-reentrant — confirm all callers hold the same lock. */
18485 +int udp_source_port=0;
18486 +int tcp_source_port=0;
/*
 * Parses the Ethernet/VLAN/IPv4/IPv6/TCP/UDP headers of skb into the
 * global SfqParseResult and sets sfq_prot (2 = IPv4 5-tuple, 4 = IPv6
 * 5-tuple, 3 = IPv6 3-tuple) for the HW-SFQ TX descriptor fields.
 * Interior lines of this diff fragment (incl. the return paths and the
 * fragmented-packet branches) are elided.
 */
18488 +int SfqParseLayerInfo(struct sk_buff * skb)
18491 + struct vlan_hdr *vh_sfq = NULL;
18492 + struct ethhdr *eth_sfq = NULL;
18493 + struct iphdr *iph_sfq = NULL;
18494 + struct ipv6hdr *ip6h_sfq = NULL;
18495 + struct tcphdr *th_sfq = NULL;
18496 + struct udphdr *uh_sfq = NULL;
18497 +#ifdef CONFIG_RAETH_HW_VLAN_TX
18498 + struct vlan_hdr pseudo_vhdr_sfq;
18501 + memset(&SfqParseResult, 0, sizeof(SfqParseResult));
18503 + eth_sfq = (struct ethhdr *)skb->data;
18504 + memcpy(SfqParseResult.dmac, eth_sfq->h_dest, ETH_ALEN);
18505 + memcpy(SfqParseResult.smac, eth_sfq->h_source, ETH_ALEN);
18506 + SfqParseResult.eth_type = eth_sfq->h_proto;
/* Single 802.1Q tag: note the 4-byte gap and use the inner ethertype. */
18509 + if (SfqParseResult.eth_type == htons(ETH_P_8021Q)){
18510 + SfqParseResult.vlan1_gap = VLAN_HLEN;
18511 + vh_sfq = (struct vlan_hdr *)(skb->data + ETH_HLEN);
18512 + SfqParseResult.eth_type = vh_sfq->h_vlan_encapsulated_proto;
18514 + SfqParseResult.vlan1_gap = 0;
18519 + LAYER2_HEADER(skb) = skb->data;
18520 + LAYER3_HEADER(skb) = (skb->data + ETH_HLEN + (SfqParseResult.vlan1_gap));
18524 + /* set layer4 start addr */
18525 + if ((SfqParseResult.eth_type == htons(ETH_P_IP)) || (SfqParseResult.eth_type == htons(ETH_P_PPP_SES)
18526 + && SfqParseResult.ppp_tag == htons(PPP_IP))) {
18527 + iph_sfq = (struct iphdr *)LAYER3_HEADER(skb);
18529 + //prepare layer3/layer4 info
18530 + memcpy(&SfqParseResult.iph, iph_sfq, sizeof(struct iphdr));
18531 + if (iph_sfq->protocol == IPPROTO_TCP) {
/* L4 starts ihl*4 bytes after the IPv4 header start. */
18533 + LAYER4_HEADER(skb) = ((uint8_t *) iph_sfq + (iph_sfq->ihl * 4));
18534 + th_sfq = (struct tcphdr *)LAYER4_HEADER(skb);
18535 + memcpy(&SfqParseResult.th, th_sfq, sizeof(struct tcphdr));
18536 + SfqParseResult.pkt_type = IPV4_HNAPT;
18537 + //printk("tcp parsing\n");
18538 + tcp_source_port = ntohs(SfqParseResult.th.source);
18539 + udp_source_port = 0;
18540 + #if(0) //for TCP ack, test use
18541 + if(ntohl(SfqParseResult.iph.saddr) == 0xa0a0a04){ // tcp ack packet
18547 + sfq_prot = 2;//IPV4_HNAPT
18548 + proto_id = 1;//TCP
/* Fragmented IPv4: handling elided in this fragment. */
18549 + if(iph_sfq->frag_off & htons(IP_MF|IP_OFFSET)) {
18552 + } else if (iph_sfq->protocol == IPPROTO_UDP) {
18553 + LAYER4_HEADER(skb) = ((uint8_t *) iph_sfq + iph_sfq->ihl * 4);
18554 + uh_sfq = (struct udphdr *)LAYER4_HEADER(skb);
18555 + memcpy(&SfqParseResult.uh, uh_sfq, sizeof(struct udphdr));
18556 + SfqParseResult.pkt_type = IPV4_HNAPT;
18557 + udp_source_port = ntohs(SfqParseResult.uh.source);
18558 + tcp_source_port = 0;
18560 + sfq_prot = 2;//IPV4_HNAPT
18561 + proto_id =2;//UDP
18562 + if(iph_sfq->frag_off & htons(IP_MF|IP_OFFSET)) {
18568 + }else if (SfqParseResult.eth_type == htons(ETH_P_IPV6) ||
18569 + (SfqParseResult.eth_type == htons(ETH_P_PPP_SES) &&
18570 + SfqParseResult.ppp_tag == htons(PPP_IPV6))) {
18571 + ip6h_sfq = (struct ipv6hdr *)LAYER3_HEADER(skb);
18572 + memcpy(&SfqParseResult.ip6h, ip6h_sfq, sizeof(struct ipv6hdr));
/* IPv6 assumes no extension headers: L4 sits right after the fixed header. */
18574 + if (ip6h_sfq->nexthdr == NEXTHDR_TCP) {
18575 + LAYER4_HEADER(skb) = ((uint8_t *) ip6h_sfq + sizeof(struct ipv6hdr));
18576 + th_sfq = (struct tcphdr *)LAYER4_HEADER(skb);
18577 + memcpy(&SfqParseResult.th, th_sfq, sizeof(struct tcphdr));
18578 + SfqParseResult.pkt_type = IPV6_5T_ROUTE;
18579 + sfq_prot = 4;//IPV6_5T
18580 + #if(0) //for TCP ack, test use
18581 + if(ntohl(SfqParseResult.ip6h.saddr.s6_addr32[3]) == 8){
18587 + } else if (ip6h_sfq->nexthdr == NEXTHDR_UDP) {
18588 + LAYER4_HEADER(skb) = ((uint8_t *) ip6h_sfq + sizeof(struct ipv6hdr));
18589 + uh_sfq = (struct udphdr *)LAYER4_HEADER(skb);
18590 + memcpy(&SfqParseResult.uh, uh_sfq, sizeof(struct udphdr));
18591 + SfqParseResult.pkt_type = IPV6_5T_ROUTE;
18593 + sfq_prot = 4;//IPV6_5T
/* Other IPv6 next-headers fall back to 3-tuple classification. */
18596 + sfq_prot = 3;//IPV6_3T
18604 +inline int rt2880_eth_send(struct net_device* dev, struct sk_buff *skb, int gmac_no)
18606 + unsigned int length=skb->len;
18607 + END_DEVICE* ei_local = netdev_priv(dev);
18609 + struct QDMA_txdesc *cpu_ptr;
18611 + struct QDMA_txdesc *dma_ptr __maybe_unused;
18612 + struct QDMA_txdesc *free_txd;
18614 +#if defined (CONFIG_RAETH_TSO)
18615 + struct iphdr *iph = NULL;
18616 + struct QDMA_txdesc *init_cpu_ptr;
18617 + struct tcphdr *th = NULL;
18618 + struct skb_frag_struct *frag;
18619 + unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
18620 + unsigned int len, size, offset, frag_txd_num;
18621 + int init_txd_idx, i;
18622 +#endif // CONFIG_RAETH_TSO //
18624 +#if defined (CONFIG_RAETH_TSOV6)
18625 + struct ipv6hdr *ip6h = NULL;
18628 +#ifdef CONFIG_PSEUDO_SUPPORT
18629 + PSEUDO_ADAPTER *pAd;
18631 + //cpu_ptr = PHYS_TO_VIRT(ei_local->tx_cpu_ptr);
18632 + //dma_ptr = PHYS_TO_VIRT(ei_local->tx_dma_ptr);
18633 + //ctx_offset = GET_TXD_OFFSET(&cpu_ptr);
18634 + cpu_ptr = (ei_local->tx_cpu_ptr);
18635 + ctx_offset = GET_TXD_OFFSET(&cpu_ptr);
18636 + cpu_ptr = phys_to_virt(ei_local->tx_cpu_ptr);
18637 + dma_ptr = phys_to_virt(ei_local->tx_dma_ptr);
18638 + cpu_ptr = (ei_local->txd_pool + (ctx_offset));
18639 + ei_local->skb_free[ctx_offset] = skb;
18640 +#if defined (CONFIG_RAETH_TSO)
18641 + init_cpu_ptr = cpu_ptr;
18642 + init_txd_idx = ctx_offset;
18645 +#if !defined (CONFIG_RAETH_TSO)
18647 + //2. prepare data
18648 + //cpu_ptr->txd_info1.SDP = VIRT_TO_PHYS(skb->data);
18649 + cpu_ptr->txd_info1.SDP = virt_to_phys(skb->data);
18650 + cpu_ptr->txd_info3.SDL = skb->len;
18651 +#if defined (CONFIG_HW_SFQ)
18652 + SfqParseLayerInfo(skb);
18653 + cpu_ptr->txd_info4.VQID0 = 1;//1:HW hash 0:CPU
18656 +#if(0)// for tcp ack use, test use
18657 + if (ack_packt==1){
18658 + cpu_ptr->txd_info3.QID = 0x0a;
18659 + //cpu_ptr->txd_info3.VQID = 0;
18661 + cpu_ptr->txd_info3.QID = 0;
18664 + cpu_ptr->txd_info3.PROT = sfq_prot;
18665 + cpu_ptr->txd_info3.IPOFST = 14 + (SfqParseResult.vlan1_gap); //no vlan
18668 + if (gmac_no == 1) {
18669 + cpu_ptr->txd_info4.FPORT = 1;
18671 + cpu_ptr->txd_info4.FPORT = 2;
18674 + cpu_ptr->txd_info3.QID = M2Q_table[skb->mark];
18675 +#ifdef CONFIG_PSEUDO_SUPPORT
18676 + if((lan_wan_separate==1) && (gmac_no==2)){
18677 + cpu_ptr->txd_info3.QID += 8;
18678 +#if defined (CONFIG_HW_SFQ)
18679 + if(web_sfq_enable==1 &&(skb->mark == 2)){
18680 + cpu_ptr->txd_info3.QID = HwSfqQUp;
18684 +#if defined (CONFIG_HW_SFQ)
18685 + if((lan_wan_separate==1) && (gmac_no==1)){
18686 + if(web_sfq_enable==1 &&(skb->mark == 2)){
18687 + cpu_ptr->txd_info3.QID = HwSfqQDl;
18691 +#endif //end CONFIG_PSEUDO_SUPPORT
18694 + printk("M2Q_table[%d]=%d\n", skb->mark, M2Q_table[skb->mark]);
18695 + printk("cpu_ptr->txd_info3.QID = %d\n", cpu_ptr->txd_info3.QID);
18698 + iph = (struct iphdr *)skb_network_header(skb);
18699 + if (iph->tos == 0xe0)
18700 + cpu_ptr->txd_info3.QID = 3;
18701 + else if (iph->tos == 0xa0)
18702 + cpu_ptr->txd_info3.QID = 2;
18703 + else if (iph->tos == 0x20)
18704 + cpu_ptr->txd_info3.QID = 1;
18706 + cpu_ptr->txd_info3.QID = 0;
18709 +#if defined (CONFIG_RAETH_CHECKSUM_OFFLOAD) && ! defined(CONFIG_RALINK_RT5350) && !defined (CONFIG_RALINK_MT7628)
18710 + if (skb->ip_summed == CHECKSUM_PARTIAL){
18711 + cpu_ptr->txd_info4.TUI_CO = 7;
18713 + cpu_ptr->txd_info4.TUI_CO = 0;
18717 +#ifdef CONFIG_RAETH_HW_VLAN_TX
18718 + if(vlan_tx_tag_present(skb)) {
18719 + cpu_ptr->txd_info4.VLAN_TAG = 0x10000 | vlan_tx_tag_get(skb);
18721 + cpu_ptr->txd_info4.VLAN_TAG = 0;
18725 +#ifdef CONFIG_RAETH_HW_VLAN_TX // QoS Web UI used
18727 + if((lan_wan_separate==1) && (vlan_tx_tag_get(skb)==2)){
18728 + cpu_ptr->txd_info3.QID += 8;
18729 +#if defined (CONFIG_HW_SFQ)
18730 + if(web_sfq_enable==1 &&(skb->mark == 2)){
18731 + cpu_ptr->txd_info3.QID = HwSfqQUp;
18735 +#if defined (CONFIG_HW_SFQ)
18736 + if((lan_wan_separate==1) && (vlan_tx_tag_get(skb)==1)){
18737 + if(web_sfq_enable==1 &&(skb->mark == 2)){
18738 + cpu_ptr->txd_info3.QID = HwSfqQDl;
18742 +#endif // CONFIG_RAETH_HW_VLAN_TX
18745 +//no hw van, no GE2, web UI used
18746 +#ifndef CONFIG_PSEUDO_SUPPORT
18747 +#ifndef CONFIG_RAETH_HW_VLAN_TX
18748 + if(lan_wan_separate==1){
18749 + struct vlan_hdr *vh = NULL;
18750 + unsigned short vlanid = 0;
18751 + unsigned short vlan_TCI;
18752 + vh = (struct vlan_hdr *)(skb->data + ETH_HLEN);
18753 + vlan_TCI = vh->h_vlan_TCI;
18754 + vlanid = (vlan_TCI & VLAN_VID_MASK)>>8;
18755 + if(vlanid == 2)//to wan
18757 + cpu_ptr->txd_info3.QID += 8;
18758 +#if defined (CONFIG_HW_SFQ)
18759 + if(web_sfq_enable==1 &&(skb->mark == 2)){
18760 + cpu_ptr->txd_info3.QID = HwSfqQUp;
18763 + }else if(vlanid == 1){ //to lan
18764 +#if defined (CONFIG_HW_SFQ)
18765 + if(web_sfq_enable==1 &&(skb->mark == 2)){
18766 + cpu_ptr->txd_info3.QID = HwSfqQDl;
18773 +#if defined (CONFIG_RA_HW_NAT) || defined (CONFIG_RA_HW_NAT_MODULE)
18774 + if(FOE_MAGIC_TAG(skb) == FOE_MAGIC_PPE) {
18775 + if(ra_sw_nat_hook_rx!= NULL){
18776 + cpu_ptr->txd_info4.FPORT = 4; /* PPE */
18777 + FOE_MAGIC_TAG(skb) = 0;
18782 + cpu_ptr->txd_info4.FPORT = 4; /* PPE */
18783 + cpu_ptr->txd_info4.UDF = 0x2F;
18786 +#if defined (CONFIG_MIPS)
18787 + dma_cache_sync(NULL, skb->data, skb->len, DMA_TO_DEVICE);
18789 + dma_sync_single_for_device(NULL, virt_to_phys(skb->data), skb->len, DMA_TO_DEVICE);
18791 + cpu_ptr->txd_info3.SWC_bit = 1;
18793 + //3. get NULL TXD and decrease free_tx_num by 1.
18794 + ctx_offset = get_free_txd(&free_txd);
18795 + if(ctx_offset == NUM_TX_DESC) {
18796 + printk("get_free_txd fail\n"); // this should not happen. free_txd_num is 2 at least.
18800 + //4. hook new TXD in the end of queue
18801 + //cpu_ptr->txd_info2.NDP = VIRT_TO_PHYS(free_txd);
18802 + cpu_ptr->txd_info2.NDP = (free_txd);
18805 + //5. move CPU_PTR to new TXD
18806 + //ei_local->tx_cpu_ptr = VIRT_TO_PHYS(free_txd);
18807 + ei_local->tx_cpu_ptr = (free_txd);
18808 + cpu_ptr->txd_info3.OWN_bit = 0;
18809 + sysRegWrite(QTX_CTX_PTR, ei_local->tx_cpu_ptr);
18812 + printk("----------------------------------------------\n");
18813 + printk("txd_info1:%08X \n",*(int *)&cpu_ptr->txd_info1);
18814 + printk("txd_info2:%08X \n",*(int *)&cpu_ptr->txd_info2);
18815 + printk("txd_info3:%08X \n",*(int *)&cpu_ptr->txd_info3);
18816 + printk("txd_info4:%08X \n",*(int *)&cpu_ptr->txd_info4);
18819 +#else //#if !defined (CONFIG_RAETH_TSO)
18820 + cpu_ptr->txd_info1.SDP = virt_to_phys(skb->data);
18821 + cpu_ptr->txd_info3.SDL = (length - skb->data_len);
18822 + cpu_ptr->txd_info3.LS_bit = nr_frags ? 0:1;
18823 +#if defined (CONFIG_HW_SFQ)
18824 + SfqParseLayerInfo(skb);
18825 + // printk("tcp_source_port=%d\n", tcp_source_port);
18827 + cpu_ptr->txd_info4.VQID0 = 0;//1:HW hash 0:CPU
18828 + if (tcp_source_port==1000) cpu_ptr->txd_info3.VQID = 0;
18829 + else if (tcp_source_port==1100) cpu_ptr->txd_info3.VQID = 1;
18830 + else if (tcp_source_port==1200) cpu_ptr->txd_info3.VQID = 2;
18831 + else cpu_ptr->txd_info3.VQID = 0;
18833 + cpu_ptr->txd_info4.VQID0 = 1;
18834 + cpu_ptr->txd_info3.PROT = sfq_prot;
18835 + cpu_ptr->txd_info3.IPOFST = 14 + (SfqParseResult.vlan1_gap); //no vlan
18838 + if (gmac_no == 1) {
18839 + cpu_ptr->txd_info4.FPORT = 1;
18841 + cpu_ptr->txd_info4.FPORT = 2;
18844 + cpu_ptr->txd_info4.TSO = 0;
18845 + cpu_ptr->txd_info3.QID = M2Q_table[skb->mark];
18846 +#ifdef CONFIG_PSEUDO_SUPPORT //web UI used tso
18847 + if((lan_wan_separate==1) && (gmac_no==2)){
18848 + cpu_ptr->txd_info3.QID += 8;
18849 +#if defined (CONFIG_HW_SFQ)
18850 + if(web_sfq_enable == 1 &&(skb->mark == 2)){
18851 + cpu_ptr->txd_info3.QID = HwSfqQUp;
18855 +#if defined (CONFIG_HW_SFQ)
18856 + if((lan_wan_separate==1) && (gmac_no==1)){
18857 + if(web_sfq_enable==1 &&(skb->mark == 2)){
18858 + cpu_ptr->txd_info3.QID = HwSfqQDl;
18862 +#endif //CONFIG_PSEUDO_SUPPORT
18864 + printk("M2Q_table[%d]=%d\n", skb->mark, M2Q_table[skb->mark]);
18865 + printk("cpu_ptr->txd_info3.QID = %d\n", cpu_ptr->txd_info3.QID);
18867 +#if defined (CONFIG_RAETH_CHECKSUM_OFFLOAD) && ! defined(CONFIG_RALINK_RT5350) && !defined (CONFIG_RALINK_MT7628)
18868 + if (skb->ip_summed == CHECKSUM_PARTIAL){
18869 + cpu_ptr->txd_info4.TUI_CO = 7;
18871 + cpu_ptr->txd_info4.TUI_CO = 0;
18875 +#ifdef CONFIG_RAETH_HW_VLAN_TX
18876 + if(vlan_tx_tag_present(skb)) {
18877 + cpu_ptr->txd_info4.VLAN_TAG = 0x10000 | vlan_tx_tag_get(skb);
18879 + cpu_ptr->txd_info4.VLAN_TAG = 0;
18882 +#ifdef CONFIG_RAETH_HW_VLAN_TX // QoS Web UI used tso
18884 + if((lan_wan_separate==1) && (vlan_tx_tag_get(skb)==2)){
18885 + //cpu_ptr->txd_info3.QID += 8;
18886 + cpu_ptr->txd_info3.QID += 8;
18887 +#if defined (CONFIG_HW_SFQ)
18888 + if(web_sfq_enable==1 &&(skb->mark == 2)){
18889 + cpu_ptr->txd_info3.QID = HwSfqQUp;
18893 +#if defined (CONFIG_HW_SFQ)
18894 + if((lan_wan_separate==1) && (vlan_tx_tag_get(skb)==1)){
18895 + if(web_sfq_enable==1 &&(skb->mark == 2)){
18896 + cpu_ptr->txd_info3.QID = HwSfqQDl;
18900 +#endif // CONFIG_RAETH_HW_VLAN_TX
18903 +//no hw vlan, no GE2, web UI used
18904 +#ifndef CONFIG_PSEUDO_SUPPORT
18905 +#ifndef CONFIG_RAETH_HW_VLAN_TX
18906 + if(lan_wan_separate==1){
18907 + struct vlan_hdr *vh = NULL;
18908 + unsigned short vlanid = 0;
18909 + unsigned short vlan_TCI;
18910 + vh = (struct vlan_hdr *)(skb->data + ETH_HLEN);
18911 + vlan_TCI = vh->h_vlan_TCI;
18912 + vlanid = (vlan_TCI & VLAN_VID_MASK)>>8;
18913 + if(vlanid == 2)//eth2.2 to wan
18915 + cpu_ptr->txd_info3.QID += 8;
18916 +#if defined (CONFIG_HW_SFQ)
18917 + if(web_sfq_enable==1 &&(skb->mark == 2)){
18918 + cpu_ptr->txd_info3.QID = HwSfqQUp;
18921 +	}else if(vlanid == 1){ // eth2.1 to lan
18922 +#if defined (CONFIG_HW_SFQ)
18923 + if(web_sfq_enable==1 &&(skb->mark == 2)){
18924 + cpu_ptr->txd_info3.QID = HwSfqQDl;
18932 +#if defined (CONFIG_RA_HW_NAT) || defined (CONFIG_RA_HW_NAT_MODULE)
18933 + if(FOE_MAGIC_TAG(skb) == FOE_MAGIC_PPE) {
18934 + if(ra_sw_nat_hook_rx!= NULL){
18935 + cpu_ptr->txd_info4.FPORT = 4; /* PPE */
18936 + FOE_MAGIC_TAG(skb) = 0;
18941 + cpu_ptr->txd_info3.SWC_bit = 1;
18943 + ctx_offset = get_free_txd(&free_txd);
18944 + if(ctx_offset == NUM_TX_DESC) {
18945 + printk("get_free_txd fail\n");
18948 + //cpu_ptr->txd_info2.NDP = VIRT_TO_PHYS(free_txd);
18949 + //ei_local->tx_cpu_ptr = VIRT_TO_PHYS(free_txd);
18950 + cpu_ptr->txd_info2.NDP = free_txd;
18951 + ei_local->tx_cpu_ptr = free_txd;
18954 + if(nr_frags > 0) {
18955 + for(i=0;i<nr_frags;i++) {
18956 + // 1. set or get init value for current fragment
18958 + frag = &skb_shinfo(skb)->frags[i];
18959 + len = frag->size;
18960 + frag_txd_num = cal_frag_txd_num(len); // calculate the needed TXD numbers for this fragment
18961 + for(frag_txd_num = frag_txd_num;frag_txd_num > 0; frag_txd_num --){
18962 + // 2. size will be assigned to SDL and can't be larger than MAX_TXD_LEN
18963 + if(len < MAX_TXD_LEN)
18966 + size = MAX_TXD_LEN;
18968 + //3. Update TXD info
18969 + cpu_ptr = (ei_local->txd_pool + (ctx_offset));
18970 + cpu_ptr->txd_info3.QID = M2Q_table[skb->mark];
18971 +#ifdef CONFIG_PSEUDO_SUPPORT //QoS Web UI used , nr_frags
18972 + if((lan_wan_separate==1) && (gmac_no==2)){
18973 + //cpu_ptr->txd_info3.QID += 8;
18974 + cpu_ptr->txd_info3.QID += 8;
18975 +#if defined (CONFIG_HW_SFQ)
18976 + if(web_sfq_enable==1 &&(skb->mark == 2)){
18977 + cpu_ptr->txd_info3.QID = HwSfqQUp;
18981 +#if defined (CONFIG_HW_SFQ)
18982 + if((lan_wan_separate==1) && (gmac_no==1)){
18983 + if(web_sfq_enable==1 &&(skb->mark == 2)){
18984 + cpu_ptr->txd_info3.QID = HwSfqQDl;
18988 +#endif //CONFIG_PSEUDO_SUPPORT
18990 +//QoS web used, nr_frags
18991 +#ifdef CONFIG_RAETH_HW_VLAN_TX
18992 + if((lan_wan_separate==1) && (vlan_tx_tag_get(skb)==2)){
18993 + cpu_ptr->txd_info3.QID += 8;
18994 +#if defined (CONFIG_HW_SFQ)
18995 + if(web_sfq_enable==1 &&(skb->mark == 2)){
18996 + cpu_ptr->txd_info3.QID = HwSfqQUp;
19000 +#if defined (CONFIG_HW_SFQ)
19001 + if((lan_wan_separate==1) && (vlan_tx_tag_get(skb)==1)){
19002 + if(web_sfq_enable==1 &&(skb->mark == 2)){
19003 + cpu_ptr->txd_info3.QID = HwSfqQDl;
19007 +#endif // CONFIG_RAETH_HW_VLAN_TX
19008 +//no hw vlan, no GE2, web UI used
19009 +#ifndef CONFIG_PSEUDO_SUPPORT
19010 +#ifndef CONFIG_RAETH_HW_VLAN_TX
19011 + if(lan_wan_separate==1){
19012 + struct vlan_hdr *vh = NULL;
19013 + unsigned short vlanid = 0;
19014 + unsigned short vlan_TCI;
19015 + vh = (struct vlan_hdr *)(skb->data + ETH_HLEN);
19016 + vlan_TCI = vh->h_vlan_TCI;
19017 + vlanid = (vlan_TCI & VLAN_VID_MASK)>>8;
19018 +		  if(vlanid == 2)//eth2.2 to wan
19020 + cpu_ptr->txd_info3.QID += 8;
19021 +#if defined (CONFIG_HW_SFQ)
19022 + if(web_sfq_enable==1 &&(skb->mark == 2)){
19023 + cpu_ptr->txd_info3.QID = HwSfqQUp;
19027 + }else if(vlanid == 1){ // eth2.1 to lan
19028 +#if defined (CONFIG_HW_SFQ)
19029 + if(web_sfq_enable==1 &&(skb->mark == 2)){
19030 + cpu_ptr->txd_info3.QID = HwSfqQDl;
19038 + printk("M2Q_table[%d]=%d\n", skb->mark, M2Q_table[skb->mark]);
19039 + printk("cpu_ptr->txd_info3.QID = %d\n", cpu_ptr->txd_info3.QID);
19041 +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0)
19042 + cpu_ptr->txd_info1.SDP = pci_map_page(NULL, frag->page, frag->page_offset, frag->size, PCI_DMA_TODEVICE);
19044 + cpu_ptr->txd_info1.SDP = pci_map_page(NULL, frag->page.p, frag->page_offset + offset, size, PCI_DMA_TODEVICE);
19045 +// printk(" frag->page = %08x. frag->page_offset = %08x. frag->size = % 08x.\n", frag->page, (frag->page_offset+offset), size);
19047 + cpu_ptr->txd_info3.SDL = size;
19048 + if( (i==(nr_frags-1)) && (frag_txd_num == 1))
19049 + cpu_ptr->txd_info3.LS_bit = 1;
19051 + cpu_ptr->txd_info3.LS_bit = 0;
19052 + cpu_ptr->txd_info3.OWN_bit = 0;
19053 + cpu_ptr->txd_info3.SWC_bit = 1;
19054 + //4. Update skb_free for housekeeping
19055 + ei_local->skb_free[ctx_offset] = (cpu_ptr->txd_info3.LS_bit == 1)?skb:(struct sk_buff *)0xFFFFFFFF; //MAGIC ID
19057 + //5. Get next TXD
19058 + ctx_offset = get_free_txd(&free_txd);
19059 + //cpu_ptr->txd_info2.NDP = VIRT_TO_PHYS(free_txd);
19060 + //ei_local->tx_cpu_ptr = VIRT_TO_PHYS(free_txd);
19061 + cpu_ptr->txd_info2.NDP = free_txd;
19062 + ei_local->tx_cpu_ptr = free_txd;
19063 + //6. Update offset and len.
19068 + ei_local->skb_free[init_txd_idx]= (struct sk_buff *)0xFFFFFFFF; //MAGIC ID
19071 + if(skb_shinfo(skb)->gso_segs > 1) {
19073 +// TsoLenUpdate(skb->len);
19075 + /* TCP over IPv4 */
19076 + iph = (struct iphdr *)skb_network_header(skb);
19077 +#if defined (CONFIG_RAETH_TSOV6)
19078 + /* TCP over IPv6 */
19079 + ip6h = (struct ipv6hdr *)skb_network_header(skb);
19081 + if((iph->version == 4) && (iph->protocol == IPPROTO_TCP)) {
19082 + th = (struct tcphdr *)skb_transport_header(skb);
19083 +#if defined (CONFIG_HW_SFQ)
19085 + init_cpu_ptr->txd_info4.VQID0 = 0;//1:HW hash 0:CPU
19086 + if (tcp_source_port==1000) init_cpu_ptr->txd_info3.VQID = 0;
19087 + else if (tcp_source_port==1100) init_cpu_ptr->txd_info3.VQID = 1;
19088 + else if (tcp_source_port==1200) init_cpu_ptr->txd_info3.VQID = 2;
19089 +				else init_cpu_ptr->txd_info3.VQID = 0;
19091 + init_cpu_ptr->txd_info4.VQID0 = 1;
19092 + init_cpu_ptr->txd_info3.PROT = sfq_prot;
19093 + init_cpu_ptr->txd_info3.IPOFST = 14 + (SfqParseResult.vlan1_gap); //no vlan
19096 + init_cpu_ptr->txd_info4.TSO = 1;
19098 + th->check = htons(skb_shinfo(skb)->gso_size);
19099 +#if defined (CONFIG_MIPS)
19100 + dma_cache_sync(NULL, th, sizeof(struct tcphdr), DMA_TO_DEVICE);
19102 + dma_sync_single_for_device(NULL, virt_to_phys(th), sizeof(struct tcphdr), DMA_TO_DEVICE);
19106 +#if defined (CONFIG_RAETH_TSOV6)
19107 + /* TCP over IPv6 */
19108 + //ip6h = (struct ipv6hdr *)skb_network_header(skb);
19109 + else if ((ip6h->version == 6) && (ip6h->nexthdr == NEXTHDR_TCP)) {
19110 + th = (struct tcphdr *)skb_transport_header(skb);
19111 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
19112 + init_cpu_ptr->txd_info4.TSO = 1;
19114 + init_cpu_ptr->txd_info4.TSO = 1;
19116 + th->check = htons(skb_shinfo(skb)->gso_size);
19117 +#if defined (CONFIG_MIPS)
19118 + dma_cache_sync(NULL, th, sizeof(struct tcphdr), DMA_TO_DEVICE);
19120 + dma_sync_single_for_device(NULL, virt_to_phys(th), sizeof(struct tcphdr), DMA_TO_DEVICE);
19127 +// dma_cache_sync(NULL, skb->data, skb->len, DMA_TO_DEVICE);
19129 + init_cpu_ptr->txd_info3.OWN_bit = 0;
19130 +#endif // CONFIG_RAETH_TSO //
19132 + sysRegWrite(QTX_CTX_PTR, ei_local->tx_cpu_ptr);
19134 +#ifdef CONFIG_PSEUDO_SUPPORT
19135 + if (gmac_no == 2) {
19136 + if (ei_local->PseudoDev != NULL) {
19137 + pAd = netdev_priv(ei_local->PseudoDev);
19138 + pAd->stat.tx_packets++;
19139 + pAd->stat.tx_bytes += length;
19145 + ei_local->stat.tx_packets++;
19146 + ei_local->stat.tx_bytes += skb->len;
19148 +#ifdef CONFIG_RAETH_NAPI
19149 + if ( ei_local->tx_full == 1) {
19150 + ei_local->tx_full = 0;
19151 + netif_wake_queue(dev);
19158 +int ei_start_xmit(struct sk_buff* skb, struct net_device *dev, int gmac_no)
19160 + END_DEVICE *ei_local = netdev_priv(dev);
19161 + unsigned long flags;
19162 + unsigned int num_of_txd = 0;
19163 +#if defined (CONFIG_RAETH_TSO)
19164 + unsigned int nr_frags = skb_shinfo(skb)->nr_frags, i;
19165 + struct skb_frag_struct *frag;
19167 +#ifdef CONFIG_PSEUDO_SUPPORT
19168 + PSEUDO_ADAPTER *pAd;
19171 +#if !defined(CONFIG_RA_NAT_NONE)
19172 + if(ra_sw_nat_hook_tx!= NULL)
19174 +// spin_lock_irqsave(&ei_local->page_lock, flags);
19175 + if(ra_sw_nat_hook_tx(skb, gmac_no)==1){
19176 +// spin_unlock_irqrestore(&ei_local->page_lock, flags);
19179 +// spin_unlock_irqrestore(&ei_local->page_lock, flags);
19187 + dev->trans_start = jiffies; /* save the timestamp */
19188 + spin_lock_irqsave(&ei_local->page_lock, flags);
19189 +#if defined (CONFIG_MIPS)
19190 + dma_cache_sync(NULL, skb->data, skb->len, DMA_TO_DEVICE);
19192 + dma_sync_single_for_device(NULL, virt_to_phys(skb->data), skb->len, DMA_TO_DEVICE);
19196 +//check free_txd_num before calling rt288_eth_send()
19198 +#if defined (CONFIG_RAETH_TSO)
19199 + // num_of_txd = (nr_frags==0) ? 1 : (nr_frags + 1);
19200 + if(nr_frags != 0){
19201 + for(i=0;i<nr_frags;i++) {
19202 + frag = &skb_shinfo(skb)->frags[i];
19203 + num_of_txd += cal_frag_txd_num(frag->size);
19211 +#if defined(CONFIG_RALINK_MT7621)
19212 + if((sysRegRead(0xbe00000c)&0xFFFF) == 0x0101) {
19213 + ei_xmit_housekeeping(0);
19218 + if ((ei_local->free_txd_num > num_of_txd + 1) && (ei_local->free_txd_num != NUM_TX_DESC))
19220 + rt2880_eth_send(dev, skb, gmac_no); // need to modify rt2880_eth_send() for QDMA
19221 + if (ei_local->free_txd_num < 3)
19223 +#if defined (CONFIG_RAETH_STOP_RX_WHEN_TX_FULL)
19224 + netif_stop_queue(dev);
19225 +#ifdef CONFIG_PSEUDO_SUPPORT
19226 + netif_stop_queue(ei_local->PseudoDev);
19228 + tx_ring_full = 1;
19232 +#ifdef CONFIG_PSEUDO_SUPPORT
19233 + if (gmac_no == 2)
19235 + if (ei_local->PseudoDev != NULL)
19237 + pAd = netdev_priv(ei_local->PseudoDev);
19238 + pAd->stat.tx_dropped++;
19242 + ei_local->stat.tx_dropped++;
19244 + spin_unlock_irqrestore(&ei_local->page_lock, flags);
19247 + spin_unlock_irqrestore(&ei_local->page_lock, flags);
19251 +void ei_xmit_housekeeping(unsigned long unused)
19253 + struct net_device *dev = dev_raether;
19254 + END_DEVICE *ei_local = netdev_priv(dev);
19255 +#ifndef CONFIG_RAETH_NAPI
19256 + unsigned long reg_int_mask=0;
19258 + struct QDMA_txdesc *dma_ptr = NULL;
19259 + struct QDMA_txdesc *cpu_ptr = NULL;
19260 + struct QDMA_txdesc *tmp_ptr = NULL;
19261 + unsigned int ctx_offset = 0;
19262 + unsigned int dtx_offset = 0;
19264 + cpu_ptr = sysRegRead(QTX_CRX_PTR);
19265 + dma_ptr = sysRegRead(QTX_DRX_PTR);
19266 + ctx_offset = GET_TXD_OFFSET(&cpu_ptr);
19267 + dtx_offset = GET_TXD_OFFSET(&dma_ptr);
19268 + cpu_ptr = (ei_local->txd_pool + (ctx_offset));
19269 + dma_ptr = (ei_local->txd_pool + (dtx_offset));
19271 + while(cpu_ptr != dma_ptr && (cpu_ptr->txd_info3.OWN_bit == 1)) {
19272 + //1. keep cpu next TXD
19273 + tmp_ptr = cpu_ptr->txd_info2.NDP;
19275 + put_free_txd(ctx_offset);
19276 + //3. update ctx_offset and free skb memory
19277 + ctx_offset = GET_TXD_OFFSET(&tmp_ptr);
19278 +#if defined (CONFIG_RAETH_TSO)
19279 + if(ei_local->skb_free[ctx_offset]!=(struct sk_buff *)0xFFFFFFFF) {
19280 + dev_kfree_skb_any(ei_local->skb_free[ctx_offset]);
19283 + dev_kfree_skb_any(ei_local->skb_free[ctx_offset]);
19285 + ei_local->skb_free[ctx_offset] = 0;
19287 + netif_wake_queue(dev);
19288 +#ifdef CONFIG_PSEUDO_SUPPORT
19289 + netif_wake_queue(ei_local->PseudoDev);
19292 + //4. update cpu_ptr
19293 + cpu_ptr = (ei_local->txd_pool + ctx_offset);
19295 + sysRegWrite(QTX_CRX_PTR, (ei_local->phy_txd_pool + (ctx_offset << 4)));
19296 +#ifndef CONFIG_RAETH_NAPI
19297 + reg_int_mask=sysRegRead(QFE_INT_ENABLE);
19298 +#if defined (DELAY_INT)
19299 + sysRegWrite(QFE_INT_ENABLE, reg_int_mask| RLS_DLY_INT);
19302 + sysRegWrite(QFE_INT_ENABLE, reg_int_mask | RLS_DONE_INT);
19304 +#endif //CONFIG_RAETH_NAPI//
19307 +EXPORT_SYMBOL(ei_start_xmit);
19308 +EXPORT_SYMBOL(ei_xmit_housekeeping);
19309 +EXPORT_SYMBOL(fe_dma_init);
19310 +EXPORT_SYMBOL(rt2880_eth_send);
19311 diff --git a/drivers/net/ethernet/raeth/raether_qdma_mt7623.c b/drivers/net/ethernet/raeth/raether_qdma_mt7623.c
19312 new file mode 100644
19313 index 0000000..b465b75
19315 +++ b/drivers/net/ethernet/raeth/raether_qdma_mt7623.c
19317 +#include <linux/module.h>
19318 +#include <linux/version.h>
19319 +#include <linux/kernel.h>
19320 +#include <linux/types.h>
19321 +#include <linux/pci.h>
19322 +#include <linux/init.h>
19323 +#include <linux/skbuff.h>
19324 +#include <linux/if_vlan.h>
19325 +#include <linux/if_ether.h>
19326 +#include <linux/fs.h>
19327 +#include <asm/uaccess.h>
19328 +#include <asm/rt2880/surfboardint.h>
19329 +#if defined (CONFIG_RAETH_TSO)
19330 +#include <linux/tcp.h>
19331 +#include <net/ipv6.h>
19332 +#include <linux/ip.h>
19333 +#include <net/ip.h>
19334 +#include <net/tcp.h>
19335 +#include <linux/in.h>
19336 +#include <linux/ppp_defs.h>
19337 +#include <linux/if_pppox.h>
19339 +#include <linux/delay.h>
19340 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
19341 +#include <linux/sched.h>
19344 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0)
19345 +#include <asm/rt2880/rt_mmap.h>
19347 +#include <linux/libata-compat.h>
19350 +#include "ra2882ethreg.h"
19351 +#include "raether.h"
19352 +#include "ra_mac.h"
19353 +#include "ra_ioctl.h"
19354 +#include "ra_rfrw.h"
19355 +#ifdef CONFIG_RAETH_NETLINK
19356 +#include "ra_netlink.h"
19358 +#if defined (CONFIG_RAETH_QOS)
19359 +#include "ra_qos.h"
19362 +#if defined (CONFIG_RA_HW_NAT) || defined (CONFIG_RA_HW_NAT_MODULE)
19363 +#include "../../../net/nat/hw_nat/ra_nat.h"
19367 +#if !defined(CONFIG_RA_NAT_NONE)
19370 +extern int (*ra_sw_nat_hook_rx)(struct sk_buff *skb);
19371 +extern int (*ra_sw_nat_hook_tx)(struct sk_buff *skb, int gmac_no);
19374 +#if defined(CONFIG_RA_CLASSIFIER)||defined(CONFIG_RA_CLASSIFIER_MODULE)
19377 +#include <asm/mipsregs.h>
19378 +extern int (*ra_classifier_hook_tx)(struct sk_buff *skb, unsigned long cur_cycle);
19379 +extern int (*ra_classifier_hook_rx)(struct sk_buff *skb, unsigned long cur_cycle);
19380 +#endif /* CONFIG_RA_CLASSIFIER */
19382 +#if defined (CONFIG_RALINK_RT3052_MP2)
19383 +int32_t mcast_rx(struct sk_buff * skb);
19384 +int32_t mcast_tx(struct sk_buff * skb);
19387 +#ifdef RA_MTD_RW_BY_NUM
19388 +int ra_mtd_read(int num, loff_t from, size_t len, u_char *buf);
19390 +int ra_mtd_read_nm(char *name, loff_t from, size_t len, u_char *buf);
19393 +/* gmac driver feature set config */
19394 +#if defined (CONFIG_RAETH_NAPI) || defined (CONFIG_RAETH_QOS)
19397 +#define DELAY_INT 1
19400 +//#define CONFIG_UNH_TEST
19401 +/* end of config */
19403 +#if defined (CONFIG_RAETH_JUMBOFRAME)
19404 +#define MAX_RX_LENGTH 4096
19406 +#define MAX_RX_LENGTH 1536
19409 +extern struct net_device *dev_raether;
19411 +#if defined (CONFIG_RAETH_MULTIPLE_RX_RING)
19412 +static int rx_dma_owner_idx1;
19413 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
19414 +static int rx_calc_idx1;
19417 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
19418 +static int rx_calc_idx0;
19419 +static unsigned long tx_cpu_owner_idx0=0;
19421 +static unsigned long tx_ring_full=0;
19423 +#if defined (CONFIG_ETHTOOL) && defined (CONFIG_RAETH_ROUTER)
19424 +#include "ra_ethtool.h"
19425 +extern struct ethtool_ops ra_ethtool_ops;
19426 +#ifdef CONFIG_PSEUDO_SUPPORT
19427 +extern struct ethtool_ops ra_virt_ethtool_ops;
19428 +#endif // CONFIG_PSEUDO_SUPPORT //
19429 +#endif // (CONFIG_ETHTOOL //
19431 +#ifdef CONFIG_RALINK_VISTA_BASIC
19432 +int is_switch_175c = 1;
19435 +//skb->mark to queue mapping table
19436 +extern unsigned int M2Q_table[64];
19439 +#define KSEG1 0xa0000000
19440 +#if defined (CONFIG_MIPS)
19441 +#define PHYS_TO_VIRT(x) ((void *)((x) | KSEG1))
19442 +#define VIRT_TO_PHYS(x) ((unsigned long)(x) & ~KSEG1)
19444 +#define PHYS_TO_VIRT(x) phys_to_virt(x)
19445 +#define VIRT_TO_PHYS(x) virt_to_phys(x)
19449 +extern void set_fe_dma_glo_cfg(void);
19454 + * @brief: get the TXD index from its address
19456 + * @param: cpu_ptr
19458 + * @return: TXD index
19461 +static unsigned int GET_TXD_OFFSET(struct QDMA_txdesc **cpu_ptr)
19463 + struct net_device *dev = dev_raether;
19464 + END_DEVICE *ei_local = netdev_priv(dev);
19466 + //ctx_offset = (((((u32)*cpu_ptr) <<8)>>8) - ((((u32)ei_local->txd_pool)<<8)>>8))/ sizeof(struct QDMA_txdesc);
19467 + //ctx_offset = (*cpu_ptr - ei_local->txd_pool);
19469 + ctx_offset = (((((u32)*cpu_ptr) <<8)>>8) - ((((u32)ei_local->phy_txd_pool)<<8)>>8))/ sizeof(struct QDMA_txdesc);
19470 + return ctx_offset;
19477 + * @brief cal txd number for a page
19481 + * @return frag_txd_num
19484 +unsigned int cal_frag_txd_num(unsigned int size)
19486 + unsigned int frag_txd_num = 0;
19490 + if(size > MAX_TXD_LEN){
19492 + size -= MAX_TXD_LEN;
19498 + return frag_txd_num;
19503 + * @brief get free TXD from TXD queue
19505 + * @param free_txd
19509 +static int get_free_txd(struct QDMA_txdesc **free_txd)
19511 + struct net_device *dev = dev_raether;
19512 + END_DEVICE *ei_local = netdev_priv(dev);
19513 + unsigned int tmp_idx;
19515 + if(ei_local->free_txd_num > 0){
19516 + tmp_idx = ei_local->free_txd_head;
19517 + ei_local->free_txd_head = ei_local->txd_pool_info[tmp_idx];
19518 + ei_local->free_txd_num -= 1;
19519 + //*free_txd = &ei_local->txd_pool[tmp_idx];
19520 + *free_txd = ei_local->phy_txd_pool + (sizeof(struct QDMA_txdesc) * tmp_idx);
19523 + return NUM_TX_DESC;
19528 + * @brief add free TXD into TXD queue
19530 + * @param free_txd
19534 +int put_free_txd(int free_txd_idx)
19536 + struct net_device *dev = dev_raether;
19537 + END_DEVICE *ei_local = netdev_priv(dev);
19538 + ei_local->txd_pool_info[ei_local->free_txd_tail] = free_txd_idx;
19539 + ei_local->free_txd_tail = free_txd_idx;
19540 + ei_local->txd_pool_info[free_txd_idx] = NUM_TX_DESC;
19541 + ei_local->free_txd_num += 1;
19545 +/*define qdma initial alloc*/
19551 + * @return 0: fail
19554 +bool qdma_tx_desc_alloc(void)
19556 + struct net_device *dev = dev_raether;
19557 + END_DEVICE *ei_local = netdev_priv(dev);
19558 + struct QDMA_txdesc *free_txd = NULL;
19559 + unsigned int txd_idx;
19563 + ei_local->txd_pool = pci_alloc_consistent(NULL, sizeof(struct QDMA_txdesc) * NUM_TX_DESC, &ei_local->phy_txd_pool);
19564 + printk("txd_pool=%p phy_txd_pool=%08X\n", ei_local->txd_pool , ei_local->phy_txd_pool);
19566 + if (ei_local->txd_pool == NULL) {
19567 + printk("adapter->txd_pool allocation failed!\n");
19570 + printk("ei_local->skb_free start address is 0x%p.\n", ei_local->skb_free);
19571 + //set all txd_pool_info to 0.
19572 + for ( i = 0; i < NUM_TX_DESC; i++)
19574 + ei_local->skb_free[i]= 0;
19575 + ei_local->txd_pool_info[i] = i + 1;
19576 + ei_local->txd_pool[i].txd_info3.LS_bit = 1;
19577 + ei_local->txd_pool[i].txd_info3.OWN_bit = 1;
19580 + ei_local->free_txd_head = 0;
19581 + ei_local->free_txd_tail = NUM_TX_DESC - 1;
19582 + ei_local->free_txd_num = NUM_TX_DESC;
19585 + //get free txd from txd pool
19586 + txd_idx = get_free_txd(&free_txd);
19587 + if( txd_idx == NUM_TX_DESC) {
19588 + printk("get_free_txd fail\n");
19592 + //add null TXD for transmit
19595 + ei_local->tx_dma_ptr = free_txd;
19596 + ei_local->tx_cpu_ptr = free_txd;
19597 + //ei_local->tx_dma_ptr = virt_to_phys(free_txd);
19598 + //ei_local->tx_cpu_ptr = virt_to_phys(free_txd);
19599 + sysRegWrite(QTX_CTX_PTR, ei_local->tx_cpu_ptr);
19600 + sysRegWrite(QTX_DTX_PTR, ei_local->tx_dma_ptr);
19602 + printk("kurtis: free_txd = 0x%x!!!\n", free_txd);
19603 + printk("kurtis: ei_local->tx_dma_ptr = 0x%x!!!\n", ei_local->tx_dma_ptr);
19605 + //get free txd from txd pool
19607 + txd_idx = get_free_txd(&free_txd);
19608 + if( txd_idx == NUM_TX_DESC) {
19609 + printk("get_free_txd fail\n");
19612 + // add null TXD for release
19613 + //sysRegWrite(QTX_CRX_PTR, virt_to_phys(free_txd));
19614 + //sysRegWrite(QTX_DRX_PTR, virt_to_phys(free_txd));
19615 + sysRegWrite(QTX_CRX_PTR, free_txd);
19616 + sysRegWrite(QTX_DRX_PTR, free_txd);
19618 + printk("free_txd: %p, ei_local->cpu_ptr: %08X\n", free_txd, ei_local->tx_cpu_ptr);
19620 + printk(" POOL HEAD_PTR | DMA_PTR | CPU_PTR \n");
19621 + printk("----------------+---------+--------\n");
19623 + printk(" 0x%p 0x%08X 0x%08X\n",ei_local->txd_pool,
19624 + ei_local->tx_dma_ptr, ei_local->tx_cpu_ptr);
19629 +bool fq_qdma_init(void)
19631 + struct QDMA_txdesc *free_head = NULL;
19632 + unsigned int free_head_phy;
19633 + unsigned int free_tail_phy;
19634 + unsigned int *free_page_head = NULL;
19635 + unsigned int free_page_head_phy;
19638 + free_head = pci_alloc_consistent(NULL, NUM_QDMA_PAGE * sizeof(struct QDMA_txdesc), &free_head_phy);
19639 + if (unlikely(free_head == NULL)){
19640 +		printk(KERN_ERR "QDMA FQ descriptor not available...\n");
19643 + memset(free_head, 0x0, sizeof(struct QDMA_txdesc) * NUM_QDMA_PAGE);
19645 + free_page_head = pci_alloc_consistent(NULL, NUM_QDMA_PAGE * QDMA_PAGE_SIZE, &free_page_head_phy);
19646 + if (unlikely(free_page_head == NULL)){
19647 + printk(KERN_ERR "QDMA FQ pager not available...\n");
19650 + for (i=0; i < NUM_QDMA_PAGE; i++) {
19651 + free_head[i].txd_info1.SDP = (free_page_head_phy + (i * QDMA_PAGE_SIZE));
19652 + if(i < (NUM_QDMA_PAGE-1)){
19653 + free_head[i].txd_info2.NDP = (free_head_phy + ((i+1) * sizeof(struct QDMA_txdesc)));
19657 + printk("free_head_phy[%d] is 0x%x!!!\n",i, VIRT_TO_PHYS(&free_head[i]) );
19658 + printk("free_head[%d] is 0x%x!!!\n",i, &free_head[i] );
19659 + printk("free_head[%d].txd_info1.SDP is 0x%x!!!\n",i, free_head[i].txd_info1.SDP );
19660 + printk("free_head[%d].txd_info2.NDP is 0x%x!!!\n",i, free_head[i].txd_info2.NDP );
19663 + free_head[i].txd_info3.SDL = QDMA_PAGE_SIZE;
19666 + free_tail_phy = (free_head_phy + (u32)((NUM_QDMA_PAGE-1) * sizeof(struct QDMA_txdesc)));
19668 + printk("free_head_phy is 0x%x!!!\n", free_head_phy);
19669 + printk("free_tail_phy is 0x%x!!!\n", free_tail_phy);
19670 + sysRegWrite(QDMA_FQ_HEAD, (u32)free_head_phy);
19671 + sysRegWrite(QDMA_FQ_TAIL, (u32)free_tail_phy);
19672 + sysRegWrite(QDMA_FQ_CNT, ((NUM_TX_DESC << 16) | NUM_QDMA_PAGE));
19673 + sysRegWrite(QDMA_FQ_BLEN, QDMA_PAGE_SIZE << 16);
19677 +int fe_dma_init(struct net_device *dev)
19681 + unsigned int regVal;
19682 + END_DEVICE* ei_local = netdev_priv(dev);
19688 + regVal = sysRegRead(QDMA_GLO_CFG);
19689 + if((regVal & RX_DMA_BUSY))
19691 + printk("\n RX_DMA_BUSY !!! ");
19694 + if((regVal & TX_DMA_BUSY))
19696 + printk("\n TX_DMA_BUSY !!! ");
19701 + /*tx desc alloc, add a NULL TXD to HW*/
19703 + qdma_tx_desc_alloc();
19706 + /* Initial RX Ring 0*/
19707 +#ifdef CONFIG_32B_DESC
19708 + ei_local->rx_ring0 = kmalloc(NUM_QRX_DESC * sizeof(struct PDMA_rxdesc), GFP_KERNEL);
19709 + ei_local->phy_rx_ring0 = virt_to_phys(ei_local->rx_ring0);
19711 + ei_local->rx_ring0 = pci_alloc_consistent(NULL, NUM_QRX_DESC * sizeof(struct PDMA_rxdesc), &ei_local->phy_rx_ring0);
19713 + for (i = 0; i < NUM_QRX_DESC; i++) {
19714 + memset(&ei_local->rx_ring0[i],0,sizeof(struct PDMA_rxdesc));
19715 + ei_local->rx_ring0[i].rxd_info2.DDONE_bit = 0;
19716 +#if defined (CONFIG_RAETH_SCATTER_GATHER_RX_DMA)
19717 + ei_local->rx_ring0[i].rxd_info2.LS0 = 0;
19718 + ei_local->rx_ring0[i].rxd_info2.PLEN0 = MAX_RX_LENGTH;
19720 + ei_local->rx_ring0[i].rxd_info2.LS0 = 1;
19722 + ei_local->rx_ring0[i].rxd_info1.PDP0 = dma_map_single(NULL, ei_local->netrx0_skbuf[i]->data, MAX_RX_LENGTH, PCI_DMA_FROMDEVICE);
19724 + printk("QDMA_RX:phy_rx_ring0 = 0x%08x, rx_ring0 = 0x%p\n",ei_local->phy_rx_ring0,ei_local->rx_ring0);
19726 +#if defined (CONFIG_RAETH_MULTIPLE_RX_RING)
19727 + /* Initial RX Ring 1*/
19728 +#ifdef CONFIG_32B_DESC
19729 + ei_local->rx_ring1 = kmalloc(NUM_QRX_DESC * sizeof(struct PDMA_rxdesc), GFP_KERNEL);
19730 + ei_local->phy_rx_ring1 = virt_to_phys(ei_local->rx_ring1);
19732 + ei_local->rx_ring1 = pci_alloc_consistent(NULL, NUM_QRX_DESC * sizeof(struct PDMA_rxdesc), &ei_local->phy_rx_ring1);
19734 + for (i = 0; i < NUM_QRX_DESC; i++) {
19735 + memset(&ei_local->rx_ring1[i],0,sizeof(struct PDMA_rxdesc));
19736 + ei_local->rx_ring1[i].rxd_info2.DDONE_bit = 0;
19737 +#if defined (CONFIG_RAETH_SCATTER_GATHER_RX_DMA)
19738 +		ei_local->rx_ring1[i].rxd_info2.LS0 = 0;
19739 +		ei_local->rx_ring1[i].rxd_info2.PLEN0 = MAX_RX_LENGTH;
19741 + ei_local->rx_ring1[i].rxd_info2.LS0 = 1;
19743 + ei_local->rx_ring1[i].rxd_info1.PDP0 = dma_map_single(NULL, ei_local->netrx1_skbuf[i]->data, MAX_RX_LENGTH, PCI_DMA_FROMDEVICE);
19745 + printk("\nphy_rx_ring1 = 0x%08x, rx_ring1 = 0x%p\n",ei_local->phy_rx_ring1,ei_local->rx_ring1);
19748 + regVal = sysRegRead(QDMA_GLO_CFG);
19749 + regVal &= 0x000000FF;
19750 + sysRegWrite(QDMA_GLO_CFG, regVal);
19751 + regVal=sysRegRead(QDMA_GLO_CFG);
19753 + /* Tell the adapter where the TX/RX rings are located. */
19755 + sysRegWrite(QRX_BASE_PTR_0, phys_to_bus((u32) ei_local->phy_rx_ring0));
19756 + sysRegWrite(QRX_MAX_CNT_0, cpu_to_le32((u32) NUM_QRX_DESC));
19757 + sysRegWrite(QRX_CRX_IDX_0, cpu_to_le32((u32) (NUM_QRX_DESC - 1)));
19758 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
19759 + rx_calc_idx0 = rx_dma_owner_idx0 = sysRegRead(QRX_CRX_IDX_0);
19761 + sysRegWrite(QDMA_RST_CFG, PST_DRX_IDX0);
19762 +#if defined (CONFIG_RAETH_MULTIPLE_RX_RING)
19763 + sysRegWrite(QRX_BASE_PTR_1, phys_to_bus((u32) ei_local->phy_rx_ring1));
19764 + sysRegWrite(QRX_MAX_CNT_1, cpu_to_le32((u32) NUM_QRX_DESC));
19765 + sysRegWrite(QRX_CRX_IDX_1, cpu_to_le32((u32) (NUM_QRX_DESC - 1)));
19766 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
19767 + rx_calc_idx1 = rx_dma_owner_idx1 = sysRegRead(QRX_CRX_IDX_1);
19769 + sysRegWrite(QDMA_RST_CFG, PST_DRX_IDX1);
19772 +#if !defined (CONFIG_RAETH_QDMATX_QDMARX)
19773 + /* Initial PDMA RX Ring 0*/
19774 +#ifdef CONFIG_32B_DESC
19775 + ei_local->rx_ring0 = kmalloc(NUM_RX_DESC * sizeof(struct PDMA_rxdesc), GFP_KERNEL);
19776 + ei_local->phy_rx_ring0 = virt_to_phys(ei_local->rx_ring0);
19778 + ei_local->rx_ring0 = pci_alloc_consistent(NULL, NUM_RX_DESC * sizeof(struct PDMA_rxdesc), &ei_local->phy_rx_ring0);
19780 + for (i = 0; i < NUM_RX_DESC; i++) {
19781 + memset(&ei_local->rx_ring0[i],0,sizeof(struct PDMA_rxdesc));
19782 + ei_local->rx_ring0[i].rxd_info2.DDONE_bit = 0;
19783 +#if defined (CONFIG_RAETH_SCATTER_GATHER_RX_DMA)
19784 + ei_local->rx_ring0[i].rxd_info2.LS0 = 0;
19785 + ei_local->rx_ring0[i].rxd_info2.PLEN0 = MAX_RX_LENGTH;
19787 + ei_local->rx_ring0[i].rxd_info2.LS0 = 1;
19789 + ei_local->rx_ring0[i].rxd_info1.PDP0 = dma_map_single(NULL, ei_local->netrx0_skbuf[i]->data, MAX_RX_LENGTH, PCI_DMA_FROMDEVICE);
19791 + printk("PDMA_RX:phy_rx_ring0 = 0x%08x, rx_ring0 = 0x%p\n",ei_local->phy_rx_ring0,ei_local->rx_ring0);
19793 + regVal = sysRegRead(PDMA_GLO_CFG);
19794 + regVal &= 0x000000FF;
19795 + sysRegWrite(PDMA_GLO_CFG, regVal);
19796 + regVal=sysRegRead(PDMA_GLO_CFG);
19798 + sysRegWrite(RX_BASE_PTR0, phys_to_bus((u32) ei_local->phy_rx_ring0));
19799 + sysRegWrite(RX_MAX_CNT0, cpu_to_le32((u32) NUM_RX_DESC));
19800 + sysRegWrite(RX_CALC_IDX0, cpu_to_le32((u32) (NUM_RX_DESC - 1)));
19801 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
19802 + rx_calc_idx0 = sysRegRead(RX_CALC_IDX0);
19804 + sysRegWrite(PDMA_RST_CFG, PST_DRX_IDX0);
19807 + /* Enable randon early drop and set drop threshold automatically */
19808 + sysRegWrite(QDMA_FC_THRES, 0x174444);
19809 + sysRegWrite(QDMA_HRED2, 0x0);
19810 + set_fe_dma_glo_cfg();
19815 +inline int rt2880_eth_send(struct net_device* dev, struct sk_buff *skb, int gmac_no)
19817 + unsigned int length=skb->len;
19818 + END_DEVICE* ei_local = netdev_priv(dev);
19820 + struct QDMA_txdesc *cpu_ptr;
19822 + struct QDMA_txdesc *dma_ptr __maybe_unused;
19823 + struct QDMA_txdesc *free_txd;
19824 + unsigned int ctx_offset = 0;
19825 + unsigned int dtx_offset = 0;
19826 +#if defined (CONFIG_RAETH_TSO)
19827 + struct iphdr *iph = NULL;
19828 + struct QDMA_txdesc *init_cpu_ptr;
19829 + struct tcphdr *th = NULL;
19830 + struct skb_frag_struct *frag;
19831 + unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
19832 + unsigned int len, size, offset, frag_txd_num;
19833 + int init_txd_idx, i;
19834 +#endif // CONFIG_RAETH_TSO //
19836 +#if defined (CONFIG_RAETH_TSOV6)
19837 + struct ipv6hdr *ip6h = NULL;
19840 +#ifdef CONFIG_PSEUDO_SUPPORT
19841 + PSEUDO_ADAPTER *pAd;
19843 + cpu_ptr = (ei_local->tx_cpu_ptr);
19844 + ctx_offset = GET_TXD_OFFSET(&cpu_ptr);
19845 + cpu_ptr = phys_to_virt(ei_local->tx_cpu_ptr);
19846 + dma_ptr = phys_to_virt(ei_local->tx_dma_ptr);
19848 + //dma_ptr = (ei_local->tx_dma_ptr);
19851 + /*only modify virtual address*/
19852 + //cpu_ptr = (ei_local->txd_pool) + (ctx_offset * sizeof(struct QDMA_txdesc));
19853 + cpu_ptr = (ei_local->txd_pool + (ctx_offset));
19855 + //dtx_offset = GET_TXD_OFFSET(&dma_ptr);
19856 + //dma_ptr = (ei_local->txd_pool) + (dtx_offset * sizeof(struct QDMA_txdesc));
19858 + //printk("eth_send ctx_offset = 0x%x!!!\n", ctx_offset);
19859 + //printk("eth_send dtx_offset = 0x%x!!!\n", dtx_offset);
19860 + //printk("eth_send ei_local->txd_pool = 0x%x!!!\n", ei_local->txd_pool);
19861 + //printk("eth_send cpu_ptr = 0x%x!!!\n", cpu_ptr);
19862 + //printk("eth_send ctx_offset = 0x%x!!!\n", ctx_offset);
19863 + //printk("eth_send ei_local->skb_free[ctx_offset] = 0x%x!!!\n", skb);
19866 + ei_local->skb_free[ctx_offset] = skb;
19867 +#if defined (CONFIG_RAETH_TSO)
19868 + init_cpu_ptr = cpu_ptr;
19869 + init_txd_idx = ctx_offset;
19872 +#if !defined (CONFIG_RAETH_TSO)
19874 + //2. prepare data
19875 + cpu_ptr->txd_info1.SDP = virt_to_phys(skb->data);
19876 + cpu_ptr->txd_info3.SDL = skb->len;
19878 + if (gmac_no == 1) {
19879 + cpu_ptr->txd_info4.FPORT = 1;
19881 + cpu_ptr->txd_info4.FPORT = 2;
19885 + cpu_ptr->txd_info3.QID = M2Q_table[skb->mark];
19887 + iph = (struct iphdr *)skb_network_header(skb);
19888 + if (iph->tos == 0xe0)
19889 + cpu_ptr->txd_info3.QID = 3;
19890 + else if (iph->tos == 0xa0)
19891 + cpu_ptr->txd_info3.QID = 2;
19892 + else if (iph->tos == 0x20)
19893 + cpu_ptr->txd_info3.QID = 1;
19895 + cpu_ptr->txd_info3.QID = 0;
19898 +#if defined (CONFIG_RAETH_CHECKSUM_OFFLOAD) && ! defined(CONFIG_RALINK_RT5350) && !defined (CONFIG_RALINK_MT7628)
19899 + if (skb->ip_summed == CHECKSUM_PARTIAL){
19900 + cpu_ptr->txd_info4.TUI_CO = 7;
19902 + cpu_ptr->txd_info4.TUI_CO = 0;
19906 +#ifdef CONFIG_RAETH_HW_VLAN_TX
19907 + if(vlan_tx_tag_present(skb)) {
19908 + cpu_ptr->txd_info4.VLAN_TAG = 0x10000 | vlan_tx_tag_get(skb);
19910 + cpu_ptr->txd_info4.VLAN_TAG = 0;
19914 +#if defined (CONFIG_RA_HW_NAT) || defined (CONFIG_RA_HW_NAT_MODULE)
19915 + if(FOE_MAGIC_TAG(skb) == FOE_MAGIC_PPE) {
19916 + if(ra_sw_nat_hook_rx!= NULL){
19917 + cpu_ptr->txd_info4.FPORT = 4; /* PPE */
19918 + FOE_MAGIC_TAG(skb) = 0;
19923 + cpu_ptr->txd_info4.FPORT = 4; /* PPE */
19924 + cpu_ptr->txd_info4.UDF = 0x2F;
19927 +#if defined (CONFIG_MIPS)
19928 + dma_cache_sync(NULL, skb->data, skb->len, DMA_TO_DEVICE);
19930 + dma_sync_single_for_device(NULL, virt_to_phys(skb->data), skb->len, DMA_TO_DEVICE);
19932 + cpu_ptr->txd_info3.SWC_bit = 1;
19934 + //3. get NULL TXD and decrease free_tx_num by 1.
19935 + ctx_offset = get_free_txd(&free_txd);
19936 + if(ctx_offset == NUM_TX_DESC) {
19937 + printk("get_free_txd fail\n"); // this should not happen. free_txd_num is at least 2.
19941 + //4. hook new TXD in the end of queue
19942 + //cpu_ptr->txd_info2.NDP = virt_to_phys(free_txd);
19943 + cpu_ptr->txd_info2.NDP = (free_txd);
19946 + //5. move CPU_PTR to new TXD
19947 + //ei_local->tx_cpu_ptr = virt_to_phys(free_txd);
19948 + ei_local->tx_cpu_ptr = (free_txd);
19949 + cpu_ptr->txd_info3.OWN_bit = 0;
19950 + sysRegWrite(QTX_CTX_PTR, ei_local->tx_cpu_ptr);
19953 + printk("----------------------------------------------\n");
19954 + printk("txd_info1:%08X \n",*(int *)&cpu_ptr->txd_info1);
19955 + printk("txd_info2:%08X \n",*(int *)&cpu_ptr->txd_info2);
19956 + printk("txd_info3:%08X \n",*(int *)&cpu_ptr->txd_info3);
19957 + printk("txd_info4:%08X \n",*(int *)&cpu_ptr->txd_info4);
19960 +#else //#if !defined (CONFIG_RAETH_TSO)
19961 + cpu_ptr->txd_info1.SDP = virt_to_phys(skb->data);
19962 + cpu_ptr->txd_info3.SDL = (length - skb->data_len);
19963 + cpu_ptr->txd_info3.LS_bit = nr_frags ? 0:1;
19964 + if (gmac_no == 1) {
19965 + cpu_ptr->txd_info4.FPORT = 1;
19967 + cpu_ptr->txd_info4.FPORT = 2;
19970 + cpu_ptr->txd_info4.TSO = 0;
19971 + cpu_ptr->txd_info3.QID = M2Q_table[skb->mark];
19972 +#if defined (CONFIG_RAETH_CHECKSUM_OFFLOAD) && ! defined(CONFIG_RALINK_RT5350) && !defined (CONFIG_RALINK_MT7628)
19973 + if (skb->ip_summed == CHECKSUM_PARTIAL){
19974 + cpu_ptr->txd_info4.TUI_CO = 7;
19976 + cpu_ptr->txd_info4.TUI_CO = 0;
19980 +#ifdef CONFIG_RAETH_HW_VLAN_TX
19981 + if(vlan_tx_tag_present(skb)) {
19982 + cpu_ptr->txd_info4.VLAN_TAG = 0x10000 | vlan_tx_tag_get(skb);
19984 + cpu_ptr->txd_info4.VLAN_TAG = 0;
19988 +#if defined (CONFIG_RA_HW_NAT) || defined (CONFIG_RA_HW_NAT_MODULE)
19989 + if(FOE_MAGIC_TAG(skb) == FOE_MAGIC_PPE) {
19990 + if(ra_sw_nat_hook_rx!= NULL){
19991 + cpu_ptr->txd_info4.FPORT = 4; /* PPE */
19992 + FOE_MAGIC_TAG(skb) = 0;
19997 + cpu_ptr->txd_info3.SWC_bit = 1;
19999 + ctx_offset = get_free_txd(&free_txd);
20000 + if(ctx_offset == NUM_TX_DESC) {
20001 + printk("get_free_txd fail\n");
20004 + //cpu_ptr->txd_info2.NDP = virt_to_phys(free_txd);
20005 + //ei_local->tx_cpu_ptr = virt_to_phys(free_txd);
20006 + cpu_ptr->txd_info2.NDP = free_txd;
20007 + ei_local->tx_cpu_ptr = free_txd;
20009 + if(nr_frags > 0) {
20010 + for(i=0;i<nr_frags;i++) {
20011 + // 1. set or get init value for current fragment
20013 + frag = &skb_shinfo(skb)->frags[i];
20014 + len = frag->size;
20015 + frag_txd_num = cal_frag_txd_num(len); // calculate the needed TXD numbers for this fragment
20016 + for(frag_txd_num = frag_txd_num;frag_txd_num > 0; frag_txd_num --){
20017 + // 2. size will be assigned to SDL and can't be larger than MAX_TXD_LEN
20018 + if(len < MAX_TXD_LEN)
20021 + size = MAX_TXD_LEN;
20023 + //3. Update TXD info
20024 + cpu_ptr = (ei_local->txd_pool + (ctx_offset));
20025 + cpu_ptr->txd_info3.QID = M2Q_table[skb->mark];
20026 +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0)
20027 + cpu_ptr->txd_info1.SDP = pci_map_page(NULL, frag->page, frag->page_offset, frag->size, PCI_DMA_TODEVICE);
20029 + cpu_ptr->txd_info1.SDP = pci_map_page(NULL, frag->page.p, frag->page_offset + offset, size, PCI_DMA_TODEVICE);
20030 +// printk(" frag->page = %08x. frag->page_offset = %08x. frag->size = % 08x.\n", frag->page, (frag->page_offset+offset), size);
20032 + cpu_ptr->txd_info3.SDL = size;
20033 + if( (i==(nr_frags-1)) && (frag_txd_num == 1))
20034 + cpu_ptr->txd_info3.LS_bit = 1;
20036 + cpu_ptr->txd_info3.LS_bit = 0;
20037 + cpu_ptr->txd_info3.OWN_bit = 0;
20038 + cpu_ptr->txd_info3.SWC_bit = 1;
20039 + //4. Update skb_free for housekeeping
20040 + ei_local->skb_free[ctx_offset] = (cpu_ptr->txd_info3.LS_bit == 1)?skb:(struct sk_buff *)0xFFFFFFFF; //MAGIC ID
20042 + //5. Get next TXD
20043 + ctx_offset = get_free_txd(&free_txd);
20044 + //cpu_ptr->txd_info2.NDP = virt_to_phys(free_txd);
20045 + //ei_local->tx_cpu_ptr = virt_to_phys(free_txd);
20046 + cpu_ptr->txd_info2.NDP = free_txd;
20047 + ei_local->tx_cpu_ptr = free_txd;
20048 + //6. Update offset and len.
20053 + ei_local->skb_free[init_txd_idx]= (struct sk_buff *)0xFFFFFFFF; //MAGIC ID
20056 + if(skb_shinfo(skb)->gso_segs > 1) {
20058 +// TsoLenUpdate(skb->len);
20060 + /* TCP over IPv4 */
20061 + iph = (struct iphdr *)skb_network_header(skb);
20062 +#if defined (CONFIG_RAETH_TSOV6)
20063 + /* TCP over IPv6 */
20064 + ip6h = (struct ipv6hdr *)skb_network_header(skb);
20066 + if((iph->version == 4) && (iph->protocol == IPPROTO_TCP)) {
20067 + th = (struct tcphdr *)skb_transport_header(skb);
20069 + init_cpu_ptr->txd_info4.TSO = 1;
20071 + th->check = htons(skb_shinfo(skb)->gso_size);
20072 +#if defined (CONFIG_MIPS)
20073 + dma_cache_sync(NULL, th, sizeof(struct tcphdr), DMA_TO_DEVICE);
20075 + dma_sync_single_for_device(NULL, virt_to_phys(th), sizeof(struct tcphdr), DMA_TO_DEVICE);
20079 +#if defined (CONFIG_RAETH_TSOV6)
20080 + /* TCP over IPv6 */
20081 + //ip6h = (struct ipv6hdr *)skb_network_header(skb);
20082 + else if ((ip6h->version == 6) && (ip6h->nexthdr == NEXTHDR_TCP)) {
20083 + th = (struct tcphdr *)skb_transport_header(skb);
20084 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
20085 + init_cpu_ptr->txd_info4.TSO = 1;
20087 + init_cpu_ptr->txd_info4.TSO = 1;
20089 + th->check = htons(skb_shinfo(skb)->gso_size);
20090 +#if defined (CONFIG_MIPS)
20091 + dma_cache_sync(NULL, th, sizeof(struct tcphdr), DMA_TO_DEVICE);
20093 + dma_sync_single_for_device(NULL, virt_to_phys(th), sizeof(struct tcphdr), DMA_TO_DEVICE);
20100 +// dma_cache_sync(NULL, skb->data, skb->len, DMA_TO_DEVICE);
20102 + init_cpu_ptr->txd_info3.OWN_bit = 0;
20103 +#endif // CONFIG_RAETH_TSO //
20105 + sysRegWrite(QTX_CTX_PTR, ei_local->tx_cpu_ptr);
20107 +#ifdef CONFIG_PSEUDO_SUPPORT
20108 + if (gmac_no == 2) {
20109 + if (ei_local->PseudoDev != NULL) {
20110 + pAd = netdev_priv(ei_local->PseudoDev);
20111 + pAd->stat.tx_packets++;
20112 + pAd->stat.tx_bytes += length;
20118 + ei_local->stat.tx_packets++;
20119 + ei_local->stat.tx_bytes += skb->len;
20121 +#ifdef CONFIG_RAETH_NAPI
20122 + if ( ei_local->tx_full == 1) {
20123 + ei_local->tx_full = 0;
20124 + netif_wake_queue(dev);
20131 +int ei_start_xmit(struct sk_buff* skb, struct net_device *dev, int gmac_no)
20133 + END_DEVICE *ei_local = netdev_priv(dev);
20134 + unsigned long flags;
20135 + unsigned int num_of_txd = 0;
20136 +#if defined (CONFIG_RAETH_TSO)
20137 + unsigned int nr_frags = skb_shinfo(skb)->nr_frags, i;
20138 + struct skb_frag_struct *frag;
20140 +#ifdef CONFIG_PSEUDO_SUPPORT
20141 + PSEUDO_ADAPTER *pAd;
20144 +#if !defined(CONFIG_RA_NAT_NONE)
20145 + if(ra_sw_nat_hook_tx!= NULL)
20147 +// spin_lock_irqsave(&ei_local->page_lock, flags);
20148 + if(ra_sw_nat_hook_tx(skb, gmac_no)==1){
20149 +// spin_unlock_irqrestore(&ei_local->page_lock, flags);
20152 +// spin_unlock_irqrestore(&ei_local->page_lock, flags);
20158 +#if defined(CONFIG_RALINK_MT7621) || defined(CONFIG_ARCH_MT7623)
20159 +#define MIN_PKT_LEN 64
20160 + if (skb->len < MIN_PKT_LEN) {
20161 + if (skb_padto(skb, MIN_PKT_LEN)) {
20162 + printk("raeth: skb_padto failed\n");
20165 + skb_put(skb, MIN_PKT_LEN - skb->len);
20170 + dev->trans_start = jiffies; /* save the timestamp */
20171 + spin_lock_irqsave(&ei_local->page_lock, flags);
20172 +#if defined (CONFIG_MIPS)
20173 + dma_cache_sync(NULL, skb->data, skb->len, DMA_TO_DEVICE);
20175 + dma_sync_single_for_device(NULL, virt_to_phys(skb->data), skb->len, DMA_TO_DEVICE);
20179 +//check free_txd_num before calling rt288_eth_send()
20181 +#if defined (CONFIG_RAETH_TSO)
20182 + // num_of_txd = (nr_frags==0) ? 1 : (nr_frags + 1);
20183 + if(nr_frags != 0){
20184 + for(i=0;i<nr_frags;i++) {
20185 + frag = &skb_shinfo(skb)->frags[i];
20186 + num_of_txd += cal_frag_txd_num(frag->size);
20194 +#if defined(CONFIG_RALINK_MT7621)
20195 + if((sysRegRead(0xbe00000c)&0xFFFF) == 0x0101) {
20196 + ei_xmit_housekeeping(0);
20200 + ei_xmit_housekeeping(0);
20202 + //if ((ei_local->free_txd_num > num_of_txd + 1) && (ei_local->free_txd_num != NUM_TX_DESC))
20203 + if ((ei_local->free_txd_num > num_of_txd + 5) && (ei_local->free_txd_num != NUM_TX_DESC))
20205 + rt2880_eth_send(dev, skb, gmac_no); // need to modify rt2880_eth_send() for QDMA
20206 + if (ei_local->free_txd_num < 3)
20208 +#if defined (CONFIG_RAETH_STOP_RX_WHEN_TX_FULL)
20209 + netif_stop_queue(dev);
20210 +#ifdef CONFIG_PSEUDO_SUPPORT
20211 + netif_stop_queue(ei_local->PseudoDev);
20213 + tx_ring_full = 1;
20217 +#ifdef CONFIG_PSEUDO_SUPPORT
20218 + if (gmac_no == 2)
20220 + if (ei_local->PseudoDev != NULL)
20222 + pAd = netdev_priv(ei_local->PseudoDev);
20223 + pAd->stat.tx_dropped++;
20227 + ei_local->stat.tx_dropped++;
20229 + spin_unlock_irqrestore(&ei_local->page_lock, flags);
20232 + spin_unlock_irqrestore(&ei_local->page_lock, flags);
20236 +void ei_xmit_housekeeping(unsigned long unused)
20238 + struct net_device *dev = dev_raether;
20239 + END_DEVICE *ei_local = netdev_priv(dev);
20240 +#ifndef CONFIG_RAETH_NAPI
20241 + unsigned long reg_int_mask=0;
20243 + struct QDMA_txdesc *dma_ptr = NULL;
20244 + struct QDMA_txdesc *cpu_ptr = NULL;
20245 + struct QDMA_txdesc *tmp_ptr = NULL;
20246 + unsigned int htx_offset = 0;
20247 + unsigned int ctx_offset = 0;
20248 + unsigned int dtx_offset = 0;
20250 + //dma_ptr = phys_to_virt(sysRegRead(QTX_DRX_PTR));
20251 + //cpu_ptr = phys_to_virt(sysRegRead(QTX_CRX_PTR));
20252 + //printk("kurtis:housekeeping QTX_DRX_PTR = 0x%x!!!\n", sysRegRead(QTX_DRX_PTR));
20253 + //printk("kurtis:housekeeping DMA_PTR = 0x%x!!!\n", dma_ptr);
20255 + cpu_ptr = sysRegRead(QTX_CRX_PTR);
20256 + dma_ptr = sysRegRead(QTX_DRX_PTR);
20258 + //printk("kurtis:housekeeping QTX_CRX_PTR = 0x%x!!!\n", cpu_ptr);
20259 + //printk("kurtis:housekeeping QTX_DRX_PTR = 0x%x!!!\n", dma_ptr);
20260 + ctx_offset = GET_TXD_OFFSET(&cpu_ptr);
20261 + dtx_offset = GET_TXD_OFFSET(&dma_ptr);
20262 + htx_offset = ctx_offset;
20263 + cpu_ptr = (ei_local->txd_pool + (ctx_offset));
20264 + dma_ptr = (ei_local->txd_pool + (dtx_offset));
20267 + //printk("kurtis:housekeeping CPU_PTR = 0x%x!!!\n", cpu_ptr);
20268 + //printk("kurtis:housekeeping DMA_PTR = 0x%x!!!\n", dma_ptr);
20274 + if(cpu_ptr != dma_ptr && (cpu_ptr->txd_info3.OWN_bit == 1)) {
20275 + while(cpu_ptr != dma_ptr && (cpu_ptr->txd_info3.OWN_bit == 1)) {
20277 + //1. keep cpu next TXD
20278 + //tmp_ptr = phys_to_virt(cpu_ptr->txd_info2.NDP);
20279 + tmp_ptr = cpu_ptr->txd_info2.NDP;
20280 + htx_offset = GET_TXD_OFFSET(&tmp_ptr);
20281 + //printk("kurtis:housekeeping cpu_ptr->txd_info2.NDP = 0x%x!!!\n", cpu_ptr->txd_info2.NDP);
20282 + //printk("kurtis:housekeeping tmp_ptr = 0x%x!!!\n", tmp_ptr);
20283 + //printk("kurtis:housekeeping htx_offset = 0x%x!!!\n", htx_offset);
20284 + //2. free skb memory
20285 +#if defined (CONFIG_RAETH_TSO)
20286 + if(ei_local->skb_free[htx_offset]!=(struct sk_buff *)0xFFFFFFFF) {
20287 + dev_kfree_skb_any(ei_local->skb_free[htx_offset]);
20290 + dev_kfree_skb_any(ei_local->skb_free[htx_offset]);
20294 + //htx_offset = GET_TXD_OFFSET(&cpu_ptr);
20295 + //put_free_txd(htx_offset);
20296 + put_free_txd(ctx_offset);
20300 + netif_wake_queue(dev);
20301 +#ifdef CONFIG_PSEUDO_SUPPORT
20302 + netif_wake_queue(ei_local->PseudoDev);
20306 + //4. update cpu_ptr to next ptr
20307 + //cpu_ptr = tmp_ptr;
20308 + cpu_ptr = (ei_local->txd_pool + htx_offset);
20309 + ctx_offset = htx_offset;
20310 + //cpu_ptr = (cpu_ptr + (htx_offset));
20311 + //printk("kurtis:housekeeping 4. update cpu_ptr = 0x%x!!!\n", cpu_ptr);
20314 + //sysRegWrite(QTX_CRX_PTR, virt_to_phys(cpu_ptr));
20315 + //sysRegWrite(QTX_CRX_PTR, cpu_ptr);
20316 + tmp_ptr = (ei_local->phy_txd_pool + (htx_offset << 4));
20317 + //printk("kurtis:housekeeping 5. update QTX_CRX_PTR = 0x%x!!!\n", tmp_ptr);
20318 + sysRegWrite(QTX_CRX_PTR, tmp_ptr);
20322 +#ifndef CONFIG_RAETH_NAPI
20323 + reg_int_mask=sysRegRead(QFE_INT_ENABLE);
20324 +#if defined (DELAY_INT)
20325 + sysRegWrite(FE_INT_ENABLE, reg_int_mask| RLS_DLY_INT);
20328 + sysRegWrite(FE_INT_ENABLE, reg_int_mask | RLS_DONE_INT);
20330 +#endif //CONFIG_RAETH_NAPI//
20333 +EXPORT_SYMBOL(ei_start_xmit);
20334 +EXPORT_SYMBOL(ei_xmit_housekeeping);
20335 +EXPORT_SYMBOL(fe_dma_init);
20336 +EXPORT_SYMBOL(rt2880_eth_send);
20337 diff --git a/drivers/net/ethernet/raeth/smb_hook.c b/drivers/net/ethernet/raeth/smb_hook.c
20338 new file mode 100644
20339 index 0000000..617139c
20341 +++ b/drivers/net/ethernet/raeth/smb_hook.c
20343 +#include <linux/version.h>
20344 +#include <linux/module.h>
20345 +#include <linux/kernel.h>
20346 +#include <linux/types.h>
20347 +#include <linux/skbuff.h>
20350 +int (*smb_nf_local_in_hook)(struct sk_buff *skb) = NULL;
20351 +int (*smb_nf_pre_routing_hook)(struct sk_buff *skb) = NULL;
20352 +int (*smb_nf_local_out_hook)(struct sk_buff *skb) = NULL;
20353 +int (*smb_nf_post_routing_hook)(struct sk_buff *skb) = NULL;
20354 +EXPORT_SYMBOL(smb_nf_local_in_hook);
20355 +EXPORT_SYMBOL(smb_nf_pre_routing_hook);
20356 +EXPORT_SYMBOL(smb_nf_local_out_hook);
20357 +EXPORT_SYMBOL(smb_nf_post_routing_hook);
20360 diff --git a/drivers/net/ethernet/raeth/smb_nf.c b/drivers/net/ethernet/raeth/smb_nf.c
20361 new file mode 100644
20362 index 0000000..86250eb
20364 +++ b/drivers/net/ethernet/raeth/smb_nf.c
20366 +#include <linux/module.h>
20367 +#include <linux/version.h>
20368 +#include <linux/kernel.h>
20369 +#include <linux/types.h>
20371 +#include <linux/inetdevice.h>
20372 +#include <linux/tcp.h>
20373 +#include <linux/ip.h>
20374 +#include <net/tcp.h>
20375 +#include <net/ip.h>
20377 +extern int (*smb_nf_local_in_hook)(struct sk_buff *skb);
20378 +extern int (*smb_nf_pre_routing_hook)(struct sk_buff *skb);
20379 +extern int (*smb_nf_local_out_hook)(struct sk_buff *skb);
20380 +extern int (*smb_nf_post_routing_hook)(struct sk_buff *skb);
20382 +struct net_device *lan_int = NULL;
20383 +struct in_ifaddr *lan_ifa = NULL;
20386 +int mtk_smb_nf_local_in_hook(struct sk_buff *skb)
20388 + struct iphdr *iph = ip_hdr(skb);
20390 + if (skb->protocol == htons(ETH_P_IP)) {
20391 + struct iphdr *iph = ip_hdr(skb);
20393 + if (iph->protocol == IPPROTO_TCP) {
20394 + struct tcphdr *th = tcp_hdr(skb);
20395 + unsigned short sport, dport;
20397 + th = tcp_hdr(skb);
20398 + th = (struct tcphdr *)(((unsigned char *)iph) + iph->ihl*4);
20400 + if ((iph->daddr == lan_ifa->ifa_local)
20401 + && ((th->dest == 0xbd01) || (th->dest == 0x8900)
20402 + || (th->dest == 0x8a00) || (th->dest == 0x8b00)))
20413 +int mtk_smb_nf_pre_routing_hook(struct sk_buff *skb)
20415 + struct iphdr *iph = ip_hdr(skb);
20417 + if (skb->protocol == htons(ETH_P_IP)) {
20418 + struct iphdr *iph = ip_hdr(skb);
20420 + if (iph->protocol == IPPROTO_TCP) {
20421 + struct tcphdr *th = tcp_hdr(skb);
20422 + unsigned short sport, dport;
20424 + th = tcp_hdr(skb);
20425 + th = (struct tcphdr *)(((unsigned char *)iph) + iph->ihl*4);
20426 + if ((iph->daddr == lan_ifa->ifa_local)
20427 + && ((th->dest == 0xbd01) || (th->dest == 0x8900)
20428 + || (th->dest == 0x8a00) || (th->dest == 0x8b00)))
20439 +int mtk_smb_nf_local_out_hook(struct sk_buff *skb)
20441 + struct iphdr *iph = ip_hdr(skb);
20443 + if (iph->protocol == IPPROTO_TCP) {
20444 + struct tcphdr *th = tcp_hdr(skb);
20446 + th = tcp_hdr(skb);
20447 + th = (struct tcphdr *)(((unsigned char *)iph) + iph->ihl*4);
20449 + if ((iph->saddr == lan_ifa->ifa_local)
20450 + && ((th->source == 0xbd01) || (th->source == 0x8900)
20451 + || (th->source == 0x8a00) || (th->source == 0x8b00)))
20460 +int mtk_smb_nf_post_routing_hook(struct sk_buff *skb)
20462 + struct iphdr *iph = ip_hdr(skb);
20464 + if (skb->protocol == htons(ETH_P_IP)) {
20465 + struct iphdr *iph = ip_hdr(skb);
20467 + if (iph->protocol == IPPROTO_TCP) {
20468 + struct tcphdr *th = tcp_hdr(skb);
20470 + th = tcp_hdr(skb);
20471 + th = (struct tcphdr *)(((unsigned char *)iph) + iph->ihl*4);
20473 + if ((iph->saddr == lan_ifa->ifa_local)
20474 + && ((th->source == 0xbd01) || (th->source == 0x8900)
20475 + || (th->source == 0x8a00) || (th->source == 0x8b00)))
20486 +int __init mtk_smb_hook_init(void)
20488 + struct in_device *in_dev;
20489 + struct in_ifaddr **ifap = NULL;
20490 + struct in_ifaddr *ifa = NULL;
20492 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
20493 + lan_int = dev_get_by_name(&init_net, "br0");
20495 + lan_int = dev_get_by_name("br0");
20498 + in_dev = __in_dev_get_rtnl(lan_int);
20503 + for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL;
20504 + ifap = &ifa->ifa_next) {
20505 + if (!strcmp("br0", ifa->ifa_label))
20508 + break; /* found */
20516 + smb_nf_local_in_hook = mtk_smb_nf_local_in_hook;
20517 + smb_nf_pre_routing_hook = mtk_smb_nf_pre_routing_hook;
20518 + smb_nf_local_out_hook = mtk_smb_nf_local_out_hook;
20519 + smb_nf_post_routing_hook = mtk_smb_nf_post_routing_hook;
20522 + printk("Samba Netfilter Hook Enabled\n");
20527 +void mtk_smb_hook_cleanup(void)
20531 + smb_nf_local_in_hook = NULL;
20532 + smb_nf_pre_routing_hook = NULL;
20533 + smb_nf_local_out_hook = NULL;
20534 + smb_nf_post_routing_hook = NULL;
20539 +module_init(mtk_smb_hook_init);
20540 +module_exit(mtk_smb_hook_cleanup);
20542 +MODULE_LICENSE("GPL");
20543 diff --git a/drivers/net/ethernet/raeth/sync_write.h b/drivers/net/ethernet/raeth/sync_write.h
20544 new file mode 100644
20545 index 0000000..8b800e6
20547 +++ b/drivers/net/ethernet/raeth/sync_write.h
20549 +#ifndef _MT_SYNC_WRITE_H
20550 +#define _MT_SYNC_WRITE_H
20552 +#if defined(__KERNEL__)
20554 +#include <linux/io.h>
20555 +#include <asm/cacheflush.h>
20556 +//#include <asm/system.h>
20562 +#define mt65xx_reg_sync_writel(v, a) \
20564 + __raw_writel((v), IOMEM((a))); \
20568 +#define mt65xx_reg_sync_writew(v, a) \
20570 + __raw_writew((v), IOMEM((a))); \
20574 +#define mt65xx_reg_sync_writeb(v, a) \
20576 + __raw_writeb((v), IOMEM((a))); \
20580 +#define mt_reg_sync_writel(v, a) \
20582 + __raw_writel((v), IOMEM((a))); \
20586 +#define mt_reg_sync_writew(v, a) \
20588 + __raw_writew((v), IOMEM((a))); \
20592 +#define mt_reg_sync_writeb(v, a) \
20594 + __raw_writeb((v), IOMEM((a))); \
20599 +#else /* __KERNEL__ */
20601 +#include <sys/types.h>
20602 +#include <sys/stat.h>
20603 +#include <fcntl.h>
20604 +#include <unistd.h>
20605 +#include <string.h>
20609 + __asm__ __volatile__ ("dsb" : : : "memory"); \
20612 +#define mt65xx_reg_sync_writel(v, a) \
20614 + *(volatile unsigned int *)(a) = (v); \
20618 +#define mt65xx_reg_sync_writew(v, a) \
20620 + *(volatile unsigned short *)(a) = (v); \
20624 +#define mt65xx_reg_sync_writeb(v, a) \
20626 + *(volatile unsigned char *)(a) = (v); \
20630 +#define mt_reg_sync_writel(v, a) \
20632 + *(volatile unsigned int *)(a) = (v); \
20636 +#define mt_reg_sync_writew(v, a) \
20638 + *(volatile unsigned short *)(a) = (v); \
20642 +#define mt_reg_sync_writeb(v, a) \
20644 + *(volatile unsigned char *)(a) = (v); \
20649 +#endif /* __KERNEL__ */
20651 +#endif /* !_MT_SYNC_WRITE_H */