ipq40xx: Add patches for 4.19
author Robert Marko <robimarko@gmail.com>
Mon, 17 Dec 2018 09:39:09 +0000 (10:39 +0100)
committer Christian Lamparter <chunkeey@gmail.com>
Thu, 27 Dec 2018 13:06:38 +0000 (14:06 +0100)
This adds the necessary patches for the 4.19 kernel.
Patches that were upstreamed are dropped; patches that went upstream
in 4.20 are backported.
Drop the Winbond ID patch, since support for that NAND IC went
upstream via the SPI-NAND framework and was backported from 4.20.
Rework the ESSEDMA patches to compile under 4.19 due to the kernel
timer API changes (see the sketch below); Clément Péron did the hard
work, and his changes were taken from the initial 4.19 PR.
The MR33 changes had to be manually refreshed to apply.
Refresh the remaining patches so they apply.
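
For reference, the timer change in question is presumably the kernel's
timer API conversion: timer_setup() replaced setup_timer(), and timer
callbacks now receive a struct timer_list pointer instead of an
unsigned long cookie. A minimal sketch of that kind of conversion
follows; the struct and field names are illustrative, not the actual
ESSEDMA driver code:

  #include <linux/timer.h>

  struct edma_adapter {
          struct timer_list tx_timer;
          /* ... driver state ... */
  };

  /* Old style: the callback received an opaque cookie, registered with
   * setup_timer(&adapter->tx_timer, edma_tx_timeout_old,
   *             (unsigned long)adapter);
   */
  static void edma_tx_timeout_old(unsigned long data)
  {
          struct edma_adapter *adapter = (struct edma_adapter *)data;
          /* ... restart the TX queue ... */
  }

  /* New style: the callback receives the timer itself and recovers its
   * container via from_timer(), registered with
   * timer_setup(&adapter->tx_timer, edma_tx_timeout, 0);
   */
  static void edma_tx_timeout(struct timer_list *t)
  {
          struct edma_adapter *adapter = from_timer(adapter, t, tx_timer);
          /* ... restart the TX queue ... */
  }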

Signed-off-by: Robert Marko <robimarko@gmail.com>

26 files changed:
target/linux/ipq40xx/patches-4.19/070-v4.20-soc-qcom-spm-add-SCM-probe-dependency.patch [new file with mode: 0644]
target/linux/ipq40xx/patches-4.19/071-01-v4.20-ARM-dts-qcom-ipq4019-use-v2-of-the-kpss-bringup-mech.patch [new file with mode: 0644]
target/linux/ipq40xx/patches-4.19/071-02-ipq40xx-Fix-booting-secondary-cores.patch [new file with mode: 0644]
target/linux/ipq40xx/patches-4.19/072-v4.20-ARM-dts-qcom-ipq4019-add-cpu-operating-points-for-cp.patch [new file with mode: 0644]
target/linux/ipq40xx/patches-4.19/073-v4.20-ARM-dts-qcom-ipq4019-fix-cpu0-s-qcom-saw2-reg-value.patch [new file with mode: 0644]
target/linux/ipq40xx/patches-4.19/074-ARM-qcom-Add-IPQ4019-SoC-support.patch [new file with mode: 0644]
target/linux/ipq40xx/patches-4.19/075-dt-bindings-phy-qcom-ipq4019-usb-add-binding-documen.patch [new file with mode: 0644]
target/linux/ipq40xx/patches-4.19/076-phy-qcom-ipq4019-usb-add-driver-for-QCOM-IPQ4019.patch [new file with mode: 0644]
target/linux/ipq40xx/patches-4.19/077-qcom-ipq4019-add-USB-devicetree-nodes.patch [new file with mode: 0644]
target/linux/ipq40xx/patches-4.19/079-v4.20-ARM-dts-qcom-ipq4019-fix-PCI-range.patch [new file with mode: 0644]
target/linux/ipq40xx/patches-4.19/080-ARM-dts-qcom-add-gpio-ranges-property.patch [new file with mode: 0644]
target/linux/ipq40xx/patches-4.19/081-clk-fix-apss-cpu-overclocking.patch [new file with mode: 0644]
target/linux/ipq40xx/patches-4.19/082-v4.20-mtd-spinand-winbond-Add-support-for-W25N01GV.patch [new file with mode: 0644]
target/linux/ipq40xx/patches-4.19/303-spi-nor-enable-4B-opcodes-for-mx25l25635f.patch [new file with mode: 0644]
target/linux/ipq40xx/patches-4.19/700-net-add-qualcomm-mdio-and-phy.patch [new file with mode: 0644]
target/linux/ipq40xx/patches-4.19/701-dts-ipq4019-add-mdio-node.patch [new file with mode: 0644]
target/linux/ipq40xx/patches-4.19/702-dts-ipq4019-add-PHY-switch-nodes.patch [new file with mode: 0644]
target/linux/ipq40xx/patches-4.19/703-net-IPQ4019-needs-rfs-vlan_tag-callbacks-in.patch [new file with mode: 0644]
target/linux/ipq40xx/patches-4.19/710-net-add-qualcomm-essedma-ethernet-driver.patch [new file with mode: 0644]
target/linux/ipq40xx/patches-4.19/711-dts-ipq4019-add-ethernet-essedma-node.patch [new file with mode: 0644]
target/linux/ipq40xx/patches-4.19/712-mr33-essedma.patch [new file with mode: 0644]
target/linux/ipq40xx/patches-4.19/713-essedma-alloc-skb-ip-align.patch [new file with mode: 0644]
target/linux/ipq40xx/patches-4.19/850-soc-add-qualcomm-syscon.patch [new file with mode: 0644]
target/linux/ipq40xx/patches-4.19/900-dts-ipq4019-ap-dk01.1.patch [new file with mode: 0644]
target/linux/ipq40xx/patches-4.19/901-arm-boot-add-dts-files.patch [new file with mode: 0644]
target/linux/ipq40xx/patches-4.19/997-device_tree_cmdline.patch [new file with mode: 0644]

diff --git a/target/linux/ipq40xx/patches-4.19/070-v4.20-soc-qcom-spm-add-SCM-probe-dependency.patch b/target/linux/ipq40xx/patches-4.19/070-v4.20-soc-qcom-spm-add-SCM-probe-dependency.patch
new file mode 100644 (file)
index 0000000..d0b520f
--- /dev/null
@@ -0,0 +1,27 @@
+From 61a3bd10082b0e861b4e1bc451a92e20181a52f5 Mon Sep 17 00:00:00 2001
+From: Felix Fietkau <nbd@nbd.name>
+Date: Mon, 23 Jul 2018 16:17:35 +0200
+Subject: [PATCH] soc: qcom: spm: add SCM probe dependency
+
+Check for SCM availability before attempting to use SPM. SPM probe will
+fail otherwise.
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+Signed-off-by: John Crispin <john@phrozen.org>
+Signed-off-by: Andy Gross <andy.gross@linaro.org>
+---
+ drivers/soc/qcom/spm.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/soc/qcom/spm.c
++++ b/drivers/soc/qcom/spm.c
+@@ -219,6 +219,9 @@ static int __init qcom_cpuidle_init(stru
+       cpumask_t mask;
+       bool use_scm_power_down = false;
++      if (!qcom_scm_is_available())
++              return -EPROBE_DEFER;
++
+       for (i = 0; ; i++) {
+               state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i);
+               if (!state_node)
diff --git a/target/linux/ipq40xx/patches-4.19/071-01-v4.20-ARM-dts-qcom-ipq4019-use-v2-of-the-kpss-bringup-mech.patch b/target/linux/ipq40xx/patches-4.19/071-01-v4.20-ARM-dts-qcom-ipq4019-use-v2-of-the-kpss-bringup-mech.patch
new file mode 100644 (file)
index 0000000..b1d69ca
--- /dev/null
@@ -0,0 +1,97 @@
+From 233c77d4f1d12e4337fba1146d5197f4c0f9107d Mon Sep 17 00:00:00 2001
+From: Matthew McClintock <mmcclint@codeaurora.org>
+Date: Wed, 25 Jul 2018 10:37:45 +0200
+Subject: [PATCH] ARM: dts: qcom: ipq4019: use v2 of the kpss bringup mechanism
+
+v1 was the incorrect choice here and sometimes the board
+would not come up properly.
+
+Signed-off-by: Matthew McClintock <mmcclint@codeaurora.org>
+Signed-off-by: Christian Lamparter <chunkeey@gmail.com>
+Signed-off-by: John Crispin <john@phrozen.org>
+Signed-off-by: Andy Gross <andy.gross@linaro.org>
+---
+ arch/arm/boot/dts/qcom-ipq4019.dtsi | 25 +++++++++++++++++--------
+ 1 file changed, 17 insertions(+), 8 deletions(-)
+
+--- a/arch/arm/boot/dts/qcom-ipq4019.dtsi
++++ b/arch/arm/boot/dts/qcom-ipq4019.dtsi
+@@ -52,7 +52,8 @@
+               cpu@0 {
+                       device_type = "cpu";
+                       compatible = "arm,cortex-a7";
+-                      enable-method = "qcom,kpss-acc-v1";
++                      enable-method = "qcom,kpss-acc-v2";
++                      next-level-cache = <&L2>;
+                       qcom,acc = <&acc0>;
+                       qcom,saw = <&saw0>;
+                       reg = <0x0>;
+@@ -71,7 +72,8 @@
+               cpu@1 {
+                       device_type = "cpu";
+                       compatible = "arm,cortex-a7";
+-                      enable-method = "qcom,kpss-acc-v1";
++                      enable-method = "qcom,kpss-acc-v2";
++                      next-level-cache = <&L2>;
+                       qcom,acc = <&acc1>;
+                       qcom,saw = <&saw1>;
+                       reg = <0x1>;
+@@ -90,7 +92,8 @@
+               cpu@2 {
+                       device_type = "cpu";
+                       compatible = "arm,cortex-a7";
+-                      enable-method = "qcom,kpss-acc-v1";
++                      enable-method = "qcom,kpss-acc-v2";
++                      next-level-cache = <&L2>;
+                       qcom,acc = <&acc2>;
+                       qcom,saw = <&saw2>;
+                       reg = <0x2>;
+@@ -109,7 +112,8 @@
+               cpu@3 {
+                       device_type = "cpu";
+                       compatible = "arm,cortex-a7";
+-                      enable-method = "qcom,kpss-acc-v1";
++                      enable-method = "qcom,kpss-acc-v2";
++                      next-level-cache = <&L2>;
+                       qcom,acc = <&acc3>;
+                       qcom,saw = <&saw3>;
+                       reg = <0x3>;
+@@ -124,6 +128,11 @@
+                       >;
+                       clock-latency = <256000>;
+               };
++
++              L2: l2-cache {
++                      compatible = "cache";
++                      cache-level = <2>;
++              };
+       };
+       pmu {
+@@ -292,22 +301,22 @@
+               };
+                 acc0: clock-controller@b088000 {
+-                        compatible = "qcom,kpss-acc-v1";
++                        compatible = "qcom,kpss-acc-v2";
+                         reg = <0x0b088000 0x1000>, <0xb008000 0x1000>;
+                 };
+                 acc1: clock-controller@b098000 {
+-                        compatible = "qcom,kpss-acc-v1";
++                        compatible = "qcom,kpss-acc-v2";
+                         reg = <0x0b098000 0x1000>, <0xb008000 0x1000>;
+                 };
+                 acc2: clock-controller@b0a8000 {
+-                        compatible = "qcom,kpss-acc-v1";
++                        compatible = "qcom,kpss-acc-v2";
+                         reg = <0x0b0a8000 0x1000>, <0xb008000 0x1000>;
+                 };
+                 acc3: clock-controller@b0b8000 {
+-                        compatible = "qcom,kpss-acc-v1";
++                        compatible = "qcom,kpss-acc-v2";
+                         reg = <0x0b0b8000 0x1000>, <0xb008000 0x1000>;
+                 };
diff --git a/target/linux/ipq40xx/patches-4.19/071-02-ipq40xx-Fix-booting-secondary-cores.patch b/target/linux/ipq40xx/patches-4.19/071-02-ipq40xx-Fix-booting-secondary-cores.patch
new file mode 100644 (file)
index 0000000..d37a8cb
--- /dev/null
@@ -0,0 +1,38 @@
+From 8a4540321e8bcf7a5b485c332a2e78f3501c78ed Mon Sep 17 00:00:00 2001
+From: Robert Marko <robimarko@gmail.com>
+Date: Thu, 29 Nov 2018 22:29:36 +0100
+Subject: [PATCH] ipq40xx: Fix booting secondary cores
+
+Add the second part of the old 071-qcom-ipq4019-use-v2-of-the-kpss-bringup-mechanism.patch.
+We don't modify the patch itself, as it is upstream and this change is not.
+
+Originally added by Mantas Pucka <mantas@8devices.com>
+
+Signed-off-by: Robert Marko <robimarko@gmail.com>
+---
+ arch/arm/boot/dts/qcom-ipq4019.dtsi | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/arch/arm/boot/dts/qcom-ipq4019.dtsi
++++ b/arch/arm/boot/dts/qcom-ipq4019.dtsi
+@@ -132,6 +132,7 @@
+               L2: l2-cache {
+                       compatible = "cache";
+                       cache-level = <2>;
++                      qcom,saw = <&saw_l2>;
+               };
+       };
+@@ -344,6 +345,12 @@
+                         regulator;
+                 };
++              saw_l2: regulator@b012000 {
++                      compatible = "qcom,saw2";
++                      reg = <0xb012000 0x1000>;
++                      regulator;
++              };
++
+               blsp1_uart1: serial@78af000 {
+                       compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
+                       reg = <0x78af000 0x200>;
diff --git a/target/linux/ipq40xx/patches-4.19/072-v4.20-ARM-dts-qcom-ipq4019-add-cpu-operating-points-for-cp.patch b/target/linux/ipq40xx/patches-4.19/072-v4.20-ARM-dts-qcom-ipq4019-add-cpu-operating-points-for-cp.patch
new file mode 100644 (file)
index 0000000..3035461
--- /dev/null
@@ -0,0 +1,110 @@
+From bcb9ab4c2917e92114d2f4c2b1da97cdf15b471b Mon Sep 17 00:00:00 2001
+From: Matthew McClintock <mmcclint@codeaurora.org>
+Date: Wed, 25 Jul 2018 10:37:46 +0200
+Subject: [PATCH] ARM: dts: qcom: ipq4019: add cpu operating points for cpufreq
+ support
+
+This adds some operating points for CPU frequency scaling.
+
+Signed-off-by: Matthew McClintock <mmcclint@codeaurora.org>
+Signed-off-by: John Crispin <john@phrozen.org>
+Signed-off-by: Andy Gross <andy.gross@linaro.org>
+---
+ arch/arm/boot/dts/qcom-ipq4019.dtsi | 54 ++++++++++++++---------------
+ 1 file changed, 26 insertions(+), 28 deletions(-)
+
+--- a/arch/arm/boot/dts/qcom-ipq4019.dtsi
++++ b/arch/arm/boot/dts/qcom-ipq4019.dtsi
+@@ -59,14 +59,8 @@
+                       reg = <0x0>;
+                       clocks = <&gcc GCC_APPS_CLK_SRC>;
+                       clock-frequency = <0>;
+-                      operating-points = <
+-                              /* kHz  uV (fixed) */
+-                              48000   1100000
+-                              200000  1100000
+-                              500000  1100000
+-                              716000  1100000
+-                      >;
+                       clock-latency = <256000>;
++                      operating-points-v2 = <&cpu0_opp_table>;
+               };
+               cpu@1 {
+@@ -79,14 +73,8 @@
+                       reg = <0x1>;
+                       clocks = <&gcc GCC_APPS_CLK_SRC>;
+                       clock-frequency = <0>;
+-                      operating-points = <
+-                              /* kHz  uV (fixed) */
+-                              48000   1100000
+-                              200000  1100000
+-                              500000  1100000
+-                              666000  1100000
+-                      >;
+                       clock-latency = <256000>;
++                      operating-points-v2 = <&cpu0_opp_table>;
+               };
+               cpu@2 {
+@@ -99,14 +87,8 @@
+                       reg = <0x2>;
+                       clocks = <&gcc GCC_APPS_CLK_SRC>;
+                       clock-frequency = <0>;
+-                      operating-points = <
+-                              /* kHz  uV (fixed) */
+-                              48000   1100000
+-                              200000  1100000
+-                              500000  1100000
+-                              666000  1100000
+-                      >;
+                       clock-latency = <256000>;
++                      operating-points-v2 = <&cpu0_opp_table>;
+               };
+               cpu@3 {
+@@ -119,14 +101,8 @@
+                       reg = <0x3>;
+                       clocks = <&gcc GCC_APPS_CLK_SRC>;
+                       clock-frequency = <0>;
+-                      operating-points = <
+-                              /* kHz  uV (fixed) */
+-                              48000   1100000
+-                              200000  1100000
+-                              500000  1100000
+-                              666000  1100000
+-                      >;
+                       clock-latency = <256000>;
++                      operating-points-v2 = <&cpu0_opp_table>;
+               };
+               L2: l2-cache {
+@@ -136,6 +112,28 @@
+               };
+       };
++      cpu0_opp_table: opp_table0 {
++              compatible = "operating-points-v2";
++              opp-shared;
++
++              opp-48000000 {
++                      opp-hz = /bits/ 64 <48000000>;
++                      clock-latency-ns = <256000>;
++              };
++              opp-200000000 {
++                      opp-hz = /bits/ 64 <200000000>;
++                      clock-latency-ns = <256000>;
++              };
++              opp-500000000 {
++                      opp-hz = /bits/ 64 <500000000>;
++                      clock-latency-ns = <256000>;
++              };
++              opp-716000000 {
++                      opp-hz = /bits/ 64 <716000000>;
++                      clock-latency-ns = <256000>;
++              };
++      };
++
+       pmu {
+               compatible = "arm,cortex-a7-pmu";
+               interrupts = <GIC_PPI 7 (GIC_CPU_MASK_SIMPLE(4) |
diff --git a/target/linux/ipq40xx/patches-4.19/073-v4.20-ARM-dts-qcom-ipq4019-fix-cpu0-s-qcom-saw2-reg-value.patch b/target/linux/ipq40xx/patches-4.19/073-v4.20-ARM-dts-qcom-ipq4019-fix-cpu0-s-qcom-saw2-reg-value.patch
new file mode 100644 (file)
index 0000000..7573e48
--- /dev/null
@@ -0,0 +1,34 @@
+From bd73a3dd257fb838bd456a18eeee0ef0224b7a40 Mon Sep 17 00:00:00 2001
+From: Christian Lamparter <chunkeey@gmail.com>
+Date: Wed, 25 Jul 2018 10:37:47 +0200
+Subject: [PATCH] ARM: dts: qcom: ipq4019: fix cpu0's qcom,saw2 reg value
+
+While compiling an ipq4019 target, dtc will complain:
+regulator@b089000 unit address format error, expected "2089000"
+
+The saw0 regulator reg value seems to be
+copied and pasted from qcom-ipq8064.dtsi.
+
+This patch fixes the reg value to match that of the
+unit address which in turn silences the warning.
+(There is no driver for qcom,saw2 right now,
+so this went unnoticed.)
+
+Signed-off-by: Christian Lamparter <chunkeey@gmail.com>
+Signed-off-by: John Crispin <john@phrozen.org>
+Signed-off-by: Andy Gross <andy.gross@linaro.org>
+---
+ arch/arm/boot/dts/qcom-ipq4019.dtsi | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/arm/boot/dts/qcom-ipq4019.dtsi
++++ b/arch/arm/boot/dts/qcom-ipq4019.dtsi
+@@ -321,7 +321,7 @@
+                 saw0: regulator@b089000 {
+                         compatible = "qcom,saw2";
+-                        reg = <0x02089000 0x1000>, <0x0b009000 0x1000>;
++                      reg = <0x0b089000 0x1000>, <0x0b009000 0x1000>;
+                         regulator;
+                 };
diff --git a/target/linux/ipq40xx/patches-4.19/074-ARM-qcom-Add-IPQ4019-SoC-support.patch b/target/linux/ipq40xx/patches-4.19/074-ARM-qcom-Add-IPQ4019-SoC-support.patch
new file mode 100644 (file)
index 0000000..76fcdc6
--- /dev/null
@@ -0,0 +1,36 @@
+From 89b43d59ec8c9cda588555eb1f2754dd19ef5144 Mon Sep 17 00:00:00 2001
+From: Christian Lamparter <chunkeey@gmail.com>
+Date: Sun, 22 Jul 2018 12:07:57 +0200
+Subject: [PATCH 8/8] ARM: qcom: Add IPQ4019 SoC support
+
+Add support for the Qualcomm Atheros IPQ4019 SoC.
+
+Signed-off-by: Christian Lamparter <chunkeey@gmail.com>
+Signed-off-by: John Crispin <john@phrozen.org>
+---
+ arch/arm/Makefile          | 1 +
+ arch/arm/mach-qcom/Kconfig | 5 +++++
+ 2 files changed, 6 insertions(+)
+
+--- a/arch/arm/Makefile
++++ b/arch/arm/Makefile
+@@ -150,6 +150,7 @@ textofs-$(CONFIG_ARCH_MSM8X60) := 0x0020
+ textofs-$(CONFIG_ARCH_MSM8960) := 0x00208000
+ textofs-$(CONFIG_ARCH_MESON) := 0x00208000
+ textofs-$(CONFIG_ARCH_AXXIA) := 0x00308000
++textofs-$(CONFIG_ARCH_IPQ40XX) := 0x00208000
+ # Machine directory name.  This list is sorted alphanumerically
+ # by CONFIG_* macro name.
+--- a/arch/arm/mach-qcom/Kconfig
++++ b/arch/arm/mach-qcom/Kconfig
+@@ -27,4 +27,9 @@ config ARCH_MDM9615
+       bool "Enable support for MDM9615"
+       select CLKSRC_QCOM
++config ARCH_IPQ40XX
++      bool "Enable support for IPQ40XX"
++      select CLKSRC_QCOM
++      select HAVE_ARM_ARCH_TIMER
++
+ endif
diff --git a/target/linux/ipq40xx/patches-4.19/075-dt-bindings-phy-qcom-ipq4019-usb-add-binding-documen.patch b/target/linux/ipq40xx/patches-4.19/075-dt-bindings-phy-qcom-ipq4019-usb-add-binding-documen.patch
new file mode 100644 (file)
index 0000000..e7407bc
--- /dev/null
@@ -0,0 +1,38 @@
+From 5f01733dc755dfadfa51b7b3c6c160e632fc6002 Mon Sep 17 00:00:00 2001
+From: John Crispin <john@phrozen.org>
+Date: Tue, 24 Jul 2018 15:09:36 +0200
+Subject: [PATCH 1/3] dt-bindings: phy-qcom-ipq4019-usb: add binding document
+
+This patch adds the binding documentation for the HS/SS USB PHY found
+inside Qualcomm Dakota SoCs.
+
+Signed-off-by: John Crispin <john@phrozen.org>
+---
+ .../bindings/phy/phy-qcom-ipq4019-usb.txt           | 21 +++++++++++++++++++++
+ 1 file changed, 21 insertions(+)
+ create mode 100644 Documentation/devicetree/bindings/phy/phy-qcom-ipq4019-usb.txt
+
+--- /dev/null
++++ b/Documentation/devicetree/bindings/phy/phy-qcom-ipq4019-usb.txt
+@@ -0,0 +1,21 @@
++Qualcomm Dakota HS/SS USB PHY
++
++Required properties:
++ - compatible: "qcom,usb-ss-ipq4019-phy",
++             "qcom,usb-hs-ipq4019-phy"
++ - reg: offset and length of the registers
++ - #phy-cells: should be 0
++ - resets: the reset controllers as listed below
++ - reset-names: the names of the reset controllers
++      "por_rst" - the POR reset line for SS and HS phys
++      "srif_rst" - the SRIF reset line for HS phys
++Example:
++
++hsphy@a8000 {
++      compatible = "qcom,usb-hs-ipq4019-phy";
++      #phy-cells = <0>;
++      reg = <0xa8000 0x40>;
++      resets = <&gcc USB2_HSPHY_POR_ARES>,
++               <&gcc USB2_HSPHY_S_ARES>;
++      reset-names = "por_rst", "srif_rst";
++};
diff --git a/target/linux/ipq40xx/patches-4.19/076-phy-qcom-ipq4019-usb-add-driver-for-QCOM-IPQ4019.patch b/target/linux/ipq40xx/patches-4.19/076-phy-qcom-ipq4019-usb-add-driver-for-QCOM-IPQ4019.patch
new file mode 100644 (file)
index 0000000..e7e7a1f
--- /dev/null
@@ -0,0 +1,234 @@
+From 633f0e08498aebfdb932bd71319b4cb136709499 Mon Sep 17 00:00:00 2001
+From: John Crispin <john@phrozen.org>
+Date: Tue, 24 Jul 2018 14:45:49 +0200
+Subject: [PATCH 2/3] phy: qcom-ipq4019-usb: add driver for QCOM/IPQ4019
+
+Add a driver to set up the USB PHY on Qualcomm Dakota SoCs.
+The driver sets up HS and SS PHYs. In the case of HS, some magic values need to
+be written to magic offsets. These were taken from the SDK driver.
+
+Signed-off-by: John Crispin <john@phrozen.org>
+---
+ drivers/phy/qualcomm/Kconfig                |   7 ++
+ drivers/phy/qualcomm/Makefile               |   1 +
+ drivers/phy/qualcomm/phy-qcom-ipq4019-usb.c | 188 ++++++++++++++++++++++++++++
+ 3 files changed, 196 insertions(+)
+ create mode 100644 drivers/phy/qualcomm/phy-qcom-ipq4019-usb.c
+
+--- a/drivers/phy/qualcomm/Kconfig
++++ b/drivers/phy/qualcomm/Kconfig
+@@ -17,6 +17,13 @@ config PHY_QCOM_APQ8064_SATA
+       depends on OF
+       select GENERIC_PHY
++config PHY_QCOM_IPQ4019_USB
++      tristate "Qualcomm IPQ4019 USB PHY module"
++      depends on OF && ARCH_QCOM
++      select GENERIC_PHY
++      help
++        Support for the USB PHY on QCOM IPQ4019/Dakota chipsets.
++
+ config PHY_QCOM_IPQ806X_SATA
+       tristate "Qualcomm IPQ806x SATA SerDes/PHY driver"
+       depends on ARCH_QCOM
+--- /dev/null
++++ b/drivers/phy/qualcomm/phy-qcom-ipq4019-usb.c
+@@ -0,0 +1,188 @@
++/*
++ * Copyright (C) 2018 John Crispin <john@phrozen.org>
++ *
++ * Based on code from
++ * Allwinner Technology Co., Ltd. <www.allwinnertech.com>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ */
++
++#include <linux/delay.h>
++#include <linux/err.h>
++#include <linux/io.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/mutex.h>
++#include <linux/of_platform.h>
++#include <linux/phy/phy.h>
++#include <linux/platform_device.h>
++#include <linux/reset.h>
++
++/*
++ * Magic registers copied from the SDK driver code
++ */
++#define PHY_CTRL0_ADDR        0x000
++#define PHY_CTRL1_ADDR        0x004
++#define PHY_CTRL2_ADDR        0x008
++#define PHY_CTRL3_ADDR        0x00C
++#define PHY_CTRL4_ADDR        0x010
++#define PHY_MISC_ADDR 0x024
++#define PHY_IPG_ADDR  0x030
++
++#define PHY_CTRL0_VAL 0xA4600015
++#define PHY_CTRL1_VAL 0x09500000
++#define PHY_CTRL2_VAL 0x00058180
++#define PHY_CTRL3_VAL 0x6DB6DCD6
++#define PHY_CTRL4_VAL 0x836DB6DB
++#define PHY_MISC_VAL  0x3803FB0C
++#define PHY_IPG_VAL   0x47323232
++
++struct ipq4019_usb_phy {
++      struct device           *dev;
++      struct phy              *phy;
++      void __iomem            *base;
++      struct reset_control    *por_rst;
++      struct reset_control    *srif_rst;
++};
++
++static int ipq4019_ss_phy_power_off(struct phy *_phy)
++{
++      struct ipq4019_usb_phy *phy = phy_get_drvdata(_phy);
++
++      reset_control_assert(phy->por_rst);
++      msleep(10);
++
++      return 0;
++}
++
++static int ipq4019_ss_phy_power_on(struct phy *_phy)
++{
++      struct ipq4019_usb_phy *phy = phy_get_drvdata(_phy);
++
++      ipq4019_ss_phy_power_off(_phy);
++
++      reset_control_deassert(phy->por_rst);
++
++      return 0;
++}
++
++static struct phy_ops ipq4019_usb_ss_phy_ops = {
++      .power_on       = ipq4019_ss_phy_power_on,
++      .power_off      = ipq4019_ss_phy_power_off,
++};
++
++static int ipq4019_hs_phy_power_off(struct phy *_phy)
++{
++      struct ipq4019_usb_phy *phy = phy_get_drvdata(_phy);
++
++      reset_control_assert(phy->por_rst);
++      msleep(10);
++
++      reset_control_assert(phy->srif_rst);
++      msleep(10);
++
++      return 0;
++}
++
++static int ipq4019_hs_phy_power_on(struct phy *_phy)
++{
++      struct ipq4019_usb_phy *phy = phy_get_drvdata(_phy);
++
++      ipq4019_hs_phy_power_off(_phy);
++
++      reset_control_deassert(phy->srif_rst);
++      msleep(10);
++
++      writel(PHY_CTRL0_VAL, phy->base + PHY_CTRL0_ADDR);
++      writel(PHY_CTRL1_VAL, phy->base + PHY_CTRL1_ADDR);
++      writel(PHY_CTRL2_VAL, phy->base + PHY_CTRL2_ADDR);
++      writel(PHY_CTRL3_VAL, phy->base + PHY_CTRL3_ADDR);
++      writel(PHY_CTRL4_VAL, phy->base + PHY_CTRL4_ADDR);
++      writel(PHY_MISC_VAL, phy->base + PHY_MISC_ADDR);
++      writel(PHY_IPG_VAL, phy->base + PHY_IPG_ADDR);
++      msleep(10);
++
++      reset_control_deassert(phy->por_rst);
++
++      return 0;
++}
++
++static struct phy_ops ipq4019_usb_hs_phy_ops = {
++      .power_on       = ipq4019_hs_phy_power_on,
++      .power_off      = ipq4019_hs_phy_power_off,
++};
++
++static const struct of_device_id ipq4019_usb_phy_of_match[] = {
++      { .compatible = "qcom,usb-hs-ipq4019-phy", .data = &ipq4019_usb_hs_phy_ops},
++      { .compatible = "qcom,usb-ss-ipq4019-phy", .data = &ipq4019_usb_ss_phy_ops},
++      { },
++};
++MODULE_DEVICE_TABLE(of, ipq4019_usb_phy_of_match);
++
++static int ipq4019_usb_phy_probe(struct platform_device *pdev)
++{
++      struct device *dev = &pdev->dev;
++      struct resource *res;
++      struct phy_provider *phy_provider;
++      struct ipq4019_usb_phy *phy;
++      const struct of_device_id *match;
++
++      match = of_match_device(ipq4019_usb_phy_of_match, &pdev->dev);
++      if (!match)
++              return -ENODEV;
++
++      phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
++      if (!phy)
++              return -ENOMEM;
++
++      phy->dev = &pdev->dev;
++      res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++      phy->base = devm_ioremap_resource(&pdev->dev, res);
++      if (IS_ERR(phy->base)) {
++              dev_err(dev, "failed to remap register memory\n");
++              return PTR_ERR(phy->base);
++      }
++
++      phy->por_rst = devm_reset_control_get(phy->dev, "por_rst");
++      if (IS_ERR(phy->por_rst)) {
++              if (PTR_ERR(phy->por_rst) != -EPROBE_DEFER)
++                      dev_err(dev, "POR reset is missing\n");
++              return PTR_ERR(phy->por_rst);
++      }
++
++      phy->srif_rst = devm_reset_control_get_optional(phy->dev, "srif_rst");
++      if (IS_ERR(phy->srif_rst))
++              return PTR_ERR(phy->srif_rst);
++
++      phy->phy = devm_phy_create(dev, NULL, match->data);
++      if (IS_ERR(phy->phy)) {
++              dev_err(dev, "failed to create PHY\n");
++              return PTR_ERR(phy->phy);
++      }
++      phy_set_drvdata(phy->phy, phy);
++
++      phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
++
++      return PTR_ERR_OR_ZERO(phy_provider);
++}
++
++static struct platform_driver ipq4019_usb_phy_driver = {
++      .probe  = ipq4019_usb_phy_probe,
++      .driver = {
++              .of_match_table = ipq4019_usb_phy_of_match,
++              .name  = "ipq4019-usb-phy",
++      }
++};
++module_platform_driver(ipq4019_usb_phy_driver);
++
++MODULE_DESCRIPTION("QCOM/IPQ4019 USB phy driver");
++MODULE_AUTHOR("John Crispin <john@phrozen.org>");
++MODULE_LICENSE("GPL v2");
+--- a/drivers/phy/qualcomm/Makefile
++++ b/drivers/phy/qualcomm/Makefile
+@@ -1,6 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ obj-$(CONFIG_PHY_ATH79_USB)           += phy-ath79-usb.o
+ obj-$(CONFIG_PHY_QCOM_APQ8064_SATA)   += phy-qcom-apq8064-sata.o
++obj-$(CONFIG_PHY_QCOM_IPQ4019_USB)    += phy-qcom-ipq4019-usb.o
+ obj-$(CONFIG_PHY_QCOM_IPQ806X_SATA)   += phy-qcom-ipq806x-sata.o
+ obj-$(CONFIG_PHY_QCOM_QMP)            += phy-qcom-qmp.o
+ obj-$(CONFIG_PHY_QCOM_QUSB2)          += phy-qcom-qusb2.o
diff --git a/target/linux/ipq40xx/patches-4.19/077-qcom-ipq4019-add-USB-devicetree-nodes.patch b/target/linux/ipq40xx/patches-4.19/077-qcom-ipq4019-add-USB-devicetree-nodes.patch
new file mode 100644 (file)
index 0000000..419e364
--- /dev/null
@@ -0,0 +1,123 @@
+From 1fc7d5523e21ed140fed43c4dde011a3b6d9ba08 Mon Sep 17 00:00:00 2001
+From: John Crispin <john@phrozen.org>
+Date: Tue, 24 Jul 2018 14:47:55 +0200
+Subject: [PATCH 3/3] qcom: ipq4019: add USB devicetree nodes
+
+This patch makes USB work on the Dakota EVB.
+
+Signed-off-by: John Crispin <john@phrozen.org>
+---
+ arch/arm/boot/dts/qcom-ipq4019-ap.dk01.1.dtsi | 20 ++++++++
+ arch/arm/boot/dts/qcom-ipq4019.dtsi           | 74 +++++++++++++++++++++++++++
+ 2 files changed, 94 insertions(+)
+
+--- a/arch/arm/boot/dts/qcom-ipq4019-ap.dk01.1.dtsi
++++ b/arch/arm/boot/dts/qcom-ipq4019-ap.dk01.1.dtsi
+@@ -109,5 +109,25 @@
+               wifi@a800000 {
+                       status = "ok";
+               };
++
++              usb3_ss_phy: ssphy@9a000 {
++                      status = "ok";
++              };
++
++              usb3_hs_phy: hsphy@a6000 {
++                      status = "ok";
++              };
++
++              usb3: usb3@8af8800 {
++                      status = "ok";
++              };
++
++              usb2_hs_phy: hsphy@a8000 {
++                      status = "ok";
++              };
++
++              usb2: usb2@60f8800 {
++                      status = "ok";
++              };
+       };
+ };
+--- a/arch/arm/boot/dts/qcom-ipq4019.dtsi
++++ b/arch/arm/boot/dts/qcom-ipq4019.dtsi
+@@ -564,5 +564,79 @@
+                                         "legacy";
+                       status = "disabled";
+               };
++
++              usb3_ss_phy: ssphy@9a000 {
++                      compatible = "qcom,usb-ss-ipq4019-phy";
++                      #phy-cells = <0>;
++                      reg = <0x9a000 0x800>;
++                      reg-names = "phy_base";
++                      resets = <&gcc USB3_UNIPHY_PHY_ARES>;
++                      reset-names = "por_rst";
++                      status = "disabled";
++              };
++
++              usb3_hs_phy: hsphy@a6000 {
++                      compatible = "qcom,usb-hs-ipq4019-phy";
++                      #phy-cells = <0>;
++                      reg = <0xa6000 0x40>;
++                      reg-names = "phy_base";
++                      resets = <&gcc USB3_HSPHY_POR_ARES>, <&gcc USB3_HSPHY_S_ARES>;
++                      reset-names = "por_rst", "srif_rst";
++                      status = "disabled";
++              };
++
++              usb3@8af8800 {
++                      compatible = "qcom,dwc3";
++                      reg = <0x8af8800 0x100>;
++                      #address-cells = <1>;
++                      #size-cells = <1>;
++                      clocks = <&gcc GCC_USB3_MASTER_CLK>,
++                               <&gcc GCC_USB3_SLEEP_CLK>,
++                               <&gcc GCC_USB3_MOCK_UTMI_CLK>;
++                      clock-names = "master", "sleep", "mock_utmi";
++                      ranges;
++                      status = "disabled";
++
++                      dwc3@8a00000 {
++                              compatible = "snps,dwc3";
++                              reg = <0x8a00000 0xf8000>;
++                              interrupts = <0 132 0>;
++                              phys = <&usb3_hs_phy>, <&usb3_ss_phy>;
++                              phy-names = "usb2-phy", "usb3-phy";
++                              dr_mode = "host";
++                      };
++              };
++
++              usb2_hs_phy: hsphy@a8000 {
++                      compatible = "qcom,usb-hs-ipq4019-phy";
++                      #phy-cells = <0>;
++                      reg = <0xa8000 0x40>;
++                      reg-names = "phy_base";
++                      resets = <&gcc USB2_HSPHY_POR_ARES>, <&gcc USB2_HSPHY_S_ARES>;
++                      reset-names = "por_rst", "srif_rst";
++                      status = "disabled";
++              };
++
++              usb2@60f8800 {
++                      compatible = "qcom,dwc3";
++                      reg = <0x60f8800 0x100>;
++                      #address-cells = <1>;
++                      #size-cells = <1>;
++                      clocks = <&gcc GCC_USB2_MASTER_CLK>,
++                               <&gcc GCC_USB2_SLEEP_CLK>,
++                               <&gcc GCC_USB2_MOCK_UTMI_CLK>;
++                      clock-names = "master", "sleep", "mock_utmi";
++                      ranges;
++                      status = "disabled";
++
++                      dwc3@6000000 {
++                              compatible = "snps,dwc3";
++                              reg = <0x6000000 0xf8000>;
++                              interrupts = <0 136 0>;
++                              phys = <&usb2_hs_phy>;
++                              phy-names = "usb2-phy";
++                              dr_mode = "host";
++                      };
++              };
+       };
+ };
diff --git a/target/linux/ipq40xx/patches-4.19/079-v4.20-ARM-dts-qcom-ipq4019-fix-PCI-range.patch b/target/linux/ipq40xx/patches-4.19/079-v4.20-ARM-dts-qcom-ipq4019-fix-PCI-range.patch
new file mode 100644 (file)
index 0000000..6bfea2a
--- /dev/null
@@ -0,0 +1,25 @@
+From da89f500cb55fb3f19c4b399b46d8add0abbd4d6 Mon Sep 17 00:00:00 2001
+From: Mathias Kresin <dev@kresin.me>
+Date: Wed, 25 Jul 2018 10:37:48 +0200
+Subject: [PATCH] ARM: dts: qcom: ipq4019: fix PCI range
+
+The PCI range is invalid, and PCI-attached devices don't work.
+
+Signed-off-by: Mathias Kresin <dev@kresin.me>
+Signed-off-by: John Crispin <john@phrozen.org>
+Signed-off-by: Andy Gross <andy.gross@linaro.org>
+---
+ arch/arm/boot/dts/qcom-ipq4019.dtsi | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/arm/boot/dts/qcom-ipq4019.dtsi
++++ b/arch/arm/boot/dts/qcom-ipq4019.dtsi
+@@ -401,7 +401,7 @@
+                       #size-cells = <2>;
+                       ranges = <0x81000000 0 0x40200000 0x40200000 0 0x00100000
+-                                0x82000000 0 0x48000000 0x48000000 0 0x10000000>;
++                                0x82000000 0 0x40300000 0x40300000 0 0x400000>;
+                       interrupts = <GIC_SPI 141 IRQ_TYPE_EDGE_RISING>;
+                       interrupt-names = "msi";
diff --git a/target/linux/ipq40xx/patches-4.19/080-ARM-dts-qcom-add-gpio-ranges-property.patch b/target/linux/ipq40xx/patches-4.19/080-ARM-dts-qcom-add-gpio-ranges-property.patch
new file mode 100644 (file)
index 0000000..67522f5
--- /dev/null
@@ -0,0 +1,70 @@
+From patchwork Mon May 21 20:57:38 2018
+Content-Type: text/plain; charset="utf-8"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+Subject: [v5,3/4] ARM: dts: qcom: add gpio-ranges property
+X-Patchwork-Submitter: Christian Lamparter <chunkeey@gmail.com>
+X-Patchwork-Id: 917856
+Message-Id: <0ae3376606a89bcdf3fe753a5c967f7103699e09.1526935804.git.chunkeey@gmail.com>
+To: linux-gpio@vger.kernel.org, linux-arm-kernel@lists.infradead.org,
+ linux-arm-msm@vger.kernel.org, devicetree@vger.kernel.org
+Cc: Bjorn Andersson <bjorn.andersson@linaro.org>,
+ Linus Walleij <linus.walleij@linaro.org>,
+ Stephen Boyd <sboyd@kernel.org>, David Brown <david.brown@linaro.org>,
+ Rob Herring <robh+dt@kernel.org>, Mark Rutland <mark.rutland@arm.com>,
+ Andy Gross <andy.gross@linaro.org>,
+ Sven Eckelmann <sven.eckelmann@openmesh.com>
+Date: Mon, 21 May 2018 22:57:38 +0200
+From: Christian Lamparter <chunkeey@gmail.com>
+List-Id: <linux-gpio.vger.kernel.org>
+
+This patch adds the gpio-ranges property to almost all of
+the Qualcomm ARM platforms that utilize the pinctrl-msm
+framework.
+
+The gpio-ranges property is part of the gpiolib subsystem.
+As a result, the binding text is available in section
+"2.1 gpio- and pin-controller interaction" of
+Documentation/devicetree/bindings/gpio/gpio.txt
+
+For more information please see the patch titled:
+"pinctrl: msm: fix gpio-hog related boot issues" from
+this series.
+
+Reported-by: Sven Eckelmann <sven.eckelmann@openmesh.com>
+Tested-by: Sven Eckelmann <sven.eckelmann@openmesh.com> [ipq4019]
+Reviewed-by: Bjorn Andersson <bjorn.andersson@linaro.org>
+Signed-off-by: Christian Lamparter <chunkeey@gmail.com>
+Reviewed-by: Linus Walleij <linus.walleij@linaro.org>
+---
+To help with git bisect, the DT update patch has been intentionally
+placed after the "pinctrl: msm: fix gpio-hog related boot issues".
+Otherwise - if the order was reversed - and bisect decides to split
+between these two patches, the gpiochip_add_pin_ranges() function
+will be executed twice with the same parameters for the same pinctrl.
+---
+ arch/arm/boot/dts/qcom-apq8064.dtsi   | 1 +
+ arch/arm/boot/dts/qcom-apq8084.dtsi   | 1 +
+ arch/arm/boot/dts/qcom-ipq4019.dtsi   | 1 +
+ arch/arm/boot/dts/qcom-ipq8064.dtsi   | 1 +
+ arch/arm/boot/dts/qcom-mdm9615.dtsi   | 1 +
+ arch/arm/boot/dts/qcom-msm8660.dtsi   | 1 +
+ arch/arm/boot/dts/qcom-msm8960.dtsi   | 1 +
+ arch/arm/boot/dts/qcom-msm8974.dtsi   | 1 +
+ arch/arm64/boot/dts/qcom/ipq8074.dtsi | 3 ++-
+ arch/arm64/boot/dts/qcom/msm8916.dtsi | 1 +
+ arch/arm64/boot/dts/qcom/msm8992.dtsi | 1 +
+ arch/arm64/boot/dts/qcom/msm8994.dtsi | 1 +
+ arch/arm64/boot/dts/qcom/msm8996.dtsi | 1 +
+ 13 files changed, 14 insertions(+), 1 deletion(-)
+
+--- a/arch/arm/boot/dts/qcom-ipq4019.dtsi
++++ b/arch/arm/boot/dts/qcom-ipq4019.dtsi
+@@ -202,6 +202,7 @@
+                       compatible = "qcom,ipq4019-pinctrl";
+                       reg = <0x01000000 0x300000>;
+                       gpio-controller;
++                      gpio-ranges = <&tlmm 0 0 100>;
+                       #gpio-cells = <2>;
+                       interrupt-controller;
+                       #interrupt-cells = <2>;
diff --git a/target/linux/ipq40xx/patches-4.19/081-clk-fix-apss-cpu-overclocking.patch b/target/linux/ipq40xx/patches-4.19/081-clk-fix-apss-cpu-overclocking.patch
new file mode 100644 (file)
index 0000000..de99792
--- /dev/null
@@ -0,0 +1,115 @@
+From f2b87dc1028b710ec8ce25808b9d21f92b376184 Mon Sep 17 00:00:00 2001
+From: Christian Lamparter <chunkeey@googlemail.com>
+Date: Sun, 11 Mar 2018 14:41:31 +0100
+Subject: [PATCH 2/2] clk: fix apss cpu overclocking
+
+There's an interaction issue between the clk changes:"
+clk: qcom: ipq4019: Add the apss cpu pll divider clock node
+clk: qcom: ipq4019: remove fixed clocks and add pll clocks
+" and the cpufreq-dt.
+
+cpufreq-dt is now spamming the kernel-log with the following:
+
+[ 1099.190658] cpu cpu0: dev_pm_opp_set_rate: failed to find current OPP
+for freq 761142857 (-34)
+
+This only happens on certain devices like the Compex WPJ428
+and AVM FritzBox!4040. However, other devices like the Asus
+RT-AC58U and Meraki MR33 work just fine.
+
+The issue stems from the fact that all higher CPU clocks
+are achieved by switching the clock parent to the P_DDRPLLAPSS
+(ddrpllapss) clock, which is set by Qualcomm's proprietary bootcode
+as part of the DDR calibration.
+
+For example, the FB4040 uses 256 MiB Nanya NT5CC128M16IP clocked
+at around 533 MHz (ddrpllsdcc = 190285714 Hz),
+
+whereas the 128 MiB Nanya NT5CC64M16GP-DI in the ASUS RT-AC58U is
+clocked at a slightly higher 537 MHz (ddrpllsdcc = 192000000 Hz).
+
+This patch attempts to fix the issue by modifying
+clk_cpu_div_round_rate(), clk_cpu_div_set_rate(), clk_cpu_div_recalc_rate()
+to use a new qcom_find_freq_close() function, which returns the closest
+matching frequency, instead of the next higher. This way, the SoC in
+the FB4040 (with its max clock speed of 710.4 MHz) will no longer
+try to overclock to 761 MHz.
+
+Fixes: d83dcacea18 ("clk: qcom: ipq4019: Add the apss cpu pll divider clock node")
+Signed-off-by: Christian Lamparter <chunkeey@gmail.com>
+Signed-off-by: John Crispin <john@phrozen.org>
+---
+ drivers/clk/qcom/gcc-ipq4019.c | 34 +++++++++++++++++++++++++++++++---
+ 1 file changed, 31 insertions(+), 3 deletions(-)
+
+--- a/drivers/clk/qcom/gcc-ipq4019.c
++++ b/drivers/clk/qcom/gcc-ipq4019.c
+@@ -1251,6 +1251,29 @@ static const struct clk_fepll_vco gcc_fe
+       .reg = 0x2f020,
+ };
++
++const struct freq_tbl *qcom_find_freq_close(const struct freq_tbl *f,
++                                           unsigned long rate)
++{
++      const struct freq_tbl *last = NULL;
++
++      for ( ; f->freq; f++) {
++              if (rate == f->freq)
++                      return f;
++
++              if (f->freq > rate) {
++                      if (!last ||
++                         (f->freq - rate) < (rate - last->freq))
++                              return f;
++                      else
++                              return last;
++              }
++              last = f;
++      }
++
++      return last;
++}
++
+ /*
+  * Round rate function for APSS CPU PLL Clock divider.
+  * It looks up the frequency table and returns the next higher frequency
+@@ -1263,7 +1286,7 @@ static long clk_cpu_div_round_rate(struc
+       struct clk_hw *p_hw;
+       const struct freq_tbl *f;
+-      f = qcom_find_freq(pll->freq_tbl, rate);
++      f = qcom_find_freq_close(pll->freq_tbl, rate);
+       if (!f)
+               return -EINVAL;
+@@ -1286,7 +1309,7 @@ static int clk_cpu_div_set_rate(struct c
+       u32 mask;
+       int ret;
+-      f = qcom_find_freq(pll->freq_tbl, rate);
++      f = qcom_find_freq_close(pll->freq_tbl, rate);
+       if (!f)
+               return -EINVAL;
+@@ -1313,6 +1336,7 @@ static unsigned long
+ clk_cpu_div_recalc_rate(struct clk_hw *hw,
+                       unsigned long parent_rate)
+ {
++      const struct freq_tbl *f;
+       struct clk_fepll *pll = to_clk_fepll(hw);
+       u32 cdiv, pre_div;
+       u64 rate;
+@@ -1333,7 +1357,11 @@ clk_cpu_div_recalc_rate(struct clk_hw *h
+       rate = clk_fepll_vco_calc_rate(pll, parent_rate) * 2;
+       do_div(rate, pre_div);
+-      return rate;
++      f = qcom_find_freq_close(pll->freq_tbl, rate);
++      if (!f)
++              return rate;
++
++      return f->freq;
+ };
+ static const struct clk_ops clk_regmap_cpu_div_ops = {
diff --git a/target/linux/ipq40xx/patches-4.19/082-v4.20-mtd-spinand-winbond-Add-support-for-W25N01GV.patch b/target/linux/ipq40xx/patches-4.19/082-v4.20-mtd-spinand-winbond-Add-support-for-W25N01GV.patch
new file mode 100644 (file)
index 0000000..22bd985
--- /dev/null
@@ -0,0 +1,31 @@
+From 342fc01bfd6d717602c71d96d3ef40a36e45e060 Mon Sep 17 00:00:00 2001
+From: Robert Marko <robimarko@gmail.com>
+Date: Fri, 5 Oct 2018 09:02:50 +0200
+Subject: [PATCH] mtd: spinand: winbond: Add support for W25N01GV
+
+W25N01GV is a single-die version of the already supported
+W25M02GV with half the capacity. Everything else is the
+same, so introduce support for W25N01GV.
+
+Signed-off-by: Robert Marko <robimarko@gmail.com>
+---
+ drivers/mtd/nand/spi/winbond.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/drivers/mtd/nand/spi/winbond.c
++++ b/drivers/mtd/nand/spi/winbond.c
+@@ -84,6 +84,14 @@ static const struct spinand_info winbond
+                    0,
+                    SPINAND_ECCINFO(&w25m02gv_ooblayout, NULL),
+                    SPINAND_SELECT_TARGET(w25m02gv_select_target)),
++      SPINAND_INFO("W25N01GV", 0xAA,
++                   NAND_MEMORG(1, 2048, 64, 64, 1024, 1, 1, 1),
++                   NAND_ECCREQ(1, 512),
++                   SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
++                                            &write_cache_variants,
++                                            &update_cache_variants),
++                   0,
++                   SPINAND_ECCINFO(&w25m02gv_ooblayout, NULL)),
+ };
+ /**
diff --git a/target/linux/ipq40xx/patches-4.19/303-spi-nor-enable-4B-opcodes-for-mx25l25635f.patch b/target/linux/ipq40xx/patches-4.19/303-spi-nor-enable-4B-opcodes-for-mx25l25635f.patch
new file mode 100644 (file)
index 0000000..eae2fd9
--- /dev/null
@@ -0,0 +1,62 @@
+--- a/drivers/mtd/spi-nor/spi-nor.c
++++ b/drivers/mtd/spi-nor/spi-nor.c
+@@ -1091,6 +1091,7 @@ static const struct flash_info spi_nor_i
+       { "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, 0) },
+       { "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) },
+       { "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
++      { "mx25l25635f", INFO(0xc22019, 0, 64 * 1024, 512, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
+       { "mx25u25635f", INFO(0xc22539, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_4B_OPCODES) },
+       { "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) },
+       { "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
+@@ -1273,11 +1274,12 @@ static const struct flash_info spi_nor_i
+       { },
+ };
+-static const struct flash_info *spi_nor_read_id(struct spi_nor *nor)
++static const struct flash_info *spi_nor_read_id(struct spi_nor *nor,
++                                              const char *name)
+ {
+       int                     tmp;
+       u8                      id[SPI_NOR_MAX_ID_LEN];
+-      const struct flash_info *info;
++      const struct flash_info *info, *first_match = NULL;
+       tmp = nor->read_reg(nor, SPINOR_OP_RDID, id, SPI_NOR_MAX_ID_LEN);
+       if (tmp < 0) {
+@@ -1288,10 +1290,16 @@ static const struct flash_info *spi_nor_
+       for (tmp = 0; tmp < ARRAY_SIZE(spi_nor_ids) - 1; tmp++) {
+               info = &spi_nor_ids[tmp];
+               if (info->id_len) {
+-                      if (!memcmp(info->id, id, info->id_len))
+-                              return &spi_nor_ids[tmp];
++                      if (!memcmp(info->id, id, info->id_len)) {
++                              if (!name || !strcmp(name, info->name))
++                                      return info;
++                              if (!first_match)
++                                      first_match = info;
++                      }
+               }
+       }
++      if (first_match)
++              return first_match;
+       dev_err(nor->dev, "unrecognized JEDEC id bytes: %02x, %02x, %02x\n",
+               id[0], id[1], id[2]);
+       return ERR_PTR(-ENODEV);
+@@ -2825,7 +2833,7 @@ int spi_nor_scan(struct spi_nor *nor, co
+               info = spi_nor_match_id(name);
+       /* Try to auto-detect if chip name wasn't specified or not found */
+       if (!info)
+-              info = spi_nor_read_id(nor);
++              info = spi_nor_read_id(nor, NULL);
+       if (IS_ERR_OR_NULL(info))
+               return -ENOENT;
+@@ -2836,7 +2844,7 @@ int spi_nor_scan(struct spi_nor *nor, co
+       if (name && info->id_len) {
+               const struct flash_info *jinfo;
+-              jinfo = spi_nor_read_id(nor);
++              jinfo = spi_nor_read_id(nor, name);
+               if (IS_ERR(jinfo)) {
+                       return PTR_ERR(jinfo);
+               } else if (jinfo != info) {
diff --git a/target/linux/ipq40xx/patches-4.19/700-net-add-qualcomm-mdio-and-phy.patch b/target/linux/ipq40xx/patches-4.19/700-net-add-qualcomm-mdio-and-phy.patch
new file mode 100644 (file)
index 0000000..58ae6b9
--- /dev/null
@@ -0,0 +1,2690 @@
+From 5a71a2005a2e1e6bbe36f00386c495ad6626beb2 Mon Sep 17 00:00:00 2001
+From: Christian Lamparter <chunkeey@googlemail.com>
+Date: Thu, 19 Jan 2017 01:59:43 +0100
+Subject: [PATCH 30/38] NET: add qualcomm mdio and PHY
+
+---
+ drivers/net/phy/Kconfig  | 14 ++++++++++++++
+ drivers/net/phy/Makefile |  2 ++
+ 2 files changed, 16 insertions(+)
+
+--- a/drivers/net/phy/Kconfig
++++ b/drivers/net/phy/Kconfig
+@@ -519,6 +519,20 @@ config XILINX_GMII2RGMII
+         the Reduced Gigabit Media Independent Interface(RGMII) between
+         Ethernet physical media devices and the Gigabit Ethernet controller.
++config MDIO_IPQ40XX
++      tristate "Qualcomm Atheros ipq40xx MDIO interface"
++      depends on HAS_IOMEM && OF
++      ---help---
++        This driver supports the MDIO interface found in the Qualcomm
++        Atheros ipq40xx SoC.
++
++config AR40XX_PHY
++      tristate "Driver for Qualcomm Atheros IPQ40XX switches"
++      depends on HAS_IOMEM && OF
++      select SWCONFIG
++      ---help---
++         This is the driver for Qualcomm Atheros IPQ40XX ESS switches.
++
+ endif # PHYLIB
+ config MICREL_KS8995MA
+--- a/drivers/net/phy/Makefile
++++ b/drivers/net/phy/Makefile
+@@ -48,6 +48,7 @@ obj-$(CONFIG_MDIO_CAVIUM)    += mdio-cavium
+ obj-$(CONFIG_MDIO_GPIO)               += mdio-gpio.o
+ obj-$(CONFIG_MDIO_HISI_FEMAC) += mdio-hisi-femac.o
+ obj-$(CONFIG_MDIO_I2C)                += mdio-i2c.o
++obj-$(CONFIG_MDIO_IPQ40XX)    += mdio-ipq40xx.o
+ obj-$(CONFIG_MDIO_MOXART)     += mdio-moxart.o
+ obj-$(CONFIG_MDIO_MSCC_MIIM)  += mdio-mscc-miim.o
+ obj-$(CONFIG_MDIO_OCTEON)     += mdio-octeon.o
+@@ -61,6 +62,7 @@ obj-y                                += $(sfp-obj-y) $(sfp-obj-m)
+ obj-$(CONFIG_AMD_PHY)         += amd.o
+ obj-$(CONFIG_AQUANTIA_PHY)    += aquantia.o
++obj-$(CONFIG_AR40XX_PHY)      += ar40xx.o
+ obj-$(CONFIG_ASIX_PHY)                += asix.o
+ obj-$(CONFIG_AT803X_PHY)      += at803x.o
+ obj-$(CONFIG_BCM63XX_PHY)     += bcm63xx.o
+--- /dev/null
++++ b/drivers/net/phy/ar40xx.c
+@@ -0,0 +1,2090 @@
++/*
++ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
++ *
++ * Permission to use, copy, modify, and/or distribute this software for
++ * any purpose with or without fee is hereby granted, provided that the
++ * above copyright notice and this permission notice appear in all copies.
++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
++ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
++ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
++ * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
++ */
++
++#include <linux/module.h>
++#include <linux/list.h>
++#include <linux/bitops.h>
++#include <linux/switch.h>
++#include <linux/delay.h>
++#include <linux/phy.h>
++#include <linux/clk.h>
++#include <linux/reset.h>
++#include <linux/lockdep.h>
++#include <linux/workqueue.h>
++#include <linux/of_device.h>
++#include <linux/of_address.h>
++#include <linux/mdio.h>
++#include <linux/gpio.h>
++
++#include "ar40xx.h"
++
++static struct ar40xx_priv *ar40xx_priv;
++
++#define MIB_DESC(_s, _o, _n) \
++      {                       \
++              .size = (_s),   \
++              .offset = (_o), \
++              .name = (_n),   \
++      }
++
++static const struct ar40xx_mib_desc ar40xx_mibs[] = {
++      MIB_DESC(1, AR40XX_STATS_RXBROAD, "RxBroad"),
++      MIB_DESC(1, AR40XX_STATS_RXPAUSE, "RxPause"),
++      MIB_DESC(1, AR40XX_STATS_RXMULTI, "RxMulti"),
++      MIB_DESC(1, AR40XX_STATS_RXFCSERR, "RxFcsErr"),
++      MIB_DESC(1, AR40XX_STATS_RXALIGNERR, "RxAlignErr"),
++      MIB_DESC(1, AR40XX_STATS_RXRUNT, "RxRunt"),
++      MIB_DESC(1, AR40XX_STATS_RXFRAGMENT, "RxFragment"),
++      MIB_DESC(1, AR40XX_STATS_RX64BYTE, "Rx64Byte"),
++      MIB_DESC(1, AR40XX_STATS_RX128BYTE, "Rx128Byte"),
++      MIB_DESC(1, AR40XX_STATS_RX256BYTE, "Rx256Byte"),
++      MIB_DESC(1, AR40XX_STATS_RX512BYTE, "Rx512Byte"),
++      MIB_DESC(1, AR40XX_STATS_RX1024BYTE, "Rx1024Byte"),
++      MIB_DESC(1, AR40XX_STATS_RX1518BYTE, "Rx1518Byte"),
++      MIB_DESC(1, AR40XX_STATS_RXMAXBYTE, "RxMaxByte"),
++      MIB_DESC(1, AR40XX_STATS_RXTOOLONG, "RxTooLong"),
++      MIB_DESC(2, AR40XX_STATS_RXGOODBYTE, "RxGoodByte"),
++      MIB_DESC(2, AR40XX_STATS_RXBADBYTE, "RxBadByte"),
++      MIB_DESC(1, AR40XX_STATS_RXOVERFLOW, "RxOverFlow"),
++      MIB_DESC(1, AR40XX_STATS_FILTERED, "Filtered"),
++      MIB_DESC(1, AR40XX_STATS_TXBROAD, "TxBroad"),
++      MIB_DESC(1, AR40XX_STATS_TXPAUSE, "TxPause"),
++      MIB_DESC(1, AR40XX_STATS_TXMULTI, "TxMulti"),
++      MIB_DESC(1, AR40XX_STATS_TXUNDERRUN, "TxUnderRun"),
++      MIB_DESC(1, AR40XX_STATS_TX64BYTE, "Tx64Byte"),
++      MIB_DESC(1, AR40XX_STATS_TX128BYTE, "Tx128Byte"),
++      MIB_DESC(1, AR40XX_STATS_TX256BYTE, "Tx256Byte"),
++      MIB_DESC(1, AR40XX_STATS_TX512BYTE, "Tx512Byte"),
++      MIB_DESC(1, AR40XX_STATS_TX1024BYTE, "Tx1024Byte"),
++      MIB_DESC(1, AR40XX_STATS_TX1518BYTE, "Tx1518Byte"),
++      MIB_DESC(1, AR40XX_STATS_TXMAXBYTE, "TxMaxByte"),
++      MIB_DESC(1, AR40XX_STATS_TXOVERSIZE, "TxOverSize"),
++      MIB_DESC(2, AR40XX_STATS_TXBYTE, "TxByte"),
++      MIB_DESC(1, AR40XX_STATS_TXCOLLISION, "TxCollision"),
++      MIB_DESC(1, AR40XX_STATS_TXABORTCOL, "TxAbortCol"),
++      MIB_DESC(1, AR40XX_STATS_TXMULTICOL, "TxMultiCol"),
++      MIB_DESC(1, AR40XX_STATS_TXSINGLECOL, "TxSingleCol"),
++      MIB_DESC(1, AR40XX_STATS_TXEXCDEFER, "TxExcDefer"),
++      MIB_DESC(1, AR40XX_STATS_TXDEFER, "TxDefer"),
++      MIB_DESC(1, AR40XX_STATS_TXLATECOL, "TxLateCol"),
++};
++
++static u32
++ar40xx_read(struct ar40xx_priv *priv, int reg)
++{
++      return readl(priv->hw_addr + reg);
++}
++
++static u32
++ar40xx_psgmii_read(struct ar40xx_priv *priv, int reg)
++{
++      return readl(priv->psgmii_hw_addr + reg);
++}
++
++static void
++ar40xx_write(struct ar40xx_priv *priv, int reg, u32 val)
++{
++      writel(val, priv->hw_addr + reg);
++}
++
++static u32
++ar40xx_rmw(struct ar40xx_priv *priv, int reg, u32 mask, u32 val)
++{
++      u32 ret;
++
++      ret = ar40xx_read(priv, reg);
++      ret &= ~mask;
++      ret |= val;
++      ar40xx_write(priv, reg, ret);
++      return ret;
++}
++
++static void
++ar40xx_psgmii_write(struct ar40xx_priv *priv, int reg, u32 val)
++{
++      writel(val, priv->psgmii_hw_addr + reg);
++}
++
++static void
++ar40xx_phy_dbg_write(struct ar40xx_priv *priv, int phy_addr,
++                   u16 dbg_addr, u16 dbg_data)
++{
++      struct mii_bus *bus = priv->mii_bus;
++
++      mutex_lock(&bus->mdio_lock);
++      bus->write(bus, phy_addr, AR40XX_MII_ATH_DBG_ADDR, dbg_addr);
++      bus->write(bus, phy_addr, AR40XX_MII_ATH_DBG_DATA, dbg_data);
++      mutex_unlock(&bus->mdio_lock);
++}
++
++static void
++ar40xx_phy_dbg_read(struct ar40xx_priv *priv, int phy_addr,
++                  u16 dbg_addr, u16 *dbg_data)
++{
++      struct mii_bus *bus = priv->mii_bus;
++
++      mutex_lock(&bus->mdio_lock);
++      bus->write(bus, phy_addr, AR40XX_MII_ATH_DBG_ADDR, dbg_addr);
++      *dbg_data = bus->read(bus, phy_addr, AR40XX_MII_ATH_DBG_DATA);
++      mutex_unlock(&bus->mdio_lock);
++}
++
++static void
++ar40xx_phy_mmd_write(struct ar40xx_priv *priv, u32 phy_id,
++                   u16 mmd_num, u16 reg_id, u16 reg_val)
++{
++      struct mii_bus *bus = priv->mii_bus;
++
++      mutex_lock(&bus->mdio_lock);
++      bus->write(bus, phy_id,
++                      AR40XX_MII_ATH_MMD_ADDR, mmd_num);
++      bus->write(bus, phy_id,
++                      AR40XX_MII_ATH_MMD_DATA, reg_id);
++      bus->write(bus, phy_id,
++                      AR40XX_MII_ATH_MMD_ADDR,
++                      0x4000 | mmd_num);
++      bus->write(bus, phy_id,
++              AR40XX_MII_ATH_MMD_DATA, reg_val);
++      mutex_unlock(&bus->mdio_lock);
++}
++
++static u16
++ar40xx_phy_mmd_read(struct ar40xx_priv *priv, u32 phy_id,
++                  u16 mmd_num, u16 reg_id)
++{
++      u16 value;
++      struct mii_bus *bus = priv->mii_bus;
++
++      mutex_lock(&bus->mdio_lock);
++      bus->write(bus, phy_id,
++                      AR40XX_MII_ATH_MMD_ADDR, mmd_num);
++      bus->write(bus, phy_id,
++                      AR40XX_MII_ATH_MMD_DATA, reg_id);
++      bus->write(bus, phy_id,
++                      AR40XX_MII_ATH_MMD_ADDR,
++                      0x4000 | mmd_num);
++      value = bus->read(bus, phy_id, AR40XX_MII_ATH_MMD_DATA);
++      mutex_unlock(&bus->mdio_lock);
++      return value;
++}
++
++/* Start of swconfig support */
++
++static void
++ar40xx_phy_poll_reset(struct ar40xx_priv *priv)
++{
++      u32 i, in_reset, retries = 500;
++      struct mii_bus *bus = priv->mii_bus;
++
++      /* Assume RESET was recently issued to some or all of the phys */
++      in_reset = GENMASK(AR40XX_NUM_PHYS - 1, 0);
++
++      while (retries--) {
++              /* 1ms should be plenty of time.
++               * 802.3 spec allows for a max wait time of 500ms
++               */
++              usleep_range(1000, 2000);
++
++              for (i = 0; i < AR40XX_NUM_PHYS; i++) {
++                      int val;
++
++                      /* skip devices which have completed reset */
++                      if (!(in_reset & BIT(i)))
++                              continue;
++
++                      val = mdiobus_read(bus, i, MII_BMCR);
++                      if (val < 0)
++                              continue;
++
++                      /* mark when phy is no longer in reset state */
++                      if (!(val & BMCR_RESET))
++                              in_reset &= ~BIT(i);
++              }
++
++              if (!in_reset)
++                      return;
++      }
++
++      dev_warn(&bus->dev, "Failed to reset all phys! (in_reset: 0x%x)\n",
++               in_reset);
++}
++
++static void
++ar40xx_phy_init(struct ar40xx_priv *priv)
++{
++      int i;
++      struct mii_bus *bus;
++      u16 val;
++
++      bus = priv->mii_bus;
++      for (i = 0; i < AR40XX_NUM_PORTS - 1; i++) {
++              ar40xx_phy_dbg_read(priv, i, AR40XX_PHY_DEBUG_0, &val);
++              val &= ~AR40XX_PHY_MANU_CTRL_EN;
++              ar40xx_phy_dbg_write(priv, i, AR40XX_PHY_DEBUG_0, val);
++              mdiobus_write(bus, i,
++                            MII_ADVERTISE, ADVERTISE_ALL |
++                            ADVERTISE_PAUSE_CAP |
++                            ADVERTISE_PAUSE_ASYM);
++              mdiobus_write(bus, i, MII_CTRL1000, ADVERTISE_1000FULL);
++              mdiobus_write(bus, i, MII_BMCR, BMCR_RESET | BMCR_ANENABLE);
++      }
++
++      ar40xx_phy_poll_reset(priv);
++}
++
++static void
++ar40xx_port_phy_linkdown(struct ar40xx_priv *priv)
++{
++      struct mii_bus *bus;
++      int i;
++      u16 val;
++
++      bus = priv->mii_bus;
++      for (i = 0; i < AR40XX_NUM_PORTS - 1; i++) {
++              mdiobus_write(bus, i, MII_CTRL1000, 0);
++              mdiobus_write(bus, i, MII_ADVERTISE, 0);
++              mdiobus_write(bus, i, MII_BMCR, BMCR_RESET | BMCR_ANENABLE);
++              ar40xx_phy_dbg_read(priv, i, AR40XX_PHY_DEBUG_0, &val);
++              val |= AR40XX_PHY_MANU_CTRL_EN;
++              ar40xx_phy_dbg_write(priv, i, AR40XX_PHY_DEBUG_0, val);
++              /* disable transmit */
++              ar40xx_phy_dbg_read(priv, i, AR40XX_PHY_DEBUG_2, &val);
++              val &= 0xf00f;
++              ar40xx_phy_dbg_write(priv, i, AR40XX_PHY_DEBUG_2, val);
++      }
++}
++
++static void
++ar40xx_set_mirror_regs(struct ar40xx_priv *priv)
++{
++      int port;
++
++      /* reset all mirror registers */
++      ar40xx_rmw(priv, AR40XX_REG_FWD_CTRL0,
++                 AR40XX_FWD_CTRL0_MIRROR_PORT,
++                 (0xF << AR40XX_FWD_CTRL0_MIRROR_PORT_S));
++      for (port = 0; port < AR40XX_NUM_PORTS; port++) {
++              ar40xx_rmw(priv, AR40XX_REG_PORT_LOOKUP(port),
++                         AR40XX_PORT_LOOKUP_ING_MIRROR_EN, 0);
++
++              ar40xx_rmw(priv, AR40XX_REG_PORT_HOL_CTRL1(port),
++                         AR40XX_PORT_HOL_CTRL1_EG_MIRROR_EN, 0);
++      }
++
++      /* now enable mirroring if necessary */
++      if (priv->source_port >= AR40XX_NUM_PORTS ||
++          priv->monitor_port >= AR40XX_NUM_PORTS ||
++          priv->source_port == priv->monitor_port) {
++              return;
++      }
++
++      ar40xx_rmw(priv, AR40XX_REG_FWD_CTRL0,
++                 AR40XX_FWD_CTRL0_MIRROR_PORT,
++                 (priv->monitor_port << AR40XX_FWD_CTRL0_MIRROR_PORT_S));
++
++      if (priv->mirror_rx)
++              ar40xx_rmw(priv, AR40XX_REG_PORT_LOOKUP(priv->source_port), 0,
++                         AR40XX_PORT_LOOKUP_ING_MIRROR_EN);
++
++      if (priv->mirror_tx)
++              ar40xx_rmw(priv, AR40XX_REG_PORT_HOL_CTRL1(priv->source_port),
++                         0, AR40XX_PORT_HOL_CTRL1_EG_MIRROR_EN);
++}
++
++static int
++ar40xx_sw_get_ports(struct switch_dev *dev, struct switch_val *val)
++{
++      struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
++      u8 ports = priv->vlan_table[val->port_vlan];
++      int i;
++
++      val->len = 0;
++      for (i = 0; i < dev->ports; i++) {
++              struct switch_port *p;
++
++              if (!(ports & BIT(i)))
++                      continue;
++
++              p = &val->value.ports[val->len++];
++              p->id = i;
++              if ((priv->vlan_tagged & BIT(i)) ||
++                  (priv->pvid[i] != val->port_vlan))
++                      p->flags = BIT(SWITCH_PORT_FLAG_TAGGED);
++              else
++                      p->flags = 0;
++      }
++      return 0;
++}
++
++static int
++ar40xx_sw_set_ports(struct switch_dev *dev, struct switch_val *val)
++{
++      struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
++      u8 *vt = &priv->vlan_table[val->port_vlan];
++      int i;
++
++      *vt = 0;
++      for (i = 0; i < val->len; i++) {
++              struct switch_port *p = &val->value.ports[i];
++
++              if (p->flags & BIT(SWITCH_PORT_FLAG_TAGGED)) {
++                      if (val->port_vlan == priv->pvid[p->id])
++                              priv->vlan_tagged |= BIT(p->id);
++              } else {
++                      priv->vlan_tagged &= ~BIT(p->id);
++                      priv->pvid[p->id] = val->port_vlan;
++              }
++
++              *vt |= BIT(p->id);
++      }
++      return 0;
++}
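++
++/* Worked example for the two port-map callbacks above (values are
++ * hypothetical): setting VLAN 2 to ports "0 untagged, 5 tagged" makes
++ * ar40xx_sw_set_ports() store vlan_table[2] = BIT(0) | BIT(5), clear
++ * BIT(0) in vlan_tagged and set pvid[0] = 2, while BIT(5) is set in
++ * vlan_tagged only if pvid[5] already equals 2. Nothing touches the
++ * hardware here; the tables are flushed out by ar40xx_sw_hw_apply().
++ */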
++
++static int
++ar40xx_reg_wait(struct ar40xx_priv *priv, u32 reg, u32 mask, u32 val,
++              unsigned timeout)
++{
++      int i;
++
++      for (i = 0; i < timeout; i++) {
++              u32 t;
++
++              t = ar40xx_read(priv, reg);
++              if ((t & mask) == val)
++                      return 0;
++
++              usleep_range(1000, 2000);
++      }
++
++      return -ETIMEDOUT;
++}
++
++static int
++ar40xx_mib_op(struct ar40xx_priv *priv, u32 op)
++{
++      int ret;
++
++      lockdep_assert_held(&priv->mib_lock);
++
++      /* Capture the hardware statistics for all ports */
++      ar40xx_rmw(priv, AR40XX_REG_MIB_FUNC,
++                 AR40XX_MIB_FUNC, (op << AR40XX_MIB_FUNC_S));
++
++      /* Wait for the capturing to complete. */
++      ret = ar40xx_reg_wait(priv, AR40XX_REG_MIB_FUNC,
++                            AR40XX_MIB_BUSY, 0, 10);
++
++      return ret;
++}
++
++static void
++ar40xx_mib_fetch_port_stat(struct ar40xx_priv *priv, int port, bool flush)
++{
++      unsigned int base;
++      u64 *mib_stats;
++      int i;
++      u32 num_mibs = ARRAY_SIZE(ar40xx_mibs);
++
++      WARN_ON(port >= priv->dev.ports);
++
++      lockdep_assert_held(&priv->mib_lock);
++
++      base = AR40XX_REG_PORT_STATS_START +
++             AR40XX_REG_PORT_STATS_LEN * port;
++
++      mib_stats = &priv->mib_stats[port * num_mibs];
++      if (flush) {
++              u32 len;
++
++              len = num_mibs * sizeof(*mib_stats);
++              memset(mib_stats, 0, len);
++              return;
++      }
++      for (i = 0; i < num_mibs; i++) {
++              const struct ar40xx_mib_desc *mib;
++              u64 t;
++
++              mib = &ar40xx_mibs[i];
++              t = ar40xx_read(priv, base + mib->offset);
++              if (mib->size == 2) {
++                      u64 hi;
++
++                      hi = ar40xx_read(priv, base + mib->offset + 4);
++                      t |= hi << 32;
++              }
++
++              mib_stats[i] += t;
++      }
++}
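++
++/* The MIB block exposes 32-bit words; counters with mib->size == 2 span
++ * two consecutive words, so the high half at offset + 4 is folded in as
++ * t |= hi << 32. Results are accumulated ("+=") into mib_stats[] rather
++ * than overwritten, on the assumption that each hardware capture only
++ * holds the deltas since the previous one.
++ */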
++
++static int
++ar40xx_mib_capture(struct ar40xx_priv *priv)
++{
++      return ar40xx_mib_op(priv, AR40XX_MIB_FUNC_CAPTURE);
++}
++
++static int
++ar40xx_mib_flush(struct ar40xx_priv *priv)
++{
++      return ar40xx_mib_op(priv, AR40XX_MIB_FUNC_FLUSH);
++}
++
++static int
++ar40xx_sw_set_reset_mibs(struct switch_dev *dev,
++                       const struct switch_attr *attr,
++                       struct switch_val *val)
++{
++      struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
++      unsigned int len;
++      int ret;
++      u32 num_mibs = ARRAY_SIZE(ar40xx_mibs);
++
++      mutex_lock(&priv->mib_lock);
++
++      len = priv->dev.ports * num_mibs * sizeof(*priv->mib_stats);
++      memset(priv->mib_stats, 0, len);
++      ret = ar40xx_mib_flush(priv);
++
++      mutex_unlock(&priv->mib_lock);
++      return ret;
++}
++
++static int
++ar40xx_sw_set_vlan(struct switch_dev *dev, const struct switch_attr *attr,
++                 struct switch_val *val)
++{
++      struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
++
++      priv->vlan = !!val->value.i;
++      return 0;
++}
++
++static int
++ar40xx_sw_get_vlan(struct switch_dev *dev, const struct switch_attr *attr,
++                 struct switch_val *val)
++{
++      struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
++
++      val->value.i = priv->vlan;
++      return 0;
++}
++
++static int
++ar40xx_sw_set_mirror_rx_enable(struct switch_dev *dev,
++                             const struct switch_attr *attr,
++                             struct switch_val *val)
++{
++      struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
++
++      mutex_lock(&priv->reg_mutex);
++      priv->mirror_rx = !!val->value.i;
++      ar40xx_set_mirror_regs(priv);
++      mutex_unlock(&priv->reg_mutex);
++
++      return 0;
++}
++
++static int
++ar40xx_sw_get_mirror_rx_enable(struct switch_dev *dev,
++                             const struct switch_attr *attr,
++                             struct switch_val *val)
++{
++      struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
++
++      mutex_lock(&priv->reg_mutex);
++      val->value.i = priv->mirror_rx;
++      mutex_unlock(&priv->reg_mutex);
++      return 0;
++}
++
++static int
++ar40xx_sw_set_mirror_tx_enable(struct switch_dev *dev,
++                             const struct switch_attr *attr,
++                             struct switch_val *val)
++{
++      struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
++
++      mutex_lock(&priv->reg_mutex);
++      priv->mirror_tx = !!val->value.i;
++      ar40xx_set_mirror_regs(priv);
++      mutex_unlock(&priv->reg_mutex);
++
++      return 0;
++}
++
++static int
++ar40xx_sw_get_mirror_tx_enable(struct switch_dev *dev,
++                             const struct switch_attr *attr,
++                             struct switch_val *val)
++{
++      struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
++
++      mutex_lock(&priv->reg_mutex);
++      val->value.i = priv->mirror_tx;
++      mutex_unlock(&priv->reg_mutex);
++      return 0;
++}
++
++static int
++ar40xx_sw_set_mirror_monitor_port(struct switch_dev *dev,
++                                const struct switch_attr *attr,
++                                struct switch_val *val)
++{
++      struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
++
++      mutex_lock(&priv->reg_mutex);
++      priv->monitor_port = val->value.i;
++      ar40xx_set_mirror_regs(priv);
++      mutex_unlock(&priv->reg_mutex);
++
++      return 0;
++}
++
++static int
++ar40xx_sw_get_mirror_monitor_port(struct switch_dev *dev,
++                                const struct switch_attr *attr,
++                                struct switch_val *val)
++{
++      struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
++
++      mutex_lock(&priv->reg_mutex);
++      val->value.i = priv->monitor_port;
++      mutex_unlock(&priv->reg_mutex);
++      return 0;
++}
++
++static int
++ar40xx_sw_set_mirror_source_port(struct switch_dev *dev,
++                               const struct switch_attr *attr,
++                               struct switch_val *val)
++{
++      struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
++
++      mutex_lock(&priv->reg_mutex);
++      priv->source_port = val->value.i;
++      ar40xx_set_mirror_regs(priv);
++      mutex_unlock(&priv->reg_mutex);
++
++      return 0;
++}
++
++static int
++ar40xx_sw_get_mirror_source_port(struct switch_dev *dev,
++                               const struct switch_attr *attr,
++                               struct switch_val *val)
++{
++      struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
++
++      mutex_lock(&priv->reg_mutex);
++      val->value.i = priv->source_port;
++      mutex_unlock(&priv->reg_mutex);
++      return 0;
++}
++
++static int
++ar40xx_sw_set_linkdown(struct switch_dev *dev,
++                     const struct switch_attr *attr,
++                     struct switch_val *val)
++{
++      struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
++
++      if (val->value.i == 1)
++              ar40xx_port_phy_linkdown(priv);
++      else
++              ar40xx_phy_init(priv);
++
++      return 0;
++}
++
++static int
++ar40xx_sw_set_port_reset_mib(struct switch_dev *dev,
++                           const struct switch_attr *attr,
++                           struct switch_val *val)
++{
++      struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
++      int port;
++      int ret;
++
++      port = val->port_vlan;
++      if (port >= dev->ports)
++              return -EINVAL;
++
++      mutex_lock(&priv->mib_lock);
++      ret = ar40xx_mib_capture(priv);
++      if (ret)
++              goto unlock;
++
++      ar40xx_mib_fetch_port_stat(priv, port, true);
++
++unlock:
++      mutex_unlock(&priv->mib_lock);
++      return ret;
++}
++
++static int
++ar40xx_sw_get_port_mib(struct switch_dev *dev,
++                     const struct switch_attr *attr,
++                     struct switch_val *val)
++{
++      struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
++      u64 *mib_stats;
++      int port;
++      int ret;
++      char *buf = priv->buf;
++      int i, len = 0;
++      u32 num_mibs = ARRAY_SIZE(ar40xx_mibs);
++
++      port = val->port_vlan;
++      if (port >= dev->ports)
++              return -EINVAL;
++
++      mutex_lock(&priv->mib_lock);
++      ret = ar40xx_mib_capture(priv);
++      if (ret)
++              goto unlock;
++
++      ar40xx_mib_fetch_port_stat(priv, port, false);
++
++      len += snprintf(buf + len, sizeof(priv->buf) - len,
++                      "Port %d MIB counters\n",
++                      port);
++
++      mib_stats = &priv->mib_stats[port * num_mibs];
++      for (i = 0; i < num_mibs; i++)
++              len += snprintf(buf + len, sizeof(priv->buf) - len,
++                              "%-12s: %llu\n",
++                              ar40xx_mibs[i].name,
++                              mib_stats[i]);
++
++      val->value.s = buf;
++      val->len = len;
++
++unlock:
++      mutex_unlock(&priv->mib_lock);
++      return ret;
++}
++
++static int
++ar40xx_sw_set_vid(struct switch_dev *dev, const struct switch_attr *attr,
++                struct switch_val *val)
++{
++      struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
++
++      priv->vlan_id[val->port_vlan] = val->value.i;
++      return 0;
++}
++
++static int
++ar40xx_sw_get_vid(struct switch_dev *dev, const struct switch_attr *attr,
++                struct switch_val *val)
++{
++      struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
++
++      val->value.i = priv->vlan_id[val->port_vlan];
++      return 0;
++}
++
++static int
++ar40xx_sw_get_pvid(struct switch_dev *dev, int port, int *vlan)
++{
++      struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
++      *vlan = priv->pvid[port];
++      return 0;
++}
++
++static int
++ar40xx_sw_set_pvid(struct switch_dev *dev, int port, int vlan)
++{
++      struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
++
++      /* make sure no invalid PVIDs get set */
++      if (vlan >= dev->vlans)
++              return -EINVAL;
++
++      priv->pvid[port] = vlan;
++      return 0;
++}
++
++static void
++ar40xx_read_port_link(struct ar40xx_priv *priv, int port,
++                    struct switch_port_link *link)
++{
++      u32 status;
++      u32 speed;
++
++      memset(link, 0, sizeof(*link));
++
++      status = ar40xx_read(priv, AR40XX_REG_PORT_STATUS(port));
++
++      link->aneg = !!(status & AR40XX_PORT_AUTO_LINK_EN);
++      if (link->aneg || (port != AR40XX_PORT_CPU))
++              link->link = !!(status & AR40XX_PORT_STATUS_LINK_UP);
++      else
++              link->link = true;
++
++      if (!link->link)
++              return;
++
++      link->duplex = !!(status & AR40XX_PORT_DUPLEX);
++      link->tx_flow = !!(status & AR40XX_PORT_STATUS_TXFLOW);
++      link->rx_flow = !!(status & AR40XX_PORT_STATUS_RXFLOW);
++
++      speed = (status & AR40XX_PORT_SPEED) >>
++               AR40XX_PORT_STATUS_SPEED_S;
++
++      switch (speed) {
++      case AR40XX_PORT_SPEED_10M:
++              link->speed = SWITCH_PORT_SPEED_10;
++              break;
++      case AR40XX_PORT_SPEED_100M:
++              link->speed = SWITCH_PORT_SPEED_100;
++              break;
++      case AR40XX_PORT_SPEED_1000M:
++              link->speed = SWITCH_PORT_SPEED_1000;
++              break;
++      default:
++              link->speed = SWITCH_PORT_SPEED_UNKNOWN;
++              break;
++      }
++}
++
++static int
++ar40xx_sw_get_port_link(struct switch_dev *dev, int port,
++                      struct switch_port_link *link)
++{
++      struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
++
++      ar40xx_read_port_link(priv, port, link);
++      return 0;
++}
++
++static const struct switch_attr ar40xx_sw_attr_globals[] = {
++      {
++              .type = SWITCH_TYPE_INT,
++              .name = "enable_vlan",
++              .description = "Enable VLAN mode",
++              .set = ar40xx_sw_set_vlan,
++              .get = ar40xx_sw_get_vlan,
++              .max = 1
++      },
++      {
++              .type = SWITCH_TYPE_NOVAL,
++              .name = "reset_mibs",
++              .description = "Reset all MIB counters",
++              .set = ar40xx_sw_set_reset_mibs,
++      },
++      {
++              .type = SWITCH_TYPE_INT,
++              .name = "enable_mirror_rx",
++              .description = "Enable mirroring of RX packets",
++              .set = ar40xx_sw_set_mirror_rx_enable,
++              .get = ar40xx_sw_get_mirror_rx_enable,
++              .max = 1
++      },
++      {
++              .type = SWITCH_TYPE_INT,
++              .name = "enable_mirror_tx",
++              .description = "Enable mirroring of TX packets",
++              .set = ar40xx_sw_set_mirror_tx_enable,
++              .get = ar40xx_sw_get_mirror_tx_enable,
++              .max = 1
++      },
++      {
++              .type = SWITCH_TYPE_INT,
++              .name = "mirror_monitor_port",
++              .description = "Mirror monitor port",
++              .set = ar40xx_sw_set_mirror_monitor_port,
++              .get = ar40xx_sw_get_mirror_monitor_port,
++              .max = AR40XX_NUM_PORTS - 1
++      },
++      {
++              .type = SWITCH_TYPE_INT,
++              .name = "mirror_source_port",
++              .description = "Mirror source port",
++              .set = ar40xx_sw_set_mirror_source_port,
++              .get = ar40xx_sw_get_mirror_source_port,
++              .max = AR40XX_NUM_PORTS - 1
++      },
++      {
++              .type = SWITCH_TYPE_INT,
++              .name = "linkdown",
++              .description = "Link down all the PHYs",
++              .set = ar40xx_sw_set_linkdown,
++              .max = 1
++      },
++};
++
++static const struct switch_attr ar40xx_sw_attr_port[] = {
++      {
++              .type = SWITCH_TYPE_NOVAL,
++              .name = "reset_mib",
++              .description = "Reset single port MIB counters",
++              .set = ar40xx_sw_set_port_reset_mib,
++      },
++      {
++              .type = SWITCH_TYPE_STRING,
++              .name = "mib",
++              .description = "Get port's MIB counters",
++              .set = NULL,
++              .get = ar40xx_sw_get_port_mib,
++      },
++};
++
++const struct switch_attr ar40xx_sw_attr_vlan[] = {
++      {
++              .type = SWITCH_TYPE_INT,
++              .name = "vid",
++              .description = "VLAN ID (0-4094)",
++              .set = ar40xx_sw_set_vid,
++              .get = ar40xx_sw_get_vid,
++              .max = 4094,
++      },
++};
++
++/* End of swconfig support */
++
++static int
++ar40xx_wait_bit(struct ar40xx_priv *priv, int reg, u32 mask, u32 val)
++{
++      int timeout = 20;
++      u32 t;
++
++      while (1) {
++              t = ar40xx_read(priv, reg);
++              if ((t & mask) == val)
++                      return 0;
++
++              if (timeout-- <= 0)
++                      break;
++
++              usleep_range(10, 20);
++      }
++
++      pr_err("ar40xx: timeout for reg %08x: %08x & %08x != %08x\n",
++             (unsigned int)reg, t, mask, val);
++      return -ETIMEDOUT;
++}
++
++static int
++ar40xx_atu_flush(struct ar40xx_priv *priv)
++{
++      int ret;
++
++      ret = ar40xx_wait_bit(priv, AR40XX_REG_ATU_FUNC,
++                            AR40XX_ATU_FUNC_BUSY, 0);
++      if (!ret)
++              ar40xx_write(priv, AR40XX_REG_ATU_FUNC,
++                           AR40XX_ATU_FUNC_OP_FLUSH |
++                           AR40XX_ATU_FUNC_BUSY);
++
++      return ret;
++}
++
++static void
++ar40xx_ess_reset(struct ar40xx_priv *priv)
++{
++      reset_control_assert(priv->ess_rst);
++      mdelay(10);
++      reset_control_deassert(priv->ess_rst);
++      /* Wait for all inner tables to finish initialization.
++       * This takes 5~10 ms.
++       */
++      mdelay(10);
++
++      pr_info("ESS reset ok!\n");
++}
++
++/* Start of psgmii self test */
++
++static void
++ar40xx_malibu_psgmii_ess_reset(struct ar40xx_priv *priv)
++{
++      u32 n;
++      struct mii_bus *bus = priv->mii_bus;
++      /* fix phy psgmii RX 20bit */
++      mdiobus_write(bus, 5, 0x0, 0x005b);
++      /* reset phy psgmii */
++      mdiobus_write(bus, 5, 0x0, 0x001b);
++      /* release reset phy psgmii */
++      mdiobus_write(bus, 5, 0x0, 0x005b);
++
++      for (n = 0; n < AR40XX_PSGMII_CALB_NUM; n++) {
++              u16 status;
++
++              status = ar40xx_phy_mmd_read(priv, 5, 1, 0x28);
++              if (status & BIT(0))
++                      break;
++              /* Polling interval to check whether the PSGMII PLL in
++               * Malibu is ready; the worst-case wait is 8.67 ms for a
++               * 25 MHz reference clock:
++               * [512 + (128 + 2048) * 49] * 80 ns + 100 us
++               */
++              mdelay(2);
++      }
++
++      /* end of Malibu PSGMII calibration-done check */
++
++      /* freeze phy psgmii RX CDR */
++      mdiobus_write(bus, 5, 0x1a, 0x2230);
++
++      ar40xx_ess_reset(priv);
++
++      /* start of ESS PSGMII calibration-done check */
++      for (n = 0; n < AR40XX_PSGMII_CALB_NUM; n++) {
++              u32 status;
++
++              status = ar40xx_psgmii_read(priv, 0xa0);
++              if (status & BIT(0))
++                      break;
++              /* Polling interval to check PSGMII PLL in ESS is ready */
++              mdelay(2);
++      }
++
++      /* end of Dakota PSGMII calibration-done check */
++
++      /* release phy psgmii RX CDR */
++      mdiobus_write(bus, 5, 0x1a, 0x3230);
++      /* release phy psgmii RX 20bit */
++      mdiobus_write(bus, 5, 0x0, 0x005f);
++}
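++
++/* Shape of the handshake above, step by step: pin the Malibu PSGMII RX
++ * to 20-bit mode and pulse its reset via register 0 of PHY 5, poll
++ * MMD1 register 0x28 bit 0 for the PHY-side calibration, freeze the RX
++ * CDR, reset the ESS block, poll the SoC-side PSGMII status register
++ * 0xa0 bit 0, then release the CDR and the 20-bit override in reverse
++ * order.
++ */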
++
++static void
++ar40xx_psgmii_single_phy_testing(struct ar40xx_priv *priv, int phy)
++{
++      int j;
++      u32 tx_ok, tx_error;
++      u32 rx_ok, rx_error;
++      u32 tx_ok_high16;
++      u32 rx_ok_high16;
++      u32 tx_all_ok, rx_all_ok;
++      struct mii_bus *bus = priv->mii_bus;
++
++      mdiobus_write(bus, phy, 0x0, 0x9000);
++      mdiobus_write(bus, phy, 0x0, 0x4140);
++
++      for (j = 0; j < AR40XX_PSGMII_CALB_NUM; j++) {
++              u16 status;
++
++              status = mdiobus_read(bus, phy, 0x11);
++              if (status & AR40XX_PHY_SPEC_STATUS_LINK)
++                      break;
++              /* Polling interval to check whether the PHY link is up.
++               * maxwait_timer: 750 ms +/- 10 ms
++               * minwait_timer: 1 us +/- 0.1 us
++               * The wait time lies between minwait_timer and
++               * maxwait_timer; see IEEE 802.3 section 40.4.5.2.
++               */
++              mdelay(8);
++      }
++
++      /* enable check */
++      ar40xx_phy_mmd_write(priv, phy, 7, 0x8029, 0x0000);
++      ar40xx_phy_mmd_write(priv, phy, 7, 0x8029, 0x0003);
++
++      /* start traffic */
++      ar40xx_phy_mmd_write(priv, phy, 7, 0x8020, 0xa000);
++      /* Wait for all traffic to end:
++       * 4096 (packets) * 1524 (bytes) * 8 ns (125 MHz) = 49.9 ms
++       */
++      mdelay(50);
++
++      /* check counter */
++      tx_ok = ar40xx_phy_mmd_read(priv, phy, 7, 0x802e);
++      tx_ok_high16 = ar40xx_phy_mmd_read(priv, phy, 7, 0x802d);
++      tx_error = ar40xx_phy_mmd_read(priv, phy, 7, 0x802f);
++      rx_ok = ar40xx_phy_mmd_read(priv, phy, 7, 0x802b);
++      rx_ok_high16 = ar40xx_phy_mmd_read(priv, phy, 7, 0x802a);
++      rx_error = ar40xx_phy_mmd_read(priv, phy, 7, 0x802c);
++      tx_all_ok = tx_ok + (tx_ok_high16 << 16);
++      rx_all_ok = rx_ok + (rx_ok_high16 << 16);
++      if (tx_all_ok == 0x1000 && tx_error == 0) {
++              /* success */
++              priv->phy_t_status &= (~BIT(phy));
++      } else {
++              pr_info("PHY %d single test PSGMII issue happen!\n", phy);
++              priv->phy_t_status |= BIT(phy);
++      }
++
++      mdiobus_write(bus, phy, 0x0, 0x1840);
++}
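++
++/* Pass criterion for the single-PHY test above: the generator is armed
++ * for 0x1000 (4096) packets, so a PHY passes when tx_all_ok, assembled
++ * from the 16-bit counter pair 0x802d:0x802e, reads exactly 0x1000 with
++ * zero TX errors. The RX counters are read but not part of the check;
++ * failures are recorded per PHY in the phy_t_status bitmap.
++ */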
++
++static void
++ar40xx_psgmii_all_phy_testing(struct ar40xx_priv *priv)
++{
++      int phy, j;
++      struct mii_bus *bus = priv->mii_bus;
++
++      mdiobus_write(bus, 0x1f, 0x0, 0x9000);
++      mdiobus_write(bus, 0x1f, 0x0, 0x4140);
++
++      for (j = 0; j < AR40XX_PSGMII_CALB_NUM; j++) {
++              for (phy = 0; phy < AR40XX_NUM_PORTS - 1; phy++) {
++                      u16 status;
++
++                      status = mdiobus_read(bus, phy, 0x11);
++                      if (!(status & BIT(10)))
++                              break;
++              }
++
++              if (phy >= (AR40XX_NUM_PORTS - 1))
++                      break;
++              /* Polling interval to check whether the PHY link is up */
++              mdelay(8);
++      }
++      /* enable check */
++      ar40xx_phy_mmd_write(priv, 0x1f, 7, 0x8029, 0x0000);
++      ar40xx_phy_mmd_write(priv, 0x1f, 7, 0x8029, 0x0003);
++
++      /* start traffic */
++      ar40xx_phy_mmd_write(priv, 0x1f, 7, 0x8020, 0xa000);
++      /* Wait for all traffic to end:
++       * 4096 (packets) * 1524 (bytes) * 8 ns (125 MHz) = 49.9 ms
++       */
++      mdelay(50);
++
++      for (phy = 0; phy < AR40XX_NUM_PORTS - 1; phy++) {
++              u32 tx_ok, tx_error;
++              u32 rx_ok, rx_error;
++              u32 tx_ok_high16;
++              u32 rx_ok_high16;
++              u32 tx_all_ok, rx_all_ok;
++
++              /* check counter */
++              tx_ok = ar40xx_phy_mmd_read(priv, phy, 7, 0x802e);
++              tx_ok_high16 = ar40xx_phy_mmd_read(priv, phy, 7, 0x802d);
++              tx_error = ar40xx_phy_mmd_read(priv, phy, 7, 0x802f);
++              rx_ok = ar40xx_phy_mmd_read(priv, phy, 7, 0x802b);
++              rx_ok_high16 = ar40xx_phy_mmd_read(priv, phy, 7, 0x802a);
++              rx_error = ar40xx_phy_mmd_read(priv, phy, 7, 0x802c);
++              tx_all_ok = tx_ok + (tx_ok_high16<<16);
++              rx_all_ok = rx_ok + (rx_ok_high16<<16);
++              if (tx_all_ok == 0x1000 && tx_error == 0) {
++                      /* success */
++                      priv->phy_t_status &= ~BIT(phy + 8);
++              } else {
++                      pr_info("PHY%d test see issue!\n", phy);
++                      priv->phy_t_status |= BIT(phy + 8);
++              }
++      }
++
++      pr_debug("PHY all test 0x%x \r\n", priv->phy_t_status);
++}
++
++void
++ar40xx_psgmii_self_test(struct ar40xx_priv *priv)
++{
++      u32 i, phy;
++      struct mii_bus *bus = priv->mii_bus;
++
++      ar40xx_malibu_psgmii_ess_reset(priv);
++
++      /* switch to access MII reg for copper */
++      mdiobus_write(bus, 4, 0x1f, 0x8500);
++      for (phy = 0; phy < AR40XX_NUM_PORTS - 1; phy++) {
++              /* enable phy mdio broadcast write */
++              ar40xx_phy_mmd_write(priv, phy, 7, 0x8028, 0x801f);
++      }
++      /* force no link by power down */
++      mdiobus_write(bus, 0x1f, 0x0, 0x1840);
++      /* packet number */
++      ar40xx_phy_mmd_write(priv, 0x1f, 7, 0x8021, 0x1000);
++      ar40xx_phy_mmd_write(priv, 0x1f, 7, 0x8062, 0x05e0);
++
++      /* fix mdi status */
++      mdiobus_write(bus, 0x1f, 0x10, 0x6800);
++      for (i = 0; i < AR40XX_PSGMII_CALB_NUM; i++) {
++              priv->phy_t_status = 0;
++
++              for (phy = 0; phy < AR40XX_NUM_PORTS - 1; phy++) {
++                      ar40xx_rmw(priv, AR40XX_REG_PORT_LOOKUP(phy + 1),
++                              AR40XX_PORT_LOOKUP_LOOPBACK,
++                              AR40XX_PORT_LOOKUP_LOOPBACK);
++              }
++
++              for (phy = 0; phy < AR40XX_NUM_PORTS - 1; phy++)
++                      ar40xx_psgmii_single_phy_testing(priv, phy);
++
++              ar40xx_psgmii_all_phy_testing(priv);
++
++              if (priv->phy_t_status)
++                      ar40xx_malibu_psgmii_ess_reset(priv);
++              else
++                      break;
++      }
++
++      if (i >= AR40XX_PSGMII_CALB_NUM)
++              pr_info("PSGMII cannot recover\n");
++      else
++              pr_debug("PSGMII recovered after %d times reset\n", i);
++
++      /* configuration recover */
++      /* packet number */
++      ar40xx_phy_mmd_write(priv, 0x1f, 7, 0x8021, 0x0);
++      /* disable check */
++      ar40xx_phy_mmd_write(priv, 0x1f, 7, 0x8029, 0x0);
++      /* disable traffic */
++      ar40xx_phy_mmd_write(priv, 0x1f, 7, 0x8020, 0x0);
++}
++
++void
++ar40xx_psgmii_self_test_clean(struct ar40xx_priv *priv)
++{
++      int phy;
++      struct mii_bus *bus = priv->mii_bus;
++
++      /* disable phy internal loopback */
++      mdiobus_write(bus, 0x1f, 0x10, 0x6860);
++      mdiobus_write(bus, 0x1f, 0x0, 0x9040);
++
++      for (phy = 0; phy < AR40XX_NUM_PORTS - 1; phy++) {
++              /* disable mac loopback */
++              ar40xx_rmw(priv, AR40XX_REG_PORT_LOOKUP(phy + 1),
++                              AR40XX_PORT_LOOKUP_LOOPBACK, 0);
++              /* disable phy mdio broadcast write */
++              ar40xx_phy_mmd_write(priv, phy, 7, 0x8028, 0x001f);
++      }
++
++      /* clear fdb entry */
++      ar40xx_atu_flush(priv);
++}
++
++/* End of psgmii self test */
++
++static void
++ar40xx_mac_mode_init(struct ar40xx_priv *priv, u32 mode)
++{
++      if (mode == PORT_WRAPPER_PSGMII) {
++              ar40xx_psgmii_write(priv, AR40XX_PSGMII_MODE_CONTROL, 0x2200);
++              ar40xx_psgmii_write(priv, AR40XX_PSGMIIPHY_TX_CONTROL, 0x8380);
++      }
++}
++
++static
++int ar40xx_cpuport_setup(struct ar40xx_priv *priv)
++{
++      u32 t;
++
++      t = AR40XX_PORT_STATUS_TXFLOW |
++           AR40XX_PORT_STATUS_RXFLOW |
++           AR40XX_PORT_TXHALF_FLOW |
++           AR40XX_PORT_DUPLEX |
++           AR40XX_PORT_SPEED_1000M;
++      ar40xx_write(priv, AR40XX_REG_PORT_STATUS(0), t);
++      usleep_range(10, 20);
++
++      t |= AR40XX_PORT_TX_EN |
++             AR40XX_PORT_RX_EN;
++      ar40xx_write(priv, AR40XX_REG_PORT_STATUS(0), t);
++
++      return 0;
++}
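++
++/* The CPU port comes up in two writes: speed, duplex and flow control
++ * are forced first, and TX/RX enable is OR-ed in only after a short
++ * settle delay -- presumably so the MAC never forwards with
++ * half-programmed link parameters.
++ */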
++
++static void
++ar40xx_init_port(struct ar40xx_priv *priv, int port)
++{
++      u32 t;
++
++      ar40xx_rmw(priv, AR40XX_REG_PORT_STATUS(port),
++                      AR40XX_PORT_AUTO_LINK_EN, 0);
++
++      ar40xx_write(priv, AR40XX_REG_PORT_HEADER(port), 0);
++
++      ar40xx_write(priv, AR40XX_REG_PORT_VLAN0(port), 0);
++
++      t = AR40XX_PORT_VLAN1_OUT_MODE_UNTOUCH << AR40XX_PORT_VLAN1_OUT_MODE_S;
++      ar40xx_write(priv, AR40XX_REG_PORT_VLAN1(port), t);
++
++      t = AR40XX_PORT_LOOKUP_LEARN;
++      t |= AR40XX_PORT_STATE_FORWARD << AR40XX_PORT_LOOKUP_STATE_S;
++      ar40xx_write(priv, AR40XX_REG_PORT_LOOKUP(port), t);
++}
++
++void
++ar40xx_init_globals(struct ar40xx_priv *priv)
++{
++      u32 t;
++
++      /* enable CPU port and disable mirror port */
++      t = AR40XX_FWD_CTRL0_CPU_PORT_EN |
++          AR40XX_FWD_CTRL0_MIRROR_PORT;
++      ar40xx_write(priv, AR40XX_REG_FWD_CTRL0, t);
++
++      /* forward multicast and broadcast frames to CPU */
++      t = (AR40XX_PORTS_ALL << AR40XX_FWD_CTRL1_UC_FLOOD_S) |
++          (AR40XX_PORTS_ALL << AR40XX_FWD_CTRL1_MC_FLOOD_S) |
++          (AR40XX_PORTS_ALL << AR40XX_FWD_CTRL1_BC_FLOOD_S);
++      ar40xx_write(priv, AR40XX_REG_FWD_CTRL1, t);
++
++      /* enable jumbo frames */
++      ar40xx_rmw(priv, AR40XX_REG_MAX_FRAME_SIZE,
++                 AR40XX_MAX_FRAME_SIZE_MTU, 9018 + 8 + 2);
++
++      /* Enable MIB counters */
++      ar40xx_rmw(priv, AR40XX_REG_MODULE_EN, 0,
++                 AR40XX_MODULE_EN_MIB);
++
++      /* Disable AZ */
++      ar40xx_write(priv, AR40XX_REG_EEE_CTRL, 0);
++
++      /* set flow control threshold for CPU port */
++      t = (AR40XX_PORT0_FC_THRESH_ON_DFLT << 16) |
++            AR40XX_PORT0_FC_THRESH_OFF_DFLT;
++      ar40xx_write(priv, AR40XX_REG_PORT_FLOWCTRL_THRESH(0), t);
++}
++
++static void
++ar40xx_malibu_init(struct ar40xx_priv *priv)
++{
++      int i;
++      struct mii_bus *bus;
++      u16 val;
++
++      bus = priv->mii_bus;
++
++      /* WAR (workaround) to enable the AZ transmit capability */
++      ar40xx_phy_mmd_write(priv, AR40XX_PSGMII_ID, 1,
++                           AR40XX_MALIBU_PSGMII_MODE_CTRL,
++                           AR40XX_MALIBU_PHY_PSGMII_MODE_CTRL_ADJUST_VAL);
++      for (i = 0; i < AR40XX_NUM_PORTS - 1; i++) {
++              /* change malibu control_dac */
++              val = ar40xx_phy_mmd_read(priv, i, 7,
++                                        AR40XX_MALIBU_PHY_MMD7_DAC_CTRL);
++              val &= ~AR40XX_MALIBU_DAC_CTRL_MASK;
++              val |= AR40XX_MALIBU_DAC_CTRL_VALUE;
++              ar40xx_phy_mmd_write(priv, i, 7,
++                                   AR40XX_MALIBU_PHY_MMD7_DAC_CTRL, val);
++              if (i == AR40XX_MALIBU_PHY_LAST_ADDR) {
++                      /* avoid going into hibernation */
++                      val = ar40xx_phy_mmd_read(priv, i, 3,
++                                                AR40XX_MALIBU_PHY_RLP_CTRL);
++                      val &= (~(1<<1));
++                      ar40xx_phy_mmd_write(priv, i, 3,
++                                           AR40XX_MALIBU_PHY_RLP_CTRL, val);
++              }
++      }
++
++      /* adjust psgmii serdes tx amp */
++      mdiobus_write(bus, AR40XX_PSGMII_ID, AR40XX_PSGMII_TX_DRIVER_1_CTRL,
++                    AR40XX_MALIBU_PHY_PSGMII_REDUCE_SERDES_TX_AMP);
++}
++
++static int
++ar40xx_hw_init(struct ar40xx_priv *priv)
++{
++      u32 i;
++
++      ar40xx_ess_reset(priv);
++
++      if (priv->mii_bus)
++              ar40xx_malibu_init(priv);
++      else
++              return -1;
++
++      ar40xx_psgmii_self_test(priv);
++      ar40xx_psgmii_self_test_clean(priv);
++
++      ar40xx_mac_mode_init(priv, priv->mac_mode);
++
++      for (i = 0; i < priv->dev.ports; i++)
++              ar40xx_init_port(priv, i);
++
++      ar40xx_init_globals(priv);
++
++      return 0;
++}
++
++/* Start of qm error WAR */
++
++static
++int ar40xx_force_1g_full(struct ar40xx_priv *priv, u32 port_id)
++{
++      u32 reg;
++
++      if (port_id < 0 || port_id > 6)
++              return -1;
++
++      reg = AR40XX_REG_PORT_STATUS(port_id);
++      return ar40xx_rmw(priv, reg, AR40XX_PORT_SPEED,
++                      (AR40XX_PORT_SPEED_1000M | AR40XX_PORT_DUPLEX));
++}
++
++static
++int ar40xx_get_qm_status(struct ar40xx_priv *priv,
++                       u32 port_id, u32 *qm_buffer_err)
++{
++      u32 reg;
++      u32 qm_val;
++
++      if (port_id < 1 || port_id > 5) {
++              *qm_buffer_err = 0;
++              return -1;
++      }
++
++      if (port_id < 4) {
++              reg = AR40XX_REG_QM_PORT0_3_QNUM;
++              ar40xx_write(priv, AR40XX_REG_QM_DEBUG_ADDR, reg);
++              qm_val = ar40xx_read(priv, AR40XX_REG_QM_DEBUG_VALUE);
++              /* every 8 bits for each port */
++              *qm_buffer_err = (qm_val >> (port_id * 8)) & 0xFF;
++      } else {
++              reg = AR40XX_REG_QM_PORT4_6_QNUM;
++              ar40xx_write(priv, AR40XX_REG_QM_DEBUG_ADDR, reg);
++              qm_val = ar40xx_read(priv, AR40XX_REG_QM_DEBUG_VALUE);
++              /* every 8 bits for each port */
++              *qm_buffer_err = (qm_val >> ((port_id-4) * 8)) & 0xFF;
++      }
++
++      return 0;
++}
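++
++/* QM queue-depth extraction, worked through: each QNUM debug word packs
++ * one byte per port. For port_id = 5 the second bank is selected, so
++ * the buffer error count is (qm_val >> ((5 - 4) * 8)) & 0xFF, i.e.
++ * bits 15:8 of the AR40XX_REG_QM_PORT4_6_QNUM value.
++ */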
++
++static void
++ar40xx_sw_mac_polling_task(struct ar40xx_priv *priv)
++{
++      static int task_count;
++      u32 i;
++      u32 reg, value;
++      u32 link, speed, duplex;
++      u32 qm_buffer_err;
++      u16 port_phy_status[AR40XX_NUM_PORTS];
++      static u32 qm_err_cnt[AR40XX_NUM_PORTS] = {0, 0, 0, 0, 0, 0};
++      static u32 link_cnt[AR40XX_NUM_PORTS] = {0, 0, 0, 0, 0, 0};
++      struct mii_bus *bus = NULL;
++
++      if (!priv || !priv->mii_bus)
++              return;
++
++      bus = priv->mii_bus;
++
++      ++task_count;
++
++      for (i = 1; i < AR40XX_NUM_PORTS; ++i) {
++              port_phy_status[i] =
++                      mdiobus_read(bus, i-1, AR40XX_PHY_SPEC_STATUS);
++              speed = link = duplex = port_phy_status[i];
++              speed &= AR40XX_PHY_SPEC_STATUS_SPEED;
++              speed >>= 14;
++              link &= AR40XX_PHY_SPEC_STATUS_LINK;
++              link >>= 10;
++              duplex &= AR40XX_PHY_SPEC_STATUS_DUPLEX;
++              duplex >>= 13;
++
++              if (link != priv->ar40xx_port_old_link[i]) {
++                      ++link_cnt[i];
++                      /* Up --> Down */
++                      if ((priv->ar40xx_port_old_link[i] ==
++                                      AR40XX_PORT_LINK_UP) &&
++                          (link == AR40XX_PORT_LINK_DOWN)) {
++                              /* LINK_EN disable(MAC force mode)*/
++                              reg = AR40XX_REG_PORT_STATUS(i);
++                              ar40xx_rmw(priv, reg,
++                                              AR40XX_PORT_AUTO_LINK_EN, 0);
++
++                              /* Check queue buffer */
++                              qm_err_cnt[i] = 0;
++                              ar40xx_get_qm_status(priv, i, &qm_buffer_err);
++                              if (qm_buffer_err) {
++                                      priv->ar40xx_port_qm_buf[i] =
++                                              AR40XX_QM_NOT_EMPTY;
++                              } else {
++                                      u16 phy_val = 0;
++
++                                      priv->ar40xx_port_qm_buf[i] =
++                                              AR40XX_QM_EMPTY;
++                                      ar40xx_force_1g_full(priv, i);
++                                      /* Ref: QCA8337 datasheet. Clearing
++                                       * MANU_CTRL_EN prevents the phy
++                                       * from getting stuck in 100BT
++                                       * mode when bringing up the link.
++                                       */
++                                      ar40xx_phy_dbg_read(priv, i-1,
++                                                          AR40XX_PHY_DEBUG_0,
++                                                          &phy_val);
++                                      phy_val &= (~AR40XX_PHY_MANU_CTRL_EN);
++                                      ar40xx_phy_dbg_write(priv, i-1,
++                                                           AR40XX_PHY_DEBUG_0,
++                                                           phy_val);
++                              }
++                              priv->ar40xx_port_old_link[i] = link;
++                      } else if ((priv->ar40xx_port_old_link[i] ==
++                                              AR40XX_PORT_LINK_DOWN) &&
++                                      (link == AR40XX_PORT_LINK_UP)) {
++                              /* Down --> Up */
++                              if (priv->port_link_up[i] < 1) {
++                                      ++priv->port_link_up[i];
++                              } else {
++                                      /* Change port status */
++                                      reg = AR40XX_REG_PORT_STATUS(i);
++                                      value = ar40xx_read(priv, reg);
++                                      priv->port_link_up[i] = 0;
++
++                                      value &= ~(AR40XX_PORT_DUPLEX |
++                                                 AR40XX_PORT_SPEED);
++                                      value |= speed | (duplex ? BIT(6) : 0);
++                                      ar40xx_write(priv, reg, value);
++                                      /* the clock switch needs this
++                                       * delay to avoid glitches
++                                       */
++                                      usleep_range(100, 200);
++
++                                      value |= AR40XX_PORT_AUTO_LINK_EN;
++                                      ar40xx_write(priv, reg, value);
++                                      /* HW needs this delay to ensure
++                                       * the link is stable before the
++                                       * MAC is enabled
++                                       */
++                                      usleep_range(100, 200);
++
++                                      if (speed == AR40XX_PORT_SPEED_100M) {
++                                              u16 phy_val = 0;
++                                              /* Enable at 100M so that a
++                                               * drop to 10M switches the
++                                               * clock smoothly
++                                               */
++                                              ar40xx_phy_dbg_read(priv, i-1,
++                                                                  0,
++                                                                  &phy_val);
++                                              phy_val |=
++                                                      AR40XX_PHY_MANU_CTRL_EN;
++                                              ar40xx_phy_dbg_write(priv, i-1,
++                                                                   0,
++                                                                   phy_val);
++                                      }
++                                      priv->ar40xx_port_old_link[i] = link;
++                              }
++                      }
++              }
++
++              if (priv->ar40xx_port_qm_buf[i] == AR40XX_QM_NOT_EMPTY) {
++                      /* Check QM */
++                      ar40xx_get_qm_status(priv, i, &qm_buffer_err);
++                      if (qm_buffer_err) {
++                              ++qm_err_cnt[i];
++                      } else {
++                              priv->ar40xx_port_qm_buf[i] =
++                                              AR40XX_QM_EMPTY;
++                              qm_err_cnt[i] = 0;
++                              ar40xx_force_1g_full(priv, i);
++                      }
++              }
++      }
++}
++
++static void
++ar40xx_qm_err_check_work_task(struct work_struct *work)
++{
++      struct ar40xx_priv *priv = container_of(work, struct ar40xx_priv,
++                                      qm_dwork.work);
++
++      mutex_lock(&priv->qm_lock);
++
++      ar40xx_sw_mac_polling_task(priv);
++
++      mutex_unlock(&priv->qm_lock);
++
++      schedule_delayed_work(&priv->qm_dwork,
++                            msecs_to_jiffies(AR40XX_QM_WORK_DELAY));
++}
++
++static int
++ar40xx_qm_err_check_work_start(struct ar40xx_priv *priv)
++{
++      mutex_init(&priv->qm_lock);
++
++      INIT_DELAYED_WORK(&priv->qm_dwork, ar40xx_qm_err_check_work_task);
++
++      schedule_delayed_work(&priv->qm_dwork,
++                            msecs_to_jiffies(AR40XX_QM_WORK_DELAY));
++
++      return 0;
++}
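++
++/* The QM check is a self-rearming delayed work: every run re-queues
++ * itself AR40XX_QM_WORK_DELAY ms out, so the polling continues until
++ * cancel_delayed_work_sync() in ar40xx_remove() stops it.
++ */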
++
++/* End of qm error WAR */
++
++static int
++ar40xx_vlan_init(struct ar40xx_priv *priv)
++{
++      int port;
++      unsigned long bmp;
++
++      /* enable VLAN by default */
++      priv->vlan = 1;
++      priv->vlan_table[AR40XX_LAN_VLAN] = priv->cpu_bmp | priv->lan_bmp;
++      priv->vlan_table[AR40XX_WAN_VLAN] = priv->cpu_bmp | priv->wan_bmp;
++      priv->vlan_tagged = priv->cpu_bmp;
++      bmp = priv->lan_bmp;
++      for_each_set_bit(port, &bmp, AR40XX_NUM_PORTS)
++                      priv->pvid[port] = AR40XX_LAN_VLAN;
++
++      bmp = priv->wan_bmp;
++      for_each_set_bit(port, &bmp, AR40XX_NUM_PORTS)
++                      priv->pvid[port] = AR40XX_WAN_VLAN;
++
++      return 0;
++}
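++
++/* Worked example with hypothetical DTS bitmaps cpu_bmp = 0x01,
++ * lan_bmp = 0x1e and wan_bmp = 0x20: vlan_table[AR40XX_LAN_VLAN]
++ * becomes 0x1f (CPU plus ports 1-4), vlan_table[AR40XX_WAN_VLAN]
++ * becomes 0x21 (CPU plus port 5), only the CPU port is tagged, and the
++ * PVIDs of ports 1-4 and port 5 default to the LAN and WAN VLAN
++ * respectively.
++ */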
++
++static void
++ar40xx_mib_work_func(struct work_struct *work)
++{
++      struct ar40xx_priv *priv;
++      int err;
++
++      priv = container_of(work, struct ar40xx_priv, mib_work.work);
++
++      mutex_lock(&priv->mib_lock);
++
++      err = ar40xx_mib_capture(priv);
++      if (err)
++              goto next_port;
++
++      ar40xx_mib_fetch_port_stat(priv, priv->mib_next_port, false);
++
++next_port:
++      priv->mib_next_port++;
++      if (priv->mib_next_port >= priv->dev.ports)
++              priv->mib_next_port = 0;
++
++      mutex_unlock(&priv->mib_lock);
++
++      schedule_delayed_work(&priv->mib_work,
++                            msecs_to_jiffies(AR40XX_MIB_WORK_DELAY));
++}
++
++static void
++ar40xx_setup_port(struct ar40xx_priv *priv, int port, u32 members)
++{
++      u32 t;
++      u32 egress, ingress;
++      u32 pvid = priv->vlan_id[priv->pvid[port]];
++
++      if (priv->vlan) {
++              egress = AR40XX_PORT_VLAN1_OUT_MODE_UNMOD;
++              ingress = AR40XX_IN_SECURE;
++      } else {
++              egress = AR40XX_PORT_VLAN1_OUT_MODE_UNTOUCH;
++              ingress = AR40XX_IN_PORT_ONLY;
++      }
++
++      t = pvid << AR40XX_PORT_VLAN0_DEF_SVID_S;
++      t |= pvid << AR40XX_PORT_VLAN0_DEF_CVID_S;
++      ar40xx_write(priv, AR40XX_REG_PORT_VLAN0(port), t);
++
++      t = AR40XX_PORT_VLAN1_PORT_VLAN_PROP;
++      t |= egress << AR40XX_PORT_VLAN1_OUT_MODE_S;
++      ar40xx_write(priv, AR40XX_REG_PORT_VLAN1(port), t);
++
++      t = members;
++      t |= AR40XX_PORT_LOOKUP_LEARN;
++      t |= ingress << AR40XX_PORT_LOOKUP_IN_MODE_S;
++      t |= AR40XX_PORT_STATE_FORWARD << AR40XX_PORT_LOOKUP_STATE_S;
++      ar40xx_write(priv, AR40XX_REG_PORT_LOOKUP(port), t);
++}
++
++static void
++ar40xx_vtu_op(struct ar40xx_priv *priv, u32 op, u32 val)
++{
++      if (ar40xx_wait_bit(priv, AR40XX_REG_VTU_FUNC1,
++                          AR40XX_VTU_FUNC1_BUSY, 0))
++              return;
++
++      if ((op & AR40XX_VTU_FUNC1_OP) == AR40XX_VTU_FUNC1_OP_LOAD)
++              ar40xx_write(priv, AR40XX_REG_VTU_FUNC0, val);
++
++      op |= AR40XX_VTU_FUNC1_BUSY;
++      ar40xx_write(priv, AR40XX_REG_VTU_FUNC1, op);
++}
++
++static void
++ar40xx_vtu_load_vlan(struct ar40xx_priv *priv, u32 vid, u32 port_mask)
++{
++      u32 op;
++      u32 val;
++      int i;
++
++      op = AR40XX_VTU_FUNC1_OP_LOAD | (vid << AR40XX_VTU_FUNC1_VID_S);
++      val = AR40XX_VTU_FUNC0_VALID | AR40XX_VTU_FUNC0_IVL;
++      for (i = 0; i < AR40XX_NUM_PORTS; i++) {
++              u32 mode;
++
++              if ((port_mask & BIT(i)) == 0)
++                      mode = AR40XX_VTU_FUNC0_EG_MODE_NOT;
++              else if (priv->vlan == 0)
++                      mode = AR40XX_VTU_FUNC0_EG_MODE_KEEP;
++              else if ((priv->vlan_tagged & BIT(i)) ||
++                       (priv->vlan_id[priv->pvid[i]] != vid))
++                      mode = AR40XX_VTU_FUNC0_EG_MODE_TAG;
++              else
++                      mode = AR40XX_VTU_FUNC0_EG_MODE_UNTAG;
++
++              val |= mode << AR40XX_VTU_FUNC0_EG_MODE_S(i);
++      }
++      ar40xx_vtu_op(priv, op, val);
++}
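++
++/* Each VTU entry encodes a per-port egress mode: NOT for non-members,
++ * KEEP when VLAN is globally disabled, TAG for tagged members and for
++ * ports whose PVID resolves to a different VID, and UNTAG for untagged
++ * members on their own PVID VLAN.
++ */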
++
++static void
++ar40xx_vtu_flush(struct ar40xx_priv *priv)
++{
++      ar40xx_vtu_op(priv, AR40XX_VTU_FUNC1_OP_FLUSH, 0);
++}
++
++static int
++ar40xx_sw_hw_apply(struct switch_dev *dev)
++{
++      struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
++      u8 portmask[AR40XX_NUM_PORTS];
++      int i, j;
++
++      mutex_lock(&priv->reg_mutex);
++      /* flush all vlan entries */
++      ar40xx_vtu_flush(priv);
++
++      memset(portmask, 0, sizeof(portmask));
++      if (priv->vlan) {
++              for (j = 0; j < AR40XX_MAX_VLANS; j++) {
++                      u8 vp = priv->vlan_table[j];
++
++                      if (!vp)
++                              continue;
++
++                      for (i = 0; i < dev->ports; i++) {
++                              u8 mask = BIT(i);
++
++                              if (vp & mask)
++                                      portmask[i] |= vp & ~mask;
++                      }
++
++                      ar40xx_vtu_load_vlan(priv, priv->vlan_id[j],
++                                           priv->vlan_table[j]);
++              }
++      } else {
++              /* 8021q vlan disabled */
++              for (i = 0; i < dev->ports; i++) {
++                      if (i == AR40XX_PORT_CPU)
++                              continue;
++
++                      portmask[i] = BIT(AR40XX_PORT_CPU);
++                      portmask[AR40XX_PORT_CPU] |= BIT(i);
++              }
++      }
++
++      /* update the port destination mask registers and tag settings */
++      for (i = 0; i < dev->ports; i++)
++              ar40xx_setup_port(priv, i, portmask[i]);
++
++      ar40xx_set_mirror_regs(priv);
++
++      mutex_unlock(&priv->reg_mutex);
++      return 0;
++}
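++
++/* Destination-mask derivation, worked through with hypothetical tables:
++ * if vlan_table[1] = 0x03 (ports 0 and 1) and vlan_table[2] = 0x05
++ * (ports 0 and 2), the loop yields portmask[0] = 0x06, portmask[1] =
++ * 0x01 and portmask[2] = 0x01 -- every member may forward to its
++ * co-members but never back to itself. With VLAN disabled, each port
++ * pairs with the CPU port only, giving a star topology.
++ */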
++
++static int
++ar40xx_sw_reset_switch(struct switch_dev *dev)
++{
++      struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
++      int i, rv;
++
++      mutex_lock(&priv->reg_mutex);
++      memset(&priv->vlan, 0, sizeof(struct ar40xx_priv) -
++              offsetof(struct ar40xx_priv, vlan));
++
++      for (i = 0; i < AR40XX_MAX_VLANS; i++)
++              priv->vlan_id[i] = i;
++
++      ar40xx_vlan_init(priv);
++
++      priv->mirror_rx = false;
++      priv->mirror_tx = false;
++      priv->source_port = 0;
++      priv->monitor_port = 0;
++
++      mutex_unlock(&priv->reg_mutex);
++
++      rv = ar40xx_sw_hw_apply(dev);
++      return rv;
++}
++
++static int
++ar40xx_start(struct ar40xx_priv *priv)
++{
++      int ret;
++
++      ret = ar40xx_hw_init(priv);
++      if (ret)
++              return ret;
++
++      ret = ar40xx_sw_reset_switch(&priv->dev);
++      if (ret)
++              return ret;
++
++      /* at last, setup cpu port */
++      ret = ar40xx_cpuport_setup(priv);
++      if (ret)
++              return ret;
++
++      schedule_delayed_work(&priv->mib_work,
++                            msecs_to_jiffies(AR40XX_MIB_WORK_DELAY));
++
++      ar40xx_qm_err_check_work_start(priv);
++
++      return 0;
++}
++
++static const struct switch_dev_ops ar40xx_sw_ops = {
++      .attr_global = {
++              .attr = ar40xx_sw_attr_globals,
++              .n_attr = ARRAY_SIZE(ar40xx_sw_attr_globals),
++      },
++      .attr_port = {
++              .attr = ar40xx_sw_attr_port,
++              .n_attr = ARRAY_SIZE(ar40xx_sw_attr_port),
++      },
++      .attr_vlan = {
++              .attr = ar40xx_sw_attr_vlan,
++              .n_attr = ARRAY_SIZE(ar40xx_sw_attr_vlan),
++      },
++      .get_port_pvid = ar40xx_sw_get_pvid,
++      .set_port_pvid = ar40xx_sw_set_pvid,
++      .get_vlan_ports = ar40xx_sw_get_ports,
++      .set_vlan_ports = ar40xx_sw_set_ports,
++      .apply_config = ar40xx_sw_hw_apply,
++      .reset_switch = ar40xx_sw_reset_switch,
++      .get_port_link = ar40xx_sw_get_port_link,
++};
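++
++/* With these ops registered, the switch is driven from userspace via
++ * the swconfig CLI, e.g. (the device name is an assumption -- it
++ * depends on registration order):
++ *
++ *   swconfig dev switch0 set enable_vlan 1
++ *   swconfig dev switch0 vlan 1 set ports "0t 1 2 3 4"
++ *   swconfig dev switch0 set apply
++ *   swconfig dev switch0 port 1 get mib
++ */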
++
++/* Start of phy driver support */
++
++static const u32 ar40xx_phy_ids[] = {
++      0x004dd0b1,
++      0x004dd0b2, /* AR40xx */
++};
++
++static bool
++ar40xx_phy_match(u32 phy_id)
++{
++      int i;
++
++      for (i = 0; i < ARRAY_SIZE(ar40xx_phy_ids); i++)
++              if (phy_id == ar40xx_phy_ids[i])
++                      return true;
++
++      return false;
++}
++
++static bool
++is_ar40xx_phy(struct mii_bus *bus)
++{
++      unsigned i;
++
++      for (i = 0; i < 4; i++) {
++              u32 phy_id;
++
++              phy_id = mdiobus_read(bus, i, MII_PHYSID1) << 16;
++              phy_id |= mdiobus_read(bus, i, MII_PHYSID2);
++              if (!ar40xx_phy_match(phy_id))
++                      return false;
++      }
++
++      return true;
++}
++
++static int
++ar40xx_phy_probe(struct phy_device *phydev)
++{
++      if (!is_ar40xx_phy(phydev->mdio.bus))
++              return -ENODEV;
++
++      ar40xx_priv->mii_bus = phydev->mdio.bus;
++      phydev->priv = ar40xx_priv;
++      if (phydev->mdio.addr == 0)
++              ar40xx_priv->phy = phydev;
++
++      phydev->supported |= SUPPORTED_1000baseT_Full;
++      phydev->advertising |= ADVERTISED_1000baseT_Full;
++      return 0;
++}
++
++static void
++ar40xx_phy_remove(struct phy_device *phydev)
++{
++      ar40xx_priv->mii_bus = NULL;
++      phydev->priv = NULL;
++}
++
++static int
++ar40xx_phy_config_init(struct phy_device *phydev)
++{
++      return 0;
++}
++
++static int
++ar40xx_phy_read_status(struct phy_device *phydev)
++{
++      if (phydev->mdio.addr != 0)
++              return genphy_read_status(phydev);
++
++      return 0;
++}
++
++static int
++ar40xx_phy_config_aneg(struct phy_device *phydev)
++{
++      if (phydev->mdio.addr == 0)
++              return 0;
++
++      return genphy_config_aneg(phydev);
++}
++
++static struct phy_driver ar40xx_phy_driver = {
++      .phy_id         = 0x004d0000,
++      .name           = "QCA Malibu",
++      .phy_id_mask    = 0xffff0000,
++      .features       = PHY_BASIC_FEATURES,
++      .probe          = ar40xx_phy_probe,
++      .remove         = ar40xx_phy_remove,
++      .config_init    = ar40xx_phy_config_init,
++      .config_aneg    = ar40xx_phy_config_aneg,
++      .read_status    = ar40xx_phy_read_status,
++};
++
++static uint16_t ar40xx_gpio_get_phy(unsigned int offset)
++{
++      return offset / 4;
++}
++
++static uint16_t ar40xx_gpio_get_reg(unsigned int offset)
++{
++      return 0x8074 + offset % 4;
++}
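++
++/* GPIO offset mapping, worked through: offset 9 maps to PHY 9 / 4 = 2
++ * and MMD7 register 0x8074 + (9 % 4) = 0x8075, so the chip's 20
++ * offsets cover registers 0x8074..0x8077 on PHYs 0..4.
++ */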
++
++static void ar40xx_gpio_set(struct gpio_chip *gc, unsigned int offset,
++                          int value)
++{
++      struct ar40xx_priv *priv = gpiochip_get_data(gc);
++
++      ar40xx_phy_mmd_write(priv, ar40xx_gpio_get_phy(offset), 0x7,
++                           ar40xx_gpio_get_reg(offset),
++                           value ? 0xA000 : 0x8000);
++}
++
++static int ar40xx_gpio_get(struct gpio_chip *gc, unsigned offset)
++{
++      struct ar40xx_priv *priv = gpiochip_get_data(gc);
++
++      return ar40xx_phy_mmd_read(priv, ar40xx_gpio_get_phy(offset), 0x7,
++                                 ar40xx_gpio_get_reg(offset)) == 0xA000;
++}
++
++static int ar40xx_gpio_get_dir(struct gpio_chip *gc, unsigned offset)
++{
++      return 0; /* only out direction */
++}
++
++static int ar40xx_gpio_dir_out(struct gpio_chip *gc, unsigned offset,
++                             int value)
++{
++      /*
++       * The direction-out value is used to set the initial value;
++       * support for this function is required by leds-gpio.c.
++       */
++      ar40xx_gpio_set(gc, offset, value);
++      return 0;
++}
++
++static void ar40xx_register_gpio(struct device *pdev,
++                               struct ar40xx_priv *priv,
++                               struct device_node *switch_node)
++{
++      struct gpio_chip *gc;
++      int err;
++
++      gc = devm_kzalloc(pdev, sizeof(*gc), GFP_KERNEL);
++      if (!gc)
++              return;
++
++      gc->label = "ar40xx_gpio",
++      gc->base = -1,
++      gc->ngpio = 5 /* mmd 0 - 4 */ * 4 /* 0x8074 - 0x8077 */,
++      gc->parent = pdev;
++      gc->owner = THIS_MODULE;
++
++      gc->get_direction = ar40xx_gpio_get_dir;
++      gc->direction_output = ar40xx_gpio_dir_out;
++      gc->get = ar40xx_gpio_get;
++      gc->set = ar40xx_gpio_set;
++      gc->can_sleep = true;
++      gc->label = priv->dev.name;
++      gc->of_node = switch_node;
++
++      err = devm_gpiochip_add_data(pdev, gc, priv);
++      if (err != 0)
++              dev_err(pdev, "Failed to register gpio %d.\n", err);
++}
++
++/* End of phy driver support */
++
++/* Platform driver probe function */
++
++static int ar40xx_probe(struct platform_device *pdev)
++{
++      struct device_node *switch_node;
++      struct device_node *psgmii_node;
++      const __be32 *mac_mode;
++      struct clk *ess_clk;
++      struct switch_dev *swdev;
++      struct ar40xx_priv *priv;
++      u32 len;
++      u32 num_mibs;
++      struct resource psgmii_base = {0};
++      struct resource switch_base = {0};
++      int ret;
++
++      priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
++      if (!priv)
++              return -ENOMEM;
++
++      platform_set_drvdata(pdev, priv);
++      ar40xx_priv = priv;
++
++      switch_node = of_node_get(pdev->dev.of_node);
++      if (of_address_to_resource(switch_node, 0, &switch_base) != 0)
++              return -EIO;
++
++      priv->hw_addr = devm_ioremap_resource(&pdev->dev, &switch_base);
++      if (IS_ERR(priv->hw_addr)) {
++              dev_err(&pdev->dev, "Failed to ioremap switch_base!\n");
++              return PTR_ERR(priv->hw_addr);
++      }
++
++      /* look up the PSGMII node in the DTS */
++      psgmii_node = of_find_node_by_name(NULL, "ess-psgmii");
++      if (!psgmii_node) {
++              dev_err(&pdev->dev, "Failed to find ess-psgmii node!\n");
++              return -EINVAL;
++      }
++
++      if (of_address_to_resource(psgmii_node, 0, &psgmii_base) != 0)
++              return -EIO;
++
++      priv->psgmii_hw_addr = devm_ioremap_resource(&pdev->dev, &psgmii_base);
++      if (IS_ERR(priv->psgmii_hw_addr)) {
++              dev_err(&pdev->dev, "psgmii ioremap fail!\n");
++              return PTR_ERR(priv->psgmii_hw_addr);
++      }
++
++      mac_mode = of_get_property(switch_node, "switch_mac_mode", &len);
++      if (!mac_mode) {
++              dev_err(&pdev->dev, "Failed to read switch_mac_mode\n");
++              return -EINVAL;
++      }
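++      /* mode 0 == PORT_WRAPPER_PSGMII, see enum ar40xx_port_wrapper_cfg */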
++      priv->mac_mode = be32_to_cpup(mac_mode);
++
++      ess_clk = of_clk_get_by_name(switch_node, "ess_clk");
++      if (!IS_ERR(ess_clk))
++              clk_prepare_enable(ess_clk);
++
++      priv->ess_rst = devm_reset_control_get(&pdev->dev, "ess_rst");
++      if (IS_ERR(priv->ess_rst)) {
++              dev_err(&pdev->dev, "Failed to get ess_rst control!\n");
++              return PTR_ERR(priv->ess_rst);
++      }
++
++      if (of_property_read_u32(switch_node, "switch_cpu_bmp",
++                               &priv->cpu_bmp) ||
++          of_property_read_u32(switch_node, "switch_lan_bmp",
++                               &priv->lan_bmp) ||
++          of_property_read_u32(switch_node, "switch_wan_bmp",
++                               &priv->wan_bmp)) {
++              dev_err(&pdev->dev, "Failed to read port properties\n");
++              return -EIO;
++      }
++
++      ret = phy_driver_register(&ar40xx_phy_driver, THIS_MODULE);
++      if (ret) {
++              dev_err(&pdev->dev, "Failed to register ar40xx phy driver!\n");
++              return -EIO;
++      }
++
++      mutex_init(&priv->reg_mutex);
++      mutex_init(&priv->mib_lock);
++      INIT_DELAYED_WORK(&priv->mib_work, ar40xx_mib_work_func);
++
++      /* register switch */
++      swdev = &priv->dev;
++
++      swdev->alias = dev_name(&priv->mii_bus->dev);
++
++      swdev->cpu_port = AR40XX_PORT_CPU;
++      swdev->name = "QCA AR40xx";
++      swdev->vlans = AR40XX_MAX_VLANS;
++      swdev->ports = AR40XX_NUM_PORTS;
++      swdev->ops = &ar40xx_sw_ops;
++      ret = register_switch(swdev, NULL);
++      if (ret)
++              goto err_unregister_phy;
++
++      num_mibs = ARRAY_SIZE(ar40xx_mibs);
++      len = priv->dev.ports * num_mibs *
++            sizeof(*priv->mib_stats);
++      priv->mib_stats = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
++      if (!priv->mib_stats) {
++              ret = -ENOMEM;
++              goto err_unregister_switch;
++      }
++
++      ar40xx_start(priv);
++
++      if (of_property_read_bool(switch_node, "gpio-controller"))
++              ar40xx_register_gpio(&pdev->dev, ar40xx_priv, switch_node);
++
++      return 0;
++
++err_unregister_switch:
++      unregister_switch(&priv->dev);
++err_unregister_phy:
++      phy_driver_unregister(&ar40xx_phy_driver);
++      platform_set_drvdata(pdev, NULL);
++      return ret;
++}
++
++static int ar40xx_remove(struct platform_device *pdev)
++{
++      struct ar40xx_priv *priv = platform_get_drvdata(pdev);
++
++      cancel_delayed_work_sync(&priv->qm_dwork);
++      cancel_delayed_work_sync(&priv->mib_work);
++
++      unregister_switch(&priv->dev);
++
++      phy_driver_unregister(&ar40xx_phy_driver);
++
++      return 0;
++}
++
++static const struct of_device_id ar40xx_of_mtable[] = {
++      {.compatible = "qcom,ess-switch" },
++      {}
++};
++
++struct platform_driver ar40xx_drv = {
++      .probe = ar40xx_probe,
++      .remove = ar40xx_remove,
++      .driver = {
++              .name    = "ar40xx",
++              .of_match_table = ar40xx_of_mtable,
++      },
++};
++
++module_platform_driver(ar40xx_drv);
++
++MODULE_DESCRIPTION("IPQ40XX ESS driver");
++MODULE_LICENSE("Dual BSD/GPL");
+--- /dev/null
++++ b/drivers/net/phy/ar40xx.h
+@@ -0,0 +1,337 @@
++/*
++ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
++ *
++ * Permission to use, copy, modify, and/or distribute this software for
++ * any purpose with or without fee is hereby granted, provided that the
++ * above copyright notice and this permission notice appear in all copies.
++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
++ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
++ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
++ * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
++ */
++
++#ifndef __AR40XX_H
++#define __AR40XX_H
++
++#define AR40XX_MAX_VLANS      128
++#define AR40XX_NUM_PORTS      6
++#define AR40XX_NUM_PHYS       5
++
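++/* BITS(_s, _n): an _n-bit-wide mask whose lowest set bit is bit _s */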
++#define BITS(_s, _n)  (((1UL << (_n)) - 1) << (_s))
++
++struct ar40xx_priv {
++      struct switch_dev dev;
++
++      u8  __iomem      *hw_addr;
++      u8  __iomem      *psgmii_hw_addr;
++      u32 mac_mode;
++      struct reset_control *ess_rst;
++      u32 cpu_bmp;
++      u32 lan_bmp;
++      u32 wan_bmp;
++
++      struct mii_bus *mii_bus;
++      struct phy_device *phy;
++
++      /* mutex for qm task */
++      struct mutex qm_lock;
++      struct delayed_work qm_dwork;
++      u32 port_link_up[AR40XX_NUM_PORTS];
++      u32 ar40xx_port_old_link[AR40XX_NUM_PORTS];
++      u32 ar40xx_port_qm_buf[AR40XX_NUM_PORTS];
++
++      u32 phy_t_status;
++
++      /* mutex for switch reg access */
++      struct mutex reg_mutex;
++
++      /* mutex for mib task */
++      struct mutex mib_lock;
++      struct delayed_work mib_work;
++      int mib_next_port;
++      u64 *mib_stats;
++
++      char buf[2048];
++
++      /* all fields below will be cleared on reset */
++      bool vlan;
++      u16 vlan_id[AR40XX_MAX_VLANS];
++      u8 vlan_table[AR40XX_MAX_VLANS];
++      u8 vlan_tagged;
++      u16 pvid[AR40XX_NUM_PORTS];
++
++      /* mirror */
++      bool mirror_rx;
++      bool mirror_tx;
++      int source_port;
++      int monitor_port;
++};
++
++#define AR40XX_PORT_LINK_UP 1
++#define AR40XX_PORT_LINK_DOWN 0
++#define AR40XX_QM_NOT_EMPTY  1
++#define AR40XX_QM_EMPTY  0
++
++#define AR40XX_LAN_VLAN       1
++#define AR40XX_WAN_VLAN       2
++
++enum ar40xx_port_wrapper_cfg {
++      PORT_WRAPPER_PSGMII = 0,
++};
++
++struct ar40xx_mib_desc {
++      u32 size;
++      u32 offset;
++      const char *name;
++};
++
++#define AR40XX_PORT_CPU       0
++
++#define AR40XX_PSGMII_MODE_CONTROL    0x1b4
++#define   AR40XX_PSGMII_ATHR_CSCO_MODE_25M    BIT(0)
++
++#define AR40XX_PSGMIIPHY_TX_CONTROL    0x288
++
++#define AR40XX_MII_ATH_MMD_ADDR               0x0d
++#define AR40XX_MII_ATH_MMD_DATA               0x0e
++#define AR40XX_MII_ATH_DBG_ADDR               0x1d
++#define AR40XX_MII_ATH_DBG_DATA               0x1e
++
++#define AR40XX_STATS_RXBROAD          0x00
++#define AR40XX_STATS_RXPAUSE          0x04
++#define AR40XX_STATS_RXMULTI          0x08
++#define AR40XX_STATS_RXFCSERR         0x0c
++#define AR40XX_STATS_RXALIGNERR               0x10
++#define AR40XX_STATS_RXRUNT           0x14
++#define AR40XX_STATS_RXFRAGMENT               0x18
++#define AR40XX_STATS_RX64BYTE         0x1c
++#define AR40XX_STATS_RX128BYTE                0x20
++#define AR40XX_STATS_RX256BYTE                0x24
++#define AR40XX_STATS_RX512BYTE                0x28
++#define AR40XX_STATS_RX1024BYTE               0x2c
++#define AR40XX_STATS_RX1518BYTE               0x30
++#define AR40XX_STATS_RXMAXBYTE                0x34
++#define AR40XX_STATS_RXTOOLONG                0x38
++#define AR40XX_STATS_RXGOODBYTE               0x3c
++#define AR40XX_STATS_RXBADBYTE                0x44
++#define AR40XX_STATS_RXOVERFLOW               0x4c
++#define AR40XX_STATS_FILTERED         0x50
++#define AR40XX_STATS_TXBROAD          0x54
++#define AR40XX_STATS_TXPAUSE          0x58
++#define AR40XX_STATS_TXMULTI          0x5c
++#define AR40XX_STATS_TXUNDERRUN               0x60
++#define AR40XX_STATS_TX64BYTE         0x64
++#define AR40XX_STATS_TX128BYTE                0x68
++#define AR40XX_STATS_TX256BYTE                0x6c
++#define AR40XX_STATS_TX512BYTE                0x70
++#define AR40XX_STATS_TX1024BYTE               0x74
++#define AR40XX_STATS_TX1518BYTE               0x78
++#define AR40XX_STATS_TXMAXBYTE                0x7c
++#define AR40XX_STATS_TXOVERSIZE               0x80
++#define AR40XX_STATS_TXBYTE           0x84
++#define AR40XX_STATS_TXCOLLISION      0x8c
++#define AR40XX_STATS_TXABORTCOL               0x90
++#define AR40XX_STATS_TXMULTICOL               0x94
++#define AR40XX_STATS_TXSINGLECOL      0x98
++#define AR40XX_STATS_TXEXCDEFER               0x9c
++#define AR40XX_STATS_TXDEFER          0xa0
++#define AR40XX_STATS_TXLATECOL                0xa4
++
++#define AR40XX_REG_MODULE_EN                  0x030
++#define   AR40XX_MODULE_EN_MIB                        BIT(0)
++
++#define AR40XX_REG_MIB_FUNC                   0x034
++#define   AR40XX_MIB_BUSY             BIT(17)
++#define   AR40XX_MIB_CPU_KEEP                 BIT(20)
++#define   AR40XX_MIB_FUNC             BITS(24, 3)
++#define   AR40XX_MIB_FUNC_S           24
++#define   AR40XX_MIB_FUNC_NO_OP               0x0
++#define   AR40XX_MIB_FUNC_FLUSH               0x1
++
++#define AR40XX_REG_PORT_STATUS(_i)            (0x07c + (_i) * 4)
++#define   AR40XX_PORT_SPEED                   BITS(0, 2)
++#define   AR40XX_PORT_STATUS_SPEED_S  0
++#define   AR40XX_PORT_TX_EN                   BIT(2)
++#define   AR40XX_PORT_RX_EN                   BIT(3)
++#define   AR40XX_PORT_STATUS_TXFLOW   BIT(4)
++#define   AR40XX_PORT_STATUS_RXFLOW   BIT(5)
++#define   AR40XX_PORT_DUPLEX                  BIT(6)
++#define   AR40XX_PORT_TXHALF_FLOW             BIT(7)
++#define   AR40XX_PORT_STATUS_LINK_UP  BIT(8)
++#define   AR40XX_PORT_AUTO_LINK_EN            BIT(9)
++#define   AR40XX_PORT_STATUS_FLOW_CONTROL  BIT(12)
++
++#define AR40XX_REG_MAX_FRAME_SIZE             0x078
++#define   AR40XX_MAX_FRAME_SIZE_MTU           BITS(0, 14)
++
++#define AR40XX_REG_PORT_HEADER(_i)            (0x09c + (_i) * 4)
++
++#define AR40XX_REG_EEE_CTRL                   0x100
++#define   AR40XX_EEE_CTRL_DISABLE_PHY(_i)     BIT(4 + (_i) * 2)
++
++#define AR40XX_REG_PORT_VLAN0(_i)             (0x420 + (_i) * 0x8)
++#define   AR40XX_PORT_VLAN0_DEF_SVID          BITS(0, 12)
++#define   AR40XX_PORT_VLAN0_DEF_SVID_S                0
++#define   AR40XX_PORT_VLAN0_DEF_CVID          BITS(16, 12)
++#define   AR40XX_PORT_VLAN0_DEF_CVID_S                16
++
++#define AR40XX_REG_PORT_VLAN1(_i)             (0x424 + (_i) * 0x8)
++#define   AR40XX_PORT_VLAN1_PORT_VLAN_PROP    BIT(6)
++#define   AR40XX_PORT_VLAN1_OUT_MODE          BITS(12, 2)
++#define   AR40XX_PORT_VLAN1_OUT_MODE_S                12
++#define   AR40XX_PORT_VLAN1_OUT_MODE_UNMOD    0
++#define   AR40XX_PORT_VLAN1_OUT_MODE_UNTAG    1
++#define   AR40XX_PORT_VLAN1_OUT_MODE_TAG              2
++#define   AR40XX_PORT_VLAN1_OUT_MODE_UNTOUCH  3
++
++#define AR40XX_REG_VTU_FUNC0                  0x0610
++#define   AR40XX_VTU_FUNC0_EG_MODE            BITS(4, 14)
++#define   AR40XX_VTU_FUNC0_EG_MODE_S(_i)      (4 + (_i) * 2)
++#define   AR40XX_VTU_FUNC0_EG_MODE_KEEP               0
++#define   AR40XX_VTU_FUNC0_EG_MODE_UNTAG      1
++#define   AR40XX_VTU_FUNC0_EG_MODE_TAG                2
++#define   AR40XX_VTU_FUNC0_EG_MODE_NOT                3
++#define   AR40XX_VTU_FUNC0_IVL                        BIT(19)
++#define   AR40XX_VTU_FUNC0_VALID              BIT(20)
++
++#define AR40XX_REG_VTU_FUNC1                  0x0614
++#define   AR40XX_VTU_FUNC1_OP                 BITS(0, 3)
++#define   AR40XX_VTU_FUNC1_OP_NOOP            0
++#define   AR40XX_VTU_FUNC1_OP_FLUSH           1
++#define   AR40XX_VTU_FUNC1_OP_LOAD            2
++#define   AR40XX_VTU_FUNC1_OP_PURGE           3
++#define   AR40XX_VTU_FUNC1_OP_REMOVE_PORT     4
++#define   AR40XX_VTU_FUNC1_OP_GET_NEXT                5
++#define   AR40XX_VTU_FUNC1_OP_GET_ONE         6
++#define   AR40XX_VTU_FUNC1_FULL                       BIT(4)
++#define   AR40XX_VTU_FUNC1_PORT                       BITS(8, 4)
++#define   AR40XX_VTU_FUNC1_PORT_S             8
++#define   AR40XX_VTU_FUNC1_VID                        BITS(16, 12)
++#define   AR40XX_VTU_FUNC1_VID_S              16
++#define   AR40XX_VTU_FUNC1_BUSY                       BIT(31)
++
++#define AR40XX_REG_FWD_CTRL0                  0x620
++#define   AR40XX_FWD_CTRL0_CPU_PORT_EN                BIT(10)
++#define   AR40XX_FWD_CTRL0_MIRROR_PORT                BITS(4, 4)
++#define   AR40XX_FWD_CTRL0_MIRROR_PORT_S      4
++
++#define AR40XX_REG_FWD_CTRL1                  0x624
++#define   AR40XX_FWD_CTRL1_UC_FLOOD           BITS(0, 7)
++#define   AR40XX_FWD_CTRL1_UC_FLOOD_S         0
++#define   AR40XX_FWD_CTRL1_MC_FLOOD           BITS(8, 7)
++#define   AR40XX_FWD_CTRL1_MC_FLOOD_S         8
++#define   AR40XX_FWD_CTRL1_BC_FLOOD           BITS(16, 7)
++#define   AR40XX_FWD_CTRL1_BC_FLOOD_S         16
++#define   AR40XX_FWD_CTRL1_IGMP                       BITS(24, 7)
++#define   AR40XX_FWD_CTRL1_IGMP_S             24
++
++#define AR40XX_REG_PORT_LOOKUP(_i)            (0x660 + (_i) * 0xc)
++#define   AR40XX_PORT_LOOKUP_MEMBER           BITS(0, 7)
++#define   AR40XX_PORT_LOOKUP_IN_MODE          BITS(8, 2)
++#define   AR40XX_PORT_LOOKUP_IN_MODE_S                8
++#define   AR40XX_PORT_LOOKUP_STATE            BITS(16, 3)
++#define   AR40XX_PORT_LOOKUP_STATE_S          16
++#define   AR40XX_PORT_LOOKUP_LEARN            BIT(20)
++#define   AR40XX_PORT_LOOKUP_LOOPBACK         BIT(21)
++#define   AR40XX_PORT_LOOKUP_ING_MIRROR_EN    BIT(25)
++
++#define AR40XX_REG_ATU_FUNC                   0x60c
++#define   AR40XX_ATU_FUNC_OP                  BITS(0, 4)
++#define   AR40XX_ATU_FUNC_OP_NOOP             0x0
++#define   AR40XX_ATU_FUNC_OP_FLUSH            0x1
++#define   AR40XX_ATU_FUNC_OP_LOAD             0x2
++#define   AR40XX_ATU_FUNC_OP_PURGE            0x3
++#define   AR40XX_ATU_FUNC_OP_FLUSH_LOCKED     0x4
++#define   AR40XX_ATU_FUNC_OP_FLUSH_UNICAST    0x5
++#define   AR40XX_ATU_FUNC_OP_GET_NEXT         0x6
++#define   AR40XX_ATU_FUNC_OP_SEARCH_MAC               0x7
++#define   AR40XX_ATU_FUNC_OP_CHANGE_TRUNK     0x8
++#define   AR40XX_ATU_FUNC_BUSY                        BIT(31)
++
++#define AR40XX_REG_QM_DEBUG_ADDR              0x820
++#define AR40XX_REG_QM_DEBUG_VALUE             0x824
++#define   AR40XX_REG_QM_PORT0_3_QNUM          0x1d
++#define   AR40XX_REG_QM_PORT4_6_QNUM          0x1e
++
++#define AR40XX_REG_PORT_HOL_CTRL1(_i)         (0x974 + (_i) * 0x8)
++#define   AR40XX_PORT_HOL_CTRL1_EG_MIRROR_EN  BIT(16)
++
++#define AR40XX_REG_PORT_FLOWCTRL_THRESH(_i)   (0x9b0 + (_i) * 0x4)
++#define   AR40XX_PORT0_FC_THRESH_ON_DFLT      0x60
++#define   AR40XX_PORT0_FC_THRESH_OFF_DFLT     0x90
++
++#define AR40XX_PHY_DEBUG_0   0
++#define AR40XX_PHY_MANU_CTRL_EN  BIT(12)
++
++#define AR40XX_PHY_DEBUG_2   2
++
++#define AR40XX_PHY_SPEC_STATUS 0x11
++#define   AR40XX_PHY_SPEC_STATUS_LINK         BIT(10)
++#define   AR40XX_PHY_SPEC_STATUS_DUPLEX               BIT(13)
++#define   AR40XX_PHY_SPEC_STATUS_SPEED                BITS(14, 2)
++
++/* port forwarding state */
++enum {
++      AR40XX_PORT_STATE_DISABLED = 0,
++      AR40XX_PORT_STATE_BLOCK = 1,
++      AR40XX_PORT_STATE_LISTEN = 2,
++      AR40XX_PORT_STATE_LEARN = 3,
++      AR40XX_PORT_STATE_FORWARD = 4
++};
++
++/* ingress 802.1q mode */
++enum {
++      AR40XX_IN_PORT_ONLY = 0,
++      AR40XX_IN_PORT_FALLBACK = 1,
++      AR40XX_IN_VLAN_ONLY = 2,
++      AR40XX_IN_SECURE = 3
++};
++
++/* egress 802.1q mode */
++enum {
++      AR40XX_OUT_KEEP = 0,
++      AR40XX_OUT_STRIP_VLAN = 1,
++      AR40XX_OUT_ADD_VLAN = 2
++};
++
++/* port speed */
++enum {
++      AR40XX_PORT_SPEED_10M = 0,
++      AR40XX_PORT_SPEED_100M = 1,
++      AR40XX_PORT_SPEED_1000M = 2,
++      AR40XX_PORT_SPEED_ERR = 3,
++};
++
++#define AR40XX_MIB_WORK_DELAY 2000 /* msecs */
++
++#define AR40XX_QM_WORK_DELAY    100
++
++#define   AR40XX_MIB_FUNC_CAPTURE     0x3
++
++#define AR40XX_REG_PORT_STATS_START   0x1000
++#define AR40XX_REG_PORT_STATS_LEN             0x100
++
++#define AR40XX_PORTS_ALL      0x3f
++
++#define AR40XX_PSGMII_ID      5
++#define AR40XX_PSGMII_CALB_NUM        100
++#define AR40XX_MALIBU_PSGMII_MODE_CTRL        0x6d
++#define AR40XX_MALIBU_PHY_PSGMII_MODE_CTRL_ADJUST_VAL 0x220c
++#define AR40XX_MALIBU_PHY_MMD7_DAC_CTRL       0x801a
++#define AR40XX_MALIBU_DAC_CTRL_MASK   0x380
++#define AR40XX_MALIBU_DAC_CTRL_VALUE  0x280
++#define AR40XX_MALIBU_PHY_RLP_CTRL       0x805a
++#define AR40XX_PSGMII_TX_DRIVER_1_CTRL        0xb
++#define AR40XX_MALIBU_PHY_PSGMII_REDUCE_SERDES_TX_AMP 0x8a
++#define AR40XX_MALIBU_PHY_LAST_ADDR   4
++
++static inline struct ar40xx_priv *
++swdev_to_ar40xx(struct switch_dev *swdev)
++{
++      return container_of(swdev, struct ar40xx_priv, dev);
++}
++
++#endif
+--- /dev/null
++++ b/drivers/net/phy/mdio-ipq40xx.c
+@@ -0,0 +1,203 @@
++/*
++ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
++ *
++ * Permission to use, copy, modify, and/or distribute this software for
++ * any purpose with or without fee is hereby granted, provided that the
++ * above copyright notice and this permission notice appear in all copies.
++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
++ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
++ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
++ * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
++ */
++
++#include <linux/delay.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/mutex.h>
++#include <linux/io.h>
++#include <linux/of_address.h>
++#include <linux/of_mdio.h>
++#include <linux/phy.h>
++#include <linux/platform_device.h>
++
++#define MDIO_CTRL_0_REG               0x40
++#define MDIO_CTRL_1_REG               0x44
++#define MDIO_CTRL_2_REG               0x48
++#define MDIO_CTRL_3_REG               0x4c
++#define MDIO_CTRL_4_REG               0x50
++#define MDIO_CTRL_4_ACCESS_BUSY               BIT(16)
++#define MDIO_CTRL_4_ACCESS_START              BIT(8)
++#define MDIO_CTRL_4_ACCESS_CODE_READ          0
++#define MDIO_CTRL_4_ACCESS_CODE_WRITE 1
++#define CTRL_0_REG_DEFAULT_VALUE      0x150FF
++
++#define IPQ40XX_MDIO_RETRY    1000
++#define IPQ40XX_MDIO_DELAY    10
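++
++/*
++ * Access sequence used below: poll the BUSY bit in MDIO_CTRL_4 until it
++ * clears, write (phy_addr << 8 | regnum) to MDIO_CTRL_1 (plus the data
++ * word to MDIO_CTRL_2 for writes), then start the transaction through
++ * MDIO_CTRL_4; the result of a read is returned in MDIO_CTRL_3.
++ */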
++
++struct ipq40xx_mdio_data {
++      struct mii_bus  *mii_bus;
++      void __iomem    *membase;
++      int             phy_irq[PHY_MAX_ADDR];
++      struct device   *dev;
++};
++
++static int ipq40xx_mdio_wait_busy(struct ipq40xx_mdio_data *am)
++{
++      int i;
++
++      for (i = 0; i < IPQ40XX_MDIO_RETRY; i++) {
++              unsigned int busy;
++
++              busy = readl(am->membase + MDIO_CTRL_4_REG) &
++                      MDIO_CTRL_4_ACCESS_BUSY;
++              if (!busy)
++                      return 0;
++
++              /* BUSY may take 15-20 loop iterations to clear */
++              udelay(IPQ40XX_MDIO_DELAY);
++      }
++
++      dev_err(am->dev, "%s: MDIO operation timed out\n", am->mii_bus->name);
++
++      return -ETIMEDOUT;
++}
++
++static int ipq40xx_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
++{
++      struct ipq40xx_mdio_data *am = bus->priv;
++      int value = 0;
++      unsigned int cmd = 0;
++
++      lockdep_assert_held(&bus->mdio_lock);
++
++      if (ipq40xx_mdio_wait_busy(am))
++              return -ETIMEDOUT;
++
++      /* issue the phy address and reg */
++      writel((mii_id << 8) | regnum, am->membase + MDIO_CTRL_1_REG);
++
++      cmd = MDIO_CTRL_4_ACCESS_START|MDIO_CTRL_4_ACCESS_CODE_READ;
++
++      /* issue read command */
++      writel(cmd, am->membase + MDIO_CTRL_4_REG);
++
++      /* Wait read complete */
++      if (ipq40xx_mdio_wait_busy(am))
++              return -ETIMEDOUT;
++
++      /* Read data */
++      value = readl(am->membase + MDIO_CTRL_3_REG);
++
++      return value;
++}
++
++static int ipq40xx_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
++                          u16 value)
++{
++      struct ipq40xx_mdio_data *am = bus->priv;
++      unsigned int cmd = 0;
++
++      lockdep_assert_held(&bus->mdio_lock);
++
++      if (ipq40xx_mdio_wait_busy(am))
++              return -ETIMEDOUT;
++
++      /* issue the phy address and reg */
++      writel((mii_id << 8) | regnum, am->membase + MDIO_CTRL_1_REG);
++
++      /* issue write data */
++      writel(value, am->membase + MDIO_CTRL_2_REG);
++
++      cmd = MDIO_CTRL_4_ACCESS_START|MDIO_CTRL_4_ACCESS_CODE_WRITE;
++      /* issue write command */
++      writel(cmd, am->membase + MDIO_CTRL_4_REG);
++
++      /* Wait write complete */
++      if (ipq40xx_mdio_wait_busy(am))
++              return -ETIMEDOUT;
++
++      return 0;
++}
++
++static int ipq40xx_mdio_probe(struct platform_device *pdev)
++{
++      struct ipq40xx_mdio_data *am;
++      struct resource *res;
++      int i;
++
++      am = devm_kzalloc(&pdev->dev, sizeof(*am), GFP_KERNEL);
++      if (!am)
++              return -ENOMEM;
++
++      res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++      if (!res) {
++              dev_err(&pdev->dev, "no iomem resource found\n");
++              return -ENXIO;
++      }
++
++      am->membase = devm_ioremap_resource(&pdev->dev, res);
++      if (IS_ERR(am->membase)) {
++              dev_err(&pdev->dev, "unable to ioremap registers\n");
++              return PTR_ERR(am->membase);
++      }
++
++      am->mii_bus = devm_mdiobus_alloc(&pdev->dev);
++      if (!am->mii_bus)
++              return -ENOMEM;
++
++      writel(CTRL_0_REG_DEFAULT_VALUE, am->membase + MDIO_CTRL_0_REG);
++
++      am->mii_bus->name = "ipq40xx_mdio";
++      am->mii_bus->read = ipq40xx_mdio_read;
++      am->mii_bus->write = ipq40xx_mdio_write;
++      am->mii_bus->priv = am;
++      am->mii_bus->parent = &pdev->dev;
++      snprintf(am->mii_bus->id, MII_BUS_ID_SIZE, "%s", dev_name(&pdev->dev));
++
++      /* fill the irq table before copying it into the mii_bus */
++      for (i = 0; i < PHY_MAX_ADDR; i++)
++              am->phy_irq[i] = PHY_POLL;
++      memcpy(am->mii_bus->irq, am->phy_irq, sizeof(am->phy_irq));
++
++      am->dev = &pdev->dev;
++      platform_set_drvdata(pdev, am);
++
++      /* edma_axi_probe() uses the "am" drvdata, so
++       * ipq40xx_mdio_probe() must have run first.
++       */
++      return of_mdiobus_register(am->mii_bus, pdev->dev.of_node);
++}
++
++static int ipq40xx_mdio_remove(struct platform_device *pdev)
++{
++      struct ipq40xx_mdio_data *am = platform_get_drvdata(pdev);
++
++      mdiobus_unregister(am->mii_bus);
++      return 0;
++}
++
++static const struct of_device_id ipq40xx_mdio_dt_ids[] = {
++      { .compatible = "qcom,ipq4019-mdio" },
++      { }
++};
++MODULE_DEVICE_TABLE(of, ipq40xx_mdio_dt_ids);
++
++static struct platform_driver ipq40xx_mdio_driver = {
++      .probe = ipq40xx_mdio_probe,
++      .remove = ipq40xx_mdio_remove,
++      .driver = {
++              .name = "ipq40xx-mdio",
++              .of_match_table = ipq40xx_mdio_dt_ids,
++      },
++};
++
++module_platform_driver(ipq40xx_mdio_driver);
++
++#define DRV_VERSION     "1.0"
++
++MODULE_DESCRIPTION("IPQ40XX MDIO interface driver");
++MODULE_AUTHOR("Qualcomm Atheros");
++MODULE_VERSION(DRV_VERSION);
++MODULE_LICENSE("Dual BSD/GPL");
diff --git a/target/linux/ipq40xx/patches-4.19/701-dts-ipq4019-add-mdio-node.patch b/target/linux/ipq40xx/patches-4.19/701-dts-ipq4019-add-mdio-node.patch
new file mode 100644 (file)
index 0000000..0e1bb87
--- /dev/null
@@ -0,0 +1,52 @@
+From 09ed737593f71bcca08a537a6c15264a1a6add08 Mon Sep 17 00:00:00 2001
+From: Christian Lamparter <chunkeey@gmail.com>
+Date: Sun, 20 Nov 2016 01:10:33 +0100
+Subject: [PATCH] dts: ipq4019: add mdio node for ethernet
+
+This patch adds the mdio device-tree node.
+This is where the switch is connected, so it's needed
+for the ethernet interfaces.
+
+Note: The driver isn't anywhere close to being upstream,
+so the info might change.
+---
+ arch/arm/boot/dts/qcom-ipq4019.dtsi | 28 ++++++++++++++++++++++++++++
+ 1 file changed, 28 insertions(+)
+
+--- a/arch/arm/boot/dts/qcom-ipq4019.dtsi
++++ b/arch/arm/boot/dts/qcom-ipq4019.dtsi
+@@ -566,6 +566,34 @@
+                       status = "disabled";
+               };
++              mdio@90000 {
++                      #address-cells = <1>;
++                      #size-cells = <0>;
++                      compatible = "qcom,ipq4019-mdio";
++                      reg = <0x90000 0x64>;
++                      status = "disabled";
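++                      /* boards that use the PHYs set status = "okay" in their own dts */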
++
++                      ethernet-phy@0 {
++                              reg = <0>;
++                      };
++
++                      ethernet-phy@1 {
++                              reg = <1>;
++                      };
++
++                      ethernet-phy@2 {
++                              reg = <2>;
++                      };
++
++                      ethernet-phy@3 {
++                              reg = <3>;
++                      };
++
++                      ethernet-phy@4 {
++                              reg = <4>;
++                      };
++              };
++
+               usb3_ss_phy: ssphy@9a000 {
+                       compatible = "qcom,usb-ss-ipq4019-phy";
+                       #phy-cells = <0>;
diff --git a/target/linux/ipq40xx/patches-4.19/702-dts-ipq4019-add-PHY-switch-nodes.patch b/target/linux/ipq40xx/patches-4.19/702-dts-ipq4019-add-PHY-switch-nodes.patch
new file mode 100644 (file)
index 0000000..03da6c8
--- /dev/null
@@ -0,0 +1,46 @@
+From 9deeec35dd3b628b95624e41d4e04acf728991ba Mon Sep 17 00:00:00 2001
+From: Christian Lamparter <chunkeey@gmail.com>
+Date: Sun, 20 Nov 2016 02:20:54 +0100
+Subject: [PATCH] dts: ipq4019: add PHY/switch nodes
+
+This patch adds both the "qcom,ess-switch" and "qcom,ess-psgmii"
+nodes which are needed for the ar40xx.c driver to initialize the
+switch.
+
+Signed-off-by: Christian Lamparter <chunkeey@gmail.com>
+---
+ arch/arm/boot/dts/qcom-ipq4019.dtsi | 23 +++++++++++++++++++++++
+ 1 file changed, 23 insertions(+)
+
+--- a/arch/arm/boot/dts/qcom-ipq4019.dtsi
++++ b/arch/arm/boot/dts/qcom-ipq4019.dtsi
+@@ -594,6 +594,29 @@
+                       };
+               };
++              ess-switch@c000000 {
++                      compatible = "qcom,ess-switch";
++                      reg = <0xc000000 0x80000>;
++                      switch_access_mode = "local bus";
++                      resets = <&gcc ESS_RESET>;
++                      reset-names = "ess_rst";
++                      clocks = <&gcc GCC_ESS_CLK>;
++                      clock-names = "ess_clk";
++                      switch_cpu_bmp = <0x1>;
++                      switch_lan_bmp = <0x1e>;
++                      switch_wan_bmp = <0x20>;
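++                      /* bit 0 = CPU port, bits 1-4 = LAN ports, bit 5 = WAN port */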
++                      switch_mac_mode = <0>; /* PORT_WRAPPER_PSGMII */
++                      switch_initvlas = <0x7c 0x54>;
++                      status = "disabled";
++              };
++
++              ess-psgmii@98000 {
++                      compatible = "qcom,ess-psgmii";
++                      reg = <0x98000 0x800>;
++                      psgmii_access_mode = "local bus";
++                      status = "disabled";
++              };
++
+               usb3_ss_phy: ssphy@9a000 {
+                       compatible = "qcom,usb-ss-ipq4019-phy";
+                       #phy-cells = <0>;
diff --git a/target/linux/ipq40xx/patches-4.19/703-net-IPQ4019-needs-rfs-vlan_tag-callbacks-in.patch b/target/linux/ipq40xx/patches-4.19/703-net-IPQ4019-needs-rfs-vlan_tag-callbacks-in.patch
new file mode 100644 (file)
index 0000000..cc7e497
--- /dev/null
@@ -0,0 +1,53 @@
+From 7c129254adb1093d10a62ed7bf7b956fcc6ffe34 Mon Sep 17 00:00:00 2001
+From: Rakesh Nair <ranair@codeaurora.org>
+Date: Wed, 20 Jul 2016 15:02:01 +0530
+Subject: [PATCH] net: IPQ4019 needs rfs/vlan_tag callbacks in
+ netdev_ops
+
+Add callback support to get default vlan tag and register
+receive flow steering filter.
+
+Used by IPQ4019 ess-edma driver.
+
+BUG=chrome-os-partner:33096
+TEST=none
+
+Change-Id: I266070e4a0fbe4a0d9966fe79a71e50ec4f26c75
+Signed-off-by: Rakesh Nair <ranair@codeaurora.org>
+Reviewed-on: https://chromium-review.googlesource.com/362203
+Commit-Ready: Grant Grundler <grundler@chromium.org>
+Tested-by: Grant Grundler <grundler@chromium.org>
+Reviewed-by: Grant Grundler <grundler@chromium.org>
+---
+ include/linux/netdevice.h | 13 +++++++++++++
+ 1 file changed, 13 insertions(+)
+
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -739,6 +739,16 @@ struct xps_map {
+ #define XPS_MIN_MAP_ALLOC ((L1_CACHE_ALIGN(offsetof(struct xps_map, queues[1])) \
+        - sizeof(struct xps_map)) / sizeof(u16))
++#ifdef CONFIG_RFS_ACCEL
++typedef int (*set_rfs_filter_callback_t)(struct net_device *dev,
++                                     __be32 src,
++                                     __be32 dst,
++                                     __be16 sport,
++                                     __be16 dport,
++                                     u8 proto,
++                                     u16 rxq_index,
++                                     u32 action);
++#endif
+ /*
+  * This structure holds all XPS maps for device.  Maps are indexed by CPU.
+  */
+@@ -1353,6 +1363,9 @@ struct net_device_ops {
+                                                    const struct sk_buff *skb,
+                                                    u16 rxq_index,
+                                                    u32 flow_id);
++        int                     (*ndo_register_rfs_filter)(struct net_device *dev,
++                                                              set_rfs_filter_callback_t set_filter);
++        int                     (*ndo_get_default_vlan_tag)(struct net_device *net);
+ #endif
+       int                     (*ndo_add_slave)(struct net_device *dev,
+                                                struct net_device *slave_dev,
diff --git a/target/linux/ipq40xx/patches-4.19/710-net-add-qualcomm-essedma-ethernet-driver.patch b/target/linux/ipq40xx/patches-4.19/710-net-add-qualcomm-essedma-ethernet-driver.patch
new file mode 100644 (file)
index 0000000..9fe3a1a
--- /dev/null
@@ -0,0 +1,4575 @@
+From 12e9319da1adacac92930c899c99f0e1970cac11 Mon Sep 17 00:00:00 2001
+From: Christian Lamparter <chunkeey@googlemail.com>
+Date: Thu, 19 Jan 2017 02:01:31 +0100
+Subject: [PATCH 33/38] NET: add qualcomm essedma ethernet driver
+
+Signed-off-by: Christian Lamparter <chunkeey@gmail.com>
+---
+ drivers/net/ethernet/qualcomm/Kconfig  | 9 +++++++++
+ drivers/net/ethernet/qualcomm/Makefile | 1 +
+ 2 files changed, 10 insertions(+)
+
+--- a/drivers/net/ethernet/qualcomm/Kconfig
++++ b/drivers/net/ethernet/qualcomm/Kconfig
+@@ -61,4 +61,13 @@ config QCOM_EMAC
+ source "drivers/net/ethernet/qualcomm/rmnet/Kconfig"
++config ESSEDMA
++      tristate "Qualcomm Atheros ESS EDMA support"
++      ---help---
++        This driver supports the Qualcomm Atheros ESS EDMA ethernet adapter.
++        Say Y to build this driver.
++
++        To compile this driver as a module, choose M here. The module
++        will be called essedma.
++
+ endif # NET_VENDOR_QUALCOMM
+--- a/drivers/net/ethernet/qualcomm/Makefile
++++ b/drivers/net/ethernet/qualcomm/Makefile
+@@ -10,5 +10,6 @@ obj-$(CONFIG_QCA7000_UART) += qcauart.o
+ qcauart-objs := qca_uart.o
+ obj-y += emac/
++obj-$(CONFIG_ESSEDMA) += essedma/
+ obj-$(CONFIG_RMNET) += rmnet/
+--- /dev/null
++++ b/drivers/net/ethernet/qualcomm/essedma/Makefile
+@@ -0,0 +1,9 @@
++#
++## Makefile for the Qualcomm Atheros ethernet edma driver
++#
++
++
++obj-$(CONFIG_ESSEDMA) += essedma.o
++
++essedma-objs := edma_axi.o edma.o edma_ethtool.o
++
+--- /dev/null
++++ b/drivers/net/ethernet/qualcomm/essedma/edma.c
+@@ -0,0 +1,2143 @@
++/*
++ * Copyright (c) 2014 - 2016, The Linux Foundation. All rights reserved.
++ *
++ * Permission to use, copy, modify, and/or distribute this software for
++ * any purpose with or without fee is hereby granted, provided that the
++ * above copyright notice and this permission notice appear in all copies.
++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
++ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
++ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
++ * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
++ */
++
++#include <linux/platform_device.h>
++#include <linux/if_vlan.h>
++#include "ess_edma.h"
++#include "edma.h"
++
++extern struct net_device *edma_netdev[EDMA_MAX_PORTID_SUPPORTED];
++bool edma_stp_rstp;
++u16 edma_ath_eth_type;
++
++/* edma_skb_priority_offset()
++ *    get edma skb priority
++ */
++static unsigned int edma_skb_priority_offset(struct sk_buff *skb)
++{
++      return (skb->priority >> 2) & 1;
++}
++
++/* edma_alloc_tx_ring()
++ *    Allocate Tx descriptors ring
++ */
++static int edma_alloc_tx_ring(struct edma_common_info *edma_cinfo,
++                            struct edma_tx_desc_ring *etdr)
++{
++      struct platform_device *pdev = edma_cinfo->pdev;
++
++      /* Initialize ring */
++      etdr->size = sizeof(struct edma_sw_desc) * etdr->count;
++      etdr->sw_next_to_fill = 0;
++      etdr->sw_next_to_clean = 0;
++
++      /* Allocate SW descriptors */
++      etdr->sw_desc = vzalloc(etdr->size);
++      if (!etdr->sw_desc) {
++              dev_err(&pdev->dev, "buffer alloc of tx ring failed=%p", etdr);
++              return -ENOMEM;
++      }
++
++      /* Allocate HW descriptors */
++      etdr->hw_desc = dma_alloc_coherent(&pdev->dev, etdr->size, &etdr->dma,
++                                        GFP_KERNEL);
++      if (!etdr->hw_desc) {
++              dev_err(&pdev->dev, "descriptor allocation for tx ring failed");
++              vfree(etdr->sw_desc);
++              return -ENOMEM;
++      }
++
++      return 0;
++}
++
++/* edma_free_tx_ring()
++ *    Free tx rings allocated by edma_alloc_tx_rings
++ */
++static void edma_free_tx_ring(struct edma_common_info *edma_cinfo,
++                            struct edma_tx_desc_ring *etdr)
++{
++      struct platform_device *pdev = edma_cinfo->pdev;
++
++      if (likely(etdr->dma))
++              dma_free_coherent(&pdev->dev, etdr->size, etdr->hw_desc,
++                               etdr->dma);
++
++      vfree(etdr->sw_desc);
++      etdr->sw_desc = NULL;
++}
++
++/* edma_alloc_rx_ring()
++ *    allocate rx descriptor ring
++ */
++static int edma_alloc_rx_ring(struct edma_common_info *edma_cinfo,
++                           struct edma_rfd_desc_ring *erxd)
++{
++      struct platform_device *pdev = edma_cinfo->pdev;
++
++      erxd->size = sizeof(struct edma_sw_desc) * erxd->count;
++      erxd->sw_next_to_fill = 0;
++      erxd->sw_next_to_clean = 0;
++
++      /* Allocate SW descriptors */
++      erxd->sw_desc = vzalloc(erxd->size);
++      if (!erxd->sw_desc)
++              return -ENOMEM;
++
++      /* Alloc HW descriptors */
++      erxd->hw_desc = dma_alloc_coherent(&pdev->dev, erxd->size, &erxd->dma,
++                      GFP_KERNEL);
++      if (!erxd->hw_desc) {
++              vfree(erxd->sw_desc);
++              return -ENOMEM;
++      }
++
++      return 0;
++}
++
++/* edma_free_rx_ring()
++ *    Free rx ring allocated by alloc_rx_ring
++ */
++static void edma_free_rx_ring(struct edma_common_info *edma_cinfo,
++                           struct edma_rfd_desc_ring *rxdr)
++{
++      struct platform_device *pdev = edma_cinfo->pdev;
++
++      if (likely(rxdr->dma))
++              dma_free_coherent(&pdev->dev, rxdr->size, rxdr->hw_desc,
++                               rxdr->dma);
++
++      vfree(rxdr->sw_desc);
++      rxdr->sw_desc = NULL;
++}
++
++/* edma_configure_tx()
++ *    Configure transmission control data
++ */
++static void edma_configure_tx(struct edma_common_info *edma_cinfo)
++{
++      u32 txq_ctrl_data;
++
++      txq_ctrl_data = (EDMA_TPD_BURST << EDMA_TXQ_NUM_TPD_BURST_SHIFT);
++      txq_ctrl_data |= EDMA_TXQ_CTRL_TPD_BURST_EN;
++      txq_ctrl_data |= (EDMA_TXF_BURST << EDMA_TXQ_TXF_BURST_NUM_SHIFT);
++      edma_write_reg(EDMA_REG_TXQ_CTRL, txq_ctrl_data);
++}
++
++
++/* edma_configure_rx()
++ *    configure reception control data
++ */
++static void edma_configure_rx(struct edma_common_info *edma_cinfo)
++{
++      struct edma_hw *hw = &edma_cinfo->hw;
++      u32 rss_type, rx_desc1, rxq_ctrl_data;
++
++      /* Set RSS type */
++      rss_type = hw->rss_type;
++      edma_write_reg(EDMA_REG_RSS_TYPE, rss_type);
++
++      /* Set RFD burst number */
++      rx_desc1 = (EDMA_RFD_BURST << EDMA_RXQ_RFD_BURST_NUM_SHIFT);
++
++      /* Set RFD prefetch threshold */
++      rx_desc1 |= (EDMA_RFD_THR << EDMA_RXQ_RFD_PF_THRESH_SHIFT);
++
++      /* Set the RFD low threshold in the host ring to generate an interrupt */
++      rx_desc1 |= (EDMA_RFD_LTHR << EDMA_RXQ_RFD_LOW_THRESH_SHIFT);
++      edma_write_reg(EDMA_REG_RX_DESC1, rx_desc1);
++
++      /* Set the Rx FIFO threshold at which to start DMAing data to the host */
++      rxq_ctrl_data = EDMA_FIFO_THRESH_128_BYTE;
++
++      /* Set RX remove vlan bit */
++      rxq_ctrl_data |= EDMA_RXQ_CTRL_RMV_VLAN;
++
++      edma_write_reg(EDMA_REG_RXQ_CTRL, rxq_ctrl_data);
++}
++
++/* edma_alloc_rx_buf()
++ *    does skb allocation for the received packets.
++ */
++static int edma_alloc_rx_buf(struct edma_common_info
++                           *edma_cinfo,
++                           struct edma_rfd_desc_ring *erdr,
++                           int cleaned_count, int queue_id)
++{
++      struct platform_device *pdev = edma_cinfo->pdev;
++      struct edma_rx_free_desc *rx_desc;
++      struct edma_sw_desc *sw_desc;
++      struct sk_buff *skb;
++      unsigned int i;
++      u16 prod_idx, length;
++      u32 reg_data;
++
++      if (cleaned_count > erdr->count) {
++              dev_err(&pdev->dev, "Incorrect cleaned_count %d",
++                     cleaned_count);
++              return -1;
++      }
++
++      i = erdr->sw_next_to_fill;
++
++      while (cleaned_count) {
++              sw_desc = &erdr->sw_desc[i];
++              length = edma_cinfo->rx_head_buffer_len;
++
++              if (sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_REUSE) {
++                      skb = sw_desc->skb;
++              } else {
++                      /* alloc skb */
++                      skb = netdev_alloc_skb(edma_netdev[0], length);
++                      if (!skb) {
++                              /* Better luck next round */
++                              break;
++                      }
++              }
++
++              if (edma_cinfo->page_mode) {
++                      struct page *pg = alloc_page(GFP_ATOMIC);
++
++                      if (!pg) {
++                              dev_kfree_skb_any(skb);
++                              break;
++                      }
++
++                      sw_desc->dma = dma_map_page(&pdev->dev, pg, 0,
++                                                 edma_cinfo->rx_page_buffer_len,
++                                                 DMA_FROM_DEVICE);
++                      if (dma_mapping_error(&pdev->dev,
++                                  sw_desc->dma)) {
++                              __free_page(pg);
++                              dev_kfree_skb_any(skb);
++                              break;
++                      }
++
++                      skb_fill_page_desc(skb, 0, pg, 0,
++                                         edma_cinfo->rx_page_buffer_len);
++                      sw_desc->flags = EDMA_SW_DESC_FLAG_SKB_FRAG;
++                      sw_desc->length = edma_cinfo->rx_page_buffer_len;
++              } else {
++                      sw_desc->dma = dma_map_single(&pdev->dev, skb->data,
++                                                   length, DMA_FROM_DEVICE);
++                      if (dma_mapping_error(&pdev->dev,
++                         sw_desc->dma)) {
++                              dev_kfree_skb_any(skb);
++                              break;
++                      }
++
++                      sw_desc->flags = EDMA_SW_DESC_FLAG_SKB_HEAD;
++                      sw_desc->length = length;
++              }
++
++              /* Update the buffer info */
++              sw_desc->skb = skb;
++              rx_desc = (&((struct edma_rx_free_desc *)(erdr->hw_desc))[i]);
++              rx_desc->buffer_addr = cpu_to_le64(sw_desc->dma);
++              if (++i == erdr->count)
++                      i = 0;
++              cleaned_count--;
++      }
++
++      erdr->sw_next_to_fill = i;
++
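++      /* the hardware wants the index of the last slot actually filled */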
++      if (i == 0)
++              prod_idx = erdr->count - 1;
++      else
++              prod_idx = i - 1;
++
++      /* Update the producer index */
++      edma_read_reg(EDMA_REG_RFD_IDX_Q(queue_id), &reg_data);
++      reg_data &= ~EDMA_RFD_PROD_IDX_BITS;
++      reg_data |= prod_idx;
++      edma_write_reg(EDMA_REG_RFD_IDX_Q(queue_id), reg_data);
++      return cleaned_count;
++}
++
++/* edma_init_desc()
++ *    update descriptor ring size, buffer and producer/consumer index
++ */
++static void edma_init_desc(struct edma_common_info *edma_cinfo)
++{
++      struct edma_rfd_desc_ring *rfd_ring;
++      struct edma_tx_desc_ring *etdr;
++      int i = 0, j = 0;
++      u32 data = 0;
++      u16 hw_cons_idx = 0;
++
++      /* Set the base address of every TPD ring. */
++      for (i = 0; i < edma_cinfo->num_tx_queues; i++) {
++              etdr = edma_cinfo->tpd_ring[i];
++
++              /* Update descriptor ring base address */
++              edma_write_reg(EDMA_REG_TPD_BASE_ADDR_Q(i), (u32)etdr->dma);
++              edma_read_reg(EDMA_REG_TPD_IDX_Q(i), &data);
++
++              /* Calculate hardware consumer index */
++              hw_cons_idx = (data >> EDMA_TPD_CONS_IDX_SHIFT) & 0xffff;
++              etdr->sw_next_to_fill = hw_cons_idx;
++              etdr->sw_next_to_clean = hw_cons_idx;
++              data &= ~(EDMA_TPD_PROD_IDX_MASK << EDMA_TPD_PROD_IDX_SHIFT);
++              data |= hw_cons_idx;
++
++              /* update producer index */
++              edma_write_reg(EDMA_REG_TPD_IDX_Q(i), data);
++
++              /* update SW consumer index register */
++              edma_write_reg(EDMA_REG_TX_SW_CONS_IDX_Q(i), hw_cons_idx);
++
++              /* Set TPD ring size */
++              edma_write_reg(EDMA_REG_TPD_RING_SIZE,
++                             edma_cinfo->tx_ring_count &
++                                  EDMA_TPD_RING_SIZE_MASK);
++      }
++
++      for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) {
++              rfd_ring = edma_cinfo->rfd_ring[j];
++              /* Update Receive Free descriptor ring base address */
++              edma_write_reg(EDMA_REG_RFD_BASE_ADDR_Q(j),
++                      (u32)(rfd_ring->dma));
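++              /* with four rx queues only the even-numbered rfd rings are backed */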
++              j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
++      }
++
++      data = edma_cinfo->rx_head_buffer_len;
++      if (edma_cinfo->page_mode)
++              data = edma_cinfo->rx_page_buffer_len;
++
++      data &= EDMA_RX_BUF_SIZE_MASK;
++      data <<= EDMA_RX_BUF_SIZE_SHIFT;
++
++      /* Update RFD ring size and RX buffer size */
++      data |= (edma_cinfo->rx_ring_count & EDMA_RFD_RING_SIZE_MASK)
++              << EDMA_RFD_RING_SIZE_SHIFT;
++
++      edma_write_reg(EDMA_REG_RX_DESC0, data);
++
++      /* Disable TX FIFO low watermark and high watermark */
++      edma_write_reg(EDMA_REG_TXF_WATER_MARK, 0);
++
++      /* Load all of base address above */
++      edma_read_reg(EDMA_REG_TX_SRAM_PART, &data);
++      data |= 1 << EDMA_LOAD_PTR_SHIFT;
++      edma_write_reg(EDMA_REG_TX_SRAM_PART, data);
++}
++
++/* edma_receive_checksum
++ *    API to check the checksum on received packets
++ */
++static void edma_receive_checksum(struct edma_rx_return_desc *rd,
++                                               struct sk_buff *skb)
++{
++      skb_checksum_none_assert(skb);
++
++      /* check the RRD IP/L4 checksum bit to see if
++       * it is set, which in turn indicates checksum
++       * failure.
++       */
++      if (rd->rrd6 & EDMA_RRD_CSUM_FAIL_MASK)
++              return;
++
++      skb->ip_summed = CHECKSUM_UNNECESSARY;
++}
++
++/* edma_clean_rfd()
++ *    clean up rx resources on error
++ */
++static void edma_clean_rfd(struct edma_rfd_desc_ring *erdr, u16 index)
++{
++      struct edma_rx_free_desc *rx_desc;
++      struct edma_sw_desc *sw_desc;
++
++      rx_desc = (&((struct edma_rx_free_desc *)(erdr->hw_desc))[index]);
++      sw_desc = &erdr->sw_desc[index];
++      if (sw_desc->skb) {
++              dev_kfree_skb_any(sw_desc->skb);
++              sw_desc->skb = NULL;
++      }
++
++      memset(rx_desc, 0, sizeof(struct edma_rx_free_desc));
++}
++
++/* edma_rx_complete_stp_rstp()
++ *    Complete Rx processing for stp/rstp: insert the Atheros
++ *    header in front of BPDU frames
++ */
++static void edma_rx_complete_stp_rstp(struct sk_buff *skb, int port_id, struct edma_rx_return_desc *rd)
++{
++      int i;
++      u32 priority;
++      u16 port_type;
++      u8 mac_addr[EDMA_ETH_HDR_LEN];
++
++      port_type = (rd->rrd1 >> EDMA_RRD_PORT_TYPE_SHIFT)
++                              & EDMA_RRD_PORT_TYPE_MASK;
++      /* only proceed with the stp/rstp handling
++       * if the port type is 0x4
++       */
++      if (port_type == EDMA_RX_ATH_HDR_RSTP_PORT_TYPE) {
++              u8 bpdu_mac[6] = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x00};
++
++              /* calculate the frame priority */
++              priority = (rd->rrd1 >> EDMA_RRD_PRIORITY_SHIFT)
++                      & EDMA_RRD_PRIORITY_MASK;
++
++              for (i = 0; i < EDMA_ETH_HDR_LEN; i++)
++                      mac_addr[i] = skb->data[i];
++
++              /* Check if destination mac addr is bpdu addr */
++              if (!memcmp(mac_addr, bpdu_mac, 6)) {
++                      /* destination mac address is BPDU
++                       * destination mac address, then add
++                       * atheros header to the packet.
++                       */
++                      u16 athr_hdr = (EDMA_RX_ATH_HDR_VERSION << EDMA_RX_ATH_HDR_VERSION_SHIFT) |
++                              (priority << EDMA_RX_ATH_HDR_PRIORITY_SHIFT) |
++                              (EDMA_RX_ATH_HDR_RSTP_PORT_TYPE << EDMA_RX_ATH_PORT_TYPE_SHIFT) | port_id;
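++                      /* make room for the 4-byte Atheros header between
++                       * the MAC addresses and the ethertype;
++                       * EDMA_ETH_HDR_LEN presumably covers the 12-byte
++                       * dst + src MAC pair
++                       */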
++                      skb_push(skb, 4);
++                      memcpy(skb->data, mac_addr, EDMA_ETH_HDR_LEN);
++                      *(uint16_t *)&skb->data[12] = htons(edma_ath_eth_type);
++                      *(uint16_t *)&skb->data[14] = htons(athr_hdr);
++              }
++      }
++}
++
++/*
++ * edma_rx_complete_fraglist()
++ *    Complete Rx processing for fraglist skbs
++ */
++static int edma_rx_complete_fraglist(struct sk_buff *skb, u16 num_rfds, u16 length, u32 sw_next_to_clean,
++                                      u16 *cleaned_count, struct edma_rfd_desc_ring *erdr, struct edma_common_info *edma_cinfo)
++{
++      struct platform_device *pdev = edma_cinfo->pdev;
++      struct edma_hw *hw = &edma_cinfo->hw;
++      struct sk_buff *skb_temp;
++      struct sk_buff *skb_prev = NULL;        /* chains fragments across iterations */
++      struct edma_sw_desc *sw_desc;
++      int i;
++      u16 size_remaining;
++
++      skb->data_len = 0;
++      skb->tail += (hw->rx_head_buff_size - 16);
++      skb->len = skb->truesize = length;
++      size_remaining = length - (hw->rx_head_buff_size - 16);
++
++      /* clean-up all related sw_descs */
++      for (i = 1; i < num_rfds; i++) {
++              sw_desc = &erdr->sw_desc[sw_next_to_clean];
++              skb_temp = sw_desc->skb;
++
++              dma_unmap_single(&pdev->dev, sw_desc->dma,
++                      sw_desc->length, DMA_FROM_DEVICE);
++
++              if (size_remaining < hw->rx_head_buff_size)
++                      skb_put(skb_temp, size_remaining);
++              else
++                      skb_put(skb_temp, hw->rx_head_buff_size);
++
++              /*
++               * For the first additional RFD we start the head
++               * skb's frag_list; every later fragment is chained
++               * via ->next.
++               */
++              if (i == 1)
++                      skb_shinfo(skb)->frag_list = skb_temp;
++              else
++                      skb_prev->next = skb_temp;
++              skb_prev = skb_temp;
++              skb_temp->next = NULL;
++
++              skb->data_len += skb_temp->len;
++              size_remaining -= skb_temp->len;
++
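++              /* ring counts are powers of two, so wrap with a mask, not a modulo */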
++              /* Increment SW index */
++              sw_next_to_clean = (sw_next_to_clean + 1) & (erdr->count - 1);
++              (*cleaned_count)++;
++      }
++
++      return sw_next_to_clean;
++}
++
++/* edma_rx_complete_paged()
++ *    Complete Rx processing for paged skbs
++ */
++static int edma_rx_complete_paged(struct sk_buff *skb, u16 num_rfds, u16 length, u32 sw_next_to_clean,
++                                      u16 *cleaned_count, struct edma_rfd_desc_ring *erdr, struct edma_common_info *edma_cinfo)
++{
++      struct platform_device *pdev = edma_cinfo->pdev;
++      struct sk_buff *skb_temp;
++      struct edma_sw_desc *sw_desc;
++      int i;
++      u16 size_remaining;
++
++      skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
++
++      /* Setup skbuff fields */
++      skb->len = length;
++
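++      /* the first 16 bytes of each buffer hold the RRD, hence the offset 16 below */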
++      if (likely(num_rfds <= 1)) {
++              skb->data_len = length;
++              skb->truesize += edma_cinfo->rx_page_buffer_len;
++              skb_fill_page_desc(skb, 0, skb_frag_page(frag),
++                              16, length);
++      } else {
++              frag->size -= 16;
++              skb->data_len = frag->size;
++              skb->truesize += edma_cinfo->rx_page_buffer_len;
++              size_remaining = length - frag->size;
++
++              skb_fill_page_desc(skb, 0, skb_frag_page(frag),
++                              16, frag->size);
++
++              /* clean-up all related sw_descs */
++              for (i = 1; i < num_rfds; i++) {
++                      sw_desc = &erdr->sw_desc[sw_next_to_clean];
++                      skb_temp = sw_desc->skb;
++                      frag = &skb_shinfo(skb_temp)->frags[0];
++                      dma_unmap_page(&pdev->dev, sw_desc->dma,
++                              sw_desc->length, DMA_FROM_DEVICE);
++
++                      if (size_remaining < edma_cinfo->rx_page_buffer_len)
++                              frag->size = size_remaining;
++
++                      skb_fill_page_desc(skb, i, skb_frag_page(frag),
++                                      0, frag->size);
++
++                      skb_shinfo(skb_temp)->nr_frags = 0;
++                      dev_kfree_skb_any(skb_temp);
++
++                      skb->data_len += frag->size;
++                      skb->truesize += edma_cinfo->rx_page_buffer_len;
++                      size_remaining -= frag->size;
++
++                      /* Increment SW index */
++                      sw_next_to_clean = (sw_next_to_clean + 1) & (erdr->count - 1);
++                      (*cleaned_count)++;
++              }
++      }
++
++      return sw_next_to_clean;
++}
++
++/*
++ * edma_rx_complete()
++ *    Main API called from the poll function to process rx packets.
++ */
++static void edma_rx_complete(struct edma_common_info *edma_cinfo,
++                          int *work_done, int work_to_do, int queue_id,
++                          struct napi_struct *napi)
++{
++      struct platform_device *pdev = edma_cinfo->pdev;
++      struct edma_rfd_desc_ring *erdr = edma_cinfo->rfd_ring[queue_id];
++      struct net_device *netdev;
++      struct edma_adapter *adapter;
++      struct edma_sw_desc *sw_desc;
++      struct sk_buff *skb;
++      struct edma_rx_return_desc *rd;
++      u16 hash_type, rrd[8], cleaned_count = 0, length = 0, num_rfds = 1,
++          sw_next_to_clean, hw_next_to_clean = 0, vlan = 0, ret_count = 0;
++      u32 data = 0;
++      u8 *vaddr;
++      int port_id, i, drop_count = 0;
++      u32 priority;
++      u16 count = erdr->count, rfd_avail;
++      u8 queue_to_rxid[8] = {0, 0, 1, 1, 2, 2, 3, 3};
++
++      sw_next_to_clean = erdr->sw_next_to_clean;
++
++      edma_read_reg(EDMA_REG_RFD_IDX_Q(queue_id), &data);
++      hw_next_to_clean = (data >> EDMA_RFD_CONS_IDX_SHIFT) &
++                         EDMA_RFD_CONS_IDX_MASK;
++
++      do {
++              while (sw_next_to_clean != hw_next_to_clean) {
++                      if (!work_to_do)
++                              break;
++
++                      sw_desc = &erdr->sw_desc[sw_next_to_clean];
++                      skb = sw_desc->skb;
++
++                      /* Unmap the allocated buffer */
++                      if (likely(sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_HEAD))
++                              dma_unmap_single(&pdev->dev, sw_desc->dma,
++                                              sw_desc->length, DMA_FROM_DEVICE);
++                      else
++                              dma_unmap_page(&pdev->dev, sw_desc->dma,
++                                            sw_desc->length, DMA_FROM_DEVICE);
++
++                      /* Get RRD */
++                      if (edma_cinfo->page_mode) {
++                              vaddr = kmap_atomic(skb_frag_page(&skb_shinfo(skb)->frags[0]));
++                              memcpy((uint8_t *)&rrd[0], vaddr, 16);
++                              rd = (struct edma_rx_return_desc *)rrd;
++                              kunmap_atomic(vaddr);
++                      } else {
++                              rd = (struct edma_rx_return_desc *)skb->data;
++                      }
++
++                      /* Check if RRD is valid */
++                      if (!(rd->rrd7 & EDMA_RRD_DESC_VALID)) {
++                              edma_clean_rfd(erdr, sw_next_to_clean);
++                              sw_next_to_clean = (sw_next_to_clean + 1) &
++                                                 (erdr->count - 1);
++                              cleaned_count++;
++                              continue;
++                      }
++
++                      /* Get the number of RFDs from RRD */
++                      num_rfds = rd->rrd1 & EDMA_RRD_NUM_RFD_MASK;
++
++                      /* Get Rx port ID from switch */
++                      port_id = (rd->rrd1 >> EDMA_PORT_ID_SHIFT) & EDMA_PORT_ID_MASK;
++                      if ((!port_id) || (port_id > EDMA_MAX_PORTID_SUPPORTED)) {
++                              dev_err(&pdev->dev, "Invalid RRD source port bit set");
++                              for (i = 0; i < num_rfds; i++) {
++                                      edma_clean_rfd(erdr, sw_next_to_clean);
++                                      sw_next_to_clean = (sw_next_to_clean + 1) & (erdr->count - 1);
++                                      cleaned_count++;
++                              }
++                              continue;
++                      }
++
++                      /* check if we have a sink for the data we receive.
++                       * If the interface isn't set up, we have to drop the
++                       * incoming data for now.
++                       */
++                      netdev = edma_cinfo->portid_netdev_lookup_tbl[port_id];
++                      if (!netdev) {
++                              edma_clean_rfd(erdr, sw_next_to_clean);
++                              sw_next_to_clean = (sw_next_to_clean + 1) &
++                                                 (erdr->count - 1);
++                              cleaned_count++;
++                              continue;
++                      }
++                      adapter = netdev_priv(netdev);
++
++                      /* This code handles the case where a high
++                       * priority stream and a low priority stream
++                       * arrive simultaneously. The problem occurs
++                       * if one of the Rx rings is full and the
++                       * corresponding core is busy with other work;
++                       * the ESS CPU port then backpressures all
++                       * incoming traffic, including the high
++                       * priority stream. We monitor the free
++                       * descriptor count on each CPU and, whenever
++                       * it drops below the threshold (< 80), drop
++                       * all low priority traffic so that only high
++                       * priority traffic passes through. This keeps
++                       * the ESS CPU port from backpressuring the
++                       * high priority stream.
++                       */
++                      priority = (rd->rrd1 >> EDMA_RRD_PRIORITY_SHIFT)
++                              & EDMA_RRD_PRIORITY_MASK;
++                      if (likely(!priority && !edma_cinfo->page_mode && (num_rfds <= 1))) {
++                              rfd_avail = (count + sw_next_to_clean - hw_next_to_clean - 1) & (count - 1);
++                              if (rfd_avail < EDMA_RFD_AVAIL_THR) {
++                                      sw_desc->flags = EDMA_SW_DESC_FLAG_SKB_REUSE;
++                                      sw_next_to_clean = (sw_next_to_clean + 1) & (erdr->count - 1);
++                                      adapter->stats.rx_dropped++;
++                                      cleaned_count++;
++                                      drop_count++;
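++                                      /* Every third dropped frame consumes
++                                       * one unit of the NAPI budget.
++                                       */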
++                                      if (drop_count == 3) {
++                                              work_to_do--;
++                                              (*work_done)++;
++                                              drop_count = 0;
++                                      }
++                                      if (cleaned_count == EDMA_RX_BUFFER_WRITE) {
++                                              /* If buffer clean count reaches 16, we replenish HW buffers. */
++                                              ret_count = edma_alloc_rx_buf(edma_cinfo, erdr, cleaned_count, queue_id);
++                                              edma_write_reg(EDMA_REG_RX_SW_CONS_IDX_Q(queue_id),
++                                                            sw_next_to_clean);
++                                              cleaned_count = ret_count;
++                                      }
++                                      continue;
++                              }
++                      }
++
++                      work_to_do--;
++                      (*work_done)++;
++
++                      /* Increment SW index */
++                      sw_next_to_clean = (sw_next_to_clean + 1) &
++                                         (erdr->count - 1);
++
++                      cleaned_count++;
++
++                      /* Get the packet size and allocate buffer */
++                      length = rd->rrd6 & EDMA_RRD_PKT_SIZE_MASK;
++
++                      if (edma_cinfo->page_mode) {
++                              /* paged skb */
++                              sw_next_to_clean = edma_rx_complete_paged(skb, num_rfds, length, sw_next_to_clean, &cleaned_count, erdr, edma_cinfo);
++                              if (!pskb_may_pull(skb, ETH_HLEN)) {
++                                      dev_kfree_skb_any(skb);
++                                      continue;
++                              }
++                      } else {
++                              /* single or fraglist skb */
++
++                              /* The first 16 bytes of the packet hold the
++                               * RRD descriptor, so the actual data starts
++                               * at an offset of 16 bytes.
++                               */
++                              skb_reserve(skb, 16);
++                              if (likely((num_rfds <= 1) || !edma_cinfo->fraglist_mode)) {
++                                      skb_put(skb, length);
++                              } else {
++                                      sw_next_to_clean = edma_rx_complete_fraglist(skb, num_rfds, length, sw_next_to_clean, &cleaned_count, erdr, edma_cinfo);
++                              }
++                      }
++
++                      if (edma_stp_rstp) {
++                              edma_rx_complete_stp_rstp(skb, port_id, rd);
++                      }
++
++                      skb->protocol = eth_type_trans(skb, netdev);
++
++                      /* Record Rx queue for RFS/RPS and fill flow hash from HW */
++                      skb_record_rx_queue(skb, queue_to_rxid[queue_id]);
++                      if (netdev->features & NETIF_F_RXHASH) {
++                              hash_type = (rd->rrd5 >> EDMA_HASH_TYPE_SHIFT);
++                              if ((hash_type > EDMA_HASH_TYPE_START) && (hash_type < EDMA_HASH_TYPE_END))
++                                      skb_set_hash(skb, rd->rrd2, PKT_HASH_TYPE_L4);
++                      }
++
++#ifdef CONFIG_NF_FLOW_COOKIE
++                      skb->flow_cookie = rd->rrd3 & EDMA_RRD_FLOW_COOKIE_MASK;
++#endif
++                      edma_receive_checksum(rd, skb);
++
++                      /* Process VLAN HW acceleration indication provided by HW */
++                      if (unlikely(adapter->default_vlan_tag != rd->rrd4)) {
++                              vlan = rd->rrd4;
++                              if (likely(rd->rrd7 & EDMA_RRD_CVLAN))
++                                      __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan);
++                              else if (rd->rrd1 & EDMA_RRD_SVLAN)
++                                      __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD), vlan);
++                      }
++
++                      /* Update rx statistics */
++                      adapter->stats.rx_packets++;
++                      adapter->stats.rx_bytes += length;
++
++                      /* Check if we reached refill threshold */
++                      if (cleaned_count == EDMA_RX_BUFFER_WRITE) {
++                              ret_count = edma_alloc_rx_buf(edma_cinfo, erdr, cleaned_count, queue_id);
++                              edma_write_reg(EDMA_REG_RX_SW_CONS_IDX_Q(queue_id),
++                                            sw_next_to_clean);
++                              cleaned_count = ret_count;
++                      }
++
++                      /* At this point skb should go to stack */
++                      napi_gro_receive(napi, skb);
++              }
++
++              /* Check if we still have NAPI budget */
++              if (!work_to_do)
++                      break;
++
++              /* Read index once again since we still have NAPI budget */
++              edma_read_reg(EDMA_REG_RFD_IDX_Q(queue_id), &data);
++              hw_next_to_clean = (data >> EDMA_RFD_CONS_IDX_SHIFT) &
++                      EDMA_RFD_CONS_IDX_MASK;
++      } while (hw_next_to_clean != sw_next_to_clean);
++
++      erdr->sw_next_to_clean = sw_next_to_clean;
++
++      /* Refill here in case refill threshold wasn't reached */
++      if (likely(cleaned_count)) {
++              ret_count = edma_alloc_rx_buf(edma_cinfo, erdr, cleaned_count, queue_id);
++              if (ret_count)
++                      dev_dbg(&pdev->dev, "Not all buffers were reallocated");
++              edma_write_reg(EDMA_REG_RX_SW_CONS_IDX_Q(queue_id),
++                            erdr->sw_next_to_clean);
++      }
++}
++
++/* edma_delete_rfs_filter()
++ *    Remove RFS filter from switch
++ */
++static int edma_delete_rfs_filter(struct edma_adapter *adapter,
++                               struct edma_rfs_filter_node *filter_node)
++{
++      int res = -1;
++
++      struct flow_keys *keys = &filter_node->keys;
++
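++      /* A last argument of 0 asks the registered ESS switch
++       * callback to remove the rule rather than add it.
++       */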
++      if (likely(adapter->set_rfs_rule))
++              res = (*adapter->set_rfs_rule)(adapter->netdev,
++                      flow_get_u32_src(keys), flow_get_u32_dst(keys),
++                      keys->ports.src, keys->ports.dst,
++                      keys->basic.ip_proto, filter_node->rq_id, 0);
++
++      return res;
++}
++
++/* edma_add_rfs_filter()
++ *    Add RFS filter to switch
++ */
++static int edma_add_rfs_filter(struct edma_adapter *adapter,
++                             struct flow_keys *keys, u16 rq,
++                             struct edma_rfs_filter_node *filter_node)
++{
++      int res = -1;
++
++      struct flow_keys *dest_keys = &filter_node->keys;
++
++      /* Copy the dissected flow keys into the filter node */
++      memcpy(dest_keys, keys, sizeof(*dest_keys));
++      /* Call callback registered by ESS driver */
++      if (likely(adapter->set_rfs_rule))
++              res = (*adapter->set_rfs_rule)(adapter->netdev, flow_get_u32_src(keys),
++                    flow_get_u32_dst(keys), keys->ports.src, keys->ports.dst,
++                    keys->basic.ip_proto, rq, 1);
++
++      return res;
++}
++
++/* edma_rfs_key_search()
++ *    Look for existing RFS entry
++ */
++static struct edma_rfs_filter_node *edma_rfs_key_search(struct hlist_head *h,
++                                                     struct flow_keys *key)
++{
++      struct edma_rfs_filter_node *p;
++
++      hlist_for_each_entry(p, h, node)
++              if (flow_get_u32_src(&p->keys) == flow_get_u32_src(key) &&
++                  flow_get_u32_dst(&p->keys) == flow_get_u32_dst(key) &&
++                  p->keys.ports.src == key->ports.src &&
++                  p->keys.ports.dst == key->ports.dst &&
++                  p->keys.basic.ip_proto == key->basic.ip_proto)
++                      return p;
++      return NULL;
++}
++
++/* edma_initialise_rfs_flow_table()
++ *    Initialise EDMA RFS flow table
++ */
++static void edma_initialise_rfs_flow_table(struct edma_adapter *adapter)
++{
++      int i;
++
++      spin_lock_init(&adapter->rfs.rfs_ftab_lock);
++
++      /* Initialize EDMA flow hash table */
++      for (i = 0; i < EDMA_RFS_FLOW_ENTRIES; i++)
++              INIT_HLIST_HEAD(&adapter->rfs.hlist_head[i]);
++
++      adapter->rfs.max_num_filter = EDMA_RFS_FLOW_ENTRIES;
++      adapter->rfs.filter_available = adapter->rfs.max_num_filter;
++      adapter->rfs.hashtoclean = 0;
++
++      /* Add timer to get periodic RFS updates from OS */
++      timer_setup(&adapter->rfs.expire_rfs, edma_flow_may_expire, 0);
++      mod_timer(&adapter->rfs.expire_rfs, jiffies + HZ / 4);
++}
++
++/* edma_free_rfs_flow_table()
++ *    Free EDMA RFS flow table
++ */
++static void edma_free_rfs_flow_table(struct edma_adapter *adapter)
++{
++      int i;
++
++      /* Remove sync timer */
++      del_timer_sync(&adapter->rfs.expire_rfs);
++      spin_lock_bh(&adapter->rfs.rfs_ftab_lock);
++
++      /* Free EDMA RFS table entries */
++      adapter->rfs.filter_available = 0;
++
++      /* Clean-up EDMA flow hash table */
++      for (i = 0; i < EDMA_RFS_FLOW_ENTRIES; i++) {
++              struct hlist_head *hhead;
++              struct hlist_node *tmp;
++              struct edma_rfs_filter_node *filter_node;
++              int res;
++
++              hhead = &adapter->rfs.hlist_head[i];
++              hlist_for_each_entry_safe(filter_node, tmp, hhead, node) {
++                      res  = edma_delete_rfs_filter(adapter, filter_node);
++                      if (res < 0)
++                              dev_warn(&adapter->netdev->dev,
++                                      "EDMA going down but RFS entry %d not allowed to be flushed by Switch",
++                                      filter_node->flow_id);
++                      hlist_del(&filter_node->node);
++                      kfree(filter_node);
++              }
++      }
++      spin_unlock_bh(&adapter->rfs.rfs_ftab_lock);
++}
++
++/* edma_tx_unmap_and_free()
++ *    clean TX buffer
++ */
++static inline void edma_tx_unmap_and_free(struct platform_device *pdev,
++                                       struct edma_sw_desc *sw_desc)
++{
++      struct sk_buff *skb = sw_desc->skb;
++
++      if (likely((sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_HEAD) ||
++                      (sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_FRAGLIST)))
++              /* unmap_single for skb head area */
++              dma_unmap_single(&pdev->dev, sw_desc->dma,
++                              sw_desc->length, DMA_TO_DEVICE);
++      else if (sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_FRAG)
++              /* unmap page for paged fragments */
++              dma_unmap_page(&pdev->dev, sw_desc->dma,
++                            sw_desc->length, DMA_TO_DEVICE);
++
++      if (likely(sw_desc->flags & EDMA_SW_DESC_FLAG_LAST))
++              dev_kfree_skb_any(skb);
++
++      sw_desc->flags = 0;
++}
++
++/* edma_tx_complete()
++ *    Used to clean tx queues and update hardware and consumer index
++ */
++static void edma_tx_complete(struct edma_common_info *edma_cinfo, int queue_id)
++{
++      struct edma_tx_desc_ring *etdr = edma_cinfo->tpd_ring[queue_id];
++      struct edma_sw_desc *sw_desc;
++      struct platform_device *pdev = edma_cinfo->pdev;
++      int i;
++
++      u16 sw_next_to_clean = etdr->sw_next_to_clean;
++      u16 hw_next_to_clean;
++      u32 data = 0;
++
++      edma_read_reg(EDMA_REG_TPD_IDX_Q(queue_id), &data);
++      hw_next_to_clean = (data >> EDMA_TPD_CONS_IDX_SHIFT) & EDMA_TPD_CONS_IDX_MASK;
++
++      /* clean the buffer here */
++      while (sw_next_to_clean != hw_next_to_clean) {
++              sw_desc = &etdr->sw_desc[sw_next_to_clean];
++              edma_tx_unmap_and_free(pdev, sw_desc);
++              sw_next_to_clean = (sw_next_to_clean + 1) & (etdr->count - 1);
++      }
++
++      etdr->sw_next_to_clean = sw_next_to_clean;
++
++      /* update the TPD consumer index register */
++      edma_write_reg(EDMA_REG_TX_SW_CONS_IDX_Q(queue_id), sw_next_to_clean);
++
++      /* Wake the queue if queue is stopped and netdev link is up */
++      for (i = 0; i < EDMA_MAX_NETDEV_PER_QUEUE && etdr->nq[i]; i++) {
++              if (netif_tx_queue_stopped(etdr->nq[i])) {
++                      if ((etdr->netdev[i]) && netif_carrier_ok(etdr->netdev[i]))
++                              netif_tx_wake_queue(etdr->nq[i]);
++              }
++      }
++}
++
++/* edma_get_tx_buffer()
++ *    Get sw_desc corresponding to the TPD
++ */
++static struct edma_sw_desc *edma_get_tx_buffer(struct edma_common_info *edma_cinfo,
++                                             struct edma_tx_desc *tpd, int queue_id)
++{
++      struct edma_tx_desc_ring *etdr = edma_cinfo->tpd_ring[queue_id];
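++
++      /* The TPD's offset from the ring base is its ring index;
++       * the software descriptor array is indexed the same way.
++       */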
++      return &etdr->sw_desc[tpd - (struct edma_tx_desc *)etdr->hw_desc];
++}
++
++/* edma_get_next_tpd()
++ *    Return a TPD descriptor for transfer
++ */
++static struct edma_tx_desc *edma_get_next_tpd(struct edma_common_info *edma_cinfo,
++                                           int queue_id)
++{
++      struct edma_tx_desc_ring *etdr = edma_cinfo->tpd_ring[queue_id];
++      u16 sw_next_to_fill = etdr->sw_next_to_fill;
++      struct edma_tx_desc *tpd_desc =
++              (&((struct edma_tx_desc *)(etdr->hw_desc))[sw_next_to_fill]);
++
++      etdr->sw_next_to_fill = (etdr->sw_next_to_fill + 1) & (etdr->count - 1);
++
++      return tpd_desc;
++}
++
++/* edma_tpd_available()
++ *    Check number of free TPDs
++ */
++static inline u16 edma_tpd_available(struct edma_common_info *edma_cinfo,
++                                  int queue_id)
++{
++      struct edma_tx_desc_ring *etdr = edma_cinfo->tpd_ring[queue_id];
++
++      u16 sw_next_to_fill;
++      u16 sw_next_to_clean;
++      u16 count = 0;
++
++      sw_next_to_clean = etdr->sw_next_to_clean;
++      sw_next_to_fill = etdr->sw_next_to_fill;
++
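++      /* Free TPDs = ring size minus descriptors in flight, minus one
++       * slot kept unused to distinguish a full ring from an empty one.
++       */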
++      if (likely(sw_next_to_clean <= sw_next_to_fill))
++              count = etdr->count;
++
++      return count + sw_next_to_clean - sw_next_to_fill - 1;
++}
++
++/* edma_tx_queue_get()
++ *    Get the hardware Tx queue index for this skb
++ */
++static inline int edma_tx_queue_get(struct edma_adapter *adapter,
++                                 struct sk_buff *skb, int txq_id)
++{
++      /* skb->priority is used as an index into the skb priority
++       * table and the corresponding queue is assigned based on
++       * the packet priority.
++       */
++      return adapter->tx_start_offset[txq_id] + edma_skb_priority_offset(skb);
++}
++
++/* edma_tx_update_hw_idx()
++ *    update the producer index for the ring transmitted
++ */
++static void edma_tx_update_hw_idx(struct edma_common_info *edma_cinfo,
++                               struct sk_buff *skb, int queue_id)
++{
++      struct edma_tx_desc_ring *etdr = edma_cinfo->tpd_ring[queue_id];
++      u32 tpd_idx_data;
++
++      /* Read and update the producer index */
++      edma_read_reg(EDMA_REG_TPD_IDX_Q(queue_id), &tpd_idx_data);
++      tpd_idx_data &= ~EDMA_TPD_PROD_IDX_BITS;
++      tpd_idx_data |= (etdr->sw_next_to_fill & EDMA_TPD_PROD_IDX_MASK)
++              << EDMA_TPD_PROD_IDX_SHIFT;
++
++      edma_write_reg(EDMA_REG_TPD_IDX_Q(queue_id), tpd_idx_data);
++}
++
++/* edma_rollback_tx()
++ *    Function to retrieve tx resources in case of error
++ */
++static void edma_rollback_tx(struct edma_adapter *adapter,
++                          struct edma_tx_desc *start_tpd, int queue_id)
++{
++      struct edma_tx_desc_ring *etdr = adapter->edma_cinfo->tpd_ring[queue_id];
++      struct edma_sw_desc *sw_desc;
++      struct edma_tx_desc *tpd = NULL;
++      u16 start_index, index;
++
++      start_index = start_tpd - (struct edma_tx_desc *)(etdr->hw_desc);
++
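++      /* Walk from the first descriptor of the failed frame up to the
++       * current fill point, unmapping buffers and clearing each TPD.
++       */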
++      index = start_index;
++      while (index != etdr->sw_next_to_fill) {
++              tpd = (&((struct edma_tx_desc *)(etdr->hw_desc))[index]);
++              sw_desc = &etdr->sw_desc[index];
++              edma_tx_unmap_and_free(adapter->pdev, sw_desc);
++              memset(tpd, 0, sizeof(struct edma_tx_desc));
++              if (++index == etdr->count)
++                      index = 0;
++      }
++      etdr->sw_next_to_fill = start_index;
++}
++
++/* edma_tx_map_and_fill()
++ *    gets called from edma_xmit_frame
++ *
++ * This is where the dma of the buffer to be transmitted
++ * gets mapped
++ */
++static int edma_tx_map_and_fill(struct edma_common_info *edma_cinfo,
++                             struct edma_adapter *adapter, struct sk_buff *skb, int queue_id,
++                             unsigned int flags_transmit, u16 from_cpu, u16 dp_bitmap,
++                             bool packet_is_rstp, int nr_frags)
++{
++      struct edma_sw_desc *sw_desc = NULL;
++      struct platform_device *pdev = edma_cinfo->pdev;
++      struct edma_tx_desc *tpd = NULL, *start_tpd = NULL;
++      struct sk_buff *iter_skb;
++      int i = 0;
++      u32 word1 = 0, word3 = 0, lso_word1 = 0, svlan_tag = 0;
++      u16 buf_len, lso_desc_len = 0;
++
++      /* It should either be a nr_frags skb or fraglist skb but not both */
++      BUG_ON(nr_frags && skb_has_frag_list(skb));
++
++      if (skb_is_gso(skb)) {
++              /* TODO: What additional checks need to be performed here? */
++              if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
++                      lso_word1 |= EDMA_TPD_IPV4_EN;
++                      ip_hdr(skb)->check = 0;
++                      tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
++                              ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
++              } else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
++                      lso_word1 |= EDMA_TPD_LSO_V2_EN;
++                      ipv6_hdr(skb)->payload_len = 0;
++                      tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
++                              &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
++              } else
++                      return -EINVAL;
++
++              lso_word1 |= EDMA_TPD_LSO_EN | ((skb_shinfo(skb)->gso_size & EDMA_TPD_MSS_MASK) << EDMA_TPD_MSS_SHIFT) |
++                              (skb_transport_offset(skb) << EDMA_TPD_HDR_SHIFT);
++      } else if (flags_transmit & EDMA_HW_CHECKSUM) {
++              u8 css, cso;
++
++              cso = skb_checksum_start_offset(skb);
++              css = cso + skb->csum_offset;
++
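++              /* Hardware takes the checksum start and store offsets
++               * in units of 16-bit words, hence the shifts by one.
++               */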
++              word1 |= (EDMA_TPD_CUSTOM_CSUM_EN);
++              word1 |= (cso >> 1) << EDMA_TPD_HDR_SHIFT;
++              word1 |= ((css >> 1) << EDMA_TPD_CUSTOM_CSUM_SHIFT);
++      }
++
++      if (skb->protocol == htons(ETH_P_PPP_SES))
++              word1 |= EDMA_TPD_PPPOE_EN;
++
++      if (flags_transmit & EDMA_VLAN_TX_TAG_INSERT_FLAG) {
++              switch (skb->vlan_proto) {
++              case htons(ETH_P_8021Q):
++                      word3 |= (1 << EDMA_TX_INS_CVLAN);
++                      word3 |= skb_vlan_tag_get(skb) << EDMA_TX_CVLAN_TAG_SHIFT;
++                      break;
++              case htons(ETH_P_8021AD):
++                      word1 |= (1 << EDMA_TX_INS_SVLAN);
++                      svlan_tag = skb_vlan_tag_get(skb) << EDMA_TX_SVLAN_TAG_SHIFT;
++                      break;
++              default:
++                      dev_err(&pdev->dev, "no ctag or stag present\n");
++                      goto vlan_tag_error;
++              }
++      } else if (flags_transmit & EDMA_VLAN_TX_TAG_INSERT_DEFAULT_FLAG) {
++              word3 |= (1 << EDMA_TX_INS_CVLAN);
++              word3 |= (adapter->default_vlan_tag) << EDMA_TX_CVLAN_TAG_SHIFT;
++      }
++
++      if (packet_is_rstp) {
++              word3 |= dp_bitmap << EDMA_TPD_PORT_BITMAP_SHIFT;
++              word3 |= from_cpu << EDMA_TPD_FROM_CPU_SHIFT;
++      } else {
++              word3 |= adapter->dp_bitmap << EDMA_TPD_PORT_BITMAP_SHIFT;
++      }
++
++      buf_len = skb_headlen(skb);
++
++      if (lso_word1) {
++              if (lso_word1 & EDMA_TPD_LSO_V2_EN) {
++                      /* IPv6 LSOv2 descriptor */
++                      start_tpd = tpd = edma_get_next_tpd(edma_cinfo, queue_id);
++                      sw_desc = edma_get_tx_buffer(edma_cinfo, tpd, queue_id);
++                      sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_NONE;
++
++                      /* LSOv2 descriptor overrides addr field to pass length */
++                      tpd->addr = cpu_to_le16(skb->len);
++                      tpd->svlan_tag = svlan_tag;
++                      tpd->word1 = word1 | lso_word1;
++                      tpd->word3 = word3;
++              }
++
++              tpd = edma_get_next_tpd(edma_cinfo, queue_id);
++              if (!start_tpd)
++                      start_tpd = tpd;
++              sw_desc = edma_get_tx_buffer(edma_cinfo, tpd, queue_id);
++
++              /* The last buffer info contains the skb address,
++               * so the skb will be freed after unmap
++               */
++              sw_desc->length = lso_desc_len;
++              sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_HEAD;
++
++              sw_desc->dma = dma_map_single(&adapter->pdev->dev,
++                                      skb->data, buf_len, DMA_TO_DEVICE);
++              if (dma_mapping_error(&pdev->dev, sw_desc->dma))
++                      goto dma_error;
++
++              tpd->addr = cpu_to_le32(sw_desc->dma);
++              tpd->len  = cpu_to_le16(buf_len);
++
++              tpd->svlan_tag = svlan_tag;
++              tpd->word1 = word1 | lso_word1;
++              tpd->word3 = word3;
++
++              buf_len = 0;
++      }
++
++      if (likely(buf_len)) {
++              /* TODO: do not dequeue a descriptor if there is a potential error */
++              tpd = edma_get_next_tpd(edma_cinfo, queue_id);
++
++              if (!start_tpd)
++                      start_tpd = tpd;
++
++              sw_desc = edma_get_tx_buffer(edma_cinfo, tpd, queue_id);
++
++              /* The last buffer info contains the skb address,
++               * so the skb will be freed after unmap
++               */
++              sw_desc->length = buf_len;
++              sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_HEAD;
++              sw_desc->dma = dma_map_single(&adapter->pdev->dev,
++                      skb->data, buf_len, DMA_TO_DEVICE);
++              if (dma_mapping_error(&pdev->dev, sw_desc->dma))
++                      goto dma_error;
++
++              tpd->addr = cpu_to_le32(sw_desc->dma);
++              tpd->len  = cpu_to_le16(buf_len);
++
++              tpd->svlan_tag = svlan_tag;
++              tpd->word1 = word1 | lso_word1;
++              tpd->word3 = word3;
++      }
++
++      /* Walk through all paged fragments */
++      while (nr_frags--) {
++              skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
++              buf_len = skb_frag_size(frag);
++              tpd = edma_get_next_tpd(edma_cinfo, queue_id);
++              sw_desc = edma_get_tx_buffer(edma_cinfo, tpd, queue_id);
++              sw_desc->length = buf_len;
++              sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_FRAG;
++
++              sw_desc->dma = skb_frag_dma_map(&pdev->dev, frag, 0, buf_len, DMA_TO_DEVICE);
++
++              if (dma_mapping_error(&pdev->dev, sw_desc->dma))
++                      goto dma_error;
++
++              tpd->addr = cpu_to_le32(sw_desc->dma);
++              tpd->len  = cpu_to_le16(buf_len);
++
++              tpd->svlan_tag = svlan_tag;
++              tpd->word1 = word1 | lso_word1;
++              tpd->word3 = word3;
++              i++;
++      }
++
++      /* Walk through all fraglist skbs */
++      skb_walk_frags(skb, iter_skb) {
++              buf_len = iter_skb->len;
++              tpd = edma_get_next_tpd(edma_cinfo, queue_id);
++              sw_desc = edma_get_tx_buffer(edma_cinfo, tpd, queue_id);
++              sw_desc->length = buf_len;
++              sw_desc->dma =  dma_map_single(&adapter->pdev->dev,
++                              iter_skb->data, buf_len, DMA_TO_DEVICE);
++
++              if (dma_mapping_error(&pdev->dev, sw_desc->dma))
++                      goto dma_error;
++
++              tpd->addr = cpu_to_le32(sw_desc->dma);
++              tpd->len  = cpu_to_le16(buf_len);
++              tpd->svlan_tag = svlan_tag;
++              tpd->word1 = word1 | lso_word1;
++              tpd->word3 = word3;
++              sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_FRAGLIST;
++      }
++
++      if (tpd)
++              tpd->word1 |= 1 << EDMA_TPD_EOP_SHIFT;
++
++      sw_desc->skb = skb;
++      sw_desc->flags |= EDMA_SW_DESC_FLAG_LAST;
++
++      return 0;
++
++dma_error:
++      edma_rollback_tx(adapter, start_tpd, queue_id);
++      dev_err(&pdev->dev, "TX DMA map failed\n");
++vlan_tag_error:
++      return -ENOMEM;
++}
++
++/* edma_check_link()
++ *    check Link status
++ */
++static int edma_check_link(struct edma_adapter *adapter)
++{
++      struct phy_device *phydev = adapter->phydev;
++
++      if (!(adapter->poll_required))
++              return __EDMA_LINKUP;
++
++      if (phydev->link)
++              return __EDMA_LINKUP;
++
++      return __EDMA_LINKDOWN;
++}
++
++/* edma_adjust_link()
++ *    Check the link status and update carrier and Tx queues accordingly
++ */
++void edma_adjust_link(struct net_device *netdev)
++{
++      int status;
++      struct edma_adapter *adapter = netdev_priv(netdev);
++      struct phy_device *phydev = adapter->phydev;
++
++      if (!test_bit(__EDMA_UP, &adapter->state_flags))
++              return;
++
++      status = edma_check_link(adapter);
++
++      if (status == __EDMA_LINKUP && adapter->link_state == __EDMA_LINKDOWN) {
++              dev_info(&adapter->pdev->dev, "%s: GMAC Link is up with phy_speed=%d\n", netdev->name, phydev->speed);
++              adapter->link_state = __EDMA_LINKUP;
++              netif_carrier_on(netdev);
++              if (netif_running(netdev))
++                      netif_tx_wake_all_queues(netdev);
++      } else if (status == __EDMA_LINKDOWN && adapter->link_state == __EDMA_LINKUP) {
++              dev_info(&adapter->pdev->dev, "%s: GMAC Link is down\n", netdev->name);
++              adapter->link_state = __EDMA_LINKDOWN;
++              netif_carrier_off(netdev);
++              netif_tx_stop_all_queues(netdev);
++      }
++}
++
++/* edma_get_stats()
++ *    Statistics API used to retrieve the tx/rx statistics
++ */
++struct net_device_stats *edma_get_stats(struct net_device *netdev)
++{
++      struct edma_adapter *adapter = netdev_priv(netdev);
++
++      return &adapter->stats;
++}
++
++/* edma_xmit()
++ *    Main api to be called by the core for packet transmission
++ */
++netdev_tx_t edma_xmit(struct sk_buff *skb,
++                   struct net_device *net_dev)
++{
++      struct edma_adapter *adapter = netdev_priv(net_dev);
++      struct edma_common_info *edma_cinfo = adapter->edma_cinfo;
++      struct edma_tx_desc_ring *etdr;
++      /* from_cpu/dp_bitmap are only meaningful for RSTP frames */
++      u16 from_cpu = 0, dp_bitmap = 0, txq_id;
++      int ret, nr_frags = 0, num_tpds_needed = 1, queue_id;
++      unsigned int flags_transmit = 0;
++      bool packet_is_rstp = false;
++      struct netdev_queue *nq = NULL;
++
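++      /* One TPD is needed for the skb head, plus one per paged
++       * fragment or per skb on the frag list.
++       */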
++      if (skb_shinfo(skb)->nr_frags) {
++              nr_frags = skb_shinfo(skb)->nr_frags;
++              num_tpds_needed += nr_frags;
++      } else if (skb_has_frag_list(skb)) {
++              struct sk_buff *iter_skb;
++
++              skb_walk_frags(skb, iter_skb)
++                      num_tpds_needed++;
++      }
++
++      if (num_tpds_needed > EDMA_MAX_SKB_FRAGS) {
++              dev_err(&net_dev->dev,
++                      "skb needs %d descriptors, which is more than the supported %lu",
++                      num_tpds_needed, EDMA_MAX_SKB_FRAGS);
++              dev_kfree_skb_any(skb);
++              adapter->stats.tx_errors++;
++              return NETDEV_TX_OK;
++      }
++
++      if (edma_stp_rstp) {
++              u16 ath_hdr, ath_eth_type;
++              u8 mac_addr[EDMA_ETH_HDR_LEN];
++              ath_eth_type = ntohs(*(uint16_t *)&skb->data[12]);
++              if (ath_eth_type == edma_ath_eth_type) {
++                      packet_is_rstp = true;
++                      ath_hdr = ntohs(*(uint16_t *)&skb->data[14]);
++                      dp_bitmap = ath_hdr & EDMA_TX_ATH_HDR_PORT_BITMAP_MASK;
++                      from_cpu = (ath_hdr & EDMA_TX_ATH_HDR_FROM_CPU_MASK) >> EDMA_TX_ATH_HDR_FROM_CPU_SHIFT;
++                      memcpy(mac_addr, skb->data, EDMA_ETH_HDR_LEN);
++
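++                      /* Strip the 4-byte Atheros header and put the
++                       * saved Ethernet header back at the front.
++                       */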
++                      skb_pull(skb, 4);
++
++                      memcpy(skb->data, mac_addr, EDMA_ETH_HDR_LEN);
++              }
++      }
++
++      /* this will be one of the 4 Tx queues exposed to the Linux kernel */
++      txq_id = skb_get_queue_mapping(skb);
++      queue_id = edma_tx_queue_get(adapter, skb, txq_id);
++      etdr = edma_cinfo->tpd_ring[queue_id];
++      nq = netdev_get_tx_queue(net_dev, txq_id);
++
++      /* Tx is not handled in bottom half context. Hence, we need to
++       * protect Tx from tasks and bottom halves
++       */
++      local_bh_disable();
++
++      if (num_tpds_needed > edma_tpd_available(edma_cinfo, queue_id)) {
++              /* not enough descriptors, just stop the queue */
++              netif_tx_stop_queue(nq);
++              local_bh_enable();
++              dev_dbg(&net_dev->dev, "Not enough descriptors available");
++              edma_cinfo->edma_ethstats.tx_desc_error++;
++              return NETDEV_TX_BUSY;
++      }
++
++      /* Check and mark VLAN tag offload */
++      if (skb_vlan_tag_present(skb))
++              flags_transmit |= EDMA_VLAN_TX_TAG_INSERT_FLAG;
++      else if (adapter->default_vlan_tag)
++              flags_transmit |= EDMA_VLAN_TX_TAG_INSERT_DEFAULT_FLAG;
++
++      /* Check and mark checksum offload */
++      if (likely(skb->ip_summed == CHECKSUM_PARTIAL))
++              flags_transmit |= EDMA_HW_CHECKSUM;
++
++      /* Map and fill descriptor for Tx */
++      ret = edma_tx_map_and_fill(edma_cinfo, adapter, skb, queue_id,
++              flags_transmit, from_cpu, dp_bitmap, packet_is_rstp, nr_frags);
++      if (ret) {
++              dev_kfree_skb_any(skb);
++              adapter->stats.tx_errors++;
++              goto netdev_okay;
++      }
++
++      /* Update SW producer index */
++      edma_tx_update_hw_idx(edma_cinfo, skb, queue_id);
++
++      /* update tx statistics */
++      adapter->stats.tx_packets++;
++      adapter->stats.tx_bytes += skb->len;
++
++netdev_okay:
++      local_bh_enable();
++      return NETDEV_TX_OK;
++}
++
++/* edma_flow_may_expire()
++ *    Timer callback that periodically expires stale RFS filter nodes
++ */
++void edma_flow_may_expire(struct timer_list *t)
++{
++      struct edma_rfs_flow_table *table = from_timer(table, t, expire_rfs);
++      struct edma_adapter *adapter =
++              container_of(table, typeof(*adapter), rfs);
++      int j;
++
++      spin_lock_bh(&adapter->rfs.rfs_ftab_lock);
++      for (j = 0; j < EDMA_RFS_EXPIRE_COUNT_PER_CALL; j++) {
++              struct hlist_head *hhead;
++              struct hlist_node *tmp;
++              struct edma_rfs_filter_node *n;
++              bool res;
++
++              /* Mask the index inline so it cannot run past the table end */
++              hhead = &adapter->rfs.hlist_head[adapter->rfs.hashtoclean++ &
++                                               (EDMA_RFS_FLOW_ENTRIES - 1)];
++              hlist_for_each_entry_safe(n, tmp, hhead, node) {
++                      res = rps_may_expire_flow(adapter->netdev, n->rq_id,
++                                      n->flow_id, n->filter_id);
++                      if (res) {
++                              int ret;
++                              ret = edma_delete_rfs_filter(adapter, n);
++                              if (ret < 0)
++                                      dev_dbg(&adapter->netdev->dev,
++                                                      "RFS entry %d not allowed to be flushed by Switch",
++                                                      n->flow_id);
++                              else {
++                                      hlist_del(&n->node);
++                                      kfree(n);
++                                      adapter->rfs.filter_available++;
++                              }
++                      }
++              }
++      }
++
++      adapter->rfs.hashtoclean = adapter->rfs.hashtoclean & (EDMA_RFS_FLOW_ENTRIES - 1);
++      spin_unlock_bh(&adapter->rfs.rfs_ftab_lock);
++      mod_timer(&adapter->rfs.expire_rfs, jiffies + HZ / 4);
++}
++
++/* edma_rx_flow_steer()
++ *    Called by the core to steer the flow to a CPU
++ */
++int edma_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
++                     u16 rxq, u32 flow_id)
++{
++      struct flow_keys keys;
++      struct edma_rfs_filter_node *filter_node;
++      struct edma_adapter *adapter = netdev_priv(dev);
++      u16 hash_tblid;
++      int res;
++
++      if (skb->protocol == htons(ETH_P_IPV6)) {
++              dev_err(&adapter->pdev->dev, "IPv6 not supported\n");
++              res = -EINVAL;
++              goto no_protocol_err;
++      }
++
++      /* Dissect flow parameters
++       * We only support IPv4 + TCP/UDP
++       */
++      res = skb_flow_dissect_flow_keys(skb, &keys, 0);
++      if (!((keys.basic.ip_proto == IPPROTO_TCP) || (keys.basic.ip_proto == IPPROTO_UDP))) {
++              res = -EPROTONOSUPPORT;
++              goto no_protocol_err;
++      }
++
++      /* Check if table entry exists */
++      hash_tblid = skb_get_hash_raw(skb) & EDMA_RFS_FLOW_ENTRIES_MASK;
++
++      spin_lock_bh(&adapter->rfs.rfs_ftab_lock);
++      filter_node = edma_rfs_key_search(&adapter->rfs.hlist_head[hash_tblid], &keys);
++
++      if (filter_node) {
++              if (rxq == filter_node->rq_id) {
++                      res = -EEXIST;
++                      goto out;
++              } else {
++                      res = edma_delete_rfs_filter(adapter, filter_node);
++                      if (res < 0)
++                              dev_warn(&adapter->netdev->dev,
++                                              "Cannot steer flow %d to different queue",
++                                              filter_node->flow_id);
++                      else {
++                              adapter->rfs.filter_available++;
++                              res = edma_add_rfs_filter(adapter, &keys, rxq, filter_node);
++                              if (res < 0) {
++                                      dev_warn(&adapter->netdev->dev,
++                                                      "Cannot steer flow %d to different queue",
++                                                      filter_node->flow_id);
++                              } else {
++                                      adapter->rfs.filter_available--;
++                                      filter_node->rq_id = rxq;
++                                      filter_node->filter_id = res;
++                              }
++                      }
++              }
++      } else {
++              if (adapter->rfs.filter_available == 0) {
++                      res = -EBUSY;
++                      goto out;
++              }
++
++              filter_node = kmalloc(sizeof(*filter_node), GFP_ATOMIC);
++              if (!filter_node) {
++                      res = -ENOMEM;
++                      goto out;
++              }
++
++              res = edma_add_rfs_filter(adapter, &keys, rxq, filter_node);
++              if (res < 0) {
++                      kfree(filter_node);
++                      goto out;
++              }
++
++              adapter->rfs.filter_available--;
++              filter_node->rq_id = rxq;
++              filter_node->filter_id = res;
++              filter_node->flow_id = flow_id;
++              filter_node->keys = keys;
++              INIT_HLIST_NODE(&filter_node->node);
++              hlist_add_head(&filter_node->node, &adapter->rfs.hlist_head[hash_tblid]);
++      }
++
++out:
++      spin_unlock_bh(&adapter->rfs.rfs_ftab_lock);
++no_protocol_err:
++      return res;
++}
++
++/* edma_register_rfs_filter()
++ *    Add RFS filter callback
++ */
++int edma_register_rfs_filter(struct net_device *netdev,
++                          set_rfs_filter_callback_t set_filter)
++{
++      struct edma_adapter *adapter = netdev_priv(netdev);
++
++      spin_lock_bh(&adapter->rfs.rfs_ftab_lock);
++
++      if (adapter->set_rfs_rule) {
++              spin_unlock_bh(&adapter->rfs.rfs_ftab_lock);
++              return -1;
++      }
++
++      adapter->set_rfs_rule = set_filter;
++      spin_unlock_bh(&adapter->rfs.rfs_ftab_lock);
++
++      return 0;
++}
++
++/* edma_alloc_tx_rings()
++ *    Allocate tx rings
++ */
++int edma_alloc_tx_rings(struct edma_common_info *edma_cinfo)
++{
++      struct platform_device *pdev = edma_cinfo->pdev;
++      int i, err = 0;
++
++      for (i = 0; i < edma_cinfo->num_tx_queues; i++) {
++              err = edma_alloc_tx_ring(edma_cinfo, edma_cinfo->tpd_ring[i]);
++              if (err) {
++                      dev_err(&pdev->dev, "Tx Queue alloc %u failed\n", i);
++                      return err;
++              }
++      }
++
++      return 0;
++}
++
++/* edma_free_tx_rings()
++ *    Free tx rings
++ */
++void edma_free_tx_rings(struct edma_common_info *edma_cinfo)
++{
++      int i;
++
++      for (i = 0; i < edma_cinfo->num_tx_queues; i++)
++              edma_free_tx_ring(edma_cinfo, edma_cinfo->tpd_ring[i]);
++}
++
++/* edma_free_tx_resources()
++ *    Free buffers associated with tx rings
++ */
++void edma_free_tx_resources(struct edma_common_info *edma_cinfo)
++{
++      struct edma_tx_desc_ring *etdr;
++      struct edma_sw_desc *sw_desc;
++      struct platform_device *pdev = edma_cinfo->pdev;
++      int i, j;
++
++      for (i = 0; i < edma_cinfo->num_tx_queues; i++) {
++              etdr = edma_cinfo->tpd_ring[i];
++              for (j = 0; j < EDMA_TX_RING_SIZE; j++) {
++                      sw_desc = &etdr->sw_desc[j];
++                      if (sw_desc->flags & (EDMA_SW_DESC_FLAG_SKB_HEAD |
++                              EDMA_SW_DESC_FLAG_SKB_FRAG | EDMA_SW_DESC_FLAG_SKB_FRAGLIST))
++                              edma_tx_unmap_and_free(pdev, sw_desc);
++              }
++      }
++}
++
++/* edma_alloc_rx_rings()
++ *    Allocate rx rings
++ */
++int edma_alloc_rx_rings(struct edma_common_info *edma_cinfo)
++{
++      struct platform_device *pdev = edma_cinfo->pdev;
++      int i, j, err = 0;
++
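++      /* With four rx queues the hardware rings are used at every
++       * other index (0, 2, 4, 6); with eight they are contiguous.
++       */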
++      for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) {
++              err = edma_alloc_rx_ring(edma_cinfo, edma_cinfo->rfd_ring[j]);
++              if (err) {
++                      dev_err(&pdev->dev, "Rx Queue alloc %u failed\n", i);
++                      return err;
++              }
++              j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
++      }
++
++      return 0;
++}
++
++/* edma_free_rx_rings()
++ *    free rx rings
++ */
++void edma_free_rx_rings(struct edma_common_info *edma_cinfo)
++{
++      int i, j;
++
++      for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) {
++              edma_free_rx_ring(edma_cinfo, edma_cinfo->rfd_ring[j]);
++              j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
++      }
++}
++
++/* edma_free_queues()
++ *    Free the allocated queues
++ */
++void edma_free_queues(struct edma_common_info *edma_cinfo)
++{
++      int i, j;
++
++      for (i = 0; i < edma_cinfo->num_tx_queues; i++) {
++              /* kfree(NULL) is a no-op, so no check is needed */
++              kfree(edma_cinfo->tpd_ring[i]);
++              edma_cinfo->tpd_ring[i] = NULL;
++      }
++
++      for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) {
++              kfree(edma_cinfo->rfd_ring[j]);
++              edma_cinfo->rfd_ring[j] = NULL;
++              j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
++      }
++
++      edma_cinfo->num_rx_queues = 0;
++      edma_cinfo->num_tx_queues = 0;
++}
++
++/* edma_free_rx_resources()
++ *    Free buffers associated with rx rings
++ */
++void edma_free_rx_resources(struct edma_common_info *edma_cinfo)
++{
++      struct edma_rfd_desc_ring *erdr;
++      struct edma_sw_desc *sw_desc;
++      struct platform_device *pdev = edma_cinfo->pdev;
++      int i, j, k;
++
++      for (i = 0, k = 0; i < edma_cinfo->num_rx_queues; i++) {
++              erdr = edma_cinfo->rfd_ring[k];
++              for (j = 0; j < EDMA_RX_RING_SIZE; j++) {
++                      sw_desc = &erdr->sw_desc[j];
++                      if (likely(sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_HEAD)) {
++                              dma_unmap_single(&pdev->dev, sw_desc->dma,
++                                      sw_desc->length, DMA_FROM_DEVICE);
++                              edma_clean_rfd(erdr, j);
++                      } else if ((sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_FRAG)) {
++                              dma_unmap_page(&pdev->dev, sw_desc->dma,
++                                      sw_desc->length, DMA_FROM_DEVICE);
++                              edma_clean_rfd(erdr, j);
++                      }
++              }
++              k += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
++      }
++}
++
++/* edma_alloc_queues_tx()
++ *    Allocate memory for all tx rings
++ */
++int edma_alloc_queues_tx(struct edma_common_info *edma_cinfo)
++{
++      int i;
++
++      for (i = 0; i < edma_cinfo->num_tx_queues; i++) {
++              struct edma_tx_desc_ring *etdr;
++              etdr = kzalloc(sizeof(struct edma_tx_desc_ring), GFP_KERNEL);
++              if (!etdr)
++                      goto err;
++              etdr->count = edma_cinfo->tx_ring_count;
++              edma_cinfo->tpd_ring[i] = etdr;
++      }
++
++      return 0;
++err:
++      edma_free_queues(edma_cinfo);
++      return -ENOMEM;
++}
++
++/* edma_alloc_queues_rx()
++ *    Allocate memory for all rx rings
++ */
++int edma_alloc_queues_rx(struct edma_common_info *edma_cinfo)
++{
++      int i, j;
++
++      for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) {
++              struct edma_rfd_desc_ring *rfd_ring;
++              rfd_ring = kzalloc(sizeof(struct edma_rfd_desc_ring),
++                              GFP_KERNEL);
++              if (!rfd_ring)
++                      goto err;
++              rfd_ring->count = edma_cinfo->rx_ring_count;
++              edma_cinfo->rfd_ring[j] = rfd_ring;
++              j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
++      }
++      return 0;
++err:
++      edma_free_queues(edma_cinfo);
++      return -ENOMEM;
++}
++
++/* edma_clear_irq_status()
++ *    Clear interrupt status
++ */
++void edma_clear_irq_status(void)
++{
++      edma_write_reg(EDMA_REG_RX_ISR, 0xff);
++      edma_write_reg(EDMA_REG_TX_ISR, 0xffff);
++      edma_write_reg(EDMA_REG_MISC_ISR, 0x1fff);
++      edma_write_reg(EDMA_REG_WOL_ISR, 0x1);
++}
++
++/* edma_configure()
++ *    Configure Rx buffers, EDMA interrupts and control registers.
++ */
++int edma_configure(struct edma_common_info *edma_cinfo)
++{
++      struct edma_hw *hw = &edma_cinfo->hw;
++      u32 intr_modrt_data;
++      u32 intr_ctrl_data = 0;
++      int i, j, ret_count;
++
++      edma_read_reg(EDMA_REG_INTR_CTRL, &intr_ctrl_data);
++      intr_ctrl_data &= ~(1 << EDMA_INTR_SW_IDX_W_TYP_SHIFT);
++      intr_ctrl_data |= hw->intr_sw_idx_w << EDMA_INTR_SW_IDX_W_TYP_SHIFT;
++      edma_write_reg(EDMA_REG_INTR_CTRL, intr_ctrl_data);
++
++      edma_clear_irq_status();
++
++      /* Clear any WOL status */
++      edma_write_reg(EDMA_REG_WOL_CTRL, 0);
++      intr_modrt_data = (EDMA_TX_IMT << EDMA_IRQ_MODRT_TX_TIMER_SHIFT);
++      intr_modrt_data |= (EDMA_RX_IMT << EDMA_IRQ_MODRT_RX_TIMER_SHIFT);
++      edma_write_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT, intr_modrt_data);
++      edma_configure_tx(edma_cinfo);
++      edma_configure_rx(edma_cinfo);
++
++      /* Allocate the RX buffer */
++      for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) {
++              struct edma_rfd_desc_ring *ring = edma_cinfo->rfd_ring[j];
++              ret_count = edma_alloc_rx_buf(edma_cinfo, ring, ring->count, j);
++              if (ret_count) {
++                      dev_dbg(&edma_cinfo->pdev->dev, "not all rx buffers allocated\n");
++              }
++              j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
++      }
++
++      /* Configure descriptor Ring */
++      edma_init_desc(edma_cinfo);
++      return 0;
++}
++
++/* edma_irq_enable()
++ *    Enable default interrupt generation settings
++ */
++void edma_irq_enable(struct edma_common_info *edma_cinfo)
++{
++      struct edma_hw *hw = &edma_cinfo->hw;
++      int i, j;
++
++      edma_write_reg(EDMA_REG_RX_ISR, 0xff);
++      for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) {
++              edma_write_reg(EDMA_REG_RX_INT_MASK_Q(j), hw->rx_intr_mask);
++              j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
++      }
++      edma_write_reg(EDMA_REG_TX_ISR, 0xffff);
++      for (i = 0; i < edma_cinfo->num_tx_queues; i++)
++              edma_write_reg(EDMA_REG_TX_INT_MASK_Q(i), hw->tx_intr_mask);
++}
++
++/* edma_irq_disable()
++ *    Disable Interrupt
++ */
++void edma_irq_disable(struct edma_common_info *edma_cinfo)
++{
++      int i;
++
++      for (i = 0; i < EDMA_MAX_RECEIVE_QUEUE; i++)
++              edma_write_reg(EDMA_REG_RX_INT_MASK_Q(i), 0x0);
++
++      for (i = 0; i < EDMA_MAX_TRANSMIT_QUEUE; i++)
++              edma_write_reg(EDMA_REG_TX_INT_MASK_Q(i), 0x0);
++      edma_write_reg(EDMA_REG_MISC_IMR, 0);
++      edma_write_reg(EDMA_REG_WOL_IMR, 0);
++}
++
++/* edma_free_irqs()
++ *    Free All IRQs
++ */
++void edma_free_irqs(struct edma_adapter *adapter)
++{
++      struct edma_common_info *edma_cinfo = adapter->edma_cinfo;
++      int i, j;
++      int k = ((edma_cinfo->num_rx_queues == 4) ? 1 : 2);
++
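++      /* Each core owns four tx IRQs and either one or two rx IRQs,
++       * depending on whether four or eight rx queues are in use.
++       */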
++      for (i = 0; i < CONFIG_NR_CPUS; i++) {
++              for (j = edma_cinfo->edma_percpu_info[i].tx_start; j < (edma_cinfo->edma_percpu_info[i].tx_start + 4); j++)
++                      free_irq(edma_cinfo->tx_irq[j], &edma_cinfo->edma_percpu_info[i]);
++
++              for (j = edma_cinfo->edma_percpu_info[i].rx_start; j < (edma_cinfo->edma_percpu_info[i].rx_start + k); j++)
++                      free_irq(edma_cinfo->rx_irq[j], &edma_cinfo->edma_percpu_info[i]);
++      }
++}
++
++/* edma_enable_rx_ctrl()
++ *    Enable RX queue control
++ */
++void edma_enable_rx_ctrl(struct edma_hw *hw)
++{
++      u32 data;
++
++      edma_read_reg(EDMA_REG_RXQ_CTRL, &data);
++      data |= EDMA_RXQ_CTRL_EN;
++      edma_write_reg(EDMA_REG_RXQ_CTRL, data);
++}
++
++/* edma_enable_tx_ctrl()
++ *    Enable TX queue control
++ */
++void edma_enable_tx_ctrl(struct edma_hw *hw)
++{
++      u32 data;
++
++      edma_read_reg(EDMA_REG_TXQ_CTRL, &data);
++      data |= EDMA_TXQ_CTRL_TXQ_EN;
++      edma_write_reg(EDMA_REG_TXQ_CTRL, data);
++}
++
++/* edma_stop_rx_tx()
++ *    Disable RX/TX queue control
++ */
++void edma_stop_rx_tx(struct edma_hw *hw)
++{
++      u32 data;
++
++      edma_read_reg(EDMA_REG_RXQ_CTRL, &data);
++      data &= ~EDMA_RXQ_CTRL_EN;
++      edma_write_reg(EDMA_REG_RXQ_CTRL, data);
++      edma_read_reg(EDMA_REG_TXQ_CTRL, &data);
++      data &= ~EDMA_TXQ_CTRL_TXQ_EN;
++      edma_write_reg(EDMA_REG_TXQ_CTRL, data);
++}
++
++/* edma_reset()
++ *    Reset the EDMA
++ */
++int edma_reset(struct edma_common_info *edma_cinfo)
++{
++      struct edma_hw *hw = &edma_cinfo->hw;
++
++      edma_irq_disable(edma_cinfo);
++
++      edma_clear_irq_status();
++
++      edma_stop_rx_tx(hw);
++
++      return 0;
++}
++
++/* edma_fill_netdev()
++ *    Fill netdev for each etdr
++ */
++int edma_fill_netdev(struct edma_common_info *edma_cinfo, int queue_id,
++                  int dev, int txq_id)
++{
++      struct edma_tx_desc_ring *etdr;
++      int i = 0;
++
++      etdr = edma_cinfo->tpd_ring[queue_id];
++
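++      /* Find the first free netdev slot on this TPD ring */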
++      while (etdr->netdev[i])
++              i++;
++
++      if (i >= EDMA_MAX_NETDEV_PER_QUEUE)
++              return -1;
++
++      /* Populate the netdev associated with the tpd ring */
++      etdr->netdev[i] = edma_netdev[dev];
++      etdr->nq[i] = netdev_get_tx_queue(edma_netdev[dev], txq_id);
++
++      return 0;
++}
++
++/* edma_set_mac_addr()
++ *    Change the Ethernet address of the NIC
++ */
++int edma_set_mac_addr(struct net_device *netdev, void *p)
++{
++      struct sockaddr *addr = p;
++
++      if (!is_valid_ether_addr(addr->sa_data))
++              return -EINVAL;
++
++      if (netif_running(netdev))
++              return -EBUSY;
++
++      memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
++      return 0;
++}
++
++/* edma_set_stp_rstp()
++ *    set stp/rstp
++ */
++void edma_set_stp_rstp(bool rstp)
++{
++      edma_stp_rstp = rstp;
++}
++
++/* edma_assign_ath_hdr_type()
++ *    assign atheros header eth type
++ */
++void edma_assign_ath_hdr_type(int eth_type)
++{
++      edma_ath_eth_type = eth_type & EDMA_ETH_TYPE_MASK;
++}
++
++/* edma_get_default_vlan_tag()
++ *    Used by other modules to get the default vlan tag
++ */
++int edma_get_default_vlan_tag(struct net_device *netdev)
++{
++      struct edma_adapter *adapter = netdev_priv(netdev);
++
++      return adapter->default_vlan_tag;
++}
++
++/* edma_open()
++ *    Called when the netdevice is brought up; starts the queues.
++ */
++int edma_open(struct net_device *netdev)
++{
++      struct edma_adapter *adapter = netdev_priv(netdev);
++      struct platform_device *pdev = adapter->edma_cinfo->pdev;
++
++      netif_tx_start_all_queues(netdev);
++      edma_initialise_rfs_flow_table(adapter);
++      set_bit(__EDMA_UP, &adapter->state_flags);
++
++      /* If link polling is enabled (in our case, for the WAN port),
++       * start the PHY; otherwise always mark the link as up.
++       */
++      if (adapter->poll_required) {
++              if (!IS_ERR(adapter->phydev)) {
++                      phy_start(adapter->phydev);
++                      phy_start_aneg(adapter->phydev);
++                      adapter->link_state = __EDMA_LINKDOWN;
++              } else {
++                      dev_dbg(&pdev->dev, "Invalid PHY device for a link polled interface\n");
++              }
++      } else {
++              adapter->link_state = __EDMA_LINKUP;
++              netif_carrier_on(netdev);
++      }
++
++      return 0;
++}
++
++/* edma_close()
++ *    Called when the netdevice is brought down; stops the queues.
++ */
++int edma_close(struct net_device *netdev)
++{
++      struct edma_adapter *adapter = netdev_priv(netdev);
++
++      edma_free_rfs_flow_table(adapter);
++      netif_carrier_off(netdev);
++      netif_tx_stop_all_queues(netdev);
++
++      if (adapter->poll_required) {
++              if (!IS_ERR(adapter->phydev))
++                      phy_stop(adapter->phydev);
++      }
++
++      adapter->link_state = __EDMA_LINKDOWN;
++
++      /* Mark the GMAC as down before the link state is checked again */
++      clear_bit(__EDMA_UP, &adapter->state_flags);
++
++      return 0;
++}
++
++/* edma_poll()
++ *    Polling function called when NAPI is scheduled.
++ *
++ * Main sequence of tasks performed here:
++ * read IRQ status -> complete Tx -> complete Rx ->
++ * clear IRQ status -> re-enable interrupts.
++ */
++int edma_poll(struct napi_struct *napi, int budget)
++{
++      struct edma_per_cpu_queues_info *edma_percpu_info = container_of(napi,
++              struct edma_per_cpu_queues_info, napi);
++      struct edma_common_info *edma_cinfo = edma_percpu_info->edma_cinfo;
++      u32 reg_data;
++      u32 shadow_rx_status, shadow_tx_status;
++      int queue_id;
++      int i, work_done = 0;
++
++      /* Store the Rx/Tx status by ANDing it with the
++       * appropriate per-CPU Rx/Tx mask
++       */
++      edma_read_reg(EDMA_REG_RX_ISR, &reg_data);
++      edma_percpu_info->rx_status |= reg_data & edma_percpu_info->rx_mask;
++      shadow_rx_status = edma_percpu_info->rx_status;
++      edma_read_reg(EDMA_REG_TX_ISR, &reg_data);
++      edma_percpu_info->tx_status |= reg_data & edma_percpu_info->tx_mask;
++      shadow_tx_status = edma_percpu_info->tx_status;
++
++      /* Every core has a start offset, computed in probe and stored
++       * in edma_percpu_info->tx_start. The masked status bits identify
++       * the Tx queues owned by this core; since there are 4 tx queues
++       * per core, the loop runs until every pending queue is cleared.
++       */
++      while (edma_percpu_info->tx_status) {
++              queue_id = ffs(edma_percpu_info->tx_status) - 1;
++              edma_tx_complete(edma_cinfo, queue_id);
++              edma_percpu_info->tx_status &= ~(1 << queue_id);
++      }
++
++      /* Likewise for Rx: each core has a start offset, computed in
++       * probe and stored in edma_percpu_info->rx_start. The masked
++       * status bits identify the Rx queues owned by this core; the
++       * loop runs until every pending queue has been serviced or the
++       * NAPI budget is exhausted.
++       */
++      while (edma_percpu_info->rx_status) {
++              queue_id = ffs(edma_percpu_info->rx_status) - 1;
++              edma_rx_complete(edma_cinfo, &work_done,
++                              budget, queue_id, napi);
++
++              if (likely(work_done < budget))
++                      edma_percpu_info->rx_status &= ~(1 << queue_id);
++              else
++                      break;
++      }
++
++      /* Clear the status registers to prevent the interrupts from
++       * reoccurring. This is done here because the status register
++       * must only be written once the producer/consumer index has
++       * been updated to reflect that transmission/reception completed.
++       */
++      edma_write_reg(EDMA_REG_RX_ISR, shadow_rx_status);
++      edma_write_reg(EDMA_REG_TX_ISR, shadow_tx_status);
++
++      /* If budget not fully consumed, exit the polling mode */
++      if (likely(work_done < budget)) {
++              napi_complete(napi);
++
++              /* re-enable the interrupts */
++              for (i = 0; i < edma_cinfo->num_rxq_per_core; i++)
++                      edma_write_reg(EDMA_REG_RX_INT_MASK_Q(edma_percpu_info->rx_start + i), 0x1);
++              for (i = 0; i < edma_cinfo->num_txq_per_core; i++)
++                      edma_write_reg(EDMA_REG_TX_INT_MASK_Q(edma_percpu_info->tx_start + i), 0x1);
++      }
++
++      return work_done;
++}
++
++/* edma_interrupt()
++ *    Interrupt handler
++ */
++irqreturn_t edma_interrupt(int irq, void *dev)
++{
++      struct edma_per_cpu_queues_info *edma_percpu_info = (struct edma_per_cpu_queues_info *) dev;
++      struct edma_common_info *edma_cinfo = edma_percpu_info->edma_cinfo;
++      int i;
++
++      /* Mask further Tx/Rx interrupts; edma_poll() re-enables them */
++      for (i = 0; i < edma_cinfo->num_rxq_per_core; i++)
++              edma_write_reg(EDMA_REG_RX_INT_MASK_Q(edma_percpu_info->rx_start + i), 0x0);
++
++      for (i = 0; i < edma_cinfo->num_txq_per_core; i++)
++              edma_write_reg(EDMA_REG_TX_INT_MASK_Q(edma_percpu_info->tx_start + i), 0x0);
++
++      napi_schedule(&edma_percpu_info->napi);
++
++      return IRQ_HANDLED;
++}
+--- /dev/null
++++ b/drivers/net/ethernet/qualcomm/essedma/edma.h
+@@ -0,0 +1,447 @@
++/*
++ * Copyright (c) 2014 - 2016, The Linux Foundation. All rights reserved.
++ *
++ * Permission to use, copy, modify, and/or distribute this software for
++ * any purpose with or without fee is hereby granted, provided that the
++ * above copyright notice and this permission notice appear in all copies.
++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
++ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
++ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
++ * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
++ */
++
++#ifndef _EDMA_H_
++#define _EDMA_H_
++
++#include <linux/init.h>
++#include <linux/interrupt.h>
++#include <linux/types.h>
++#include <linux/errno.h>
++#include <linux/module.h>
++#include <linux/netdevice.h>
++#include <linux/etherdevice.h>
++#include <linux/skbuff.h>
++#include <linux/io.h>
++#include <linux/vmalloc.h>
++#include <linux/pagemap.h>
++#include <linux/smp.h>
++#include <linux/platform_device.h>
++#include <linux/of.h>
++#include <linux/of_device.h>
++#include <linux/kernel.h>
++#include <linux/device.h>
++#include <linux/sysctl.h>
++#include <linux/phy.h>
++#include <linux/of_net.h>
++#include <net/checksum.h>
++#include <net/ip6_checksum.h>
++#include <asm-generic/bug.h>
++#include "ess_edma.h"
++
++#define EDMA_CPU_CORES_SUPPORTED 4
++#define EDMA_MAX_PORTID_SUPPORTED 5
++#define EDMA_MAX_VLAN_SUPPORTED  EDMA_MAX_PORTID_SUPPORTED
++#define EDMA_MAX_PORTID_BITMAP_INDEX (EDMA_MAX_PORTID_SUPPORTED + 1)
++#define EDMA_MAX_PORTID_BITMAP_SUPPORTED 0x1f /* 0001_1111 = 0x1f */
++#define EDMA_MAX_NETDEV_PER_QUEUE 4 /* 3 netdevs per queue, 1 extra slot for indexing */
++
++#define EDMA_MAX_RECEIVE_QUEUE 8
++#define EDMA_MAX_TRANSMIT_QUEUE 16
++
++/* WAN/LAN adapter number */
++#define EDMA_WAN 0
++#define EDMA_LAN 1
++
++/* VLAN tag */
++#define EDMA_LAN_DEFAULT_VLAN 1
++#define EDMA_WAN_DEFAULT_VLAN 2
++
++#define EDMA_DEFAULT_GROUP1_VLAN 1
++#define EDMA_DEFAULT_GROUP2_VLAN 2
++#define EDMA_DEFAULT_GROUP3_VLAN 3
++#define EDMA_DEFAULT_GROUP4_VLAN 4
++#define EDMA_DEFAULT_GROUP5_VLAN 5
++
++/* Queues exposed to linux kernel */
++#define EDMA_NETDEV_TX_QUEUE 4
++#define EDMA_NETDEV_RX_QUEUE 4
++
++/* Number of queues per core */
++#define EDMA_NUM_TXQ_PER_CORE 4
++#define EDMA_NUM_RXQ_PER_CORE 2
++
++#define EDMA_TPD_EOP_SHIFT 31
++
++#define EDMA_PORT_ID_SHIFT 12
++#define EDMA_PORT_ID_MASK 0x7
++
++/* tpd word 3 bit 18-28 */
++#define EDMA_TPD_PORT_BITMAP_SHIFT 18
++
++#define EDMA_TPD_FROM_CPU_SHIFT 25
++
++#define EDMA_FROM_CPU_MASK 0x80
++#define EDMA_SKB_PRIORITY_MASK 0x38
++
++/* TX/RX descriptor ring count */
++/* should be a power of 2 */
++#define EDMA_RX_RING_SIZE 128
++#define EDMA_TX_RING_SIZE 128
++
++/* Flags used in paged/non paged mode */
++#define EDMA_RX_HEAD_BUFF_SIZE_JUMBO 256
++#define EDMA_RX_HEAD_BUFF_SIZE 1540
++
++/* MAX frame size supported by switch */
++#define EDMA_MAX_JUMBO_FRAME_SIZE 9216
++
++/* Configurations */
++#define EDMA_INTR_CLEAR_TYPE 0
++#define EDMA_INTR_SW_IDX_W_TYPE 0
++#define EDMA_FIFO_THRESH_TYPE 0
++#define EDMA_RSS_TYPE 0
++#define EDMA_RX_IMT 0x0020
++#define EDMA_TX_IMT 0x0050
++#define EDMA_TPD_BURST 5
++#define EDMA_TXF_BURST 0x100
++#define EDMA_RFD_BURST 8
++#define EDMA_RFD_THR 16
++#define EDMA_RFD_LTHR 0
++
++/* RX/TX per CPU based mask/shift */
++#define EDMA_TX_PER_CPU_MASK 0xF
++#define EDMA_RX_PER_CPU_MASK 0x3
++#define EDMA_TX_PER_CPU_MASK_SHIFT 0x2
++#define EDMA_RX_PER_CPU_MASK_SHIFT 0x1
++#define EDMA_TX_CPU_START_SHIFT 0x2
++#define EDMA_RX_CPU_START_SHIFT 0x1
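++
++/* Example: with the values above, core i handles rx queues starting
++ * at (i << EDMA_RX_CPU_START_SHIFT); core 1 therefore gets
++ * rx_start = 2 and rx_mask = EDMA_RX_PER_CPU_MASK << 2 = 0xc, i.e.
++ * the status bits of rx queues 2 and 3.
++ */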
++
++/* Flags used in transmit direction */
++#define EDMA_HW_CHECKSUM 0x00000001
++#define EDMA_VLAN_TX_TAG_INSERT_FLAG 0x00000002
++#define EDMA_VLAN_TX_TAG_INSERT_DEFAULT_FLAG 0x00000004
++
++#define EDMA_SW_DESC_FLAG_LAST 0x1
++#define EDMA_SW_DESC_FLAG_SKB_HEAD 0x2
++#define EDMA_SW_DESC_FLAG_SKB_FRAG 0x4
++#define EDMA_SW_DESC_FLAG_SKB_FRAGLIST 0x8
++#define EDMA_SW_DESC_FLAG_SKB_NONE 0x10
++#define EDMA_SW_DESC_FLAG_SKB_REUSE 0x20
++
++#define EDMA_MAX_SKB_FRAGS (MAX_SKB_FRAGS + 1)
++
++/* Ethtool specific list of EDMA supported features */
++#define EDMA_SUPPORTED_FEATURES (SUPPORTED_10baseT_Half \
++                                      | SUPPORTED_10baseT_Full \
++                                      | SUPPORTED_100baseT_Half \
++                                      | SUPPORTED_100baseT_Full \
++                                      | SUPPORTED_1000baseT_Full)
++
++/* Receive side Atheros header */
++#define EDMA_RX_ATH_HDR_VERSION 0x2
++#define EDMA_RX_ATH_HDR_VERSION_SHIFT 14
++#define EDMA_RX_ATH_HDR_PRIORITY_SHIFT 11
++#define EDMA_RX_ATH_PORT_TYPE_SHIFT 6
++#define EDMA_RX_ATH_HDR_RSTP_PORT_TYPE 0x4
++
++/* Transmit side Atheros header */
++#define EDMA_TX_ATH_HDR_PORT_BITMAP_MASK 0x7F
++#define EDMA_TX_ATH_HDR_FROM_CPU_MASK 0x80
++#define EDMA_TX_ATH_HDR_FROM_CPU_SHIFT 7
++
++#define EDMA_TXQ_START_CORE0 8
++#define EDMA_TXQ_START_CORE1 12
++#define EDMA_TXQ_START_CORE2 0
++#define EDMA_TXQ_START_CORE3 4
++
++#define EDMA_TXQ_IRQ_MASK_CORE0 0x0F00
++#define EDMA_TXQ_IRQ_MASK_CORE1 0xF000
++#define EDMA_TXQ_IRQ_MASK_CORE2 0x000F
++#define EDMA_TXQ_IRQ_MASK_CORE3 0x00F0
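++
++/* The per-core tx queue starts and irq masks line up so that each
++ * core's 4 tx queues occupy one contiguous nibble of the tx
++ * interrupt status register: core2 -> q0-3 (0x000F), core3 -> q4-7
++ * (0x00F0), core0 -> q8-11 (0x0F00), core1 -> q12-15 (0xF000).
++ */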
++
++#define EDMA_ETH_HDR_LEN 12
++#define EDMA_ETH_TYPE_MASK 0xFFFF
++
++#define EDMA_RX_BUFFER_WRITE 16
++#define EDMA_RFD_AVAIL_THR 80
++
++#define EDMA_GMAC_NO_MDIO_PHY PHY_MAX_ADDR
++
++extern int ssdk_rfs_ipct_rule_set(__be32 ip_src, __be32 ip_dst,
++                                __be16 sport, __be16 dport,
++                                uint8_t proto, u16 loadbalance, bool action);
++struct edma_ethtool_statistics {
++      u32 tx_q0_pkt;
++      u32 tx_q1_pkt;
++      u32 tx_q2_pkt;
++      u32 tx_q3_pkt;
++      u32 tx_q4_pkt;
++      u32 tx_q5_pkt;
++      u32 tx_q6_pkt;
++      u32 tx_q7_pkt;
++      u32 tx_q8_pkt;
++      u32 tx_q9_pkt;
++      u32 tx_q10_pkt;
++      u32 tx_q11_pkt;
++      u32 tx_q12_pkt;
++      u32 tx_q13_pkt;
++      u32 tx_q14_pkt;
++      u32 tx_q15_pkt;
++      u32 tx_q0_byte;
++      u32 tx_q1_byte;
++      u32 tx_q2_byte;
++      u32 tx_q3_byte;
++      u32 tx_q4_byte;
++      u32 tx_q5_byte;
++      u32 tx_q6_byte;
++      u32 tx_q7_byte;
++      u32 tx_q8_byte;
++      u32 tx_q9_byte;
++      u32 tx_q10_byte;
++      u32 tx_q11_byte;
++      u32 tx_q12_byte;
++      u32 tx_q13_byte;
++      u32 tx_q14_byte;
++      u32 tx_q15_byte;
++      u32 rx_q0_pkt;
++      u32 rx_q1_pkt;
++      u32 rx_q2_pkt;
++      u32 rx_q3_pkt;
++      u32 rx_q4_pkt;
++      u32 rx_q5_pkt;
++      u32 rx_q6_pkt;
++      u32 rx_q7_pkt;
++      u32 rx_q0_byte;
++      u32 rx_q1_byte;
++      u32 rx_q2_byte;
++      u32 rx_q3_byte;
++      u32 rx_q4_byte;
++      u32 rx_q5_byte;
++      u32 rx_q6_byte;
++      u32 rx_q7_byte;
++      u32 tx_desc_error;
++};
++
++struct edma_mdio_data {
++      struct mii_bus  *mii_bus;
++      void __iomem    *membase;
++      int phy_irq[PHY_MAX_ADDR];
++};
++
++/* EDMA LINK state */
++enum edma_link_state {
++      __EDMA_LINKUP, /* Indicate link is UP */
++      __EDMA_LINKDOWN /* Indicate link is down */
++};
++
++/* EDMA GMAC state */
++enum edma_gmac_state {
++      __EDMA_UP /* use to indicate GMAC is up */
++};
++
++/* edma transmit descriptor */
++struct edma_tx_desc {
++      __le16  len; /* full packet including CRC */
++      __le16  svlan_tag; /* vlan tag */
++      __le32  word1; /* byte 4-7 */
++      __le32  addr; /* address of buffer */
++      __le32  word3; /* byte 12 */
++};
++
++/* edma receive return descriptor */
++struct edma_rx_return_desc {
++      u16 rrd0;
++      u16 rrd1;
++      u16 rrd2;
++      u16 rrd3;
++      u16 rrd4;
++      u16 rrd5;
++      u16 rrd6;
++      u16 rrd7;
++};
++
++/* RFD descriptor */
++struct edma_rx_free_desc {
++      __le32  buffer_addr; /* buffer address */
++};
++
++/* edma hw specific data */
++struct edma_hw {
++      u32  __iomem *hw_addr; /* inner register address */
++      struct edma_adapter *adapter; /* netdevice adapter */
++      u32 rx_intr_mask; /* rx interrupt mask */
++      u32 tx_intr_mask; /* tx interrupt mask */
++      u32 misc_intr_mask; /* misc interrupt mask */
++      u32 wol_intr_mask; /* wake on lan interrupt mask */
++      bool intr_clear_type; /* interrupt clear */
++      bool intr_sw_idx_w; /* interrupt software index */
++      u32 rx_head_buff_size; /* Rx buffer size */
++      u8 rss_type; /* rss protocol type */
++};
++
++/* edma_sw_desc stores software descriptor
++ * SW descriptor has 1:1 map with HW descriptor
++ */
++struct edma_sw_desc {
++      struct sk_buff *skb;
++      dma_addr_t dma; /* dma address */
++      u16 length; /* Tx/Rx buffer length */
++      u32 flags;
++};
++
++/* per core related information */
++struct edma_per_cpu_queues_info {
++      struct napi_struct napi; /* napi associated with the core */
++      u32 tx_mask; /* tx interrupt mask */
++      u32 rx_mask; /* rx interrupt mask */
++      u32 tx_status; /* tx interrupt status */
++      u32 rx_status; /* rx interrupt status */
++      u32 tx_start; /* tx queue start */
++      u32 rx_start; /* rx queue start */
++      struct edma_common_info *edma_cinfo; /* edma common info */
++};
++
++/* edma specific common info */
++struct edma_common_info {
++      struct edma_tx_desc_ring *tpd_ring[16]; /* 16 Tx queues */
++      struct edma_rfd_desc_ring *rfd_ring[8]; /* 8 Rx queues */
++      struct platform_device *pdev; /* device structure */
++      struct net_device *netdev[EDMA_MAX_PORTID_SUPPORTED];
++      struct net_device *portid_netdev_lookup_tbl[EDMA_MAX_PORTID_BITMAP_INDEX];
++      struct ctl_table_header *edma_ctl_table_hdr;
++      int num_gmac;
++      struct edma_ethtool_statistics edma_ethstats; /* ethtool stats */
++      int num_rx_queues; /* number of rx queue */
++      u32 num_tx_queues; /* number of tx queue */
++      u32 tx_irq[16]; /* number of tx irq */
++      u32 rx_irq[8]; /* number of rx irq */
++      u32 from_cpu; /* from CPU TPD field */
++      u32 num_rxq_per_core; /* Rx queues per core */
++      u32 num_txq_per_core; /* Tx queues per core */
++      u16 tx_ring_count; /* Tx ring count */
++      u16 rx_ring_count; /* Rx ring count */
++      u16 rx_head_buffer_len; /* rx buffer length */
++      u16 rx_page_buffer_len; /* rx buffer length */
++      u32 page_mode; /* Jumbo frame supported flag */
++      u32 fraglist_mode; /* fraglist supported flag */
++      struct edma_hw hw; /* edma hw specific structure */
++      struct edma_per_cpu_queues_info edma_percpu_info[CONFIG_NR_CPUS]; /* per cpu information */
++      spinlock_t stats_lock; /* protects updates to the edma stats area */
++      struct timer_list edma_stats_timer;
++};
++
++/* transmit packet descriptor (tpd) ring */
++struct edma_tx_desc_ring {
++      struct netdev_queue *nq[EDMA_MAX_NETDEV_PER_QUEUE]; /* Linux queue index */
++      struct net_device *netdev[EDMA_MAX_NETDEV_PER_QUEUE];
++                      /* Array of netdevs associated with the tpd ring */
++      void *hw_desc; /* descriptor ring virtual address */
++      struct edma_sw_desc *sw_desc; /* buffer associated with ring */
++      int netdev_bmp; /* Bitmap for per-ring netdevs */
++      u32 size; /* descriptor ring length in bytes */
++      u16 count; /* number of descriptors in the ring */
++      dma_addr_t dma; /* descriptor ring physical address */
++      u16 sw_next_to_fill; /* next Tx descriptor to fill */
++      u16 sw_next_to_clean; /* next Tx descriptor to clean */
++};
++
++/* receive free descriptor (rfd) ring */
++struct edma_rfd_desc_ring {
++      void *hw_desc; /* descriptor ring virtual address */
++      struct edma_sw_desc *sw_desc; /* buffer associated with ring */
++      u16 size; /* bytes allocated to sw_desc */
++      u16 count; /* number of descriptors in the ring */
++      dma_addr_t dma; /* descriptor ring physical address */
++      u16 sw_next_to_fill; /* next descriptor to fill */
++      u16 sw_next_to_clean; /* next descriptor to clean */
++};
++
++/* edma_rfs_filter_node - rfs filter node in hash table */
++struct edma_rfs_filter_node {
++      struct flow_keys keys;
++      u32 flow_id; /* flow_id of filter provided by kernel */
++      u16 filter_id; /* filter id of filter returned by adaptor */
++      u16 rq_id; /* desired rq index */
++      struct hlist_node node; /* edma rfs list node */
++};
++
++/* edma_rfs_flow_table - rfs flow table */
++struct edma_rfs_flow_table {
++      u16 max_num_filter; /* Maximum number of filters edma supports */
++      u16 hashtoclean; /* hash table index to clean next */
++      int filter_available; /* Number of free filters available */
++      struct hlist_head hlist_head[EDMA_RFS_FLOW_ENTRIES];
++      spinlock_t rfs_ftab_lock;
++      struct timer_list expire_rfs; /* timer function for edma_rps_may_expire_flow */
++};
++
++/* EDMA net device structure */
++struct edma_adapter {
++      struct net_device *netdev; /* netdevice */
++      struct platform_device *pdev; /* platform device */
++      struct edma_common_info *edma_cinfo; /* edma common info */
++      struct phy_device *phydev; /* Phy device */
++      struct edma_rfs_flow_table rfs; /* edma rfs flow table */
++      struct net_device_stats stats; /* netdev statistics */
++      set_rfs_filter_callback_t set_rfs_rule;
++      u32 flags;/* status flags */
++      unsigned long state_flags; /* GMAC up/down flags */
++      u32 forced_speed; /* link force speed */
++      u32 forced_duplex; /* link force duplex */
++      u32 link_state; /* phy link state */
++      u32 phy_mdio_addr; /* PHY device address on MII interface */
++      u32 poll_required; /* check if link polling is required */
++      u32 tx_start_offset[CONFIG_NR_CPUS]; /* tx queue start */
++      u32 default_vlan_tag; /* vlan tag */
++      u32 dp_bitmap;
++      uint8_t phy_id[MII_BUS_ID_SIZE + 3];
++};
++
++int edma_alloc_queues_tx(struct edma_common_info *edma_cinfo);
++int edma_alloc_queues_rx(struct edma_common_info *edma_cinfo);
++int edma_open(struct net_device *netdev);
++int edma_close(struct net_device *netdev);
++void edma_free_tx_resources(struct edma_common_info *edma_c_info);
++void edma_free_rx_resources(struct edma_common_info *edma_c_info);
++int edma_alloc_tx_rings(struct edma_common_info *edma_cinfo);
++int edma_alloc_rx_rings(struct edma_common_info *edma_cinfo);
++void edma_free_tx_rings(struct edma_common_info *edma_cinfo);
++void edma_free_rx_rings(struct edma_common_info *edma_cinfo);
++void edma_free_queues(struct edma_common_info *edma_cinfo);
++void edma_irq_disable(struct edma_common_info *edma_cinfo);
++int edma_reset(struct edma_common_info *edma_cinfo);
++int edma_poll(struct napi_struct *napi, int budget);
++netdev_tx_t edma_xmit(struct sk_buff *skb,
++              struct net_device *netdev);
++int edma_configure(struct edma_common_info *edma_cinfo);
++void edma_irq_enable(struct edma_common_info *edma_cinfo);
++void edma_enable_tx_ctrl(struct edma_hw *hw);
++void edma_enable_rx_ctrl(struct edma_hw *hw);
++void edma_stop_rx_tx(struct edma_hw *hw);
++void edma_free_irqs(struct edma_adapter *adapter);
++irqreturn_t edma_interrupt(int irq, void *dev);
++void edma_write_reg(u16 reg_addr, u32 reg_value);
++void edma_read_reg(u16 reg_addr, volatile u32 *reg_value);
++struct net_device_stats *edma_get_stats(struct net_device *netdev);
++int edma_set_mac_addr(struct net_device *netdev, void *p);
++int edma_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
++              u16 rxq, u32 flow_id);
++int edma_register_rfs_filter(struct net_device *netdev,
++              set_rfs_filter_callback_t set_filter);
++void edma_flow_may_expire(struct timer_list *t);
++void edma_set_ethtool_ops(struct net_device *netdev);
++void edma_set_stp_rstp(bool tag);
++void edma_assign_ath_hdr_type(int tag);
++int edma_get_default_vlan_tag(struct net_device *netdev);
++void edma_adjust_link(struct net_device *netdev);
++int edma_fill_netdev(struct edma_common_info *edma_cinfo, int qid, int num, int txq_id);
++void edma_read_append_stats(struct edma_common_info *edma_cinfo);
++void edma_change_tx_coalesce(int usecs);
++void edma_change_rx_coalesce(int usecs);
++void edma_get_tx_rx_coalesce(u32 *reg_val);
++void edma_clear_irq_status(void);
++#endif /* _EDMA_H_ */
+--- /dev/null
++++ b/drivers/net/ethernet/qualcomm/essedma/edma_axi.c
+@@ -0,0 +1,1216 @@
++/*
++ * Copyright (c) 2014 - 2016, The Linux Foundation. All rights reserved.
++ *
++ * Permission to use, copy, modify, and/or distribute this software for
++ * any purpose with or without fee is hereby granted, provided that the
++ * above copyright notice and this permission notice appear in all copies.
++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
++ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
++ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
++ * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
++ */
++
++#include <linux/cpu_rmap.h>
++#include <linux/of.h>
++#include <linux/of_net.h>
++#include <linux/timer.h>
++#include "edma.h"
++#include "ess_edma.h"
++
++/* Weight round robin and virtual QID mask */
++#define EDMA_WRR_VID_SCTL_MASK 0xffff
++
++/* Weight round robin and virtual QID shift */
++#define EDMA_WRR_VID_SCTL_SHIFT 16
++
++char edma_axi_driver_name[] = "ess_edma";
++static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
++      NETIF_MSG_LINK | NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP;
++
++static u32 edma_hw_addr;
++
++char edma_tx_irq[16][64];
++char edma_rx_irq[8][64];
++struct net_device *edma_netdev[EDMA_MAX_PORTID_SUPPORTED];
++static u16 tx_start[4] = {EDMA_TXQ_START_CORE0, EDMA_TXQ_START_CORE1,
++                      EDMA_TXQ_START_CORE2, EDMA_TXQ_START_CORE3};
++static u32 tx_mask[4] = {EDMA_TXQ_IRQ_MASK_CORE0, EDMA_TXQ_IRQ_MASK_CORE1,
++                      EDMA_TXQ_IRQ_MASK_CORE2, EDMA_TXQ_IRQ_MASK_CORE3};
++
++static u32 edma_default_ltag  __read_mostly = EDMA_LAN_DEFAULT_VLAN;
++static u32 edma_default_wtag  __read_mostly = EDMA_WAN_DEFAULT_VLAN;
++static u32 edma_default_group1_vtag  __read_mostly = EDMA_DEFAULT_GROUP1_VLAN;
++static u32 edma_default_group2_vtag  __read_mostly = EDMA_DEFAULT_GROUP2_VLAN;
++static u32 edma_default_group3_vtag  __read_mostly = EDMA_DEFAULT_GROUP3_VLAN;
++static u32 edma_default_group4_vtag  __read_mostly = EDMA_DEFAULT_GROUP4_VLAN;
++static u32 edma_default_group5_vtag  __read_mostly = EDMA_DEFAULT_GROUP5_VLAN;
++static u32 edma_rss_idt_val = EDMA_RSS_IDT_VALUE;
++static u32 edma_rss_idt_idx;
++
++static int edma_weight_assigned_to_q __read_mostly;
++static int edma_queue_to_virtual_q __read_mostly;
++static bool edma_enable_rstp  __read_mostly;
++static int edma_athr_hdr_eth_type __read_mostly;
++
++static int page_mode;
++module_param(page_mode, int, 0);
++MODULE_PARM_DESC(page_mode, "enable page mode");
++
++static int overwrite_mode;
++module_param(overwrite_mode, int, 0);
++MODULE_PARM_DESC(overwrite_mode, "overwrite default page_mode setting");
++
++static int jumbo_mru = EDMA_RX_HEAD_BUFF_SIZE;
++module_param(jumbo_mru, int, 0);
++MODULE_PARM_DESC(jumbo_mru, "rx head buffer size in fraglist mode (non-zero enables fraglist support)");
++
++static int num_rxq = 4;
++module_param(num_rxq, int, 0);
++MODULE_PARM_DESC(num_rxq, "change the number of rx queues");
++
++void edma_write_reg(u16 reg_addr, u32 reg_value)
++{
++      writel(reg_value, ((void __iomem *)(edma_hw_addr + reg_addr)));
++}
++
++void edma_read_reg(u16 reg_addr, volatile u32 *reg_value)
++{
++      *reg_value = readl((void __iomem *)(edma_hw_addr + reg_addr));
++}
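++
++/* Both accessors go through the module-wide edma_hw_addr base cached
++ * in edma_axi_probe(), so the driver assumes a single EDMA controller
++ * instance per system.
++ */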
++
++/* edma_change_tx_coalesce()
++ *    change tx interrupt moderation timer
++ */
++void edma_change_tx_coalesce(int usecs)
++{
++      u32 reg_value;
++
++      /* Right shift the value from the user by 1 because the IMT
++       * resolution is 2 usecs: one count of this register
++       * corresponds to 2 usecs.
++       */
++      edma_read_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT, &reg_value);
++      reg_value = ((reg_value & 0xffff) | ((usecs >> 1) << 16));
++      edma_write_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT, reg_value);
++}
++
++/* edma_change_rx_coalesce()
++ *    change rx interrupt moderation timer
++ */
++void edma_change_rx_coalesce(int usecs)
++{
++      u32 reg_value;
++
++      /* Right shift the value from the user by 1 because the IMT
++       * resolution is 2 usecs: one count of this register
++       * corresponds to 2 usecs.
++       */
++      edma_read_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT, &reg_value);
++      reg_value = ((reg_value & 0xffff0000) | (usecs >> 1));
++      edma_write_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT, reg_value);
++}
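++
++/* Worked example of the 2-usec resolution: a request of 100 usecs is
++ * programmed as 100 >> 1 = 50 counts. Read as raw counts, the
++ * EDMA_TX_IMT (0x50) and EDMA_RX_IMT (0x20) defaults correspond to
++ * 160 and 64 usecs respectively.
++ */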
++
++/* edma_get_tx_rx_coalesce()
++ *    Get tx/rx interrupt moderation value
++ */
++void edma_get_tx_rx_coalesce(u32 *reg_val)
++{
++      edma_read_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT, reg_val);
++}
++
++void edma_read_append_stats(struct edma_common_info *edma_cinfo)
++{
++      uint32_t *p;
++      int i;
++      u32 stat;
++
++      spin_lock(&edma_cinfo->stats_lock);
++      p = (uint32_t *)&(edma_cinfo->edma_ethstats);
++
++      for (i = 0; i < EDMA_MAX_TRANSMIT_QUEUE; i++) {
++              edma_read_reg(EDMA_REG_TX_STAT_PKT_Q(i), &stat);
++              *p += stat;
++              p++;
++      }
++
++      for (i = 0; i < EDMA_MAX_TRANSMIT_QUEUE; i++) {
++              edma_read_reg(EDMA_REG_TX_STAT_BYTE_Q(i), &stat);
++              *p += stat;
++              p++;
++      }
++
++      for (i = 0; i < EDMA_MAX_RECEIVE_QUEUE; i++) {
++              edma_read_reg(EDMA_REG_RX_STAT_PKT_Q(i), &stat);
++              *p += stat;
++              p++;
++      }
++
++      for (i = 0; i < EDMA_MAX_RECEIVE_QUEUE; i++) {
++              edma_read_reg(EDMA_REG_RX_STAT_BYTE_Q(i), &stat);
++              *p += stat;
++              p++;
++      }
++
++      spin_unlock(&edma_cinfo->stats_lock);
++}
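++
++/* The uint32_t pointer walk above relies on the field order of
++ * struct edma_ethtool_statistics matching the register scan order
++ * exactly (16 tx packet counters, 16 tx byte counters, 8 rx packet
++ * counters, 8 rx byte counters); reordering the struct would corrupt
++ * the accounting silently.
++ */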
++
++static void edma_statistics_timer(struct timer_list *t)
++{
++      struct edma_common_info *edma_cinfo =
++              from_timer(edma_cinfo, t, edma_stats_timer);
++
++      edma_read_append_stats(edma_cinfo);
++
++      mod_timer(&edma_cinfo->edma_stats_timer, jiffies + 1*HZ);
++}
++
++static int edma_enable_stp_rstp(struct ctl_table *table, int write,
++                              void __user *buffer, size_t *lenp,
++                              loff_t *ppos)
++{
++      int ret;
++
++      ret = proc_dointvec(table, write, buffer, lenp, ppos);
++      if (write)
++              edma_set_stp_rstp(edma_enable_rstp);
++
++      return ret;
++}
++
++static int edma_ath_hdr_eth_type(struct ctl_table *table, int write,
++                               void __user *buffer, size_t *lenp,
++                               loff_t *ppos)
++{
++      int ret;
++
++      ret = proc_dointvec(table, write, buffer, lenp, ppos);
++      if (write)
++              edma_assign_ath_hdr_type(edma_athr_hdr_eth_type);
++
++      return ret;
++}
++
++static int edma_change_default_lan_vlan(struct ctl_table *table, int write,
++                                      void __user *buffer, size_t *lenp,
++                                      loff_t *ppos)
++{
++      struct edma_adapter *adapter;
++      int ret;
++
++      if (!edma_netdev[1]) {
++              pr_err("Netdevice for default_lan does not exist\n");
++              return -1;
++      }
++
++      adapter = netdev_priv(edma_netdev[1]);
++
++      ret = proc_dointvec(table, write, buffer, lenp, ppos);
++
++      if (write)
++              adapter->default_vlan_tag = edma_default_ltag;
++
++      return ret;
++}
++
++static int edma_change_default_wan_vlan(struct ctl_table *table, int write,
++                                      void __user *buffer, size_t *lenp,
++                                      loff_t *ppos)
++{
++      struct edma_adapter *adapter;
++      int ret;
++
++      if (!edma_netdev[0]) {
++              pr_err("Netdevice for default_wan does not exist\n");
++              return -1;
++      }
++
++      adapter = netdev_priv(edma_netdev[0]);
++
++      ret = proc_dointvec(table, write, buffer, lenp, ppos);
++
++      if (write)
++              adapter->default_vlan_tag = edma_default_wtag;
++
++      return ret;
++}
++
++static int edma_change_group1_vtag(struct ctl_table *table, int write,
++                                 void __user *buffer, size_t *lenp,
++                                 loff_t *ppos)
++{
++      struct edma_adapter *adapter;
++      struct edma_common_info *edma_cinfo;
++      int ret;
++
++      if (!edma_netdev[0]) {
++              pr_err("Netdevice for Group 1 does not exist\n");
++              return -1;
++      }
++
++      adapter = netdev_priv(edma_netdev[0]);
++      edma_cinfo = adapter->edma_cinfo;
++
++      ret = proc_dointvec(table, write, buffer, lenp, ppos);
++
++      if (write)
++              adapter->default_vlan_tag = edma_default_group1_vtag;
++
++      return ret;
++}
++
++static int edma_change_group2_vtag(struct ctl_table *table, int write,
++                                 void __user *buffer, size_t *lenp,
++                                 loff_t *ppos)
++{
++      struct edma_adapter *adapter;
++      struct edma_common_info *edma_cinfo;
++      int ret;
++
++      if (!edma_netdev[1]) {
++              pr_err("Netdevice for Group 2 does not exist\n");
++              return -1;
++      }
++
++      adapter = netdev_priv(edma_netdev[1]);
++      edma_cinfo = adapter->edma_cinfo;
++
++      ret = proc_dointvec(table, write, buffer, lenp, ppos);
++
++      if (write)
++              adapter->default_vlan_tag = edma_default_group2_vtag;
++
++      return ret;
++}
++
++static int edma_change_group3_vtag(struct ctl_table *table, int write,
++                                 void __user *buffer, size_t *lenp,
++                                 loff_t *ppos)
++{
++      struct edma_adapter *adapter;
++      struct edma_common_info *edma_cinfo;
++      int ret;
++
++      if (!edma_netdev[2]) {
++              pr_err("Netdevice for Group 3 does not exist\n");
++              return -1;
++      }
++
++      adapter = netdev_priv(edma_netdev[2]);
++      edma_cinfo = adapter->edma_cinfo;
++
++      ret = proc_dointvec(table, write, buffer, lenp, ppos);
++
++      if (write)
++              adapter->default_vlan_tag = edma_default_group3_vtag;
++
++      return ret;
++}
++
++static int edma_change_group4_vtag(struct ctl_table *table, int write,
++                                 void __user *buffer, size_t *lenp,
++                                 loff_t *ppos)
++{
++      struct edma_adapter *adapter;
++      struct edma_common_info *edma_cinfo;
++      int ret;
++
++      if (!edma_netdev[3]) {
++              pr_err("Netdevice for Group 4 does not exist\n");
++              return -1;
++      }
++
++      adapter = netdev_priv(edma_netdev[3]);
++      edma_cinfo = adapter->edma_cinfo;
++
++      ret = proc_dointvec(table, write, buffer, lenp, ppos);
++
++      if (write)
++              adapter->default_vlan_tag = edma_default_group4_vtag;
++
++      return ret;
++}
++
++static int edma_change_group5_vtag(struct ctl_table *table, int write,
++                                 void __user *buffer, size_t *lenp,
++                                 loff_t *ppos)
++{
++      struct edma_adapter *adapter;
++      struct edma_common_info *edma_cinfo;
++      int ret;
++
++      if (!edma_netdev[4]) {
++              pr_err("Netdevice for Group 5 does not exist\n");
++              return -1;
++      }
++
++      adapter = netdev_priv(edma_netdev[4]);
++      edma_cinfo = adapter->edma_cinfo;
++
++      ret = proc_dointvec(table, write, buffer, lenp, ppos);
++
++      if (write)
++              adapter->default_vlan_tag = edma_default_group5_vtag;
++
++      return ret;
++}
++
++static int edma_set_rss_idt_value(struct ctl_table *table, int write,
++                                void __user *buffer, size_t *lenp,
++                                loff_t *ppos)
++{
++      int ret;
++
++      ret = proc_dointvec(table, write, buffer, lenp, ppos);
++      if (write && !ret)
++              edma_write_reg(EDMA_REG_RSS_IDT(edma_rss_idt_idx),
++                             edma_rss_idt_val);
++      return ret;
++}
++
++static int edma_set_rss_idt_idx(struct ctl_table *table, int write,
++                              void __user *buffer, size_t *lenp,
++                              loff_t *ppos)
++{
++      int ret;
++      u32 old_value = edma_rss_idt_idx;
++
++      ret = proc_dointvec(table, write, buffer, lenp, ppos);
++      if (!write || ret)
++              return ret;
++
++      if (edma_rss_idt_idx >= EDMA_NUM_IDT) {
++              pr_err("Invalid RSS indirection table index %d\n",
++                     edma_rss_idt_idx);
++              edma_rss_idt_idx = old_value;
++              return -EINVAL;
++      }
++      return ret;
++}
++
++static int edma_weight_assigned_to_queues(struct ctl_table *table, int write,
++                                        void __user *buffer, size_t *lenp,
++                                        loff_t *ppos)
++{
++      int ret, queue_id, weight;
++      u32 reg_data, data, reg_addr;
++
++      ret = proc_dointvec(table, write, buffer, lenp, ppos);
++      if (write) {
++              queue_id = edma_weight_assigned_to_q & EDMA_WRR_VID_SCTL_MASK;
++              if (queue_id < 0 || queue_id > 15) {
++                      pr_err("queue_id not within desired range\n");
++                      return -EINVAL;
++              }
++
++              weight = edma_weight_assigned_to_q >> EDMA_WRR_VID_SCTL_SHIFT;
++              if (weight < 0 || weight > 0xF) {
++                      pr_err("queue_id not within desired range\n");
++                      return -EINVAL;
++              }
++
++              data = weight << EDMA_WRR_SHIFT(queue_id);
++
++              reg_addr = EDMA_REG_WRR_CTRL_Q0_Q3 + (queue_id & ~0x3);
++              edma_read_reg(reg_addr, &reg_data);
++              reg_data &= ~(1 << EDMA_WRR_SHIFT(queue_id));
++              edma_write_reg(reg_addr, data | reg_data);
++      }
++
++      return ret;
++}
++
++static int edma_queue_to_virtual_queue_map(struct ctl_table *table, int write,
++                                         void __user *buffer, size_t *lenp,
++                                         loff_t *ppos)
++{
++      int ret, queue_id, virtual_qid;
++      u32 reg_data, data, reg_addr;
++
++      ret = proc_dointvec(table, write, buffer, lenp, ppos);
++      if (write) {
++              queue_id = edma_queue_to_virtual_q & EDMA_WRR_VID_SCTL_MASK;
++              if (queue_id < 0 || queue_id > 15) {
++                      pr_err("queue_id not within desired range\n");
++                      return -EINVAL;
++              }
++
++              virtual_qid = edma_queue_to_virtual_q >>
++                      EDMA_WRR_VID_SCTL_SHIFT;
++              if (virtual_qid < 0 || virtual_qid > 8) {
++                      pr_err("queue_id not within desired range\n");
++                      return -EINVAL;
++              }
++
++              data = virtual_qid << EDMA_VQ_ID_SHIFT(queue_id);
++
++              reg_addr = EDMA_REG_VQ_CTRL0 + (queue_id & ~0x3);
++              edma_read_reg(reg_addr, &reg_data);
++              reg_data &= ~(1 << EDMA_VQ_ID_SHIFT(queue_id));
++              edma_write_reg(reg_addr, data | reg_data);
++      }
++
++      return ret;
++}
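++
++/* Both handlers above decode a packed sysctl value: the low 16 bits
++ * (EDMA_WRR_VID_SCTL_MASK) select the queue and the high 16 bits
++ * carry the weight or virtual queue id. Writing ((3 << 16) | 5) to
++ * weight_assigned_to_queues, for example, assigns weight 3 to
++ * queue 5.
++ */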
++
++static struct ctl_table edma_table[] = {
++      {
++              .procname       = "default_lan_tag",
++              .data           = &edma_default_ltag,
++              .maxlen         = sizeof(int),
++              .mode           = 0644,
++              .proc_handler   = edma_change_default_lan_vlan
++      },
++      {
++              .procname       = "default_wan_tag",
++              .data           = &edma_default_wtag,
++              .maxlen         = sizeof(int),
++              .mode           = 0644,
++              .proc_handler   = edma_change_default_wan_vlan
++      },
++      {
++              .procname       = "weight_assigned_to_queues",
++              .data           = &edma_weight_assigned_to_q,
++              .maxlen         = sizeof(int),
++              .mode           = 0644,
++              .proc_handler   = edma_weight_assigned_to_queues
++      },
++      {
++              .procname       = "queue_to_virtual_queue_map",
++              .data           = &edma_queue_to_virtual_q,
++              .maxlen         = sizeof(int),
++              .mode           = 0644,
++              .proc_handler   = edma_queue_to_virtual_queue_map
++      },
++      {
++              .procname       = "enable_stp_rstp",
++              .data           = &edma_enable_rstp,
++              .maxlen         = sizeof(int),
++              .mode           = 0644,
++              .proc_handler   = edma_enable_stp_rstp
++      },
++      {
++              .procname       = "athr_hdr_eth_type",
++              .data           = &edma_athr_hdr_eth_type,
++              .maxlen         = sizeof(int),
++              .mode           = 0644,
++              .proc_handler   = edma_ath_hdr_eth_type
++      },
++      {
++              .procname       = "default_group1_vlan_tag",
++              .data           = &edma_default_group1_vtag,
++              .maxlen         = sizeof(int),
++              .mode           = 0644,
++              .proc_handler   = edma_change_group1_vtag
++      },
++      {
++              .procname       = "default_group2_vlan_tag",
++              .data           = &edma_default_group2_vtag,
++              .maxlen         = sizeof(int),
++              .mode           = 0644,
++              .proc_handler   = edma_change_group2_vtag
++      },
++      {
++              .procname       = "default_group3_vlan_tag",
++              .data           = &edma_default_group3_vtag,
++              .maxlen         = sizeof(int),
++              .mode           = 0644,
++              .proc_handler   = edma_change_group3_vtag
++      },
++      {
++              .procname       = "default_group4_vlan_tag",
++              .data           = &edma_default_group4_vtag,
++              .maxlen         = sizeof(int),
++              .mode           = 0644,
++              .proc_handler   = edma_change_group4_vtag
++      },
++      {
++              .procname       = "default_group5_vlan_tag",
++              .data           = &edma_default_group5_vtag,
++              .maxlen         = sizeof(int),
++              .mode           = 0644,
++              .proc_handler   = edma_change_group5_vtag
++      },
++      {
++              .procname       = "edma_rss_idt_value",
++              .data           = &edma_rss_idt_val,
++              .maxlen         = sizeof(int),
++              .mode           = 0644,
++              .proc_handler   = edma_set_rss_idt_value
++      },
++      {
++              .procname       = "edma_rss_idt_idx",
++              .data           = &edma_rss_idt_idx,
++              .maxlen         = sizeof(int),
++              .mode           = 0644,
++              .proc_handler   = edma_set_rss_idt_idx
++      },
++      {}
++};
++
++/* edma_axi_netdev_ops
++ *    Describe the operations supported by registered netdevices
++ */
++static const struct net_device_ops edma_axi_netdev_ops = {
++      .ndo_open               = edma_open,
++      .ndo_stop               = edma_close,
++      .ndo_start_xmit         = edma_xmit,
++      .ndo_set_mac_address    = edma_set_mac_addr,
++#ifdef CONFIG_RFS_ACCEL
++      .ndo_rx_flow_steer      = edma_rx_flow_steer,
++      .ndo_register_rfs_filter = edma_register_rfs_filter,
++      .ndo_get_default_vlan_tag = edma_get_default_vlan_tag,
++#endif
++      .ndo_get_stats          = edma_get_stats,
++};
++
++/* edma_axi_probe()
++ *    Initialise an adapter identified by a platform_device structure.
++ *
++ * The OS initialization, configuring of the adapter private structure,
++ * and a hardware reset occur in the probe.
++ */
++static int edma_axi_probe(struct platform_device *pdev)
++{
++      struct edma_common_info *edma_cinfo;
++      struct edma_hw *hw;
++      struct edma_adapter *adapter[EDMA_MAX_PORTID_SUPPORTED];
++      struct resource *res;
++      struct device_node *np = pdev->dev.of_node;
++      struct device_node *pnp;
++      struct device_node *mdio_node = NULL;
++      struct platform_device *mdio_plat = NULL;
++      struct mii_bus *miibus = NULL;
++      struct edma_mdio_data *mdio_data = NULL;
++      int i, j, k, err = 0;
++      int portid_bmp;
++      int idx = 0, idx_mac = 0;
++
++      if (CONFIG_NR_CPUS != EDMA_CPU_CORES_SUPPORTED) {
++              dev_err(&pdev->dev, "Invalid CPU Cores\n");
++              return -EINVAL;
++      }
++
++      if ((num_rxq != 4) && (num_rxq != 8)) {
++              dev_err(&pdev->dev, "Invalid RX queue, edma probe failed\n");
++              return -EINVAL;
++      }
++      edma_cinfo = kzalloc(sizeof(struct edma_common_info), GFP_KERNEL);
++      if (!edma_cinfo) {
++              err = -ENOMEM;
++              goto err_alloc;
++      }
++
++      edma_cinfo->pdev = pdev;
++
++      of_property_read_u32(np, "qcom,num_gmac", &edma_cinfo->num_gmac);
++      if (edma_cinfo->num_gmac > EDMA_MAX_PORTID_SUPPORTED) {
++              pr_err("Invalid DTSI Entry for qcom,num_gmac\n");
++              err = -EINVAL;
++              goto err_cinfo;
++      }
++
++      /* Initialize the netdev array before allocation
++       * to avoid double free
++       */
++      for (i = 0 ; i < edma_cinfo->num_gmac ; i++)
++              edma_netdev[i] = NULL;
++
++      for (i = 0 ; i < edma_cinfo->num_gmac ; i++) {
++              edma_netdev[i] = alloc_etherdev_mqs(sizeof(struct edma_adapter),
++                      EDMA_NETDEV_TX_QUEUE, EDMA_NETDEV_RX_QUEUE);
++
++              if (!edma_netdev[i]) {
++                      dev_err(&pdev->dev,
++                              "net device alloc fails for index=%d\n", i);
++                      err = -ENODEV;
++                      goto err_ioremap;
++              }
++
++              SET_NETDEV_DEV(edma_netdev[i], &pdev->dev);
++              platform_set_drvdata(pdev, edma_netdev[i]);
++              edma_cinfo->netdev[i] = edma_netdev[i];
++      }
++
++      /* Fill ring details */
++      edma_cinfo->num_tx_queues = EDMA_MAX_TRANSMIT_QUEUE;
++      edma_cinfo->num_txq_per_core = (EDMA_MAX_TRANSMIT_QUEUE / 4);
++      edma_cinfo->tx_ring_count = EDMA_TX_RING_SIZE;
++
++      /* Update num rx queues based on module parameter */
++      edma_cinfo->num_rx_queues = num_rxq;
++      edma_cinfo->num_rxq_per_core = ((num_rxq == 4) ? 1 : 2);
++
++      edma_cinfo->rx_ring_count = EDMA_RX_RING_SIZE;
++
++      hw = &edma_cinfo->hw;
++
++      /* Fill HW defaults */
++      hw->tx_intr_mask = EDMA_TX_IMR_NORMAL_MASK;
++      hw->rx_intr_mask = EDMA_RX_IMR_NORMAL_MASK;
++
++      of_property_read_u32(np, "qcom,page-mode", &edma_cinfo->page_mode);
++      of_property_read_u32(np, "qcom,rx_head_buf_size",
++                           &hw->rx_head_buff_size);
++
++      if (overwrite_mode) {
++              dev_info(&pdev->dev, "page mode overwritten");
++              edma_cinfo->page_mode = page_mode;
++      }
++
++      if (jumbo_mru)
++              edma_cinfo->fraglist_mode = 1;
++
++      if (edma_cinfo->page_mode)
++              hw->rx_head_buff_size = EDMA_RX_HEAD_BUFF_SIZE_JUMBO;
++      else if (edma_cinfo->fraglist_mode)
++              hw->rx_head_buff_size = jumbo_mru;
++      else if (!hw->rx_head_buff_size)
++              hw->rx_head_buff_size = EDMA_RX_HEAD_BUFF_SIZE;
++
++      hw->misc_intr_mask = 0;
++      hw->wol_intr_mask = 0;
++
++      hw->intr_clear_type = EDMA_INTR_CLEAR_TYPE;
++      hw->intr_sw_idx_w = EDMA_INTR_SW_IDX_W_TYPE;
++
++      /* configure the RSS type for the different protocols that
++       * can be supported
++       */
++      hw->rss_type = EDMA_RSS_TYPE_IPV4TCP | EDMA_RSS_TYPE_IPV6_TCP |
++              EDMA_RSS_TYPE_IPV4_UDP | EDMA_RSS_TYPE_IPV6UDP |
++              EDMA_RSS_TYPE_IPV4 | EDMA_RSS_TYPE_IPV6;
++
++      res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++
++      edma_cinfo->hw.hw_addr = devm_ioremap_resource(&pdev->dev, res);
++      if (IS_ERR(edma_cinfo->hw.hw_addr)) {
++              err = PTR_ERR(edma_cinfo->hw.hw_addr);
++              goto err_ioremap;
++      }
++
++      edma_hw_addr = (u32)edma_cinfo->hw.hw_addr;
++
++      /* Parse tx queue interrupt number from device tree */
++      for (i = 0; i < edma_cinfo->num_tx_queues; i++)
++              edma_cinfo->tx_irq[i] = platform_get_irq(pdev, i);
++
++      /* Parse rx queue interrupt numbers from device tree.
++       * Here we set j to the point where tx interrupt parsing
++       * left off (i.e. 16) and run the loop from 0 to 7 to
++       * parse the rx interrupt numbers.
++       */
++      for (i = 0, j = edma_cinfo->num_tx_queues, k = 0;
++                      i < edma_cinfo->num_rx_queues; i++) {
++              edma_cinfo->rx_irq[k] = platform_get_irq(pdev, j);
++              k += ((num_rxq == 4) ?  2 : 1);
++              j += ((num_rxq == 4) ?  2 : 1);
++      }
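++
++      /* With num_rxq == 4 the loop above walks j = 16,18,20,22 and
++       * fills rx_irq[0,2,4,6]; with num_rxq == 8 it walks j = 16..23
++       * and fills rx_irq[0..7], one platform irq per rx queue.
++       */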
++
++      edma_cinfo->rx_head_buffer_len = edma_cinfo->hw.rx_head_buff_size;
++      edma_cinfo->rx_page_buffer_len = PAGE_SIZE;
++
++      err = edma_alloc_queues_tx(edma_cinfo);
++      if (err) {
++              dev_err(&pdev->dev, "Allocation of TX queue failed\n");
++              goto err_tx_qinit;
++      }
++
++      err = edma_alloc_queues_rx(edma_cinfo);
++      if (err) {
++              dev_err(&pdev->dev, "Allocation of RX queue failed\n");
++              goto err_rx_qinit;
++      }
++
++      err = edma_alloc_tx_rings(edma_cinfo);
++      if (err) {
++              dev_err(&pdev->dev, "Allocation of TX resources failed\n");
++              goto err_tx_rinit;
++      }
++
++      err = edma_alloc_rx_rings(edma_cinfo);
++      if (err) {
++              dev_err(&pdev->dev, "Allocation of RX resources failed\n");
++              goto err_rx_rinit;
++      }
++
++      /* Initialize netdev and netdev bitmap for transmit descriptor rings */
++      for (i = 0; i < edma_cinfo->num_tx_queues; i++) {
++              struct edma_tx_desc_ring *etdr =  edma_cinfo->tpd_ring[i];
++              int j;
++
++              etdr->netdev_bmp = 0;
++              for (j = 0; j < EDMA_MAX_NETDEV_PER_QUEUE; j++) {
++                      etdr->netdev[j] = NULL;
++                      etdr->nq[j] = NULL;
++              }
++      }
++
++      if (of_property_read_bool(np, "qcom,mdio_supported")) {
++              mdio_node = of_find_compatible_node(NULL, NULL,
++                                                  "qcom,ipq4019-mdio");
++              if (!mdio_node) {
++                      dev_err(&pdev->dev, "cannot find mdio node by phandle");
++                      err = -EIO;
++                      goto err_mdiobus_init_fail;
++              }
++
++              mdio_plat = of_find_device_by_node(mdio_node);
++              if (!mdio_plat) {
++                      dev_err(&pdev->dev,
++                              "cannot find platform device from mdio node");
++                      of_node_put(mdio_node);
++                      err = -EIO;
++                      goto err_mdiobus_init_fail;
++              }
++
++              mdio_data = dev_get_drvdata(&mdio_plat->dev);
++              if (!mdio_data) {
++                      dev_err(&pdev->dev,
++                              "cannot get mii bus reference from device data");
++                      of_node_put(mdio_node);
++                      err = -EIO;
++                      goto err_mdiobus_init_fail;
++              }
++
++              miibus = mdio_data->mii_bus;
++      }
++
++      for_each_available_child_of_node(np, pnp) {
++              const char *mac_addr;
++
++              /* this check is needed if the parent and daughter dts
++               * have different numbers of gmac nodes
++               */
++              if (idx_mac == edma_cinfo->num_gmac) {
++                      of_node_put(np);
++                      break;
++              }
++
++              mac_addr = of_get_mac_address(pnp);
++              if (mac_addr)
++                      memcpy(edma_netdev[idx_mac]->dev_addr, mac_addr, ETH_ALEN);
++
++              idx_mac++;
++      }
++
++      /* Populate the adapter structure register the netdevice */
++      for (i = 0; i < edma_cinfo->num_gmac; i++) {
++              int k, m;
++
++              adapter[i] = netdev_priv(edma_netdev[i]);
++              adapter[i]->netdev = edma_netdev[i];
++              adapter[i]->pdev = pdev;
++              for (j = 0; j < CONFIG_NR_CPUS; j++) {
++                      m = i % 2;
++                      adapter[i]->tx_start_offset[j] =
++                              ((j << EDMA_TX_CPU_START_SHIFT) + (m << 1));
++                      /* Share the queues with available net-devices.
++                       * For instance, with 5 net-devices
++                       * eth0/eth2/eth4 will share q0,q1,q4,q5,q8,q9,q12,q13
++                       * and eth1/eth3 will get the remaining.
++                       */
++                      for (k = adapter[i]->tx_start_offset[j]; k <
++                           (adapter[i]->tx_start_offset[j] + 2); k++) {
++                              if (edma_fill_netdev(edma_cinfo, k, i, j)) {
++                                      pr_err("Netdev overflow Error\n");
++                                      goto err_register;
++                              }
++                      }
++              }
++
++              adapter[i]->edma_cinfo = edma_cinfo;
++              edma_netdev[i]->netdev_ops = &edma_axi_netdev_ops;
++              edma_netdev[i]->max_mtu = 9000;
++              edma_netdev[i]->features = NETIF_F_HW_CSUM | NETIF_F_RXCSUM
++                                    | NETIF_F_HW_VLAN_CTAG_TX
++                                    | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_SG |
++                                    NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GRO;
++              edma_netdev[i]->hw_features = NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
++                              NETIF_F_HW_VLAN_CTAG_RX
++                              | NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
++                              NETIF_F_GRO;
++              edma_netdev[i]->vlan_features = NETIF_F_HW_CSUM | NETIF_F_SG |
++                                         NETIF_F_TSO | NETIF_F_TSO6 |
++                                         NETIF_F_GRO;
++              edma_netdev[i]->wanted_features = NETIF_F_HW_CSUM | NETIF_F_SG |
++                                           NETIF_F_TSO | NETIF_F_TSO6 |
++                                           NETIF_F_GRO;
++
++#ifdef CONFIG_RFS_ACCEL
++              edma_netdev[i]->features |=  NETIF_F_RXHASH | NETIF_F_NTUPLE;
++              edma_netdev[i]->hw_features |=  NETIF_F_RXHASH | NETIF_F_NTUPLE;
++              edma_netdev[i]->vlan_features |= NETIF_F_RXHASH | NETIF_F_NTUPLE;
++              edma_netdev[i]->wanted_features |= NETIF_F_RXHASH | NETIF_F_NTUPLE;
++#endif
++              edma_set_ethtool_ops(edma_netdev[i]);
++
++              /* If no valid MAC address was found, fall back to a
++               * random default MAC address.
++               */
++              if (!is_valid_ether_addr(edma_netdev[i]->dev_addr)) {
++                      random_ether_addr(edma_netdev[i]->dev_addr);
++                      pr_info("EDMA using MAC@ - using");
++                      pr_info("%02x:%02x:%02x:%02x:%02x:%02x\n",
++                      *(edma_netdev[i]->dev_addr),
++                      *(edma_netdev[i]->dev_addr + 1),
++                      *(edma_netdev[i]->dev_addr + 2),
++                      *(edma_netdev[i]->dev_addr + 3),
++                      *(edma_netdev[i]->dev_addr + 4),
++                      *(edma_netdev[i]->dev_addr + 5));
++              }
++
++              err = register_netdev(edma_netdev[i]);
++              if (err)
++                      goto err_register;
++
++              /* carrier off reporting is important to
++               * ethtool even BEFORE open
++               */
++              netif_carrier_off(edma_netdev[i]);
++
++              /* Allocate reverse irq cpu mapping structure for
++               * receive queues
++               */
++#ifdef CONFIG_RFS_ACCEL
++              edma_netdev[i]->rx_cpu_rmap =
++                      alloc_irq_cpu_rmap(EDMA_NETDEV_RX_QUEUE);
++              if (!edma_netdev[i]->rx_cpu_rmap) {
++                      err = -ENOMEM;
++                      goto err_rmap_alloc_fail;
++              }
++#endif
++      }
++
++      for (i = 0; i < EDMA_MAX_PORTID_BITMAP_INDEX; i++)
++              edma_cinfo->portid_netdev_lookup_tbl[i] = NULL;
++
++      for_each_available_child_of_node(np, pnp) {
++              const uint32_t *vlan_tag = NULL;
++              int len;
++
++              /* this check is needed if the parent and daughter dts
++               * have different numbers of gmac nodes
++               */
++              if (idx == edma_cinfo->num_gmac)
++                      break;
++
++              /* Populate port-id to netdev lookup table */
++              vlan_tag = of_get_property(pnp, "vlan_tag", &len);
++              if (!vlan_tag) {
++                      pr_err("Vlan tag parsing Failed.\n");
++                      goto err_rmap_alloc_fail;
++              }
++
++              adapter[idx]->default_vlan_tag = of_read_number(vlan_tag, 1);
++              vlan_tag++;
++              portid_bmp = of_read_number(vlan_tag, 1);
++              adapter[idx]->dp_bitmap = portid_bmp;
++
++              portid_bmp = portid_bmp >> 1; /* We ignore CPU Port bit 0 */
++              while (portid_bmp) {
++                      int port_bit = ffs(portid_bmp);
++
++                      if (port_bit > EDMA_MAX_PORTID_SUPPORTED)
++                              goto err_rmap_alloc_fail;
++                      edma_cinfo->portid_netdev_lookup_tbl[port_bit] =
++                              edma_netdev[idx];
++                      portid_bmp &= ~(1 << (port_bit - 1));
++              }
++
++              if (!of_property_read_u32(pnp, "qcom,poll_required",
++                                        &adapter[idx]->poll_required)) {
++                      if (adapter[idx]->poll_required) {
++                              of_property_read_u32(pnp, "qcom,phy_mdio_addr",
++                                                   &adapter[idx]->phy_mdio_addr);
++                              of_property_read_u32(pnp, "qcom,forced_speed",
++                                                   &adapter[idx]->forced_speed);
++                              of_property_read_u32(pnp, "qcom,forced_duplex",
++                                                   &adapter[idx]->forced_duplex);
++
++                              /* create a phyid using MDIO bus id
++                               * and MDIO bus address
++                               */
++                              snprintf(adapter[idx]->phy_id,
++                                       MII_BUS_ID_SIZE + 3, PHY_ID_FMT,
++                                       miibus->id,
++                                       adapter[idx]->phy_mdio_addr);
++                      }
++              } else {
++                      adapter[idx]->poll_required = 0;
++                      adapter[idx]->forced_speed = SPEED_1000;
++                      adapter[idx]->forced_duplex = DUPLEX_FULL;
++              }
++
++              idx++;
++      }
++
++      edma_cinfo->edma_ctl_table_hdr = register_net_sysctl(&init_net,
++                                                           "net/edma",
++                                                           edma_table);
++      if (!edma_cinfo->edma_ctl_table_hdr) {
++              dev_err(&pdev->dev, "edma sysctl table hdr not registered\n");
++              goto err_unregister_sysctl_tbl;
++      }
++
++      /* Disable all 16 Tx and 8 rx irqs */
++      edma_irq_disable(edma_cinfo);
++
++      err = edma_reset(edma_cinfo);
++      if (err) {
++              err = -EIO;
++              goto err_reset;
++      }
++
++      /* populate per_core_info, do a netif_napi_add, request 16 TX
++       * irqs and 8 RX irqs, and enable napi
++       */
++      for (i = 0; i < CONFIG_NR_CPUS; i++) {
++              u8 rx_start;
++
++              edma_cinfo->edma_percpu_info[i].napi.state = 0;
++
++              netif_napi_add(edma_netdev[0],
++                             &edma_cinfo->edma_percpu_info[i].napi,
++                             edma_poll, 64);
++              napi_enable(&edma_cinfo->edma_percpu_info[i].napi);
++              edma_cinfo->edma_percpu_info[i].tx_mask = tx_mask[i];
++              edma_cinfo->edma_percpu_info[i].rx_mask = EDMA_RX_PER_CPU_MASK
++                              << (i << EDMA_RX_PER_CPU_MASK_SHIFT);
++              edma_cinfo->edma_percpu_info[i].tx_start = tx_start[i];
++              edma_cinfo->edma_percpu_info[i].rx_start =
++                      i << EDMA_RX_CPU_START_SHIFT;
++              rx_start = i << EDMA_RX_CPU_START_SHIFT;
++              edma_cinfo->edma_percpu_info[i].tx_status = 0;
++              edma_cinfo->edma_percpu_info[i].rx_status = 0;
++              edma_cinfo->edma_percpu_info[i].edma_cinfo = edma_cinfo;
++
++              /* Request irq per core */
++              for (j = edma_cinfo->edma_percpu_info[i].tx_start;
++                   j < tx_start[i] + 4; j++) {
++                      sprintf(&edma_tx_irq[j][0], "edma_eth_tx%d", j);
++                      err = request_irq(edma_cinfo->tx_irq[j],
++                                        edma_interrupt,
++                                        0,
++                                        &edma_tx_irq[j][0],
++                                        &edma_cinfo->edma_percpu_info[i]);
++                      if (err)
++                              goto err_reset;
++              }
++
++              for (j = edma_cinfo->edma_percpu_info[i].rx_start;
++                   j < (rx_start +
++                   ((edma_cinfo->num_rx_queues == 4) ? 1 : 2));
++                   j++) {
++                      sprintf(&edma_rx_irq[j][0], "edma_eth_rx%d", j);
++                      err = request_irq(edma_cinfo->rx_irq[j],
++                                        edma_interrupt,
++                                        0,
++                                        &edma_rx_irq[j][0],
++                                        &edma_cinfo->edma_percpu_info[i]);
++                      if (err)
++                              goto err_reset;
++              }
++
++#ifdef CONFIG_RFS_ACCEL
++              for (j = edma_cinfo->edma_percpu_info[i].rx_start;
++                   j < rx_start + 2; j += 2) {
++                      err = irq_cpu_rmap_add(edma_netdev[0]->rx_cpu_rmap,
++                                             edma_cinfo->rx_irq[j]);
++                      if (err)
++                              goto err_rmap_add_fail;
++              }
++#endif
++      }
++
++      /* Used to clear the interrupt status, allocate rx buffers and
++       * configure the edma descriptor registers
++       */
++      err = edma_configure(edma_cinfo);
++      if (err) {
++              err = -EIO;
++              goto err_configure;
++      }
++
++      /* Configure the RSS indirection table.
++       * 128 hash entries will be configured in the following
++       * pattern: hash{0,1,2,3} = {Q0,Q2,Q4,Q6} respectively,
++       * and so on
++       */
++      for (i = 0; i < EDMA_NUM_IDT; i++)
++              edma_write_reg(EDMA_REG_RSS_IDT(i), EDMA_RSS_IDT_VALUE);
++
++      /* Configure the load balance mapping table.
++       * 4 table entries will be configured according to the
++       * following pattern: load_balance{0,1,2,3} = {Q0,Q1,Q3,Q4}
++       * respectively.
++       */
++      edma_write_reg(EDMA_REG_LB_RING, EDMA_LB_REG_VALUE);
++
++      /* Configure Virtual queue for Tx rings
++       * User can also change this value runtime through
++       * a sysctl
++       */
++      edma_write_reg(EDMA_REG_VQ_CTRL0, EDMA_VQ_REG_VALUE);
++      edma_write_reg(EDMA_REG_VQ_CTRL1, EDMA_VQ_REG_VALUE);
++
++      /* Configure max AXI burst write size to 128 bytes */
++      edma_write_reg(EDMA_REG_AXIW_CTRL_MAXWRSIZE,
++                     EDMA_AXIW_MAXWRSIZE_VALUE);
++
++      /* Enable all 16 TX and 8 RX IRQ masks */
++      edma_irq_enable(edma_cinfo);
++      edma_enable_tx_ctrl(&edma_cinfo->hw);
++      edma_enable_rx_ctrl(&edma_cinfo->hw);
++
++      for (i = 0; i < edma_cinfo->num_gmac; i++) {
++              if (adapter[i]->poll_required) {
++                      adapter[i]->phydev =
++                              phy_connect(edma_netdev[i],
++                                          (const char *)adapter[i]->phy_id,
++                                          &edma_adjust_link,
++                                          PHY_INTERFACE_MODE_SGMII);
++                      if (IS_ERR(adapter[i]->phydev)) {
++                              dev_dbg(&pdev->dev, "PHY attach FAIL");
++                              err = -EIO;
++                              goto edma_phy_attach_fail;
++                      } else {
++                              adapter[i]->phydev->advertising |=
++                                      ADVERTISED_Pause |
++                                      ADVERTISED_Asym_Pause;
++                              adapter[i]->phydev->supported |=
++                                      SUPPORTED_Pause |
++                                      SUPPORTED_Asym_Pause;
++                      }
++              } else {
++                      adapter[i]->phydev = NULL;
++              }
++      }
++
++      spin_lock_init(&edma_cinfo->stats_lock);
++
++      timer_setup(&edma_cinfo->edma_stats_timer, edma_statistics_timer, 0);
++      mod_timer(&edma_cinfo->edma_stats_timer, jiffies + 1*HZ);
++
++      return 0;
++
++edma_phy_attach_fail:
++      miibus = NULL;
++err_configure:
++#ifdef CONFIG_RFS_ACCEL
++      for (i = 0; i < edma_cinfo->num_gmac; i++) {
++              free_irq_cpu_rmap(adapter[i]->netdev->rx_cpu_rmap);
++              adapter[i]->netdev->rx_cpu_rmap = NULL;
++      }
++#endif
++err_rmap_add_fail:
++      edma_free_irqs(adapter[0]);
++      for (i = 0; i < CONFIG_NR_CPUS; i++)
++              napi_disable(&edma_cinfo->edma_percpu_info[i].napi);
++err_reset:
++err_unregister_sysctl_tbl:
++err_rmap_alloc_fail:
++      for (i = 0; i < edma_cinfo->num_gmac; i++)
++              unregister_netdev(edma_netdev[i]);
++err_register:
++err_mdiobus_init_fail:
++      edma_free_rx_rings(edma_cinfo);
++err_rx_rinit:
++      edma_free_tx_rings(edma_cinfo);
++err_tx_rinit:
++      edma_free_queues(edma_cinfo);
++err_rx_qinit:
++err_tx_qinit:
++      iounmap(edma_cinfo->hw.hw_addr);
++err_ioremap:
++      for (i = 0; i < edma_cinfo->num_gmac; i++) {
++              if (edma_netdev[i])
++                      free_netdev(edma_netdev[i]);
++      }
++err_cinfo:
++      kfree(edma_cinfo);
++err_alloc:
++      return err;
++}
++
++/* edma_axi_remove()
++ *    Device Removal Routine
++ *
++ * edma_axi_remove is called by the platform subsystem to alert the driver
++ * that it should release a platform device.
++ */
++static int edma_axi_remove(struct platform_device *pdev)
++{
++      struct edma_adapter *adapter = netdev_priv(edma_netdev[0]);
++      struct edma_common_info *edma_cinfo = adapter->edma_cinfo;
++      struct edma_hw *hw = &edma_cinfo->hw;
++      int i;
++
++      for (i = 0; i < edma_cinfo->num_gmac; i++)
++              unregister_netdev(edma_netdev[i]);
++
++      edma_stop_rx_tx(hw);
++      for (i = 0; i < CONFIG_NR_CPUS; i++)
++              napi_disable(&edma_cinfo->edma_percpu_info[i].napi);
++
++      edma_irq_disable(edma_cinfo);
++      edma_write_reg(EDMA_REG_RX_ISR, 0xff);
++      edma_write_reg(EDMA_REG_TX_ISR, 0xffff);
++#ifdef CONFIG_RFS_ACCEL
++      for (i = 0; i < edma_cinfo->num_gmac; i++) {
++              free_irq_cpu_rmap(edma_netdev[i]->rx_cpu_rmap);
++              edma_netdev[i]->rx_cpu_rmap = NULL;
++      }
++#endif
++
++      for (i = 0; i < edma_cinfo->num_gmac; i++) {
++              struct edma_adapter *adapter = netdev_priv(edma_netdev[i]);
++
++              if (adapter->phydev)
++                      phy_disconnect(adapter->phydev);
++      }
++
++      del_timer_sync(&edma_cinfo->edma_stats_timer);
++      edma_free_irqs(adapter);
++      unregister_net_sysctl_table(edma_cinfo->edma_ctl_table_hdr);
++      edma_free_tx_resources(edma_cinfo);
++      edma_free_rx_resources(edma_cinfo);
++      edma_free_tx_rings(edma_cinfo);
++      edma_free_rx_rings(edma_cinfo);
++      edma_free_queues(edma_cinfo);
++      for (i = 0; i < edma_cinfo->num_gmac; i++)
++              free_netdev(edma_netdev[i]);
++
++      kfree(edma_cinfo);
++
++      return 0;
++}
++
++static const struct of_device_id edma_of_mtable[] = {
++      {.compatible = "qcom,ess-edma" },
++      {}
++};
++MODULE_DEVICE_TABLE(of, edma_of_mtable);
++
++static struct platform_driver edma_axi_driver = {
++      .driver = {
++              .name    = edma_axi_driver_name,
++              .of_match_table = edma_of_mtable,
++      },
++      .probe    = edma_axi_probe,
++      .remove   = edma_axi_remove,
++};
++
++module_platform_driver(edma_axi_driver);
++
++MODULE_AUTHOR("Qualcomm Atheros Inc");
++MODULE_DESCRIPTION("QCA ESS EDMA driver");
++MODULE_LICENSE("GPL");
+--- /dev/null
++++ b/drivers/net/ethernet/qualcomm/essedma/edma_ethtool.c
+@@ -0,0 +1,374 @@
++/*
++ * Copyright (c) 2015 - 2016, The Linux Foundation. All rights reserved.
++ *
++ * Permission to use, copy, modify, and/or distribute this software for
++ * any purpose with or without fee is hereby granted, provided that the
++ * above copyright notice and this permission notice appear in all copies.
++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
++ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
++ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
++ * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
++ */
++
++#include <linux/ethtool.h>
++#include <linux/netdevice.h>
++#include <linux/string.h>
++#include "edma.h"
++
++struct edma_ethtool_stats {
++      uint8_t stat_string[ETH_GSTRING_LEN];
++      uint32_t stat_offset;
++};
++
++#define EDMA_STAT(m)    offsetof(struct edma_ethtool_statistics, m)
++#define DRVINFO_LEN   32
++
++/* Array of strings describing statistics
++ */
++static const struct edma_ethtool_stats edma_gstrings_stats[] = {
++      {"tx_q0_pkt", EDMA_STAT(tx_q0_pkt)},
++      {"tx_q1_pkt", EDMA_STAT(tx_q1_pkt)},
++      {"tx_q2_pkt", EDMA_STAT(tx_q2_pkt)},
++      {"tx_q3_pkt", EDMA_STAT(tx_q3_pkt)},
++      {"tx_q4_pkt", EDMA_STAT(tx_q4_pkt)},
++      {"tx_q5_pkt", EDMA_STAT(tx_q5_pkt)},
++      {"tx_q6_pkt", EDMA_STAT(tx_q6_pkt)},
++      {"tx_q7_pkt", EDMA_STAT(tx_q7_pkt)},
++      {"tx_q8_pkt", EDMA_STAT(tx_q8_pkt)},
++      {"tx_q9_pkt", EDMA_STAT(tx_q9_pkt)},
++      {"tx_q10_pkt", EDMA_STAT(tx_q10_pkt)},
++      {"tx_q11_pkt", EDMA_STAT(tx_q11_pkt)},
++      {"tx_q12_pkt", EDMA_STAT(tx_q12_pkt)},
++      {"tx_q13_pkt", EDMA_STAT(tx_q13_pkt)},
++      {"tx_q14_pkt", EDMA_STAT(tx_q14_pkt)},
++      {"tx_q15_pkt", EDMA_STAT(tx_q15_pkt)},
++      {"tx_q0_byte", EDMA_STAT(tx_q0_byte)},
++      {"tx_q1_byte", EDMA_STAT(tx_q1_byte)},
++      {"tx_q2_byte", EDMA_STAT(tx_q2_byte)},
++      {"tx_q3_byte", EDMA_STAT(tx_q3_byte)},
++      {"tx_q4_byte", EDMA_STAT(tx_q4_byte)},
++      {"tx_q5_byte", EDMA_STAT(tx_q5_byte)},
++      {"tx_q6_byte", EDMA_STAT(tx_q6_byte)},
++      {"tx_q7_byte", EDMA_STAT(tx_q7_byte)},
++      {"tx_q8_byte", EDMA_STAT(tx_q8_byte)},
++      {"tx_q9_byte", EDMA_STAT(tx_q9_byte)},
++      {"tx_q10_byte", EDMA_STAT(tx_q10_byte)},
++      {"tx_q11_byte", EDMA_STAT(tx_q11_byte)},
++      {"tx_q12_byte", EDMA_STAT(tx_q12_byte)},
++      {"tx_q13_byte", EDMA_STAT(tx_q13_byte)},
++      {"tx_q14_byte", EDMA_STAT(tx_q14_byte)},
++      {"tx_q15_byte", EDMA_STAT(tx_q15_byte)},
++      {"rx_q0_pkt", EDMA_STAT(rx_q0_pkt)},
++      {"rx_q1_pkt", EDMA_STAT(rx_q1_pkt)},
++      {"rx_q2_pkt", EDMA_STAT(rx_q2_pkt)},
++      {"rx_q3_pkt", EDMA_STAT(rx_q3_pkt)},
++      {"rx_q4_pkt", EDMA_STAT(rx_q4_pkt)},
++      {"rx_q5_pkt", EDMA_STAT(rx_q5_pkt)},
++      {"rx_q6_pkt", EDMA_STAT(rx_q6_pkt)},
++      {"rx_q7_pkt", EDMA_STAT(rx_q7_pkt)},
++      {"rx_q0_byte", EDMA_STAT(rx_q0_byte)},
++      {"rx_q1_byte", EDMA_STAT(rx_q1_byte)},
++      {"rx_q2_byte", EDMA_STAT(rx_q2_byte)},
++      {"rx_q3_byte", EDMA_STAT(rx_q3_byte)},
++      {"rx_q4_byte", EDMA_STAT(rx_q4_byte)},
++      {"rx_q5_byte", EDMA_STAT(rx_q5_byte)},
++      {"rx_q6_byte", EDMA_STAT(rx_q6_byte)},
++      {"rx_q7_byte", EDMA_STAT(rx_q7_byte)},
++      {"tx_desc_error", EDMA_STAT(tx_desc_error)},
++};
++
++#define EDMA_STATS_LEN ARRAY_SIZE(edma_gstrings_stats)
++
++/* edma_get_strset_count()
++ *    Get strset count
++ */
++static int edma_get_strset_count(struct net_device *netdev,
++                               int sset)
++{
++      switch (sset) {
++      case ETH_SS_STATS:
++              return EDMA_STATS_LEN;
++      default:
++              netdev_dbg(netdev, "%s: Invalid string set", __func__);
++              return -EOPNOTSUPP;
++      }
++}
++
++/* edma_get_strings()
++ *    get stats string
++ */
++static void edma_get_strings(struct net_device *netdev, uint32_t stringset,
++                           uint8_t *data)
++{
++      uint8_t *p = data;
++      uint32_t i;
++
++      switch (stringset) {
++      case ETH_SS_STATS:
++              for (i = 0; i < EDMA_STATS_LEN; i++) {
++                      memcpy(p, edma_gstrings_stats[i].stat_string,
++                              min((size_t)ETH_GSTRING_LEN,
++                                  strlen(edma_gstrings_stats[i].stat_string)
++                                  + 1));
++                      p += ETH_GSTRING_LEN;
++              }
++              break;
++      }
++}
++
++/* edma_get_ethtool_stats()
++ *    Get ethtool statistics
++ */
++static void edma_get_ethtool_stats(struct net_device *netdev,
++                                 struct ethtool_stats *stats, uint64_t *data)
++{
++      struct edma_adapter *adapter = netdev_priv(netdev);
++      struct edma_common_info *edma_cinfo = adapter->edma_cinfo;
++      int i;
++      uint8_t *p = NULL;
++
++      edma_read_append_stats(edma_cinfo);
++
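++      /* stat_offset is a byte offset into struct
++       * edma_ethtool_statistics; every exported counter is a u32. */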
++      for (i = 0; i < EDMA_STATS_LEN; i++) {
++              p = (uint8_t *)&(edma_cinfo->edma_ethstats) +
++                      edma_gstrings_stats[i].stat_offset;
++              data[i] = *(uint32_t *)p;
++      }
++}
++
++/* edma_get_drvinfo()
++ *    get edma driver info
++ */
++static void edma_get_drvinfo(struct net_device *dev,
++                           struct ethtool_drvinfo *info)
++{
++      strlcpy(info->driver, "ess_edma", DRVINFO_LEN);
++      strlcpy(info->bus_info, "axi", ETHTOOL_BUSINFO_LEN);
++}
++
++/* edma_nway_reset()
++ *    PHY restart is not supported; always returns -EINVAL.
++ */
++static int edma_nway_reset(struct net_device *netdev)
++{
++      return -EINVAL;
++}
++
++/* edma_get_wol()
++ *    get wake on lan info
++ */
++static void edma_get_wol(struct net_device *netdev,
++                       struct ethtool_wolinfo *wol)
++{
++      wol->supported = 0;
++      wol->wolopts = 0;
++}
++
++/* edma_get_msglevel()
++ *    get message level.
++ */
++static uint32_t edma_get_msglevel(struct net_device *netdev)
++{
++      return 0;
++}
++
++/* edma_get_settings()
++ *    Get edma settings
++ */
++static int edma_get_settings(struct net_device *netdev,
++                           struct ethtool_cmd *ecmd)
++{
++      struct edma_adapter *adapter = netdev_priv(netdev);
++
++      if (adapter->poll_required) {
++              struct phy_device *phydev = NULL;
++              uint16_t phyreg;
++
++              if ((adapter->forced_speed != SPEED_UNKNOWN)
++                      && !(adapter->poll_required))
++                      return -EPERM;
++
++              phydev = adapter->phydev;
++
++              ecmd->advertising = phydev->advertising;
++              ecmd->autoneg = phydev->autoneg;
++
++              if (adapter->link_state == __EDMA_LINKDOWN) {
++                      ecmd->speed =  SPEED_UNKNOWN;
++                      ecmd->duplex = DUPLEX_UNKNOWN;
++              } else {
++                      ecmd->speed = phydev->speed;
++                      ecmd->duplex = phydev->duplex;
++              }
++
++              ecmd->phy_address = adapter->phy_mdio_addr;
++
++              phyreg = (uint16_t)phy_read(adapter->phydev, MII_LPA);
++              if (phyreg & LPA_10HALF)
++                      ecmd->lp_advertising |= ADVERTISED_10baseT_Half;
++
++              if (phyreg & LPA_10FULL)
++                      ecmd->lp_advertising |= ADVERTISED_10baseT_Full;
++
++              if (phyreg & LPA_100HALF)
++                      ecmd->lp_advertising |= ADVERTISED_100baseT_Half;
++
++              if (phyreg & LPA_100FULL)
++                      ecmd->lp_advertising |= ADVERTISED_100baseT_Full;
++
++              phyreg = (uint16_t)phy_read(adapter->phydev, MII_STAT1000);
++              if (phyreg & LPA_1000HALF)
++                      ecmd->lp_advertising |= ADVERTISED_1000baseT_Half;
++
++              if (phyreg & LPA_1000FULL)
++                      ecmd->lp_advertising |= ADVERTISED_1000baseT_Full;
++      } else {
++              /* If the speed/duplex for this GMAC is forced and we
++               * are not polling for link state changes, return the
++               * values as specified by platform. This will be true
++               * for GMACs connected to switch, and interfaces that
++               * do not use a PHY.
++               */
++              if (!(adapter->poll_required)) {
++                      if (adapter->forced_speed != SPEED_UNKNOWN) {
++                              /* set speed and duplex */
++                              ethtool_cmd_speed_set(ecmd, SPEED_1000);
++                              ecmd->duplex = DUPLEX_FULL;
++
++                              /* Populate capabilities advertised by self */
++                              ecmd->advertising = 0;
++                              ecmd->autoneg = 0;
++                              ecmd->port = PORT_TP;
++                              ecmd->transceiver = XCVR_EXTERNAL;
++                      } else {
++                              /* non link polled and non
++                               * forced speed/duplex interface
++                               */
++                              return -EIO;
++                      }
++              }
++      }
++
++      return 0;
++}
++
++/* edma_set_settings()
++ *    Set EDMA settings
++ */
++static int edma_set_settings(struct net_device *netdev,
++                           struct ethtool_cmd *ecmd)
++{
++      struct edma_adapter *adapter = netdev_priv(netdev);
++      struct phy_device *phydev = NULL;
++
++      if ((adapter->forced_speed != SPEED_UNKNOWN) &&
++           !adapter->poll_required)
++              return -EPERM;
++
++      phydev = adapter->phydev;
++      phydev->advertising = ecmd->advertising;
++      phydev->autoneg = ecmd->autoneg;
++      phydev->speed = ethtool_cmd_speed(ecmd);
++      phydev->duplex = ecmd->duplex;
++
++      genphy_config_aneg(phydev);
++
++      return 0;
++}
++
++/* edma_get_coalesce
++ *    get interrupt mitigation
++ */
++static int edma_get_coalesce(struct net_device *netdev,
++                           struct ethtool_coalesce *ec)
++{
++      u32 reg_val;
++
++      edma_get_tx_rx_coalesce(&reg_val);
++
++      /* We read the Interrupt Moderation Timer (IMT) register value;
++       * the lower 16 bits are Rx and the upper 16 bits are Tx. We
++       * shift left by 1 because the IMT resolution is 2 usecs, so
++       * the register value is multiplied by 2 to get the actual
++       * time in usecs.
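++       * Example: a raw Rx field of 25 is reported as 25 << 1 = 50
++       * usecs.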
++       */
++      ec->tx_coalesce_usecs = (((reg_val >> 16) & 0xffff) << 1);
++      ec->rx_coalesce_usecs = ((reg_val & 0xffff) << 1);
++
++      return 0;
++}
++
++/* edma_set_coalesce
++ *    set interrupt mitigation
++ */
++static int edma_set_coalesce(struct net_device *netdev,
++                           struct ethtool_coalesce *ec)
++{
++      if (ec->tx_coalesce_usecs)
++              edma_change_tx_coalesce(ec->tx_coalesce_usecs);
++      if (ec->rx_coalesce_usecs)
++              edma_change_rx_coalesce(ec->rx_coalesce_usecs);
++
++      return 0;
++}
++
++/* edma_set_priv_flags()
++ *    Set EDMA private flags
++ */
++static int edma_set_priv_flags(struct net_device *netdev, u32 flags)
++{
++      return 0;
++}
++
++/* edma_get_priv_flags()
++ *    get edma driver flags
++ */
++static u32 edma_get_priv_flags(struct net_device *netdev)
++{
++      return 0;
++}
++
++/* edma_get_ringparam()
++ *    get ring size
++ */
++static void edma_get_ringparam(struct net_device *netdev,
++                             struct ethtool_ringparam *ring)
++{
++      struct edma_adapter *adapter = netdev_priv(netdev);
++      struct edma_common_info *edma_cinfo = adapter->edma_cinfo;
++
++      ring->tx_max_pending = edma_cinfo->tx_ring_count;
++      ring->rx_max_pending = edma_cinfo->rx_ring_count;
++}
++
++/* Ethtool operations
++ */
++static const struct ethtool_ops edma_ethtool_ops = {
++      .get_drvinfo = edma_get_drvinfo,
++      .get_link = ethtool_op_get_link,
++      .get_msglevel = edma_get_msglevel,
++      .nway_reset = edma_nway_reset,
++      .get_wol = edma_get_wol,
++      .get_settings = edma_get_settings,
++      .set_settings = edma_set_settings,
++      .get_strings = edma_get_strings,
++      .get_sset_count = edma_get_strset_count,
++      .get_ethtool_stats = edma_get_ethtool_stats,
++      .get_coalesce = edma_get_coalesce,
++      .set_coalesce = edma_set_coalesce,
++      .get_priv_flags = edma_get_priv_flags,
++      .set_priv_flags = edma_set_priv_flags,
++      .get_ringparam = edma_get_ringparam,
++};
++
++/* edma_set_ethtool_ops
++ *    Set ethtool operations
++ */
++void edma_set_ethtool_ops(struct net_device *netdev)
++{
++      netdev->ethtool_ops = &edma_ethtool_ops;
++}
+--- /dev/null
++++ b/drivers/net/ethernet/qualcomm/essedma/ess_edma.h
+@@ -0,0 +1,332 @@
++/*
++ * Copyright (c) 2014 - 2016, The Linux Foundation. All rights reserved.
++ *
++ * Permission to use, copy, modify, and/or distribute this software for
++ * any purpose with or without fee is hereby granted, provided that the
++ * above copyright notice and this permission notice appear in all copies.
++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
++ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
++ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
++ * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
++ */
++
++#ifndef _ESS_EDMA_H_
++#define _ESS_EDMA_H_
++
++#include <linux/types.h>
++
++struct edma_adapter;
++struct edma_hw;
++
++/* register definition */
++#define EDMA_REG_MAS_CTRL 0x0
++#define EDMA_REG_TIMEOUT_CTRL 0x004
++#define EDMA_REG_DBG0 0x008
++#define EDMA_REG_DBG1 0x00C
++#define EDMA_REG_SW_CTRL0 0x100
++#define EDMA_REG_SW_CTRL1 0x104
++
++/* Interrupt Status Register */
++#define EDMA_REG_RX_ISR 0x200
++#define EDMA_REG_TX_ISR 0x208
++#define EDMA_REG_MISC_ISR 0x210
++#define EDMA_REG_WOL_ISR 0x218
++
++#define EDMA_MISC_ISR_RX_URG_Q(x) (1 << (x))
++
++#define EDMA_MISC_ISR_AXIR_TIMEOUT 0x00000100
++#define EDMA_MISC_ISR_AXIR_ERR 0x00000200
++#define EDMA_MISC_ISR_TXF_DEAD 0x00000400
++#define EDMA_MISC_ISR_AXIW_ERR 0x00000800
++#define EDMA_MISC_ISR_AXIW_TIMEOUT 0x00001000
++
++#define EDMA_WOL_ISR 0x00000001
++
++/* Interrupt Mask Register */
++#define EDMA_REG_MISC_IMR 0x214
++#define EDMA_REG_WOL_IMR 0x218
++
++#define EDMA_RX_IMR_NORMAL_MASK 0x1
++#define EDMA_TX_IMR_NORMAL_MASK 0x1
++#define EDMA_MISC_IMR_NORMAL_MASK 0x80001FFF
++#define EDMA_WOL_IMR_NORMAL_MASK 0x1
++
++/* Edma receive consumer index */
++#define EDMA_REG_RX_SW_CONS_IDX_Q(x) (0x220 + ((x) << 2)) /* x is the queue id */
++/* Edma transmit consumer index */
++#define EDMA_REG_TX_SW_CONS_IDX_Q(x) (0x240 + ((x) << 2)) /* x is the queue id */
++
++/* IRQ Moderator Initial Timer Register */
++#define EDMA_REG_IRQ_MODRT_TIMER_INIT 0x280
++#define EDMA_IRQ_MODRT_TIMER_MASK 0xFFFF
++#define EDMA_IRQ_MODRT_RX_TIMER_SHIFT 0
++#define EDMA_IRQ_MODRT_TX_TIMER_SHIFT 16
++
++/* Interrupt Control Register */
++#define EDMA_REG_INTR_CTRL 0x284
++#define EDMA_INTR_CLR_TYP_SHIFT 0
++#define EDMA_INTR_SW_IDX_W_TYP_SHIFT 1
++#define EDMA_INTR_CLEAR_TYPE_W1 0
++#define EDMA_INTR_CLEAR_TYPE_R 1
++
++/* RX Interrupt Mask Register */
++#define EDMA_REG_RX_INT_MASK_Q(x) (0x300 + ((x) << 2)) /* x = queue id */
++
++/* TX Interrupt mask register */
++#define EDMA_REG_TX_INT_MASK_Q(x) (0x340 + ((x) << 2)) /* x = queue id */
++
++/* Load Ptr Register
++ * Software sets this bit after initializing the head and tail pointers.
++ */
++#define EDMA_REG_TX_SRAM_PART 0x400
++#define EDMA_LOAD_PTR_SHIFT 16
++
++/* TXQ Control Register */
++#define EDMA_REG_TXQ_CTRL 0x404
++#define EDMA_TXQ_CTRL_IP_OPTION_EN 0x10
++#define EDMA_TXQ_CTRL_TXQ_EN 0x20
++#define EDMA_TXQ_CTRL_ENH_MODE 0x40
++#define EDMA_TXQ_CTRL_LS_8023_EN 0x80
++#define EDMA_TXQ_CTRL_TPD_BURST_EN 0x100
++#define EDMA_TXQ_CTRL_LSO_BREAK_EN 0x200
++#define EDMA_TXQ_NUM_TPD_BURST_MASK 0xF
++#define EDMA_TXQ_TXF_BURST_NUM_MASK 0xFFFF
++#define EDMA_TXQ_NUM_TPD_BURST_SHIFT 0
++#define EDMA_TXQ_TXF_BURST_NUM_SHIFT 16
++
++#define EDMA_REG_TXF_WATER_MARK 0x408 /* in 8-byte units */
++#define EDMA_TXF_WATER_MARK_MASK 0x0FFF
++#define EDMA_TXF_LOW_WATER_MARK_SHIFT 0
++#define EDMA_TXF_HIGH_WATER_MARK_SHIFT 16
++#define EDMA_TXQ_CTRL_BURST_MODE_EN 0x80000000
++
++/* WRR Control Register */
++#define EDMA_REG_WRR_CTRL_Q0_Q3 0x40c
++#define EDMA_REG_WRR_CTRL_Q4_Q7 0x410
++#define EDMA_REG_WRR_CTRL_Q8_Q11 0x414
++#define EDMA_REG_WRR_CTRL_Q12_Q15 0x418
++
++/* Weighted round robin (WRR): takes a queue as input and computes
++ * the starting bit position at which the weight for that queue is
++ * written.
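++ * For example, queue 6 yields ((6 * 5) % 20) = 10, i.e. bits 10..14
++ * of EDMA_REG_WRR_CTRL_Q4_Q7.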
++ */
++#define EDMA_WRR_SHIFT(x) (((x) * 5) % 20)
++
++/* Tx Descriptor Control Register */
++#define EDMA_REG_TPD_RING_SIZE 0x41C
++#define EDMA_TPD_RING_SIZE_SHIFT 0
++#define EDMA_TPD_RING_SIZE_MASK 0xFFFF
++
++/* Transmit descriptor base address */
++#define EDMA_REG_TPD_BASE_ADDR_Q(x) (0x420 + ((x) << 2)) /* x = queue id */
++
++/* TPD Index Register */
++#define EDMA_REG_TPD_IDX_Q(x) (0x460 + ((x) << 2)) /* x = queue id */
++
++#define EDMA_TPD_PROD_IDX_BITS 0x0000FFFF
++#define EDMA_TPD_CONS_IDX_BITS 0xFFFF0000
++#define EDMA_TPD_PROD_IDX_MASK 0xFFFF
++#define EDMA_TPD_CONS_IDX_MASK 0xFFFF
++#define EDMA_TPD_PROD_IDX_SHIFT 0
++#define EDMA_TPD_CONS_IDX_SHIFT 16
++
++/* TX Virtual Queue Mapping Control Register */
++#define EDMA_REG_VQ_CTRL0 0x4A0
++#define EDMA_REG_VQ_CTRL1 0x4A4
++
++/* Virtual QID shift: takes a queue as input and computes the
++ * virtual QID position in the virtual QID control register.
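++ * For example, queue 7 yields ((7 * 3) % 24) = 21, i.e. bits 21..23.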
++ */
++#define EDMA_VQ_ID_SHIFT(i) (((i) * 3) % 24)
++
++/* Virtual Queue Default Value */
++#define EDMA_VQ_REG_VALUE 0x240240
++
++/* Tx side Port Interface Control Register */
++#define EDMA_REG_PORT_CTRL 0x4A8
++#define EDMA_PAD_EN_SHIFT 15
++
++/* Tx side VLAN Configuration Register */
++#define EDMA_REG_VLAN_CFG 0x4AC
++
++#define EDMA_TX_CVLAN 16
++#define EDMA_TX_INS_CVLAN 17
++#define EDMA_TX_CVLAN_TAG_SHIFT 0
++
++#define EDMA_TX_SVLAN 14
++#define EDMA_TX_INS_SVLAN 15
++#define EDMA_TX_SVLAN_TAG_SHIFT 16
++
++/* Tx Queue Packet Statistic Register */
++#define EDMA_REG_TX_STAT_PKT_Q(x) (0x700 + ((x) << 3)) /* x = queue id */
++
++#define EDMA_TX_STAT_PKT_MASK 0xFFFFFF
++
++/* Tx Queue Byte Statistic Register */
++#define EDMA_REG_TX_STAT_BYTE_Q(x) (0x704 + ((x) << 3)) /* x = queue id */
++
++/* Load Balance Based Ring Offset Register */
++#define EDMA_REG_LB_RING 0x800
++#define EDMA_LB_RING_ENTRY_MASK 0xff
++#define EDMA_LB_RING_ID_MASK 0x7
++#define EDMA_LB_RING_PROFILE_ID_MASK 0x3
++#define EDMA_LB_RING_ENTRY_BIT_OFFSET 8
++#define EDMA_LB_RING_ID_OFFSET 0
++#define EDMA_LB_RING_PROFILE_ID_OFFSET 3
++#define EDMA_LB_REG_VALUE 0x6040200
++
++/* Load Balance Priority Mapping Register */
++#define EDMA_REG_LB_PRI_START 0x804
++#define EDMA_REG_LB_PRI_END 0x810
++#define EDMA_LB_PRI_REG_INC 4
++#define EDMA_LB_PRI_ENTRY_BIT_OFFSET 4
++#define EDMA_LB_PRI_ENTRY_MASK 0xf
++
++/* RSS Priority Mapping Register */
++#define EDMA_REG_RSS_PRI 0x820
++#define EDMA_RSS_PRI_ENTRY_MASK 0xf
++#define EDMA_RSS_RING_ID_MASK 0x7
++#define EDMA_RSS_PRI_ENTRY_BIT_OFFSET 4
++
++/* RSS Indirection Register */
++#define EDMA_REG_RSS_IDT(x) (0x840 + ((x) << 2)) /* x = indirection table index */
++#define EDMA_NUM_IDT 16
++#define EDMA_RSS_IDT_VALUE 0x64206420
++
++/* Default RSS Ring Register */
++#define EDMA_REG_DEF_RSS 0x890
++#define EDMA_DEF_RSS_MASK 0x7
++
++/* RSS Hash Function Type Register */
++#define EDMA_REG_RSS_TYPE 0x894
++#define EDMA_RSS_TYPE_NONE 0x01
++#define EDMA_RSS_TYPE_IPV4TCP 0x02
++#define EDMA_RSS_TYPE_IPV6_TCP 0x04
++#define EDMA_RSS_TYPE_IPV4_UDP 0x08
++#define EDMA_RSS_TYPE_IPV6UDP 0x10
++#define EDMA_RSS_TYPE_IPV4 0x20
++#define EDMA_RSS_TYPE_IPV6 0x40
++#define EDMA_RSS_HASH_MODE_MASK 0x7f
++
++#define EDMA_REG_RSS_HASH_VALUE 0x8C0
++
++#define EDMA_REG_RSS_TYPE_RESULT 0x8C4
++
++#define EDMA_HASH_TYPE_START 0
++#define EDMA_HASH_TYPE_END 5
++#define EDMA_HASH_TYPE_SHIFT 12
++
++#define EDMA_RFS_FLOW_ENTRIES 1024
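++/* EDMA_RFS_FLOW_ENTRIES is a power of two, so the mask below
++ * (0x3FF) doubles as the flow-table hash mask.
++ */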
++#define EDMA_RFS_FLOW_ENTRIES_MASK (EDMA_RFS_FLOW_ENTRIES - 1)
++#define EDMA_RFS_EXPIRE_COUNT_PER_CALL 128
++
++/* RFD Base Address Register */
++#define EDMA_REG_RFD_BASE_ADDR_Q(x) (0x950 + ((x) << 2)) /* x = queue id */
++
++/* RFD Index Register */
++#define EDMA_REG_RFD_IDX_Q(x) (0x9B0 + ((x) << 2))
++
++#define EDMA_RFD_PROD_IDX_BITS 0x00000FFF
++#define EDMA_RFD_CONS_IDX_BITS 0x0FFF0000
++#define EDMA_RFD_PROD_IDX_MASK 0xFFF
++#define EDMA_RFD_CONS_IDX_MASK 0xFFF
++#define EDMA_RFD_PROD_IDX_SHIFT 0
++#define EDMA_RFD_CONS_IDX_SHIFT 16
++
++/* Rx Descriptor Control Register */
++#define EDMA_REG_RX_DESC0 0xA10
++#define EDMA_RFD_RING_SIZE_MASK 0xFFF
++#define EDMA_RX_BUF_SIZE_MASK 0xFFFF
++#define EDMA_RFD_RING_SIZE_SHIFT 0
++#define EDMA_RX_BUF_SIZE_SHIFT 16
++
++#define EDMA_REG_RX_DESC1 0xA14
++#define EDMA_RXQ_RFD_BURST_NUM_MASK 0x3F
++#define EDMA_RXQ_RFD_PF_THRESH_MASK 0x1F
++#define EDMA_RXQ_RFD_LOW_THRESH_MASK 0xFFF
++#define EDMA_RXQ_RFD_BURST_NUM_SHIFT 0
++#define EDMA_RXQ_RFD_PF_THRESH_SHIFT 8
++#define EDMA_RXQ_RFD_LOW_THRESH_SHIFT 16
++
++/* RXQ Control Register */
++#define EDMA_REG_RXQ_CTRL 0xA18
++#define EDMA_FIFO_THRESH_TYPE_SHIF 0
++#define EDMA_FIFO_THRESH_128_BYTE 0x0
++#define EDMA_FIFO_THRESH_64_BYTE 0x1
++#define EDMA_RXQ_CTRL_RMV_VLAN 0x00000002
++#define EDMA_RXQ_CTRL_EN 0x0000FF00
++
++/* AXI Burst Size Config */
++#define EDMA_REG_AXIW_CTRL_MAXWRSIZE 0xA1C
++#define EDMA_AXIW_MAXWRSIZE_VALUE 0x0
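++/* 0x0 appears to be the encoding for the 128-byte maximum AXI
++ * write burst configured in edma_axi_probe(); other encodings are
++ * not documented here.
++ */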
++
++/* Rx Statistics Register */
++#define EDMA_REG_RX_STAT_BYTE_Q(x) (0xA30 + ((x) << 2)) /* x = queue id */
++#define EDMA_REG_RX_STAT_PKT_Q(x) (0xA50 + ((x) << 2)) /* x = queue id */
++
++/* WoL Pattern Length Register */
++#define EDMA_REG_WOL_PATTERN_LEN0 0xC00
++#define EDMA_WOL_PT_LEN_MASK 0xFF
++#define EDMA_WOL_PT0_LEN_SHIFT 0
++#define EDMA_WOL_PT1_LEN_SHIFT 8
++#define EDMA_WOL_PT2_LEN_SHIFT 16
++#define EDMA_WOL_PT3_LEN_SHIFT 24
++
++#define EDMA_REG_WOL_PATTERN_LEN1 0xC04
++#define EDMA_WOL_PT4_LEN_SHIFT 0
++#define EDMA_WOL_PT5_LEN_SHIFT 8
++#define EDMA_WOL_PT6_LEN_SHIFT 16
++
++/* WoL Control Register */
++#define EDMA_REG_WOL_CTRL 0xC08
++#define EDMA_WOL_WK_EN 0x00000001
++#define EDMA_WOL_MG_EN 0x00000002
++#define EDMA_WOL_PT0_EN 0x00000004
++#define EDMA_WOL_PT1_EN 0x00000008
++#define EDMA_WOL_PT2_EN 0x00000010
++#define EDMA_WOL_PT3_EN 0x00000020
++#define EDMA_WOL_PT4_EN 0x00000040
++#define EDMA_WOL_PT5_EN 0x00000080
++#define EDMA_WOL_PT6_EN 0x00000100
++
++/* MAC Control Register */
++#define EDMA_REG_MAC_CTRL0 0xC20
++#define EDMA_REG_MAC_CTRL1 0xC24
++
++/* WoL Pattern Register */
++#define EDMA_REG_WOL_PATTERN_START 0x5000
++#define EDMA_PATTERN_PART_REG_OFFSET 0x40
++
++/* TX descriptor fields */
++#define EDMA_TPD_HDR_SHIFT 0
++#define EDMA_TPD_PPPOE_EN 0x00000100
++#define EDMA_TPD_IP_CSUM_EN 0x00000200
++#define EDMA_TPD_TCP_CSUM_EN 0x00000400
++#define EDMA_TPD_UDP_CSUM_EN 0x00000800
++#define EDMA_TPD_CUSTOM_CSUM_EN 0x00000C00
++#define EDMA_TPD_LSO_EN 0x00001000
++#define EDMA_TPD_LSO_V2_EN 0x00002000
++#define EDMA_TPD_IPV4_EN 0x00010000
++#define EDMA_TPD_MSS_MASK 0x1FFF
++#define EDMA_TPD_MSS_SHIFT 18
++#define EDMA_TPD_CUSTOM_CSUM_SHIFT 18
++
++/* RRD descriptor fields */
++#define EDMA_RRD_NUM_RFD_MASK 0x000F
++#define EDMA_RRD_SVLAN 0x8000
++#define EDMA_RRD_FLOW_COOKIE_MASK 0x07FF
++
++#define EDMA_RRD_PKT_SIZE_MASK 0x3FFF
++#define EDMA_RRD_CSUM_FAIL_MASK 0xC000
++#define EDMA_RRD_CVLAN 0x0001
++#define EDMA_RRD_DESC_VALID 0x8000
++
++#define EDMA_RRD_PRIORITY_SHIFT 4
++#define EDMA_RRD_PRIORITY_MASK 0x7
++#define EDMA_RRD_PORT_TYPE_SHIFT 7
++#define EDMA_RRD_PORT_TYPE_MASK 0x1F
++#endif /* _ESS_EDMA_H_ */
diff --git a/target/linux/ipq40xx/patches-4.19/711-dts-ipq4019-add-ethernet-essedma-node.patch b/target/linux/ipq40xx/patches-4.19/711-dts-ipq4019-add-ethernet-essedma-node.patch
new file mode 100644 (file)
index 0000000..7302d23
--- /dev/null
@@ -0,0 +1,92 @@
+From c611d3780fa101662a822d10acf8feb04ca97409 Mon Sep 17 00:00:00 2001
+From: Christian Lamparter <chunkeey@gmail.com>
+Date: Sun, 20 Nov 2016 01:01:10 +0100
+Subject: [PATCH] dts: ipq4019: add ethernet essedma node
+
+This patch adds the device-tree node for the ethernet
+interfaces.
+
+Note: The driver isn't anywhere close to being upstream,
+so the info might change.
+
+Signed-off-by: Christian Lamparter <chunkeey@gmail.com>
+---
+ arch/arm/boot/dts/qcom-ipq4019.dtsi | 60 +++++++++++++++++++++++++++++++++++++
+ 1 file changed, 60 insertions(+)
+
+--- a/arch/arm/boot/dts/qcom-ipq4019.dtsi
++++ b/arch/arm/boot/dts/qcom-ipq4019.dtsi
+@@ -44,6 +44,8 @@
+               spi1 = &blsp1_spi2;
+               i2c0 = &blsp1_i2c3;
+               i2c1 = &blsp1_i2c4;
++              ethernet0 = &gmac0;
++              ethernet1 = &gmac1;
+       };
+       cpus {
+@@ -617,6 +619,64 @@
+                       status = "disabled";
+               };
++              edma@c080000 {
++                      compatible = "qcom,ess-edma";
++                      reg = <0xc080000 0x8000>;
++                      qcom,page-mode = <0>;
++                      qcom,rx_head_buf_size = <1540>;
++                      qcom,mdio_supported;
++                      qcom,poll_required = <1>;
++                      qcom,num_gmac = <2>;
++                      interrupts = <0  65 IRQ_TYPE_EDGE_RISING
++                                    0  66 IRQ_TYPE_EDGE_RISING
++                                    0  67 IRQ_TYPE_EDGE_RISING
++                                    0  68 IRQ_TYPE_EDGE_RISING
++                                    0  69 IRQ_TYPE_EDGE_RISING
++                                    0  70 IRQ_TYPE_EDGE_RISING
++                                    0  71 IRQ_TYPE_EDGE_RISING
++                                    0  72 IRQ_TYPE_EDGE_RISING
++                                    0  73 IRQ_TYPE_EDGE_RISING
++                                    0  74 IRQ_TYPE_EDGE_RISING
++                                    0  75 IRQ_TYPE_EDGE_RISING
++                                    0  76 IRQ_TYPE_EDGE_RISING
++                                    0  77 IRQ_TYPE_EDGE_RISING
++                                    0  78 IRQ_TYPE_EDGE_RISING
++                                    0  79 IRQ_TYPE_EDGE_RISING
++                                    0  80 IRQ_TYPE_EDGE_RISING
++                                    0 240 IRQ_TYPE_EDGE_RISING
++                                    0 241 IRQ_TYPE_EDGE_RISING
++                                    0 242 IRQ_TYPE_EDGE_RISING
++                                    0 243 IRQ_TYPE_EDGE_RISING
++                                    0 244 IRQ_TYPE_EDGE_RISING
++                                    0 245 IRQ_TYPE_EDGE_RISING
++                                    0 246 IRQ_TYPE_EDGE_RISING
++                                    0 247 IRQ_TYPE_EDGE_RISING
++                                    0 248 IRQ_TYPE_EDGE_RISING
++                                    0 249 IRQ_TYPE_EDGE_RISING
++                                    0 250 IRQ_TYPE_EDGE_RISING
++                                    0 251 IRQ_TYPE_EDGE_RISING
++                                    0 252 IRQ_TYPE_EDGE_RISING
++                                    0 253 IRQ_TYPE_EDGE_RISING
++                                    0 254 IRQ_TYPE_EDGE_RISING
++                                    0 255 IRQ_TYPE_EDGE_RISING>;
++
++                      status = "disabled";
++
++                      gmac0: gmac0 {
++                              local-mac-address = [00 00 00 00 00 00];
++                              vlan_tag = <1 0x1f>;
++                      };
++
++                      gmac1: gmac1 {
++                              local-mac-address = [00 00 00 00 00 00];
++                              qcom,phy_mdio_addr = <4>;
++                              qcom,poll_required = <1>;
++                              qcom,forced_speed = <1000>;
++                              qcom,forced_duplex = <1>;
++                              vlan_tag = <2 0x20>;
++                      };
++              };
++
+               usb3_ss_phy: ssphy@9a000 {
+                       compatible = "qcom,usb-ss-ipq4019-phy";
+                       #phy-cells = <0>;
diff --git a/target/linux/ipq40xx/patches-4.19/712-mr33-essedma.patch b/target/linux/ipq40xx/patches-4.19/712-mr33-essedma.patch
new file mode 100644 (file)
index 0000000..ee6452e
--- /dev/null
@@ -0,0 +1,334 @@
+--- a/drivers/net/ethernet/qualcomm/essedma/edma.c
++++ b/drivers/net/ethernet/qualcomm/essedma/edma.c
+@@ -161,8 +161,10 @@ static void edma_configure_rx(struct edm
+       /* Set Rx FIFO threshold to start to DMA data to host */
+       rxq_ctrl_data = EDMA_FIFO_THRESH_128_BYTE;
++      if (!edma_cinfo->is_single_phy) {
+       /* Set RX remove vlan bit */
+       rxq_ctrl_data |= EDMA_RXQ_CTRL_RMV_VLAN;
++      }
+       edma_write_reg(EDMA_REG_RXQ_CTRL, rxq_ctrl_data);
+ }
+@@ -1293,6 +1295,10 @@ void edma_adjust_link(struct net_device
+       if (status == __EDMA_LINKUP && adapter->link_state == __EDMA_LINKDOWN) {
+               dev_info(&adapter->pdev->dev, "%s: GMAC Link is up with phy_speed=%d\n", netdev->name, phydev->speed);
+               adapter->link_state = __EDMA_LINKUP;
++              if (adapter->edma_cinfo->is_single_phy) {
++                      ess_set_port_status_speed(adapter->edma_cinfo, phydev,
++                                                ffs(adapter->dp_bitmap) - 1);
++              }
+               netif_carrier_on(netdev);
+               if (netif_running(netdev))
+                       netif_tx_wake_all_queues(netdev);
+@@ -1386,10 +1392,12 @@ netdev_tx_t edma_xmit(struct sk_buff *sk
+       }
+       /* Check and mark VLAN tag offload */
+-      if (skb_vlan_tag_present(skb))
+-              flags_transmit |= EDMA_VLAN_TX_TAG_INSERT_FLAG;
+-      else if (adapter->default_vlan_tag)
+-              flags_transmit |= EDMA_VLAN_TX_TAG_INSERT_DEFAULT_FLAG;
++      if (!adapter->edma_cinfo->is_single_phy) {
++              if (unlikely(skb_vlan_tag_present(skb)))
++                      flags_transmit |= EDMA_VLAN_TX_TAG_INSERT_FLAG;
++              else if (adapter->default_vlan_tag)
++                      flags_transmit |= EDMA_VLAN_TX_TAG_INSERT_DEFAULT_FLAG;
++      }
+       /* Check and mark checksum offload */
+       if (likely(skb->ip_summed == CHECKSUM_PARTIAL))
+--- a/drivers/net/ethernet/qualcomm/essedma/edma.h
++++ b/drivers/net/ethernet/qualcomm/essedma/edma.h
+@@ -31,6 +31,7 @@
+ #include <linux/platform_device.h>
+ #include <linux/of.h>
+ #include <linux/of_device.h>
++#include <linux/clk.h>
+ #include <linux/kernel.h>
+ #include <linux/device.h>
+ #include <linux/sysctl.h>
+@@ -332,6 +333,9 @@ struct edma_common_info {
+       struct edma_per_cpu_queues_info edma_percpu_info[CONFIG_NR_CPUS]; /* per cpu information */
+       spinlock_t stats_lock; /* protect edma stats area for updation */
+       struct timer_list edma_stats_timer;
++      bool is_single_phy;
++      void __iomem *ess_hw_addr;
++      struct clk *ess_clk;
+ };
+ /* transimit packet descriptor (tpd) ring */
+@@ -444,4 +448,6 @@ void edma_change_tx_coalesce(int usecs);
+ void edma_change_rx_coalesce(int usecs);
+ void edma_get_tx_rx_coalesce(u32 *reg_val);
+ void edma_clear_irq_status(void);
++void ess_set_port_status_speed(struct edma_common_info *edma_cinfo,
++                               struct phy_device *phydev, uint8_t port_id);
+ #endif /* _EDMA_H_ */
+--- a/drivers/net/ethernet/qualcomm/essedma/edma_axi.c
++++ b/drivers/net/ethernet/qualcomm/essedma/edma_axi.c
+@@ -17,6 +17,11 @@
+ #include <linux/of.h>
+ #include <linux/of_net.h>
+ #include <linux/timer.h>
++#include <linux/of_platform.h>
++#include <linux/of_address.h>
++#include <linux/clk.h>
++#include <linux/string.h>
++#include <linux/reset.h>
+ #include "edma.h"
+ #include "ess_edma.h"
+@@ -81,6 +86,101 @@ void edma_read_reg(u16 reg_addr, volatil
+       *reg_value = readl((void __iomem *)(edma_hw_addr + reg_addr));
+ }
++static void ess_write_reg(struct edma_common_info *edma, u16 reg_addr, u32 reg_value)
++{
++      writel(reg_value, ((void __iomem *)
++              ((unsigned long)edma->ess_hw_addr + reg_addr)));
++}
++
++static void ess_read_reg(struct edma_common_info *edma, u16 reg_addr,
++                volatile u32 *reg_value)
++{
++      *reg_value = readl((void __iomem *)
++              ((unsigned long)edma->ess_hw_addr + reg_addr));
++}
++
++static int ess_reset(struct edma_common_info *edma)
++{
++      struct device_node *switch_node = NULL;
++      struct reset_control *ess_rst;
++      u32 regval;
++
++      switch_node = of_find_node_by_name(NULL, "ess-switch");
++      if (!switch_node) {
++              pr_err("switch-node not found\n");
++              return -EINVAL;
++      }
++
++      ess_rst = of_reset_control_get(switch_node, "ess_rst");
++      of_node_put(switch_node);
++
++      if (IS_ERR(ess_rst)) {
++              pr_err("failed to find ess_rst!\n");
++              return -ENOENT;
++      }
++
++      reset_control_assert(ess_rst);
++      msleep(10);
++      reset_control_deassert(ess_rst);
++      msleep(100);
++      reset_control_put(ess_rst);
++
++      /* Enable forwarding only between port 5 <--> port 0;
++       * bits [5:0] are a bitmap of the ports a port may forward to */
++#define SET_PORT_BMP(r,v) \
++              ess_read_reg(edma, r, &regval); \
++              ess_write_reg(edma, r, ((regval & ~0x3F) | v));
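++      /* Multi-statement macro: safe here only because every
++       * expansion below is straight-line code, never the body of
++       * an unbraced conditional. */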
++
++      SET_PORT_BMP(ESS_PORT0_LOOKUP_CTRL,0x20);
++      SET_PORT_BMP(ESS_PORT1_LOOKUP_CTRL,0x00);
++      SET_PORT_BMP(ESS_PORT2_LOOKUP_CTRL,0x00);
++      SET_PORT_BMP(ESS_PORT3_LOOKUP_CTRL,0x00);
++      SET_PORT_BMP(ESS_PORT4_LOOKUP_CTRL,0x00);
++      SET_PORT_BMP(ESS_PORT5_LOOKUP_CTRL,0x01);
++      ess_write_reg(edma, ESS_RGMII_CTRL, 0x400);
++      ess_write_reg(edma, ESS_PORT0_STATUS, ESS_PORT_1G_FDX);
++      ess_write_reg(edma, ESS_PORT5_STATUS, ESS_PORT_1G_FDX);
++      ess_write_reg(edma, ESS_PORT0_HEADER_CTRL, 0);
++#undef SET_PORT_BMP
++
++      /* forward multicast and broadcast frames to CPU */
++      ess_write_reg(edma, ESS_FWD_CTRL1,
++              (ESS_PORTS_ALL << ESS_FWD_CTRL1_UC_FLOOD_S) |
++              (ESS_PORTS_ALL << ESS_FWD_CTRL1_MC_FLOOD_S) |
++              (ESS_PORTS_ALL << ESS_FWD_CTRL1_BC_FLOOD_S));
++
++      return 0;
++}
++
++void ess_set_port_status_speed(struct edma_common_info *edma,
++                             struct phy_device *phydev, uint8_t port_id)
++{
++      uint16_t reg_off = ESS_PORT0_STATUS + (4 * port_id);
++      uint32_t reg_val = 0;
++
++      ess_read_reg(edma, reg_off, &reg_val);
++
++      /* reset the speed bits [0:1] */
++      reg_val &= ~ESS_PORT_STATUS_SPEED_INV;
++
++      /* set the new speed */
++      switch(phydev->speed) {
++              case SPEED_1000:  reg_val |= ESS_PORT_STATUS_SPEED_1000; break;
++              case SPEED_100:   reg_val |= ESS_PORT_STATUS_SPEED_100;  break;
++              case SPEED_10:    reg_val |= ESS_PORT_STATUS_SPEED_10;   break;
++              default:          reg_val |= ESS_PORT_STATUS_SPEED_INV;  break;
++      }
++
++      /* check full/half duplex */
++      if (phydev->duplex) {
++              reg_val |= ESS_PORT_STATUS_DUPLEX_MODE;
++      } else {
++              reg_val &= ~ESS_PORT_STATUS_DUPLEX_MODE;
++      }
++
++      ess_write_reg(edma, reg_off, reg_val);
++}
++
+ /* edma_change_tx_coalesce()
+  *    change tx interrupt moderation timer
+  */
+@@ -550,6 +650,31 @@ static struct ctl_table edma_table[] = {
+       {}
+ };
++static int ess_parse(struct edma_common_info *edma)
++{
++      struct device_node *switch_node;
++      int ret = -EINVAL;
++
++      switch_node = of_find_node_by_name(NULL, "ess-switch");
++      if (!switch_node) {
++              pr_err("cannot find ess-switch node\n");
++              goto out;
++      }
++
++      edma->ess_hw_addr = of_io_request_and_map(switch_node,
++                                                0, KBUILD_MODNAME);
++      if (IS_ERR(edma->ess_hw_addr)) {
++              pr_err("%s: ioremap failed\n", __func__);
++              goto out;
++      }
++
++      edma->ess_clk = of_clk_get_by_name(switch_node, "ess_clk");
++      ret = clk_prepare_enable(edma->ess_clk);
++out:
++      of_node_put(switch_node);
++      return ret;
++}
++
+ /* edma_axi_netdev_ops
+  *    Describe the operations supported by registered netdevices
+  *
+@@ -785,6 +910,17 @@ static int edma_axi_probe(struct platfor
+               miibus = mdio_data->mii_bus;
+       }
++      if (of_property_read_bool(np, "qcom,single-phy") &&
++          edma_cinfo->num_gmac == 1) {
++              err = ess_parse(edma_cinfo);
++              if (!err)
++                      err = ess_reset(edma_cinfo);
++              if (err)
++                      goto err_single_phy_init;
++              else
++                      edma_cinfo->is_single_phy = true;
++      }
++
+       for_each_available_child_of_node(np, pnp) {
+               const char *mac_addr;
+@@ -1073,11 +1209,15 @@ static int edma_axi_probe(struct platfor
+       for (i = 0; i < edma_cinfo->num_gmac; i++) {
+               if (adapter[i]->poll_required) {
++                      int phy_mode = of_get_phy_mode(np);
++
++                      if (phy_mode < 0)
++                              phy_mode = PHY_INTERFACE_MODE_SGMII;
+                       adapter[i]->phydev =
+                               phy_connect(edma_netdev[i],
+                                           (const char *)adapter[i]->phy_id,
+                                           &edma_adjust_link,
+-                                          PHY_INTERFACE_MODE_SGMII);
++                                          phy_mode);
+                       if (IS_ERR(adapter[i]->phydev)) {
+                               dev_dbg(&pdev->dev, "PHY attach FAIL");
+                               err = -EIO;
+@@ -1121,6 +1261,9 @@ err_rmap_alloc_fail:
+       for (i = 0; i < edma_cinfo->num_gmac; i++)
+               unregister_netdev(edma_netdev[i]);
+ err_register:
++err_single_phy_init:
++      iounmap(edma_cinfo->ess_hw_addr);
++      clk_disable_unprepare(edma_cinfo->ess_clk);
+ err_mdiobus_init_fail:
+       edma_free_rx_rings(edma_cinfo);
+ err_rx_rinit:
+@@ -1181,6 +1324,8 @@ static int edma_axi_remove(struct platfo
+       del_timer_sync(&edma_cinfo->edma_stats_timer);
+       edma_free_irqs(adapter);
+       unregister_net_sysctl_table(edma_cinfo->edma_ctl_table_hdr);
++      iounmap(edma_cinfo->ess_hw_addr);
++      clk_disable_unprepare(edma_cinfo->ess_clk);
+       edma_free_tx_resources(edma_cinfo);
+       edma_free_rx_resources(edma_cinfo);
+       edma_free_tx_rings(edma_cinfo);
+--- a/drivers/net/ethernet/qualcomm/essedma/ess_edma.h
++++ b/drivers/net/ethernet/qualcomm/essedma/ess_edma.h
+@@ -329,4 +329,61 @@ struct edma_hw;
+ #define EDMA_RRD_PRIORITY_MASK 0x7
+ #define EDMA_RRD_PORT_TYPE_SHIFT 7
+ #define EDMA_RRD_PORT_TYPE_MASK 0x1F
++
++#define ESS_RGMII_CTRL                0x0004
++
++/* Port status registers */
++#define ESS_PORT0_STATUS      0x007C
++#define ESS_PORT1_STATUS      0x0080
++#define ESS_PORT2_STATUS      0x0084
++#define ESS_PORT3_STATUS      0x0088
++#define ESS_PORT4_STATUS      0x008C
++#define ESS_PORT5_STATUS      0x0090
++
++#define ESS_PORT_STATUS_HDX_FLOW_CTL  0x80
++#define ESS_PORT_STATUS_DUPLEX_MODE   0x40
++#define ESS_PORT_STATUS_RX_FLOW_EN    0x20
++#define ESS_PORT_STATUS_TX_FLOW_EN    0x10
++#define ESS_PORT_STATUS_RX_MAC_EN     0x08
++#define ESS_PORT_STATUS_TX_MAC_EN     0x04
++#define ESS_PORT_STATUS_SPEED_INV     0x03
++#define ESS_PORT_STATUS_SPEED_1000    0x02
++#define ESS_PORT_STATUS_SPEED_100     0x01
++#define ESS_PORT_STATUS_SPEED_10      0x00
++
++#define ESS_PORT_1G_FDX      (ESS_PORT_STATUS_DUPLEX_MODE | ESS_PORT_STATUS_RX_FLOW_EN | \
++                             ESS_PORT_STATUS_TX_FLOW_EN  | ESS_PORT_STATUS_RX_MAC_EN  | \
++                             ESS_PORT_STATUS_TX_MAC_EN   | ESS_PORT_STATUS_SPEED_1000)
++
++#define PHY_STATUS_REG                        0x11
++#define PHY_STATUS_SPEED              0xC000
++#define PHY_STATUS_SPEED_SHIFT                14
++#define PHY_STATUS_DUPLEX             0x2000
++#define PHY_STATUS_DUPLEX_SHIFT       13
++#define PHY_STATUS_SPEED_DUPLEX_RESOLVED 0x0800
++#define PHY_STATUS_CARRIER            0x0400
++#define PHY_STATUS_CARRIER_SHIFT      10
++
++/* Port lookup control registers */
++#define ESS_PORT0_LOOKUP_CTRL 0x0660
++#define ESS_PORT1_LOOKUP_CTRL 0x066C
++#define ESS_PORT2_LOOKUP_CTRL 0x0678
++#define ESS_PORT3_LOOKUP_CTRL 0x0684
++#define ESS_PORT4_LOOKUP_CTRL 0x0690
++#define ESS_PORT5_LOOKUP_CTRL 0x069C
++
++#define ESS_PORT0_HEADER_CTRL 0x009C
++
++#define ESS_PORTS_ALL         0x3f
++
++#define ESS_FWD_CTRL1         0x0624
++#define   ESS_FWD_CTRL1_UC_FLOOD              BITS(0, 7)
++#define   ESS_FWD_CTRL1_UC_FLOOD_S            0
++#define   ESS_FWD_CTRL1_MC_FLOOD              BITS(8, 7)
++#define   ESS_FWD_CTRL1_MC_FLOOD_S            8
++#define   ESS_FWD_CTRL1_BC_FLOOD              BITS(16, 7)
++#define   ESS_FWD_CTRL1_BC_FLOOD_S            16
++#define   ESS_FWD_CTRL1_IGMP                  BITS(24, 7)
++#define   ESS_FWD_CTRL1_IGMP_S                        24
++
+ #endif /* _ESS_EDMA_H_ */
diff --git a/target/linux/ipq40xx/patches-4.19/713-essedma-alloc-skb-ip-align.patch b/target/linux/ipq40xx/patches-4.19/713-essedma-alloc-skb-ip-align.patch
new file mode 100644 (file)
index 0000000..8c70fce
--- /dev/null
@@ -0,0 +1,21 @@
+From 17681f0bb474d0d227f07369144149d1555d8bce Mon Sep 17 00:00:00 2001
+From: Chen Minqiang <ptpt52@gmail.com>
+Date: Sun, 17 Jun 2018 04:14:13 +0800
+Subject: [PATCH] essedma: alloc skb ip align
+
+Signed-off-by: Chen Minqiang <ptpt52@gmail.com>
+---
+ drivers/net/ethernet/qualcomm/essedma/edma.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/qualcomm/essedma/edma.c
++++ b/drivers/net/ethernet/qualcomm/essedma/edma.c
+@@ -201,7 +201,7 @@ static int edma_alloc_rx_buf(struct edma
+                       skb = sw_desc->skb;
+               } else {
+                       /* alloc skb */
+-                      skb = netdev_alloc_skb(edma_netdev[0], length);
++                      skb = netdev_alloc_skb_ip_align(edma_netdev[0], length);
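++                      /* The _ip_align variant reserves NET_IP_ALIGN
++                       * bytes of headroom so the IP header ends up
++                       * word-aligned. */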
+                       if (!skb) {
+                               /* Better luck next round */
+                               break;
diff --git a/target/linux/ipq40xx/patches-4.19/850-soc-add-qualcomm-syscon.patch b/target/linux/ipq40xx/patches-4.19/850-soc-add-qualcomm-syscon.patch
new file mode 100644 (file)
index 0000000..7b4eeff
--- /dev/null
@@ -0,0 +1,180 @@
+From: Christian Lamparter <chunkeey@googlemail.com>
+Subject: SoC: add qualcomm syscon
+--- a/drivers/soc/qcom/Makefile
++++ b/drivers/soc/qcom/Makefile
+@@ -18,6 +18,7 @@ obj-$(CONFIG_QCOM_SMEM_STATE) += smem_st
+ obj-$(CONFIG_QCOM_SMP2P)      += smp2p.o
+ obj-$(CONFIG_QCOM_SMSM)       += smsm.o
+ obj-$(CONFIG_QCOM_WCNSS_CTRL) += wcnss_ctrl.o
++obj-$(CONFIG_QCOM_TCSR)        += qcom_tcsr.o
+ obj-$(CONFIG_QCOM_APR) += apr.o
+ obj-$(CONFIG_QCOM_LLCC) += llcc-slice.o
+ obj-$(CONFIG_QCOM_SDM845_LLCC) += llcc-sdm845.o
+--- a/drivers/soc/qcom/Kconfig
++++ b/drivers/soc/qcom/Kconfig
+@@ -146,6 +146,13 @@ config QCOM_SMSM
+         Say yes here to support the Qualcomm Shared Memory State Machine.
+         The state machine is represented by bits in shared memory.
++config QCOM_TCSR
++      tristate "QCOM Top Control and Status Registers"
++      depends on ARCH_QCOM
++      help
++        Say y here to enable TCSR support.  The TCSR provides control
++        functions for various peripherals.
++
+ config QCOM_WCNSS_CTRL
+       tristate "Qualcomm WCNSS control driver"
+       depends on ARCH_QCOM
+--- /dev/null
++++ b/drivers/soc/qcom/qcom_tcsr.c
+@@ -0,0 +1,98 @@
++/*
++ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 and
++ * only version 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ */
++
++#include <linux/clk.h>
++#include <linux/err.h>
++#include <linux/io.h>
++#include <linux/module.h>
++#include <linux/of.h>
++#include <linux/of_platform.h>
++#include <linux/platform_device.h>
++
++#define TCSR_USB_PORT_SEL     0xb0
++#define TCSR_USB_HSPHY_CONFIG 0xC
++
++#define TCSR_ESS_INTERFACE_SEL_OFFSET   0x0
++#define TCSR_ESS_INTERFACE_SEL_MASK     0xf
++
++#define TCSR_WIFI0_GLB_CFG_OFFSET     0x0
++#define TCSR_WIFI1_GLB_CFG_OFFSET     0x4
++#define TCSR_PNOC_SNOC_MEMTYPE_M0_M2  0x4
++
++static int tcsr_probe(struct platform_device *pdev)
++{
++      struct resource *res;
++      const struct device_node *node = pdev->dev.of_node;
++      void __iomem *base;
++      u32 val;
++
++      res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++      base = devm_ioremap_resource(&pdev->dev, res);
++      if (IS_ERR(base))
++              return PTR_ERR(base);
++
++      if (!of_property_read_u32(node, "qcom,usb-ctrl-select", &val)) {
++              dev_info(&pdev->dev, "setting usb port select = %d\n", val);
++              writel(val, base + TCSR_USB_PORT_SEL);
++      }
++
++      if (!of_property_read_u32(node, "qcom,usb-hsphy-mode-select", &val)) {
++              dev_info(&pdev->dev, "setting usb hs phy mode select = %x\n", val);
++              writel(val, base + TCSR_USB_HSPHY_CONFIG);
++      }
++
++      if (!of_property_read_u32(node, "qcom,ess-interface-select", &val)) {
++              u32 tmp = 0;
++              dev_info(&pdev->dev, "setting ess interface select = %x\n", val);
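++              /* Read-modify-write: only the low four selector bits
++               * change; the rest of the register is preserved. */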
++              tmp = readl(base + TCSR_ESS_INTERFACE_SEL_OFFSET);
++              tmp &= ~TCSR_ESS_INTERFACE_SEL_MASK;
++              tmp |= val & TCSR_ESS_INTERFACE_SEL_MASK;
++              writel(tmp, base + TCSR_ESS_INTERFACE_SEL_OFFSET);
++      }
++
++      if (!of_property_read_u32(node, "qcom,wifi_glb_cfg", &val)) {
++              dev_info(&pdev->dev, "setting wifi_glb_cfg = %x\n", val);
++              writel(val, base + TCSR_WIFI0_GLB_CFG_OFFSET);
++              writel(val, base + TCSR_WIFI1_GLB_CFG_OFFSET);
++      }
++
++      if (!of_property_read_u32(node, "qcom,wifi_noc_memtype_m0_m2", &val)) {
++              dev_info(&pdev->dev,
++                      "setting wifi_noc_memtype_m0_m2 = %x\n", val);
++              writel(val, base + TCSR_PNOC_SNOC_MEMTYPE_M0_M2);
++      }
++
++      return 0;
++}
++
++static const struct of_device_id tcsr_dt_match[] = {
++      { .compatible = "qcom,tcsr", },
++      { },
++};
++
++MODULE_DEVICE_TABLE(of, tcsr_dt_match);
++
++static struct platform_driver tcsr_driver = {
++      .driver = {
++              .name           = "tcsr",
++              .owner          = THIS_MODULE,
++              .of_match_table = tcsr_dt_match,
++      },
++      .probe = tcsr_probe,
++};
++
++module_platform_driver(tcsr_driver);
++
++MODULE_AUTHOR("Andy Gross <agross@codeaurora.org>");
++MODULE_DESCRIPTION("QCOM TCSR driver");
++MODULE_LICENSE("GPL v2");
+--- /dev/null
++++ b/include/dt-bindings/soc/qcom,tcsr.h
+@@ -0,0 +1,48 @@
++/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 and
++ * only version 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ */
++#ifndef __DT_BINDINGS_QCOM_TCSR_H
++#define __DT_BINDINGS_QCOM_TCSR_H
++
++#define TCSR_USB_SELECT_USB3_P0               0x1
++#define TCSR_USB_SELECT_USB3_P1               0x2
++#define TCSR_USB_SELECT_USB3_DUAL     0x3
++
++/* IPQ40xx HS PHY Mode Select */
++#define TCSR_USB_HSPHY_HOST_MODE      0x00E700E7
++#define TCSR_USB_HSPHY_DEVICE_MODE    0x00C700E7
++
++/* IPQ40xx ess interface mode select */
++#define TCSR_ESS_PSGMII              0
++#define TCSR_ESS_PSGMII_RGMII5       1
++#define TCSR_ESS_PSGMII_RMII0        2
++#define TCSR_ESS_PSGMII_RMII1        4
++#define TCSR_ESS_PSGMII_RMII0_RMII1  6
++#define TCSR_ESS_PSGMII_RGMII4       9
++
++/*
++ * IPQ40xx WiFi Global Config
++ * Bit 30: AXID_EN
++ * Enable AXI master bus AXID translation to ensure all transactions
++ * are submitted in order.
++ * Bit 24: Use locally generated socslv_wxi_bvalid
++ * 1:  use locally generated socslv_wxi_bvalid for performance.
++ * 0:  use SNOC socslv_wxi_bvalid.
++ */
++#define TCSR_WIFI_GLB_CFG             0x41000000
++
++/* IPQ40xx MEM_TYPE_SEL_M0_M2 Select Bit 26:24 - 2 NORMAL */
++#define TCSR_WIFI_NOC_MEMTYPE_M0_M2   0x02222222
++
++/* TCSR A/B REG */
++#define IPQ806X_TCSR_REG_A_ADM_CRCI_MUX_SEL     0
++#define IPQ806X_TCSR_REG_B_ADM_CRCI_MUX_SEL     1
++
++#endif
diff --git a/target/linux/ipq40xx/patches-4.19/900-dts-ipq4019-ap-dk01.1.patch b/target/linux/ipq40xx/patches-4.19/900-dts-ipq4019-ap-dk01.1.patch
new file mode 100644 (file)
index 0000000..349f67c
--- /dev/null
@@ -0,0 +1,157 @@
+--- a/arch/arm/boot/dts/qcom-ipq4019-ap.dk01.1.dtsi
++++ b/arch/arm/boot/dts/qcom-ipq4019-ap.dk01.1.dtsi
+@@ -15,6 +15,7 @@
+  */
+ #include "qcom-ipq4019.dtsi"
++#include <dt-bindings/soc/qcom,tcsr.h>
+ / {
+       model = "Qualcomm Technologies, Inc. IPQ4019/AP-DK01.1";
+@@ -29,6 +30,32 @@
+       };
+       soc {
++              tcsr@194b000 {
++                      /* select hostmode */
++                      compatible = "qcom,tcsr";
++                      reg = <0x194b000 0x100>;
++                      qcom,usb-hsphy-mode-select = <TCSR_USB_HSPHY_HOST_MODE>;
++                      status = "ok";
++              };
++
++              ess_tcsr@1953000 {
++                      compatible = "qcom,tcsr";
++                      reg = <0x1953000 0x1000>;
++                      qcom,ess-interface-select = <TCSR_ESS_PSGMII>;
++              };
++
++              tcsr@1949000 {
++                      compatible = "qcom,tcsr";
++                      reg = <0x1949000 0x100>;
++                      qcom,wifi_glb_cfg = <TCSR_WIFI_GLB_CFG>;
++              };
++
++              tcsr@1957000 {
++                      compatible = "qcom,tcsr";
++                      reg = <0x1957000 0x100>;
++                      qcom,wifi_noc_memtype_m0_m2 = <TCSR_WIFI_NOC_MEMTYPE_M0_M2>;
++              };
++
+               rng@22000 {
+                       status = "ok";
+               };
+@@ -74,14 +101,6 @@
+                       pinctrl-names = "default";
+                       status = "ok";
+                       cs-gpios = <&tlmm 54 0>;
+-
+-                      mx25l25635e@0 {
+-                              #address-cells = <1>;
+-                              #size-cells = <1>;
+-                              reg = <0>;
+-                              compatible = "mx25l25635e";
+-                              spi-max-frequency = <24000000>;
+-                      };
+               };
+               serial@78af000 {
+@@ -110,6 +129,22 @@
+                       status = "ok";
+               };
++              mdio@90000 {
++                      status = "okay";
++              };
++
++              ess-switch@c000000 {
++                      status = "okay";
++              };
++
++              ess-psgmii@98000 {
++                      status = "okay";
++              };
++
++              edma@c080000 {
++                      status = "okay";
++              };
++
+               usb3_ss_phy: ssphy@9a000 {
+                       status = "ok";
+               };
+--- a/arch/arm/boot/dts/qcom-ipq4019-ap.dk01.1-c1.dts
++++ b/arch/arm/boot/dts/qcom-ipq4019-ap.dk01.1-c1.dts
+@@ -18,5 +18,73 @@
+ / {
+       model = "Qualcomm Technologies, Inc. IPQ40xx/AP-DK01.1-C1";
++      compatible = "qcom,ap-dk01.1-c1", "qcom,ap-dk01.2-c1", "qcom,ipq4019";
++      memory {
++              device_type = "memory";
++              reg = <0x80000000 0x10000000>;
++      };
++};
++
++&blsp1_spi1 {
++      mx25l25635f@0 {
++              compatible = "mx25l25635f", "jedec,spi-nor";
++              #address-cells = <1>;
++              #size-cells = <1>;
++              reg = <0>;
++              spi-max-frequency = <24000000>;
++
++              SBL1@0 {
++                      label = "SBL1";
++                      reg = <0x0 0x40000>;
++                      read-only;
++              };
++              MIBIB@40000 {
++                      label = "MIBIB";
++                      reg = <0x40000 0x20000>;
++                      read-only;
++              };
++              QSEE@60000 {
++                      label = "QSEE";
++                      reg = <0x60000 0x60000>;
++                      read-only;
++              };
++              CDT@c0000 {
++                      label = "CDT";
++                      reg = <0xc0000 0x10000>;
++                      read-only;
++              };
++              DDRPARAMS@d0000 {
++                      label = "DDRPARAMS";
++                      reg = <0xd0000 0x10000>;
++                      read-only;
++              };
++              APPSBLENV@e0000 {
++                      label = "APPSBLENV";
++                      reg = <0xe0000 0x10000>;
++                      read-only;
++              };
++              APPSBL@f0000 {
++                      label = "APPSBL";
++                      reg = <0xf0000 0x80000>;
++                      read-only;
++              };
++              ART@170000 {
++                      label = "ART";
++                      reg = <0x170000 0x10000>;
++                      read-only;
++              };
++              kernel@180000 {
++                      label = "kernel";
++                      reg = <0x180000 0x400000>;
++              };
++              rootfs@580000 {
++                      label = "rootfs";
++                      reg = <0x580000 0x1600000>;
++              };
++              firmware@180000 {
++                      label = "firmware";
++                      reg = <0x180000 0x1a00000>;
++              };
++      };
+ };
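
Two things worth noting in this board patch: the dtsi half wires the four TCSR windows (USB HS PHY host mode, ESS PSGMII interface select, WiFi global config, NOC memtype) to the constants from qcom,tcsr.h and flips the mdio, ess-switch, ess-psgmii and edma nodes to "okay" for the ESSEDMA driver. In the flash map, the firmware partition deliberately overlaps kernel and rootfs: 0x400000 (kernel) + 0x1600000 (rootfs) = 0x1a00000, exactly the size of firmware at the same 0x180000 offset. Exposing the combined span next to the individual partitions matches the usual OpenWrt sysupgrade layout.
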
diff --git a/target/linux/ipq40xx/patches-4.19/901-arm-boot-add-dts-files.patch b/target/linux/ipq40xx/patches-4.19/901-arm-boot-add-dts-files.patch
new file mode 100644
index 0000000..d6d551c
--- /dev/null
@@ -0,0 +1,37 @@
+From 8f68331e14dff9a101f2d0e1d6bec84a031f27ee Mon Sep 17 00:00:00 2001
+From: John Crispin <john@phrozen.org>
+Date: Thu, 9 Mar 2017 11:03:18 +0100
+Subject: [PATCH 69/69] arm: boot: add dts files
+
+Signed-off-by: John Crispin <john@phrozen.org>
+---
+ arch/arm/boot/dts/Makefile | 13 +++++++++++++
+ 1 file changed, 13 insertions(+)
+
+--- a/arch/arm/boot/dts/Makefile
++++ b/arch/arm/boot/dts/Makefile
+@@ -785,11 +785,24 @@ dtb-$(CONFIG_ARCH_QCOM) += \
+       qcom-apq8074-dragonboard.dtb \
+       qcom-apq8084-ifc6540.dtb \
+       qcom-apq8084-mtp.dtb \
++      qcom-ipq4018-a42.dtb \
++      qcom-ipq4018-eap1300.dtb \
++      qcom-ipq4018-ex6100v2.dtb \
++      qcom-ipq4018-ex6150v2.dtb \
++      qcom-ipq4018-fritz4040.dtb \
++      qcom-ipq4018-jalapeno.dtb \
++      qcom-ipq4018-nbg6617.dtb \
++      qcom-ipq4018-rt-ac58u.dtb \
++      qcom-ipq4018-wre6606.dtb \
+       qcom-ipq4019-ap.dk01.1-c1.dtb \
+       qcom-ipq4019-ap.dk04.1-c1.dtb \
+       qcom-ipq4019-ap.dk04.1-c3.dtb \
+       qcom-ipq4019-ap.dk07.1-c1.dtb \
+       qcom-ipq4019-ap.dk07.1-c2.dtb \
++      qcom-ipq4019-a62.dtb \
++      qcom-ipq4028-wpj428.dtb \
++      qcom-ipq4029-gl-b1300.dtb \
++      qcom-ipq4029-mr33.dtb \
+       qcom-ipq8064-ap148.dtb \
+       qcom-msm8660-surf.dtb \
+       qcom-msm8960-cdp.dtb \
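
With CONFIG_ARCH_QCOM set, the entries added above land in dtb-y, so a plain "make ARCH=arm dtbs" (cross toolchain on PATH assumed) should now emit the nine ipq4018 blobs plus the four new ipq4019/ipq4028/ipq4029 ones alongside the existing AP-DK images.
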
diff --git a/target/linux/ipq40xx/patches-4.19/997-device_tree_cmdline.patch b/target/linux/ipq40xx/patches-4.19/997-device_tree_cmdline.patch
new file mode 100644
index 0000000..7600b0e
--- /dev/null
@@ -0,0 +1,12 @@
+--- a/drivers/of/fdt.c
++++ b/drivers/of/fdt.c
+@@ -1090,6 +1090,9 @@ int __init early_init_dt_scan_chosen(uns
+       p = of_get_flat_dt_prop(node, "bootargs", &l);
+       if (p != NULL && l > 0)
+               strlcpy(data, p, min((int)l, COMMAND_LINE_SIZE));
++      p = of_get_flat_dt_prop(node, "bootargs-append", &l);
++      if (p != NULL && l > 0)
++              strlcat(data, p, min_t(int, strlen(data) + (int)l, COMMAND_LINE_SIZE));
+       /*
+        * CONFIG_CMDLINE is meant to be a default in case nothing else