ipq40xx: add target
author    John Crispin <john@phrozen.org>
          Wed, 21 Feb 2018 19:40:50 +0000 (20:40 +0100)
committer Mathias Kresin <dev@kresin.me>
          Wed, 14 Mar 2018 18:04:50 +0000 (19:04 +0100)
Signed-off-by: Christian Lamparter <chunkeey@gmail.com>
Signed-off-by: Mathias Kresin <dev@kresin.me>
Signed-off-by: John Crispin <john@phrozen.org>
58 files changed:
package/boot/uboot-envtools/Makefile
package/boot/uboot-envtools/files/ipq [deleted file]
package/boot/uboot-envtools/files/ipq40xx [new file with mode: 0644]
package/boot/uboot-envtools/files/ipq806x [new file with mode: 0644]
package/boot/uboot-fritz4040/Makefile
package/firmware/ipq-wifi/Makefile
package/kernel/mac80211/Makefile
target/linux/ipq40xx/Makefile [new file with mode: 0644]
target/linux/ipq40xx/base-files/etc/board.d/01_leds [new file with mode: 0755]
target/linux/ipq40xx/base-files/etc/board.d/02_network [new file with mode: 0755]
target/linux/ipq40xx/base-files/etc/diag.sh [new file with mode: 0755]
target/linux/ipq40xx/base-files/etc/hotplug.d/firmware/11-ath10k-caldata [new file with mode: 0644]
target/linux/ipq40xx/base-files/etc/inittab [new file with mode: 0644]
target/linux/ipq40xx/base-files/lib/upgrade/openmesh.sh [new file with mode: 0644]
target/linux/ipq40xx/base-files/lib/upgrade/platform.sh [new file with mode: 0644]
target/linux/ipq40xx/config-4.14 [new file with mode: 0644]
target/linux/ipq40xx/files-4.14/arch/arm/boot/dts/qcom-ipq4019-a42.dts [new file with mode: 0644]
target/linux/ipq40xx/files-4.14/arch/arm/boot/dts/qcom-ipq4019-ap.dk04.1-c1.dts [new file with mode: 0644]
target/linux/ipq40xx/files-4.14/arch/arm/boot/dts/qcom-ipq4019-ap.dk04.1.dtsi [new file with mode: 0644]
target/linux/ipq40xx/files-4.14/arch/arm/boot/dts/qcom-ipq4019-bus.dtsi [new file with mode: 0644]
target/linux/ipq40xx/files-4.14/arch/arm/boot/dts/qcom-ipq4019-fritz4040.dts [new file with mode: 0644]
target/linux/ipq40xx/files-4.14/arch/arm/boot/dts/qcom-ipq4019-gl-b1300.dts [new file with mode: 0644]
target/linux/ipq40xx/image/Makefile [new file with mode: 0644]
target/linux/ipq40xx/modules.mk [new file with mode: 0644]
target/linux/ipq40xx/patches-4.14/017-qcom-ipq4019-add-cpu-operating-points-for-cpufreq-su.patch [new file with mode: 0644]
target/linux/ipq40xx/patches-4.14/030-mtd-nand-Use-standard-large-page-OOB-layout-when-usi.patch [new file with mode: 0644]
target/linux/ipq40xx/patches-4.14/031-mtd-nand-use-usual-return-values-for-the-erase-hook.patch [new file with mode: 0644]
target/linux/ipq40xx/patches-4.14/040-dmaengine-qcom-bam-Process-multiple-pending-descript.patch [new file with mode: 0644]
target/linux/ipq40xx/patches-4.14/050-0002-mtd-nand-qcom-add-command-elements-in-BAM-transactio.patch [new file with mode: 0644]
target/linux/ipq40xx/patches-4.14/050-0003-mtd-nand-qcom-support-for-command-descriptor-formati.patch [new file with mode: 0644]
target/linux/ipq40xx/patches-4.14/050-0004-mtd-nand-provide-several-helpers-to-do-common-NAND-o.patch [new file with mode: 0644]
target/linux/ipq40xx/patches-4.14/050-0005-mtd-nand-force-drivers-to-explicitly-send-READ-PROG-.patch [new file with mode: 0644]
target/linux/ipq40xx/patches-4.14/050-0006-mtd-nand-qcom-Add-a-NULL-check-for-devm_kasprintf.patch [new file with mode: 0644]
target/linux/ipq40xx/patches-4.14/059-ARM-cpuidle-Add-cpuidle-support-for-QCOM-cpus.patch [new file with mode: 0644]
target/linux/ipq40xx/patches-4.14/069-arm-boot-add-dts-files.patch [new file with mode: 0644]
target/linux/ipq40xx/patches-4.14/070-qcom-spm-fix-probe-order.patch [new file with mode: 0644]
target/linux/ipq40xx/patches-4.14/101-ARM-dts-ipq4019-Add-a-few-peripheral-nodes.patch [new file with mode: 0644]
target/linux/ipq40xx/patches-4.14/102-ARM-dts-ipq4019-fix-PCI-range.patch [new file with mode: 0644]
target/linux/ipq40xx/patches-4.14/104-mtd-nand-add-Winbond-manufacturer-and-chip.patch [new file with mode: 0644]
target/linux/ipq40xx/patches-4.14/105-mtd-nor-add-mx25l25635f.patch [new file with mode: 0644]
target/linux/ipq40xx/patches-4.14/305-qcom-ipq4019-use-v2-of-the-kpss-bringup-mechanism.patch [new file with mode: 0644]
target/linux/ipq40xx/patches-4.14/306-qcom-ipq4019-add-USB-nodes-to-ipq4019-SoC-device-tre.patch [new file with mode: 0644]
target/linux/ipq40xx/patches-4.14/307-ARM-qcom-Add-IPQ4019-SoC-support.patch [new file with mode: 0644]
target/linux/ipq40xx/patches-4.14/310-msm-adhoc-bus-support.patch [new file with mode: 0644]
target/linux/ipq40xx/patches-4.14/605-net-IPQ4019-needs-rfs-vlan_tag-callbacks-in.patch [new file with mode: 0644]
target/linux/ipq40xx/patches-4.14/700-net-add-qualcomm-mdio-and-phy.patch [new file with mode: 0644]
target/linux/ipq40xx/patches-4.14/701-dts-ipq4019-add-mdio-node.patch [new file with mode: 0644]
target/linux/ipq40xx/patches-4.14/702-dts-ipq4019-add-PHY-switch-nodes.patch [new file with mode: 0644]
target/linux/ipq40xx/patches-4.14/710-net-add-qualcomm-essedma-ethernet-driver.patch [new file with mode: 0644]
target/linux/ipq40xx/patches-4.14/711-dts-ipq4019-add-ethernet-essedma-node.patch [new file with mode: 0644]
target/linux/ipq40xx/patches-4.14/820-qcom-ipq4019-Add-IPQ4019-USB-HS-SS-PHY-drivers.patch [new file with mode: 0644]
target/linux/ipq40xx/patches-4.14/830-usb-dwc3-register-qca-ipq4019-dwc3-in-dwc3-of-simple.patch [new file with mode: 0644]
target/linux/ipq40xx/patches-4.14/850-soc-add-qualcomm-syscon.patch [new file with mode: 0644]
target/linux/ipq40xx/patches-4.14/864-03-dts-ipq4019-ap-dk01-add-tcsr-config-to-dtsi.patch [new file with mode: 0644]
target/linux/ipq40xx/patches-4.14/864-05-dts-ipq4019-ap-dk01-remove-spi-chip-node-from-dtsi.patch [new file with mode: 0644]
target/linux/ipq40xx/patches-4.14/864-07-dts-ipq4019-ap-dk01.1-c1-add-spi-and-ram-nodes.patch [new file with mode: 0644]
target/linux/ipq40xx/patches-4.14/864-08-dts-ipq4019-ap-dk01.1-c1-add-compatible-string.patch [new file with mode: 0644]
target/linux/ipq40xx/profiles/00-default.mk [new file with mode: 0644]

diff --git a/package/boot/uboot-envtools/Makefile b/package/boot/uboot-envtools/Makefile
index 57a2ec5393626d112dd376967702205d1d39320b..442cf3fd834972e907b284b6a2e14fdde7695d6e 100644 (file)
@@ -88,9 +88,13 @@ ifneq ($(CONFIG_TARGET_imx6),)
        $(INSTALL_DIR) $(1)/etc/uci-defaults
        $(INSTALL_DATA) ./files/imx6 $(1)/etc/uci-defaults/30_uboot-envtools
 endif
+ifneq ($(CONFIG_TARGET_ipq40xx),)
+       $(INSTALL_DIR) $(1)/etc/uci-defaults
+       $(INSTALL_DATA) ./files/ipq40xx $(1)/etc/uci-defaults/30_uboot-envtools
+endif
 ifneq ($(CONFIG_TARGET_ipq806x),)
        $(INSTALL_DIR) $(1)/etc/uci-defaults
-       $(INSTALL_DATA) ./files/ipq $(1)/etc/uci-defaults/30_uboot-envtools
+       $(INSTALL_DATA) ./files/ipq806x $(1)/etc/uci-defaults/30_uboot-envtools
 endif
 ifneq ($(CONFIG_TARGET_kirkwood),)
        $(INSTALL_DIR) $(1)/etc/uci-defaults
diff --git a/package/boot/uboot-envtools/files/ipq b/package/boot/uboot-envtools/files/ipq
deleted file mode 100644 (file)
index 441ba48..0000000
+++ /dev/null
@@ -1,52 +0,0 @@
-#!/bin/sh
-#
-# Copyright (C) 2016 LEDE
-#
-
-[ -e /etc/config/ubootenv ] && exit 0
-
-touch /etc/config/ubootenv
-
-. /lib/uboot-envtools.sh
-. /lib/functions.sh
-
-board=$(board_name)
-
-ubootenv_mtdinfo () {
-       UBOOTENV_PART=$(cat /proc/mtd | grep APPSBLENV)
-       mtd_dev=$(echo $UBOOTENV_PART | awk '{print $1}' | sed 's/:$//')
-       mtd_size=$(echo $UBOOTENV_PART | awk '{print "0x"$2}')
-       mtd_erase=$(echo $UBOOTENV_PART | awk '{print "0x"$3}')
-       nor_flash=$(find /sys/bus/spi/devices/*/mtd -name ${mtd_dev})
-
-       if [ -n "$nor_flash" ]; then
-               ubootenv_size=$mtd_size
-       else
-               # size is fixed to 0x40000 in u-boot
-               ubootenv_size=0x40000
-       fi
-
-       sectors=$(( $ubootenv_size / $mtd_erase ))
-       echo /dev/$mtd_dev 0x0 $ubootenv_size $mtd_erase $sectors
-}
-
-case "$board" in
-linksys,ea8500)
-       ubootenv_add_uci_config "/dev/mtd10" "0x0" "0x20000" "0x20000"
-       ;;
-openmesh,a42)
-       ubootenv_add_uci_config "/dev/mtd5" "0x0" "0x10000" "0x10000"
-       ;;
-qcom,ipq8064-ap148 |\
-qcom,ipq8064-db149)
-       ubootenv_add_uci_config $(ubootenv_mtdinfo)
-       ;;
-zyxel,nbg6817)
-       ubootenv_add_uci_config "/dev/mtdblock9" "0x0" "0x10000" "0x10000"
-       ;;
-esac
-
-config_load ubootenv
-config_foreach ubootenv_add_app_config ubootenv
-
-exit 0
diff --git a/package/boot/uboot-envtools/files/ipq40xx b/package/boot/uboot-envtools/files/ipq40xx
new file mode 100644 (file)
index 0000000..4eae506
--- /dev/null
@@ -0,0 +1,42 @@
+#!/bin/sh
+#
+# Copyright (C) 2016 LEDE
+#
+
+[ -e /etc/config/ubootenv ] && exit 0
+
+touch /etc/config/ubootenv
+
+. /lib/uboot-envtools.sh
+. /lib/functions.sh
+
+board=$(board_name)
+
+ubootenv_mtdinfo () {
+       UBOOTENV_PART=$(cat /proc/mtd | grep APPSBLENV)
+       mtd_dev=$(echo $UBOOTENV_PART | awk '{print $1}' | sed 's/:$//')
+       mtd_size=$(echo $UBOOTENV_PART | awk '{print "0x"$2}')
+       mtd_erase=$(echo $UBOOTENV_PART | awk '{print "0x"$3}')
+       nor_flash=$(find /sys/bus/spi/devices/*/mtd -name ${mtd_dev})
+
+       if [ -n "$nor_flash" ]; then
+               ubootenv_size=$mtd_size
+       else
+               # size is fixed to 0x40000 in u-boot
+               ubootenv_size=0x40000
+       fi
+
+       sectors=$(( $ubootenv_size / $mtd_erase ))
+       echo /dev/$mtd_dev 0x0 $ubootenv_size $mtd_erase $sectors
+}
+
+case "$board" in
+openmesh,a42)
+       ubootenv_add_uci_config "/dev/mtd5" "0x0" "0x10000" "0x10000"
+       ;;
+esac
+
+config_load ubootenv
+config_foreach ubootenv_add_app_config ubootenv
+
+exit 0
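
Note on the script above: each ubootenv_add_uci_config call records the U-Boot
environment location as a UCI section. A minimal sketch of the result, assuming
the stock /lib/uboot-envtools.sh helper that translates every such section into
an /etc/fw_env.config line read by fw_printenv/fw_setenv (the exact option names
in /etc/config/ubootenv are handled by that helper and not spelled out here):

    # hypothetical state on an OpenMesh A42 after the uci-defaults script ran
    # /etc/fw_env.config format: <device> <offset> <env size> <sector size>
    /dev/mtd5 0x0 0x10000 0x10000

    # quick sanity check from a shell on the device
    fw_printenv | head
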
diff --git a/package/boot/uboot-envtools/files/ipq806x b/package/boot/uboot-envtools/files/ipq806x
new file mode 100644 (file)
index 0000000..4618aac
--- /dev/null
@@ -0,0 +1,49 @@
+#!/bin/sh
+#
+# Copyright (C) 2016 LEDE
+#
+
+[ -e /etc/config/ubootenv ] && exit 0
+
+touch /etc/config/ubootenv
+
+. /lib/uboot-envtools.sh
+. /lib/functions.sh
+
+board=$(board_name)
+
+ubootenv_mtdinfo () {
+       UBOOTENV_PART=$(cat /proc/mtd | grep APPSBLENV)
+       mtd_dev=$(echo $UBOOTENV_PART | awk '{print $1}' | sed 's/:$//')
+       mtd_size=$(echo $UBOOTENV_PART | awk '{print "0x"$2}')
+       mtd_erase=$(echo $UBOOTENV_PART | awk '{print "0x"$3}')
+       nor_flash=$(find /sys/bus/spi/devices/*/mtd -name ${mtd_dev})
+
+       if [ -n "$nor_flash" ]; then
+               ubootenv_size=$mtd_size
+       else
+               # size is fixed to 0x40000 in u-boot
+               ubootenv_size=0x40000
+       fi
+
+       sectors=$(( $ubootenv_size / $mtd_erase ))
+       echo /dev/$mtd_dev 0x0 $ubootenv_size $mtd_erase $sectors
+}
+
+case "$board" in
+linksys,ea8500)
+       ubootenv_add_uci_config "/dev/mtd10" "0x0" "0x20000" "0x20000"
+       ;;
+qcom,ipq8064-ap148 |\
+qcom,ipq8064-db149)
+       ubootenv_add_uci_config $(ubootenv_mtdinfo)
+       ;;
+zyxel,nbg6817)
+       ubootenv_add_uci_config "/dev/mtdblock9" "0x0" "0x10000" "0x10000"
+       ;;
+esac
+
+config_load ubootenv
+config_foreach ubootenv_add_app_config ubootenv
+
+exit 0
diff --git a/package/boot/uboot-fritz4040/Makefile b/package/boot/uboot-fritz4040/Makefile
index 65d70e05022d19a4e8716f9b069d7bf8ed0780fa..77d6fdc3b8bba3a646d4d24a9618a00497f9b7e8 100644 (file)
@@ -20,7 +20,7 @@ include $(INCLUDE_DIR)/u-boot.mk
 include $(INCLUDE_DIR)/package.mk
 
 define U-Boot/Default
-  BUILD_TARGET:=ipq806x
+  BUILD_TARGET:=ipq40xx
   UBOOT_IMAGE:=uboot-fritz4040.bin
 endef
 
diff --git a/package/firmware/ipq-wifi/Makefile b/package/firmware/ipq-wifi/Makefile
index cc29d321365c79f1f213b7ce4c979b7a0632e9dc..3f929bc03da737a735559cf44f32c8e870cd2b09 100644 (file)
@@ -20,7 +20,7 @@ define Package/ipq-wifi-default
   SUBMENU:=ath10k IPQ4019 Boarddata
   SECTION:=firmware
   CATEGORY:=Firmware
-  DEPENDS:=@TARGET_ipq806x +ath10k-firmware-qca4019
+  DEPENDS:=@TARGET_ipq40xx +ath10k-firmware-qca4019
   TITLE:=Custom Board
 endef
 
diff --git a/package/kernel/mac80211/Makefile b/package/kernel/mac80211/Makefile
index 6ea07fdbfb0b631e963a562bfd327c88132769ec..22ed841394f7eff6fe6619529f02522275dfebe4 100644 (file)
@@ -1584,7 +1584,7 @@ config-$(CONFIG_PACKAGE_ATH_SPECTRAL) += ATH9K_COMMON_SPECTRAL ATH10K_SPECTRAL
 config-$(call config_package,ath9k) += ATH9K
 config-$(call config_package,ath9k-common) += ATH9K_COMMON
 config-$(CONFIG_TARGET_ar71xx) += ATH9K_AHB
-config-$(CONFIG_TARGET_ipq806x) += ATH10K_AHB
+config-$(CONFIG_TARGET_ipq40xx) += ATH10K_AHB
 config-$(CONFIG_PCI) += ATH9K_PCI
 config-$(CONFIG_ATH_USER_REGD) += ATH_USER_REGD
 config-$(CONFIG_ATH9K_SUPPORT_PCOEM) += ATH9K_PCOEM
diff --git a/target/linux/ipq40xx/Makefile b/target/linux/ipq40xx/Makefile
new file mode 100644 (file)
index 0000000..390426c
--- /dev/null
@@ -0,0 +1,21 @@
+include $(TOPDIR)/rules.mk
+
+ARCH:=arm
+BOARD:=ipq40xx
+BOARDNAME:=Qualcomm Atheros IPQ40XX
+FEATURES:=squashfs fpu
+CPU_TYPE:=cortex-a7
+CPU_SUBTYPE:=neon-vfpv4
+MAINTAINER:=John Crispin <john@phrozen.org>
+
+KERNEL_PATCHVER:=4.14
+
+KERNELNAME:=zImage Image dtbs
+
+include $(INCLUDE_DIR)/target.mk
+DEFAULT_PACKAGES += \
+       kmod-leds-gpio kmod-gpio-button-hotplug swconfig \
+       kmod-ath10k wpad-mini \
+       kmod-usb3 kmod-usb-dwc3-of-simple kmod-usb-phy-qcom-dwc3 \
+
+$(eval $(call BuildTarget))
diff --git a/target/linux/ipq40xx/base-files/etc/board.d/01_leds b/target/linux/ipq40xx/base-files/etc/board.d/01_leds
new file mode 100755 (executable)
index 0000000..31c6e32
--- /dev/null
@@ -0,0 +1,28 @@
+#!/bin/sh
+#
+# Copyright (C) 2015 OpenWrt.org
+#
+
+. /lib/functions/uci-defaults.sh
+
+board_config_update
+
+board=$(board_name)
+boardname="${board##*,}"
+
+case "$board" in
+avm,fritzbox-4040)
+       ucidef_set_led_wlan "wlan" "WLAN" "fritz4040:green:wlan" "phy0tpt" "phy1tpt"
+       ucidef_set_led_netdev "wan" "WAN" "fritz4040:green:wan" "eth1"
+       ucidef_set_led_switch "lan" "LAN" "fritz4040:green:lan" "switch0" "0x1e"
+       ;;
+glinet,gl-b1300)
+       ucidef_set_led_wlan "wlan" "WLAN" "${boardname}:green:wlan" "phy0tpt"
+       ;;
+*)
+       ;;
+esac
+
+board_config_flush
+
+exit 0
diff --git a/target/linux/ipq40xx/base-files/etc/board.d/02_network b/target/linux/ipq40xx/base-files/etc/board.d/02_network
new file mode 100755 (executable)
index 0000000..37f81a3
--- /dev/null
@@ -0,0 +1,31 @@
+#!/bin/sh
+#
+# Copyright (c) 2015 The Linux Foundation. All rights reserved.
+# Copyright (c) 2011-2015 OpenWrt.org
+#
+
+. /lib/functions/uci-defaults.sh
+. /lib/functions/system.sh
+
+board_config_update
+
+board=$(board_name)
+
+case "$board" in
+avm,fritzbox-4040)
+       ucidef_set_interfaces_lan_wan "eth0" "eth1"
+       ucidef_add_switch "switch0" \
+               "0u@eth0" "1:lan" "2:lan" "3:lan" "4:lan"
+       ;;
+glinet,gl-b1300 |\
+openmesh,a42)
+       ucidef_set_interfaces_lan_wan "eth1" "eth0"
+       ;;
+*)
+       echo "Unsupported hardware. Network interfaces not intialized"
+       ;;
+esac
+
+board_config_flush
+
+exit 0
diff --git a/target/linux/ipq40xx/base-files/etc/diag.sh b/target/linux/ipq40xx/base-files/etc/diag.sh
new file mode 100755 (executable)
index 0000000..4cfe632
--- /dev/null
@@ -0,0 +1,43 @@
+#!/bin/sh
+
+. /lib/functions/leds.sh
+
+boot="$(get_dt_led boot)"
+failsafe="$(get_dt_led failsafe)"
+running="$(get_dt_led running)"
+upgrade="$(get_dt_led upgrade)"
+
+set_state() {
+       status_led="$boot"
+
+       case "$1" in
+       preinit)
+               status_led_blink_preinit
+               ;;
+       failsafe)
+               status_led_off
+               [ -n "$running" ] && {
+                       status_led="$running"
+                       status_led_off
+               }
+               status_led="$failsafe"
+               status_led_blink_failsafe
+               ;;
+       preinit_regular)
+               status_led_blink_preinit_regular
+               ;;
+       upgrade)
+               [ -n "$running" ] && {
+                       status_led="$upgrade"
+                       status_led_blink_preinit_regular
+               }
+               ;;
+       done)
+               status_led_off
+               [ -n "$running" ] && {
+                       status_led="$running"
+                       status_led_on
+               }
+               ;;
+       esac
+}
diff --git a/target/linux/ipq40xx/base-files/etc/hotplug.d/firmware/11-ath10k-caldata b/target/linux/ipq40xx/base-files/etc/hotplug.d/firmware/11-ath10k-caldata
new file mode 100644 (file)
index 0000000..33ea9b4
--- /dev/null
@@ -0,0 +1,62 @@
+#!/bin/sh
+
+ath10kcal_die() {
+       echo "ath10cal: " "$*"
+       exit 1
+}
+
+ath10kcal_extract() {
+       local part=$1
+       local offset=$2
+       local count=$3
+       local mtd
+
+       mtd=$(find_mtd_chardev $part)
+       [ -n "$mtd" ] || \
+               ath10kcal_die "no mtd device found for partition $part"
+
+       dd if=$mtd of=/lib/firmware/$FIRMWARE bs=1 skip=$offset count=$count 2>/dev/null || \
+               ath10kcal_die "failed to extract calibration data from $mtd"
+}
+
+[ -e /lib/firmware/$FIRMWARE ] && exit 0
+
+. /lib/functions.sh
+. /lib/functions/system.sh
+
+board=$(board_name)
+
+
+case "$FIRMWARE" in
+"ath10k/pre-cal-ahb-a000000.wifi.bin")
+       case "$board" in
+       avm,fritzbox-4040)
+               /usr/bin/fritz_cal_extract -i 1 -s 0x400 -e 0x207 -l 12064 -o /lib/firmware/$FIRMWARE $(find_mtd_chardev "urlader_config")
+               ;;
+       glinet,gl-b1300 |\
+       qcom,ap-dk01.1-c1)
+               ath10kcal_extract "ART" 4096 12064
+               ;;
+       openmesh,a42)
+               ath10kcal_extract "0:ART" 4096 12064
+               ;;
+       esac
+       ;;
+"ath10k/pre-cal-ahb-a800000.wifi.bin")
+       case "$board" in
+       avm,fritzbox-4040)
+               /usr/bin/fritz_cal_extract -i 1 -s 0x400 -e 0x208 -l 12064 -o /lib/firmware/$FIRMWARE $(find_mtd_chardev "urlader_config")
+               ;;
+       glinet,gl-b1300 |\
+       qcom,ap-dk01.1-c1)
+               ath10kcal_extract "ART" 20480 12064
+               ;;
+       openmesh,a42)
+               ath10kcal_extract "0:ART" 20480 12064
+               ;;
+       esac
+       ;;
+*)
+       exit 1
+       ;;
+esac
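
The hotplug script above is driven by the kernel firmware loader: ath10k
requests "ath10k/pre-cal-ahb-a000000.wifi.bin" for the first radio and
"ath10k/pre-cal-ahb-a800000.wifi.bin" for the second, and the script answers by
copying 12064 bytes of calibration data from the ART partition at offset 4096
or 20480 (the FRITZ!Box 4040 uses fritz_cal_extract instead). A rough sketch of
how the same extraction can be reproduced by hand for inspection (offsets and
the "ART" partition name are taken from the script; the /tmp output paths are
just examples):

    . /lib/functions.sh
    . /lib/functions/system.sh

    mtd=$(find_mtd_chardev "ART")
    # first radio (wifi@a000000), second radio (wifi@a800000)
    dd if=$mtd of=/tmp/precal-radio0.bin bs=1 skip=4096 count=12064
    dd if=$mtd of=/tmp/precal-radio1.bin bs=1 skip=20480 count=12064
    md5sum /tmp/precal-radio0.bin /tmp/precal-radio1.bin
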
diff --git a/target/linux/ipq40xx/base-files/etc/inittab b/target/linux/ipq40xx/base-files/etc/inittab
new file mode 100644 (file)
index 0000000..809bba5
--- /dev/null
@@ -0,0 +1,4 @@
+# Copyright (c) 2013 The Linux Foundation. All rights reserved.
+::sysinit:/etc/init.d/rcS S boot
+::shutdown:/etc/init.d/rcS K shutdown
+ttyMSM0::askfirst:/usr/libexec/login.sh
diff --git a/target/linux/ipq40xx/base-files/lib/upgrade/openmesh.sh b/target/linux/ipq40xx/base-files/lib/upgrade/openmesh.sh
new file mode 100644 (file)
index 0000000..71ab247
--- /dev/null
@@ -0,0 +1,110 @@
+# The U-Boot loader of the OpenMesh devices requires image sizes and
+# checksums to be provided in the U-Boot environment.
+# The OpenMesh devices come with 2 main partitions - while one is active
+# sysupgrade will flash the other. The boot order is changed to boot the
+# newly flashed partition. If the new partition can't be booted due to
+# upgrade failures the previously used partition is loaded.
+
+platform_do_upgrade_openmesh() {
+       local tar_file="$1"
+       local restore_backup
+       local primary_kernel_mtd
+
+       local setenv_script="/tmp/fw_env_upgrade"
+
+       local kernel_mtd="$(find_mtd_index $PART_NAME)"
+       local kernel_offset="$(cat /sys/class/mtd/mtd${kernel_mtd}/offset)"
+       local total_size="$(cat /sys/class/mtd/mtd${kernel_mtd}/size)"
+
+       # detect which flash region the new image is written to.
+       #
+       # 1. check what is the mtd index for the first flash region on this
+       #    device
+       # 2. check if the target partition ("inactive") has the mtd index of
+       #    the first flash region
+       #
+       #    - when it is: the new bootseq will be 1,2 and the first region is
+       #      modified
+       #    - when it isn't: bootseq will be 2,1 and the second region is
+       #      modified
+       #
+       # The detection has to be done via the hardcoded mtd partition because
+       # the current boot might be done with the fallback region. Let us
+       # assume that the current bootseq is 1,2. The bootloader detected that
+       # the image in flash region 1 is corrupt and thus switches to flash
+       # region 2. The bootseq in the u-boot-env is now still the same and
+       # the sysupgrade code can now only rely on the actual mtd indexes and
+       # not the bootseq variable to detect the currently booted flash
+       # region/image.
+       #
+       # In the above example, an implementation which uses bootseq ("1,2") to
+       # detect the currently booted image would assume that region 1 is booted
+       # and then overwrite the variables for the wrong flash region (aka the
+       # one which isn't modified). This could result in a device which doesn't
+       # boot anymore to Linux until it was reflashed with ap51-flash.
+       local next_boot_part="1"
+       case "$(board_name)" in
+       openmesh,a42)
+               primary_kernel_mtd=8
+               ;;
+       *)
+               echo "failed to detect primary kernel mtd partition for board"
+               return 1
+               ;;
+       esac
+       [ "$kernel_mtd" = "$primary_kernel_mtd" ] || next_boot_part="2"
+
+       local board_dir=$(tar tf $tar_file | grep -m 1 '^sysupgrade-.*/$')
+       board_dir=${board_dir%/}
+
+       local kernel_length=$(tar xf $tar_file ${board_dir}/kernel -O | wc -c)
+       local rootfs_length=$(tar xf $tar_file ${board_dir}/root -O | wc -c)
+       # rootfs without EOF marker
+       rootfs_length=$((rootfs_length-4))
+
+       local kernel_md5=$(tar xf $tar_file ${board_dir}/kernel -O | md5sum); kernel_md5="${kernel_md5%% *}"
+       # md5 checksum of rootfs with EOF marker
+       local rootfs_md5=$(tar xf $tar_file ${board_dir}/root -O | dd bs=1 count=$rootfs_length | md5sum); rootfs_md5="${rootfs_md5%% *}"
+
+       #
+       # add tar support to get_image() to use default_do_upgrade() instead?
+       #
+
+       # take care of restoring a saved config
+       [ "$SAVE_CONFIG" -eq 1 ] && restore_backup="${MTD_CONFIG_ARGS} -j ${CONF_TAR}"
+
+       # write concatenated kernel + rootfs to flash
+       tar xf $tar_file ${board_dir}/kernel ${board_dir}/root -O | \
+               mtd $restore_backup write - $PART_NAME
+
+       # prepare new u-boot env
+       if [ "$next_boot_part" = "1" ]; then
+               echo "bootseq 1,2" > $setenv_script
+       else
+               echo "bootseq 2,1" > $setenv_script
+       fi
+
+       printf "kernel_size_%i 0x%08x\n" $next_boot_part $kernel_length >> $setenv_script
+       printf "vmlinux_start_addr 0x%08x\n" ${kernel_offset} >> $setenv_script
+       printf "vmlinux_size 0x%08x\n" ${kernel_length} >> $setenv_script
+       printf "vmlinux_checksum %s\n" ${kernel_md5} >> $setenv_script
+
+       printf "rootfs_size_%i 0x%08x\n" $next_boot_part $((total_size-kernel_length)) >> $setenv_script
+       printf "rootfs_start_addr 0x%08x\n" $((kernel_offset+kernel_length)) >> $setenv_script
+       printf "rootfs_size 0x%08x\n" ${rootfs_length} >> $setenv_script
+       printf "rootfs_checksum %s\n" ${rootfs_md5} >> $setenv_script
+
+       # store u-boot env changes
+       fw_setenv -s $setenv_script || {
+               echo "failed to update U-Boot environment"
+               return 1
+       }
+}
+
+# create /var/lock for the lock file "fw_setenv.lock" used by fw_setenv
+# the rest is copied using ipq806x's RAMFS_COPY_BIN and RAMFS_COPY_DATA
+platform_add_ramfs_ubootenv()
+{
+       mkdir -p $RAM_ROOT/var/lock
+}
+append sysupgrade_pre_upgrade platform_add_ramfs_ubootenv
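
Putting the pieces of platform_do_upgrade_openmesh() together: the
/tmp/fw_env_upgrade file passed to "fw_setenv -s" is a plain list of
"name value" pairs, one per line. A sketch of its contents for a hypothetical
upgrade that lands in the second flash region (every size, address and checksum
below is a made-up placeholder; the real values come from the printf statements
above):

    bootseq 2,1
    kernel_size_2 0x00300000
    vmlinux_start_addr 0x00180000
    vmlinux_size 0x00300000
    vmlinux_checksum d41d8cd98f00b204e9800998ecf8427e
    rootfs_size_2 0x00d00000
    rootfs_start_addr 0x00480000
    rootfs_size 0x00c80000
    rootfs_checksum d41d8cd98f00b204e9800998ecf8427e
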
diff --git a/target/linux/ipq40xx/base-files/lib/upgrade/platform.sh b/target/linux/ipq40xx/base-files/lib/upgrade/platform.sh
new file mode 100644 (file)
index 0000000..defa04d
--- /dev/null
@@ -0,0 +1,27 @@
+PART_NAME=firmware
+REQUIRE_IMAGE_METADATA=1
+
+RAMFS_COPY_BIN='fw_printenv fw_setenv'
+RAMFS_COPY_DATA='/etc/fw_env.config /var/lock/fw_printenv.lock'
+
+platform_check_image() {
+       return 0;
+}
+
+platform_do_upgrade() {
+       case "$(board_name)" in
+       openmesh,a42)
+               PART_NAME="inactive"
+               platform_do_upgrade_openmesh "$ARGV"
+               ;;
+       *)
+               default_do_upgrade "$ARGV"
+               ;;
+       esac
+}
+
+blink_led() {
+       . /etc/diag.sh; set_state upgrade
+}
+
+append sysupgrade_pre_upgrade blink_led
diff --git a/target/linux/ipq40xx/config-4.14 b/target/linux/ipq40xx/config-4.14
new file mode 100644 (file)
index 0000000..b530157
--- /dev/null
@@ -0,0 +1,484 @@
+CONFIG_ALIGNMENT_TRAP=y
+# CONFIG_APQ_GCC_8084 is not set
+# CONFIG_APQ_MMCC_8084 is not set
+CONFIG_AR40XX_PHY=y
+CONFIG_ARCH_CLOCKSOURCE_DATA=y
+CONFIG_ARCH_HAS_DEBUG_VIRTUAL=y
+CONFIG_ARCH_HAS_ELF_RANDOMIZE=y
+CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y
+CONFIG_ARCH_HAS_SET_MEMORY=y
+CONFIG_ARCH_HAS_SG_CHAIN=y
+CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y
+CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y
+CONFIG_ARCH_HAS_TICK_BROADCAST=y
+CONFIG_ARCH_HAVE_CUSTOM_GPIO_H=y
+CONFIG_ARCH_HIBERNATION_POSSIBLE=y
+CONFIG_ARCH_IPQ40XX=y
+# CONFIG_ARCH_MDM9615 is not set
+CONFIG_ARCH_MIGHT_HAVE_PC_PARPORT=y
+# CONFIG_ARCH_MSM8960 is not set
+# CONFIG_ARCH_MSM8974 is not set
+# CONFIG_ARCH_MSM8X60 is not set
+CONFIG_ARCH_MULTIPLATFORM=y
+# CONFIG_ARCH_MULTI_CPU_AUTO is not set
+CONFIG_ARCH_MULTI_V6_V7=y
+CONFIG_ARCH_MULTI_V7=y
+CONFIG_ARCH_NR_GPIO=0
+CONFIG_ARCH_OPTIONAL_KERNEL_RWX=y
+CONFIG_ARCH_OPTIONAL_KERNEL_RWX_DEFAULT=y
+CONFIG_ARCH_QCOM=y
+# CONFIG_ARCH_SELECT_MEMORY_MODEL is not set
+# CONFIG_ARCH_SPARSEMEM_DEFAULT is not set
+CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y
+CONFIG_ARCH_SUPPORTS_BIG_ENDIAN=y
+CONFIG_ARCH_SUPPORTS_UPROBES=y
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
+CONFIG_ARCH_USE_BUILTIN_BSWAP=y
+CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y
+# CONFIG_ARCH_WANTS_THP_SWAP is not set
+CONFIG_ARCH_WANT_GENERAL_HUGETLB=y
+CONFIG_ARCH_WANT_IPC_PARSE_VERSION=y
+CONFIG_ARM=y
+CONFIG_ARM_AMBA=y
+CONFIG_ARM_APPENDED_DTB=y
+CONFIG_ARM_ARCH_TIMER=y
+CONFIG_ARM_ARCH_TIMER_EVTSTREAM=y
+# CONFIG_ARM_ATAG_DTB_COMPAT is not set
+CONFIG_ARM_CPUIDLE=y
+CONFIG_ARM_CPU_SUSPEND=y
+# CONFIG_ARM_CPU_TOPOLOGY is not set
+CONFIG_ARM_GIC=y
+CONFIG_ARM_HAS_SG_CHAIN=y
+CONFIG_ARM_L1_CACHE_SHIFT=6
+CONFIG_ARM_L1_CACHE_SHIFT_6=y
+# CONFIG_ARM_LPAE is not set
+CONFIG_ARM_PATCH_IDIV=y
+CONFIG_ARM_PATCH_PHYS_VIRT=y
+CONFIG_ARM_QCOM_CPUIDLE=y
+# CONFIG_ARM_SMMU is not set
+# CONFIG_ARM_SP805_WATCHDOG is not set
+CONFIG_ARM_THUMB=y
+# CONFIG_ARM_THUMBEE is not set
+CONFIG_ARM_UNWIND=y
+CONFIG_ARM_VIRT_EXT=y
+CONFIG_AT803X_PHY=y
+CONFIG_AUTO_ZRELADDR=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_MQ_PCI=y
+CONFIG_BOUNCE=y
+CONFIG_BUS_TOPOLOGY_ADHOC=y
+# CONFIG_CACHE_L2X0 is not set
+CONFIG_CLKDEV_LOOKUP=y
+CONFIG_CLKSRC_QCOM=y
+CONFIG_CLONE_BACKWARDS=y
+CONFIG_COMMON_CLK=y
+CONFIG_COMMON_CLK_QCOM=y
+CONFIG_CPUFREQ_DT=y
+CONFIG_CPUFREQ_DT_PLATDEV=y
+CONFIG_CPU_32v6K=y
+CONFIG_CPU_32v7=y
+CONFIG_CPU_ABRT_EV7=y
+# CONFIG_CPU_BIG_ENDIAN is not set
+# CONFIG_CPU_BPREDICT_DISABLE is not set
+CONFIG_CPU_CACHE_V7=y
+CONFIG_CPU_CACHE_VIPT=y
+CONFIG_CPU_COPY_V6=y
+CONFIG_CPU_CP15=y
+CONFIG_CPU_CP15_MMU=y
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
+# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set
+CONFIG_CPU_FREQ_GOV_ATTR_SET=y
+CONFIG_CPU_FREQ_GOV_COMMON=y
+# CONFIG_CPU_FREQ_GOV_CONSERVATIVE is not set
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
+# CONFIG_CPU_FREQ_GOV_POWERSAVE is not set
+# CONFIG_CPU_FREQ_GOV_USERSPACE is not set
+CONFIG_CPU_FREQ_STAT=y
+CONFIG_CPU_HAS_ASID=y
+# CONFIG_CPU_ICACHE_DISABLE is not set
+CONFIG_CPU_IDLE=y
+CONFIG_CPU_IDLE_GOV_LADDER=y
+CONFIG_CPU_IDLE_GOV_MENU=y
+CONFIG_CPU_IDLE_MULTIPLE_DRIVERS=y
+CONFIG_CPU_PABRT_V7=y
+CONFIG_CPU_PM=y
+CONFIG_CPU_RMAP=y
+CONFIG_CPU_THERMAL=y
+CONFIG_CPU_THUMB_CAPABLE=y
+CONFIG_CPU_TLB_V7=y
+CONFIG_CPU_V7=y
+CONFIG_CRC16=y
+# CONFIG_CRC32_SARWATE is not set
+CONFIG_CRC32_SLICEBY8=y
+CONFIG_CRYPTO_ACOMP2=y
+CONFIG_CRYPTO_AEAD=y
+CONFIG_CRYPTO_AEAD2=y
+CONFIG_CRYPTO_CBC=y
+CONFIG_CRYPTO_CTR=y
+CONFIG_CRYPTO_DEFLATE=y
+CONFIG_CRYPTO_DES=y
+CONFIG_CRYPTO_DEV_QCE=y
+CONFIG_CRYPTO_DRBG=y
+CONFIG_CRYPTO_DRBG_HMAC=y
+CONFIG_CRYPTO_DRBG_MENU=y
+CONFIG_CRYPTO_ECB=y
+CONFIG_CRYPTO_GF128MUL=y
+CONFIG_CRYPTO_HASH=y
+CONFIG_CRYPTO_HASH2=y
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_HW=y
+CONFIG_CRYPTO_JITTERENTROPY=y
+CONFIG_CRYPTO_LZO=y
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_MANAGER2=y
+CONFIG_CRYPTO_NULL=y
+CONFIG_CRYPTO_NULL2=y
+CONFIG_CRYPTO_RNG=y
+CONFIG_CRYPTO_RNG2=y
+CONFIG_CRYPTO_RNG_DEFAULT=y
+CONFIG_CRYPTO_SEQIV=y
+CONFIG_CRYPTO_SHA256=y
+CONFIG_CRYPTO_WORKQUEUE=y
+CONFIG_CRYPTO_XTS=y
+CONFIG_DCACHE_WORD_ACCESS=y
+CONFIG_DEBUG_LL_INCLUDE="mach/debug-macro.S"
+# CONFIG_DEBUG_UART_8250 is not set
+# CONFIG_DEBUG_USER is not set
+CONFIG_DMADEVICES=y
+CONFIG_DMA_ENGINE=y
+# CONFIG_DMA_NOOP_OPS is not set
+CONFIG_DMA_OF=y
+CONFIG_DMA_SHARED_BUFFER=y
+CONFIG_DMA_VIRTUAL_CHANNELS=y
+# CONFIG_DMA_VIRT_OPS is not set
+# CONFIG_DRM_LIB_RANDOM is not set
+CONFIG_DTC=y
+CONFIG_DT_IDLE_STATES=y
+CONFIG_DYNAMIC_DEBUG=y
+CONFIG_EDAC_ATOMIC_SCRUB=y
+CONFIG_EDAC_SUPPORT=y
+CONFIG_ESSEDMA=y
+CONFIG_EXPORTFS=y
+CONFIG_EXTCON=y
+CONFIG_FIXED_PHY=y
+CONFIG_FIX_EARLYCON_MEM=y
+CONFIG_FUTEX_PI=y
+CONFIG_GENERIC_ALLOCATOR=y
+CONFIG_GENERIC_BUG=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y
+CONFIG_GENERIC_CPU_AUTOPROBE=y
+CONFIG_GENERIC_EARLY_IOREMAP=y
+CONFIG_GENERIC_IDLE_POLL_SETUP=y
+CONFIG_GENERIC_IO=y
+CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y
+CONFIG_GENERIC_IRQ_SHOW=y
+CONFIG_GENERIC_IRQ_SHOW_LEVEL=y
+CONFIG_GENERIC_MSI_IRQ=y
+CONFIG_GENERIC_MSI_IRQ_DOMAIN=y
+CONFIG_GENERIC_PCI_IOMAP=y
+CONFIG_GENERIC_PHY=y
+CONFIG_GENERIC_PINCONF=y
+CONFIG_GENERIC_PINCTRL_GROUPS=y
+CONFIG_GENERIC_PINMUX_FUNCTIONS=y
+CONFIG_GENERIC_SCHED_CLOCK=y
+CONFIG_GENERIC_SMP_IDLE_THREAD=y
+CONFIG_GENERIC_STRNCPY_FROM_USER=y
+CONFIG_GENERIC_STRNLEN_USER=y
+CONFIG_GENERIC_TIME_VSYSCALL=y
+CONFIG_GPIOLIB=y
+CONFIG_GPIOLIB_IRQCHIP=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_GPIO_WATCHDOG=y
+# CONFIG_GPIO_WATCHDOG_ARCH_INITCALL is not set
+# CONFIG_GRO_CELLS is not set
+CONFIG_HANDLE_DOMAIN_IRQ=y
+CONFIG_HARDIRQS_SW_RESEND=y
+CONFIG_HAS_DMA=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT_MAP=y
+# CONFIG_HAVE_64BIT_ALIGNED_ACCESS is not set
+CONFIG_HAVE_ARCH_AUDITSYSCALL=y
+CONFIG_HAVE_ARCH_BITREVERSE=y
+CONFIG_HAVE_ARCH_JUMP_LABEL=y
+CONFIG_HAVE_ARCH_KGDB=y
+CONFIG_HAVE_ARCH_PFN_VALID=y
+CONFIG_HAVE_ARCH_SECCOMP_FILTER=y
+CONFIG_HAVE_ARCH_TRACEHOOK=y
+CONFIG_HAVE_ARM_ARCH_TIMER=y
+CONFIG_HAVE_ARM_SMCCC=y
+# CONFIG_HAVE_BOOTMEM_INFO_NODE is not set
+CONFIG_HAVE_CC_STACKPROTECTOR=y
+CONFIG_HAVE_CLK=y
+CONFIG_HAVE_CLK_PREPARE=y
+CONFIG_HAVE_CONTEXT_TRACKING=y
+CONFIG_HAVE_C_RECORDMCOUNT=y
+CONFIG_HAVE_DEBUG_KMEMLEAK=y
+CONFIG_HAVE_DMA_API_DEBUG=y
+CONFIG_HAVE_DMA_CONTIGUOUS=y
+CONFIG_HAVE_DYNAMIC_FTRACE=y
+CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS=y
+CONFIG_HAVE_EBPF_JIT=y
+CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y
+CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_GENERIC_DMA_COHERENT=y
+CONFIG_HAVE_IDE=y
+CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y
+CONFIG_HAVE_MEMBLOCK=y
+CONFIG_HAVE_MOD_ARCH_SPECIFIC=y
+CONFIG_HAVE_NET_DSA=y
+CONFIG_HAVE_OPROFILE=y
+CONFIG_HAVE_OPTPROBES=y
+CONFIG_HAVE_PERF_EVENTS=y
+CONFIG_HAVE_PERF_REGS=y
+CONFIG_HAVE_PERF_USER_STACK_DUMP=y
+CONFIG_HAVE_PROC_CPU=y
+CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y
+CONFIG_HAVE_SMP=y
+CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
+CONFIG_HAVE_UID16=y
+CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y
+CONFIG_HIGHMEM=y
+# CONFIG_HIGHPTE is not set
+CONFIG_HWSPINLOCK=y
+CONFIG_HWSPINLOCK_QCOM=y
+CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_MSM=y
+CONFIG_HZ_FIXED=0
+CONFIG_I2C=y
+CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_HELPER_AUTO=y
+CONFIG_I2C_QUP=y
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_IOMMU_HELPER=y
+# CONFIG_IOMMU_IO_PGTABLE_ARMV7S is not set
+# CONFIG_IOMMU_IO_PGTABLE_LPAE is not set
+CONFIG_IOMMU_SUPPORT=y
+CONFIG_IPQ_GCC_4019=y
+# CONFIG_IPQ_GCC_806X is not set
+# CONFIG_IPQ_GCC_8074 is not set
+# CONFIG_IPQ_LCC_806X is not set
+CONFIG_IRQCHIP=y
+CONFIG_IRQ_DOMAIN=y
+CONFIG_IRQ_DOMAIN_HIERARCHY=y
+CONFIG_IRQ_FORCED_THREADING=y
+CONFIG_IRQ_WORK=y
+CONFIG_LIBFDT=y
+CONFIG_LOCK_SPIN_ON_OWNER=y
+CONFIG_LZO_COMPRESS=y
+CONFIG_LZO_DECOMPRESS=y
+CONFIG_MDIO_BITBANG=y
+CONFIG_MDIO_BUS=y
+CONFIG_MDIO_DEVICE=y
+CONFIG_MDIO_GPIO=y
+CONFIG_MDIO_IPQ40XX=y
+# CONFIG_MDM_GCC_9615 is not set
+# CONFIG_MDM_LCC_9615 is not set
+# CONFIG_MFD_QCOM_RPM is not set
+# CONFIG_MFD_SPMI_PMIC is not set
+CONFIG_MFD_SYSCON=y
+CONFIG_MIGHT_HAVE_CACHE_L2X0=y
+CONFIG_MIGHT_HAVE_PCI=y
+CONFIG_MODULES_USE_ELF_REL=y
+CONFIG_MSM_BUS_SCALING=y
+# CONFIG_MSM_GCC_8660 is not set
+# CONFIG_MSM_GCC_8916 is not set
+# CONFIG_MSM_GCC_8960 is not set
+# CONFIG_MSM_GCC_8974 is not set
+# CONFIG_MSM_GCC_8994 is not set
+# CONFIG_MSM_GCC_8996 is not set
+# CONFIG_MSM_LCC_8960 is not set
+# CONFIG_MSM_MMCC_8960 is not set
+# CONFIG_MSM_MMCC_8974 is not set
+# CONFIG_MSM_MMCC_8996 is not set
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_M25P80=y
+CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_ECC=y
+CONFIG_MTD_NAND_QCOM=y
+CONFIG_MTD_SPI_NOR=y
+CONFIG_MTD_SPLIT_FIRMWARE=y
+CONFIG_MTD_SPLIT_FIT_FW=y
+CONFIG_MTD_UBI=y
+CONFIG_MTD_UBI_BEB_LIMIT=20
+CONFIG_MTD_UBI_BLOCK=y
+# CONFIG_MTD_UBI_FASTMAP is not set
+# CONFIG_MTD_UBI_GLUEBI is not set
+CONFIG_MTD_UBI_WL_THRESHOLD=4096
+CONFIG_MULTI_IRQ_HANDLER=y
+CONFIG_MUTEX_SPIN_ON_OWNER=y
+CONFIG_NEED_DMA_MAP_STATE=y
+CONFIG_NEON=y
+CONFIG_NET_DSA=y
+CONFIG_NET_DSA_QCA8K=y
+CONFIG_NET_DSA_TAG_QCA=y
+CONFIG_NET_FLOW_LIMIT=y
+CONFIG_NET_PTP_CLASSIFY=y
+CONFIG_NET_SWITCHDEV=y
+CONFIG_NLS=y
+CONFIG_NO_BOOTMEM=y
+CONFIG_NO_HZ=y
+CONFIG_NO_HZ_COMMON=y
+CONFIG_NO_HZ_IDLE=y
+CONFIG_NR_CPUS=4
+CONFIG_NVMEM=y
+CONFIG_OF=y
+CONFIG_OF_ADDRESS=y
+CONFIG_OF_ADDRESS_PCI=y
+CONFIG_OF_EARLY_FLATTREE=y
+CONFIG_OF_FLATTREE=y
+CONFIG_OF_GPIO=y
+CONFIG_OF_IRQ=y
+CONFIG_OF_MDIO=y
+CONFIG_OF_NET=y
+CONFIG_OF_PCI=y
+CONFIG_OF_PCI_IRQ=y
+CONFIG_OF_RESERVED_MEM=y
+CONFIG_OLD_SIGACTION=y
+CONFIG_OLD_SIGSUSPEND3=y
+CONFIG_OPTEE=y
+CONFIG_PADATA=y
+CONFIG_PAGE_OFFSET=0xC0000000
+CONFIG_PCI=y
+CONFIG_PCIEAER=y
+CONFIG_PCIEPORTBUS=y
+CONFIG_PCIE_DW=y
+CONFIG_PCIE_DW_HOST=y
+CONFIG_PCIE_QCOM=y
+CONFIG_PCI_DISABLE_COMMON_QUIRKS=y
+CONFIG_PCI_DOMAINS=y
+CONFIG_PCI_DOMAINS_GENERIC=y
+CONFIG_PCI_MSI=y
+CONFIG_PCI_MSI_IRQ_DOMAIN=y
+CONFIG_PERF_USE_VMALLOC=y
+CONFIG_PGTABLE_LEVELS=2
+CONFIG_PHYLIB=y
+# CONFIG_PHY_QCOM_APQ8064_SATA is not set
+# CONFIG_PHY_QCOM_IPQ806X_SATA is not set
+# CONFIG_PHY_QCOM_QMP is not set
+# CONFIG_PHY_QCOM_QUSB2 is not set
+# CONFIG_PHY_QCOM_UFS is not set
+CONFIG_PINCTRL=y
+# CONFIG_PINCTRL_APQ8064 is not set
+# CONFIG_PINCTRL_APQ8084 is not set
+CONFIG_PINCTRL_IPQ4019=y
+# CONFIG_PINCTRL_IPQ8064 is not set
+# CONFIG_PINCTRL_IPQ8074 is not set
+# CONFIG_PINCTRL_MDM9615 is not set
+CONFIG_PINCTRL_MSM=y
+# CONFIG_PINCTRL_MSM8660 is not set
+# CONFIG_PINCTRL_MSM8916 is not set
+# CONFIG_PINCTRL_MSM8960 is not set
+# CONFIG_PINCTRL_MSM8994 is not set
+# CONFIG_PINCTRL_MSM8996 is not set
+# CONFIG_PINCTRL_QCOM_SPMI_PMIC is not set
+# CONFIG_PINCTRL_QCOM_SSBI_PMIC is not set
+CONFIG_PM_OPP=y
+CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_MSM=y
+CONFIG_POWER_SUPPLY=y
+CONFIG_PPS=y
+CONFIG_PRINTK_TIME=y
+CONFIG_PTP_1588_CLOCK=y
+CONFIG_QCOM_BAM_DMA=y
+# CONFIG_QCOM_EBI2 is not set
+# CONFIG_QCOM_GSBI is not set
+# CONFIG_QCOM_IOMMU is not set
+CONFIG_QCOM_PM=y
+CONFIG_QCOM_QFPROM=y
+CONFIG_QCOM_SCM=y
+CONFIG_QCOM_SCM_32=y
+CONFIG_QCOM_SMEM=y
+# CONFIG_QCOM_SMP2P is not set
+# CONFIG_QCOM_SMSM is not set
+CONFIG_QCOM_TCSR=y
+# CONFIG_QCOM_TSENS is not set
+CONFIG_QCOM_WDT=y
+# CONFIG_QRTR is not set
+CONFIG_RAS=y
+CONFIG_RATIONAL=y
+CONFIG_RCU_CPU_STALL_TIMEOUT=21
+CONFIG_RCU_NEED_SEGCBLIST=y
+CONFIG_RCU_STALL_COMMON=y
+CONFIG_REGMAP=y
+CONFIG_REGMAP_I2C=y
+CONFIG_REGMAP_MMIO=y
+CONFIG_REGMAP_SPI=y
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+# CONFIG_REGULATOR_QCOM_SPMI is not set
+CONFIG_REGULATOR_VCTRL=y
+CONFIG_RESET_CONTROLLER=y
+CONFIG_RFS_ACCEL=y
+# CONFIG_RPMSG_QCOM_SMD is not set
+CONFIG_RPS=y
+CONFIG_RTC_CLASS=y
+# CONFIG_RTC_DRV_CMOS is not set
+CONFIG_RTC_I2C_AND_SPI=y
+CONFIG_RWSEM_SPIN_ON_OWNER=y
+CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+# CONFIG_SCHED_INFO is not set
+# CONFIG_SCSI_DMA is not set
+CONFIG_SERIAL_8250_FSL=y
+# CONFIG_SERIAL_AMBA_PL011 is not set
+CONFIG_SERIAL_MSM=y
+CONFIG_SERIAL_MSM_CONSOLE=y
+CONFIG_SMP=y
+CONFIG_SMP_ON_UP=y
+CONFIG_SPARSE_IRQ=y
+CONFIG_SPI=y
+CONFIG_SPI_MASTER=y
+CONFIG_SPI_QUP=y
+CONFIG_SPMI=y
+CONFIG_SPMI_MSM_PMIC_ARB=y
+CONFIG_SRCU=y
+CONFIG_SWCONFIG=y
+CONFIG_SWCONFIG_LEDS=y
+CONFIG_SWIOTLB=y
+CONFIG_SWPHY=y
+CONFIG_SWP_EMULATE=y
+CONFIG_SYS_SUPPORTS_APM_EMULATION=y
+CONFIG_TEE=y
+CONFIG_THERMAL=y
+CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y
+CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=0
+CONFIG_THERMAL_GOV_STEP_WISE=y
+CONFIG_THERMAL_OF=y
+CONFIG_THIN_ARCHIVES=y
+# CONFIG_THUMB2_KERNEL is not set
+CONFIG_TICK_CPU_ACCOUNTING=y
+CONFIG_TIMER_OF=y
+CONFIG_TIMER_PROBE=y
+CONFIG_TREE_RCU=y
+CONFIG_TREE_SRCU=y
+CONFIG_UBIFS_FS=y
+CONFIG_UBIFS_FS_ADVANCED_COMPR=y
+CONFIG_UBIFS_FS_LZO=y
+CONFIG_UBIFS_FS_ZLIB=y
+CONFIG_UEVENT_HELPER_PATH=""
+CONFIG_UNCOMPRESS_INCLUDE="debug/uncompress.h"
+CONFIG_USB=y
+CONFIG_USB_COMMON=y
+# CONFIG_USB_EHCI_HCD is not set
+CONFIG_USB_IPQ4019_PHY=y
+CONFIG_USB_PHY=y
+# CONFIG_USB_QCOM_8X16_PHY is not set
+CONFIG_USB_SUPPORT=y
+CONFIG_USE_OF=y
+CONFIG_VDSO=y
+CONFIG_VECTORS_BASE=0xffff0000
+CONFIG_VFP=y
+CONFIG_VFPv3=y
+CONFIG_WATCHDOG_CORE=y
+CONFIG_XPS=y
+CONFIG_XZ_DEC_ARM=y
+CONFIG_XZ_DEC_BCJ=y
+CONFIG_ZBOOT_ROM_BSS=0
+CONFIG_ZBOOT_ROM_TEXT=0
+CONFIG_ZLIB_DEFLATE=y
+CONFIG_ZLIB_INFLATE=y
diff --git a/target/linux/ipq40xx/files-4.14/arch/arm/boot/dts/qcom-ipq4019-a42.dts b/target/linux/ipq40xx/files-4.14/arch/arm/boot/dts/qcom-ipq4019-a42.dts
new file mode 100644 (file)
index 0000000..887be99
--- /dev/null
@@ -0,0 +1,244 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017, Sven Eckelmann <sven.eckelmann@openmesh.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+#include "qcom-ipq4019.dtsi"
+#include "qcom-ipq4019-bus.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/soc/qcom,tcsr.h>
+
+/ {
+       model = "OpenMesh A42";
+       compatible = "openmesh,a42", "qcom,ipq4019";
+
+       reserved-memory {
+               #address-cells = <0x1>;
+               #size-cells = <0x1>;
+               ranges;
+
+               rsvd1@87000000 {
+                       reg = <0x87000000 0x500000>;
+                       no-map;
+               };
+
+               wifi_dump@87500000 {
+                       reg = <0x87500000 0x600000>;
+                       no-map;
+               };
+
+               rsvd2@87B00000 {
+                       reg = <0x87b00000 0x500000>;
+                       no-map;
+               };
+       };
+
+       soc {
+               tcsr@194b000 {
+                       /* select hostmode */
+                       compatible = "qcom,tcsr";
+                       reg = <0x194b000 0x100>;
+                       qcom,usb-hsphy-mode-select = <TCSR_USB_HSPHY_HOST_MODE>;
+                       status = "ok";
+               };
+
+               ess_tcsr@1953000 {
+                       compatible = "qcom,tcsr";
+                       reg = <0x1953000 0x1000>;
+                       qcom,ess-interface-select = <TCSR_ESS_PSGMII>;
+               };
+
+               tcsr@1949000 {
+                       compatible = "qcom,tcsr";
+                       reg = <0x1949000 0x100>;
+                       qcom,wifi_glb_cfg = <TCSR_WIFI_GLB_CFG>;
+               };
+
+               tcsr@1957000 {
+                       compatible = "qcom,tcsr";
+                       reg = <0x1957000 0x100>;
+                       qcom,wifi_noc_memtype_m0_m2 = <TCSR_WIFI_NOC_MEMTYPE_M0_M2>;
+               };
+
+               pinctrl@1000000 {
+                       serial_pins: serial_pinmux {
+                               mux {
+                                       pins = "gpio60", "gpio61";
+                                       function = "blsp_uart0";
+                                       bias-disable;
+                               };
+                       };
+
+                       spi_0_pins: spi_0_pinmux {
+                               pinmux {
+                                       function = "blsp_spi0";
+                                       pins = "gpio55", "gpio56", "gpio57";
+                               };
+                               pinmux_cs {
+                                       function = "gpio";
+                                       pins = "gpio54";
+                               };
+                               pinconf {
+                                       pins = "gpio55", "gpio56", "gpio57";
+                                       drive-strength = <12>;
+                                       bias-disable;
+                               };
+                               pinconf_cs {
+                                       pins = "gpio54";
+                                       drive-strength = <2>;
+                                       bias-disable;
+                                       output-high;
+                               };
+                       };
+               };
+
+               blsp_dma: dma@7884000 {
+                       status = "ok";
+               };
+
+               spi_0: spi@78b5000 {
+                       pinctrl-0 = <&spi_0_pins>;
+                       pinctrl-names = "default";
+                       status = "ok";
+                       cs-gpios = <&tlmm 54 0>;
+
+                       m25p80@0 {
+                               #address-cells = <1>;
+                               #size-cells = <1>;
+                               compatible = "jedec,spi-nor";
+                               reg = <0>;
+                               spi-max-frequency = <24000000>;
+
+                               /* partitions are passed via bootloader */
+                       };
+               };
+
+               serial@78af000 {
+                       pinctrl-0 = <&serial_pins>;
+                       pinctrl-names = "default";
+                       status = "ok";
+               };
+
+               cryptobam: dma@8e04000 {
+                       status = "ok";
+               };
+
+               crypto@8e3a000 {
+                       status = "ok";
+               };
+
+               watchdog@b017000 {
+                       status = "ok";
+               };
+
+               usb2_hs_phy: hsphy@a8000 {
+                       status = "ok";
+               };
+
+               usb2: usb2@60f8800 {
+                       status = "ok";
+               };
+
+               mdio@90000 {
+                       status = "okay";
+               };
+
+               ess-switch@c000000 {
+                       status = "okay";
+               };
+
+               ess-psgmii@98000 {
+                       status = "okay";
+               };
+
+               edma@c080000 {
+                       status = "okay";
+               };
+
+               wifi@a000000 {
+                       status = "okay";
+                       qcom,ath10k-calibration-variant = "OM-A42";
+               };
+
+               wifi@a800000 {
+                       status = "okay";
+                       qcom,ath10k-calibration-variant = "OM-A42";
+               };
+       };
+
+       gpio-keys {
+               compatible = "gpio-keys";
+
+               reset {
+                       label = "reset";
+                       gpios = <&tlmm 59 GPIO_ACTIVE_LOW>;
+                       linux,code = <KEY_RESTART>;
+               };
+       };
+
+       aliases {
+               led-boot = &power;
+               led-failsafe = &power;
+               led-running = &power;
+               led-upgrade = &power;
+       };
+
+       gpio-leds {
+               compatible = "gpio-leds";
+
+               red {
+                       label = "a42:red:status";
+                       gpios = <&tlmm 0 GPIO_ACTIVE_HIGH>;
+                       linux,default-trigger = "default-off";
+               };
+
+               power: green {
+                       label = "a42:green:status";
+                       gpios = <&tlmm 1 GPIO_ACTIVE_HIGH>;
+               };
+
+               blue {
+                       label = "a42:blue:status";
+                       gpios = <&tlmm 2 GPIO_ACTIVE_HIGH>;
+                       linux,default-trigger = "default-off";
+               };
+       };
+
+       watchdog {
+               compatible = "linux,wdt-gpio";
+               gpios = <&tlmm 5 GPIO_ACTIVE_LOW>;
+               hw_algo = "toggle";
+               /* hw_margin_ms is actually 300s but driver limits it to 60s */
+               hw_margin_ms = <60000>;
+               always-running;
+       };
+};
+
+&gmac0 {
+       qcom,phy_mdio_addr = <4>;
+       qcom,poll_required = <1>;
+       qcom,forced_speed = <1000>;
+       qcom,forced_duplex = <1>;
+       vlan_tag = <2 0x20>;
+};
+
+&gmac1 {
+       qcom,phy_mdio_addr = <3>;
+       qcom,poll_required = <1>;
+       qcom,forced_speed = <1000>;
+       qcom,forced_duplex = <1>;
+       vlan_tag = <1 0x10>;
+};
diff --git a/target/linux/ipq40xx/files-4.14/arch/arm/boot/dts/qcom-ipq4019-ap.dk04.1-c1.dts b/target/linux/ipq40xx/files-4.14/arch/arm/boot/dts/qcom-ipq4019-ap.dk04.1-c1.dts
new file mode 100644 (file)
index 0000000..47202d2
--- /dev/null
@@ -0,0 +1,21 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+#include "qcom-ipq4019-ap.dk04.1.dtsi"
+
+/ {
+       model = "Qualcomm Technologies, Inc. IPQ40xx/AP-DK04.1-C1";
+};
diff --git a/target/linux/ipq40xx/files-4.14/arch/arm/boot/dts/qcom-ipq4019-ap.dk04.1.dtsi b/target/linux/ipq40xx/files-4.14/arch/arm/boot/dts/qcom-ipq4019-ap.dk04.1.dtsi
new file mode 100644 (file)
index 0000000..99fe8af
--- /dev/null
@@ -0,0 +1,176 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+#include "qcom-ipq4019.dtsi"
+
+/ {
+       model = "Qualcomm Technologies, Inc. IPQ4019/AP-DK04.1";
+       compatible = "qcom,ipq4019";
+
+       soc {
+               pinctrl@1000000 {
+                       serial_0_pins: serial_pinmux {
+                               mux {
+                                       pins = "gpio16", "gpio17";
+                                       function = "blsp_uart0";
+                                       bias-disable;
+                               };
+                       };
+
+                       serial_1_pins: serial1_pinmux {
+                               mux {
+                                       pins = "gpio8", "gpio9";
+                                       function = "blsp_uart1";
+                                       bias-disable;
+                               };
+                       };
+
+                       spi_0_pins: spi_0_pinmux {
+                               pinmux {
+                                       function = "blsp_spi0";
+                                       pins = "gpio13", "gpio14", "gpio15";
+                               };
+                               pinmux_cs {
+                                       function = "gpio";
+                                       pins = "gpio12";
+                               };
+                               pinconf {
+                                       pins = "gpio13", "gpio14", "gpio15";
+                                       drive-strength = <12>;
+                                       bias-disable;
+                               };
+                               pinconf_cs {
+                                       pins = "gpio12";
+                                       drive-strength = <2>;
+                                       bias-disable;
+                                       output-high;
+                               };
+                       };
+
+                       i2c_0_pins: i2c_0_pinmux {
+                               pinmux {
+                                       function = "blsp_i2c0";
+                                       pins = "gpio10", "gpio11";
+                               };
+                               pinconf {
+                                       pins = "gpio10", "gpio11";
+                                       drive-strength = <16>;
+                                       bias-disable;
+                               };
+                       };
+
+                       nand_pins: nand_pins {
+
+                               pullups {
+                                       pins = "gpio52", "gpio53", "gpio58",
+                                               "gpio59";
+                                       function = "qpic";
+                                       bias-pull-up;
+                               };
+
+                               pulldowns {
+                                       pins = "gpio54", "gpio55", "gpio56",
+                                               "gpio57", "gpio60", "gpio61",
+                                               "gpio62", "gpio63", "gpio64",
+                                               "gpio65", "gpio66", "gpio67",
+                                               "gpio68", "gpio69";
+                                       function = "qpic";
+                                       bias-pull-down;
+                               };
+                       };
+               };
+
+               blsp_dma: dma@7884000 {
+                       status = "ok";
+               };
+
+               spi_0: spi@78b5000 {
+                       pinctrl-0 = <&spi_0_pins>;
+                       pinctrl-names = "default";
+                       status = "ok";
+                       cs-gpios = <&tlmm 12 0>;
+
+                       mx25l25635e@0 {
+                               #address-cells = <1>;
+                               #size-cells = <1>;
+                               reg = <0>;
+                               compatible = "mx25l25635e";
+                               spi-max-frequency = <24000000>;
+                       };
+               };
+
+               i2c_0: i2c@78b7000 { /* BLSP1 QUP2 */
+                       pinctrl-0 = <&i2c_0_pins>;
+                       pinctrl-names = "default";
+
+                       status = "ok";
+               };
+
+               serial@78af000 {
+                       pinctrl-0 = <&serial_0_pins>;
+                       pinctrl-names = "default";
+                       status = "ok";
+               };
+
+               serial@78b0000 {
+                       pinctrl-0 = <&serial_1_pins>;
+                       pinctrl-names = "default";
+                       status = "ok";
+               };
+
+               usb3_ss_phy: ssphy@9a000 {
+                       status = "ok";
+               };
+
+               usb3_hs_phy: hsphy@a6000 {
+                       status = "ok";
+               };
+
+               usb3: usb3@8af8800 {
+                       status = "ok";
+               };
+
+               usb2_hs_phy: hsphy@a8000 {
+                       status = "ok";
+               };
+
+               usb2: usb2@60f8800 {
+                       status = "ok";
+               };
+
+               cryptobam: dma@8e04000 {
+                       status = "ok";
+               };
+
+               crypto@8e3a000 {
+                       status = "ok";
+               };
+
+               watchdog@b017000 {
+                       status = "ok";
+               };
+
+               qpic_bam: dma@7984000 {
+                       status = "ok";
+               };
+
+               nand: qpic-nand@79b0000 {
+                       pinctrl-0 = <&nand_pins>;
+                       pinctrl-names = "default";
+                       status = "ok";
+               };
+       };
+};
diff --git a/target/linux/ipq40xx/files-4.14/arch/arm/boot/dts/qcom-ipq4019-bus.dtsi b/target/linux/ipq40xx/files-4.14/arch/arm/boot/dts/qcom-ipq4019-bus.dtsi
new file mode 100644 (file)
index 0000000..1695059
--- /dev/null
@@ -0,0 +1,1142 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+#include <dt-bindings/msm/msm-bus-ids.h>
+
+/ {
+
+soc {
+       ad_hoc_bus: ad-hoc-bus {
+               compatible = "qcom,msm-bus-device";
+               reg = <0x580000 0x14000>,
+                       <0x500000 0x11000>;
+               reg-names = "snoc-base", "pcnoc-base";
+
+               /*Buses*/
+
+               fab_pcnoc: fab-pcnoc {
+                       cell-id = <MSM_BUS_FAB_PERIPH_NOC>;
+                       label = "fab-pcnoc";
+                       qcom,fab-dev;
+                       qcom,base-name = "pcnoc-base";
+                       qcom,bypass-qos-prg;
+                       qcom,bus-type = <1>;
+                       qcom,qos-off = <0x1000>;
+                       qcom,base-offset = <0x0>;
+                       clocks = <>;
+               };
+
+               fab_snoc: fab-snoc {
+                       cell-id = <MSM_BUS_FAB_SYS_NOC>;
+                       label = "fab-snoc";
+                       qcom,fab-dev;
+                       qcom,base-name = "snoc-base";
+                       qcom,bypass-qos-prg;
+                       qcom,bus-type = <1>;
+                       qcom,qos-off = <0x80>;
+                       qcom,base-offset = <0x0>;
+                       clocks = <>;
+               };
+
+               /*Masters*/
+
+               mas_blsp_bam: mas-blsp-bam {
+                       cell-id = <MSM_BUS_MASTER_BLSP_BAM>;
+                       label = "mas-blsp-bam";
+                       qcom,buswidth = <4>;
+                       qcom,ap-owned;
+                       qcom,connections = <&pcnoc_m_0>;
+                       qcom,bus-dev = <&fab_pcnoc>;
+                       qcom,mas-rpm-id = <ICBID_MASTER_BLSP_BAM>;
+                       qcom,blacklist = <&slv_tcsr &slv_mdio &slv_adss_cfg
+                                &slv_fephy_cfg &slv_wss1_apu_cfg &slv_ddrc_mpu1_cfg
+                                &slv_ddrc_mpu0_cfg &slv_qpic_cfg &slv_ddrc_cfg
+                                &slv_pcnoc_cfg &slv_ess_apu_cfg &slv_imem_cfg
+                                &slv_srif &slv_prng &slv_qdss_cfg
+                                &slv_wss0_apu_cfg &slv_ddrc_apu_cfg &slv_gcnt
+                                &slv_tlmm &slv_wss0_vmidmt_cfg &slv_prng_apu_cfg
+                                &slv_boot_rom &slv_security &slv_spdm
+                                &slv_pcnoc_mpu_cfg &slv_ddrc_mpu2_cfg &slv_ess_vmidmt_cfg
+                                &slv_qhss_apu_cfg &slv_adss_vmidmt_cfg &slv_clk_ctl
+                                &slv_adss_apu &slv_blsp_cfg &slv_usb2_cfg
+                                &slv_srvc_pcnoc &slv_snoc_mpu_cfg &slv_wss1_vmidmt_cfg
+                                &slv_sdcc_cfg &slv_snoc_cfg>;
+               };
+
+               mas_usb2_bam: mas-usb2-bam {
+                       cell-id = <MSM_BUS_MASTER_USB2_BAM>;
+                       label = "mas-usb2-bam";
+                       qcom,buswidth = <8>;
+                       qcom,ap-owned;
+                       qcom,qport = <15>;
+                       qcom,qos-mode = "fixed";
+                       qcom,connections = <&slv_pcnoc_snoc>;
+                       qcom,prio1 = <1>;
+                       qcom,prio0 = <1>;
+                       qcom,bus-dev = <&fab_pcnoc>;
+                       qcom,mas-rpm-id = <ICBID_MASTER_USB2_BAM>;
+                       qcom,blacklist = <&slv_tcsr &slv_mdio &slv_adss_cfg
+                                &slv_fephy_cfg &slv_wss1_apu_cfg &slv_ddrc_mpu1_cfg
+                                &slv_ddrc_mpu0_cfg &slv_qpic_cfg &slv_ddrc_cfg
+                                &slv_pcnoc_cfg &slv_ess_apu_cfg &slv_imem_cfg
+                                &slv_srif &slv_prng &slv_qdss_cfg
+                                &slv_wss0_apu_cfg &slv_ddrc_apu_cfg &slv_gcnt
+                                &slv_tlmm &slv_wss0_vmidmt_cfg &slv_prng_apu_cfg
+                                &slv_boot_rom &slv_security &slv_spdm
+                                &slv_pcnoc_mpu_cfg &slv_ddrc_mpu2_cfg &slv_ess_vmidmt_cfg
+                                &slv_qhss_apu_cfg &slv_adss_vmidmt_cfg &slv_clk_ctl
+                                &slv_adss_apu &slv_blsp_cfg &slv_usb2_cfg
+                                &slv_srvc_pcnoc &slv_snoc_mpu_cfg &slv_wss1_vmidmt_cfg
+                                &slv_sdcc_cfg &slv_snoc_cfg>;
+               };
+
+               mas_adss_dma0: mas-adss-dma0 {
+                       cell-id = <MSM_BUS_MASTER_ADDS_DMA0>;
+                       label = "mas-adss-dma0";
+                       qcom,buswidth = <4>;
+                       qcom,ap-owned;
+                       qcom,connections = <&pcnoc_m_1>;
+                       qcom,bus-dev = <&fab_pcnoc>;
+                       qcom,mas-rpm-id = <ICBID_MASTER_ADSS_DMA0>;
+                       qcom,blacklist = <&slv_tcsr &slv_mdio &slv_adss_cfg
+                                &slv_fephy_cfg &slv_wss1_apu_cfg &slv_ddrc_mpu1_cfg
+                                &slv_ddrc_mpu0_cfg &slv_qpic_cfg &slv_ddrc_cfg
+                                &slv_pcnoc_cfg &slv_ess_apu_cfg &slv_imem_cfg
+                                &slv_srif &slv_prng &slv_qdss_cfg
+                                &slv_wss0_apu_cfg &slv_ddrc_apu_cfg &slv_gcnt
+                                &slv_tlmm &slv_wss0_vmidmt_cfg &slv_prng_apu_cfg
+                                &slv_boot_rom &slv_security &slv_spdm
+                                &slv_pcnoc_mpu_cfg &slv_ddrc_mpu2_cfg &slv_ess_vmidmt_cfg
+                                &slv_qhss_apu_cfg &slv_adss_vmidmt_cfg &slv_clk_ctl
+                                &slv_adss_apu &slv_blsp_cfg &slv_usb2_cfg
+                                &slv_srvc_pcnoc &slv_snoc_mpu_cfg &slv_wss1_vmidmt_cfg
+                                &slv_sdcc_cfg &slv_snoc_cfg>;
+               };
+
+               mas_adss_dma1: mas-adss-dma1 {
+                       cell-id = <MSM_BUS_MASTER_ADDS_DMA1>;
+                       label = "mas-adss-dma1";
+                       qcom,buswidth = <4>;
+                       qcom,ap-owned;
+                       qcom,connections = <&pcnoc_m_1>;
+                       qcom,bus-dev = <&fab_pcnoc>;
+                       qcom,mas-rpm-id = <ICBID_MASTER_ADSS_DMA1>;
+                       qcom,blacklist = <&slv_tcsr &slv_mdio &slv_adss_cfg
+                                &slv_fephy_cfg &slv_wss1_apu_cfg &slv_ddrc_mpu1_cfg
+                                &slv_ddrc_mpu0_cfg &slv_qpic_cfg &slv_ddrc_cfg
+                                &slv_pcnoc_cfg &slv_ess_apu_cfg &slv_imem_cfg
+                                &slv_srif &slv_prng &slv_qdss_cfg
+                                &slv_wss0_apu_cfg &slv_ddrc_apu_cfg &slv_gcnt
+                                &slv_tlmm &slv_wss0_vmidmt_cfg &slv_prng_apu_cfg
+                                &slv_boot_rom &slv_security &slv_spdm
+                                &slv_pcnoc_mpu_cfg &slv_ddrc_mpu2_cfg &slv_ess_vmidmt_cfg
+                                &slv_qhss_apu_cfg &slv_adss_vmidmt_cfg &slv_clk_ctl
+                                &slv_adss_apu &slv_blsp_cfg &slv_usb2_cfg
+                                &slv_srvc_pcnoc &slv_snoc_mpu_cfg &slv_wss1_vmidmt_cfg
+                                &slv_sdcc_cfg &slv_snoc_cfg>;
+               };
+
+               mas_adss_dma2: mas-adss-dma2 {
+                       cell-id = <MSM_BUS_MASTER_ADDS_DMA2>;
+                       label = "mas-adss-dma2";
+                       qcom,buswidth = <4>;
+                       qcom,ap-owned;
+                       qcom,connections = <&pcnoc_m_1>;
+                       qcom,bus-dev = <&fab_pcnoc>;
+                       qcom,mas-rpm-id = <ICBID_MASTER_ADSS_DMA2>;
+                       qcom,blacklist = <&slv_tcsr &slv_mdio &slv_adss_cfg
+                                &slv_fephy_cfg &slv_wss1_apu_cfg &slv_ddrc_mpu1_cfg
+                                &slv_ddrc_mpu0_cfg &slv_qpic_cfg &slv_ddrc_cfg
+                                &slv_pcnoc_cfg &slv_ess_apu_cfg &slv_imem_cfg
+                                &slv_srif &slv_prng &slv_qdss_cfg
+                                &slv_wss0_apu_cfg &slv_ddrc_apu_cfg &slv_gcnt
+                                &slv_tlmm &slv_wss0_vmidmt_cfg &slv_prng_apu_cfg
+                                &slv_boot_rom &slv_security &slv_spdm
+                                &slv_pcnoc_mpu_cfg &slv_ddrc_mpu2_cfg &slv_ess_vmidmt_cfg
+                                &slv_qhss_apu_cfg &slv_adss_vmidmt_cfg &slv_clk_ctl
+                                &slv_adss_apu &slv_blsp_cfg &slv_usb2_cfg
+                                &slv_srvc_pcnoc &slv_snoc_mpu_cfg &slv_wss1_vmidmt_cfg
+                                &slv_sdcc_cfg &slv_snoc_cfg>;
+               };
+
+               mas_adss_dma3: mas-adss-dma3 {
+                       cell-id = <MSM_BUS_MASTER_ADDS_DMA3>;
+                       label = "mas-adss-dma3";
+                       qcom,buswidth = <4>;
+                       qcom,ap-owned;
+                       qcom,connections = <&pcnoc_m_1>;
+                       qcom,bus-dev = <&fab_pcnoc>;
+                       qcom,mas-rpm-id = <ICBID_MASTER_ADSS_DMA3>;
+                       qcom,blacklist = <&slv_tcsr &slv_mdio &slv_adss_cfg
+                                &slv_fephy_cfg &slv_wss1_apu_cfg &slv_ddrc_mpu1_cfg
+                                &slv_ddrc_mpu0_cfg &slv_qpic_cfg &slv_ddrc_cfg
+                                &slv_pcnoc_cfg &slv_ess_apu_cfg &slv_imem_cfg
+                                &slv_srif &slv_prng &slv_qdss_cfg
+                                &slv_wss0_apu_cfg &slv_ddrc_apu_cfg &slv_gcnt
+                                &slv_tlmm &slv_wss0_vmidmt_cfg &slv_prng_apu_cfg
+                                &slv_boot_rom &slv_security &slv_spdm
+                                &slv_pcnoc_mpu_cfg &slv_ddrc_mpu2_cfg &slv_ess_vmidmt_cfg
+                                &slv_qhss_apu_cfg &slv_adss_vmidmt_cfg &slv_clk_ctl
+                                &slv_adss_apu &slv_blsp_cfg &slv_usb2_cfg
+                                &slv_srvc_pcnoc &slv_snoc_mpu_cfg &slv_wss1_vmidmt_cfg
+                                &slv_sdcc_cfg &slv_snoc_cfg>;
+               };
+
+               mas_qpic_bam: mas-qpic-bam {
+                       cell-id = <MSM_BUS_MASTER_QPIC_BAM>;
+                       label = "mas-qpic-bam";
+                       qcom,buswidth = <4>;
+                       qcom,ap-owned;
+                       qcom,connections = <&pcnoc_m_0>;
+                       qcom,bus-dev = <&fab_pcnoc>;
+                       qcom,mas-rpm-id = <ICBID_MASTER_QPIC_BAM>;
+                       qcom,blacklist = <&slv_tcsr &slv_mdio &slv_adss_cfg
+                                &slv_fephy_cfg &slv_wss1_apu_cfg &slv_ddrc_mpu1_cfg
+                                &slv_ddrc_mpu0_cfg &slv_qpic_cfg &slv_ddrc_cfg
+                                &slv_pcnoc_cfg &slv_ess_apu_cfg &slv_imem_cfg
+                                &slv_srif &slv_prng &slv_qdss_cfg
+                                &slv_wss0_apu_cfg &slv_ddrc_apu_cfg &slv_gcnt
+                                &slv_tlmm &slv_wss0_vmidmt_cfg &slv_prng_apu_cfg
+                                &slv_boot_rom &slv_security &slv_spdm
+                                &slv_pcnoc_mpu_cfg &slv_ddrc_mpu2_cfg &slv_ess_vmidmt_cfg
+                                &slv_qhss_apu_cfg &slv_adss_vmidmt_cfg &slv_clk_ctl
+                                &slv_adss_apu &slv_blsp_cfg &slv_usb2_cfg
+                                &slv_srvc_pcnoc &slv_snoc_mpu_cfg &slv_wss1_vmidmt_cfg
+                                &slv_sdcc_cfg &slv_snoc_cfg>;
+               };
+
+               mas_spdm: mas-spdm {
+                       cell-id = <MSM_BUS_MASTER_SPDM>;
+                       label = "mas-spdm";
+                       qcom,buswidth = <4>;
+                       qcom,ap-owned;
+                       qcom,connections = <&pcnoc_m_0>;
+                       qcom,bus-dev = <&fab_pcnoc>;
+                       qcom,mas-rpm-id = <ICBID_MASTER_SPDM>;
+                       qcom,blacklist = <&slv_tcsr &slv_mdio &slv_adss_cfg
+                                &slv_fephy_cfg &slv_wss1_apu_cfg &slv_ddrc_mpu1_cfg
+                                &slv_ddrc_mpu0_cfg &slv_qpic_cfg &slv_ddrc_cfg
+                                &slv_pcnoc_cfg &slv_ess_apu_cfg &slv_imem_cfg
+                                &slv_srif &slv_prng &slv_qdss_cfg
+                                &slv_wss0_apu_cfg &slv_ddrc_apu_cfg &slv_gcnt
+                                &slv_tlmm &slv_wss0_vmidmt_cfg &slv_prng_apu_cfg
+                                &slv_boot_rom &slv_security &slv_spdm
+                                &slv_pcnoc_mpu_cfg &slv_ddrc_mpu2_cfg &slv_ess_vmidmt_cfg
+                                &slv_qhss_apu_cfg &slv_adss_vmidmt_cfg &slv_clk_ctl
+                                &slv_adss_apu &slv_blsp_cfg &slv_usb2_cfg
+                                &slv_srvc_pcnoc &slv_snoc_mpu_cfg &slv_wss1_vmidmt_cfg
+                                &slv_sdcc_cfg &slv_snoc_cfg>;
+               };
+
+               mas_pcnoc_cfg: mas-pcnoc-cfg {
+                       cell-id = <MSM_BUS_MASTER_PNOC_CFG>;
+                       label = "mas-pcnoc-cfg";
+                       qcom,buswidth = <4>;
+                       qcom,ap-owned;
+                       qcom,connections = <&slv_srvc_pcnoc>;
+                       qcom,bus-dev = <&fab_pcnoc>;
+                       qcom,mas-rpm-id = <ICBID_MASTER_PCNOC_CFG>;
+               };
+
+               mas_tic: mas-tic {
+                       cell-id = <MSM_BUS_MASTER_TIC>;
+                       label = "mas-tic";
+                       qcom,buswidth = <4>;
+                       qcom,ap-owned;
+                       qcom,connections = <&pcnoc_int_0 &slv_pcnoc_snoc>;
+                       qcom,bus-dev = <&fab_pcnoc>;
+                       qcom,mas-rpm-id = <ICBID_MASTER_TIC>;
+               };
+
+               mas_sdcc_bam: mas-sdcc-bam {
+                       cell-id = <MSM_BUS_MASTER_SDCC_BAM>;
+                       label = "mas-sdcc-bam";
+                       qcom,buswidth = <8>;
+                       qcom,ap-owned;
+                       qcom,qport = <14>;
+                       qcom,qos-mode = "fixed";
+                       qcom,connections = <&slv_pcnoc_snoc>;
+                       qcom,prio1 = <0>;
+                       qcom,prio0 = <0>;
+                       qcom,bus-dev = <&fab_pcnoc>;
+                       qcom,mas-rpm-id = <ICBID_MASTER_SDCC_BAM>;
+                       qcom,blacklist = <&slv_tcsr &slv_mdio &slv_adss_cfg
+                                &slv_fephy_cfg &slv_wss1_apu_cfg &slv_ddrc_mpu1_cfg
+                                &slv_ddrc_mpu0_cfg &slv_qpic_cfg &slv_ddrc_cfg
+                                &slv_pcnoc_cfg &slv_ess_apu_cfg &slv_imem_cfg
+                                &slv_srif &slv_prng &slv_qdss_cfg
+                                &slv_wss0_apu_cfg &slv_ddrc_apu_cfg &slv_gcnt
+                                &slv_tlmm &slv_wss0_vmidmt_cfg &slv_prng_apu_cfg
+                                &slv_boot_rom &slv_security &slv_spdm
+                                &slv_pcnoc_mpu_cfg &slv_ddrc_mpu2_cfg &slv_ess_vmidmt_cfg
+                                &slv_qhss_apu_cfg &slv_adss_vmidmt_cfg &slv_clk_ctl
+                                &slv_adss_apu &slv_blsp_cfg &slv_usb2_cfg
+                                &slv_srvc_pcnoc &slv_snoc_mpu_cfg &slv_wss1_vmidmt_cfg
+                                &slv_sdcc_cfg &slv_snoc_cfg>;
+               };
+
+               mas_snoc_pcnoc: mas-snoc-pcnoc {
+                       cell-id = <MSM_BUS_SNOC_PNOC_MAS>;
+                       label = "mas-snoc-pcnoc";
+                       qcom,buswidth = <4>;
+                       qcom,ap-owned;
+                       qcom,qport = <16>;
+                       qcom,qos-mode = "fixed";
+                       qcom,connections = <&pcnoc_int_0>;
+                       qcom,prio1 = <0>;
+                       qcom,prio0 = <0>;
+                       qcom,bus-dev = <&fab_pcnoc>;
+                       qcom,mas-rpm-id = <ICBID_MASTER_SNOC_PCNOC>;
+               };
+
+               mas_qdss_dap: mas-qdss-dap {
+                       cell-id = <MSM_BUS_MASTER_QDSS_DAP>;
+                       label = "mas-qdss-dap";
+                       qcom,buswidth = <4>;
+                       qcom,ap-owned;
+                       qcom,connections = <&pcnoc_int_0 &slv_pcnoc_snoc>;
+                       qcom,bus-dev = <&fab_pcnoc>;
+                       qcom,mas-rpm-id = <ICBID_MASTER_QDSS_DAP>;
+               };
+
+               mas_ddrc_snoc: mas-ddrc-snoc {
+                       cell-id = <MSM_BUS_MASTER_DDRC_SNOC>;
+                       label = "mas-ddrc-snoc";
+                       qcom,buswidth = <16>;
+                       qcom,ap-owned;
+                       qcom,connections = <&snoc_int_0 &snoc_int_1
+                                &slv_pcie>;
+                       qcom,bus-dev = <&fab_snoc>;
+                       qcom,mas-rpm-id = <ICBID_MASTER_DDRC_SNOC>;
+                       qcom,blacklist = <&slv_snoc_ddrc_m1 &slv_srvc_snoc>;
+               };
+
+               mas_wss_0: mas-wss-0 {
+                       cell-id = <MSM_BUS_MASTER_WSS_0>;
+                       label = "mas-wss-0";
+                       qcom,buswidth = <4>;
+                       qcom,ap-owned;
+                       qcom,qport = <26>;
+                       qcom,qos-mode = "fixed";
+                       qcom,connections = <&snoc_int_0 &slv_snoc_ddrc_m1>;
+                       qcom,prio1 = <0>;
+                       qcom,prio0 = <0>;
+                       qcom,bus-dev = <&fab_snoc>;
+                       qcom,mas-rpm-id = <ICBID_MASTER_WSS_0>;
+                       qcom,blacklist = <&slv_usb3_cfg &slv_ess_cfg &slv_pcie
+                                &slv_wss1_cfg &slv_wss0_cfg &slv_crypto_cfg
+                                &slv_srvc_snoc>;
+               };
+
+               mas_wss_1: mas-wss-1 {
+                       cell-id = <MSM_BUS_MASTER_WSS_1>;
+                       label = "mas-wss-1";
+                       qcom,buswidth = <4>;
+                       qcom,ap-owned;
+                       qcom,qport = <27>;
+                       qcom,qos-mode = "fixed";
+                       qcom,connections = <&snoc_int_0 &slv_snoc_ddrc_m1>;
+                       qcom,prio1 = <0>;
+                       qcom,prio0 = <0>;
+                       qcom,bus-dev = <&fab_snoc>;
+                       qcom,mas-rpm-id = <ICBID_MASTER_WSS_1>;
+                       qcom,blacklist = <&slv_usb3_cfg &slv_ess_cfg &slv_pcie
+                                &slv_wss1_cfg &slv_wss0_cfg &slv_crypto_cfg
+                                &slv_srvc_snoc>;
+               };
+
+               mas_crypto: mas-crypto {
+                       cell-id = <MSM_BUS_MASTER_CRYPTO>;
+                       label = "mas-crypto";
+                       qcom,buswidth = <8>;
+                       qcom,ap-owned;
+                       qcom,qport = <5>;
+                       qcom,qos-mode = "fixed";
+                       qcom,connections = <&snoc_int_0 &snoc_int_1
+                                &slv_snoc_ddrc_m1>;
+                       qcom,prio1 = <0>;
+                       qcom,prio0 = <0>;
+                       qcom,bus-dev = <&fab_snoc>;
+                       qcom,mas-rpm-id = <ICBID_MASTER_CRYPTO>;
+                       qcom,blacklist = <&slv_usb3_cfg &slv_ess_cfg &slv_a7ss
+                                &slv_pcie &slv_qdss_stm &slv_crypto_cfg
+                                &slv_srvc_snoc>;
+               };
+
+               mas_ess: mas-ess {
+                       cell-id = <MSM_BUS_MASTER_ESS>;
+                       label = "mas-ess";
+                       qcom,buswidth = <8>;
+                       qcom,ap-owned;
+                       qcom,qport = <44>;
+                       qcom,qos-mode = "fixed";
+                       qcom,connections = <&snoc_int_0 &slv_snoc_ddrc_m1>;
+                       qcom,prio1 = <0>;
+                       qcom,prio0 = <0>;
+                       qcom,bus-dev = <&fab_snoc>;
+                       qcom,mas-rpm-id = <ICBID_MASTER_ESS>;
+                       qcom,blacklist = <&slv_usb3_cfg &slv_ess_cfg &slv_a7ss
+                                &slv_pcie &slv_qdss_stm &slv_wss1_cfg
+                                &slv_wss0_cfg &slv_crypto_cfg &slv_srvc_snoc>;
+               };
+
+               mas_pcie: mas-pcie {
+                       cell-id = <MSM_BUS_MASTER_PCIE>;
+                       label = "mas-pcie";
+                       qcom,buswidth = <8>;
+                       qcom,ap-owned;
+                       qcom,qport = <6>;
+                       qcom,qos-mode = "fixed";
+                       qcom,connections = <&snoc_int_0 &slv_snoc_ddrc_m1>;
+                       qcom,prio1 = <0>;
+                       qcom,prio0 = <0>;
+                       qcom,bus-dev = <&fab_snoc>;
+                       qcom,mas-rpm-id = <ICBID_MASTER_PCIE>;
+                       qcom,blacklist = <&slv_usb3_cfg &slv_ess_cfg &slv_pcie
+                                &slv_qdss_stm &slv_wss1_cfg &slv_wss0_cfg
+                                &slv_crypto_cfg &slv_srvc_snoc>;
+               };
+
+               mas_usb3: mas-usb3 {
+                       cell-id = <MSM_BUS_MASTER_USB3>;
+                       label = "mas-usb3";
+                       qcom,buswidth = <8>;
+                       qcom,ap-owned;
+                       qcom,qport = <7>;
+                       qcom,qos-mode = "fixed";
+                       qcom,connections = <&snoc_int_0 &slv_snoc_ddrc_m1>;
+                       qcom,prio1 = <0>;
+                       qcom,prio0 = <0>;
+                       qcom,bus-dev = <&fab_snoc>;
+                       qcom,mas-rpm-id = <ICBID_MASTER_USB3>;
+                       qcom,blacklist = <&slv_usb3_cfg &slv_ess_cfg &slv_a7ss
+                                &slv_pcie &slv_qdss_stm &slv_wss1_cfg
+                                &slv_wss0_cfg &slv_crypto_cfg &slv_srvc_snoc>;
+               };
+
+               mas_qdss_etr: mas-qdss-etr {
+                       cell-id = <MSM_BUS_MASTER_QDSS_ETR>;
+                       label = "mas-qdss-etr";
+                       qcom,buswidth = <8>;
+                       qcom,ap-owned;
+                       qcom,qport = <544>;
+                       qcom,qos-mode = "fixed";
+                       qcom,connections = <&qdss_int>;
+                       qcom,prio1 = <0>;
+                       qcom,prio0 = <0>;
+                       qcom,bus-dev = <&fab_snoc>;
+                       qcom,mas-rpm-id = <ICBID_MASTER_QDSS_ETR>;
+                       qcom,blacklist = <&slv_usb3_cfg &slv_ess_cfg &slv_a7ss
+                                &slv_pcie &slv_qdss_stm &slv_wss1_cfg
+                                &slv_wss0_cfg &slv_crypto_cfg &slv_srvc_snoc>;
+               };
+
+               mas_qdss_bamndp: mas-qdss-bamndp {
+                       cell-id = <MSM_BUS_MASTER_QDSS_BAMNDP>;
+                       label = "mas-qdss-bamndp";
+                       qcom,buswidth = <4>;
+                       qcom,ap-owned;
+                       qcom,qport = <576>;
+                       qcom,qos-mode = "fixed";
+                       qcom,connections = <&qdss_int>;
+                       qcom,prio1 = <0>;
+                       qcom,prio0 = <0>;
+                       qcom,bus-dev = <&fab_snoc>;
+                       qcom,mas-rpm-id = <ICBID_MASTER_QDSS_BAMNDP>;
+                       qcom,blacklist = <&slv_usb3_cfg &slv_ess_cfg &slv_a7ss
+                                &slv_pcie &slv_qdss_stm &slv_wss1_cfg
+                                &slv_wss0_cfg &slv_crypto_cfg &slv_srvc_snoc>;
+               };
+
+               mas_pcnoc_snoc: mas-pcnoc-snoc {
+                       cell-id = <MSM_BUS_PNOC_SNOC_MAS>;
+                       label = "mas-pcnoc-snoc";
+                       qcom,buswidth = <4>;
+                       qcom,ap-owned;
+                       qcom,qport = <384>;
+                       qcom,qos-mode = "fixed";
+                       qcom,connections = <&snoc_int_0 &snoc_int_1
+                                &slv_snoc_ddrc_m1>;
+                       qcom,prio1 = <0>;
+                       qcom,prio0 = <0>;
+                       qcom,bus-dev = <&fab_snoc>;
+                       qcom,mas-rpm-id = <ICBID_MASTER_PNOC_SNOC>;
+                       qcom,blacklist = <&slv_srvc_snoc>;
+               };
+
+               mas_snoc_cfg: mas-snoc-cfg {
+                       cell-id = <MSM_BUS_MASTER_QDSS_SNOC_CFG>;
+                       label = "mas-snoc-cfg";
+                       qcom,buswidth = <4>;
+                       qcom,ap-owned;
+                       qcom,connections = <&slv_srvc_snoc>;
+                       qcom,bus-dev = <&fab_snoc>;
+                       qcom,mas-rpm-id = <ICBID_MASTER_QDSS_SNOC_CFG>;
+               };
+
+               /*Internal nodes*/
+
+
+               pcnoc_m_0: pcnoc-m-0 {
+                       cell-id = <MSM_BUS_PNOC_M_0>;
+                       label = "pcnoc-m-0";
+                       qcom,buswidth = <4>;
+                       qcom,ap-owned;
+                       qcom,qport = <12>;
+                       qcom,qos-mode = "fixed";
+                       qcom,connections = <&slv_pcnoc_snoc>;
+                       qcom,prio1 = <1>;
+                       qcom,prio0 = <1>;
+                       qcom,bus-dev = <&fab_pcnoc>;
+                       qcom,mas-rpm-id = <ICBID_MASTER_PCNOC_M_0>;
+                       qcom,slv-rpm-id = <ICBID_SLAVE_PCNOC_M_0>;
+               };
+
+               pcnoc_m_1: pcnoc-m-1 {
+                       cell-id = <MSM_BUS_PNOC_M_1>;
+                       label = "pcnoc-m-1";
+                       qcom,buswidth = <4>;
+                       qcom,ap-owned;
+                       qcom,qport = <13>;
+                       qcom,qos-mode = "fixed";
+                       qcom,connections = <&slv_pcnoc_snoc>;
+                       qcom,prio1 = <1>;
+                       qcom,prio0 = <1>;
+                       qcom,bus-dev = <&fab_pcnoc>;
+                       qcom,mas-rpm-id = <ICBID_MASTER_PCNOC_M_1>;
+                       qcom,slv-rpm-id = <ICBID_SLAVE_PCNOC_M_1>;
+               };
+
+               pcnoc_int_0: pcnoc-int-0 {
+                       cell-id = <MSM_BUS_PNOC_INT_0>;
+                       label = "pcnoc-int-0";
+                       qcom,buswidth = <8>;
+                       qcom,ap-owned;
+                       qcom,connections = <&pcnoc_s_1 &pcnoc_s_2 &pcnoc_s_0
+                                &pcnoc_s_4 &pcnoc_s_5
+                                &pcnoc_s_6 &pcnoc_s_7
+                                &pcnoc_s_8 &pcnoc_s_9
+                                &pcnoc_s_3>;
+                       qcom,bus-dev = <&fab_pcnoc>;
+                       qcom,mas-rpm-id = <ICBID_MASTER_PCNOC_INT_0>;
+                       qcom,slv-rpm-id = <ICBID_SLAVE_PCNOC_INT_0>;
+               };
+
+               pcnoc_s_0: pcnoc-s-0 {
+                       cell-id = <MSM_BUS_PNOC_SLV_0>;
+                       label = "pcnoc-s-0";
+                       qcom,buswidth = <4>;
+                       qcom,ap-owned;
+                       qcom,connections = <&slv_clk_ctl &slv_tcsr &slv_security
+                                &slv_tlmm>;
+                       qcom,bus-dev = <&fab_pcnoc>;
+                       qcom,mas-rpm-id = <ICBID_MASTER_PCNOC_S_0>;
+                       qcom,slv-rpm-id = <ICBID_SLAVE_PCNOC_S_0>;
+               };
+
+               pcnoc_s_1: pcnoc-s-1 {
+                       cell-id = <MSM_BUS_PNOC_SLV_1>;
+                       label = "pcnoc-s-1";
+                       qcom,buswidth = <4>;
+                       qcom,ap-owned;
+                       qcom,connections = <&slv_prng_apu_cfg &slv_prng &slv_imem_cfg>;
+                       qcom,bus-dev = <&fab_pcnoc>;
+                       qcom,mas-rpm-id = <ICBID_MASTER_PCNOC_S_1>;
+                       qcom,slv-rpm-id = <ICBID_SLAVE_PCNOC_S_1>;
+               };
+
+               pcnoc_s_2: pcnoc-s-2 {
+                       cell-id = <MSM_BUS_PNOC_SLV_2>;
+                       label = "pcnoc-s-2";
+                       qcom,buswidth = <4>;
+                       qcom,ap-owned;
+                       qcom,connections = <&slv_spdm &slv_pcnoc_mpu_cfg &slv_pcnoc_cfg
+                               &slv_boot_rom>;
+                       qcom,bus-dev = <&fab_pcnoc>;
+                       qcom,mas-rpm-id = <ICBID_MASTER_PCNOC_S_2>;
+                       qcom,slv-rpm-id = <ICBID_SLAVE_PCNOC_S_2>;
+               };
+
+               pcnoc_s_3: pcnoc-s-3 {
+                       cell-id = <MSM_BUS_PNOC_SLV_3>;
+                       label = "pcnoc-s-3";
+                       qcom,buswidth = <4>;
+                       qcom,ap-owned;
+                       qcom,connections = <&slv_qdss_cfg &slv_gcnt &slv_snoc_cfg
+                                &slv_snoc_mpu_cfg>;
+                       qcom,bus-dev = <&fab_pcnoc>;
+                       qcom,mas-rpm-id = <ICBID_MASTER_PCNOC_S_3>;
+                       qcom,slv-rpm-id = <ICBID_SLAVE_PCNOC_S_3>;
+               };
+
+               pcnoc_s_4: pcnoc-s-4 {
+                       cell-id = <MSM_BUS_PNOC_SLV_4>;
+                       label = "pcnoc-s-4";
+                       qcom,buswidth = <4>;
+                       qcom,ap-owned;
+                       qcom,connections = <&slv_adss_cfg &slv_adss_vmidmt_cfg &slv_adss_apu>;
+                       qcom,bus-dev = <&fab_pcnoc>;
+                       qcom,mas-rpm-id = <ICBID_MASTER_PCNOC_S_4>;
+                       qcom,slv-rpm-id = <ICBID_SLAVE_PCNOC_S_4>;
+               };
+
+               pcnoc_s_5: pcnoc-s-5 {
+                       cell-id = <MSM_BUS_PNOC_SLV_5>;
+                       label = "pcnoc-s-5";
+                       qcom,buswidth = <4>;
+                       qcom,ap-owned;
+                       qcom,connections = <&slv_qhss_apu_cfg &slv_fephy_cfg &slv_mdio
+                                &slv_srif>;
+                       qcom,bus-dev = <&fab_pcnoc>;
+                       qcom,mas-rpm-id = <ICBID_MASTER_PCNOC_S_5>;
+                       qcom,slv-rpm-id = <ICBID_SLAVE_PCNOC_S_5>;
+               };
+
+               pcnoc_s_6: pcnoc-s-6 {
+                       cell-id = <MSM_BUS_PNOC_SLV_6>;
+                       label = "pcnoc-s-6";
+                       qcom,buswidth = <4>;
+                       qcom,ap-owned;
+                       qcom,connections = <&slv_ddrc_mpu0_cfg &slv_ddrc_apu_cfg &slv_ddrc_mpu2_cfg
+                               &slv_ddrc_cfg &slv_ddrc_mpu1_cfg>;
+                       qcom,bus-dev = <&fab_pcnoc>;
+                       qcom,mas-rpm-id = <ICBID_MASTER_PCNOC_S_6>;
+                       qcom,slv-rpm-id = <ICBID_SLAVE_PCNOC_S_6>;
+               };
+
+               pcnoc_s_7: pcnoc-s-7 {
+                       cell-id = <MSM_BUS_PNOC_SLV_7>;
+                       label = "pcnoc-s-7";
+                       qcom,buswidth = <4>;
+                       qcom,ap-owned;
+                       qcom,connections = <&slv_ess_apu_cfg &slv_usb2_cfg &slv_ess_vmidmt_cfg>;
+                       qcom,bus-dev = <&fab_pcnoc>;
+                       qcom,mas-rpm-id = <ICBID_MASTER_PCNOC_S_7>;
+                       qcom,slv-rpm-id = <ICBID_SLAVE_PCNOC_S_7>;
+               };
+
+               pcnoc_s_8: pcnoc-s-8 {
+                       cell-id = <MSM_BUS_PNOC_SLV_8>;
+                       label = "pcnoc-s-8";
+                       qcom,buswidth = <4>;
+                       qcom,ap-owned;
+                       qcom,connections = <&slv_sdcc_cfg &slv_qpic_cfg &slv_blsp_cfg>;
+                       qcom,bus-dev = <&fab_pcnoc>;
+                       qcom,mas-rpm-id = <ICBID_MASTER_PCNOC_S_8>;
+                       qcom,slv-rpm-id = <ICBID_SLAVE_PCNOC_S_8>;
+               };
+
+               pcnoc_s_9: pcnoc-s-9 {
+                       cell-id = <MSM_BUS_PNOC_SLV_9>;
+                       label = "pcnoc-s-9";
+                       qcom,buswidth = <4>;
+                       qcom,ap-owned;
+                       qcom,connections = <&slv_wss1_apu_cfg &slv_wss1_vmidmt_cfg &slv_wss0_vmidmt_cfg
+                                &slv_wss0_apu_cfg>;
+                       qcom,bus-dev = <&fab_pcnoc>;
+                       qcom,mas-rpm-id = <ICBID_MASTER_PCNOC_S_9>;
+                       qcom,slv-rpm-id = <ICBID_SLAVE_PCNOC_S_9>;
+               };
+
+               snoc_int_0: snoc-int-0 {
+                       cell-id = <MSM_BUS_SNOC_INT_0>;
+                       label = "snoc-int-0";
+                       qcom,buswidth = <8>;
+                       qcom,ap-owned;
+                       qcom,connections = <&slv_ocimem &slv_qdss_stm>;
+                       qcom,bus-dev = <&fab_snoc>;
+                       qcom,mas-rpm-id = <ICBID_MASTER_SNOC_INT_0>;
+                       qcom,slv-rpm-id = <ICBID_SLAVE_SNOC_INT_0>;
+               };
+
+               snoc_int_1: snoc-int-1 {
+                       cell-id = <MSM_BUS_SNOC_INT_1>;
+                       label = "snoc-int-1";
+                       qcom,buswidth = <8>;
+                       qcom,ap-owned;
+                       qcom,connections = <&slv_crypto_cfg &slv_a7ss &slv_ess_cfg
+                                &slv_usb3_cfg &slv_wss1_cfg
+                               &slv_wss0_cfg>;
+                       qcom,bus-dev = <&fab_snoc>;
+                       qcom,mas-rpm-id = <ICBID_MASTER_SNOC_INT_1>;
+                       qcom,slv-rpm-id = <ICBID_SLAVE_SNOC_INT_1>;
+               };
+
+               qdss_int: qdss-int {
+                       cell-id = <MSM_BUS_SNOC_QDSS_INT>;
+                       label = "qdss-int";
+                       qcom,buswidth = <8>;
+                       qcom,ap-owned;
+                       qcom,connections = <&snoc_int_0 &slv_snoc_ddrc_m1>;
+                       qcom,bus-dev = <&fab_snoc>;
+                       qcom,mas-rpm-id = <ICBID_MASTER_QDSS_INT>;
+                       qcom,slv-rpm-id = <ICBID_SLAVE_QDSS_INT>;
+               };
+               /*Slaves*/
+
+               slv_clk_ctl:slv-clk-ctl {
+                       cell-id = <MSM_BUS_SLAVE_CLK_CTL>;
+                       label = "slv-clk-ctl";
+                       qcom,buswidth = <4>;
+                       qcom,ap-owned;
+                       qcom,bus-dev = <&fab_pcnoc>;
+                       qcom,slv-rpm-id = <ICBID_SLAVE_CLK_CTL>;
+               };
+
+               slv_security:slv-security {
+                       cell-id = <MSM_BUS_SLAVE_SECURITY>;
+                       label = "slv-security";
+                       qcom,buswidth = <4>;
+                       qcom,ap-owned;
+                       qcom,bus-dev = <&fab_pcnoc>;
+                       qcom,slv-rpm-id = <ICBID_SLAVE_SECURITY>;
+               };
+
+               slv_tcsr:slv-tcsr {
+                       cell-id = <MSM_BUS_SLAVE_TCSR>;
+                       label = "slv-tcsr";
+                       qcom,buswidth = <4>;
+                       qcom,ap-owned;
+                       qcom,bus-dev = <&fab_pcnoc>;
+                       qcom,slv-rpm-id = <ICBID_SLAVE_TCSR>;
+               };
+
+               slv_tlmm:slv-tlmm {
+                       cell-id = <MSM_BUS_SLAVE_TLMM>;
+                       label = "slv-tlmm";
+                       qcom,buswidth = <4>;
+                       qcom,ap-owned;
+                       qcom,bus-dev = <&fab_pcnoc>;
+                       qcom,slv-rpm-id = <ICBID_SLAVE_TLMM>;
+               };
+
+               slv_imem_cfg:slv-imem-cfg {
+                       cell-id = <MSM_BUS_SLAVE_IMEM_CFG>;
+                       label = "slv-imem-cfg";
+                       qcom,buswidth = <4>;
+                       qcom,ap-owned;
+                       qcom,bus-dev = <&fab_pcnoc>;
+                       qcom,slv-rpm-id = <ICBID_SLAVE_IMEM_CFG>;
+               };
+
+               slv_prng:slv-prng {
+                       cell-id = <MSM_BUS_SLAVE_PRNG>;
+                       label = "slv-prng";
+                       qcom,buswidth = <4>;
+                       qcom,ap-owned;
+                       qcom,bus-dev = <&fab_pcnoc>;
+                       qcom,slv-rpm-id = <ICBID_SLAVE_PRNG>;
+               };
+
+               slv_prng_apu_cfg:slv-prng-apu-cfg {
+                       cell-id = <MSM_BUS_SLAVE_PRNG_APU_CFG>;
+                       label = "slv-prng-apu-cfg";
+                       qcom,buswidth = <4>;
+                       qcom,ap-owned;
+                       qcom,bus-dev = <&fab_pcnoc>;
+                       qcom,slv-rpm-id = <ICBID_SLAVE_PRNG_APU_CFG>;
+               };
+
+               slv_boot_rom:slv-boot-rom {
+                       cell-id = <MSM_BUS_SLAVE_BOOT_ROM>;
+                       label = "slv-boot-rom";
+                       qcom,buswidth = <4>;
+                       qcom,ap-owned;
+                       qcom,bus-dev = <&fab_pcnoc>;
+                       qcom,slv-rpm-id = <ICBID_SLAVE_BOOT_ROM>;
+               };
+
+               slv_spdm:slv-spdm {
+                       cell-id = <MSM_BUS_SLAVE_SPDM_WRAPPER>;
+                       label = "slv-spdm";
+                       qcom,buswidth = <4>;
+                       qcom,ap-owned;
+                       qcom,bus-dev = <&fab_pcnoc>;
+                       qcom,slv-rpm-id = <ICBID_SLAVE_SPDM_WRAPPER>;
+               };
+
+               slv_pcnoc_cfg:slv-pcnoc-cfg {
+                       cell-id = <MSM_BUS_SLAVE_PNOC_CFG>;
+                       label = "slv-pcnoc-cfg";
+                       qcom,buswidth = <4>;
+                       qcom,ap-owned;
+                       qcom,bus-dev = <&fab_pcnoc>;
+                       qcom,slv-rpm-id = <ICBID_SLAVE_PNOC_CFG>;
+               };
+
+               slv_pcnoc_mpu_cfg:slv-pcnoc-mpu-cfg {
+                       cell-id = <MSM_BUS_SLAVE_PERIPH_MPU_CFG>;
+                       label = "slv-pcnoc-mpu-cfg";
+                       qcom,buswidth = <4>;
+                       qcom,ap-owned;
+                       qcom,bus-dev = <&fab_pcnoc>;
+                       qcom,slv-rpm-id = <ICBID_SLAVE_PERIPH_MPU_CFG>;
+               };
+
+               slv_gcnt:slv-gcnt {
+                       cell-id = <MSM_BUS_SLAVE_GCNT>;
+                       label = "slv-gcnt";
+                       qcom,buswidth = <4>;
+                       qcom,ap-owned;
+                       qcom,bus-dev = <&fab_pcnoc>;
+                       qcom,slv-rpm-id = <ICBID_SLAVE_GCNT>;
+               };
+
+               slv_qdss_cfg:slv-qdss-cfg {
+                       cell-id = <MSM_BUS_SLAVE_QDSS_CFG>;
+                       label = "slv-qdss-cfg";
+                       qcom,buswidth = <4>;
+                       qcom,ap-owned;
+                       qcom,bus-dev = <&fab_pcnoc>;
+                       qcom,slv-rpm-id = <ICBID_SLAVE_QDSS_CFG>;
+               };
+
+               slv_snoc_cfg:slv-snoc-cfg {
+                       cell-id = <MSM_BUS_SLAVE_SNOC_CFG>;
+                       label = "slv-snoc-cfg";
+                       qcom,buswidth = <4>;
+                       qcom,ap-owned;
+                       qcom,bus-dev = <&fab_pcnoc>;
+                       qcom,slv-rpm-id = <ICBID_SLAVE_SNOC_CFG>;
+               };
+
+               slv_snoc_mpu_cfg:slv-snoc-mpu-cfg {
+                       cell-id = <MSM_BUS_SLAVE_SNOC_MPU_CFG>;
+                       label = "slv-snoc-mpu-cfg";
+                       qcom,buswidth = <4>;
+                       qcom,ap-owned;
+                       qcom,bus-dev = <&fab_pcnoc>;
+                       qcom,slv-rpm-id = <ICBID_SLAVE_SNOC_MPU_CFG>;
+               };
+
+               slv_adss_cfg:slv-adss-cfg {
+                       cell-id = <MSM_BUS_SLAVE_ADSS_CFG>;
+                       label = "slv-adss-cfg";
+                       qcom,buswidth = <4>;
+                       qcom,ap-owned;
+                       qcom,bus-dev = <&fab_pcnoc>;
+                       qcom,slv-rpm-id = <ICBID_SLAVE_ADSS_CFG>;
+               };
+
+               slv_adss_apu:slv-adss-apu {
+                       cell-id = <MSM_BUS_SLAVE_ADSS_VMIDMT_CFG>;
+                       label = "slv-adss-apu";
+                       qcom,buswidth = <4>;
+                       qcom,ap-owned;
+                       qcom,bus-dev = <&fab_pcnoc>;
+                       qcom,slv-rpm-id = <ICBID_SLAVE_ADSS_APU>;
+               };
+
+               slv_adss_vmidmt_cfg:slv-adss-vmidmt-cfg {
+                       cell-id = <MSM_BUS_SLAVE_ADSS_VMIDMT_CFG>;
+                       label = "slv-adss-vmidmt-cfg";
+                       qcom,buswidth = <4>;
+                       qcom,ap-owned;
+                       qcom,bus-dev = <&fab_pcnoc>;
+                       qcom,slv-rpm-id = <ICBID_SLAVE_ADSS_VMIDMT_CFG>;
+               };
+
+               slv_qhss_apu_cfg:slv-qhss-apu-cfg {
+                       cell-id = <MSM_BUS_SLAVE_QHSS_APU_CFG>;
+                       label = "slv-qhss-apu-cfg";
+                       qcom,buswidth = <4>;
+                       qcom,ap-owned;
+                       qcom,bus-dev = <&fab_pcnoc>;
+                       qcom,slv-rpm-id = <ICBID_SLAVE_QHSS_APU_CFG>;
+               };
+
+               slv_mdio:slv-mdio {
+                       cell-id = <MSM_BUS_SLAVE_MDIO>;
+                       label = "slv-mdio";
+                       qcom,buswidth = <4>;
+                       qcom,ap-owned;
+                       qcom,bus-dev = <&fab_pcnoc>;
+                       qcom,slv-rpm-id = <ICBID_SLAVE_MDIO>;
+               };
+
+               slv_fephy_cfg:slv-fephy-cfg {
+                       cell-id = <MSM_BUS_SLAVE_FEPHY_CFG>;
+                       label = "slv-fephy-cfg";
+                       qcom,buswidth = <4>;
+                       qcom,ap-owned;
+                       qcom,bus-dev = <&fab_pcnoc>;
+                       qcom,slv-rpm-id = <ICBID_SLAVE_FEPHY_CFG>;
+               };
+
+               slv_srif:slv-srif {
+                       cell-id = <MSM_BUS_SLAVE_SRIF>;
+                       label = "slv-srif";
+                       qcom,buswidth = <4>;
+                       qcom,ap-owned;
+                       qcom,bus-dev = <&fab_pcnoc>;
+                       qcom,slv-rpm-id = <ICBID_SLAVE_SRIF>;
+               };
+
+               slv_ddrc_cfg:slv-ddrc-cfg {
+                       cell-id = <MSM_BUS_SLAVE_DDRC_CFG>;
+                       label = "slv-ddrc-cfg";
+                       qcom,buswidth = <4>;
+                       qcom,ap-owned;
+                       qcom,bus-dev = <&fab_pcnoc>;
+                       qcom,slv-rpm-id = <ICBID_SLAVE_DDRC_CFG>;
+               };
+
+               slv_ddrc_apu_cfg:slv-ddrc-apu-cfg {
+                       cell-id = <MSM_BUS_SLAVE_DDRC_APU_CFG>;
+                       label = "slv-ddrc-apu-cfg";
+                       qcom,buswidth = <4>;
+                       qcom,ap-owned;
+                       qcom,bus-dev = <&fab_pcnoc>;
+                       qcom,slv-rpm-id = <ICBID_SLAVE_DDRC_APU_CFG>;
+               };
+
+               slv_ddrc_mpu0_cfg:slv-ddrc-mpu0-cfg {
+                       cell-id = <MSM_BUS_SLAVE_MPU0_CFG>;
+                       label = "slv-ddrc-mpu0-cfg";
+                       qcom,buswidth = <4>;
+                       qcom,ap-owned;
+                       qcom,bus-dev = <&fab_pcnoc>;
+                       qcom,slv-rpm-id = <ICBID_SLAVE_DDRC_MPU0_CFG>;
+               };
+
+               slv_ddrc_mpu1_cfg:slv-ddrc-mpu1-cfg {
+                       cell-id = <MSM_BUS_SLAVE_MPU1_CFG>;
+                       label = "slv-ddrc-mpu1-cfg";
+                       qcom,buswidth = <4>;
+                       qcom,ap-owned;
+                       qcom,bus-dev = <&fab_pcnoc>;
+                       qcom,slv-rpm-id = <ICBID_SLAVE_DDRC_MPU1_CFG>;
+               };
+
+               slv_ddrc_mpu2_cfg:slv-ddrc-mpu2-cfg {
+                       cell-id = <MSM_BUS_SLAVE_MPU2_CFG>;
+                       label = "slv-ddrc-mpu2-cfg";
+                       qcom,buswidth = <4>;
+                       qcom,ap-owned;
+                       qcom,bus-dev = <&fab_pcnoc>;
+                       qcom,slv-rpm-id = <ICBID_SLAVE_DDRC_MPU2_CFG>;
+               };
+
+               slv_ess_vmidmt_cfg:slv-ess-vmidmt-cfg {
+                       cell-id = <MSM_BUS_SLAVE_ESS_VMIDMT_CFG>;
+                       label = "slv-ess-vmidmt-cfg";
+                       qcom,buswidth = <4>;
+                       qcom,ap-owned;
+                       qcom,bus-dev = <&fab_pcnoc>;
+                       qcom,slv-rpm-id = <ICBID_SLAVE_ESS_VMIDMT_CFG>;
+               };
+
+               slv_ess_apu_cfg:slv-ess-apu-cfg {
+                       cell-id = <MSM_BUS_SLAVE_ESS_APU_CFG>;
+                       label = "slv-ess-apu-cfg";
+                       qcom,buswidth = <4>;
+                       qcom,ap-owned;
+                       qcom,bus-dev = <&fab_pcnoc>;
+                       qcom,slv-rpm-id = <ICBID_SLAVE_ESS_APU_CFG>;
+               };
+
+               slv_usb2_cfg:slv-usb2-cfg {
+                       cell-id = <MSM_BUS_SLAVE_USB2_CFG>;
+                       label = "slv-usb2-cfg";
+                       qcom,buswidth = <4>;
+                       qcom,ap-owned;
+                       qcom,bus-dev = <&fab_pcnoc>;
+                       qcom,slv-rpm-id = <ICBID_SLAVE_USB2_CFG>;
+               };
+
+               slv_blsp_cfg:slv-blsp-cfg {
+                       cell-id = <MSM_BUS_SLAVE_BLSP_CFG>;
+                       label = "slv-blsp-cfg";
+                       qcom,buswidth = <4>;
+                       qcom,ap-owned;
+                       qcom,bus-dev = <&fab_pcnoc>;
+                       qcom,slv-rpm-id = <ICBID_SLAVE_BLSP_CFG>;
+               };
+
+               slv_qpic_cfg:slv-qpic-cfg {
+                       cell-id = <MSM_BUS_SLAVE_QPIC_CFG>;
+                       label = "slv-qpic-cfg";
+                       qcom,buswidth = <4>;
+                       qcom,ap-owned;
+                       qcom,bus-dev = <&fab_pcnoc>;
+                       qcom,slv-rpm-id = <ICBID_SLAVE_QPIC_CFG>;
+               };
+
+               slv_sdcc_cfg:slv-sdcc-cfg {
+                       cell-id = <MSM_BUS_SLAVE_SDCC_CFG>;
+                       label = "slv-sdcc-cfg";
+                       qcom,buswidth = <4>;
+                       qcom,ap-owned;
+                       qcom,bus-dev = <&fab_pcnoc>;
+                       qcom,slv-rpm-id = <ICBID_SLAVE_SDCC_CFG>;
+               };
+
+               slv_wss0_vmidmt_cfg:slv-wss0-vmidmt-cfg {
+                       cell-id = <MSM_BUS_SLAVE_WSS0_VMIDMT_CFG>;
+                       label = "slv-wss0-vmidmt-cfg";
+                       qcom,buswidth = <4>;
+                       qcom,ap-owned;
+                       qcom,bus-dev = <&fab_pcnoc>;
+                       qcom,slv-rpm-id = <ICBID_SLAVE_WSS0_VMIDMT_CFG>;
+               };
+
+               slv_wss0_apu_cfg:slv-wss0-apu-cfg {
+                       cell-id = <MSM_BUS_SLAVE_WSS0_APU_CFG>;
+                       label = "slv-wss0-apu-cfg";
+                       qcom,buswidth = <4>;
+                       qcom,ap-owned;
+                       qcom,bus-dev = <&fab_pcnoc>;
+                       qcom,slv-rpm-id = <ICBID_SLAVE_WSS0_APU_CFG>;
+               };
+
+               slv_wss1_vmidmt_cfg:slv-wss1-vmidmt-cfg {
+                       cell-id = <MSM_BUS_SLAVE_WSS1_VMIDMT_CFG>;
+                       label = "slv-wss1-vmidmt-cfg";
+                       qcom,buswidth = <4>;
+                       qcom,ap-owned;
+                       qcom,bus-dev = <&fab_pcnoc>;
+                       qcom,slv-rpm-id = <ICBID_SLAVE_WSS1_VMIDMT_CFG>;
+               };
+
+               slv_wss1_apu_cfg:slv-wss1-apu-cfg {
+                       cell-id = <MSM_BUS_SLAVE_WSS1_APU_CFG>;
+                       label = "slv-wss1-apu-cfg";
+                       qcom,buswidth = <4>;
+                       qcom,ap-owned;
+                       qcom,bus-dev = <&fab_pcnoc>;
+                       qcom,slv-rpm-id = <ICBID_SLAVE_WSS1_APU_CFG>;
+               };
+
+               slv_pcnoc_snoc:slv-pcnoc-snoc {
+                       cell-id = <MSM_BUS_PNOC_SNOC_SLV>;
+                       label = "slv-pcnoc-snoc";
+                       qcom,buswidth = <4>;
+                       qcom,ap-owned;
+                       qcom,bus-dev = <&fab_pcnoc>;
+                       qcom,slv-rpm-id = <ICBID_SLAVE_PCNOC_SNOC>;
+               };
+
+               slv_srvc_pcnoc:slv-srvc-pcnoc {
+                       cell-id = <MSM_BUS_SLAVE_SRVC_PCNOC>;
+                       label = "slv-srvc-pcnoc";
+                       qcom,buswidth = <4>;
+                       qcom,ap-owned;
+                       qcom,bus-dev = <&fab_pcnoc>;
+                       qcom,slv-rpm-id = <ICBID_SLAVE_SRVC_PCNOC>;
+               };
+
+               slv_snoc_ddrc_m1:slv-snoc-ddrc-m1 {
+                       cell-id = <MSM_BUS_SLAVE_SNOC_DDRC>;
+                       label = "slv-snoc-ddrc-m1";
+                       qcom,buswidth = <8>;
+                       qcom,ap-owned;
+                       qcom,bus-dev = <&fab_snoc>;
+                       qcom,slv-rpm-id = <ICBID_SLAVE_SNOC_DDRC>;
+               };
+
+               slv_a7ss:slv-a7ss {
+                       cell-id = <MSM_BUS_SLAVE_A7SS>;
+                       label = "slv-a7ss";
+                       qcom,buswidth = <4>;
+                       qcom,ap-owned;
+                       qcom,bus-dev = <&fab_snoc>;
+                       qcom,slv-rpm-id = <ICBID_SLAVE_A7SS>;
+               };
+
+               slv_ocimem:slv-ocimem {
+                       cell-id = <MSM_BUS_SLAVE_OCIMEM>;
+                       label = "slv-ocimem";
+                       qcom,buswidth = <8>;
+                       qcom,ap-owned;
+                       qcom,bus-dev = <&fab_snoc>;
+                       qcom,slv-rpm-id = <ICBID_SLAVE_OCIMEM>;
+               };
+
+               slv_wss0_cfg:slv-wss0-cfg {
+                       cell-id = <MSM_BUS_SLAVE_WSS0_CFG>;
+                       label = "slv-wss0-cfg";
+                       qcom,buswidth = <4>;
+                       qcom,ap-owned;
+                       qcom,bus-dev = <&fab_snoc>;
+                       qcom,slv-rpm-id = <ICBID_SLAVE_WSS0_CFG>;
+               };
+
+               slv_wss1_cfg:slv-wss1-cfg {
+                       cell-id = <MSM_BUS_SLAVE_WSS1_CFG>;
+                       label = "slv-wss1-cfg";
+                       qcom,buswidth = <4>;
+                       qcom,ap-owned;
+                       qcom,bus-dev = <&fab_snoc>;
+                       qcom,slv-rpm-id = <ICBID_SLAVE_WSS1_CFG>;
+               };
+
+               slv_pcie:slv-pcie {
+                       cell-id = <MSM_BUS_SLAVE_PCIE>;
+                       label = "slv-pcie";
+                       qcom,buswidth = <8>;
+                       qcom,ap-owned;
+                       qcom,bus-dev = <&fab_snoc>;
+                       qcom,slv-rpm-id = <ICBID_SLAVE_PCIE>;
+               };
+
+               slv_usb3_cfg:slv-usb3-cfg {
+                       cell-id = <MSM_BUS_SLAVE_USB3_CFG>;
+                       label = "slv-usb3-cfg";
+                       qcom,buswidth = <4>;
+                       qcom,ap-owned;
+                       qcom,bus-dev = <&fab_snoc>;
+                       qcom,slv-rpm-id = <ICBID_SLAVE_USB3_CFG>;
+               };
+
+               slv_crypto_cfg:slv-crypto-cfg {
+                       cell-id = <MSM_BUS_SLAVE_CRYPTO_CFG>;
+                       label = "slv-crypto-cfg";
+                       qcom,buswidth = <4>;
+                       qcom,ap-owned;
+                       qcom,bus-dev = <&fab_snoc>;
+                       qcom,slv-rpm-id = <ICBID_SLAVE_CRYPTO_CFG>;
+               };
+
+               slv_ess_cfg:slv-ess-cfg {
+                       cell-id = <MSM_BUS_SLAVE_ESS_CFG>;
+                       label = "slv-ess-cfg";
+                       qcom,buswidth = <4>;
+                       qcom,ap-owned;
+                       qcom,bus-dev = <&fab_snoc>;
+                       qcom,slv-rpm-id = <ICBID_SLAVE_ESS_CFG>;
+               };
+
+               slv_qdss_stm:slv-qdss-stm {
+                       cell-id = <MSM_BUS_SLAVE_QDSS_STM>;
+                       label = "slv-qdss-stm";
+                       qcom,buswidth = <4>;
+                       qcom,ap-owned;
+                       qcom,bus-dev = <&fab_snoc>;
+                       qcom,slv-rpm-id = <ICBID_SLAVE_QDSS_STM>;
+               };
+
+               slv_srvc_snoc:slv-srvc-snoc {
+                       cell-id = <MSM_BUS_SLAVE_SRVC_SNOC>;
+                       label = "slv-srvc-snoc";
+                       qcom,buswidth = <8>;
+                       qcom,ap-owned;
+                       qcom,bus-dev = <&fab_snoc>;
+                       qcom,slv-rpm-id = <ICBID_SLAVE_SRVC_SNOC>;
+               };
+       };
+};
+
+};
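Editor's note: the bus topology file above is data-only. Every node carries a cell-id plus RPM master/slave ids from dt-bindings/msm/msm-bus-ids.h, names its owning fabric through qcom,bus-dev, and lists the nodes it can reach through qcom,connections, while qcom,blacklist prunes routes that must never be programmed. For illustration only, one more PCNOC master would be added by repeating that shape; the sketch below reuses existing ids as placeholders and is not part of this patch.

	mas_example: mas-example {
		/* placeholder ids reused from existing nodes in the file above */
		cell-id = <MSM_BUS_MASTER_BLSP_BAM>;
		label = "mas-example";
		qcom,buswidth = <4>;
		qcom,ap-owned;
		/* aggregate through pcnoc-m-0, which forwards to slv-pcnoc-snoc */
		qcom,connections = <&pcnoc_m_0>;
		qcom,bus-dev = <&fab_pcnoc>;
		qcom,mas-rpm-id = <ICBID_MASTER_BLSP_BAM>;
	};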
diff --git a/target/linux/ipq40xx/files-4.14/arch/arm/boot/dts/qcom-ipq4019-fritz4040.dts b/target/linux/ipq40xx/files-4.14/arch/arm/boot/dts/qcom-ipq4019-fritz4040.dts
new file mode 100644 (file)
index 0000000..f5ca3d5
--- /dev/null
@@ -0,0 +1,322 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+#include "qcom-ipq4019.dtsi"
+#include "qcom-ipq4019-bus.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/soc/qcom,tcsr.h>
+
+/ {
+       model = "AVM FRITZ!Box 4040";
+       compatible = "avm,fritzbox-4040", "qcom,ipq4019";
+
+       aliases {
+               led-boot = &power;
+               led-failsafe = &flash;
+               led-running = &power;
+               led-upgrade = &flash;
+       };
+
+       reserved-memory {
+               #address-cells = <0x1>;
+               #size-cells = <0x1>;
+               ranges;
+
+               tz_apps@87b80000 {
+                       reg = <0x87b80000 0x280000>;
+                       reusable;
+               };
+
+               smem@87e00000 {
+                       reg = <0x87e00000 0x080000>;
+                       no-map;
+               };
+
+               tz@87e80000 {
+                       reg = <0x87e80000 0x180000>;
+                       no-map;
+               };
+       };
+
+       soc {
+               mdio@90000 {
+                       status = "okay";
+               };
+
+               ess-psgmii@98000 {
+                       status = "okay";
+               };
+
+               tcsr@1949000 {
+                       compatible = "qcom,tcsr";
+                       reg = <0x1949000 0x100>;
+                       qcom,wifi_glb_cfg = <TCSR_WIFI_GLB_CFG>;
+               };
+
+               tcsr@194b000 {
+                       compatible = "qcom,tcsr";
+                       reg = <0x194b000 0x100>;
+                       qcom,usb-hsphy-mode-select = <TCSR_USB_HSPHY_HOST_MODE>;
+               };
+
+               ess_tcsr@1953000 {
+                       compatible = "qcom,tcsr";
+                       reg = <0x1953000 0x1000>;
+                       qcom,ess-interface-select = <TCSR_ESS_PSGMII>;
+               };
+
+               tcsr@1957000 {
+                       compatible = "qcom,tcsr";
+                       reg = <0x1957000 0x100>;
+                       qcom,wifi_noc_memtype_m0_m2 = <TCSR_WIFI_NOC_MEMTYPE_M0_M2>;
+               };
+
+               usb2@60f8800 {
+                       status = "ok";
+               };
+
+               serial@78af000 {
+                       pinctrl-0 = <&serial_pins>;
+                       pinctrl-names = "default";
+                       status = "ok";
+               };
+
+               usb3@8af8800 {
+                       status = "ok";
+               };
+
+               crypto@8e3a000 {
+                       status = "ok";
+               };
+
+               wifi@a000000 {
+                       status = "okay";
+               };
+
+               wifi@a800000 {
+                       status = "okay";
+               };
+
+               watchdog@b017000 {
+                       status = "ok";
+               };
+
+               qca8075: ess-switch@c000000 {
+                       status = "okay";
+
+                       #gpio-cells = <2>;
+                       gpio-controller;
+
+                       enable-usb-power {
+                               gpio-hog;
+                               line-name = "enable USB3 power";
+                               gpios = <7 GPIO_ACTIVE_HIGH>;
+                               output-high;
+                       };
+               };
+
+               edma@c080000 {
+                       status = "okay";
+               };
+       };
+
+       gpio-keys {
+               compatible = "gpio-keys";
+
+               wlan {
+                       label = "wlan";
+                       gpios = <&tlmm 58 GPIO_ACTIVE_LOW>;
+                       linux,code = <KEY_RFKILL>;
+               };
+
+               wps {
+                       label = "wps";
+                       gpios = <&tlmm 63 GPIO_ACTIVE_LOW>;
+                       linux,code = <KEY_WPS_BUTTON>;
+               };
+       };
+
+       gpio-leds {
+               compatible = "gpio-leds";
+
+               wlan {
+                       label = "fritz4040:green:wlan";
+                       gpios = <&qca8075 1 GPIO_ACTIVE_HIGH>;
+               };
+
+               panic: info_red {
+                       label = "fritz4040:red:info";
+                       gpios = <&qca8075 3 GPIO_ACTIVE_HIGH>;
+                       panic-indicator;
+               };
+
+               wan {
+                       label = "fritz4040:green:wan";
+                       gpios = <&qca8075 5 GPIO_ACTIVE_HIGH>;
+               };
+
+               power: power {
+                       label = "fritz4040:green:power";
+                       gpios = <&qca8075 11 GPIO_ACTIVE_HIGH>;
+               };
+
+               lan {
+                       label = "fritz4040:green:lan";
+                       gpios = <&qca8075 13 GPIO_ACTIVE_HIGH>;
+               };
+
+               flash: info_amber {
+                       label = "fritz4040:amber:info";
+                       gpios = <&qca8075 15 GPIO_ACTIVE_HIGH>;
+               };
+       };
+};
+
+&tlmm {
+       serial_pins: serial_pinmux {
+               mux {
+                       pins = "gpio60", "gpio61";
+                       function = "blsp_uart0";
+                       bias-disable;
+               };
+       };
+
+       spi_0_pins: spi_0_pinmux {
+               mux {
+                       function = "blsp_spi0";
+                       pins = "gpio55", "gpio56", "gpio57";
+                       drive-strength = <12>;
+                       bias-disable;
+               };
+
+               mux_cs {
+                       function = "gpio";
+                       pins = "gpio54";
+                       drive-strength = <2>;
+                       bias-disable;
+                       output-high;
+               };
+       };
+};
+
+&cryptobam {
+       status = "ok";
+};
+
+&blsp_dma {
+       status = "ok";
+};
+
+&spi_0 { /* BLSP1 QUP1 */
+       pinctrl-0 = <&spi_0_pins>;
+       pinctrl-names = "default";
+       status = "ok";
+       cs-gpios = <&tlmm 54 0>;
+
+       mx25l25635f@0 {
+               compatible = "jedec,spi-nor";
+               #address-cells = <1>;
+               #size-cells = <0>;
+               reg = <0>;
+               spi-max-frequency = <24000000>;
+               status = "ok";
+               m25p,fast-read;
+
+               partitions {
+                       compatible = "fixed-partitions";
+                       #address-cells = <1>;
+                       #size-cells = <1>;
+
+                       partition0@0 {
+                               label = "SBL1";
+                               reg = <0x00000000 0x00040000>;
+                               read-only;
+                       };
+                       partition1@40000 {
+                               label = "MIBIB";
+                               reg = <0x00040000 0x00020000>;
+                               read-only;
+                       };
+                       partition2@60000 {
+                               label = "QSEE";
+                               reg = <0x00060000 0x00060000>;
+                               read-only;
+                       };
+                       partition3@c0000 {
+                               label = "CDT";
+                               reg = <0x000c0000 0x00010000>;
+                               read-only;
+                       };
+                       partition4@d0000 {
+                               label = "DDRPARAMS";
+                               reg = <0x000d0000 0x00010000>;
+                               read-only;
+                       };
+                       partition5@e0000 {
+                               label = "APPSBLENV"; /* uboot env - empty */
+                               reg = <0x000e0000 0x00010000>;
+                               read-only;
+                       };
+                       partition6@f0000 {
+                               label = "urlader"; /* APPSBL */
+                               reg = <0x000f0000 0x0002dc00>;
+                               read-only;
+                       };
+                       partition7@11dc00 {
+                               /* make a backup of this partition! */
+                               label = "urlader_config";
+                               reg = <0x0011dc00 0x00002400>;
+                               read-only;
+                       };
+                       partition8@120000 {
+                               label = "tffs1";
+                               reg = <0x00120000 0x00080000>;
+                               read-only;
+                       };
+                       partition9@1a0000 {
+                               label = "tffs2";
+                               reg = <0x001a0000 0x00080000>;
+                               read-only;
+                       };
+                       partition10@220000 {
+                               label = "uboot";
+                               reg = <0x00220000 0x00080000>;
+                               read-only;
+                       };
+                       partition11@2A0000 {
+                               label = "firmware";
+                               reg = <0x002a0000 0x01c60000>;
+                       };
+                       partition12@1f00000 {
+                               label = "jffs2";
+                               reg = <0x01f00000 0x00100000>;
+                       };
+               };
+       };
+};
+
+&usb3_ss_phy {
+       status = "ok";
+};
+
+&usb3_hs_phy {
+       status = "ok";
+};
+
+&usb2_hs_phy {
+       status = "ok";
+};
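
The SPI-NOR map above is laid out back to back on the 32 MiB MX25L25635F: each partition starts where the previous one ends (with the urlader size read as 0x2dc00 so that it meets urlader_config at 0x11dc00), and the 0x01c60000-byte "firmware" partition is the same 29753344 bytes that IMAGE_SIZE declares for this board in the image Makefile further down. A small standalone sketch, plain C and not part of the commit, with all offsets and sizes copied from the DTS above, that walks the table and flags any gap or overlap:

#include <stdio.h>

struct part { const char *label; unsigned long off, size; };

/* offsets and sizes copied from the fritz4040 partition nodes above */
static const struct part parts[] = {
	{ "SBL1",           0x00000000, 0x00040000 },
	{ "MIBIB",          0x00040000, 0x00020000 },
	{ "QSEE",           0x00060000, 0x00060000 },
	{ "CDT",            0x000c0000, 0x00010000 },
	{ "DDRPARAMS",      0x000d0000, 0x00010000 },
	{ "APPSBLENV",      0x000e0000, 0x00010000 },
	{ "urlader",        0x000f0000, 0x0002dc00 },
	{ "urlader_config", 0x0011dc00, 0x00002400 },
	{ "tffs1",          0x00120000, 0x00080000 },
	{ "tffs2",          0x001a0000, 0x00080000 },
	{ "uboot",          0x00220000, 0x00080000 },
	{ "firmware",       0x002a0000, 0x01c60000 },
	{ "jffs2",          0x01f00000, 0x00100000 },
};

int main(void)
{
	/* every partition must start exactly where the previous one ends */
	for (unsigned int i = 1; i < sizeof(parts) / sizeof(parts[0]); i++)
		if (parts[i - 1].off + parts[i - 1].size != parts[i].off)
			printf("gap/overlap before %s\n", parts[i].label);
	printf("flash ends at 0x%lx (MX25L25635F is 32 MiB = 0x2000000)\n",
	       parts[12].off + parts[12].size);
	return 0;
}

Built with any stock C compiler, the check prints only the final line, i.e. the map is gapless up to the 32 MiB boundary.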
diff --git a/target/linux/ipq40xx/files-4.14/arch/arm/boot/dts/qcom-ipq4019-gl-b1300.dts b/target/linux/ipq40xx/files-4.14/arch/arm/boot/dts/qcom-ipq4019-gl-b1300.dts
new file mode 100644 (file)
index 0000000..53824e3
--- /dev/null
@@ -0,0 +1,316 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+#include "qcom-ipq4019.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/soc/qcom,tcsr.h>
+
+/ {
+       model = "GL.iNet GL-B1300";
+       compatible = "glinet,gl-b1300", "qcom,ipq4019";
+
+       memory {
+               device_type = "memory";
+               reg = <0x80000000 0x10000000>;
+       };
+
+       reserved-memory {
+               #address-cells = <0x1>;
+               #size-cells = <0x1>;
+               ranges;
+
+               apps_bl@87000000 {
+                       reg = <0x87000000 0x400000>;
+                       no-map;
+               };
+
+               sbl@87400000 {
+                       reg = <0x87400000 0x100000>;
+                       no-map;
+               };
+
+               cnss_debug@87500000 {
+                       reg = <0x87500000 0x600000>;
+                       no-map;
+               };
+
+               cpu_context_dump@87b00000 {
+                       reg = <0x87b00000 0x080000>;
+                       no-map;
+               };
+
+               tz_apps@87b80000 {
+                       reg = <0x87b80000 0x280000>;
+                       no-map;
+               };
+
+               smem@87e00000 {
+                       reg = <0x87e00000 0x080000>;
+                       no-map;
+               };
+
+               tz@87e80000 {
+                       reg = <0x87e80000 0x180000>;
+                       no-map;
+               };
+       };
+
+       soc {
+               tcsr@194b000 {
+                       /* select hostmode */
+                       compatible = "qcom,tcsr";
+                       reg = <0x194b000 0x100>;
+                       qcom,usb-hsphy-mode-select = <TCSR_USB_HSPHY_HOST_MODE>;
+                       status = "ok";
+               };
+
+               ess_tcsr@1953000 {
+                       compatible = "qcom,tcsr";
+                       reg = <0x1953000 0x1000>;
+                       qcom,ess-interface-select = <TCSR_ESS_PSGMII>;
+               };
+
+               tcsr@1949000 {
+                       compatible = "qcom,tcsr";
+                       reg = <0x1949000 0x100>;
+                       qcom,wifi_glb_cfg = <TCSR_WIFI_GLB_CFG>;
+               };
+
+               tcsr@1957000 {
+                       compatible = "qcom,tcsr";
+                       reg = <0x1957000 0x100>;
+                       qcom,wifi_noc_memtype_m0_m2 = <TCSR_WIFI_NOC_MEMTYPE_M0_M2>;
+               };
+
+               pinctrl@1000000 {
+                       serial_pins: serial_pinmux {
+                               mux {
+                                       pins = "gpio60", "gpio61";
+                                       function = "blsp_uart0";
+                                       bias-disable;
+                               };
+                       };
+
+                       spi_0_pins: spi_0_pinmux {
+                               pinmux {
+                                       function = "blsp_spi0";
+                                       pins = "gpio55", "gpio56", "gpio57";
+                               };
+                               pinmux_cs {
+                                       function = "gpio";
+                                       pins = "gpio54";
+                               };
+                               pinconf {
+                                       pins = "gpio55", "gpio56", "gpio57";
+                                       drive-strength = <12>;
+                                       bias-disable;
+                               };
+                               pinconf_cs {
+                                       pins = "gpio54";
+                                       drive-strength = <2>;
+                                       bias-disable;
+                                       output-high;
+                               };
+                       };
+               };
+
+               blsp_dma: dma@7884000 {
+                       status = "ok";
+               };
+
+               spi_0: spi@78b5000 {
+                       pinctrl-0 = <&spi_0_pins>;
+                       pinctrl-names = "default";
+                       status = "ok";
+                       cs-gpios = <&tlmm 54 0>;
+               };
+
+               serial@78af000 {
+                       pinctrl-0 = <&serial_pins>;
+                       pinctrl-names = "default";
+                       status = "ok";
+               };
+
+               cryptobam: dma@8e04000 {
+                       status = "ok";
+               };
+
+               crypto@8e3a000 {
+                       status = "ok";
+               };
+
+               watchdog@b017000 {
+                       status = "ok";
+               };
+
+               usb3_ss_phy: ssphy@9a000 {
+                       status = "ok";
+               };
+
+               usb3_hs_phy: hsphy@a6000 {
+                       status = "ok";
+               };
+
+               usb3: usb3@8af8800 {
+                       status = "ok";
+               };
+
+               usb2_hs_phy: hsphy@a8000 {
+                       status = "ok";
+               };
+
+               usb2: usb2@60f8800 {
+                       status = "ok";
+               };
+
+               mdio@90000 {
+                       status = "okay";
+               };
+
+               ess-switch@c000000 {
+                       status = "okay";
+               };
+
+               ess-psgmii@98000 {
+                       status = "okay";
+               };
+
+               edma@c080000 {
+                       status = "okay";
+               };
+
+               wifi@a000000 {
+                       status = "okay";
+               };
+
+               wifi@a800000 {
+                       status = "okay";
+               };
+       };
+
+       gpio-keys {
+               compatible = "gpio-keys";
+
+               wps {
+                       label = "wps";
+                       gpios = <&tlmm 5 GPIO_ACTIVE_LOW>;
+                       linux,code = <KEY_WPS_BUTTON>;
+               };
+               reset {
+                       label = "reset";
+                       gpios = <&tlmm 63 GPIO_ACTIVE_LOW>;
+                       linux,code = <KEY_RESTART>;
+               };
+       };
+
+       gpio-leds {
+               compatible = "gpio-leds";
+
+               power {
+                       label = "gl-b1300:green:power";
+                       gpios = <&tlmm 4 GPIO_ACTIVE_HIGH>;
+                       default-state = "on";
+               };
+               mesh {
+                       label = "gl-b1300:green:mesh";
+                       gpios = <&tlmm 3 GPIO_ACTIVE_HIGH>;
+               };
+               wlan {
+                       label = "gl-b1300:green:wlan";
+                       gpios = <&tlmm 2 GPIO_ACTIVE_HIGH>;
+               };
+       };
+};
+
+&spi_0 {
+       mx25l25635f@0 {
+               compatible = "mx25l25635f", "jedec,spi-nor";
+               #address-cells = <1>;
+               #size-cells = <1>;
+               reg = <0>;
+               spi-max-frequency = <24000000>;
+
+               SBL1@0 {
+                       label = "SBL1";
+                       reg = <0x0 0x40000>;
+                       read-only;
+               };
+               MIBIB@40000 {
+                       label = "MIBIB";
+                       reg = <0x40000 0x20000>;
+                       read-only;
+               };
+               QSEE@60000 {
+                       label = "QSEE";
+                       reg = <0x60000 0x60000>;
+                       read-only;
+               };
+               CDT@c0000 {
+                       label = "CDT";
+                       reg = <0xc0000 0x10000>;
+                       read-only;
+               };
+               DDRPARAMS@d0000 {
+                       label = "DDRPARAMS";
+                       reg = <0xd0000 0x10000>;
+                       read-only;
+               };
+               APPSBLENV@e0000 {
+                       label = "APPSBLENV";
+                       reg = <0xe0000 0x10000>;
+                       read-only;
+               };
+               APPSBL@f0000 {
+                       label = "APPSBL";
+                       reg = <0xf0000 0x80000>;
+                       read-only;
+               };
+               ART@170000 {
+                       label = "ART";
+                       reg = <0x170000 0x10000>;
+                       read-only;
+               };
+               kernel@180000 {
+                       label = "kernel";
+                       reg = <0x180000 0x400000>;
+               };
+               rootfs@580000 {
+                       label = "rootfs";
+                       reg = <0x580000 0x1a80000>;
+               };
+               firmware@180000 {
+                       label = "firmware";
+                       reg = <0x180000 0x1e80000>;
+               };
+       };
+};
+
+&gmac0 {
+       qcom,phy_mdio_addr = <4>;
+       qcom,poll_required = <1>;
+       qcom,forced_speed = <1000>;
+       qcom,forced_duplex = <1>;
+       vlan_tag = <2 0x20>;
+};
+
+&gmac1 {
+       qcom,phy_mdio_addr = <3>;
+       qcom,poll_required = <1>;
+       qcom,forced_speed = <1000>;
+       qcom,forced_duplex = <1>;
+       vlan_tag = <1 0x10>;
+};
\ No newline at end of file
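
In the GL-B1300 map above, "firmware" deliberately overlays "kernel" and "rootfs": it starts at the same 0x180000 offset as the kernel, and 0x400000 + 0x1a80000 = 0x1e80000 is exactly the firmware partition's size. The 0x400000 kernel slot also matches the 4096k KERNEL_SIZE set for this device in the image Makefile below. A tiny standalone check of that arithmetic, plain C with values copied from the DTS and not part of the commit:

#include <stdio.h>

int main(void)
{
	unsigned long kernel   = 0x400000;  /* kernel@180000                     */
	unsigned long rootfs   = 0x1a80000; /* rootfs@580000                     */
	unsigned long firmware = 0x1e80000; /* firmware@180000, overlays both    */

	printf("kernel + rootfs = 0x%lx, firmware = 0x%lx -> %s\n",
	       kernel + rootfs, firmware,
	       kernel + rootfs == firmware ? "overlay is consistent" : "mismatch");
	printf("kernel slot = %lu KiB (KERNEL_SIZE is 4096k)\n", kernel / 1024);
	return 0;
}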
diff --git a/target/linux/ipq40xx/image/Makefile b/target/linux/ipq40xx/image/Makefile
new file mode 100644 (file)
index 0000000..e584b6f
--- /dev/null
@@ -0,0 +1,104 @@
+include $(TOPDIR)/rules.mk
+include $(INCLUDE_DIR)/image.mk
+
+define Device/Default
+       PROFILES := Default
+       KERNEL_DEPENDS = $$(wildcard $(DTS_DIR)/$$(DEVICE_DTS).dts)
+       KERNEL_INITRAMFS_PREFIX := $$(IMG_PREFIX)-$(1)-initramfs
+       KERNEL_PREFIX := $$(IMAGE_PREFIX)
+       KERNEL_LOADADDR := 0x80208000
+       SUPPORTED_DEVICES := $(subst _,$(comma),$(1))
+       IMAGE/sysupgrade.bin = sysupgrade-tar | append-metadata
+       IMAGE/sysupgrade.bin/squashfs :=
+endef
+
+define Device/FitImage
+       KERNEL_SUFFIX := -fit-uImage.itb
+       KERNEL = kernel-bin | gzip | fit gzip $$(DTS_DIR)/$$(DEVICE_DTS).dtb
+       KERNEL_NAME := Image
+endef
+
+define Device/FitImageLzma
+       KERNEL_SUFFIX := -fit-uImage.itb
+       KERNEL = kernel-bin | lzma | fit lzma $$(DTS_DIR)/$$(DEVICE_DTS).dtb
+       KERNEL_NAME := Image
+endef
+
+define Device/UbiFit
+       KERNEL_IN_UBI := 1
+       IMAGES := nand-factory.ubi nand-sysupgrade.bin
+       IMAGE/nand-factory.ubi := append-ubi
+       IMAGE/nand-sysupgrade.bin := sysupgrade-tar | append-metadata
+endef
+
+define Device/avm_fritzbox-4040
+       $(call Device/FitImageLzma)
+       DEVICE_DTS := qcom-ipq4019-fritz4040
+       BLOCKSIZE := 4k
+       PAGESIZE := 256
+       BOARD_NAME := fritz4040
+       DEVICE_TITLE := AVM Fritz!Box 4040
+       IMAGE_SIZE := 29753344
+       IMAGES = sysupgrade.bin
+       IMAGE/sysupgrade.bin := append-kernel | append-rootfs | pad-rootfs | append-metadata
+       DEVICE_PACKAGES := ipq-wifi-avm_fritzbox-4040 fritz-tffs fritz-caldata u-boot-fritz4040
+endef
+TARGET_DEVICES += avm_fritzbox-4040
+
+define Device/glinet_gl-b1300
+       DEVICE_TITLE := GL.iNet GL-B1300
+       BOARD_NAME := gl-b1300
+       DEVICE_DTS := qcom-ipq4019-gl-b1300
+       KERNEL_INSTALL := 1
+       KERNEL_SIZE := 4096k
+       IMAGE_SIZE := 26624k
+       $(call Device/FitImage)
+       IMAGES := sysupgrade.bin
+       IMAGE/sysupgrade.bin := append-kernel | pad-to $$$${KERNEL_SIZE} | append-rootfs | pad-rootfs | append-metadata
+       DEVICE_PACKAGES := ipq-wifi-glinet_gl-b1300
+endef
+TARGET_DEVICES += glinet_gl-b1300
+
+define Device/openmesh_a42
+       $(call Device/FitImageLzma)
+       DEVICE_DTS := qcom-ipq4019-a42
+       BLOCKSIZE := 64k
+       SUPPORTED_DEVICES := openmesh,a42
+       DEVICE_TITLE := OpenMesh A42
+       KERNEL = kernel-bin | lzma | fit lzma $$(DTS_DIR)/$$(DEVICE_DTS).dtb | pad-to $$(BLOCKSIZE)
+       IMAGE_SIZE := 15616k
+       IMAGES = factory.bin sysupgrade.bin
+       IMAGE/factory.bin := append-rootfs | pad-rootfs | openmesh-image ce_type=A42
+       IMAGE/sysupgrade.bin/squashfs := append-rootfs | pad-rootfs | sysupgrade-tar rootfs=$$$$@ | append-metadata
+       DEVICE_PACKAGES := ath10k-firmware-qca4019 uboot-envtools
+endef
+TARGET_DEVICES += openmesh_a42
+
+define Device/qcom_ap-dk01.1-c1
+       DEVICE_TITLE := QCA AP-DK01.1-C1
+       BOARD_NAME := ap-dk01.1-c1
+       DEVICE_DTS := qcom-ipq4019-ap.dk01.1-c1
+       KERNEL_INSTALL := 1
+       KERNEL_SIZE := 4096k
+       IMAGE_SIZE := 26624k
+       $(call Device/FitImage)
+       IMAGES := sysupgrade.bin
+       IMAGE/sysupgrade.bin := append-kernel | pad-to $$$${KERNEL_SIZE} | append-rootfs | pad-rootfs | append-metadata
+       DEVICE_PACKAGES := ath10k-firmware-qca4019
+endef
+TARGET_DEVICES += qcom_ap-dk01.1-c1
+
+define Device/qcom_ap-dk04.1-c1
+       $(call Device/FitImage)
+       $(call Device/UbiFit)
+       DEVICE_DTS := qcom-ipq4019-ap.dk04.1-c1
+       KERNEL_INSTALL := 1
+       KERNEL_SIZE := 4048k
+       BLOCKSIZE := 128k
+       PAGESIZE := 2048
+       BOARD_NAME := ap-dk04.1-c1
+       DEVICE_TITLE := QCA AP-DK04.1-C1
+endef
+TARGET_DEVICES += qcom_ap-dk04.1-c1
+
+$(eval $(call BuildImage))
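
For the NOR boards built from FIT images, the sysupgrade recipe is essentially a concatenation: the GL-B1300 rule appends the kernel, pads to KERNEL_SIZE, then appends and pads the rootfs before the metadata step. A rough standalone sketch of what the first three steps produce, plain C and not part of the commit; the input file names are made up, zero padding is assumed for pad-to, and pad-rootfs/append-metadata are left out:

#include <stdio.h>
#include <stdlib.h>

#define KERNEL_SIZE (4096 * 1024)	/* KERNEL_SIZE := 4096k above */

/* copy one input file onto the end of the output, return bytes copied */
static long append(FILE *dst, const char *path)
{
	FILE *src = fopen(path, "rb");
	long n = 0;
	int c;

	if (!src) { perror(path); exit(1); }
	while ((c = fgetc(src)) != EOF) { fputc(c, dst); n++; }
	fclose(src);
	return n;
}

int main(void)
{
	FILE *out = fopen("sysupgrade-sketch.bin", "wb");
	long n;

	if (!out) { perror("sysupgrade-sketch.bin"); return 1; }
	n = append(out, "kernel.itb");		/* append-kernel (FIT image)      */
	while (n++ < KERNEL_SIZE)		/* pad-to KERNEL_SIZE, zero fill  */
		fputc(0, out);
	append(out, "root.squashfs");		/* append-rootfs                  */
	fclose(out);
	return 0;
}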
diff --git a/target/linux/ipq40xx/modules.mk b/target/linux/ipq40xx/modules.mk
new file mode 100644 (file)
index 0000000..43f3c9c
--- /dev/null
@@ -0,0 +1,33 @@
+define KernelPackage/usb-dwc3-of-simple
+  TITLE:=DWC3 USB simple OF driver
+  DEPENDS:=+kmod-usb-dwc3
+  KCONFIG:= CONFIG_USB_DWC3_OF_SIMPLE
+  FILES:= $(LINUX_DIR)/drivers/usb/dwc3/dwc3-of-simple.ko
+  AUTOLOAD:=$(call AutoLoad,53,dwc3-of-simple,1)
+  $(call AddDepends/usb)
+endef
+
+define KernelPackage/usb-dwc3-of-simple/description
+ This driver provides generic platform glue for the integrated DesignWare
+ USB3 IP Core.
+endef
+
+$(eval $(call KernelPackage,usb-dwc3-of-simple))
+
+define KernelPackage/usb-phy-qcom-dwc3
+  TITLE:=DWC3 USB QCOM PHY driver
+  DEPENDS:=@TARGET_ipq806x +kmod-usb-dwc3-of-simple
+  KCONFIG:= CONFIG_PHY_QCOM_DWC3
+  FILES:= \
+    $(LINUX_DIR)/drivers/phy/phy-qcom-dwc3.ko@lt4.13 \
+    $(LINUX_DIR)/drivers/phy/qualcomm/phy-qcom-dwc3.ko@ge4.13
+  AUTOLOAD:=$(call AutoLoad,45,phy-qcom-dwc3,1)
+  $(call AddDepends/usb)
+endef
+
+define KernelPackage/usb-phy-qcom-dwc3/description
+ This driver provides support for the integrated DesignWare
+ USB3 IP Core within the QCOM SoCs.
+endef
+
+$(eval $(call KernelPackage,usb-phy-qcom-dwc3))
diff --git a/target/linux/ipq40xx/patches-4.14/017-qcom-ipq4019-add-cpu-operating-points-for-cpufreq-su.patch b/target/linux/ipq40xx/patches-4.14/017-qcom-ipq4019-add-cpu-operating-points-for-cpufreq-su.patch
new file mode 100644 (file)
index 0000000..138a2dd
--- /dev/null
@@ -0,0 +1,77 @@
+From 18c3b42575a154343831aec0637aab00e19440e1 Mon Sep 17 00:00:00 2001
+From: Matthew McClintock <mmcclint@codeaurora.org>
+Date: Thu, 17 Mar 2016 15:01:09 -0500
+Subject: [PATCH 17/69] qcom: ipq4019: add cpu operating points for cpufreq
+ support
+
+This adds some operating points for cpu frequency scaling
+
+Signed-off-by: Matthew McClintock <mmcclint@codeaurora.org>
+---
+ arch/arm/boot/dts/qcom-ipq4019.dtsi | 34 ++++++++++++++++++++++++++--------
+ 1 file changed, 26 insertions(+), 8 deletions(-)
+
+--- a/arch/arm/boot/dts/qcom-ipq4019.dtsi
++++ b/arch/arm/boot/dts/qcom-ipq4019.dtsi
+@@ -40,14 +40,7 @@
+                       reg = <0x0>;
+                       clocks = <&gcc GCC_APPS_CLK_SRC>;
+                       clock-frequency = <0>;
+-                      operating-points = <
+-                              /* kHz  uV (fixed) */
+-                              48000   1100000
+-                              200000  1100000
+-                              500000  1100000
+-                              666000  1100000
+-                      >;
+-                      clock-latency = <256000>;
++                      operating-points-v2 = <&cpu0_opp_table>;
+               };
+               cpu@1 {
+@@ -59,6 +52,7 @@
+                       reg = <0x1>;
+                       clocks = <&gcc GCC_APPS_CLK_SRC>;
+                       clock-frequency = <0>;
++                      operating-points-v2 = <&cpu0_opp_table>;
+               };
+               cpu@2 {
+@@ -70,6 +64,7 @@
+                       reg = <0x2>;
+                       clocks = <&gcc GCC_APPS_CLK_SRC>;
+                       clock-frequency = <0>;
++                      operating-points-v2 = <&cpu0_opp_table>;
+               };
+               cpu@3 {
+@@ -81,6 +76,29 @@
+                       reg = <0x3>;
+                       clocks = <&gcc GCC_APPS_CLK_SRC>;
+                       clock-frequency = <0>;
++                      operating-points-v2 = <&cpu0_opp_table>;
++              };
++      };
++
++      cpu0_opp_table: opp_table0 {
++              compatible = "operating-points-v2";
++              opp-shared;
++
++              opp-48000000 {
++                      opp-hz = /bits/ 64 <48000000>;
++                      clock-latency-ns = <256000>;
++              };
++              opp-200000000 {
++                      opp-hz = /bits/ 64 <200000000>;
++                      clock-latency-ns = <256000>;
++              };
++              opp-500000000 {
++                      opp-hz = /bits/ 64 <500000000>;
++                      clock-latency-ns = <256000>;
++              };
++              opp-716800000 {
++                      opp-hz = /bits/ 64 <716800000>;
++                      clock-latency-ns = <256000>;
+               };
+       };
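
With the operating-points-v2 table from this patch in place, cpufreq should expose the four OPPs (48, 200, 500 and 716.8 MHz) through the standard cpufreq sysfs interface. A minimal userspace check, plain C; the sysfs path is generic cpufreq and the file is only present once a driver has registered a frequency table, nothing here is specific to this commit:

#include <stdio.h>

int main(void)
{
	const char *path =
	    "/sys/devices/system/cpu/cpu0/cpufreq/scaling_available_frequencies";
	char buf[256];
	FILE *f = fopen(path, "r");

	if (!f) { perror(path); return 1; }
	/* values are printed in kHz and should match the opp-* nodes above */
	while (fgets(buf, sizeof(buf), f))
		fputs(buf, stdout);
	fclose(f);
	return 0;
}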
diff --git a/target/linux/ipq40xx/patches-4.14/030-mtd-nand-Use-standard-large-page-OOB-layout-when-usi.patch b/target/linux/ipq40xx/patches-4.14/030-mtd-nand-Use-standard-large-page-OOB-layout-when-usi.patch
new file mode 100644 (file)
index 0000000..479a890
--- /dev/null
@@ -0,0 +1,47 @@
+From 882fd1577cbe7812ae3a48988180c5f0fda475ca Mon Sep 17 00:00:00 2001
+From: Miquel Raynal <miquel.raynal@free-electrons.com>
+Date: Sat, 26 Aug 2017 17:19:15 +0200
+Subject: [PATCH] mtd: nand: Use standard large page OOB layout when using
+ NAND_ECC_NONE
+
+Use the core's large page OOB layout functions when not reserving any
+space for ECC bytes in the OOB layout. Fix ->nand_ooblayout_ecc_lp()
+to return -ERANGE instead of a zero length in this case.
+
+Signed-off-by: Miquel Raynal <miquel.raynal@free-electrons.com>
+Signed-off-by: Boris Brezillon <boris.brezillon@free-electrons.com>
+---
+ drivers/mtd/nand/nand_base.c | 15 ++++++++++++++-
+ 1 file changed, 14 insertions(+), 1 deletion(-)
+
+--- a/drivers/mtd/nand/nand_base.c
++++ b/drivers/mtd/nand/nand_base.c
+@@ -115,7 +115,7 @@ static int nand_ooblayout_ecc_lp(struct
+       struct nand_chip *chip = mtd_to_nand(mtd);
+       struct nand_ecc_ctrl *ecc = &chip->ecc;
+-      if (section)
++      if (section || !ecc->total)
+               return -ERANGE;
+       oobregion->length = ecc->total;
+@@ -4707,6 +4707,19 @@ int nand_scan_tail(struct mtd_info *mtd)
+                       mtd_set_ooblayout(mtd, &nand_ooblayout_lp_hamming_ops);
+                       break;
+               default:
++                      /*
++                       * Expose the whole OOB area to users if ECC_NONE
++                       * is passed. We could do that for all kind of
++                       * ->oobsize, but we must keep the old large/small
++                       * page with ECC layout when ->oobsize <= 128 for
++                       * compatibility reasons.
++                       */
++                      if (ecc->mode == NAND_ECC_NONE) {
++                              mtd_set_ooblayout(mtd,
++                                              &nand_ooblayout_lp_ops);
++                              break;
++                      }
++
+                       WARN(1, "No oob scheme defined for oobsize %d\n",
+                               mtd->oobsize);
+                       ret = -EINVAL;
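
The key point of the hunk above is that OOB layout callbacks signal "no more sections" with -ERANGE; returning a zero-length ECC region instead would make callers iterate over an empty section forever. A self-contained mock of that contract, plain C userspace; the struct and function names here are illustrative, only the section/-ERANGE convention is taken from the patch:

#include <stdio.h>
#include <errno.h>

struct oobregion { int offset, length; };

static int ecc_total;	/* bytes reserved for ECC; 0 stands in for NAND_ECC_NONE */

/* mirrors the check the patch adds to nand_ooblayout_ecc_lp() */
static int ooblayout_ecc(int section, struct oobregion *r)
{
	if (section || !ecc_total)
		return -ERANGE;
	r->offset = 0;
	r->length = ecc_total;
	return 0;
}

static void walk(void)
{
	struct oobregion r;
	int section = 0;

	while (!ooblayout_ecc(section, &r)) {
		printf("  ECC section %d: offset %d, length %d\n",
		       section, r.offset, r.length);
		section++;
	}
	printf("  stops with -ERANGE at section %d\n", section);
}

int main(void)
{
	ecc_total = 80;		/* normal case: a single ECC region */
	puts("ecc->total = 80:");
	walk();

	ecc_total = 0;		/* ECC_NONE: no region at all, stop immediately */
	puts("ecc->total = 0:");
	walk();
	return 0;
}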
diff --git a/target/linux/ipq40xx/patches-4.14/031-mtd-nand-use-usual-return-values-for-the-erase-hook.patch b/target/linux/ipq40xx/patches-4.14/031-mtd-nand-use-usual-return-values-for-the-erase-hook.patch
new file mode 100644 (file)
index 0000000..67ffb19
--- /dev/null
@@ -0,0 +1,48 @@
+From eb94555e9e97c9983461214046b4d72c4ab4ba70 Mon Sep 17 00:00:00 2001
+From: Miquel Raynal <miquel.raynal@free-electrons.com>
+Date: Thu, 30 Nov 2017 18:01:28 +0100
+Subject: [PATCH] mtd: nand: use usual return values for the ->erase() hook
+
+Avoid using specific defined values for checking returned status of the
+->erase() hook. Instead, use usual negative error values on failure,
+zero otherwise.
+
+Signed-off-by: Miquel Raynal <miquel.raynal@free-electrons.com>
+Acked-by: Masahiro Yamada <yamada.masahiro@socionext.com>
+Signed-off-by: Boris Brezillon <boris.brezillon@free-electrons.com>
+---
+ drivers/mtd/nand/denali.c    |  2 +-
+ drivers/mtd/nand/docg4.c     |  7 ++++++-
+ drivers/mtd/nand/nand_base.c | 10 ++++++++--
+ 3 files changed, 15 insertions(+), 4 deletions(-)
+
+--- a/drivers/mtd/nand/nand_base.c
++++ b/drivers/mtd/nand/nand_base.c
+@@ -2989,11 +2989,17 @@ out:
+ static int single_erase(struct mtd_info *mtd, int page)
+ {
+       struct nand_chip *chip = mtd_to_nand(mtd);
++      int status;
++
+       /* Send commands to erase a block */
+       chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page);
+       chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1);
+-      return chip->waitfunc(mtd, chip);
++      status = chip->waitfunc(mtd, chip);
++      if (status < 0)
++              return status;
++
++      return status & NAND_STATUS_FAIL ? -EIO : 0;
+ }
+ /**
+@@ -3077,7 +3083,7 @@ int nand_erase_nand(struct mtd_info *mtd
+               status = chip->erase(mtd, page & chip->pagemask);
+               /* See if block erase succeeded */
+-              if (status & NAND_STATUS_FAIL) {
++              if (status) {
+                       pr_debug("%s: failed erase, page 0x%08x\n",
+                                       __func__, page);
+                       instr->state = MTD_ERASE_FAILED;
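
What the nand_base.c hunk changes is the calling convention of the ->erase() hook: callers now check for a plain negative errno instead of poking at NAND status bits. A standalone mock of that conversion; NAND_STATUS_FAIL is bit 0 of the ONFI status byte as in the kernel headers, everything else is an illustration rather than driver code:

#include <stdio.h>
#include <errno.h>

#define NAND_STATUS_FAIL 0x01

/* same shape as the patched single_erase(): errno on failure, 0 on success */
static int single_erase_mock(int chip_status)
{
	if (chip_status < 0)			/* waitfunc() itself failed */
		return chip_status;
	return chip_status & NAND_STATUS_FAIL ? -EIO : 0;
}

int main(void)
{
	printf("status 0xe0 -> %d\n", single_erase_mock(0xe0));  /* pass      */
	printf("status 0xe1 -> %d\n", single_erase_mock(0xe1));  /* -EIO      */
	printf("status %d -> %d\n", -EIO, single_erase_mock(-EIO));
	return 0;
}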
diff --git a/target/linux/ipq40xx/patches-4.14/040-dmaengine-qcom-bam-Process-multiple-pending-descript.patch b/target/linux/ipq40xx/patches-4.14/040-dmaengine-qcom-bam-Process-multiple-pending-descript.patch
new file mode 100644 (file)
index 0000000..dca516e
--- /dev/null
@@ -0,0 +1,395 @@
+From 6b4faeac05bc0b91616b921191cb054d1376f3b4 Mon Sep 17 00:00:00 2001
+From: Sricharan R <sricharan@codeaurora.org>
+Date: Mon, 28 Aug 2017 20:30:24 +0530
+Subject: [PATCH] dmaengine: qcom-bam: Process multiple pending descriptors
+
+The bam dmaengine has a circular FIFO to which we
+add hw descriptors that describe the transaction.
+The FIFO has space for about 4096 hw descriptors.
+
+Currently we add one descriptor and wait for it to
+complete with interrupt and then add the next pending
+descriptor. In this way, the FIFO is underutilized
+since only one descriptor is processed at a time, although
+there is space in FIFO for the BAM to process more.
+
+Instead, keep adding descriptors to the FIFO till it is full;
+that allows BAM to continue to work on the next descriptor
+immediately after signalling the completion interrupt for the
+previous descriptor.
+
+Also, when the client has not set the DMA_PREP_INTERRUPT for
+a descriptor, then do not configure BAM to trigger an interrupt
+upon completion of that descriptor. This way we get an interrupt
+only for the descriptor for which DMA_PREP_INTERRUPT was
+requested and then signal completion of all the previously completed
+descriptors. So we still do callbacks for all requested descriptors,
+but the number of interrupts is reduced.
+
+CURRENT:
+
+            ------      -------   ---------------
+            |DES 0|     |DESC 1|  |DESC 2 + INT |
+            ------      -------   ---------------
+               |           |            |
+               |           |            |
+INTERRUPT:   (INT)       (INT)       (INT)
+CALLBACK:     (CB)        (CB)         (CB)
+
+               MTD_SPEEDTEST READ PAGE: 3560 KiB/s
+               MTD_SPEEDTEST WRITE PAGE: 2664 KiB/s
+               IOZONE READ: 2456 KB/s
+               IOZONE WRITE: 1230 KB/s
+
+       bam dma interrupts (after tests): 96508
+
+CHANGE:
+
+        ------  -------    -------------
+        |DES 0| |DESC 1   |DESC 2 + INT |
+        ------  -------   --------------
+                               |
+                               |
+                             (INT)
+                             (CB for 0, 1, 2)
+
+               MTD_SPEEDTEST READ PAGE: 3860 KiB/s
+               MTD_SPEEDTEST WRITE PAGE: 2837 KiB/s
+               IOZONE READ: 2677 KB/s
+               IOZONE WRITE: 1308 KB/s
+
+       bam dma interrupts (after tests): 58806
+
+Signed-off-by: Sricharan R <sricharan@codeaurora.org>
+Reviewed-by: Andy Gross <andy.gross@linaro.org>
+Tested-by: Abhishek Sahu <absahu@codeaurora.org>
+Signed-off-by: Vinod Koul <vinod.koul@intel.com>
+---
+ drivers/dma/qcom/bam_dma.c | 169 +++++++++++++++++++++++++++++----------------
+ 1 file changed, 109 insertions(+), 60 deletions(-)
+
+--- a/drivers/dma/qcom/bam_dma.c
++++ b/drivers/dma/qcom/bam_dma.c
+@@ -46,6 +46,7 @@
+ #include <linux/of_address.h>
+ #include <linux/of_irq.h>
+ #include <linux/of_dma.h>
++#include <linux/circ_buf.h>
+ #include <linux/clk.h>
+ #include <linux/dmaengine.h>
+ #include <linux/pm_runtime.h>
+@@ -78,6 +79,8 @@ struct bam_async_desc {
+       struct bam_desc_hw *curr_desc;
++      /* list node for the desc in the bam_chan list of descriptors */
++      struct list_head desc_node;
+       enum dma_transfer_direction dir;
+       size_t length;
+       struct bam_desc_hw desc[0];
+@@ -347,6 +350,8 @@ static const struct reg_offset_data bam_
+ #define BAM_DESC_FIFO_SIZE    SZ_32K
+ #define MAX_DESCRIPTORS (BAM_DESC_FIFO_SIZE / sizeof(struct bam_desc_hw) - 1)
+ #define BAM_FIFO_SIZE (SZ_32K - 8)
++#define IS_BUSY(chan) (CIRC_SPACE(bchan->tail, bchan->head,\
++                       MAX_DESCRIPTORS + 1) == 0)
+ struct bam_chan {
+       struct virt_dma_chan vc;
+@@ -356,8 +361,6 @@ struct bam_chan {
+       /* configuration from device tree */
+       u32 id;
+-      struct bam_async_desc *curr_txd;        /* current running dma */
+-
+       /* runtime configuration */
+       struct dma_slave_config slave;
+@@ -372,6 +375,8 @@ struct bam_chan {
+       unsigned int initialized;       /* is the channel hw initialized? */
+       unsigned int paused;            /* is the channel paused? */
+       unsigned int reconfigure;       /* new slave config? */
++      /* list of descriptors currently processed */
++      struct list_head desc_list;
+       struct list_head node;
+ };
+@@ -539,7 +544,7 @@ static void bam_free_chan(struct dma_cha
+       vchan_free_chan_resources(to_virt_chan(chan));
+-      if (bchan->curr_txd) {
++      if (!list_empty(&bchan->desc_list)) {
+               dev_err(bchan->bdev->dev, "Cannot free busy channel\n");
+               goto err;
+       }
+@@ -632,8 +637,6 @@ static struct dma_async_tx_descriptor *b
+       if (flags & DMA_PREP_INTERRUPT)
+               async_desc->flags |= DESC_FLAG_EOT;
+-      else
+-              async_desc->flags |= DESC_FLAG_INT;
+       async_desc->num_desc = num_alloc;
+       async_desc->curr_desc = async_desc->desc;
+@@ -684,14 +687,16 @@ err_out:
+ static int bam_dma_terminate_all(struct dma_chan *chan)
+ {
+       struct bam_chan *bchan = to_bam_chan(chan);
++      struct bam_async_desc *async_desc, *tmp;
+       unsigned long flag;
+       LIST_HEAD(head);
+       /* remove all transactions, including active transaction */
+       spin_lock_irqsave(&bchan->vc.lock, flag);
+-      if (bchan->curr_txd) {
+-              list_add(&bchan->curr_txd->vd.node, &bchan->vc.desc_issued);
+-              bchan->curr_txd = NULL;
++      list_for_each_entry_safe(async_desc, tmp,
++                               &bchan->desc_list, desc_node) {
++              list_add(&async_desc->vd.node, &bchan->vc.desc_issued);
++              list_del(&async_desc->desc_node);
+       }
+       vchan_get_all_descriptors(&bchan->vc, &head);
+@@ -763,9 +768,9 @@ static int bam_resume(struct dma_chan *c
+  */
+ static u32 process_channel_irqs(struct bam_device *bdev)
+ {
+-      u32 i, srcs, pipe_stts;
++      u32 i, srcs, pipe_stts, offset, avail;
+       unsigned long flags;
+-      struct bam_async_desc *async_desc;
++      struct bam_async_desc *async_desc, *tmp;
+       srcs = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_SRCS_EE));
+@@ -785,27 +790,40 @@ static u32 process_channel_irqs(struct b
+               writel_relaxed(pipe_stts, bam_addr(bdev, i, BAM_P_IRQ_CLR));
+               spin_lock_irqsave(&bchan->vc.lock, flags);
+-              async_desc = bchan->curr_txd;
+-              if (async_desc) {
+-                      async_desc->num_desc -= async_desc->xfer_len;
+-                      async_desc->curr_desc += async_desc->xfer_len;
+-                      bchan->curr_txd = NULL;
++              offset = readl_relaxed(bam_addr(bdev, i, BAM_P_SW_OFSTS)) &
++                                     P_SW_OFSTS_MASK;
++              offset /= sizeof(struct bam_desc_hw);
++
++              /* Number of bytes available to read */
++              avail = CIRC_CNT(offset, bchan->head, MAX_DESCRIPTORS + 1);
++
++              list_for_each_entry_safe(async_desc, tmp,
++                                       &bchan->desc_list, desc_node) {
++                      /* Not enough data to read */
++                      if (avail < async_desc->xfer_len)
++                              break;
+                       /* manage FIFO */
+                       bchan->head += async_desc->xfer_len;
+                       bchan->head %= MAX_DESCRIPTORS;
++                      async_desc->num_desc -= async_desc->xfer_len;
++                      async_desc->curr_desc += async_desc->xfer_len;
++                      avail -= async_desc->xfer_len;
++
+                       /*
+-                       * if complete, process cookie.  Otherwise
++                       * if complete, process cookie. Otherwise
+                        * push back to front of desc_issued so that
+                        * it gets restarted by the tasklet
+                        */
+-                      if (!async_desc->num_desc)
++                      if (!async_desc->num_desc) {
+                               vchan_cookie_complete(&async_desc->vd);
+-                      else
++                      } else {
+                               list_add(&async_desc->vd.node,
+-                                      &bchan->vc.desc_issued);
++                                       &bchan->vc.desc_issued);
++                      }
++                      list_del(&async_desc->desc_node);
+               }
+               spin_unlock_irqrestore(&bchan->vc.lock, flags);
+@@ -867,6 +885,7 @@ static enum dma_status bam_tx_status(str
+               struct dma_tx_state *txstate)
+ {
+       struct bam_chan *bchan = to_bam_chan(chan);
++      struct bam_async_desc *async_desc;
+       struct virt_dma_desc *vd;
+       int ret;
+       size_t residue = 0;
+@@ -882,11 +901,17 @@ static enum dma_status bam_tx_status(str
+       spin_lock_irqsave(&bchan->vc.lock, flags);
+       vd = vchan_find_desc(&bchan->vc, cookie);
+-      if (vd)
++      if (vd) {
+               residue = container_of(vd, struct bam_async_desc, vd)->length;
+-      else if (bchan->curr_txd && bchan->curr_txd->vd.tx.cookie == cookie)
+-              for (i = 0; i < bchan->curr_txd->num_desc; i++)
+-                      residue += bchan->curr_txd->curr_desc[i].size;
++      } else {
++              list_for_each_entry(async_desc, &bchan->desc_list, desc_node) {
++                      if (async_desc->vd.tx.cookie != cookie)
++                              continue;
++
++                      for (i = 0; i < async_desc->num_desc; i++)
++                              residue += async_desc->curr_desc[i].size;
++              }
++      }
+       spin_unlock_irqrestore(&bchan->vc.lock, flags);
+@@ -927,63 +952,86 @@ static void bam_start_dma(struct bam_cha
+ {
+       struct virt_dma_desc *vd = vchan_next_desc(&bchan->vc);
+       struct bam_device *bdev = bchan->bdev;
+-      struct bam_async_desc *async_desc;
++      struct bam_async_desc *async_desc = NULL;
+       struct bam_desc_hw *desc;
+       struct bam_desc_hw *fifo = PTR_ALIGN(bchan->fifo_virt,
+                                       sizeof(struct bam_desc_hw));
+       int ret;
++      unsigned int avail;
++      struct dmaengine_desc_callback cb;
+       lockdep_assert_held(&bchan->vc.lock);
+       if (!vd)
+               return;
+-      list_del(&vd->node);
+-
+-      async_desc = container_of(vd, struct bam_async_desc, vd);
+-      bchan->curr_txd = async_desc;
+-
+       ret = pm_runtime_get_sync(bdev->dev);
+       if (ret < 0)
+               return;
+-      /* on first use, initialize the channel hardware */
+-      if (!bchan->initialized)
+-              bam_chan_init_hw(bchan, async_desc->dir);
+-
+-      /* apply new slave config changes, if necessary */
+-      if (bchan->reconfigure)
+-              bam_apply_new_config(bchan, async_desc->dir);
++      while (vd && !IS_BUSY(bchan)) {
++              list_del(&vd->node);
+-      desc = bchan->curr_txd->curr_desc;
++              async_desc = container_of(vd, struct bam_async_desc, vd);
+-      if (async_desc->num_desc > MAX_DESCRIPTORS)
+-              async_desc->xfer_len = MAX_DESCRIPTORS;
+-      else
+-              async_desc->xfer_len = async_desc->num_desc;
++              /* on first use, initialize the channel hardware */
++              if (!bchan->initialized)
++                      bam_chan_init_hw(bchan, async_desc->dir);
+-      /* set any special flags on the last descriptor */
+-      if (async_desc->num_desc == async_desc->xfer_len)
+-              desc[async_desc->xfer_len - 1].flags |=
+-                                      cpu_to_le16(async_desc->flags);
+-      else
+-              desc[async_desc->xfer_len - 1].flags |=
+-                                      cpu_to_le16(DESC_FLAG_INT);
++              /* apply new slave config changes, if necessary */
++              if (bchan->reconfigure)
++                      bam_apply_new_config(bchan, async_desc->dir);
++
++              desc = async_desc->curr_desc;
++              avail = CIRC_SPACE(bchan->tail, bchan->head,
++                                 MAX_DESCRIPTORS + 1);
++
++              if (async_desc->num_desc > avail)
++                      async_desc->xfer_len = avail;
++              else
++                      async_desc->xfer_len = async_desc->num_desc;
++
++              /* set any special flags on the last descriptor */
++              if (async_desc->num_desc == async_desc->xfer_len)
++                      desc[async_desc->xfer_len - 1].flags |=
++                                              cpu_to_le16(async_desc->flags);
+-      if (bchan->tail + async_desc->xfer_len > MAX_DESCRIPTORS) {
+-              u32 partial = MAX_DESCRIPTORS - bchan->tail;
++              vd = vchan_next_desc(&bchan->vc);
+-              memcpy(&fifo[bchan->tail], desc,
+-                              partial * sizeof(struct bam_desc_hw));
+-              memcpy(fifo, &desc[partial], (async_desc->xfer_len - partial) *
++              dmaengine_desc_get_callback(&async_desc->vd.tx, &cb);
++
++              /*
++               * An interrupt is generated at this desc, if
++               *  - FIFO is FULL.
++               *  - No more descriptors to add.
++               *  - If a callback completion was requested for this DESC,
++               *     In this case, BAM will deliver the completion callback
++               *     for this desc and continue processing the next desc.
++               */
++              if (((avail <= async_desc->xfer_len) || !vd ||
++                   dmaengine_desc_callback_valid(&cb)) &&
++                  !(async_desc->flags & DESC_FLAG_EOT))
++                      desc[async_desc->xfer_len - 1].flags |=
++                              cpu_to_le16(DESC_FLAG_INT);
++
++              if (bchan->tail + async_desc->xfer_len > MAX_DESCRIPTORS) {
++                      u32 partial = MAX_DESCRIPTORS - bchan->tail;
++
++                      memcpy(&fifo[bchan->tail], desc,
++                             partial * sizeof(struct bam_desc_hw));
++                      memcpy(fifo, &desc[partial],
++                             (async_desc->xfer_len - partial) *
+                               sizeof(struct bam_desc_hw));
+-      } else {
+-              memcpy(&fifo[bchan->tail], desc,
+-                      async_desc->xfer_len * sizeof(struct bam_desc_hw));
+-      }
++              } else {
++                      memcpy(&fifo[bchan->tail], desc,
++                             async_desc->xfer_len *
++                             sizeof(struct bam_desc_hw));
++              }
+-      bchan->tail += async_desc->xfer_len;
+-      bchan->tail %= MAX_DESCRIPTORS;
++              bchan->tail += async_desc->xfer_len;
++              bchan->tail %= MAX_DESCRIPTORS;
++              list_add_tail(&async_desc->desc_node, &bchan->desc_list);
++      }
+       /* ensure descriptor writes and dma start not reordered */
+       wmb();
+@@ -1012,7 +1060,7 @@ static void dma_tasklet(unsigned long da
+               bchan = &bdev->channels[i];
+               spin_lock_irqsave(&bchan->vc.lock, flags);
+-              if (!list_empty(&bchan->vc.desc_issued) && !bchan->curr_txd)
++              if (!list_empty(&bchan->vc.desc_issued) && !IS_BUSY(bchan))
+                       bam_start_dma(bchan);
+               spin_unlock_irqrestore(&bchan->vc.lock, flags);
+       }
+@@ -1033,7 +1081,7 @@ static void bam_issue_pending(struct dma
+       spin_lock_irqsave(&bchan->vc.lock, flags);
+       /* if work pending and idle, start a transaction */
+-      if (vchan_issue_pending(&bchan->vc) && !bchan->curr_txd)
++      if (vchan_issue_pending(&bchan->vc) && !IS_BUSY(bchan))
+               bam_start_dma(bchan);
+       spin_unlock_irqrestore(&bchan->vc.lock, flags);
+@@ -1133,6 +1181,7 @@ static void bam_channel_init(struct bam_
+       vchan_init(&bchan->vc, &bdev->common);
+       bchan->vc.desc_free = bam_dma_free_desc;
++      INIT_LIST_HEAD(&bchan->desc_list);
+ }
+ static const struct of_device_id bam_of_match[] = {
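
The rework above hinges on plain circular-buffer accounting: bam_start_dma() keeps queueing descriptors while CIRC_SPACE() is non-zero (the new IS_BUSY() test) and process_channel_irqs() retires however many the hardware offset says are complete. A standalone illustration of that arithmetic; the two macros are copied from include/linux/circ_buf.h, the 4096-slot size follows from MAX_DESCRIPTORS + 1, and the head/tail roles match the driver (tail = software write index, head = last retired descriptor):

#include <stdio.h>

#define CIRC_CNT(head, tail, size)   (((head) - (tail)) & ((size) - 1))
#define CIRC_SPACE(head, tail, size) CIRC_CNT((tail), ((head) + 1), (size))

#define FIFO_SLOTS 4096		/* MAX_DESCRIPTORS + 1 in the driver */

int main(void)
{
	unsigned int head = 0, tail = 0;

	/* software queues 10 descriptors */
	tail = (tail + 10) % FIFO_SLOTS;
	printf("queued:    space %u, pending %u\n",
	       CIRC_SPACE(tail, head, FIFO_SLOTS),
	       CIRC_CNT(tail, head, FIFO_SLOTS));

	/* interrupt: the hardware offset shows 4 of them are done */
	head = (head + 4) % FIFO_SLOTS;
	printf("after irq: space %u, pending %u\n",
	       CIRC_SPACE(tail, head, FIFO_SLOTS),
	       CIRC_CNT(tail, head, FIFO_SLOTS));
	return 0;
}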
diff --git a/target/linux/ipq40xx/patches-4.14/050-0002-mtd-nand-qcom-add-command-elements-in-BAM-transactio.patch b/target/linux/ipq40xx/patches-4.14/050-0002-mtd-nand-qcom-add-command-elements-in-BAM-transactio.patch
new file mode 100644 (file)
index 0000000..1a32cc3
--- /dev/null
@@ -0,0 +1,89 @@
+From 8c4cdce8b1ab044a2ee1d86d5a086f67e32b3c10 Mon Sep 17 00:00:00 2001
+From: Abhishek Sahu <absahu@codeaurora.org>
+Date: Mon, 25 Sep 2017 13:21:25 +0530
+Subject: [PATCH 2/7] mtd: nand: qcom: add command elements in BAM transaction
+
+All the QPIC register read/write through BAM DMA requires
+command descriptor which contains the array of command elements.
+
+Reviewed-by: Archit Taneja <architt@codeaurora.org>
+Signed-off-by: Abhishek Sahu <absahu@codeaurora.org>
+Signed-off-by: Boris Brezillon <boris.brezillon@free-electrons.com>
+---
+ drivers/mtd/nand/qcom_nandc.c | 19 ++++++++++++++++++-
+ 1 file changed, 18 insertions(+), 1 deletion(-)
+
+--- a/drivers/mtd/nand/qcom_nandc.c
++++ b/drivers/mtd/nand/qcom_nandc.c
+@@ -22,6 +22,7 @@
+ #include <linux/of.h>
+ #include <linux/of_device.h>
+ #include <linux/delay.h>
++#include <linux/dma/qcom_bam_dma.h>
+ /* NANDc reg offsets */
+ #define       NAND_FLASH_CMD                  0x00
+@@ -199,6 +200,7 @@ nandc_set_reg(nandc, NAND_READ_LOCATION_
+  */
+ #define dev_cmd_reg_addr(nandc, reg) ((nandc)->props->dev_cmd_reg_start + (reg))
++#define QPIC_PER_CW_CMD_ELEMENTS      32
+ #define QPIC_PER_CW_CMD_SGL           32
+ #define QPIC_PER_CW_DATA_SGL          8
+@@ -221,8 +223,13 @@ nandc_set_reg(nandc, NAND_READ_LOCATION_
+ /*
+  * This data type corresponds to the BAM transaction which will be used for all
+  * NAND transfers.
++ * @bam_ce - the array of BAM command elements
+  * @cmd_sgl - sgl for NAND BAM command pipe
+  * @data_sgl - sgl for NAND BAM consumer/producer pipe
++ * @bam_ce_pos - the index in bam_ce which is available for next sgl
++ * @bam_ce_start - the index in bam_ce which marks the start position ce
++ *               for current sgl. It will be used for size calculation
++ *               for current sgl
+  * @cmd_sgl_pos - current index in command sgl.
+  * @cmd_sgl_start - start index in command sgl.
+  * @tx_sgl_pos - current index in data sgl for tx.
+@@ -231,8 +238,11 @@ nandc_set_reg(nandc, NAND_READ_LOCATION_
+  * @rx_sgl_start - start index in data sgl for rx.
+  */
+ struct bam_transaction {
++      struct bam_cmd_element *bam_ce;
+       struct scatterlist *cmd_sgl;
+       struct scatterlist *data_sgl;
++      u32 bam_ce_pos;
++      u32 bam_ce_start;
+       u32 cmd_sgl_pos;
+       u32 cmd_sgl_start;
+       u32 tx_sgl_pos;
+@@ -462,7 +472,8 @@ alloc_bam_transaction(struct qcom_nand_c
+       bam_txn_size =
+               sizeof(*bam_txn) + num_cw *
+-              ((sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL) +
++              ((sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS) +
++              (sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL) +
+               (sizeof(*bam_txn->data_sgl) * QPIC_PER_CW_DATA_SGL));
+       bam_txn_buf = devm_kzalloc(nandc->dev, bam_txn_size, GFP_KERNEL);
+@@ -472,6 +483,10 @@ alloc_bam_transaction(struct qcom_nand_c
+       bam_txn = bam_txn_buf;
+       bam_txn_buf += sizeof(*bam_txn);
++      bam_txn->bam_ce = bam_txn_buf;
++      bam_txn_buf +=
++              sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS * num_cw;
++
+       bam_txn->cmd_sgl = bam_txn_buf;
+       bam_txn_buf +=
+               sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL * num_cw;
+@@ -489,6 +504,8 @@ static void clear_bam_transaction(struct
+       if (!nandc->props->is_bam)
+               return;
++      bam_txn->bam_ce_pos = 0;
++      bam_txn->bam_ce_start = 0;
+       bam_txn->cmd_sgl_pos = 0;
+       bam_txn->cmd_sgl_start = 0;
+       bam_txn->tx_sgl_pos = 0;
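
The allocation hunk above sizes one flat buffer for the whole BAM transaction: per codeword it reserves QPIC_PER_CW_CMD_ELEMENTS command elements plus the command and data scatterlists. A small standalone computation of that per-codeword footprint, plain C; the two element sizes below are assumptions for illustration, only the counts and the layout order come from the patch:

#include <stdio.h>

#define QPIC_PER_CW_CMD_ELEMENTS 32
#define QPIC_PER_CW_CMD_SGL      32
#define QPIC_PER_CW_DATA_SGL      8

/* illustrative sizes only; the real ones are sizeof(struct bam_cmd_element)
 * and sizeof(struct scatterlist) on the target kernel */
#define SIZEOF_BAM_CE 16
#define SIZEOF_SGL    32

int main(void)
{
	unsigned long per_cw =
		SIZEOF_BAM_CE * QPIC_PER_CW_CMD_ELEMENTS +
		SIZEOF_SGL * QPIC_PER_CW_CMD_SGL +
		SIZEOF_SGL * QPIC_PER_CW_DATA_SGL;

	for (int num_cw = 1; num_cw <= 4; num_cw++)
		printf("%d codeword(s): %lu bytes of per-codeword state\n",
		       num_cw, num_cw * per_cw);
	return 0;
}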
diff --git a/target/linux/ipq40xx/patches-4.14/050-0003-mtd-nand-qcom-support-for-command-descriptor-formati.patch b/target/linux/ipq40xx/patches-4.14/050-0003-mtd-nand-qcom-support-for-command-descriptor-formati.patch
new file mode 100644 (file)
index 0000000..8dd209b
--- /dev/null
@@ -0,0 +1,201 @@
+From 8d6b6d7e135e9bbfc923d34a45cb0e72695e63ed Mon Sep 17 00:00:00 2001
+From: Abhishek Sahu <absahu@codeaurora.org>
+Date: Mon, 25 Sep 2017 13:21:26 +0530
+Subject: [PATCH 3/7] mtd: nand: qcom: support for command descriptor formation
+
+1. Add the function for command descriptor preparation which will
+   be used only by BAM DMA and it will form the DMA descriptors
+   containing command elements
+2. DMA_PREP_CMD flag should be used for forming command DMA
+   descriptors
+
+Reviewed-by: Archit Taneja <architt@codeaurora.org>
+Signed-off-by: Abhishek Sahu <absahu@codeaurora.org>
+Signed-off-by: Boris Brezillon <boris.brezillon@free-electrons.com>
+---
+ drivers/mtd/nand/qcom_nandc.c | 108 +++++++++++++++++++++++++++++++++++-------
+ 1 file changed, 92 insertions(+), 16 deletions(-)
+
+--- a/drivers/mtd/nand/qcom_nandc.c
++++ b/drivers/mtd/nand/qcom_nandc.c
+@@ -200,6 +200,14 @@ nandc_set_reg(nandc, NAND_READ_LOCATION_
+  */
+ #define dev_cmd_reg_addr(nandc, reg) ((nandc)->props->dev_cmd_reg_start + (reg))
++/* Returns the NAND register physical address */
++#define nandc_reg_phys(chip, offset) ((chip)->base_phys + (offset))
++
++/* Returns the dma address for reg read buffer */
++#define reg_buf_dma_addr(chip, vaddr) \
++      ((chip)->reg_read_dma + \
++      ((uint8_t *)(vaddr) - (uint8_t *)(chip)->reg_read_buf))
++
+ #define QPIC_PER_CW_CMD_ELEMENTS      32
+ #define QPIC_PER_CW_CMD_SGL           32
+ #define QPIC_PER_CW_DATA_SGL          8
+@@ -317,7 +325,8 @@ struct nandc_regs {
+  *                            controller
+  * @dev:                      parent device
+  * @base:                     MMIO base
+- * @base_dma:                 physical base address of controller registers
++ * @base_phys:                        physical base address of controller registers
++ * @base_dma:                 dma base address of controller registers
+  * @core_clk:                 controller clock
+  * @aon_clk:                  another controller clock
+  *
+@@ -350,6 +359,7 @@ struct qcom_nand_controller {
+       struct device *dev;
+       void __iomem *base;
++      phys_addr_t base_phys;
+       dma_addr_t base_dma;
+       struct clk *core_clk;
+@@ -751,6 +761,66 @@ static int prepare_bam_async_desc(struct
+ }
+ /*
++ * Prepares the command descriptor for BAM DMA which will be used for NAND
++ * register reads and writes. The command descriptor requires the command
++ * to be formed in command element type so this function uses the command
++ * element from bam transaction ce array and fills the same with required
++ * data. A single SGL can contain multiple command elements so
++ * NAND_BAM_NEXT_SGL will be used for starting the separate SGL
++ * after the current command element.
++ */
++static int prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
++                               int reg_off, const void *vaddr,
++                               int size, unsigned int flags)
++{
++      int bam_ce_size;
++      int i, ret;
++      struct bam_cmd_element *bam_ce_buffer;
++      struct bam_transaction *bam_txn = nandc->bam_txn;
++
++      bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_pos];
++
++      /* fill the command desc */
++      for (i = 0; i < size; i++) {
++              if (read)
++                      bam_prep_ce(&bam_ce_buffer[i],
++                                  nandc_reg_phys(nandc, reg_off + 4 * i),
++                                  BAM_READ_COMMAND,
++                                  reg_buf_dma_addr(nandc,
++                                                   (__le32 *)vaddr + i));
++              else
++                      bam_prep_ce_le32(&bam_ce_buffer[i],
++                                       nandc_reg_phys(nandc, reg_off + 4 * i),
++                                       BAM_WRITE_COMMAND,
++                                       *((__le32 *)vaddr + i));
++      }
++
++      bam_txn->bam_ce_pos += size;
++
++      /* use the separate sgl after this command */
++      if (flags & NAND_BAM_NEXT_SGL) {
++              bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_start];
++              bam_ce_size = (bam_txn->bam_ce_pos -
++                              bam_txn->bam_ce_start) *
++                              sizeof(struct bam_cmd_element);
++              sg_set_buf(&bam_txn->cmd_sgl[bam_txn->cmd_sgl_pos],
++                         bam_ce_buffer, bam_ce_size);
++              bam_txn->cmd_sgl_pos++;
++              bam_txn->bam_ce_start = bam_txn->bam_ce_pos;
++
++              if (flags & NAND_BAM_NWD) {
++                      ret = prepare_bam_async_desc(nandc, nandc->cmd_chan,
++                                                   DMA_PREP_FENCE |
++                                                   DMA_PREP_CMD);
++                      if (ret)
++                              return ret;
++              }
++      }
++
++      return 0;
++}
++
++/*
+  * Prepares the data descriptor for BAM DMA which will be used for NAND
+  * data reads and writes.
+  */
+@@ -868,19 +938,22 @@ static int read_reg_dma(struct qcom_nand
+ {
+       bool flow_control = false;
+       void *vaddr;
+-      int size;
+-      if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
+-              flow_control = true;
++      vaddr = nandc->reg_read_buf + nandc->reg_read_pos;
++      nandc->reg_read_pos += num_regs;
+       if (first == NAND_DEV_CMD_VLD || first == NAND_DEV_CMD1)
+               first = dev_cmd_reg_addr(nandc, first);
+-      size = num_regs * sizeof(u32);
+-      vaddr = nandc->reg_read_buf + nandc->reg_read_pos;
+-      nandc->reg_read_pos += num_regs;
++      if (nandc->props->is_bam)
++              return prep_bam_dma_desc_cmd(nandc, true, first, vaddr,
++                                           num_regs, flags);
++
++      if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
++              flow_control = true;
+-      return prep_adm_dma_desc(nandc, true, first, vaddr, size, flow_control);
++      return prep_adm_dma_desc(nandc, true, first, vaddr,
++                               num_regs * sizeof(u32), flow_control);
+ }
+ /*
+@@ -897,13 +970,9 @@ static int write_reg_dma(struct qcom_nan
+       bool flow_control = false;
+       struct nandc_regs *regs = nandc->regs;
+       void *vaddr;
+-      int size;
+       vaddr = offset_to_nandc_reg(regs, first);
+-      if (first == NAND_FLASH_CMD)
+-              flow_control = true;
+-
+       if (first == NAND_ERASED_CW_DETECT_CFG) {
+               if (flags & NAND_ERASED_CW_SET)
+                       vaddr = &regs->erased_cw_detect_cfg_set;
+@@ -920,10 +989,15 @@ static int write_reg_dma(struct qcom_nan
+       if (first == NAND_DEV_CMD_VLD_RESTORE || first == NAND_DEV_CMD_VLD)
+               first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD);
+-      size = num_regs * sizeof(u32);
++      if (nandc->props->is_bam)
++              return prep_bam_dma_desc_cmd(nandc, false, first, vaddr,
++                                           num_regs, flags);
++
++      if (first == NAND_FLASH_CMD)
++              flow_control = true;
+-      return prep_adm_dma_desc(nandc, false, first, vaddr, size,
+-                               flow_control);
++      return prep_adm_dma_desc(nandc, false, first, vaddr,
++                               num_regs * sizeof(u32), flow_control);
+ }
+ /*
+@@ -1187,7 +1261,8 @@ static int submit_descs(struct qcom_nand
+               }
+               if (bam_txn->cmd_sgl_pos > bam_txn->cmd_sgl_start) {
+-                      r = prepare_bam_async_desc(nandc, nandc->cmd_chan, 0);
++                      r = prepare_bam_async_desc(nandc, nandc->cmd_chan,
++                                                 DMA_PREP_CMD);
+                       if (r)
+                               return r;
+               }
+@@ -2722,6 +2797,7 @@ static int qcom_nandc_probe(struct platf
+       if (IS_ERR(nandc->base))
+               return PTR_ERR(nandc->base);
++      nandc->base_phys = res->start;
+       nandc->base_dma = phys_to_dma(dev, (phys_addr_t)res->start);
+       nandc->core_clk = devm_clk_get(dev, "core");
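For orientation, the command scatterlists assembled above are eventually handed to the DMA engine through the standard slave-DMA API, with the DMA_PREP_CMD (and, for NWD waits, DMA_PREP_FENCE) flags seen in the hunks. A simplified, illustrative sketch of that submission step is shown below; queue_cmd_sgl() and the DMA_MEM_TO_DEV direction are assumptions made for the example, and the real driver also maps the scatterlist for DMA before preparing the descriptor.

/*
 * Sketch only: handing a prepared command scatterlist to the DMA engine
 * with DMA_PREP_CMD.  queue_cmd_sgl() and the direction are illustrative
 * assumptions; the flags come from the hunks above.
 */
#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

static int queue_cmd_sgl(struct dma_chan *chan, struct scatterlist *sgl,
			 unsigned int sg_len, unsigned long flags)
{
	struct dma_async_tx_descriptor *desc;

	/* flags carry DMA_PREP_CMD, plus DMA_PREP_FENCE for NWD waits */
	desc = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
				       flags);
	if (!desc)
		return -EIO;

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);

	return 0;
}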
diff --git a/target/linux/ipq40xx/patches-4.14/050-0004-mtd-nand-provide-several-helpers-to-do-common-NAND-o.patch b/target/linux/ipq40xx/patches-4.14/050-0004-mtd-nand-provide-several-helpers-to-do-common-NAND-o.patch
new file mode 100644
index 0000000..7cbbcf5
--- /dev/null
@@ -0,0 +1,1586 @@
+commit 97d90da8a886949f09bb4754843fb0b504956ad2
+Author: Boris Brezillon <boris.brezillon@free-electrons.com>
+Date:   Thu Nov 30 18:01:29 2017 +0100
+
+    mtd: nand: provide several helpers to do common NAND operations
+    
+    This is part of the process of removing direct calls to ->cmdfunc()
+    outside of the core in order to introduce a better interface to execute
+    NAND operations.
+    
+    Here we provide several helpers and make use of them to remove all
+    direct calls to ->cmdfunc(). This way, we can easily modify those
+    helpers to make use of the new ->exec_op() interface when available.
+    
+    Signed-off-by: Boris Brezillon <boris.brezillon@free-electrons.com>
+    [miquel.raynal@free-electrons.com: rebased and fixed some conflicts]
+    Signed-off-by: Miquel Raynal <miquel.raynal@free-electrons.com>
+    Acked-by: Masahiro Yamada <yamada.masahiro@socionext.com>
+
+--- a/drivers/mtd/nand/nand_base.c
++++ b/drivers/mtd/nand/nand_base.c
+@@ -561,14 +561,19 @@ static int nand_block_markbad_lowlevel(s
+ static int nand_check_wp(struct mtd_info *mtd)
+ {
+       struct nand_chip *chip = mtd_to_nand(mtd);
++      u8 status;
++      int ret;
+       /* Broken xD cards report WP despite being writable */
+       if (chip->options & NAND_BROKEN_XD)
+               return 0;
+       /* Check the WP bit */
+-      chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
+-      return (chip->read_byte(mtd) & NAND_STATUS_WP) ? 0 : 1;
++      ret = nand_status_op(chip, &status);
++      if (ret)
++              return ret;
++
++      return status & NAND_STATUS_WP ? 0 : 1;
+ }
+ /**
+@@ -667,10 +672,17 @@ EXPORT_SYMBOL_GPL(nand_wait_ready);
+ static void nand_wait_status_ready(struct mtd_info *mtd, unsigned long timeo)
+ {
+       register struct nand_chip *chip = mtd_to_nand(mtd);
++      int ret;
+       timeo = jiffies + msecs_to_jiffies(timeo);
+       do {
+-              if ((chip->read_byte(mtd) & NAND_STATUS_READY))
++              u8 status;
++
++              ret = nand_read_data_op(chip, &status, sizeof(status), true);
++              if (ret)
++                      return;
++
++              if (status & NAND_STATUS_READY)
+                       break;
+               touch_softlockup_watchdog();
+       } while (time_before(jiffies, timeo));
+@@ -1016,7 +1028,15 @@ static void panic_nand_wait(struct mtd_i
+                       if (chip->dev_ready(mtd))
+                               break;
+               } else {
+-                      if (chip->read_byte(mtd) & NAND_STATUS_READY)
++                      int ret;
++                      u8 status;
++
++                      ret = nand_read_data_op(chip, &status, sizeof(status),
++                                              true);
++                      if (ret)
++                              return;
++
++                      if (status & NAND_STATUS_READY)
+                               break;
+               }
+               mdelay(1);
+@@ -1033,8 +1053,9 @@ static void panic_nand_wait(struct mtd_i
+ static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
+ {
+-      int status;
+       unsigned long timeo = 400;
++      u8 status;
++      int ret;
+       /*
+        * Apply this short delay always to ensure that we do wait tWB in any
+@@ -1042,7 +1063,9 @@ static int nand_wait(struct mtd_info *mt
+        */
+       ndelay(100);
+-      chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
++      ret = nand_status_op(chip, NULL);
++      if (ret)
++              return ret;
+       if (in_interrupt() || oops_in_progress)
+               panic_nand_wait(mtd, chip, timeo);
+@@ -1053,14 +1076,22 @@ static int nand_wait(struct mtd_info *mt
+                               if (chip->dev_ready(mtd))
+                                       break;
+                       } else {
+-                              if (chip->read_byte(mtd) & NAND_STATUS_READY)
++                              ret = nand_read_data_op(chip, &status,
++                                                      sizeof(status), true);
++                              if (ret)
++                                      return ret;
++
++                              if (status & NAND_STATUS_READY)
+                                       break;
+                       }
+                       cond_resched();
+               } while (time_before(jiffies, timeo));
+       }
+-      status = (int)chip->read_byte(mtd);
++      ret = nand_read_data_op(chip, &status, sizeof(status), true);
++      if (ret)
++              return ret;
++
+       /* This can happen if in case of timeout or buggy dev_ready */
+       WARN_ON(!(status & NAND_STATUS_READY));
+       return status;
+@@ -1215,6 +1246,516 @@ static void nand_release_data_interface(
+ }
+ /**
++ * nand_read_page_op - Do a READ PAGE operation
++ * @chip: The NAND chip
++ * @page: page to read
++ * @offset_in_page: offset within the page
++ * @buf: buffer used to store the data
++ * @len: length of the buffer
++ *
++ * This function issues a READ PAGE operation.
++ * This function does not select/unselect the CS line.
++ *
++ * Returns 0 on success, a negative error code otherwise.
++ */
++int nand_read_page_op(struct nand_chip *chip, unsigned int page,
++                    unsigned int offset_in_page, void *buf, unsigned int len)
++{
++      struct mtd_info *mtd = nand_to_mtd(chip);
++
++      if (len && !buf)
++              return -EINVAL;
++
++      if (offset_in_page + len > mtd->writesize + mtd->oobsize)
++              return -EINVAL;
++
++      chip->cmdfunc(mtd, NAND_CMD_READ0, offset_in_page, page);
++      if (len)
++              chip->read_buf(mtd, buf, len);
++
++      return 0;
++}
++EXPORT_SYMBOL_GPL(nand_read_page_op);
++
++/**
++ * nand_read_param_page_op - Do a READ PARAMETER PAGE operation
++ * @chip: The NAND chip
++ * @page: parameter page to read
++ * @buf: buffer used to store the data
++ * @len: length of the buffer
++ *
++ * This function issues a READ PARAMETER PAGE operation.
++ * This function does not select/unselect the CS line.
++ *
++ * Returns 0 on success, a negative error code otherwise.
++ */
++static int nand_read_param_page_op(struct nand_chip *chip, u8 page, void *buf,
++                                 unsigned int len)
++{
++      struct mtd_info *mtd = nand_to_mtd(chip);
++      unsigned int i;
++      u8 *p = buf;
++
++      if (len && !buf)
++              return -EINVAL;
++
++      chip->cmdfunc(mtd, NAND_CMD_PARAM, page, -1);
++      for (i = 0; i < len; i++)
++              p[i] = chip->read_byte(mtd);
++
++      return 0;
++}
++
++/**
++ * nand_change_read_column_op - Do a CHANGE READ COLUMN operation
++ * @chip: The NAND chip
++ * @offset_in_page: offset within the page
++ * @buf: buffer used to store the data
++ * @len: length of the buffer
++ * @force_8bit: force 8-bit bus access
++ *
++ * This function issues a CHANGE READ COLUMN operation.
++ * This function does not select/unselect the CS line.
++ *
++ * Returns 0 on success, a negative error code otherwise.
++ */
++int nand_change_read_column_op(struct nand_chip *chip,
++                             unsigned int offset_in_page, void *buf,
++                             unsigned int len, bool force_8bit)
++{
++      struct mtd_info *mtd = nand_to_mtd(chip);
++
++      if (len && !buf)
++              return -EINVAL;
++
++      if (offset_in_page + len > mtd->writesize + mtd->oobsize)
++              return -EINVAL;
++
++      chip->cmdfunc(mtd, NAND_CMD_RNDOUT, offset_in_page, -1);
++      if (len)
++              chip->read_buf(mtd, buf, len);
++
++      return 0;
++}
++EXPORT_SYMBOL_GPL(nand_change_read_column_op);
++
++/**
++ * nand_read_oob_op - Do a READ OOB operation
++ * @chip: The NAND chip
++ * @page: page to read
++ * @offset_in_oob: offset within the OOB area
++ * @buf: buffer used to store the data
++ * @len: length of the buffer
++ *
++ * This function issues a READ OOB operation.
++ * This function does not select/unselect the CS line.
++ *
++ * Returns 0 on success, a negative error code otherwise.
++ */
++int nand_read_oob_op(struct nand_chip *chip, unsigned int page,
++                   unsigned int offset_in_oob, void *buf, unsigned int len)
++{
++      struct mtd_info *mtd = nand_to_mtd(chip);
++
++      if (len && !buf)
++              return -EINVAL;
++
++      if (offset_in_oob + len > mtd->oobsize)
++              return -EINVAL;
++
++      chip->cmdfunc(mtd, NAND_CMD_READOOB, offset_in_oob, page);
++      if (len)
++              chip->read_buf(mtd, buf, len);
++
++      return 0;
++}
++EXPORT_SYMBOL_GPL(nand_read_oob_op);
++
++/**
++ * nand_prog_page_begin_op - starts a PROG PAGE operation
++ * @chip: The NAND chip
++ * @page: page to write
++ * @offset_in_page: offset within the page
++ * @buf: buffer containing the data to write to the page
++ * @len: length of the buffer
++ *
++ * This function issues the first half of a PROG PAGE operation.
++ * This function does not select/unselect the CS line.
++ *
++ * Returns 0 on success, a negative error code otherwise.
++ */
++int nand_prog_page_begin_op(struct nand_chip *chip, unsigned int page,
++                          unsigned int offset_in_page, const void *buf,
++                          unsigned int len)
++{
++      struct mtd_info *mtd = nand_to_mtd(chip);
++
++      if (len && !buf)
++              return -EINVAL;
++
++      if (offset_in_page + len > mtd->writesize + mtd->oobsize)
++              return -EINVAL;
++
++      chip->cmdfunc(mtd, NAND_CMD_SEQIN, offset_in_page, page);
++
++      if (buf)
++              chip->write_buf(mtd, buf, len);
++
++      return 0;
++}
++EXPORT_SYMBOL_GPL(nand_prog_page_begin_op);
++
++/**
++ * nand_prog_page_end_op - ends a PROG PAGE operation
++ * @chip: The NAND chip
++ *
++ * This function issues the second half of a PROG PAGE operation.
++ * This function does not select/unselect the CS line.
++ *
++ * Returns 0 on success, a negative error code otherwise.
++ */
++int nand_prog_page_end_op(struct nand_chip *chip)
++{
++      struct mtd_info *mtd = nand_to_mtd(chip);
++      int status;
++
++      chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
++
++      status = chip->waitfunc(mtd, chip);
++      if (status & NAND_STATUS_FAIL)
++              return -EIO;
++
++      return 0;
++}
++EXPORT_SYMBOL_GPL(nand_prog_page_end_op);
++
++/**
++ * nand_prog_page_op - Do a full PROG PAGE operation
++ * @chip: The NAND chip
++ * @page: page to write
++ * @offset_in_page: offset within the page
++ * @buf: buffer containing the data to write to the page
++ * @len: length of the buffer
++ *
++ * This function issues a full PROG PAGE operation.
++ * This function does not select/unselect the CS line.
++ *
++ * Returns 0 on success, a negative error code otherwise.
++ */
++int nand_prog_page_op(struct nand_chip *chip, unsigned int page,
++                    unsigned int offset_in_page, const void *buf,
++                    unsigned int len)
++{
++      struct mtd_info *mtd = nand_to_mtd(chip);
++      int status;
++
++      if (!len || !buf)
++              return -EINVAL;
++
++      if (offset_in_page + len > mtd->writesize + mtd->oobsize)
++              return -EINVAL;
++
++      chip->cmdfunc(mtd, NAND_CMD_SEQIN, offset_in_page, page);
++      chip->write_buf(mtd, buf, len);
++      chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
++
++      status = chip->waitfunc(mtd, chip);
++      if (status & NAND_STATUS_FAIL)
++              return -EIO;
++
++      return 0;
++}
++EXPORT_SYMBOL_GPL(nand_prog_page_op);
++
++/**
++ * nand_change_write_column_op - Do a CHANGE WRITE COLUMN operation
++ * @chip: The NAND chip
++ * @offset_in_page: offset within the page
++ * @buf: buffer containing the data to send to the NAND
++ * @len: length of the buffer
++ * @force_8bit: force 8-bit bus access
++ *
++ * This function issues a CHANGE WRITE COLUMN operation.
++ * This function does not select/unselect the CS line.
++ *
++ * Returns 0 on success, a negative error code otherwise.
++ */
++int nand_change_write_column_op(struct nand_chip *chip,
++                              unsigned int offset_in_page,
++                              const void *buf, unsigned int len,
++                              bool force_8bit)
++{
++      struct mtd_info *mtd = nand_to_mtd(chip);
++
++      if (len && !buf)
++              return -EINVAL;
++
++      if (offset_in_page + len > mtd->writesize + mtd->oobsize)
++              return -EINVAL;
++
++      chip->cmdfunc(mtd, NAND_CMD_RNDIN, offset_in_page, -1);
++      if (len)
++              chip->write_buf(mtd, buf, len);
++
++      return 0;
++}
++EXPORT_SYMBOL_GPL(nand_change_write_column_op);
++
++/**
++ * nand_readid_op - Do a READID operation
++ * @chip: The NAND chip
++ * @addr: address cycle to pass after the READID command
++ * @buf: buffer used to store the ID
++ * @len: length of the buffer
++ *
++ * This function sends a READID command and reads back the ID returned by the
++ * NAND.
++ * This function does not select/unselect the CS line.
++ *
++ * Returns 0 on success, a negative error code otherwise.
++ */
++int nand_readid_op(struct nand_chip *chip, u8 addr, void *buf,
++                 unsigned int len)
++{
++      struct mtd_info *mtd = nand_to_mtd(chip);
++      unsigned int i;
++      u8 *id = buf;
++
++      if (len && !buf)
++              return -EINVAL;
++
++      chip->cmdfunc(mtd, NAND_CMD_READID, addr, -1);
++
++      for (i = 0; i < len; i++)
++              id[i] = chip->read_byte(mtd);
++
++      return 0;
++}
++EXPORT_SYMBOL_GPL(nand_readid_op);
++
++/**
++ * nand_status_op - Do a STATUS operation
++ * @chip: The NAND chip
++ * @status: out variable to store the NAND status
++ *
++ * This function sends a STATUS command and reads back the status returned by
++ * the NAND.
++ * This function does not select/unselect the CS line.
++ *
++ * Returns 0 on success, a negative error code otherwise.
++ */
++int nand_status_op(struct nand_chip *chip, u8 *status)
++{
++      struct mtd_info *mtd = nand_to_mtd(chip);
++
++      chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
++      if (status)
++              *status = chip->read_byte(mtd);
++
++      return 0;
++}
++EXPORT_SYMBOL_GPL(nand_status_op);
++
++/**
++ * nand_exit_status_op - Exit a STATUS operation
++ * @chip: The NAND chip
++ *
++ * This function sends a READ0 command to cancel the effect of the STATUS
++ * command to avoid reading only the status until a new read command is sent.
++ *
++ * This function does not select/unselect the CS line.
++ *
++ * Returns 0 on success, a negative error code otherwise.
++ */
++int nand_exit_status_op(struct nand_chip *chip)
++{
++      struct mtd_info *mtd = nand_to_mtd(chip);
++
++      chip->cmdfunc(mtd, NAND_CMD_READ0, -1, -1);
++
++      return 0;
++}
++EXPORT_SYMBOL_GPL(nand_exit_status_op);
++
++/**
++ * nand_erase_op - Do an erase operation
++ * @chip: The NAND chip
++ * @eraseblock: block to erase
++ *
++ * This function sends an ERASE command and waits for the NAND to be ready
++ * before returning.
++ * This function does not select/unselect the CS line.
++ *
++ * Returns 0 on success, a negative error code otherwise.
++ */
++int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock)
++{
++      struct mtd_info *mtd = nand_to_mtd(chip);
++      unsigned int page = eraseblock <<
++                          (chip->phys_erase_shift - chip->page_shift);
++      int status;
++
++      chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page);
++      chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1);
++
++      status = chip->waitfunc(mtd, chip);
++      if (status < 0)
++              return status;
++
++      if (status & NAND_STATUS_FAIL)
++              return -EIO;
++
++      return 0;
++}
++EXPORT_SYMBOL_GPL(nand_erase_op);
++
++/**
++ * nand_set_features_op - Do a SET FEATURES operation
++ * @chip: The NAND chip
++ * @feature: feature id
++ * @data: 4 bytes of data
++ *
++ * This function sends a SET FEATURES command and waits for the NAND to be
++ * ready before returning.
++ * This function does not select/unselect the CS line.
++ *
++ * Returns 0 on success, a negative error code otherwise.
++ */
++static int nand_set_features_op(struct nand_chip *chip, u8 feature,
++                              const void *data)
++{
++      struct mtd_info *mtd = nand_to_mtd(chip);
++      const u8 *params = data;
++      int i, status;
++
++      chip->cmdfunc(mtd, NAND_CMD_SET_FEATURES, feature, -1);
++      for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
++              chip->write_byte(mtd, params[i]);
++
++      status = chip->waitfunc(mtd, chip);
++      if (status & NAND_STATUS_FAIL)
++              return -EIO;
++
++      return 0;
++}
++
++/**
++ * nand_get_features_op - Do a GET FEATURES operation
++ * @chip: The NAND chip
++ * @feature: feature id
++ * @data: 4 bytes of data
++ *
++ * This function sends a GET FEATURES command and waits for the NAND to be
++ * ready before returning.
++ * This function does not select/unselect the CS line.
++ *
++ * Returns 0 on success, a negative error code otherwise.
++ */
++static int nand_get_features_op(struct nand_chip *chip, u8 feature,
++                              void *data)
++{
++      struct mtd_info *mtd = nand_to_mtd(chip);
++      u8 *params = data;
++      int i;
++
++      chip->cmdfunc(mtd, NAND_CMD_GET_FEATURES, feature, -1);
++      for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
++              params[i] = chip->read_byte(mtd);
++
++      return 0;
++}
++
++/**
++ * nand_reset_op - Do a reset operation
++ * @chip: The NAND chip
++ *
++ * This function sends a RESET command and waits for the NAND to be ready
++ * before returning.
++ * This function does not select/unselect the CS line.
++ *
++ * Returns 0 on success, a negative error code otherwise.
++ */
++int nand_reset_op(struct nand_chip *chip)
++{
++      struct mtd_info *mtd = nand_to_mtd(chip);
++
++      chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
++
++      return 0;
++}
++EXPORT_SYMBOL_GPL(nand_reset_op);
++
++/**
++ * nand_read_data_op - Read data from the NAND
++ * @chip: The NAND chip
++ * @buf: buffer used to store the data
++ * @len: length of the buffer
++ * @force_8bit: force 8-bit bus access
++ *
++ * This function does a raw data read on the bus. Usually used after launching
++ * another NAND operation like nand_read_page_op().
++ * This function does not select/unselect the CS line.
++ *
++ * Returns 0 on success, a negative error code otherwise.
++ */
++int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len,
++                    bool force_8bit)
++{
++      struct mtd_info *mtd = nand_to_mtd(chip);
++
++      if (!len || !buf)
++              return -EINVAL;
++
++      if (force_8bit) {
++              u8 *p = buf;
++              unsigned int i;
++
++              for (i = 0; i < len; i++)
++                      p[i] = chip->read_byte(mtd);
++      } else {
++              chip->read_buf(mtd, buf, len);
++      }
++
++      return 0;
++}
++EXPORT_SYMBOL_GPL(nand_read_data_op);
++
++/**
++ * nand_write_data_op - Write data from the NAND
++ * @chip: The NAND chip
++ * @buf: buffer containing the data to send on the bus
++ * @len: length of the buffer
++ * @force_8bit: force 8-bit bus access
++ *
++ * This function does a raw data write on the bus. Usually used after launching
++ * another NAND operation like nand_prog_page_begin_op().
++ * This function does not select/unselect the CS line.
++ *
++ * Returns 0 on success, a negative error code otherwise.
++ */
++int nand_write_data_op(struct nand_chip *chip, const void *buf,
++                     unsigned int len, bool force_8bit)
++{
++      struct mtd_info *mtd = nand_to_mtd(chip);
++
++      if (!len || !buf)
++              return -EINVAL;
++
++      if (force_8bit) {
++              const u8 *p = buf;
++              unsigned int i;
++
++              for (i = 0; i < len; i++)
++                      chip->write_byte(mtd, p[i]);
++      } else {
++              chip->write_buf(mtd, buf, len);
++      }
++
++      return 0;
++}
++EXPORT_SYMBOL_GPL(nand_write_data_op);
++
++/**
+  * nand_reset - Reset and initialize a NAND device
+  * @chip: The NAND chip
+  * @chipnr: Internal die id
+@@ -1235,8 +1776,10 @@ int nand_reset(struct nand_chip *chip, i
+        * interface settings, hence this weird ->select_chip() dance.
+        */
+       chip->select_chip(mtd, chipnr);
+-      chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
++      ret = nand_reset_op(chip);
+       chip->select_chip(mtd, -1);
++      if (ret)
++              return ret;
+       chip->select_chip(mtd, chipnr);
+       ret = nand_setup_data_interface(chip, chipnr);
+@@ -1392,9 +1935,19 @@ EXPORT_SYMBOL(nand_check_erased_ecc_chun
+ int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
+                      uint8_t *buf, int oob_required, int page)
+ {
+-      chip->read_buf(mtd, buf, mtd->writesize);
+-      if (oob_required)
+-              chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
++      int ret;
++
++      ret = nand_read_data_op(chip, buf, mtd->writesize, false);
++      if (ret)
++              return ret;
++
++      if (oob_required) {
++              ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize,
++                                      false);
++              if (ret)
++                      return ret;
++      }
++
+       return 0;
+ }
+ EXPORT_SYMBOL(nand_read_page_raw);
+@@ -1416,29 +1969,46 @@ static int nand_read_page_raw_syndrome(s
+       int eccsize = chip->ecc.size;
+       int eccbytes = chip->ecc.bytes;
+       uint8_t *oob = chip->oob_poi;
+-      int steps, size;
++      int steps, size, ret;
+       for (steps = chip->ecc.steps; steps > 0; steps--) {
+-              chip->read_buf(mtd, buf, eccsize);
++              ret = nand_read_data_op(chip, buf, eccsize, false);
++              if (ret)
++                      return ret;
++
+               buf += eccsize;
+               if (chip->ecc.prepad) {
+-                      chip->read_buf(mtd, oob, chip->ecc.prepad);
++                      ret = nand_read_data_op(chip, oob, chip->ecc.prepad,
++                                              false);
++                      if (ret)
++                              return ret;
++
+                       oob += chip->ecc.prepad;
+               }
+-              chip->read_buf(mtd, oob, eccbytes);
++              ret = nand_read_data_op(chip, oob, eccbytes, false);
++              if (ret)
++                      return ret;
++
+               oob += eccbytes;
+               if (chip->ecc.postpad) {
+-                      chip->read_buf(mtd, oob, chip->ecc.postpad);
++                      ret = nand_read_data_op(chip, oob, chip->ecc.postpad,
++                                              false);
++                      if (ret)
++                              return ret;
++
+                       oob += chip->ecc.postpad;
+               }
+       }
+       size = mtd->oobsize - (oob - chip->oob_poi);
+-      if (size)
+-              chip->read_buf(mtd, oob, size);
++      if (size) {
++              ret = nand_read_data_op(chip, oob, size, false);
++              if (ret)
++                      return ret;
++      }
+       return 0;
+ }
+@@ -1527,7 +2097,9 @@ static int nand_read_subpage(struct mtd_
+               chip->cmdfunc(mtd, NAND_CMD_RNDOUT, data_col_addr, -1);
+       p = bufpoi + data_col_addr;
+-      chip->read_buf(mtd, p, datafrag_len);
++      ret = nand_read_data_op(chip, p, datafrag_len, false);
++      if (ret)
++              return ret;
+       /* Calculate ECC */
+       for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size)
+@@ -1545,8 +2117,11 @@ static int nand_read_subpage(struct mtd_
+               gaps = 1;
+       if (gaps) {
+-              chip->cmdfunc(mtd, NAND_CMD_RNDOUT, mtd->writesize, -1);
+-              chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
++              ret = nand_change_read_column_op(chip, mtd->writesize,
++                                               chip->oob_poi, mtd->oobsize,
++                                               false);
++              if (ret)
++                      return ret;
+       } else {
+               /*
+                * Send the command to read the particular ECC bytes take care
+@@ -1560,9 +2135,12 @@ static int nand_read_subpage(struct mtd_
+                   (busw - 1))
+                       aligned_len++;
+-              chip->cmdfunc(mtd, NAND_CMD_RNDOUT,
+-                            mtd->writesize + aligned_pos, -1);
+-              chip->read_buf(mtd, &chip->oob_poi[aligned_pos], aligned_len);
++              ret = nand_change_read_column_op(chip,
++                                               mtd->writesize + aligned_pos,
++                                               &chip->oob_poi[aligned_pos],
++                                               aligned_len, false);
++              if (ret)
++                      return ret;
+       }
+       ret = mtd_ooblayout_get_eccbytes(mtd, chip->buffers->ecccode,
+@@ -1619,10 +2197,17 @@ static int nand_read_page_hwecc(struct m
+       for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+               chip->ecc.hwctl(mtd, NAND_ECC_READ);
+-              chip->read_buf(mtd, p, eccsize);
++
++              ret = nand_read_data_op(chip, p, eccsize, false);
++              if (ret)
++                      return ret;
++
+               chip->ecc.calculate(mtd, p, &ecc_calc[i]);
+       }
+-      chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
++
++      ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize, false);
++      if (ret)
++              return ret;
+       ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
+                                        chip->ecc.total);
+@@ -1681,9 +2266,13 @@ static int nand_read_page_hwecc_oob_firs
+       unsigned int max_bitflips = 0;
+       /* Read the OOB area first */
+-      chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
+-      chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+-      chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
++      ret = nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
++      if (ret)
++              return ret;
++
++      ret = nand_read_page_op(chip, page, 0, NULL, 0);
++      if (ret)
++              return ret;
+       ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
+                                        chip->ecc.total);
+@@ -1694,7 +2283,11 @@ static int nand_read_page_hwecc_oob_firs
+               int stat;
+               chip->ecc.hwctl(mtd, NAND_ECC_READ);
+-              chip->read_buf(mtd, p, eccsize);
++
++              ret = nand_read_data_op(chip, p, eccsize, false);
++              if (ret)
++                      return ret;
++
+               chip->ecc.calculate(mtd, p, &ecc_calc[i]);
+               stat = chip->ecc.correct(mtd, p, &ecc_code[i], NULL);
+@@ -1731,7 +2324,7 @@ static int nand_read_page_hwecc_oob_firs
+ static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
+                                  uint8_t *buf, int oob_required, int page)
+ {
+-      int i, eccsize = chip->ecc.size;
++      int ret, i, eccsize = chip->ecc.size;
+       int eccbytes = chip->ecc.bytes;
+       int eccsteps = chip->ecc.steps;
+       int eccpadbytes = eccbytes + chip->ecc.prepad + chip->ecc.postpad;
+@@ -1743,21 +2336,36 @@ static int nand_read_page_syndrome(struc
+               int stat;
+               chip->ecc.hwctl(mtd, NAND_ECC_READ);
+-              chip->read_buf(mtd, p, eccsize);
++
++              ret = nand_read_data_op(chip, p, eccsize, false);
++              if (ret)
++                      return ret;
+               if (chip->ecc.prepad) {
+-                      chip->read_buf(mtd, oob, chip->ecc.prepad);
++                      ret = nand_read_data_op(chip, oob, chip->ecc.prepad,
++                                              false);
++                      if (ret)
++                              return ret;
++
+                       oob += chip->ecc.prepad;
+               }
+               chip->ecc.hwctl(mtd, NAND_ECC_READSYN);
+-              chip->read_buf(mtd, oob, eccbytes);
++
++              ret = nand_read_data_op(chip, oob, eccbytes, false);
++              if (ret)
++                      return ret;
++
+               stat = chip->ecc.correct(mtd, p, oob, NULL);
+               oob += eccbytes;
+               if (chip->ecc.postpad) {
+-                      chip->read_buf(mtd, oob, chip->ecc.postpad);
++                      ret = nand_read_data_op(chip, oob, chip->ecc.postpad,
++                                              false);
++                      if (ret)
++                              return ret;
++
+                       oob += chip->ecc.postpad;
+               }
+@@ -1781,8 +2389,11 @@ static int nand_read_page_syndrome(struc
+       /* Calculate remaining oob bytes */
+       i = mtd->oobsize - (oob - chip->oob_poi);
+-      if (i)
+-              chip->read_buf(mtd, oob, i);
++      if (i) {
++              ret = nand_read_data_op(chip, oob, i, false);
++              if (ret)
++                      return ret;
++      }
+       return max_bitflips;
+ }
+@@ -1903,8 +2514,11 @@ static int nand_do_read_ops(struct mtd_i
+                                                __func__, buf);
+ read_retry:
+-                      if (nand_standard_page_accessors(&chip->ecc))
+-                              chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
++                      if (nand_standard_page_accessors(&chip->ecc)) {
++                              ret = nand_read_page_op(chip, page, 0, NULL, 0);
++                              if (ret)
++                                      break;
++                      }
+                       /*
+                        * Now read the page into the buffer.  Absent an error,
+@@ -2063,9 +2677,7 @@ static int nand_read(struct mtd_info *mt
+  */
+ int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page)
+ {
+-      chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
+-      chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+-      return 0;
++      return nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
+ }
+ EXPORT_SYMBOL(nand_read_oob_std);
+@@ -2083,25 +2695,43 @@ int nand_read_oob_syndrome(struct mtd_in
+       int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
+       int eccsize = chip->ecc.size;
+       uint8_t *bufpoi = chip->oob_poi;
+-      int i, toread, sndrnd = 0, pos;
++      int i, toread, sndrnd = 0, pos, ret;
++
++      ret = nand_read_page_op(chip, page, chip->ecc.size, NULL, 0);
++      if (ret)
++              return ret;
+-      chip->cmdfunc(mtd, NAND_CMD_READ0, chip->ecc.size, page);
+       for (i = 0; i < chip->ecc.steps; i++) {
+               if (sndrnd) {
++                      int ret;
++
+                       pos = eccsize + i * (eccsize + chunk);
+                       if (mtd->writesize > 512)
+-                              chip->cmdfunc(mtd, NAND_CMD_RNDOUT, pos, -1);
++                              ret = nand_change_read_column_op(chip, pos,
++                                                               NULL, 0,
++                                                               false);
+                       else
+-                              chip->cmdfunc(mtd, NAND_CMD_READ0, pos, page);
++                              ret = nand_read_page_op(chip, page, pos, NULL,
++                                                      0);
++
++                      if (ret)
++                              return ret;
+               } else
+                       sndrnd = 1;
+               toread = min_t(int, length, chunk);
+-              chip->read_buf(mtd, bufpoi, toread);
++
++              ret = nand_read_data_op(chip, bufpoi, toread, false);
++              if (ret)
++                      return ret;
++
+               bufpoi += toread;
+               length -= toread;
+       }
+-      if (length > 0)
+-              chip->read_buf(mtd, bufpoi, length);
++      if (length > 0) {
++              ret = nand_read_data_op(chip, bufpoi, length, false);
++              if (ret)
++                      return ret;
++      }
+       return 0;
+ }
+@@ -2115,18 +2745,8 @@ EXPORT_SYMBOL(nand_read_oob_syndrome);
+  */
+ int nand_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page)
+ {
+-      int status = 0;
+-      const uint8_t *buf = chip->oob_poi;
+-      int length = mtd->oobsize;
+-
+-      chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize, page);
+-      chip->write_buf(mtd, buf, length);
+-      /* Send command to program the OOB data */
+-      chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+-
+-      status = chip->waitfunc(mtd, chip);
+-
+-      return status & NAND_STATUS_FAIL ? -EIO : 0;
++      return nand_prog_page_op(chip, page, mtd->writesize, chip->oob_poi,
++                               mtd->oobsize);
+ }
+ EXPORT_SYMBOL(nand_write_oob_std);
+@@ -2142,7 +2762,7 @@ int nand_write_oob_syndrome(struct mtd_i
+ {
+       int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
+       int eccsize = chip->ecc.size, length = mtd->oobsize;
+-      int i, len, pos, status = 0, sndcmd = 0, steps = chip->ecc.steps;
++      int ret, i, len, pos, sndcmd = 0, steps = chip->ecc.steps;
+       const uint8_t *bufpoi = chip->oob_poi;
+       /*
+@@ -2156,7 +2776,10 @@ int nand_write_oob_syndrome(struct mtd_i
+       } else
+               pos = eccsize;
+-      chip->cmdfunc(mtd, NAND_CMD_SEQIN, pos, page);
++      ret = nand_prog_page_begin_op(chip, page, pos, NULL, 0);
++      if (ret)
++              return ret;
++
+       for (i = 0; i < steps; i++) {
+               if (sndcmd) {
+                       if (mtd->writesize <= 512) {
+@@ -2165,28 +2788,40 @@ int nand_write_oob_syndrome(struct mtd_i
+                               len = eccsize;
+                               while (len > 0) {
+                                       int num = min_t(int, len, 4);
+-                                      chip->write_buf(mtd, (uint8_t *)&fill,
+-                                                      num);
++
++                                      ret = nand_write_data_op(chip, &fill,
++                                                               num, false);
++                                      if (ret)
++                                              return ret;
++
+                                       len -= num;
+                               }
+                       } else {
+                               pos = eccsize + i * (eccsize + chunk);
+-                              chip->cmdfunc(mtd, NAND_CMD_RNDIN, pos, -1);
++                              ret = nand_change_write_column_op(chip, pos,
++                                                                NULL, 0,
++                                                                false);
++                              if (ret)
++                                      return ret;
+                       }
+               } else
+                       sndcmd = 1;
+               len = min_t(int, length, chunk);
+-              chip->write_buf(mtd, bufpoi, len);
++
++              ret = nand_write_data_op(chip, bufpoi, len, false);
++              if (ret)
++                      return ret;
++
+               bufpoi += len;
+               length -= len;
+       }
+-      if (length > 0)
+-              chip->write_buf(mtd, bufpoi, length);
+-
+-      chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+-      status = chip->waitfunc(mtd, chip);
++      if (length > 0) {
++              ret = nand_write_data_op(chip, bufpoi, length, false);
++              if (ret)
++                      return ret;
++      }
+-      return status & NAND_STATUS_FAIL ? -EIO : 0;
++      return nand_prog_page_end_op(chip);
+ }
+ EXPORT_SYMBOL(nand_write_oob_syndrome);
+@@ -2341,9 +2976,18 @@ static int nand_read_oob(struct mtd_info
+ int nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
+                       const uint8_t *buf, int oob_required, int page)
+ {
+-      chip->write_buf(mtd, buf, mtd->writesize);
+-      if (oob_required)
+-              chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
++      int ret;
++
++      ret = nand_write_data_op(chip, buf, mtd->writesize, false);
++      if (ret)
++              return ret;
++
++      if (oob_required) {
++              ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize,
++                                       false);
++              if (ret)
++                      return ret;
++      }
+       return 0;
+ }
+@@ -2367,29 +3011,46 @@ static int nand_write_page_raw_syndrome(
+       int eccsize = chip->ecc.size;
+       int eccbytes = chip->ecc.bytes;
+       uint8_t *oob = chip->oob_poi;
+-      int steps, size;
++      int steps, size, ret;
+       for (steps = chip->ecc.steps; steps > 0; steps--) {
+-              chip->write_buf(mtd, buf, eccsize);
++              ret = nand_write_data_op(chip, buf, eccsize, false);
++              if (ret)
++                      return ret;
++
+               buf += eccsize;
+               if (chip->ecc.prepad) {
+-                      chip->write_buf(mtd, oob, chip->ecc.prepad);
++                      ret = nand_write_data_op(chip, oob, chip->ecc.prepad,
++                                               false);
++                      if (ret)
++                              return ret;
++
+                       oob += chip->ecc.prepad;
+               }
+-              chip->write_buf(mtd, oob, eccbytes);
++              ret = nand_write_data_op(chip, oob, eccbytes, false);
++              if (ret)
++                      return ret;
++
+               oob += eccbytes;
+               if (chip->ecc.postpad) {
+-                      chip->write_buf(mtd, oob, chip->ecc.postpad);
++                      ret = nand_write_data_op(chip, oob, chip->ecc.postpad,
++                                               false);
++                      if (ret)
++                              return ret;
++
+                       oob += chip->ecc.postpad;
+               }
+       }
+       size = mtd->oobsize - (oob - chip->oob_poi);
+-      if (size)
+-              chip->write_buf(mtd, oob, size);
++      if (size) {
++              ret = nand_write_data_op(chip, oob, size, false);
++              if (ret)
++                      return ret;
++      }
+       return 0;
+ }
+@@ -2443,7 +3104,11 @@ static int nand_write_page_hwecc(struct
+       for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+               chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
+-              chip->write_buf(mtd, p, eccsize);
++
++              ret = nand_write_data_op(chip, p, eccsize, false);
++              if (ret)
++                      return ret;
++
+               chip->ecc.calculate(mtd, p, &ecc_calc[i]);
+       }
+@@ -2452,7 +3117,9 @@ static int nand_write_page_hwecc(struct
+       if (ret)
+               return ret;
+-      chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
++      ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
++      if (ret)
++              return ret;
+       return 0;
+ }
+@@ -2488,7 +3155,9 @@ static int nand_write_subpage_hwecc(stru
+               chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
+               /* write data (untouched subpages already masked by 0xFF) */
+-              chip->write_buf(mtd, buf, ecc_size);
++              ret = nand_write_data_op(chip, buf, ecc_size, false);
++              if (ret)
++                      return ret;
+               /* mask ECC of un-touched subpages by padding 0xFF */
+               if ((step < start_step) || (step > end_step))
+@@ -2515,7 +3184,9 @@ static int nand_write_subpage_hwecc(stru
+               return ret;
+       /* write OOB buffer to NAND device */
+-      chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
++      ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
++      if (ret)
++              return ret;
+       return 0;
+ }
+@@ -2542,31 +3213,49 @@ static int nand_write_page_syndrome(stru
+       int eccsteps = chip->ecc.steps;
+       const uint8_t *p = buf;
+       uint8_t *oob = chip->oob_poi;
++      int ret;
+       for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+-
+               chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
+-              chip->write_buf(mtd, p, eccsize);
++
++              ret = nand_write_data_op(chip, p, eccsize, false);
++              if (ret)
++                      return ret;
+               if (chip->ecc.prepad) {
+-                      chip->write_buf(mtd, oob, chip->ecc.prepad);
++                      ret = nand_write_data_op(chip, oob, chip->ecc.prepad,
++                                               false);
++                      if (ret)
++                              return ret;
++
+                       oob += chip->ecc.prepad;
+               }
+               chip->ecc.calculate(mtd, p, oob);
+-              chip->write_buf(mtd, oob, eccbytes);
++
++              ret = nand_write_data_op(chip, oob, eccbytes, false);
++              if (ret)
++                      return ret;
++
+               oob += eccbytes;
+               if (chip->ecc.postpad) {
+-                      chip->write_buf(mtd, oob, chip->ecc.postpad);
++                      ret = nand_write_data_op(chip, oob, chip->ecc.postpad,
++                                               false);
++                      if (ret)
++                              return ret;
++
+                       oob += chip->ecc.postpad;
+               }
+       }
+       /* Calculate remaining oob bytes */
+       i = mtd->oobsize - (oob - chip->oob_poi);
+-      if (i)
+-              chip->write_buf(mtd, oob, i);
++      if (i) {
++              ret = nand_write_data_op(chip, oob, i, false);
++              if (ret)
++                      return ret;
++      }
+       return 0;
+ }
+@@ -2594,8 +3283,11 @@ static int nand_write_page(struct mtd_in
+       else
+               subpage = 0;
+-      if (nand_standard_page_accessors(&chip->ecc))
+-              chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
++      if (nand_standard_page_accessors(&chip->ecc)) {
++              status = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
++              if (status)
++                      return status;
++      }
+       if (unlikely(raw))
+               status = chip->ecc.write_page_raw(mtd, chip, buf,
+@@ -2610,13 +3302,8 @@ static int nand_write_page(struct mtd_in
+       if (status < 0)
+               return status;
+-      if (nand_standard_page_accessors(&chip->ecc)) {
+-              chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+-
+-              status = chip->waitfunc(mtd, chip);
+-              if (status & NAND_STATUS_FAIL)
+-                      return -EIO;
+-      }
++      if (nand_standard_page_accessors(&chip->ecc))
++              return nand_prog_page_end_op(chip);
+       return 0;
+ }
+@@ -2989,17 +3676,12 @@ out:
+ static int single_erase(struct mtd_info *mtd, int page)
+ {
+       struct nand_chip *chip = mtd_to_nand(mtd);
+-      int status;
++      unsigned int eraseblock;
+       /* Send commands to erase a block */
+-      chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page);
+-      chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1);
+-
+-      status = chip->waitfunc(mtd, chip);
+-      if (status < 0)
+-              return status;
++      eraseblock = page >> (chip->phys_erase_shift - chip->page_shift);
+-      return status & NAND_STATUS_FAIL ? -EIO : 0;
++      return nand_erase_op(chip, eraseblock);
+ }
+ /**
+@@ -3226,22 +3908,12 @@ static int nand_max_bad_blocks(struct mt
+ static int nand_onfi_set_features(struct mtd_info *mtd, struct nand_chip *chip,
+                       int addr, uint8_t *subfeature_param)
+ {
+-      int status;
+-      int i;
+-
+       if (!chip->onfi_version ||
+           !(le16_to_cpu(chip->onfi_params.opt_cmd)
+             & ONFI_OPT_CMD_SET_GET_FEATURES))
+               return -EINVAL;
+-      chip->cmdfunc(mtd, NAND_CMD_SET_FEATURES, addr, -1);
+-      for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
+-              chip->write_byte(mtd, subfeature_param[i]);
+-
+-      status = chip->waitfunc(mtd, chip);
+-      if (status & NAND_STATUS_FAIL)
+-              return -EIO;
+-      return 0;
++      return nand_set_features_op(chip, addr, subfeature_param);
+ }
+ /**
+@@ -3254,17 +3926,12 @@ static int nand_onfi_set_features(struct
+ static int nand_onfi_get_features(struct mtd_info *mtd, struct nand_chip *chip,
+                       int addr, uint8_t *subfeature_param)
+ {
+-      int i;
+-
+       if (!chip->onfi_version ||
+           !(le16_to_cpu(chip->onfi_params.opt_cmd)
+             & ONFI_OPT_CMD_SET_GET_FEATURES))
+               return -EINVAL;
+-      chip->cmdfunc(mtd, NAND_CMD_GET_FEATURES, addr, -1);
+-      for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
+-              *subfeature_param++ = chip->read_byte(mtd);
+-      return 0;
++      return nand_get_features_op(chip, addr, subfeature_param);
+ }
+ /**
+@@ -3407,12 +4074,11 @@ static u16 onfi_crc16(u16 crc, u8 const
+ static int nand_flash_detect_ext_param_page(struct nand_chip *chip,
+                                           struct nand_onfi_params *p)
+ {
+-      struct mtd_info *mtd = nand_to_mtd(chip);
+       struct onfi_ext_param_page *ep;
+       struct onfi_ext_section *s;
+       struct onfi_ext_ecc_info *ecc;
+       uint8_t *cursor;
+-      int ret = -EINVAL;
++      int ret;
+       int len;
+       int i;
+@@ -3422,14 +4088,18 @@ static int nand_flash_detect_ext_param_p
+               return -ENOMEM;
+       /* Send our own NAND_CMD_PARAM. */
+-      chip->cmdfunc(mtd, NAND_CMD_PARAM, 0, -1);
++      ret = nand_read_param_page_op(chip, 0, NULL, 0);
++      if (ret)
++              goto ext_out;
+       /* Use the Change Read Column command to skip the ONFI param pages. */
+-      chip->cmdfunc(mtd, NAND_CMD_RNDOUT,
+-                      sizeof(*p) * p->num_of_param_pages , -1);
++      ret = nand_change_read_column_op(chip,
++                                       sizeof(*p) * p->num_of_param_pages,
++                                       ep, len, true);
++      if (ret)
++              goto ext_out;
+-      /* Read out the Extended Parameter Page. */
+-      chip->read_buf(mtd, (uint8_t *)ep, len);
++      ret = -EINVAL;
+       if ((onfi_crc16(ONFI_CRC_BASE, ((uint8_t *)ep) + 2, len - 2)
+               != le16_to_cpu(ep->crc))) {
+               pr_debug("fail in the CRC.\n");
+@@ -3482,19 +4152,23 @@ static int nand_flash_detect_onfi(struct
+ {
+       struct mtd_info *mtd = nand_to_mtd(chip);
+       struct nand_onfi_params *p = &chip->onfi_params;
+-      int i, j;
+-      int val;
++      char id[4];
++      int i, ret, val;
+       /* Try ONFI for unknown chip or LP */
+-      chip->cmdfunc(mtd, NAND_CMD_READID, 0x20, -1);
+-      if (chip->read_byte(mtd) != 'O' || chip->read_byte(mtd) != 'N' ||
+-              chip->read_byte(mtd) != 'F' || chip->read_byte(mtd) != 'I')
++      ret = nand_readid_op(chip, 0x20, id, sizeof(id));
++      if (ret || strncmp(id, "ONFI", 4))
++              return 0;
++
++      ret = nand_read_param_page_op(chip, 0, NULL, 0);
++      if (ret)
+               return 0;
+-      chip->cmdfunc(mtd, NAND_CMD_PARAM, 0, -1);
+       for (i = 0; i < 3; i++) {
+-              for (j = 0; j < sizeof(*p); j++)
+-                      ((uint8_t *)p)[j] = chip->read_byte(mtd);
++              ret = nand_read_data_op(chip, p, sizeof(*p), true);
++              if (ret)
++                      return 0;
++
+               if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 254) ==
+                               le16_to_cpu(p->crc)) {
+                       break;
+@@ -3585,20 +4259,22 @@ static int nand_flash_detect_jedec(struc
+       struct mtd_info *mtd = nand_to_mtd(chip);
+       struct nand_jedec_params *p = &chip->jedec_params;
+       struct jedec_ecc_info *ecc;
+-      int val;
+-      int i, j;
++      char id[5];
++      int i, val, ret;
+       /* Try JEDEC for unknown chip or LP */
+-      chip->cmdfunc(mtd, NAND_CMD_READID, 0x40, -1);
+-      if (chip->read_byte(mtd) != 'J' || chip->read_byte(mtd) != 'E' ||
+-              chip->read_byte(mtd) != 'D' || chip->read_byte(mtd) != 'E' ||
+-              chip->read_byte(mtd) != 'C')
++      ret = nand_readid_op(chip, 0x40, id, sizeof(id));
++      if (ret || strncmp(id, "JEDEC", sizeof(id)))
++              return 0;
++
++      ret = nand_read_param_page_op(chip, 0x40, NULL, 0);
++      if (ret)
+               return 0;
+-      chip->cmdfunc(mtd, NAND_CMD_PARAM, 0x40, -1);
+       for (i = 0; i < 3; i++) {
+-              for (j = 0; j < sizeof(*p); j++)
+-                      ((uint8_t *)p)[j] = chip->read_byte(mtd);
++              ret = nand_read_data_op(chip, p, sizeof(*p), true);
++              if (ret)
++                      return 0;
+               if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 510) ==
+                               le16_to_cpu(p->crc))
+@@ -3877,8 +4553,7 @@ static int nand_detect(struct nand_chip
+ {
+       const struct nand_manufacturer *manufacturer;
+       struct mtd_info *mtd = nand_to_mtd(chip);
+-      int busw;
+-      int i;
++      int busw, ret;
+       u8 *id_data = chip->id.data;
+       u8 maf_id, dev_id;
+@@ -3886,17 +4561,21 @@ static int nand_detect(struct nand_chip
+        * Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx)
+        * after power-up.
+        */
+-      nand_reset(chip, 0);
++      ret = nand_reset(chip, 0);
++      if (ret)
++              return ret;
+       /* Select the device */
+       chip->select_chip(mtd, 0);
+       /* Send the command for reading device ID */
+-      chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
++      ret = nand_readid_op(chip, 0, id_data, 2);
++      if (ret)
++              return ret;
+       /* Read manufacturer and device IDs */
+-      maf_id = chip->read_byte(mtd);
+-      dev_id = chip->read_byte(mtd);
++      maf_id = id_data[0];
++      dev_id = id_data[1];
+       /*
+        * Try again to make sure, as some systems the bus-hold or other
+@@ -3905,11 +4584,10 @@ static int nand_detect(struct nand_chip
+        * not match, ignore the device completely.
+        */
+-      chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
+-
+       /* Read entire ID string */
+-      for (i = 0; i < ARRAY_SIZE(chip->id.data); i++)
+-              id_data[i] = chip->read_byte(mtd);
++      ret = nand_readid_op(chip, 0, id_data, sizeof(chip->id.data));
++      if (ret)
++              return ret;
+       if (id_data[0] != maf_id || id_data[1] != dev_id) {
+               pr_info("second ID read did not match %02x,%02x against %02x,%02x\n",
+@@ -4233,15 +4911,16 @@ int nand_scan_ident(struct mtd_info *mtd
+       /* Check for a chip array */
+       for (i = 1; i < maxchips; i++) {
++              u8 id[2];
++
+               /* See comment in nand_get_flash_type for reset */
+               nand_reset(chip, i);
+               chip->select_chip(mtd, i);
+               /* Send the command for reading device ID */
+-              chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
++              nand_readid_op(chip, 0, id, sizeof(id));
+               /* Read manufacturer and device IDs */
+-              if (nand_maf_id != chip->read_byte(mtd) ||
+-                  nand_dev_id != chip->read_byte(mtd)) {
++              if (nand_maf_id != id[0] || nand_dev_id != id[1]) {
+                       chip->select_chip(mtd, -1);
+                       break;
+               }
+--- a/drivers/mtd/nand/qcom_nandc.c
++++ b/drivers/mtd/nand/qcom_nandc.c
+@@ -1990,7 +1990,7 @@ static int qcom_nandc_write_oob(struct m
+       struct nand_ecc_ctrl *ecc = &chip->ecc;
+       u8 *oob = chip->oob_poi;
+       int data_size, oob_size;
+-      int ret, status = 0;
++      int ret;
+       host->use_ecc = true;
+@@ -2027,11 +2027,7 @@ static int qcom_nandc_write_oob(struct m
+               return -EIO;
+       }
+-      chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+-
+-      status = chip->waitfunc(mtd, chip);
+-
+-      return status & NAND_STATUS_FAIL ? -EIO : 0;
++      return nand_prog_page_end_op(chip);
+ }
+ static int qcom_nandc_block_bad(struct mtd_info *mtd, loff_t ofs)
+@@ -2081,7 +2077,7 @@ static int qcom_nandc_block_markbad(stru
+       struct qcom_nand_host *host = to_qcom_nand_host(chip);
+       struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+       struct nand_ecc_ctrl *ecc = &chip->ecc;
+-      int page, ret, status = 0;
++      int page, ret;
+       clear_read_regs(nandc);
+       clear_bam_transaction(nandc);
+@@ -2114,11 +2110,7 @@ static int qcom_nandc_block_markbad(stru
+               return -EIO;
+       }
+-      chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+-
+-      status = chip->waitfunc(mtd, chip);
+-
+-      return status & NAND_STATUS_FAIL ? -EIO : 0;
++      return nand_prog_page_end_op(chip);
+ }
+ /*
+--- a/include/linux/mtd/rawnand.h
++++ b/include/linux/mtd/rawnand.h
+@@ -1313,6 +1313,35 @@ int nand_write_page_raw(struct mtd_info
+ /* Reset and initialize a NAND device */
+ int nand_reset(struct nand_chip *chip, int chipnr);
++/* NAND operation helpers */
++int nand_reset_op(struct nand_chip *chip);
++int nand_readid_op(struct nand_chip *chip, u8 addr, void *buf,
++                 unsigned int len);
++int nand_status_op(struct nand_chip *chip, u8 *status);
++int nand_exit_status_op(struct nand_chip *chip);
++int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock);
++int nand_read_page_op(struct nand_chip *chip, unsigned int page,
++                    unsigned int offset_in_page, void *buf, unsigned int len);
++int nand_change_read_column_op(struct nand_chip *chip,
++                             unsigned int offset_in_page, void *buf,
++                             unsigned int len, bool force_8bit);
++int nand_read_oob_op(struct nand_chip *chip, unsigned int page,
++                   unsigned int offset_in_page, void *buf, unsigned int len);
++int nand_prog_page_begin_op(struct nand_chip *chip, unsigned int page,
++                          unsigned int offset_in_page, const void *buf,
++                          unsigned int len);
++int nand_prog_page_end_op(struct nand_chip *chip);
++int nand_prog_page_op(struct nand_chip *chip, unsigned int page,
++                    unsigned int offset_in_page, const void *buf,
++                    unsigned int len);
++int nand_change_write_column_op(struct nand_chip *chip,
++                              unsigned int offset_in_page, const void *buf,
++                              unsigned int len, bool force_8bit);
++int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len,
++                    bool force_8bit);
++int nand_write_data_op(struct nand_chip *chip, const void *buf,
++                     unsigned int len, bool force_8bit);
++
+ /* Free resources held by the NAND device */
+ void nand_cleanup(struct nand_chip *chip);
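The helpers exported above are the building blocks the following patches rely on. As a rough illustration only (not part of the patch), a hypothetical driver page-read path built from nand_read_page_op() and nand_read_data_op() could look like the sketch below; my_read_page_raw() is an invented name, while the nand_*_op() calls are the API introduced here.

/*
 * Sketch only: a hypothetical raw page read built from the helpers
 * added above.  my_read_page_raw() is an invented name.
 */
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>

static int my_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
			    uint8_t *buf, int oob_required, int page)
{
	int ret;

	/* READ0 + address cycles only, no data transferred yet */
	ret = nand_read_page_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	/* main data area */
	ret = nand_read_data_op(chip, buf, mtd->writesize, false);
	if (ret)
		return ret;

	/* spare area, only when the caller asked for it */
	if (oob_required)
		ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize,
					false);

	return ret;
}

Passing NULL/0 to nand_read_page_op() issues only the READ0 command and address cycles, leaving the actual data transfer to the subsequent nand_read_data_op() calls.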
diff --git a/target/linux/ipq40xx/patches-4.14/050-0005-mtd-nand-force-drivers-to-explicitly-send-READ-PROG-.patch b/target/linux/ipq40xx/patches-4.14/050-0005-mtd-nand-force-drivers-to-explicitly-send-READ-PROG-.patch
new file mode 100644
index 0000000..e7e2e79
--- /dev/null
@@ -0,0 +1,94 @@
+From 25f815f66a141436df8a4c45e5d2765272aea2ac Mon Sep 17 00:00:00 2001
+From: Boris Brezillon <boris.brezillon@free-electrons.com>
+Date: Thu, 30 Nov 2017 18:01:30 +0100
+Subject: [PATCH 5/7] mtd: nand: force drivers to explicitly send READ/PROG
+ commands
+
+The core currently sends the READ0 and SEQIN+PAGEPROG commands in
+nand_do_read/write_ops(). This is inconsistent with the
+->read/write_oob[_raw]() hooks, which are expected to send these
+commands themselves.
+
+There's already a flag (NAND_ECC_CUSTOM_PAGE_ACCESS) to inform the core
+that a specific controller wants to send the READ/SEQIN+PAGEPROG
+commands on its own, but it's an opt-in flag, and existing drivers are
+unlikely to be updated to pass it.
+
+Moreover, some controllers cannot dissociate the READ/PAGEPROG commands
+from the associated data transfer and ECC engine activation, and
+developers have to hack things in their ->cmdfunc() implementation to
+handle such complex cases, or have to accept the performance penalty of
+sending the same command twice.
+To address this problem we are planning on adding a new interface which
+is passed all information about a NAND operation (including the amount
+of data to transfer) and replacing all calls to ->cmdfunc() with calls to
+this new ->exec_op() hook. But, in order to do that, we need to have all
+->cmdfunc() calls placed near their associated ->read/write_buf/byte()
+calls.
+
+Modify the core and relevant drivers to make NAND_ECC_CUSTOM_PAGE_ACCESS
+the default case, and remove this flag.
+
+Signed-off-by: Boris Brezillon <boris.brezillon@free-electrons.com>
+[miquel.raynal@free-electrons.com: tested, fixed and rebased on nand/next]
+Signed-off-by: Miquel Raynal <miquel.raynal@free-electrons.com>
+Acked-by: Masahiro Yamada <yamada.masahiro@socionext.com>
+---
+ drivers/mtd/nand/qcom_nandc.c | 11 +++++++++++
+ 1 file changed, 11 insertions(+)
+
+--- a/drivers/mtd/nand/qcom_nandc.c
++++ b/drivers/mtd/nand/qcom_nandc.c
+@@ -1725,6 +1725,7 @@ static int qcom_nandc_read_page(struct m
+       u8 *data_buf, *oob_buf = NULL;
+       int ret;
++      nand_read_page_op(chip, page, 0, NULL, 0);
+       data_buf = buf;
+       oob_buf = oob_required ? chip->oob_poi : NULL;
+@@ -1750,6 +1751,7 @@ static int qcom_nandc_read_page_raw(stru
+       int i, ret;
+       int read_loc;
++      nand_read_page_op(chip, page, 0, NULL, 0);
+       data_buf = buf;
+       oob_buf = chip->oob_poi;
+@@ -1850,6 +1852,8 @@ static int qcom_nandc_write_page(struct
+       u8 *data_buf, *oob_buf;
+       int i, ret;
++      nand_prog_page_begin_op(chip, page, 0, NULL, 0);
++
+       clear_read_regs(nandc);
+       clear_bam_transaction(nandc);
+@@ -1902,6 +1906,9 @@ static int qcom_nandc_write_page(struct
+       free_descs(nandc);
++      if (!ret)
++              ret = nand_prog_page_end_op(chip);
++
+       return ret;
+ }
+@@ -1916,6 +1923,7 @@ static int qcom_nandc_write_page_raw(str
+       u8 *data_buf, *oob_buf;
+       int i, ret;
++      nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+       clear_read_regs(nandc);
+       clear_bam_transaction(nandc);
+@@ -1970,6 +1978,9 @@ static int qcom_nandc_write_page_raw(str
+       free_descs(nandc);
++      if (!ret)
++              ret = nand_prog_page_end_op(chip);
++
+       return ret;
+ }
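
In other words, after this patch every ->write_page() implementation is expected to bracket its own data path with the begin/end helpers, exactly as the hunks above do for the qcom driver. A condensed sketch of that shape follows; do_controller_io() is a hypothetical stand-in for the driver-specific descriptor setup and transfer.

    #include <linux/mtd/mtd.h>
    #include <linux/mtd/rawnand.h>

    static int do_controller_io(struct nand_chip *chip, const u8 *buf,
    			    int oob_required);	/* hypothetical data path */

    static int example_write_page(struct mtd_info *mtd, struct nand_chip *chip,
    			      const u8 *buf, int oob_required, int page)
    {
    	int ret;

    	nand_prog_page_begin_op(chip, page, 0, NULL, 0);	/* SEQIN + address */

    	ret = do_controller_io(chip, buf, oob_required);

    	if (!ret)
    		ret = nand_prog_page_end_op(chip);	/* PAGEPROG + status wait */

    	return ret;
    }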
diff --git a/target/linux/ipq40xx/patches-4.14/050-0006-mtd-nand-qcom-Add-a-NULL-check-for-devm_kasprintf.patch b/target/linux/ipq40xx/patches-4.14/050-0006-mtd-nand-qcom-Add-a-NULL-check-for-devm_kasprintf.patch
new file mode 100644 (file)
index 0000000..4ddc014
--- /dev/null
@@ -0,0 +1,26 @@
+From 069f05346d01e7298939f16533953cdf52370be3 Mon Sep 17 00:00:00 2001
+From: Fabio Estevam <fabio.estevam@nxp.com>
+Date: Fri, 5 Jan 2018 18:02:55 -0200
+Subject: [PATCH 6/7] mtd: nand: qcom: Add a NULL check for devm_kasprintf()
+
+devm_kasprintf() may fail, so we should add a NULL check
+and propagate an error on failure.
+
+Signed-off-by: Fabio Estevam <fabio.estevam@nxp.com>
+Signed-off-by: Boris Brezillon <boris.brezillon@free-electrons.com>
+---
+ drivers/mtd/nand/qcom_nandc.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/mtd/nand/qcom_nandc.c
++++ b/drivers/mtd/nand/qcom_nandc.c
+@@ -2639,6 +2639,9 @@ static int qcom_nand_host_init(struct qc
+       nand_set_flash_node(chip, dn);
+       mtd->name = devm_kasprintf(dev, GFP_KERNEL, "qcom_nand.%d", host->cs);
++      if (!mtd->name)
++              return -ENOMEM;
++
+       mtd->owner = THIS_MODULE;
+       mtd->dev.parent = dev;
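
The same rule applies to any devm_kasprintf() user in a probe path. A minimal, generic sketch (example_probe() is illustrative, not from this driver):

    #include <linux/device.h>
    #include <linux/platform_device.h>
    #include <linux/slab.h>

    static int example_probe(struct platform_device *pdev)
    {
    	struct device *dev = &pdev->dev;
    	const char *name;

    	name = devm_kasprintf(dev, GFP_KERNEL, "example.%d", 0);
    	if (!name)
    		return -ENOMEM;	/* propagate instead of dereferencing NULL later */

    	dev_info(dev, "registered %s\n", name);
    	return 0;
    }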
diff --git a/target/linux/ipq40xx/patches-4.14/059-ARM-cpuidle-Add-cpuidle-support-for-QCOM-cpus.patch b/target/linux/ipq40xx/patches-4.14/059-ARM-cpuidle-Add-cpuidle-support-for-QCOM-cpus.patch
new file mode 100644 (file)
index 0000000..5bd58c8
--- /dev/null
@@ -0,0 +1,29 @@
+From 04ca10340f1b4d92e849724d322a7ca225d11539 Mon Sep 17 00:00:00 2001
+From: Lina Iyer <lina.iyer@linaro.org>
+Date: Wed, 25 Mar 2015 14:25:29 -0600
+Subject: [PATCH 59/69] ARM: cpuidle: Add cpuidle support for QCOM cpus
+
+Define ARM_QCOM_CPUIDLE config item to enable cpuidle support.
+
+Cc: Stephen Boyd <sboyd@codeaurora.org>
+Cc: Arnd Bergmann <arnd@arndb.de>
+Cc: Kevin Hilman <khilman@linaro.org>
+Cc: Daniel Lezcano <daniel.lezcano@linaro.org>
+Signed-off-by: Lina Iyer <lina.iyer@linaro.org>
+---
+ drivers/cpuidle/Kconfig.arm | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/drivers/cpuidle/Kconfig.arm
++++ b/drivers/cpuidle/Kconfig.arm
+@@ -75,3 +75,10 @@ config ARM_MVEBU_V7_CPUIDLE
+       depends on ARCH_MVEBU && !ARM64
+       help
+         Select this to enable cpuidle on Armada 370, 38x and XP processors.
++
++config ARM_QCOM_CPUIDLE
++      bool "CPU Idle Driver for QCOM processors"
++      depends on ARCH_QCOM
++      select ARM_CPUIDLE
++      help
++        Select this to enable cpuidle on QCOM processors.
diff --git a/target/linux/ipq40xx/patches-4.14/069-arm-boot-add-dts-files.patch b/target/linux/ipq40xx/patches-4.14/069-arm-boot-add-dts-files.patch
new file mode 100644 (file)
index 0000000..e830565
--- /dev/null
@@ -0,0 +1,24 @@
+From 8f68331e14dff9a101f2d0e1d6bec84a031f27ee Mon Sep 17 00:00:00 2001
+From: John Crispin <john@phrozen.org>
+Date: Thu, 9 Mar 2017 11:03:18 +0100
+Subject: [PATCH 69/69] arm: boot: add dts files
+
+Signed-off-by: John Crispin <john@phrozen.org>
+---
+ arch/arm/boot/dts/Makefile | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/arch/arm/boot/dts/Makefile
++++ b/arch/arm/boot/dts/Makefile
+@@ -697,7 +697,11 @@ dtb-$(CONFIG_ARCH_QCOM) += \
+       qcom-apq8074-dragonboard.dtb \
+       qcom-apq8084-ifc6540.dtb \
+       qcom-apq8084-mtp.dtb \
++      qcom-ipq4019-a42.dtb \
+       qcom-ipq4019-ap.dk01.1-c1.dtb \
++      qcom-ipq4019-ap.dk04.1-c1.dtb \
++      qcom-ipq4019-fritz4040.dtb \
++      qcom-ipq4019-gl-b1300.dtb \
+       qcom-ipq8064-ap148.dtb \
+       qcom-msm8660-surf.dtb \
+       qcom-msm8960-cdp.dtb \
diff --git a/target/linux/ipq40xx/patches-4.14/070-qcom-spm-fix-probe-order.patch b/target/linux/ipq40xx/patches-4.14/070-qcom-spm-fix-probe-order.patch
new file mode 100644 (file)
index 0000000..b7e375d
--- /dev/null
@@ -0,0 +1,16 @@
+Check for SCM availability before attempting to use SPM
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+
+--- a/drivers/soc/qcom/spm.c
++++ b/drivers/soc/qcom/spm.c
+@@ -219,6 +219,9 @@ static int __init qcom_cpuidle_init(stru
+       cpumask_t mask;
+       bool use_scm_power_down = false;
++      if (!qcom_scm_is_available())
++              return -EPROBE_DEFER;
++
+       for (i = 0; ; i++) {
+               state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i);
+               if (!state_node)
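
The fix relies on the standard probe-deferral pattern: if the SCM firmware interface is not available yet, return -EPROBE_DEFER so the driver core retries the probe later instead of failing permanently. A generic sketch (names are illustrative, not taken from spm.c):

    #include <linux/errno.h>
    #include <linux/platform_device.h>
    #include <linux/qcom_scm.h>

    static int example_probe(struct platform_device *pdev)
    {
    	/* dependency not ready yet: ask the driver core to retry this probe */
    	if (!qcom_scm_is_available())
    		return -EPROBE_DEFER;

    	/* ... normal SPM/cpuidle initialisation would follow here ... */
    	return 0;
    }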
diff --git a/target/linux/ipq40xx/patches-4.14/101-ARM-dts-ipq4019-Add-a-few-peripheral-nodes.patch b/target/linux/ipq40xx/patches-4.14/101-ARM-dts-ipq4019-Add-a-few-peripheral-nodes.patch
new file mode 100644 (file)
index 0000000..0f039f2
--- /dev/null
@@ -0,0 +1,188 @@
+From patchwork Mon Jan 29 05:11:16 2018
+Content-Type: text/plain; charset="utf-8"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+Subject: [02/15] ARM: dts: ipq4019: Add a few peripheral nodes
+From: Sricharan R <sricharan@codeaurora.org>
+X-Patchwork-Id: 10189263
+Message-Id: <1517202689-14212-3-git-send-email-sricharan@codeaurora.org>
+To: robh+dt@kernel.org, robh@kernel.org, mark.rutland@arm.com,
+ linux@armlinux.org.uk, andy.gross@linaro.org, david.brown@linaro.org, 
+ catalin.marinas@arm.com, will.deacon@arm.com, sboyd@codeaurora.org,
+ bjorn.andersson@linaro.org, devicetree@vger.kernel.org,
+ linux-kernel@vger.kernel.org, linux-arm-kernel@lists.infradead.org,
+ linux-arm-msm@vger.kernel.org, linux-soc@vger.kernel.org
+Cc: sricharan@codeaurora.org
+Date: Mon, 29 Jan 2018 10:41:16 +0530
+
+Now that the driver updates for some peripherals are in place,
+add i2c, spi, pcie, bam, qpic-nand and scm nodes to enhance the available
+peripheral support.
+
+Signed-off-by: Sricharan R <sricharan@codeaurora.org>
+---
+ arch/arm/boot/dts/qcom-ipq4019.dtsi | 134 ++++++++++++++++++++++++++++++++++++
+ 1 file changed, 134 insertions(+)
+
+--- a/arch/arm/boot/dts/qcom-ipq4019.dtsi
++++ b/arch/arm/boot/dts/qcom-ipq4019.dtsi
+@@ -25,7 +25,9 @@
+       aliases {
+               spi0 = &spi_0;
++              spi1 = &spi_1;
+               i2c0 = &i2c_0;
++              i2c1 = &i2c_1;
+       };
+       cpus {
+@@ -190,6 +192,22 @@
+                       clock-names = "core", "iface";
+                       #address-cells = <1>;
+                       #size-cells = <0>;
++                      dmas = <&blsp_dma 5>, <&blsp_dma 4>;
++                      dma-names = "rx", "tx";
++                      status = "disabled";
++              };
++
++              spi_1: spi@78b6000 { /* BLSP1 QUP2 */
++                      compatible = "qcom,spi-qup-v2.2.1";
++                      reg = <0x78b6000 0x600>;
++                      interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
++                      clocks = <&gcc GCC_BLSP1_QUP2_SPI_APPS_CLK>,
++                              <&gcc GCC_BLSP1_AHB_CLK>;
++                      clock-names = "core", "iface";
++                      #address-cells = <1>;
++                      #size-cells = <0>;
++                      dmas = <&blsp_dma 7>, <&blsp_dma 6>;
++                      dma-names = "rx", "tx";
+                       status = "disabled";
+               };
+@@ -202,9 +220,24 @@
+                       clock-names = "iface", "core";
+                       #address-cells = <1>;
+                       #size-cells = <0>;
++                      dmas = <&blsp_dma 9>, <&blsp_dma 8>;
++                      dma-names = "rx", "tx";
+                       status = "disabled";
+               };
++              i2c_1: i2c@78b8000 { /* BLSP1 QUP4 */
++                      compatible = "qcom,i2c-qup-v2.2.1";
++                      reg = <0x78b8000 0x600>;
++                      interrupts = <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>;
++                      clocks = <&gcc GCC_BLSP1_AHB_CLK>,
++                               <&gcc GCC_BLSP1_QUP2_I2C_APPS_CLK>;
++                      clock-names = "iface", "core";
++                      #address-cells = <1>;
++                      #size-cells = <0>;
++                      dmas = <&blsp_dma 11>, <&blsp_dma 10>;
++                      dma-names = "rx", "tx";
++                      status = "disabled";
++              };
+               cryptobam: dma@8e04000 {
+                       compatible = "qcom,bam-v1.7.0";
+@@ -311,6 +344,101 @@
+                       reg = <0x4ab000 0x4>;
+               };
++              pcie0: pci@40000000 {
++                      compatible = "qcom,pcie-ipq4019", "snps,dw-pcie";
++                      reg =  <0x40000000 0xf1d
++                              0x40000f20 0xa8
++                              0x80000 0x2000
++                              0x40100000 0x1000>;
++                      reg-names = "dbi", "elbi", "parf", "config";
++                      device_type = "pci";
++                      linux,pci-domain = <0>;
++                      bus-range = <0x00 0xff>;
++                      num-lanes = <1>;
++                      #address-cells = <3>;
++                      #size-cells = <2>;
++
++                      ranges = <0x81000000 0 0x40200000 0x40200000 0 0x00100000
++                                0x82000000 0 0x48000000 0x48000000 0 0x10000000>;
++
++                      interrupts = <GIC_SPI 141 IRQ_TYPE_NONE>;
++                      interrupt-names = "msi";
++                      #interrupt-cells = <1>;
++                      interrupt-map-mask = <0 0 0 0x7>;
++                      interrupt-map = <0 0 0 1 &intc 0 142 IRQ_TYPE_LEVEL_HIGH>, /* int_a */
++                                      <0 0 0 2 &intc 0 143 IRQ_TYPE_LEVEL_HIGH>, /* int_b */
++                                      <0 0 0 3 &intc 0 144 IRQ_TYPE_LEVEL_HIGH>, /* int_c */
++                                      <0 0 0 4 &intc 0 145 IRQ_TYPE_LEVEL_HIGH>; /* int_d */
++                      clocks = <&gcc GCC_PCIE_AHB_CLK>,
++                               <&gcc GCC_PCIE_AXI_M_CLK>,
++                               <&gcc GCC_PCIE_AXI_S_CLK>;
++                      clock-names = "aux",
++                                    "master_bus",
++                                    "slave_bus";
++
++                      resets = <&gcc PCIE_AXI_M_ARES>,
++                               <&gcc PCIE_AXI_S_ARES>,
++                               <&gcc PCIE_PIPE_ARES>,
++                               <&gcc PCIE_AXI_M_VMIDMT_ARES>,
++                               <&gcc PCIE_AXI_S_XPU_ARES>,
++                               <&gcc PCIE_PARF_XPU_ARES>,
++                               <&gcc PCIE_PHY_ARES>,
++                               <&gcc PCIE_AXI_M_STICKY_ARES>,
++                               <&gcc PCIE_PIPE_STICKY_ARES>,
++                               <&gcc PCIE_PWR_ARES>,
++                               <&gcc PCIE_AHB_ARES>,
++                               <&gcc PCIE_PHY_AHB_ARES>;
++                      reset-names = "axi_m",
++                                    "axi_s",
++                                    "pipe",
++                                    "axi_m_vmid",
++                                    "axi_s_xpu",
++                                    "parf",
++                                    "phy",
++                                    "axi_m_sticky",
++                                    "pipe_sticky",
++                                    "pwr",
++                                    "ahb",
++                                    "phy_ahb";
++
++                      status = "disabled";
++              };
++
++              qpic_bam: dma@7984000 {
++                      compatible = "qcom,bam-v1.7.0";
++                      reg = <0x7984000 0x1a000>;
++                      interrupts = <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>;
++                      clocks = <&gcc GCC_QPIC_CLK>;
++                      clock-names = "bam_clk";
++                      #dma-cells = <1>;
++                      qcom,ee = <0>;
++                      status = "disabled";
++              };
++
++              nand: qpic-nand@79b0000 {
++                      compatible = "qcom,ipq4019-nand";
++                      reg = <0x79b0000 0x1000>;
++                      #address-cells = <1>;
++                      #size-cells = <0>;
++                      clocks = <&gcc GCC_QPIC_CLK>,
++                               <&gcc GCC_QPIC_AHB_CLK>;
++                      clock-names = "core", "aon";
++
++                      dmas = <&qpic_bam 0>,
++                             <&qpic_bam 1>,
++                             <&qpic_bam 2>;
++                      dma-names = "tx", "rx", "cmd";
++                      status = "disabled";
++
++                      nand@0 {
++                              reg = <0>;
++
++                              nand-ecc-strength = <4>;
++                              nand-ecc-step-size = <512>;
++                              nand-bus-width = <8>;
++                      };
++              };
++
+               wifi0: wifi@a000000 {
+                       compatible = "qcom,ipq4019-wifi";
+                       reg = <0xa000000 0x200000>;
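
The dmas/dma-names pairs added above are what let the BLSP client drivers (i2c-qup, spi-qup) and the NAND driver hand their transfers to the BAM engines. A sketch of how a client typically claims the named channels (function name is illustrative):

    #include <linux/device.h>
    #include <linux/dmaengine.h>
    #include <linux/err.h>

    static int example_request_blsp_dma(struct device *dev,
    				    struct dma_chan **tx, struct dma_chan **rx)
    {
    	*tx = dma_request_chan(dev, "tx");
    	if (IS_ERR(*tx))
    		return PTR_ERR(*tx);	/* often -EPROBE_DEFER until blsp_dma probes */

    	*rx = dma_request_chan(dev, "rx");
    	if (IS_ERR(*rx)) {
    		dma_release_channel(*tx);
    		return PTR_ERR(*rx);
    	}

    	return 0;
    }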
diff --git a/target/linux/ipq40xx/patches-4.14/102-ARM-dts-ipq4019-fix-PCI-range.patch b/target/linux/ipq40xx/patches-4.14/102-ARM-dts-ipq4019-fix-PCI-range.patch
new file mode 100644 (file)
index 0000000..eaccb00
--- /dev/null
@@ -0,0 +1,23 @@
+From 561a7e69d2811f236266ff9222a1e683ebf8b9e0 Mon Sep 17 00:00:00 2001
+From: Mathias Kresin <dev@kresin.me>
+Date: Thu, 1 Mar 2018 20:50:29 +0100
+Subject: [PATCH] ARM: dts: ipq4019: fix PCI range
+
+The PCI range is invalid and PCI-attached devices don't work.
+
+Signed-off-by: Mathias Kresin <dev@kresin.me>
+---
+ arch/arm/boot/dts/qcom-ipq4019.dtsi | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/arm/boot/dts/qcom-ipq4019.dtsi
++++ b/arch/arm/boot/dts/qcom-ipq4019.dtsi
+@@ -359,7 +359,7 @@
+                       #size-cells = <2>;
+                       ranges = <0x81000000 0 0x40200000 0x40200000 0 0x00100000
+-                                0x82000000 0 0x48000000 0x48000000 0 0x10000000>;
++                                0x82000000 0 0x40300000 0x40300000 0 0x400000>;
+                       interrupts = <GIC_SPI 141 IRQ_TYPE_NONE>;
+                       interrupt-names = "msi";
diff --git a/target/linux/ipq40xx/patches-4.14/104-mtd-nand-add-Winbond-manufacturer-and-chip.patch b/target/linux/ipq40xx/patches-4.14/104-mtd-nand-add-Winbond-manufacturer-and-chip.patch
new file mode 100644 (file)
index 0000000..295bc16
--- /dev/null
@@ -0,0 +1,38 @@
+From 07b6d0cdbbda8c917480eceaec668f09e4cf24a5 Mon Sep 17 00:00:00 2001
+From: Christian Lamparter <chunkeey@gmail.com>
+Date: Mon, 14 Nov 2016 23:49:22 +0100
+Subject: [PATCH] mtd: nand: add Winbond manufacturer and chip
+
+This patch adds the W25N01GV NAND to the table of
+known devices. Without this patch the device gets detected:
+
+nand: device found, Manufacturer ID: 0xef, Chip ID: 0xaa
+nand: Unknown NAND 256MiB 1,8V 8-bit
+nand: 256 MiB, SLC, erase size: 64 KiB, page size: 1024, OOB size : 16
+
+Whereas U-Boot identifies it as:
+spi_nand: spi_nand_flash_probe SF NAND ID 00:ef:aa:21
+SF: Detected W25N01GV with page size 2 KiB, total 128 MiB
+
+Due to the page size discrepancy, it's impossible to attach
+ubi volumes on the device.
+
+Signed-off-by: Christian Lamparter <chunkeey@gmail.com>
+---
+ drivers/mtd/nand/nand_ids.c | 4 ++++
+ include/linux/mtd/nand.h    | 1 +
+ 2 files changed, 5 insertions(+)
+
+--- a/drivers/mtd/nand/nand_ids.c
++++ b/drivers/mtd/nand/nand_ids.c
+@@ -54,6 +54,10 @@ struct nand_flash_dev nand_flash_ids[] =
+               { .id = {0xad, 0xde, 0x94, 0xda, 0x74, 0xc4} },
+                 SZ_8K, SZ_8K, SZ_2M, NAND_NEED_SCRAMBLING, 6, 640,
+                 NAND_ECC_INFO(40, SZ_1K), 4 },
++      {"W25N01GV 1G 3.3V 8-bit",
++              { .id = {0xef, 0xaa} },
++                SZ_2K, SZ_128, SZ_128K, NAND_NO_SUBPAGE_WRITE,
++                2, 64, NAND_ECC_INFO(1, SZ_512) },
+       LEGACY_ID_NAND("NAND 4MiB 5V 8-bit",   0x6B, 4, SZ_8K, SP_OPTIONS),
+       LEGACY_ID_NAND("NAND 4MiB 3,3V 8-bit", 0xE3, 4, SZ_8K, SP_OPTIONS),
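
The new entry takes effect because the NAND core matches full-ID table entries before falling back to the legacy extended-ID decode that produced the bogus 1 KiB page size. Roughly (a simplified sketch, not the kernel's exact lookup code):

    #include <linux/mtd/rawnand.h>
    #include <linux/string.h>

    static struct nand_flash_dev *example_find_full_id(const u8 *id_data)
    {
    	struct nand_flash_dev *type;

    	for (type = nand_flash_ids; type->name; type++) {
    		if (!type->id_len)
    			continue;	/* legacy entries are decoded elsewhere */
    		if (!memcmp(type->id, id_data, type->id_len))
    			return type;	/* page/OOB/block sizes come from the table */
    	}

    	return NULL;
    }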
diff --git a/target/linux/ipq40xx/patches-4.14/105-mtd-nor-add-mx25l25635f.patch b/target/linux/ipq40xx/patches-4.14/105-mtd-nor-add-mx25l25635f.patch
new file mode 100644 (file)
index 0000000..ea9d911
--- /dev/null
@@ -0,0 +1,22 @@
+Subject: mtd: spi-nor: add mx25l25635f with SECT_4K
+
+This patch fixes an issue with the creation of the
+UBI volume on the AVM FRITZ!Box 4040. The mx25l25635f
+and mx25l25635e support SECT_4K, which will set the
+erase size to 4K. This is used by UBI to calculate
+VID header offsets. Without this, U-Boot and Linux
+disagree about the layout and refuse to attach
+the UBI volume created by the other.
+
+---
+--- a/drivers/mtd/spi-nor/spi-nor.c
++++ b/drivers/mtd/spi-nor/spi-nor.c
+@@ -1023,7 +1023,7 @@ static const struct flash_info spi_nor_i
+       { "mx25u6435f",  INFO(0xc22537, 0, 64 * 1024, 128, SECT_4K) },
+       { "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, 0) },
+       { "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) },
+-      { "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
++      { "mx25l25635f", INFO(0xc22019, 0, 64 * 1024, 512, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SECT_4K) },
+       { "mx25u25635f", INFO(0xc22539, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_4B_OPCODES) },
+       { "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) },
+       { "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
diff --git a/target/linux/ipq40xx/patches-4.14/305-qcom-ipq4019-use-v2-of-the-kpss-bringup-mechanism.patch b/target/linux/ipq40xx/patches-4.14/305-qcom-ipq4019-use-v2-of-the-kpss-bringup-mechanism.patch
new file mode 100644 (file)
index 0000000..1d08b9d
--- /dev/null
@@ -0,0 +1,109 @@
+From 6a6c067b7ce2b3de4efbafddc134afbea3ddc1a3 Mon Sep 17 00:00:00 2001
+From: Matthew McClintock <mmcclint@codeaurora.org>
+Date: Fri, 8 Apr 2016 15:26:10 -0500
+Subject: [PATCH] qcom: ipq4019: use v2 of the kpss bringup mechanism
+
+v1 was the incorrect choice here and sometimes the board
+would not come up properly.
+
+Signed-off-by: Matthew McClintock <mmcclint@codeaurora.org>
+Signed-off-by: Christian Lamparter <chunkeey@gmail.com>
+---
+Changes:
+       - moved L2-Cache to be a subnode of cpu0
+---
+ arch/arm/boot/dts/qcom-ipq4019.dtsi | 32 ++++++++++++++++++++++++--------
+ 1 file changed, 24 insertions(+), 8 deletions(-)
+
+--- a/arch/arm/boot/dts/qcom-ipq4019.dtsi
++++ b/arch/arm/boot/dts/qcom-ipq4019.dtsi
+@@ -36,19 +36,27 @@
+               cpu@0 {
+                       device_type = "cpu";
+                       compatible = "arm,cortex-a7";
+-                      enable-method = "qcom,kpss-acc-v1";
++                      enable-method = "qcom,kpss-acc-v2";
++                      next-level-cache = <&L2>;
+                       qcom,acc = <&acc0>;
+                       qcom,saw = <&saw0>;
+                       reg = <0x0>;
+                       clocks = <&gcc GCC_APPS_CLK_SRC>;
+                       clock-frequency = <0>;
+                       operating-points-v2 = <&cpu0_opp_table>;
++
++                      L2: l2-cache {
++                              compatible = "qcom,arch-cache";
++                              cache-level = <2>;
++                              qcom,saw = <&saw_l2>;
++                      };
+               };
+               cpu@1 {
+                       device_type = "cpu";
+                       compatible = "arm,cortex-a7";
+-                      enable-method = "qcom,kpss-acc-v1";
++                      enable-method = "qcom,kpss-acc-v2";
++                      next-level-cache = <&L2>;
+                       qcom,acc = <&acc1>;
+                       qcom,saw = <&saw1>;
+                       reg = <0x1>;
+@@ -60,7 +68,8 @@
+               cpu@2 {
+                       device_type = "cpu";
+                       compatible = "arm,cortex-a7";
+-                      enable-method = "qcom,kpss-acc-v1";
++                      enable-method = "qcom,kpss-acc-v2";
++                      next-level-cache = <&L2>;
+                       qcom,acc = <&acc2>;
+                       qcom,saw = <&saw2>;
+                       reg = <0x2>;
+@@ -72,7 +81,8 @@
+               cpu@3 {
+                       device_type = "cpu";
+                       compatible = "arm,cortex-a7";
+-                      enable-method = "qcom,kpss-acc-v1";
++                      enable-method = "qcom,kpss-acc-v2";
++                      next-level-cache = <&L2>;
+                       qcom,acc = <&acc3>;
+                       qcom,saw = <&saw3>;
+                       reg = <0x3>;
+@@ -264,22 +274,22 @@
+               };
+                 acc0: clock-controller@b088000 {
+-                        compatible = "qcom,kpss-acc-v1";
++                        compatible = "qcom,kpss-acc-v2";
+                         reg = <0x0b088000 0x1000>, <0xb008000 0x1000>;
+                 };
+                 acc1: clock-controller@b098000 {
+-                        compatible = "qcom,kpss-acc-v1";
++                        compatible = "qcom,kpss-acc-v2";
+                         reg = <0x0b098000 0x1000>, <0xb008000 0x1000>;
+                 };
+                 acc2: clock-controller@b0a8000 {
+-                        compatible = "qcom,kpss-acc-v1";
++                        compatible = "qcom,kpss-acc-v2";
+                         reg = <0x0b0a8000 0x1000>, <0xb008000 0x1000>;
+                 };
+                 acc3: clock-controller@b0b8000 {
+-                        compatible = "qcom,kpss-acc-v1";
++                        compatible = "qcom,kpss-acc-v2";
+                         reg = <0x0b0b8000 0x1000>, <0xb008000 0x1000>;
+                 };
+@@ -307,6 +317,12 @@
+                         regulator;
+                 };
++              saw_l2: regulator@b012000 {
++                      compatible = "qcom,saw2";
++                      reg = <0xb012000 0x1000>;
++                      regulator;
++              };
++
+               serial@78af000 {
+                       compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
+                       reg = <0x78af000 0x200>;
diff --git a/target/linux/ipq40xx/patches-4.14/306-qcom-ipq4019-add-USB-nodes-to-ipq4019-SoC-device-tre.patch b/target/linux/ipq40xx/patches-4.14/306-qcom-ipq4019-add-USB-nodes-to-ipq4019-SoC-device-tre.patch
new file mode 100644 (file)
index 0000000..cd0f14e
--- /dev/null
@@ -0,0 +1,130 @@
+From ea5f4d6f4716f3a0bb4fc3614b7a0e8c0df1cb81 Mon Sep 17 00:00:00 2001
+From: Matthew McClintock <mmcclint@codeaurora.org>
+Date: Thu, 17 Mar 2016 16:22:28 -0500
+Subject: [PATCH] qcom: ipq4019: add USB nodes to ipq4019 SoC device tree
+
+This adds the SoC nodes to the ipq4019 device tree and
+enables them for the DK01.1 board.
+
+Signed-off-by: Matthew McClintock <mmcclint@codeaurora.org>
+Signed-off-by: Christian Lamparter <chunkeey@gmail.com>
+---
+Changes:
+       - replaced space with tab
+       - added sleep and mock_utmi clocks
+       - added registers for usb2 and usb3 parent node
+	- changed compatible to qca,ipq4019-dwc3
+       - updated usb2 and usb3 names
+         (included the reg - in case they become necessary later)
+---
+ arch/arm/boot/dts/qcom-ipq4019-ap.dk01.1.dtsi | 20 ++++++++
+ arch/arm/boot/dts/qcom-ipq4019.dtsi           | 71 +++++++++++++++++++++++++++
+ 2 files changed, 91 insertions(+)
+
+--- a/arch/arm/boot/dts/qcom-ipq4019-ap.dk01.1.dtsi
++++ b/arch/arm/boot/dts/qcom-ipq4019-ap.dk01.1.dtsi
+@@ -101,5 +101,25 @@
+               wifi@a800000 {
+                       status = "ok";
+               };
++
++              usb3_ss_phy: ssphy@9a000 {
++                      status = "ok";
++              };
++
++              usb3_hs_phy: hsphy@a6000 {
++                      status = "ok";
++              };
++
++              usb3: usb3@8af8800 {
++                      status = "ok";
++              };
++
++              usb2_hs_phy: hsphy@a8000 {
++                      status = "ok";
++              };
++
++              usb2: usb2@60f8800 {
++                      status = "ok";
++              };
+       };
+ };
+--- a/arch/arm/boot/dts/qcom-ipq4019.dtsi
++++ b/arch/arm/boot/dts/qcom-ipq4019.dtsi
+@@ -538,5 +538,76 @@
+                                         "legacy";
+                       status = "disabled";
+               };
++
++              usb3_ss_phy: ssphy@9a000 {
++                      compatible = "qca,uni-ssphy";
++                      reg = <0x9a000 0x800>;
++                      reg-names = "phy_base";
++                      resets = <&gcc USB3_UNIPHY_PHY_ARES>;
++                      reset-names = "por_rst";
++                      status = "disabled";
++              };
++
++              usb3_hs_phy: hsphy@a6000 {
++                      compatible = "qca,baldur-usb3-hsphy";
++                      reg = <0xa6000 0x40>;
++                      reg-names = "phy_base";
++                      resets = <&gcc USB3_HSPHY_POR_ARES>, <&gcc USB3_HSPHY_S_ARES>;
++                      reset-names = "por_rst", "srif_rst";
++                      status = "disabled";
++              };
++
++              usb3@8af8800 {
++                      compatible = "qca,ipq4019-dwc3";
++                      reg = <0x8af8800 0x100>;
++                      #address-cells = <1>;
++                      #size-cells = <1>;
++                      clocks = <&gcc GCC_USB3_MASTER_CLK>,
++                               <&gcc GCC_USB3_SLEEP_CLK>,
++                               <&gcc GCC_USB3_MOCK_UTMI_CLK>;
++                      clock-names = "master", "sleep", "mock_utmi";
++                      ranges;
++                      status = "disabled";
++
++                      dwc3@8a00000 {
++                              compatible = "snps,dwc3";
++                              reg = <0x8a00000 0xf8000>;
++                              interrupts = <0 132 0>;
++                              usb-phy = <&usb3_hs_phy>, <&usb3_ss_phy>;
++                              phy-names = "usb2-phy", "usb3-phy";
++                              dr_mode = "host";
++                      };
++              };
++
++              usb2_hs_phy: hsphy@a8000 {
++                      compatible = "qca,baldur-usb2-hsphy";
++                      reg = <0xa8000 0x40>;
++                      reg-names = "phy_base";
++                      resets = <&gcc USB2_HSPHY_POR_ARES>, <&gcc USB2_HSPHY_S_ARES>;
++                      reset-names = "por_rst", "srif_rst";
++                      status = "disabled";
++              };
++
++              usb2@60f8800 {
++                      compatible = "qca,ipq4019-dwc3";
++                      reg = <0x60f8800 0x100>;
++                      #address-cells = <1>;
++                      #size-cells = <1>;
++                      clocks = <&gcc GCC_USB2_MASTER_CLK>,
++                               <&gcc GCC_USB2_SLEEP_CLK>,
++                               <&gcc GCC_USB2_MOCK_UTMI_CLK>;
++                      clock-names = "master", "sleep", "mock_utmi";
++                      ranges;
++                      status = "disabled";
++
++                      dwc3@6000000 {
++                              compatible = "snps,dwc3";
++                              reg = <0x6000000 0xf8000>;
++                              interrupts = <0 136 0>;
++                              usb-phy = <&usb2_hs_phy>;
++                              phy-names = "usb2-phy";
++                              dr_mode = "host";
++                      };
++              };
+       };
+ };
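
The clock-names above ("master", "sleep", "mock_utmi") are what a dwc3 glue driver looks up by name. A condensed sketch of that consumer side (function and array names are illustrative, error unwinding omitted):

    #include <linux/clk.h>
    #include <linux/device.h>
    #include <linux/err.h>

    static int example_get_usb_clocks(struct device *dev, struct clk *clks[3])
    {
    	static const char * const names[] = { "master", "sleep", "mock_utmi" };
    	int i, ret;

    	for (i = 0; i < 3; i++) {
    		clks[i] = devm_clk_get(dev, names[i]);
    		if (IS_ERR(clks[i]))
    			return PTR_ERR(clks[i]);

    		ret = clk_prepare_enable(clks[i]);
    		if (ret)
    			return ret;	/* unwinding of earlier clocks omitted */
    	}

    	return 0;
    }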
diff --git a/target/linux/ipq40xx/patches-4.14/307-ARM-qcom-Add-IPQ4019-SoC-support.patch b/target/linux/ipq40xx/patches-4.14/307-ARM-qcom-Add-IPQ4019-SoC-support.patch
new file mode 100644 (file)
index 0000000..1dc1c97
--- /dev/null
@@ -0,0 +1,35 @@
+From e7748d641ae37081e2034869491f1629461ae13c Mon Sep 17 00:00:00 2001
+From: Christian Lamparter <chunkeey@gmail.com>
+Date: Sat, 19 Nov 2016 00:58:18 +0100
+Subject: [PATCH] ARM: qcom: Add IPQ4019 SoC support
+
+Add support for the Qualcomm Atheros IPQ4019 SoC.
+
+Signed-off-by: Christian Lamparter <chunkeey@gmail.com>
+---
+ arch/arm/Makefile          | 1 +
+ arch/arm/mach-qcom/Kconfig | 5 +++++
+ 2 files changed, 6 insertions(+)
+
+--- a/arch/arm/Makefile
++++ b/arch/arm/Makefile
+@@ -149,6 +149,7 @@ textofs-$(CONFIG_SA1111) := 0x00208000
+ endif
+ textofs-$(CONFIG_ARCH_MSM8X60) := 0x00208000
+ textofs-$(CONFIG_ARCH_MSM8960) := 0x00208000
++textofs-$(CONFIG_ARCH_IPQ40XX) := 0x00208000
+ textofs-$(CONFIG_ARCH_AXXIA) := 0x00308000
+ # Machine directory name.  This list is sorted alphanumerically
+--- a/arch/arm/mach-qcom/Kconfig
++++ b/arch/arm/mach-qcom/Kconfig
+@@ -27,4 +27,9 @@ config ARCH_MDM9615
+       bool "Enable support for MDM9615"
+       select CLKSRC_QCOM
++config ARCH_IPQ40XX
++      bool "Enable support for IPQ40XX"
++      select CLKSRC_QCOM
++      select HAVE_ARM_ARCH_TIMER
++
+ endif
diff --git a/target/linux/ipq40xx/patches-4.14/310-msm-adhoc-bus-support.patch b/target/linux/ipq40xx/patches-4.14/310-msm-adhoc-bus-support.patch
new file mode 100644 (file)
index 0000000..cd9fd89
--- /dev/null
@@ -0,0 +1,11026 @@
+From: Christian Lamparter <chunkeey@googlemail.com>
+Subject: BUS: add MSM_BUS
+--- a/drivers/bus/Makefile
++++ b/drivers/bus/Makefile
+@@ -11,6 +11,7 @@ obj-$(CONFIG_BRCMSTB_GISB_ARB)       += brcmst
+ obj-$(CONFIG_IMX_WEIM)                += imx-weim.o
+ obj-$(CONFIG_MIPS_CDMM)               += mips_cdmm.o
+ obj-$(CONFIG_MVEBU_MBUS)      += mvebu-mbus.o
++obj-$(CONFIG_BUS_TOPOLOGY_ADHOC)+= msm_bus/
+ # Interconnect bus driver for OMAP SoCs.
+ obj-$(CONFIG_OMAP_INTERCONNECT)       += omap_l3_smx.o omap_l3_noc.o
+--- a/drivers/bus/Kconfig
++++ b/drivers/bus/Kconfig
+@@ -93,6 +93,8 @@ config MVEBU_MBUS
+         Driver needed for the MBus configuration on Marvell EBU SoCs
+         (Kirkwood, Dove, Orion5x, MV78XX0 and Armada 370/XP).
++source "drivers/bus/msm_bus/Kconfig"
++
+ config OMAP_INTERCONNECT
+       tristate "OMAP INTERCONNECT DRIVER"
+       depends on ARCH_OMAP2PLUS
+--- /dev/null
++++ b/include/dt-bindings/msm/msm-bus-ids.h
+@@ -0,0 +1,869 @@
++/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 and
++ * only version 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ */
++
++#ifndef __MSM_BUS_IDS_H
++#define __MSM_BUS_IDS_H
++
++/* Topology related enums */
++#define       MSM_BUS_FAB_DEFAULT 0
++#define       MSM_BUS_FAB_APPSS 0
++#define       MSM_BUS_FAB_SYSTEM 1024
++#define       MSM_BUS_FAB_MMSS 2048
++#define       MSM_BUS_FAB_SYSTEM_FPB 3072
++#define       MSM_BUS_FAB_CPSS_FPB 4096
++
++#define       MSM_BUS_FAB_BIMC 0
++#define       MSM_BUS_FAB_SYS_NOC 1024
++#define       MSM_BUS_FAB_MMSS_NOC 2048
++#define       MSM_BUS_FAB_OCMEM_NOC 3072
++#define       MSM_BUS_FAB_PERIPH_NOC 4096
++#define       MSM_BUS_FAB_CONFIG_NOC 5120
++#define       MSM_BUS_FAB_OCMEM_VNOC 6144
++#define       MSM_BUS_FAB_MMSS_AHB 2049
++#define       MSM_BUS_FAB_A0_NOC 6145
++#define       MSM_BUS_FAB_A1_NOC 6146
++#define       MSM_BUS_FAB_A2_NOC 6147
++
++#define       MSM_BUS_MASTER_FIRST 1
++#define       MSM_BUS_MASTER_AMPSS_M0 1
++#define       MSM_BUS_MASTER_AMPSS_M1 2
++#define       MSM_BUS_APPSS_MASTER_FAB_MMSS 3
++#define       MSM_BUS_APPSS_MASTER_FAB_SYSTEM 4
++#define       MSM_BUS_SYSTEM_MASTER_FAB_APPSS 5
++#define       MSM_BUS_MASTER_SPS 6
++#define       MSM_BUS_MASTER_ADM_PORT0 7
++#define       MSM_BUS_MASTER_ADM_PORT1 8
++#define       MSM_BUS_SYSTEM_MASTER_ADM1_PORT0 9
++#define       MSM_BUS_MASTER_ADM1_PORT1 10
++#define       MSM_BUS_MASTER_LPASS_PROC 11
++#define       MSM_BUS_MASTER_MSS_PROCI 12
++#define       MSM_BUS_MASTER_MSS_PROCD 13
++#define       MSM_BUS_MASTER_MSS_MDM_PORT0 14
++#define       MSM_BUS_MASTER_LPASS 15
++#define       MSM_BUS_SYSTEM_MASTER_CPSS_FPB 16
++#define       MSM_BUS_SYSTEM_MASTER_SYSTEM_FPB 17
++#define       MSM_BUS_SYSTEM_MASTER_MMSS_FPB 18
++#define       MSM_BUS_MASTER_ADM1_CI 19
++#define       MSM_BUS_MASTER_ADM0_CI 20
++#define       MSM_BUS_MASTER_MSS_MDM_PORT1 21
++#define       MSM_BUS_MASTER_MDP_PORT0 22
++#define       MSM_BUS_MASTER_MDP_PORT1 23
++#define       MSM_BUS_MMSS_MASTER_ADM1_PORT0 24
++#define       MSM_BUS_MASTER_ROTATOR 25
++#define       MSM_BUS_MASTER_GRAPHICS_3D 26
++#define       MSM_BUS_MASTER_JPEG_DEC 27
++#define       MSM_BUS_MASTER_GRAPHICS_2D_CORE0 28
++#define       MSM_BUS_MASTER_VFE 29
++#define       MSM_BUS_MASTER_VPE 30
++#define       MSM_BUS_MASTER_JPEG_ENC 31
++#define       MSM_BUS_MASTER_GRAPHICS_2D_CORE1 32
++#define       MSM_BUS_MMSS_MASTER_APPS_FAB 33
++#define       MSM_BUS_MASTER_HD_CODEC_PORT0 34
++#define       MSM_BUS_MASTER_HD_CODEC_PORT1 35
++#define       MSM_BUS_MASTER_SPDM 36
++#define       MSM_BUS_MASTER_RPM 37
++#define       MSM_BUS_MASTER_MSS 38
++#define       MSM_BUS_MASTER_RIVA 39
++#define       MSM_BUS_MASTER_SNOC_VMEM 40
++#define       MSM_BUS_MASTER_MSS_SW_PROC 41
++#define       MSM_BUS_MASTER_MSS_FW_PROC 42
++#define       MSM_BUS_MASTER_HMSS 43
++#define       MSM_BUS_MASTER_GSS_NAV 44
++#define       MSM_BUS_MASTER_PCIE 45
++#define       MSM_BUS_MASTER_SATA 46
++#define       MSM_BUS_MASTER_CRYPTO 47
++#define       MSM_BUS_MASTER_VIDEO_CAP 48
++#define       MSM_BUS_MASTER_GRAPHICS_3D_PORT1 49
++#define       MSM_BUS_MASTER_VIDEO_ENC 50
++#define       MSM_BUS_MASTER_VIDEO_DEC 51
++#define       MSM_BUS_MASTER_LPASS_AHB 52
++#define       MSM_BUS_MASTER_QDSS_BAM 53
++#define       MSM_BUS_MASTER_SNOC_CFG 54
++#define       MSM_BUS_MASTER_CRYPTO_CORE0 55
++#define       MSM_BUS_MASTER_CRYPTO_CORE1 56
++#define       MSM_BUS_MASTER_MSS_NAV 57
++#define       MSM_BUS_MASTER_OCMEM_DMA 58
++#define       MSM_BUS_MASTER_WCSS 59
++#define       MSM_BUS_MASTER_QDSS_ETR 60
++#define       MSM_BUS_MASTER_USB3 61
++#define       MSM_BUS_MASTER_JPEG 62
++#define       MSM_BUS_MASTER_VIDEO_P0 63
++#define       MSM_BUS_MASTER_VIDEO_P1 64
++#define       MSM_BUS_MASTER_MSS_PROC 65
++#define       MSM_BUS_MASTER_JPEG_OCMEM 66
++#define       MSM_BUS_MASTER_MDP_OCMEM 67
++#define       MSM_BUS_MASTER_VIDEO_P0_OCMEM 68
++#define       MSM_BUS_MASTER_VIDEO_P1_OCMEM 69
++#define       MSM_BUS_MASTER_VFE_OCMEM 70
++#define       MSM_BUS_MASTER_CNOC_ONOC_CFG 71
++#define       MSM_BUS_MASTER_RPM_INST 72
++#define       MSM_BUS_MASTER_RPM_DATA 73
++#define       MSM_BUS_MASTER_RPM_SYS 74
++#define       MSM_BUS_MASTER_DEHR 75
++#define       MSM_BUS_MASTER_QDSS_DAP 76
++#define       MSM_BUS_MASTER_TIC 77
++#define       MSM_BUS_MASTER_SDCC_1 78
++#define       MSM_BUS_MASTER_SDCC_3 79
++#define       MSM_BUS_MASTER_SDCC_4 80
++#define       MSM_BUS_MASTER_SDCC_2 81
++#define       MSM_BUS_MASTER_TSIF 82
++#define       MSM_BUS_MASTER_BAM_DMA 83
++#define       MSM_BUS_MASTER_BLSP_2 84
++#define       MSM_BUS_MASTER_USB_HSIC 85
++#define       MSM_BUS_MASTER_BLSP_1 86
++#define       MSM_BUS_MASTER_USB_HS 87
++#define       MSM_BUS_MASTER_PNOC_CFG 88
++#define       MSM_BUS_MASTER_V_OCMEM_GFX3D 89
++#define       MSM_BUS_MASTER_IPA 90
++#define       MSM_BUS_MASTER_QPIC 91
++#define       MSM_BUS_MASTER_MDPE 92
++#define       MSM_BUS_MASTER_USB_HS2 93
++#define       MSM_BUS_MASTER_VPU 94
++#define       MSM_BUS_MASTER_UFS 95
++#define       MSM_BUS_MASTER_BCAST 96
++#define       MSM_BUS_MASTER_CRYPTO_CORE2 97
++#define       MSM_BUS_MASTER_EMAC 98
++#define       MSM_BUS_MASTER_VPU_1 99
++#define       MSM_BUS_MASTER_PCIE_1 100
++#define       MSM_BUS_MASTER_USB3_1 101
++#define       MSM_BUS_MASTER_CNOC_MNOC_MMSS_CFG 102
++#define       MSM_BUS_MASTER_CNOC_MNOC_CFG 103
++#define       MSM_BUS_MASTER_TCU_0 104
++#define       MSM_BUS_MASTER_TCU_1 105
++#define       MSM_BUS_MASTER_CPP 106
++#define       MSM_BUS_MASTER_AUDIO 107
++#define       MSM_BUS_MASTER_PCIE_2 108
++#define       MSM_BUS_MASTER_BLSP_BAM 109
++#define       MSM_BUS_MASTER_USB2_BAM 110
++#define       MSM_BUS_MASTER_ADDS_DMA0 111
++#define       MSM_BUS_MASTER_ADDS_DMA1 112
++#define       MSM_BUS_MASTER_ADDS_DMA2 113
++#define       MSM_BUS_MASTER_ADDS_DMA3 114
++#define       MSM_BUS_MASTER_QPIC_BAM 115
++#define       MSM_BUS_MASTER_SDCC_BAM 116
++#define       MSM_BUS_MASTER_DDRC_SNOC 117
++#define       MSM_BUS_MASTER_WSS_0  118
++#define       MSM_BUS_MASTER_WSS_1  119
++#define       MSM_BUS_MASTER_ESS 120
++#define       MSM_BUS_MASTER_QDSS_BAMNDP 121
++#define       MSM_BUS_MASTER_QDSS_SNOC_CFG 122
++#define       MSM_BUS_MASTER_LAST 130
++
++#define       MSM_BUS_SYSTEM_FPB_MASTER_SYSTEM MSM_BUS_SYSTEM_MASTER_SYSTEM_FPB
++#define       MSM_BUS_CPSS_FPB_MASTER_SYSTEM MSM_BUS_SYSTEM_MASTER_CPSS_FPB
++
++#define       MSM_BUS_SNOC_MM_INT_0 10000
++#define       MSM_BUS_SNOC_MM_INT_1 10001
++#define       MSM_BUS_SNOC_MM_INT_2 10002
++#define       MSM_BUS_SNOC_MM_INT_BIMC 10003
++#define       MSM_BUS_SNOC_INT_0 10004
++#define       MSM_BUS_SNOC_INT_1 10005
++#define       MSM_BUS_SNOC_INT_BIMC 10006
++#define       MSM_BUS_SNOC_BIMC_0_MAS 10007
++#define       MSM_BUS_SNOC_BIMC_1_MAS 10008
++#define       MSM_BUS_SNOC_QDSS_INT 10009
++#define       MSM_BUS_PNOC_SNOC_MAS 10010
++#define       MSM_BUS_PNOC_SNOC_SLV 10011
++#define       MSM_BUS_PNOC_INT_0 10012
++#define       MSM_BUS_PNOC_INT_1 10013
++#define       MSM_BUS_PNOC_M_0 10014
++#define       MSM_BUS_PNOC_M_1 10015
++#define       MSM_BUS_BIMC_SNOC_MAS 10016
++#define       MSM_BUS_BIMC_SNOC_SLV 10017
++#define       MSM_BUS_PNOC_SLV_0 10018
++#define       MSM_BUS_PNOC_SLV_1 10019
++#define       MSM_BUS_PNOC_SLV_2 10020
++#define       MSM_BUS_PNOC_SLV_3 10021
++#define       MSM_BUS_PNOC_SLV_4 10022
++#define       MSM_BUS_PNOC_SLV_8 10023
++#define       MSM_BUS_PNOC_SLV_9 10024
++#define       MSM_BUS_SNOC_BIMC_0_SLV 10025
++#define       MSM_BUS_SNOC_BIMC_1_SLV 10026
++#define       MSM_BUS_MNOC_BIMC_MAS 10027
++#define       MSM_BUS_MNOC_BIMC_SLV 10028
++#define       MSM_BUS_BIMC_MNOC_MAS 10029
++#define       MSM_BUS_BIMC_MNOC_SLV 10030
++#define       MSM_BUS_SNOC_BIMC_MAS 10031
++#define       MSM_BUS_SNOC_BIMC_SLV 10032
++#define       MSM_BUS_CNOC_SNOC_MAS 10033
++#define       MSM_BUS_CNOC_SNOC_SLV 10034
++#define       MSM_BUS_SNOC_CNOC_MAS 10035
++#define       MSM_BUS_SNOC_CNOC_SLV 10036
++#define       MSM_BUS_OVNOC_SNOC_MAS 10037
++#define       MSM_BUS_OVNOC_SNOC_SLV 10038
++#define       MSM_BUS_SNOC_OVNOC_MAS 10039
++#define       MSM_BUS_SNOC_OVNOC_SLV 10040
++#define       MSM_BUS_SNOC_PNOC_MAS 10041
++#define       MSM_BUS_SNOC_PNOC_SLV 10042
++#define       MSM_BUS_BIMC_INT_APPS_EBI 10043
++#define       MSM_BUS_BIMC_INT_APPS_SNOC 10044
++#define       MSM_BUS_SNOC_BIMC_2_MAS 10045
++#define       MSM_BUS_SNOC_BIMC_2_SLV 10046
++#define       MSM_BUS_PNOC_SLV_5 10047
++#define       MSM_BUS_PNOC_SLV_6 10048
++#define       MSM_BUS_PNOC_INT_2 10049
++#define       MSM_BUS_PNOC_INT_3 10050
++#define       MSM_BUS_PNOC_INT_4 10051
++#define       MSM_BUS_PNOC_INT_5 10052
++#define       MSM_BUS_PNOC_INT_6 10053
++#define       MSM_BUS_PNOC_INT_7 10054
++#define       MSM_BUS_BIMC_SNOC_1_MAS 10055
++#define       MSM_BUS_BIMC_SNOC_1_SLV 10056
++#define       MSM_BUS_PNOC_A1NOC_MAS 10057
++#define       MSM_BUS_PNOC_A1NOC_SLV 10058
++#define       MSM_BUS_CNOC_A1NOC_MAS 10059
++#define       MSM_BUS_A0NOC_SNOC_MAS 10060
++#define       MSM_BUS_A0NOC_SNOC_SLV 10061
++#define       MSM_BUS_A1NOC_SNOC_SLV 10062
++#define       MSM_BUS_A1NOC_SNOC_MAS 10063
++#define       MSM_BUS_A2NOC_SNOC_MAS 10064
++#define       MSM_BUS_A2NOC_SNOC_SLV 10065
++#define       MSM_BUS_PNOC_SLV_7 10066
++#define       MSM_BUS_INT_LAST 10067
++
++#define       MSM_BUS_SLAVE_FIRST 512
++#define       MSM_BUS_SLAVE_EBI_CH0 512
++#define       MSM_BUS_SLAVE_EBI_CH1 513
++#define       MSM_BUS_SLAVE_AMPSS_L2 514
++#define       MSM_BUS_APPSS_SLAVE_FAB_MMSS 515
++#define       MSM_BUS_APPSS_SLAVE_FAB_SYSTEM 516
++#define       MSM_BUS_SYSTEM_SLAVE_FAB_APPS 517
++#define       MSM_BUS_SLAVE_SPS 518
++#define       MSM_BUS_SLAVE_SYSTEM_IMEM 519
++#define       MSM_BUS_SLAVE_AMPSS 520
++#define       MSM_BUS_SLAVE_MSS 521
++#define       MSM_BUS_SLAVE_LPASS 522
++#define       MSM_BUS_SYSTEM_SLAVE_CPSS_FPB 523
++#define       MSM_BUS_SYSTEM_SLAVE_SYSTEM_FPB 524
++#define       MSM_BUS_SYSTEM_SLAVE_MMSS_FPB 525
++#define       MSM_BUS_SLAVE_CORESIGHT 526
++#define       MSM_BUS_SLAVE_RIVA 527
++#define       MSM_BUS_SLAVE_SMI 528
++#define       MSM_BUS_MMSS_SLAVE_FAB_APPS 529
++#define       MSM_BUS_MMSS_SLAVE_FAB_APPS_1 530
++#define       MSM_BUS_SLAVE_MM_IMEM 531
++#define       MSM_BUS_SLAVE_CRYPTO 532
++#define       MSM_BUS_SLAVE_SPDM 533
++#define       MSM_BUS_SLAVE_RPM 534
++#define       MSM_BUS_SLAVE_RPM_MSG_RAM 535
++#define       MSM_BUS_SLAVE_MPM 536
++#define       MSM_BUS_SLAVE_PMIC1_SSBI1_A 537
++#define       MSM_BUS_SLAVE_PMIC1_SSBI1_B 538
++#define       MSM_BUS_SLAVE_PMIC1_SSBI1_C 539
++#define       MSM_BUS_SLAVE_PMIC2_SSBI2_A 540
++#define       MSM_BUS_SLAVE_PMIC2_SSBI2_B 541
++#define       MSM_BUS_SLAVE_GSBI1_UART 542
++#define       MSM_BUS_SLAVE_GSBI2_UART 543
++#define       MSM_BUS_SLAVE_GSBI3_UART 544
++#define       MSM_BUS_SLAVE_GSBI4_UART 545
++#define       MSM_BUS_SLAVE_GSBI5_UART 546
++#define       MSM_BUS_SLAVE_GSBI6_UART 547
++#define       MSM_BUS_SLAVE_GSBI7_UART 548
++#define       MSM_BUS_SLAVE_GSBI8_UART 549
++#define       MSM_BUS_SLAVE_GSBI9_UART 550
++#define       MSM_BUS_SLAVE_GSBI10_UART 551
++#define       MSM_BUS_SLAVE_GSBI11_UART 552
++#define       MSM_BUS_SLAVE_GSBI12_UART 553
++#define       MSM_BUS_SLAVE_GSBI1_QUP 554
++#define       MSM_BUS_SLAVE_GSBI2_QUP 555
++#define       MSM_BUS_SLAVE_GSBI3_QUP 556
++#define       MSM_BUS_SLAVE_GSBI4_QUP 557
++#define       MSM_BUS_SLAVE_GSBI5_QUP 558
++#define       MSM_BUS_SLAVE_GSBI6_QUP 559
++#define       MSM_BUS_SLAVE_GSBI7_QUP 560
++#define       MSM_BUS_SLAVE_GSBI8_QUP 561
++#define       MSM_BUS_SLAVE_GSBI9_QUP 562
++#define       MSM_BUS_SLAVE_GSBI10_QUP 563
++#define       MSM_BUS_SLAVE_GSBI11_QUP 564
++#define       MSM_BUS_SLAVE_GSBI12_QUP 565
++#define       MSM_BUS_SLAVE_EBI2_NAND 566
++#define       MSM_BUS_SLAVE_EBI2_CS0 567
++#define       MSM_BUS_SLAVE_EBI2_CS1 568
++#define       MSM_BUS_SLAVE_EBI2_CS2 569
++#define       MSM_BUS_SLAVE_EBI2_CS3 570
++#define       MSM_BUS_SLAVE_EBI2_CS4 571
++#define       MSM_BUS_SLAVE_EBI2_CS5 572
++#define       MSM_BUS_SLAVE_USB_FS1 573
++#define       MSM_BUS_SLAVE_USB_FS2 574
++#define       MSM_BUS_SLAVE_TSIF 575
++#define       MSM_BUS_SLAVE_MSM_TSSC 576
++#define       MSM_BUS_SLAVE_MSM_PDM 577
++#define       MSM_BUS_SLAVE_MSM_DIMEM 578
++#define       MSM_BUS_SLAVE_MSM_TCSR 579
++#define       MSM_BUS_SLAVE_MSM_PRNG 580
++#define       MSM_BUS_SLAVE_GSS 581
++#define       MSM_BUS_SLAVE_SATA 582
++#define       MSM_BUS_SLAVE_USB3 583
++#define       MSM_BUS_SLAVE_WCSS 584
++#define       MSM_BUS_SLAVE_OCIMEM 585
++#define       MSM_BUS_SLAVE_SNOC_OCMEM 586
++#define       MSM_BUS_SLAVE_SERVICE_SNOC 587
++#define       MSM_BUS_SLAVE_QDSS_STM 588
++#define       MSM_BUS_SLAVE_CAMERA_CFG 589
++#define       MSM_BUS_SLAVE_DISPLAY_CFG 590
++#define       MSM_BUS_SLAVE_OCMEM_CFG 591
++#define       MSM_BUS_SLAVE_CPR_CFG 592
++#define       MSM_BUS_SLAVE_CPR_XPU_CFG 593
++#define       MSM_BUS_SLAVE_MISC_CFG 594
++#define       MSM_BUS_SLAVE_MISC_XPU_CFG 595
++#define       MSM_BUS_SLAVE_VENUS_CFG 596
++#define       MSM_BUS_SLAVE_MISC_VENUS_CFG 597
++#define       MSM_BUS_SLAVE_GRAPHICS_3D_CFG 598
++#define       MSM_BUS_SLAVE_MMSS_CLK_CFG 599
++#define       MSM_BUS_SLAVE_MMSS_CLK_XPU_CFG 600
++#define       MSM_BUS_SLAVE_MNOC_MPU_CFG 601
++#define       MSM_BUS_SLAVE_ONOC_MPU_CFG 602
++#define       MSM_BUS_SLAVE_SERVICE_MNOC 603
++#define       MSM_BUS_SLAVE_OCMEM 604
++#define       MSM_BUS_SLAVE_SERVICE_ONOC 605
++#define       MSM_BUS_SLAVE_SDCC_1 606
++#define       MSM_BUS_SLAVE_SDCC_3 607
++#define       MSM_BUS_SLAVE_SDCC_2 608
++#define       MSM_BUS_SLAVE_SDCC_4 609
++#define       MSM_BUS_SLAVE_BAM_DMA 610
++#define       MSM_BUS_SLAVE_BLSP_2 611
++#define       MSM_BUS_SLAVE_USB_HSIC 612
++#define       MSM_BUS_SLAVE_BLSP_1 613
++#define       MSM_BUS_SLAVE_USB_HS 614
++#define       MSM_BUS_SLAVE_PDM 615
++#define       MSM_BUS_SLAVE_PERIPH_APU_CFG 616
++#define       MSM_BUS_SLAVE_PNOC_MPU_CFG 617
++#define       MSM_BUS_SLAVE_PRNG 618
++#define       MSM_BUS_SLAVE_SERVICE_PNOC 619
++#define       MSM_BUS_SLAVE_CLK_CTL 620
++#define       MSM_BUS_SLAVE_CNOC_MSS 621
++#define       MSM_BUS_SLAVE_SECURITY 622
++#define       MSM_BUS_SLAVE_TCSR 623
++#define       MSM_BUS_SLAVE_TLMM 624
++#define       MSM_BUS_SLAVE_CRYPTO_0_CFG 625
++#define       MSM_BUS_SLAVE_CRYPTO_1_CFG 626
++#define       MSM_BUS_SLAVE_IMEM_CFG 627
++#define       MSM_BUS_SLAVE_MESSAGE_RAM 628
++#define       MSM_BUS_SLAVE_BIMC_CFG 629
++#define       MSM_BUS_SLAVE_BOOT_ROM 630
++#define       MSM_BUS_SLAVE_CNOC_MNOC_MMSS_CFG 631
++#define       MSM_BUS_SLAVE_PMIC_ARB 632
++#define       MSM_BUS_SLAVE_SPDM_WRAPPER 633
++#define       MSM_BUS_SLAVE_DEHR_CFG 634
++#define       MSM_BUS_SLAVE_QDSS_CFG 635
++#define       MSM_BUS_SLAVE_RBCPR_CFG 636
++#define       MSM_BUS_SLAVE_RBCPR_QDSS_APU_CFG 637
++#define       MSM_BUS_SLAVE_SNOC_MPU_CFG 638
++#define       MSM_BUS_SLAVE_CNOC_ONOC_CFG 639
++#define       MSM_BUS_SLAVE_CNOC_MNOC_CFG 640
++#define       MSM_BUS_SLAVE_PNOC_CFG 641
++#define       MSM_BUS_SLAVE_SNOC_CFG 642
++#define       MSM_BUS_SLAVE_EBI1_DLL_CFG 643
++#define       MSM_BUS_SLAVE_PHY_APU_CFG 644
++#define       MSM_BUS_SLAVE_EBI1_PHY_CFG 645
++#define       MSM_BUS_SLAVE_SERVICE_CNOC 646
++#define       MSM_BUS_SLAVE_IPS_CFG 647
++#define       MSM_BUS_SLAVE_QPIC 648
++#define       MSM_BUS_SLAVE_DSI_CFG 649
++#define       MSM_BUS_SLAVE_UFS_CFG 650
++#define       MSM_BUS_SLAVE_RBCPR_CX_CFG 651
++#define       MSM_BUS_SLAVE_RBCPR_MX_CFG 652
++#define       MSM_BUS_SLAVE_PCIE_CFG 653
++#define       MSM_BUS_SLAVE_USB_PHYS_CFG 654
++#define       MSM_BUS_SLAVE_VIDEO_CAP_CFG 655
++#define       MSM_BUS_SLAVE_AVSYNC_CFG 656
++#define       MSM_BUS_SLAVE_CRYPTO_2_CFG 657
++#define       MSM_BUS_SLAVE_VPU_CFG 658
++#define       MSM_BUS_SLAVE_BCAST_CFG 659
++#define       MSM_BUS_SLAVE_KLM_CFG 660
++#define       MSM_BUS_SLAVE_GENI_IR_CFG 661
++#define       MSM_BUS_SLAVE_OCMEM_GFX 662
++#define       MSM_BUS_SLAVE_CATS_128 663
++#define       MSM_BUS_SLAVE_OCMEM_64 664
++#define MSM_BUS_SLAVE_PCIE_0 665
++#define MSM_BUS_SLAVE_PCIE_1 666
++#define       MSM_BUS_SLAVE_PCIE_0_CFG 667
++#define       MSM_BUS_SLAVE_PCIE_1_CFG 668
++#define       MSM_BUS_SLAVE_SRVC_MNOC 669
++#define       MSM_BUS_SLAVE_USB_HS2 670
++#define       MSM_BUS_SLAVE_AUDIO     671
++#define       MSM_BUS_SLAVE_TCU       672
++#define       MSM_BUS_SLAVE_APPSS     673
++#define       MSM_BUS_SLAVE_PCIE_PARF 674
++#define       MSM_BUS_SLAVE_USB3_PHY_CFG      675
++#define       MSM_BUS_SLAVE_IPA_CFG   676
++#define       MSM_BUS_SLAVE_A0NOC_SNOC 677
++#define       MSM_BUS_SLAVE_A1NOC_SNOC 678
++#define       MSM_BUS_SLAVE_A2NOC_SNOC 679
++#define       MSM_BUS_SLAVE_HMSS_L3 680
++#define       MSM_BUS_SLAVE_PIMEM_CFG 681
++#define       MSM_BUS_SLAVE_DCC_CFG 682
++#define       MSM_BUS_SLAVE_QDSS_RBCPR_APU_CFG 683
++#define       MSM_BUS_SLAVE_PCIE_2_CFG 684
++#define       MSM_BUS_SLAVE_PCIE20_AHB2PHY 685
++#define       MSM_BUS_SLAVE_A0NOC_CFG 686
++#define       MSM_BUS_SLAVE_A1NOC_CFG 687
++#define       MSM_BUS_SLAVE_A2NOC_CFG 688
++#define       MSM_BUS_SLAVE_A1NOC_MPU_CFG 689
++#define       MSM_BUS_SLAVE_A2NOC_MPU_CFG 690
++#define       MSM_BUS_SLAVE_A0NOC_SMMU_CFG 691
++#define       MSM_BUS_SLAVE_A1NOC_SMMU_CFG 692
++#define       MSM_BUS_SLAVE_A2NOC_SMMU_CFG 693
++#define       MSM_BUS_SLAVE_LPASS_SMMU_CFG 694
++#define       MSM_BUS_SLAVE_MMAGIC_CFG 695
++#define       MSM_BUS_SLAVE_VENUS_THROTTLE_CFG 696
++#define       MSM_BUS_SLAVE_SSC_CFG 697
++#define       MSM_BUS_SLAVE_DSA_CFG 698
++#define       MSM_BUS_SLAVE_DSA_MPU_CFG 699
++#define       MSM_BUS_SLAVE_DISPLAY_THROTTLE_CFG 700
++#define       MSM_BUS_SLAVE_SMMU_CPP_CFG 701
++#define       MSM_BUS_SLAVE_SMMU_JPEG_CFG 702
++#define       MSM_BUS_SLAVE_SMMU_MDP_CFG 703
++#define       MSM_BUS_SLAVE_SMMU_ROTATOR_CFG 704
++#define       MSM_BUS_SLAVE_SMMU_VENUS_CFG 705
++#define       MSM_BUS_SLAVE_SMMU_VFE_CFG 706
++#define       MSM_BUS_SLAVE_A0NOC_MPU_CFG 707
++#define       MSM_BUS_SLAVE_VMEM_CFG 708
++#define       MSM_BUS_SLAVE_CAMERA_THROTTLE_CFG 700
++#define       MSM_BUS_SLAVE_VMEM 709
++#define       MSM_BUS_SLAVE_AHB2PHY 710
++#define       MSM_BUS_SLAVE_PIMEM 711
++#define       MSM_BUS_SLAVE_SNOC_VMEM 712
++#define       MSM_BUS_SLAVE_PCIE_2 713
++#define       MSM_BUS_SLAVE_RBCPR_MX 714
++#define       MSM_BUS_SLAVE_RBCPR_CX 715
++#define       MSM_BUS_SLAVE_PRNG_APU_CFG 716
++#define       MSM_BUS_SLAVE_PERIPH_MPU_CFG 717
++#define       MSM_BUS_SLAVE_GCNT 718
++#define       MSM_BUS_SLAVE_ADSS_CFG 719
++#define       MSM_BUS_SLAVE_ADSS_VMIDMT_CFG 720
++#define       MSM_BUS_SLAVE_QHSS_APU_CFG 721
++#define       MSM_BUS_SLAVE_MDIO 722
++#define       MSM_BUS_SLAVE_FEPHY_CFG 723
++#define       MSM_BUS_SLAVE_SRIF 724
++#define       MSM_BUS_SLAVE_LAST 730
++#define       MSM_BUS_SLAVE_DDRC_CFG 731
++#define       MSM_BUS_SLAVE_DDRC_APU_CFG 732
++#define       MSM_BUS_SLAVE_MPU0_CFG 733
++#define       MSM_BUS_SLAVE_MPU1_CFG 734
++#define       MSM_BUS_SLAVE_MPU2_CFG 734
++#define       MSM_BUS_SLAVE_ESS_VMIDMT_CFG 735
++#define       MSM_BUS_SLAVE_ESS_APU_CFG 736
++#define       MSM_BUS_SLAVE_USB2_CFG 737
++#define       MSM_BUS_SLAVE_BLSP_CFG 738
++#define       MSM_BUS_SLAVE_QPIC_CFG 739
++#define       MSM_BUS_SLAVE_SDCC_CFG 740
++#define       MSM_BUS_SLAVE_WSS0_VMIDMT_CFG 741
++#define       MSM_BUS_SLAVE_WSS0_APU_CFG 742
++#define       MSM_BUS_SLAVE_WSS1_VMIDMT_CFG 743
++#define       MSM_BUS_SLAVE_WSS1_APU_CFG 744
++#define       MSM_BUS_SLAVE_SRVC_PCNOC 745
++#define       MSM_BUS_SLAVE_SNOC_DDRC 746
++#define       MSM_BUS_SLAVE_A7SS 747
++#define       MSM_BUS_SLAVE_WSS0_CFG 748
++#define       MSM_BUS_SLAVE_WSS1_CFG 749
++#define       MSM_BUS_SLAVE_PCIE 750
++#define       MSM_BUS_SLAVE_USB3_CFG 751
++#define       MSM_BUS_SLAVE_CRYPTO_CFG 752
++#define       MSM_BUS_SLAVE_ESS_CFG 753
++#define       MSM_BUS_SLAVE_SRVC_SNOC 754
++
++#define       MSM_BUS_SYSTEM_FPB_SLAVE_SYSTEM  MSM_BUS_SYSTEM_SLAVE_SYSTEM_FPB
++#define MSM_BUS_CPSS_FPB_SLAVE_SYSTEM MSM_BUS_SYSTEM_SLAVE_CPSS_FPB
++
++/*
++ * ID's used in RPM messages
++ */
++#define ICBID_MASTER_APPSS_PROC 0
++#define ICBID_MASTER_MSS_PROC 1
++#define ICBID_MASTER_MNOC_BIMC 2
++#define ICBID_MASTER_SNOC_BIMC 3
++#define ICBID_MASTER_SNOC_BIMC_0 ICBID_MASTER_SNOC_BIMC
++#define ICBID_MASTER_CNOC_MNOC_MMSS_CFG 4
++#define ICBID_MASTER_CNOC_MNOC_CFG 5
++#define ICBID_MASTER_GFX3D 6
++#define ICBID_MASTER_JPEG 7
++#define ICBID_MASTER_MDP 8
++#define ICBID_MASTER_MDP0 ICBID_MASTER_MDP
++#define ICBID_MASTER_MDPS ICBID_MASTER_MDP
++#define ICBID_MASTER_VIDEO 9
++#define ICBID_MASTER_VIDEO_P0 ICBID_MASTER_VIDEO
++#define ICBID_MASTER_VIDEO_P1 10
++#define ICBID_MASTER_VFE 11
++#define ICBID_MASTER_CNOC_ONOC_CFG 12
++#define ICBID_MASTER_JPEG_OCMEM 13
++#define ICBID_MASTER_MDP_OCMEM 14
++#define ICBID_MASTER_VIDEO_P0_OCMEM 15
++#define ICBID_MASTER_VIDEO_P1_OCMEM 16
++#define ICBID_MASTER_VFE_OCMEM 17
++#define ICBID_MASTER_LPASS_AHB 18
++#define ICBID_MASTER_QDSS_BAM 19
++#define ICBID_MASTER_SNOC_CFG 20
++#define ICBID_MASTER_BIMC_SNOC 21
++#define ICBID_MASTER_CNOC_SNOC 22
++#define ICBID_MASTER_CRYPTO 23
++#define ICBID_MASTER_CRYPTO_CORE0 ICBID_MASTER_CRYPTO
++#define ICBID_MASTER_CRYPTO_CORE1 24
++#define ICBID_MASTER_LPASS_PROC 25
++#define ICBID_MASTER_MSS 26
++#define ICBID_MASTER_MSS_NAV 27
++#define ICBID_MASTER_OCMEM_DMA 28
++#define ICBID_MASTER_PNOC_SNOC 29
++#define ICBID_MASTER_WCSS 30
++#define ICBID_MASTER_QDSS_ETR 31
++#define ICBID_MASTER_USB3 32
++#define ICBID_MASTER_USB3_0 ICBID_MASTER_USB3
++#define ICBID_MASTER_SDCC_1 33
++#define ICBID_MASTER_SDCC_3 34
++#define ICBID_MASTER_SDCC_2 35
++#define ICBID_MASTER_SDCC_4 36
++#define ICBID_MASTER_TSIF 37
++#define ICBID_MASTER_BAM_DMA 38
++#define ICBID_MASTER_BLSP_2 39
++#define ICBID_MASTER_USB_HSIC 40
++#define ICBID_MASTER_BLSP_1 41
++#define ICBID_MASTER_USB_HS 42
++#define ICBID_MASTER_USB_HS1 ICBID_MASTER_USB_HS
++#define ICBID_MASTER_PNOC_CFG 43
++#define ICBID_MASTER_SNOC_PNOC 44
++#define ICBID_MASTER_RPM_INST 45
++#define ICBID_MASTER_RPM_DATA 46
++#define ICBID_MASTER_RPM_SYS 47
++#define ICBID_MASTER_DEHR 48
++#define ICBID_MASTER_QDSS_DAP 49
++#define ICBID_MASTER_SPDM 50
++#define ICBID_MASTER_TIC 51
++#define ICBID_MASTER_SNOC_CNOC 52
++#define ICBID_MASTER_GFX3D_OCMEM 53
++#define ICBID_MASTER_GFX3D_GMEM ICBID_MASTER_GFX3D_OCMEM
++#define ICBID_MASTER_OVIRT_SNOC 54
++#define ICBID_MASTER_SNOC_OVIRT 55
++#define ICBID_MASTER_SNOC_GVIRT ICBID_MASTER_SNOC_OVIRT
++#define ICBID_MASTER_ONOC_OVIRT 56
++#define ICBID_MASTER_USB_HS2 57
++#define ICBID_MASTER_QPIC 58
++#define ICBID_MASTER_IPA 59
++#define ICBID_MASTER_DSI 60
++#define ICBID_MASTER_MDP1 61
++#define ICBID_MASTER_MDPE ICBID_MASTER_MDP1
++#define ICBID_MASTER_VPU_PROC 62
++#define ICBID_MASTER_VPU 63
++#define ICBID_MASTER_VPU0 ICBID_MASTER_VPU
++#define ICBID_MASTER_CRYPTO_CORE2 64
++#define ICBID_MASTER_PCIE_0 65
++#define ICBID_MASTER_PCIE_1 66
++#define ICBID_MASTER_SATA 67
++#define ICBID_MASTER_UFS 68
++#define ICBID_MASTER_USB3_1 69
++#define ICBID_MASTER_VIDEO_OCMEM 70
++#define ICBID_MASTER_VPU1 71
++#define ICBID_MASTER_VCAP 72
++#define ICBID_MASTER_EMAC 73
++#define ICBID_MASTER_BCAST 74
++#define ICBID_MASTER_MMSS_PROC 75
++#define ICBID_MASTER_SNOC_BIMC_1 76
++#define ICBID_MASTER_SNOC_PCNOC 77
++#define ICBID_MASTER_AUDIO 78
++#define ICBID_MASTER_MM_INT_0 79
++#define ICBID_MASTER_MM_INT_1 80
++#define ICBID_MASTER_MM_INT_2 81
++#define ICBID_MASTER_MM_INT_BIMC 82
++#define ICBID_MASTER_MSS_INT 83
++#define ICBID_MASTER_PCNOC_CFG 84
++#define ICBID_MASTER_PCNOC_INT_0 85
++#define ICBID_MASTER_PCNOC_INT_1 86
++#define ICBID_MASTER_PCNOC_M_0 87
++#define ICBID_MASTER_PCNOC_M_1 88
++#define ICBID_MASTER_PCNOC_S_0 89
++#define ICBID_MASTER_PCNOC_S_1 90
++#define ICBID_MASTER_PCNOC_S_2 91
++#define ICBID_MASTER_PCNOC_S_3 92
++#define ICBID_MASTER_PCNOC_S_4 93
++#define ICBID_MASTER_PCNOC_S_6 94
++#define ICBID_MASTER_PCNOC_S_7 95
++#define ICBID_MASTER_PCNOC_S_8 96
++#define ICBID_MASTER_PCNOC_S_9 97
++#define ICBID_MASTER_QDSS_INT 98
++#define ICBID_MASTER_SNOC_INT_0 99
++#define ICBID_MASTER_SNOC_INT_1 100
++#define ICBID_MASTER_SNOC_INT_BIMC 101
++#define ICBID_MASTER_TCU_0 102
++#define ICBID_MASTER_TCU_1 103
++#define ICBID_MASTER_BIMC_INT_0 104
++#define ICBID_MASTER_BIMC_INT_1 105
++#define ICBID_MASTER_CAMERA 106
++#define ICBID_MASTER_RICA 107
++#define ICBID_MASTER_PCNOC_S_5        129
++#define ICBID_MASTER_PCNOC_INT_2      124
++#define ICBID_MASTER_PCNOC_INT_3      125
++#define ICBID_MASTER_PCNOC_INT_4      126
++#define ICBID_MASTER_PCNOC_INT_5      127
++#define ICBID_MASTER_PCNOC_INT_6      128
++#define ICBID_MASTER_PCIE_2 119
++#define ICBID_MASTER_MASTER_CNOC_A1NOC 116
++#define ICBID_MASTER_A0NOC_SNOC 110
++#define ICBID_MASTER_A1NOC_SNOC 111
++#define ICBID_MASTER_A2NOC_SNOC 112
++#define ICBID_MASTER_PNOC_A1NOC 117
++#define ICBID_MASTER_ROTATOR 120
++#define ICBID_MASTER_SNOC_VMEM 114
++#define ICBID_MASTER_VENUS_VMEM 121
++#define ICBID_MASTER_HMSS 118
++#define ICBID_MASTER_BIMC_SNOC_1 109
++#define ICBID_MASTER_CNOC_A1NOC 116
++#define ICBID_MASTER_CPP 115
++#define ICBID_MASTER_BLSP_BAM 130
++#define ICBID_MASTER_USB2_BAM 131
++#define ICBID_MASTER_ADSS_DMA0 132
++#define ICBID_MASTER_ADSS_DMA1 133
++#define ICBID_MASTER_ADSS_DMA2 134
++#define ICBID_MASTER_ADSS_DMA3 135
++#define ICBID_MASTER_QPIC_BAM 136
++#define ICBID_MASTER_SDCC_BAM 137
++#define ICBID_MASTER_DDRC_SNOC 138
++#define ICBID_MASTER_WSS_0 139
++#define ICBID_MASTER_WSS_1 140
++#define ICBID_MASTER_ESS 141
++#define ICBID_MASTER_PCIE 142
++#define ICBID_MASTER_QDSS_BAMNDP 143
++#define ICBID_MASTER_QDSS_SNOC_CFG 144
++
++#define ICBID_SLAVE_EBI1 0
++#define ICBID_SLAVE_APPSS_L2 1
++#define ICBID_SLAVE_BIMC_SNOC 2
++#define ICBID_SLAVE_CAMERA_CFG 3
++#define ICBID_SLAVE_DISPLAY_CFG 4
++#define ICBID_SLAVE_OCMEM_CFG 5
++#define ICBID_SLAVE_CPR_CFG 6
++#define ICBID_SLAVE_CPR_XPU_CFG 7
++#define ICBID_SLAVE_MISC_CFG 8
++#define ICBID_SLAVE_MISC_XPU_CFG 9
++#define ICBID_SLAVE_VENUS_CFG 10
++#define ICBID_SLAVE_GFX3D_CFG 11
++#define ICBID_SLAVE_MMSS_CLK_CFG 12
++#define ICBID_SLAVE_MMSS_CLK_XPU_CFG 13
++#define ICBID_SLAVE_MNOC_MPU_CFG 14
++#define ICBID_SLAVE_ONOC_MPU_CFG 15
++#define ICBID_SLAVE_MNOC_BIMC 16
++#define ICBID_SLAVE_SERVICE_MNOC 17
++#define ICBID_SLAVE_OCMEM 18
++#define ICBID_SLAVE_GMEM ICBID_SLAVE_OCMEM
++#define ICBID_SLAVE_SERVICE_ONOC 19
++#define ICBID_SLAVE_APPSS 20
++#define ICBID_SLAVE_LPASS 21
++#define ICBID_SLAVE_USB3 22
++#define ICBID_SLAVE_USB3_0 ICBID_SLAVE_USB3
++#define ICBID_SLAVE_WCSS 23
++#define ICBID_SLAVE_SNOC_BIMC 24
++#define ICBID_SLAVE_SNOC_BIMC_0 ICBID_SLAVE_SNOC_BIMC
++#define ICBID_SLAVE_SNOC_CNOC 25
++#define ICBID_SLAVE_IMEM 26
++#define ICBID_SLAVE_OCIMEM ICBID_SLAVE_IMEM
++#define ICBID_SLAVE_SNOC_OVIRT 27
++#define ICBID_SLAVE_SNOC_GVIRT ICBID_SLAVE_SNOC_OVIRT
++#define ICBID_SLAVE_SNOC_PNOC 28
++#define ICBID_SLAVE_SNOC_PCNOC ICBID_SLAVE_SNOC_PNOC
++#define ICBID_SLAVE_SERVICE_SNOC 29
++#define ICBID_SLAVE_QDSS_STM 30
++#define ICBID_SLAVE_SDCC_1 31
++#define ICBID_SLAVE_SDCC_3 32
++#define ICBID_SLAVE_SDCC_2 33
++#define ICBID_SLAVE_SDCC_4 34
++#define ICBID_SLAVE_TSIF 35
++#define ICBID_SLAVE_BAM_DMA 36
++#define ICBID_SLAVE_BLSP_2 37
++#define ICBID_SLAVE_USB_HSIC 38
++#define ICBID_SLAVE_BLSP_1 39
++#define ICBID_SLAVE_USB_HS 40
++#define ICBID_SLAVE_USB_HS1 ICBID_SLAVE_USB_HS
++#define ICBID_SLAVE_PDM 41
++#define ICBID_SLAVE_PERIPH_APU_CFG 42
++#define ICBID_SLAVE_PNOC_MPU_CFG 43
++#define ICBID_SLAVE_PRNG 44
++#define ICBID_SLAVE_PNOC_SNOC 45
++#define ICBID_SLAVE_PCNOC_SNOC ICBID_SLAVE_PNOC_SNOC
++#define ICBID_SLAVE_SERVICE_PNOC 46
++#define ICBID_SLAVE_CLK_CTL 47
++#define ICBID_SLAVE_CNOC_MSS 48
++#define ICBID_SLAVE_PCNOC_MSS ICBID_SLAVE_CNOC_MSS
++#define ICBID_SLAVE_SECURITY 49
++#define ICBID_SLAVE_TCSR 50
++#define ICBID_SLAVE_TLMM 51
++#define ICBID_SLAVE_CRYPTO_0_CFG 52
++#define ICBID_SLAVE_CRYPTO_1_CFG 53
++#define ICBID_SLAVE_IMEM_CFG 54
++#define ICBID_SLAVE_MESSAGE_RAM 55
++#define ICBID_SLAVE_BIMC_CFG 56
++#define ICBID_SLAVE_BOOT_ROM 57
++#define ICBID_SLAVE_CNOC_MNOC_MMSS_CFG 58
++#define ICBID_SLAVE_PMIC_ARB 59
++#define ICBID_SLAVE_SPDM_WRAPPER 60
++#define ICBID_SLAVE_DEHR_CFG 61
++#define ICBID_SLAVE_MPM 62
++#define ICBID_SLAVE_QDSS_CFG 63
++#define ICBID_SLAVE_RBCPR_CFG 64
++#define ICBID_SLAVE_RBCPR_CX_CFG ICBID_SLAVE_RBCPR_CFG
++#define ICBID_SLAVE_RBCPR_QDSS_APU_CFG 65
++#define ICBID_SLAVE_CNOC_MNOC_CFG 66
++#define ICBID_SLAVE_SNOC_MPU_CFG 67
++#define ICBID_SLAVE_CNOC_ONOC_CFG 68
++#define ICBID_SLAVE_PNOC_CFG 69
++#define ICBID_SLAVE_SNOC_CFG 70
++#define ICBID_SLAVE_EBI1_DLL_CFG 71
++#define ICBID_SLAVE_PHY_APU_CFG 72
++#define ICBID_SLAVE_EBI1_PHY_CFG 73
++#define ICBID_SLAVE_RPM 74
++#define ICBID_SLAVE_CNOC_SNOC 75
++#define ICBID_SLAVE_SERVICE_CNOC 76
++#define ICBID_SLAVE_OVIRT_SNOC 77
++#define ICBID_SLAVE_OVIRT_OCMEM 78
++#define ICBID_SLAVE_USB_HS2 79
++#define ICBID_SLAVE_QPIC 80
++#define ICBID_SLAVE_IPS_CFG 81
++#define ICBID_SLAVE_DSI_CFG 82
++#define ICBID_SLAVE_USB3_1 83
++#define ICBID_SLAVE_PCIE_0 84
++#define ICBID_SLAVE_PCIE_1 85
++#define ICBID_SLAVE_PSS_SMMU_CFG 86
++#define ICBID_SLAVE_CRYPTO_2_CFG 87
++#define ICBID_SLAVE_PCIE_0_CFG 88
++#define ICBID_SLAVE_PCIE_1_CFG 89
++#define ICBID_SLAVE_SATA_CFG 90
++#define ICBID_SLAVE_SPSS_GENI_IR 91
++#define ICBID_SLAVE_UFS_CFG 92
++#define ICBID_SLAVE_AVSYNC_CFG 93
++#define ICBID_SLAVE_VPU_CFG 94
++#define ICBID_SLAVE_USB_PHY_CFG 95
++#define ICBID_SLAVE_RBCPR_MX_CFG 96
++#define ICBID_SLAVE_PCIE_PARF 97
++#define ICBID_SLAVE_VCAP_CFG 98
++#define ICBID_SLAVE_EMAC_CFG 99
++#define ICBID_SLAVE_BCAST_CFG 100
++#define ICBID_SLAVE_KLM_CFG 101
++#define ICBID_SLAVE_DISPLAY_PWM 102
++#define ICBID_SLAVE_GENI 103
++#define ICBID_SLAVE_SNOC_BIMC_1 104
++#define ICBID_SLAVE_AUDIO 105
++#define ICBID_SLAVE_CATS_0 106
++#define ICBID_SLAVE_CATS_1 107
++#define ICBID_SLAVE_MM_INT_0 108
++#define ICBID_SLAVE_MM_INT_1 109
++#define ICBID_SLAVE_MM_INT_2 110
++#define ICBID_SLAVE_MM_INT_BIMC 111
++#define ICBID_SLAVE_MMU_MODEM_XPU_CFG 112
++#define ICBID_SLAVE_MSS_INT 113
++#define ICBID_SLAVE_PCNOC_INT_0 114
++#define ICBID_SLAVE_PCNOC_INT_1 115
++#define ICBID_SLAVE_PCNOC_M_0 116
++#define ICBID_SLAVE_PCNOC_M_1 117
++#define ICBID_SLAVE_PCNOC_S_0 118
++#define ICBID_SLAVE_PCNOC_S_1 119
++#define ICBID_SLAVE_PCNOC_S_2 120
++#define ICBID_SLAVE_PCNOC_S_3 121
++#define ICBID_SLAVE_PCNOC_S_4 122
++#define ICBID_SLAVE_PCNOC_S_6 123
++#define ICBID_SLAVE_PCNOC_S_7 124
++#define ICBID_SLAVE_PCNOC_S_8 125
++#define ICBID_SLAVE_PCNOC_S_9 126
++#define ICBID_SLAVE_PRNG_XPU_CFG 127
++#define ICBID_SLAVE_QDSS_INT 128
++#define ICBID_SLAVE_RPM_XPU_CFG 129
++#define ICBID_SLAVE_SNOC_INT_0 130
++#define ICBID_SLAVE_SNOC_INT_1 131
++#define ICBID_SLAVE_SNOC_INT_BIMC 132
++#define ICBID_SLAVE_TCU 133
++#define ICBID_SLAVE_BIMC_INT_0 134
++#define ICBID_SLAVE_BIMC_INT_1 135
++#define ICBID_SLAVE_RICA_CFG 136
++#define ICBID_SLAVE_PCNOC_S_5 189
++#define ICBID_SLAVE_PCNOC_S_7 124
++#define ICBID_SLAVE_PCNOC_INT_2 184
++#define ICBID_SLAVE_PCNOC_INT_3 185
++#define ICBID_SLAVE_PCNOC_INT_4 186
++#define ICBID_SLAVE_PCNOC_INT_5 187
++#define ICBID_SLAVE_PCNOC_INT_6 188
++#define ICBID_SLAVE_USB3_PHY_CFG 182
++#define ICBID_SLAVE_IPA_CFG 183
++
++#define ICBID_SLAVE_A0NOC_SNOC 141
++#define ICBID_SLAVE_A1NOC_SNOC 142
++#define ICBID_SLAVE_A2NOC_SNOC 143
++#define ICBID_SLAVE_BIMC_SNOC_1 138
++#define ICBID_SLAVE_PIMEM 167
++#define ICBID_SLAVE_PIMEM_CFG 168
++#define ICBID_SLAVE_DCC_CFG 155
++#define ICBID_SLAVE_QDSS_RBCPR_APU_CFG 168
++#define ICBID_SLAVE_A0NOC_CFG 144
++#define ICBID_SLAVE_PCIE_2_CFG 165
++#define ICBID_SLAVE_PCIE20_AHB2PHY 163
++#define ICBID_SLAVE_PCIE_2 164
++#define ICBID_SLAVE_A1NOC_CFG 147
++#define ICBID_SLAVE_A1NOC_MPU_CFG 148
++#define ICBID_SLAVE_A1NOC_SMMU_CFG 149
++#define ICBID_SLAVE_A2NOC_CFG 150
++#define ICBID_SLAVE_A2NOC_MPU_CFG 151
++#define ICBID_SLAVE_A2NOC_SMMU_CFG 152
++#define ICBID_SLAVE_AHB2PHY 153
++#define ICBID_SLAVE_HMSS_L3 161
++#define ICBID_SLAVE_LPASS_SMMU_CFG 161
++#define ICBID_SLAVE_MMAGIC_CFG 162
++#define ICBID_SLAVE_SSC_CFG 177
++#define ICBID_SLAVE_VENUS_THROTTLE_CFG 178
++#define ICBID_SLAVE_DISPLAY_THROTTLE_CFG 156
++#define ICBID_SLAVE_CAMERA_THROTTLE_CFG 154
++#define ICBID_SLAVE_DSA_CFG 157
++#define ICBID_SLAVE_DSA_MPU_CFG 158
++#define ICBID_SLAVE_SMMU_CPP_CFG 171
++#define ICBID_SLAVE_SMMU_JPEG_CFG 172
++#define ICBID_SLAVE_SMMU_MDP_CFG 173
++#define ICBID_SLAVE_SMMU_ROTATOR_CFG 174
++#define ICBID_SLAVE_SMMU_VENUS_CFG 175
++#define ICBID_SLAVE_SMMU_VFE_CFG 176
++#define ICBID_SLAVE_A0NOC_MPU_CFG 145
++#define ICBID_SLAVE_A0NOC_SMMU_CFG 146
++#define ICBID_SLAVE_VMEM_CFG 180
++#define ICBID_SLAVE_VMEM 179
++#define ICBID_SLAVE_PNOC_A1NOC 139
++#define ICBID_SLAVE_SNOC_VMEM 140
++#define ICBID_SLAVE_RBCPR_MX 170
++#define ICBID_SLAVE_RBCPR_CX 169
++#define ICBID_SLAVE_PRNG_APU_CFG 190
++#define ICBID_SLAVE_PERIPH_MPU_CFG 191
++#define ICBID_SLAVE_GCNT 192
++#define ICBID_SLAVE_ADSS_CFG 193
++#define ICBID_SLAVE_ADSS_APU 194
++#define ICBID_SLAVE_ADSS_VMIDMT_CFG 195
++#define ICBID_SLAVE_QHSS_APU_CFG 196
++#define ICBID_SLAVE_MDIO 197
++#define ICBID_SLAVE_FEPHY_CFG 198
++#define ICBID_SLAVE_SRIF 199
++#define ICBID_SLAVE_DDRC_CFG 200
++#define ICBID_SLAVE_DDRC_APU_CFG 201
++#define ICBID_SLAVE_DDRC_MPU0_CFG 202
++#define ICBID_SLAVE_DDRC_MPU1_CFG 203
++#define ICBID_SLAVE_DDRC_MPU2_CFG 210
++#define ICBID_SLAVE_ESS_VMIDMT_CFG 211
++#define ICBID_SLAVE_ESS_APU_CFG 212
++#define ICBID_SLAVE_USB2_CFG 213
++#define ICBID_SLAVE_BLSP_CFG 214
++#define ICBID_SLAVE_QPIC_CFG 215
++#define ICBID_SLAVE_SDCC_CFG 216
++#define ICBID_SLAVE_WSS0_VMIDMT_CFG 217
++#define ICBID_SLAVE_WSS0_APU_CFG 218
++#define ICBID_SLAVE_WSS1_VMIDMT_CFG 219
++#define ICBID_SLAVE_WSS1_APU_CFG 220
++#define ICBID_SLAVE_SRVC_PCNOC 221
++#define ICBID_SLAVE_SNOC_DDRC 222
++#define ICBID_SLAVE_A7SS 223
++#define ICBID_SLAVE_WSS0_CFG 224
++#define ICBID_SLAVE_WSS1_CFG 225
++#define ICBID_SLAVE_PCIE 226
++#define ICBID_SLAVE_USB3_CFG 227
++#define ICBID_SLAVE_CRYPTO_CFG 228
++#define ICBID_SLAVE_ESS_CFG 229
++#define ICBID_SLAVE_SRVC_SNOC 230
++#endif
+--- /dev/null
++++ b/include/dt-bindings/msm/msm-bus-rule-ops.h
+@@ -0,0 +1,32 @@
++/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 and
++ * only version 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ */
++
++#ifndef __MSM_BUS_RULE_OPS_H
++#define __MSM_BUS_RULE_OPS_H
++
++#define FLD_IB        0
++#define FLD_AB        1
++#define FLD_CLK       2
++
++#define OP_LE 0
++#define OP_LT 1
++#define OP_GE 2
++#define OP_GT 3
++#define OP_NOOP       4
++
++#define RULE_STATE_NOT_APPLIED        0
++#define RULE_STATE_APPLIED    1
++
++#define THROTTLE_ON   0
++#define THROTTLE_OFF  1
++
++#endif
+--- /dev/null
++++ b/drivers/bus/msm_bus/Kconfig
+@@ -0,0 +1,19 @@
++config BUS_TOPOLOGY_ADHOC
++      bool "ad-hoc bus scaling topology"
++      depends on ARCH_QCOM
++      default n
++      help
++        This option enables a driver that can handle adhoc bus topologies.
++        The adhoc bus topology driver allows one-to-many connections and
++        maintains directionality of connections by explicitly listing device
++        connections, thus avoiding illegal routes.
++
++config MSM_BUS_SCALING
++      bool "Bus scaling driver"
++      depends on BUS_TOPOLOGY_ADHOC
++      default n
++      help
++        This option enables bus scaling on MSM devices.  Bus scaling
++        allows devices to request the clocks be set to rates sufficient
++        for the active devices' needs without keeping the clocks at max
++        frequency when a slower speed is sufficient.
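++
++# Illustrative config fragment (assumed, not part of the original Kconfig):
++# a board using the ad-hoc topology would typically enable both symbols:
++#   CONFIG_BUS_TOPOLOGY_ADHOC=y
++#   CONFIG_MSM_BUS_SCALING=y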
+--- /dev/null
++++ b/drivers/bus/msm_bus/Makefile
+@@ -0,0 +1,12 @@
++#
++# Makefile for msm-bus driver specific files
++#
++obj-y += msm_bus_bimc.o msm_bus_noc.o msm_bus_core.o msm_bus_client_api.o \
++       msm_bus_id.o
++obj-$(CONFIG_OF) += msm_bus_of.o
++
++obj-y += msm_bus_fabric_adhoc.o msm_bus_arb_adhoc.o msm_bus_rules.o
++obj-$(CONFIG_OF) += msm_bus_of_adhoc.o
++obj-$(CONFIG_CORESIGHT) +=  msm_buspm_coresight_adhoc.o
++
++obj-$(CONFIG_DEBUG_FS) += msm_bus_dbg.o
+--- /dev/null
++++ b/drivers/bus/msm_bus/msm-bus-board.h
+@@ -0,0 +1,198 @@
++/* Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 and
++ * only version 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ */
++
++#ifndef __ASM_ARCH_MSM_BUS_BOARD_H
++#define __ASM_ARCH_MSM_BUS_BOARD_H
++
++#include <linux/types.h>
++#include <linux/input.h>
++
++enum context {
++      DUAL_CTX,
++      ACTIVE_CTX,
++      NUM_CTX
++};
++
++struct msm_bus_fabric_registration {
++      unsigned int id;
++      const char *name;
++      struct msm_bus_node_info *info;
++      unsigned int len;
++      int ahb;
++      const char *fabclk[NUM_CTX];
++      const char *iface_clk;
++      unsigned int offset;
++      unsigned int haltid;
++      unsigned int rpm_enabled;
++      unsigned int nmasters;
++      unsigned int nslaves;
++      unsigned int ntieredslaves;
++      bool il_flag;
++      const struct msm_bus_board_algorithm *board_algo;
++      int hw_sel;
++      void *hw_data;
++      uint32_t qos_freq;
++      uint32_t qos_baseoffset;
++      u64 nr_lim_thresh;
++      uint32_t eff_fact;
++      uint32_t qos_delta;
++      bool virt;
++};
++
++struct msm_bus_device_node_registration {
++      struct msm_bus_node_device_type *info;
++      unsigned int num_devices;
++      bool virt;
++};
++
++enum msm_bus_bw_tier_type {
++      MSM_BUS_BW_TIER1 = 1,
++      MSM_BUS_BW_TIER2,
++      MSM_BUS_BW_COUNT,
++      MSM_BUS_BW_SIZE = 0x7FFFFFFF,
++};
++
++struct msm_bus_halt_vector {
++      uint32_t haltval;
++      uint32_t haltmask;
++};
++
++extern struct msm_bus_fabric_registration msm_bus_apps_fabric_pdata;
++extern struct msm_bus_fabric_registration msm_bus_sys_fabric_pdata;
++extern struct msm_bus_fabric_registration msm_bus_mm_fabric_pdata;
++extern struct msm_bus_fabric_registration msm_bus_sys_fpb_pdata;
++extern struct msm_bus_fabric_registration msm_bus_cpss_fpb_pdata;
++extern struct msm_bus_fabric_registration msm_bus_def_fab_pdata;
++
++extern struct msm_bus_fabric_registration msm_bus_8960_apps_fabric_pdata;
++extern struct msm_bus_fabric_registration msm_bus_8960_sys_fabric_pdata;
++extern struct msm_bus_fabric_registration msm_bus_8960_mm_fabric_pdata;
++extern struct msm_bus_fabric_registration msm_bus_8960_sg_mm_fabric_pdata;
++extern struct msm_bus_fabric_registration msm_bus_8960_sys_fpb_pdata;
++extern struct msm_bus_fabric_registration msm_bus_8960_cpss_fpb_pdata;
++
++extern struct msm_bus_fabric_registration msm_bus_8064_apps_fabric_pdata;
++extern struct msm_bus_fabric_registration msm_bus_8064_sys_fabric_pdata;
++extern struct msm_bus_fabric_registration msm_bus_8064_mm_fabric_pdata;
++extern struct msm_bus_fabric_registration msm_bus_8064_sys_fpb_pdata;
++extern struct msm_bus_fabric_registration msm_bus_8064_cpss_fpb_pdata;
++
++extern struct msm_bus_fabric_registration msm_bus_9615_sys_fabric_pdata;
++extern struct msm_bus_fabric_registration msm_bus_9615_def_fab_pdata;
++
++extern struct msm_bus_fabric_registration msm_bus_8930_apps_fabric_pdata;
++extern struct msm_bus_fabric_registration msm_bus_8930_sys_fabric_pdata;
++extern struct msm_bus_fabric_registration msm_bus_8930_mm_fabric_pdata;
++extern struct msm_bus_fabric_registration msm_bus_8930_sys_fpb_pdata;
++extern struct msm_bus_fabric_registration msm_bus_8930_cpss_fpb_pdata;
++
++extern struct msm_bus_fabric_registration msm_bus_8974_sys_noc_pdata;
++extern struct msm_bus_fabric_registration msm_bus_8974_mmss_noc_pdata;
++extern struct msm_bus_fabric_registration msm_bus_8974_bimc_pdata;
++extern struct msm_bus_fabric_registration msm_bus_8974_ocmem_noc_pdata;
++extern struct msm_bus_fabric_registration msm_bus_8974_periph_noc_pdata;
++extern struct msm_bus_fabric_registration msm_bus_8974_config_noc_pdata;
++extern struct msm_bus_fabric_registration msm_bus_8974_ocmem_vnoc_pdata;
++
++extern struct msm_bus_fabric_registration msm_bus_9625_sys_noc_pdata;
++extern struct msm_bus_fabric_registration msm_bus_9625_bimc_pdata;
++extern struct msm_bus_fabric_registration msm_bus_9625_periph_noc_pdata;
++extern struct msm_bus_fabric_registration msm_bus_9625_config_noc_pdata;
++
++extern int msm_bus_device_match_adhoc(struct device *dev, void *id);
++
++void msm_bus_rpm_set_mt_mask(void);
++int msm_bus_board_rpm_get_il_ids(uint16_t *id);
++int msm_bus_board_get_iid(int id);
++
++#define NFAB_MSM8226 6
++#define NFAB_MSM8610 5
++
++/*
++ * These macros specify the convention followed for allocating
++ * ids to fabrics, masters and slaves for 8x60.
++ *
++ * A node can be identified as a master/slave/fabric by using
++ * these ids.
++ */
++#define FABRIC_ID_KEY 1024
++#define SLAVE_ID_KEY ((FABRIC_ID_KEY) >> 1)
++#define MAX_FAB_KEY 7168  /* OR(All fabric ids) */
++#define INT_NODE_START 10000
++
++#define GET_FABID(id) ((id) & MAX_FAB_KEY)
++
++#define NODE_ID(id) ((id) & (FABRIC_ID_KEY - 1))
++#define IS_SLAVE(id) ((NODE_ID(id)) >= SLAVE_ID_KEY ? 1 : 0)
++#define CHECK_ID(iid, id) (((iid & id) != id) ? -ENXIO : iid)
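++
++/*
++ * Example (id value assumed): for id = 1540, GET_FABID(1540) = 1540 & 7168
++ * = 1024 identifies its fabric, while NODE_ID(1540) = 1540 & 1023 = 516 is
++ * >= SLAVE_ID_KEY (512), so IS_SLAVE(1540) evaluates to 1.
++ */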
++
++/*
++ * The following macros are used to format the data for port halt
++ * and unhalt requests.
++ */
++#define MSM_BUS_CLK_HALT 0x1
++#define MSM_BUS_CLK_HALT_MASK 0x1
++#define MSM_BUS_CLK_HALT_FIELDSIZE 0x1
++#define MSM_BUS_CLK_UNHALT 0x0
++
++#define MSM_BUS_MASTER_SHIFT(master, fieldsize) \
++      ((master) * (fieldsize))
++
++#define MSM_BUS_SET_BITFIELD(word, fieldmask, fieldvalue) \
++      {       \
++              (word) &= ~(fieldmask); \
++              (word) |= (fieldvalue); \
++      }
++
++
++#define MSM_BUS_MASTER_HALT(u32haltmask, u32haltval, master) \
++      MSM_BUS_SET_BITFIELD(u32haltmask, \
++              MSM_BUS_CLK_HALT_MASK<<MSM_BUS_MASTER_SHIFT((master),\
++              MSM_BUS_CLK_HALT_FIELDSIZE), \
++              MSM_BUS_CLK_HALT_MASK<<MSM_BUS_MASTER_SHIFT((master),\
++              MSM_BUS_CLK_HALT_FIELDSIZE))\
++      MSM_BUS_SET_BITFIELD(u32haltval, \
++              MSM_BUS_CLK_HALT_MASK<<MSM_BUS_MASTER_SHIFT((master),\
++              MSM_BUS_CLK_HALT_FIELDSIZE), \
++              MSM_BUS_CLK_HALT<<MSM_BUS_MASTER_SHIFT((master),\
++              MSM_BUS_CLK_HALT_FIELDSIZE))\
++
++#define MSM_BUS_MASTER_UNHALT(u32haltmask, u32haltval, master) \
++      MSM_BUS_SET_BITFIELD(u32haltmask, \
++              MSM_BUS_CLK_HALT_MASK<<MSM_BUS_MASTER_SHIFT((master),\
++              MSM_BUS_CLK_HALT_FIELDSIZE), \
++              MSM_BUS_CLK_HALT_MASK<<MSM_BUS_MASTER_SHIFT((master),\
++              MSM_BUS_CLK_HALT_FIELDSIZE))\
++      MSM_BUS_SET_BITFIELD(u32haltval, \
++              MSM_BUS_CLK_HALT_MASK<<MSM_BUS_MASTER_SHIFT((master),\
++              MSM_BUS_CLK_HALT_FIELDSIZE), \
++              MSM_BUS_CLK_UNHALT<<MSM_BUS_MASTER_SHIFT((master),\
++              MSM_BUS_CLK_HALT_FIELDSIZE))\
++
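++/*
++ * Example (master id assumed to be 3): with a field size of 1,
++ * MSM_BUS_MASTER_HALT(mask, val, 3) sets bit 3 in both "mask" and "val",
++ * while MSM_BUS_MASTER_UNHALT(mask, val, 3) sets bit 3 in "mask" and
++ * clears it in "val".
++ */
++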
++#define RPM_BUS_SLAVE_REQ     0x766c7362
++#define RPM_BUS_MASTER_REQ    0x73616d62
++
++enum msm_bus_rpm_slave_field_type {
++      RPM_SLAVE_FIELD_BW = 0x00007762,
++};
++
++enum msm_bus_rpm_mas_field_type {
++      RPM_MASTER_FIELD_BW =           0x00007762,
++      RPM_MASTER_FIELD_BW_T0 =        0x30747762,
++      RPM_MASTER_FIELD_BW_T1 =        0x31747762,
++      RPM_MASTER_FIELD_BW_T2 =        0x32747762,
++};
++
++#include <dt-bindings/msm/msm-bus-ids.h>
++
++
++#endif /*__ASM_ARCH_MSM_BUS_BOARD_H */
+--- /dev/null
++++ b/drivers/bus/msm_bus/msm-bus.h
+@@ -0,0 +1,139 @@
++/* Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 and
++ * only version 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ */
++
++#ifndef _ARCH_ARM_MACH_MSM_BUS_H
++#define _ARCH_ARM_MACH_MSM_BUS_H
++
++#include <linux/types.h>
++#include <linux/input.h>
++#include <linux/platform_device.h>
++
++/*
++ * Macros for clients to convert their data to ib and ab
++ * Ws : Time window over which to transfer the data in SECONDS
++ * Bs : Size of the data block in bytes
++ * Per : Recurrence period
++ * Tb : Throughput bandwidth to prevent stalling
++ * R  : Ratio of actual bandwidth used to Tb
++ * Ib : Instantaneous bandwidth
++ * Ab : Arbitrated bandwidth
++ *
++ * IB_RECURRBLOCK and AB_RECURRBLOCK:
++ * These are used if the requirement is to transfer a
++ * recurring block of data over a known time window.
++ *
++ * IB_THROUGHPUTBW and AB_THROUGHPUTBW:
++ * These are used for CPU style masters. Here the requirement
++ * is to have minimum throughput bandwidth available to avoid
++ * stalling.
++ */
++#define IB_RECURRBLOCK(Ws, Bs) ((Ws) == 0 ? 0 : ((Bs)/(Ws)))
++#define AB_RECURRBLOCK(Ws, Per) ((Ws) == 0 ? 0 : ((Bs)/(Per)))
++#define IB_THROUGHPUTBW(Tb) (Tb)
++#define AB_THROUGHPUTBW(Tb, R) ((Tb) * (R))
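++
++/*
++ * Worked example (numbers assumed): a client that transfers a 1 MB block
++ * every 16 ms and must finish each block within an 8 ms window would use
++ *   Ib = IB_RECURRBLOCK(0.008 s, 1048576 B) ~= 131 MB/s
++ *   Ab = 1048576 B / 0.016 s                ~= 65.5 MB/s
++ * A CPU-style master needing 500 MB/s of throughput with R = 0.5 would use
++ *   Ib = IB_THROUGHPUTBW(500 MB/s) and Ab = AB_THROUGHPUTBW(500 MB/s, 0.5).
++ */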
++
++struct msm_bus_vectors {
++      int src; /* Master */
++      int dst; /* Slave */
++      uint64_t ab; /* Arbitrated bandwidth */
++      uint64_t ib; /* Instantaneous bandwidth */
++};
++
++struct msm_bus_paths {
++      int num_paths;
++      struct msm_bus_vectors *vectors;
++};
++
++struct msm_bus_scale_pdata {
++      struct msm_bus_paths *usecase;
++      int num_usecases;
++      const char *name;
++      /*
++       * If the active_only flag is set to 1, the BW request is applied
++       * only when at least one CPU is active (powered on). If the flag
++       * is set to 0, then the BW request is always applied irrespective
++       * of the CPU state.
++       */
++      unsigned int active_only;
++};
++
++/* Scaling APIs */
++
++/*
++ * This function returns a handle to the client. This should be used to
++ * call msm_bus_scale_client_update_request.
++ * The function returns 0 if the bus driver is unable to register a client.
++ */
++
++#if (defined(CONFIG_MSM_BUS_SCALING) || defined(CONFIG_BUS_TOPOLOGY_ADHOC))
++int __init msm_bus_fabric_init_driver(void);
++uint32_t msm_bus_scale_register_client(struct msm_bus_scale_pdata *pdata);
++int msm_bus_scale_client_update_request(uint32_t cl, unsigned int index);
++void msm_bus_scale_unregister_client(uint32_t cl);
++/* AXI Port configuration APIs */
++int msm_bus_axi_porthalt(int master_port);
++int msm_bus_axi_portunhalt(int master_port);
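++
++/*
++ * Typical client sequence (sketch; assumes "pdata" was already populated,
++ * e.g. via msm_bus_cl_get_pdata() or a static msm_bus_scale_pdata):
++ *
++ *   uint32_t cl = msm_bus_scale_register_client(pdata);
++ *   if (!cl)
++ *           return -EINVAL;    (registration failed)
++ *   msm_bus_scale_client_update_request(cl, 1);    (vote for usecase 1)
++ *   ...
++ *   msm_bus_scale_unregister_client(cl);
++ */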
++
++#else
++static inline int __init msm_bus_fabric_init_driver(void) { return 0; }
++
++static inline uint32_t
++msm_bus_scale_register_client(struct msm_bus_scale_pdata *pdata)
++{
++      return 1;
++}
++
++static inline int
++msm_bus_scale_client_update_request(uint32_t cl, unsigned int index)
++{
++      return 0;
++}
++
++static inline void
++msm_bus_scale_unregister_client(uint32_t cl)
++{
++}
++
++static inline int msm_bus_axi_porthalt(int master_port)
++{
++      return 0;
++}
++
++static inline int msm_bus_axi_portunhalt(int master_port)
++{
++      return 0;
++}
++#endif
++
++#if defined(CONFIG_OF) && defined(CONFIG_MSM_BUS_SCALING)
++struct msm_bus_scale_pdata *msm_bus_pdata_from_node(
++              struct platform_device *pdev, struct device_node *of_node);
++struct msm_bus_scale_pdata *msm_bus_cl_get_pdata(struct platform_device *pdev);
++void msm_bus_cl_clear_pdata(struct msm_bus_scale_pdata *pdata);
++#else
++static inline struct msm_bus_scale_pdata
++*msm_bus_cl_get_pdata(struct platform_device *pdev)
++{
++      return NULL;
++}
++
++static inline struct msm_bus_scale_pdata *msm_bus_pdata_from_node(
++              struct platform_device *pdev, struct device_node *of_node)
++{
++      return NULL;
++}
++
++static inline void msm_bus_cl_clear_pdata(struct msm_bus_scale_pdata *pdata)
++{
++}
++#endif
++#endif /*_ARCH_ARM_MACH_MSM_BUS_H*/
+--- /dev/null
++++ b/drivers/bus/msm_bus/msm_bus_adhoc.h
+@@ -0,0 +1,141 @@
++/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 and
++ * only version 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ */
++
++#ifndef _ARCH_ARM_MACH_MSM_BUS_ADHOC_H
++#define _ARCH_ARM_MACH_MSM_BUS_ADHOC_H
++
++#include <linux/types.h>
++#include <linux/device.h>
++#include "msm-bus-board.h"
++#include "msm-bus.h"
++#include "msm_bus_rules.h"
++#include "msm_bus_core.h"
++
++struct msm_bus_node_device_type;
++struct link_node {
++      uint64_t lnode_ib[NUM_CTX];
++      uint64_t lnode_ab[NUM_CTX];
++      int next;
++      struct device *next_dev;
++      struct list_head link;
++      uint32_t in_use;
++};
++
++/* New types introduced for adhoc topology */
++struct msm_bus_noc_ops {
++      int (*qos_init)(struct msm_bus_node_device_type *dev,
++                      void __iomem *qos_base, uint32_t qos_off,
++                      uint32_t qos_delta, uint32_t qos_freq);
++      int (*set_bw)(struct msm_bus_node_device_type *dev,
++                      void __iomem *qos_base, uint32_t qos_off,
++                      uint32_t qos_delta, uint32_t qos_freq);
++      int (*limit_mport)(struct msm_bus_node_device_type *dev,
++                      void __iomem *qos_base, uint32_t qos_off,
++                      uint32_t qos_delta, uint32_t qos_freq, bool enable_lim,
++                      uint64_t lim_bw);
++      bool (*update_bw_reg)(int mode);
++};
++
++struct nodebw {
++      uint64_t ab[NUM_CTX];
++      bool dirty;
++};
++
++struct msm_bus_fab_device_type {
++      void __iomem *qos_base;
++      phys_addr_t pqos_base;
++      size_t qos_range;
++      uint32_t base_offset;
++      uint32_t qos_freq;
++      uint32_t qos_off;
++      uint32_t util_fact;
++      uint32_t vrail_comp;
++      struct msm_bus_noc_ops noc_ops;
++      enum msm_bus_hw_sel bus_type;
++      bool bypass_qos_prg;
++};
++
++struct qos_params_type {
++      int mode;
++      unsigned int prio_lvl;
++      unsigned int prio_rd;
++      unsigned int prio_wr;
++      unsigned int prio1;
++      unsigned int prio0;
++      unsigned int gp;
++      unsigned int thmp;
++      unsigned int ws;
++      int cur_mode;
++      u64 bw_buffer;
++};
++
++struct msm_bus_node_info_type {
++      const char *name;
++      unsigned int id;
++      int mas_rpm_id;
++      int slv_rpm_id;
++      int num_ports;
++      int num_qports;
++      int *qport;
++      struct qos_params_type qos_params;
++      unsigned int num_connections;
++      unsigned int num_blist;
++      bool is_fab_dev;
++      bool virt_dev;
++      bool is_traversed;
++      unsigned int *connections;
++      unsigned int *black_listed_connections;
++      struct device **dev_connections;
++      struct device **black_connections;
++      unsigned int bus_device_id;
++      struct device *bus_device;
++      unsigned int buswidth;
++      struct rule_update_path_info rule;
++      uint64_t lim_bw;
++      uint32_t util_fact;
++      uint32_t vrail_comp;
++};
++
++struct msm_bus_node_device_type {
++      struct msm_bus_node_info_type *node_info;
++      struct msm_bus_fab_device_type *fabdev;
++      int num_lnodes;
++      struct link_node *lnode_list;
++      uint64_t cur_clk_hz[NUM_CTX];
++      struct nodebw node_ab;
++      struct list_head link;
++      unsigned int ap_owned;
++      struct nodeclk clk[NUM_CTX];
++      struct nodeclk qos_clk;
++};
++
++int msm_bus_enable_limiter(struct msm_bus_node_device_type *nodedev,
++                              bool throttle_en, uint64_t lim_bw);
++int msm_bus_update_clks(struct msm_bus_node_device_type *nodedev,
++      int ctx, int **dirty_nodes, int *num_dirty);
++int msm_bus_commit_data(int *dirty_nodes, int ctx, int num_dirty);
++int msm_bus_update_bw(struct msm_bus_node_device_type *nodedev, int ctx,
++      int64_t add_bw, int **dirty_nodes, int *num_dirty);
++void *msm_bus_realloc_devmem(struct device *dev, void *p, size_t old_size,
++                                      size_t new_size, gfp_t flags);
++
++extern struct msm_bus_device_node_registration
++      *msm_bus_of_to_pdata(struct platform_device *pdev);
++extern void msm_bus_arb_setops_adhoc(struct msm_bus_arb_ops *arb_ops);
++extern int msm_bus_bimc_set_ops(struct msm_bus_node_device_type *bus_dev);
++extern int msm_bus_noc_set_ops(struct msm_bus_node_device_type *bus_dev);
++extern int msm_bus_of_get_static_rules(struct platform_device *pdev,
++                                      struct bus_rule_type **static_rule);
++extern int msm_rules_update_path(struct list_head *input_list,
++                              struct list_head *output_list);
++extern void print_all_rules(void);
++#endif /* _ARCH_ARM_MACH_MSM_BUS_ADHOC_H */
+--- /dev/null
++++ b/drivers/bus/msm_bus/msm_bus_arb_adhoc.c
+@@ -0,0 +1,998 @@
++/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 and
++ * only version 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ */
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/list.h>
++#include <linux/module.h>
++#include <linux/slab.h>
++#include <linux/mutex.h>
++#include <linux/clk.h>
++#include "msm-bus.h"
++#include "msm_bus_core.h"
++#include "msm_bus_adhoc.h"
++
++#define NUM_CL_HANDLES        50
++#define NUM_LNODES    3
++
++struct bus_search_type {
++      struct list_head link;
++      struct list_head node_list;
++};
++
++struct handle_type {
++      int num_entries;
++      struct msm_bus_client **cl_list;
++};
++
++static struct handle_type handle_list;
++struct list_head input_list;
++struct list_head apply_list;
++
++DEFINE_MUTEX(msm_bus_adhoc_lock);
++
++static bool chk_bl_list(struct list_head *black_list, unsigned int id)
++{
++      struct msm_bus_node_device_type *bus_node = NULL;
++
++      list_for_each_entry(bus_node, black_list, link) {
++              if (bus_node->node_info->id == id)
++                      return true;
++      }
++      return false;
++}
++
++static void copy_remaining_nodes(struct list_head *edge_list, struct list_head
++      *traverse_list, struct list_head *route_list)
++{
++      struct bus_search_type *search_node;
++
++      if (list_empty(edge_list) && list_empty(traverse_list))
++              return;
++
++      search_node = kzalloc(sizeof(struct bus_search_type), GFP_KERNEL);
++      INIT_LIST_HEAD(&search_node->node_list);
++      list_splice_init(edge_list, traverse_list);
++      list_splice_init(traverse_list, &search_node->node_list);
++      list_add_tail(&search_node->link, route_list);
++}
++
++/*
++ * Duplicate instantiation from msm_bus_arb.c. TODO: there needs to be a
++ * "util" file for these common funcs/macros.
++ *
++ */
++uint64_t msm_bus_div64(unsigned int w, uint64_t bw)
++{
++      uint64_t *b = &bw;
++
++      if ((bw > 0) && (bw < w))
++              return 1;
++
++      switch (w) {
++      case 0:
++              WARN(1, "AXI: Divide by 0 attempted\n");
++      case 1: return bw;
++      case 2: return (bw >> 1);
++      case 4: return (bw >> 2);
++      case 8: return (bw >> 3);
++      case 16: return (bw >> 4);
++      case 32: return (bw >> 5);
++      }
++
++      do_div(*b, w);
++      return *b;
++}
++
++int msm_bus_device_match_adhoc(struct device *dev, void *id)
++{
++      int ret = 0;
++      struct msm_bus_node_device_type *bnode = dev->platform_data;
++
++      if (bnode)
++              ret = (bnode->node_info->id == *(unsigned int *)id);
++      else
++              ret = 0;
++
++      return ret;
++}
++
++static int gen_lnode(struct device *dev,
++                      int next_hop, int prev_idx)
++{
++      struct link_node *lnode;
++      struct msm_bus_node_device_type *cur_dev = NULL;
++      int lnode_idx = -1;
++
++      if (!dev)
++              goto exit_gen_lnode;
++
++      cur_dev = dev->platform_data;
++      if (!cur_dev) {
++              MSM_BUS_ERR("%s: Null device ptr", __func__);
++              goto exit_gen_lnode;
++      }
++
++      if (!cur_dev->num_lnodes) {
++              cur_dev->lnode_list = devm_kzalloc(dev,
++                              sizeof(struct link_node) * NUM_LNODES,
++                                                              GFP_KERNEL);
++              if (!cur_dev->lnode_list)
++                      goto exit_gen_lnode;
++
++              lnode = cur_dev->lnode_list;
++              cur_dev->num_lnodes = NUM_LNODES;
++              lnode_idx = 0;
++      } else {
++              int i;
++              for (i = 0; i < cur_dev->num_lnodes; i++) {
++                      if (!cur_dev->lnode_list[i].in_use)
++                              break;
++              }
++
++              if (i < cur_dev->num_lnodes) {
++                      lnode = &cur_dev->lnode_list[i];
++                      lnode_idx = i;
++              } else {
++                      struct link_node *realloc_list;
++                      size_t cur_size = sizeof(struct link_node) *
++                                      cur_dev->num_lnodes;
++
++                      cur_dev->num_lnodes += NUM_LNODES;
++                      realloc_list = msm_bus_realloc_devmem(
++                                      dev,
++                                      cur_dev->lnode_list,
++                                      cur_size,
++                                      sizeof(struct link_node) *
++                                      cur_dev->num_lnodes, GFP_KERNEL);
++
++                      if (!realloc_list)
++                              goto exit_gen_lnode;
++
++                      cur_dev->lnode_list = realloc_list;
++                      lnode = &cur_dev->lnode_list[i];
++                      lnode_idx = i;
++              }
++      }
++
++      lnode->in_use = 1;
++      if (next_hop == cur_dev->node_info->id) {
++              lnode->next = -1;
++              lnode->next_dev = NULL;
++      } else {
++              lnode->next = prev_idx;
++              lnode->next_dev = bus_find_device(&msm_bus_type, NULL,
++                                      (void *) &next_hop,
++                                      msm_bus_device_match_adhoc);
++      }
++
++      memset(lnode->lnode_ib, 0, sizeof(uint64_t) * NUM_CTX);
++      memset(lnode->lnode_ab, 0, sizeof(uint64_t) * NUM_CTX);
++
++exit_gen_lnode:
++      return lnode_idx;
++}
++
++static int remove_lnode(struct msm_bus_node_device_type *cur_dev,
++                              int lnode_idx)
++{
++      int ret = 0;
++
++      if (!cur_dev) {
++              MSM_BUS_ERR("%s: Null device ptr", __func__);
++              ret = -ENODEV;
++              goto exit_remove_lnode;
++      }
++
++      if (lnode_idx != -1) {
++              if (!cur_dev->num_lnodes ||
++                              (lnode_idx > (cur_dev->num_lnodes - 1))) {
++                      MSM_BUS_ERR("%s: Invalid Idx %d, num_lnodes %d",
++                              __func__, lnode_idx, cur_dev->num_lnodes);
++                      ret = -ENODEV;
++                      goto exit_remove_lnode;
++              }
++
++              cur_dev->lnode_list[lnode_idx].next = -1;
++              cur_dev->lnode_list[lnode_idx].next_dev = NULL;
++              cur_dev->lnode_list[lnode_idx].in_use = 0;
++      }
++
++exit_remove_lnode:
++      return ret;
++}
++
++static int prune_path(struct list_head *route_list, int dest, int src,
++                              struct list_head *black_list, int found)
++{
++      struct bus_search_type *search_node, *temp_search_node;
++      struct msm_bus_node_device_type *bus_node;
++      struct list_head *bl_list;
++      struct list_head *temp_bl_list;
++      int search_dev_id = dest;
++      struct device *dest_dev = bus_find_device(&msm_bus_type, NULL,
++                                      (void *) &dest,
++                                      msm_bus_device_match_adhoc);
++      int lnode_hop = -1;
++
++      if (!found)
++              goto reset_links;
++
++      if (!dest_dev) {
++              MSM_BUS_ERR("%s: Can't find dest dev %d", __func__, dest);
++              goto exit_prune_path;
++      }
++
++      lnode_hop = gen_lnode(dest_dev, search_dev_id, lnode_hop);
++
++      list_for_each_entry_reverse(search_node, route_list, link) {
++              list_for_each_entry(bus_node, &search_node->node_list, link) {
++                      unsigned int i;
++                      for (i = 0; i < bus_node->node_info->num_connections;
++                                                                      i++) {
++                              if (bus_node->node_info->connections[i] ==
++                                                              search_dev_id) {
++                                      dest_dev = bus_find_device(
++                                              &msm_bus_type,
++                                              NULL,
++                                              (void *)
++                                              &bus_node->node_info->
++                                              id,
++                                              msm_bus_device_match_adhoc);
++
++                                      if (!dest_dev) {
++                                              lnode_hop = -1;
++                                              goto reset_links;
++                                      }
++
++                                      lnode_hop = gen_lnode(dest_dev,
++                                                      search_dev_id,
++                                                      lnode_hop);
++                                      search_dev_id =
++                                              bus_node->node_info->id;
++                                      break;
++                              }
++                      }
++              }
++      }
++reset_links:
++      list_for_each_entry_safe(search_node, temp_search_node, route_list,
++                                                                      link) {
++                      list_for_each_entry(bus_node, &search_node->node_list,
++                                                                      link)
++                              bus_node->node_info->is_traversed = false;
++
++                      list_del(&search_node->link);
++                      kfree(search_node);
++      }
++
++      list_for_each_safe(bl_list, temp_bl_list, black_list)
++              list_del(bl_list);
++
++exit_prune_path:
++      return lnode_hop;
++}
++
++static void setup_bl_list(struct msm_bus_node_device_type *node,
++                              struct list_head *black_list)
++{
++      unsigned int i;
++
++      for (i = 0; i < node->node_info->num_blist; i++) {
++              struct msm_bus_node_device_type *bdev;
++              bdev = node->node_info->black_connections[i]->platform_data;
++              list_add_tail(&bdev->link, black_list);
++      }
++}
++
++static int getpath(int src, int dest)
++{
++      struct list_head traverse_list;
++      struct list_head edge_list;
++      struct list_head route_list;
++      struct list_head black_list;
++      struct device *src_dev = bus_find_device(&msm_bus_type, NULL,
++                                      (void *) &src,
++                                      msm_bus_device_match_adhoc);
++      struct msm_bus_node_device_type *src_node;
++      struct bus_search_type *search_node;
++      int found = 0;
++      int depth_index = 0;
++      int first_hop = -1;
++
++      INIT_LIST_HEAD(&traverse_list);
++      INIT_LIST_HEAD(&edge_list);
++      INIT_LIST_HEAD(&route_list);
++      INIT_LIST_HEAD(&black_list);
++
++      if (!src_dev) {
++              MSM_BUS_ERR("%s: Cannot locate src dev %d", __func__, src);
++              goto exit_getpath;
++      }
++
++      src_node = src_dev->platform_data;
++      if (!src_node) {
++              MSM_BUS_ERR("%s:Fatal, Source dev %d not found", __func__, src);
++              goto exit_getpath;
++      }
++      list_add_tail(&src_node->link, &traverse_list);
++
++      while ((!found && !list_empty(&traverse_list))) {
++              struct msm_bus_node_device_type *bus_node = NULL;
++              /* Locate dest_id in the traverse list */
++              list_for_each_entry(bus_node, &traverse_list, link) {
++                      if (bus_node->node_info->id == dest) {
++                              found = 1;
++                              break;
++                      }
++              }
++
++              if (!found) {
++                      unsigned int i;
++                      /* Setup the new edge list */
++                      list_for_each_entry(bus_node, &traverse_list, link) {
++                              /* Setup list of black-listed nodes */
++                              setup_bl_list(bus_node, &black_list);
++
++                              for (i = 0; i < bus_node->node_info->
++                                              num_connections; i++) {
++                                      bool skip;
++                                      struct msm_bus_node_device_type
++                                                      *node_conn;
++                                      node_conn = bus_node->node_info->
++                                              dev_connections[i]->
++                                              platform_data;
++                                      if (node_conn->node_info->
++                                                      is_traversed) {
++                                              MSM_BUS_ERR("Circ Path %d\n",
++                                              node_conn->node_info->id);
++                                              goto reset_traversed;
++                                      }
++                                      skip = chk_bl_list(&black_list,
++                                                      bus_node->node_info->
++                                                      connections[i]);
++                                      if (!skip) {
++                                              list_add_tail(&node_conn->link,
++                                                      &edge_list);
++                                              node_conn->node_info->
++                                                      is_traversed = true;
++                                      }
++                              }
++                      }
++
++                      /* Keep tabs of the previous search list */
++                      search_node = kzalloc(sizeof(struct bus_search_type),
++                                       GFP_KERNEL);
++                      INIT_LIST_HEAD(&search_node->node_list);
++                      list_splice_init(&traverse_list,
++                                       &search_node->node_list);
++                      /* Add the previous search list to a route list */
++                      list_add_tail(&search_node->link, &route_list);
++                      /* Advancing the list depth */
++                      depth_index++;
++                      list_splice_init(&edge_list, &traverse_list);
++              }
++      }
++reset_traversed:
++      copy_remaining_nodes(&edge_list, &traverse_list, &route_list);
++      first_hop = prune_path(&route_list, dest, src, &black_list, found);
++
++exit_getpath:
++      return first_hop;
++}
++
++static uint64_t arbitrate_bus_req(struct msm_bus_node_device_type *bus_dev,
++                                                              int ctx)
++{
++      int i;
++      uint64_t max_ib = 0;
++      uint64_t sum_ab = 0;
++      uint64_t bw_max_hz;
++      struct msm_bus_node_device_type *fab_dev = NULL;
++      uint32_t util_fact = 0;
++      uint32_t vrail_comp = 0;
++
++      /* Find max ib */
++      for (i = 0; i < bus_dev->num_lnodes; i++) {
++              max_ib = max(max_ib, bus_dev->lnode_list[i].lnode_ib[ctx]);
++              sum_ab += bus_dev->lnode_list[i].lnode_ab[ctx];
++      }
++      /*
++       *  Account for Util factor and vrail comp. The new aggregation
++       *  formula is:
++       *  Freq_hz = max((sum(ab) * util_fact)/num_chan, max(ib)/vrail_comp)
++       *                              / bus-width
++       *  util_fact and vrail comp are obtained from fabric/Node's dts
++       *  properties.
++       *  They default to 100 if absent.
++       */
++      fab_dev = bus_dev->node_info->bus_device->platform_data;
++      /* Don't do this for virtual fabrics */
++      if (fab_dev && fab_dev->fabdev) {
++              util_fact = bus_dev->node_info->util_fact ?
++                      bus_dev->node_info->util_fact :
++                      fab_dev->fabdev->util_fact;
++              vrail_comp = bus_dev->node_info->vrail_comp ?
++                      bus_dev->node_info->vrail_comp :
++                      fab_dev->fabdev->vrail_comp;
++              sum_ab *= util_fact;
++              sum_ab = msm_bus_div64(100, sum_ab);
++              max_ib *= 100;
++              max_ib = msm_bus_div64(vrail_comp, max_ib);
++      }
++
++      /* Account for multiple channels if any */
++      if (bus_dev->node_info->num_qports > 1)
++              sum_ab = msm_bus_div64(bus_dev->node_info->num_qports,
++                                      sum_ab);
++
++      if (!bus_dev->node_info->buswidth) {
++              MSM_BUS_WARN("No bus width found for %d. Using default\n",
++                                      bus_dev->node_info->id);
++              bus_dev->node_info->buswidth = 8;
++      }
++
++      bw_max_hz = max(max_ib, sum_ab);
++      bw_max_hz = msm_bus_div64(bus_dev->node_info->buswidth,
++                                      bw_max_hz);
++
++      return bw_max_hz;
++}
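++
++/*
++ * Worked example of the formula above (all values assumed): with
++ * sum(ab) = 800 MB/s, max(ib) = 1000 MB/s, util_fact = vrail_comp = 100,
++ * a single qport and the default 8-byte bus width, the node clock comes
++ * out as max(800, 1000) MB/s / 8 bytes = 125 MHz.
++ */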
++
++static void del_inp_list(struct list_head *list)
++{
++      struct rule_update_path_info *rule_node;
++      struct rule_update_path_info *rule_node_tmp;
++
++      list_for_each_entry_safe(rule_node, rule_node_tmp, list, link)
++              list_del(&rule_node->link);
++}
++
++static void del_op_list(struct list_head *list)
++{
++      struct rule_apply_rcm_info *rule;
++      struct rule_apply_rcm_info *rule_tmp;
++
++      list_for_each_entry_safe(rule, rule_tmp, list, link)
++              list_del(&rule->link);
++}
++
++static int msm_bus_apply_rules(struct list_head *list, bool after_clk_commit)
++{
++      struct rule_apply_rcm_info *rule;
++      struct device *dev = NULL;
++      struct msm_bus_node_device_type *dev_info = NULL;
++      int ret = 0;
++      bool throttle_en = false;
++
++      list_for_each_entry(rule, list, link) {
++              if (!rule)
++                      break;
++
++              if (rule && (rule->after_clk_commit != after_clk_commit))
++                      continue;
++
++              dev = bus_find_device(&msm_bus_type, NULL,
++                              (void *) &rule->id,
++                              msm_bus_device_match_adhoc);
++
++              if (!dev) {
++                      MSM_BUS_ERR("Can't find dev node for %d", rule->id);
++                      continue;
++              }
++              dev_info = dev->platform_data;
++
++              throttle_en = ((rule->throttle == THROTTLE_ON) ? true : false);
++              ret = msm_bus_enable_limiter(dev_info, throttle_en,
++                                                      rule->lim_bw);
++              if (ret)
++                      MSM_BUS_ERR("Failed to set limiter for %d", rule->id);
++      }
++
++      return ret;
++}
++
++static uint64_t get_node_aggab(struct msm_bus_node_device_type *bus_dev)
++{
++      int i;
++      int ctx;
++      uint64_t max_agg_ab = 0;
++      uint64_t agg_ab = 0;
++
++      for (ctx = 0; ctx < NUM_CTX; ctx++) {
++              for (i = 0; i < bus_dev->num_lnodes; i++)
++                      agg_ab += bus_dev->lnode_list[i].lnode_ab[ctx];
++
++              if (bus_dev->node_info->num_qports > 1)
++                      agg_ab = msm_bus_div64(bus_dev->node_info->num_qports,
++                                                      agg_ab);
++
++              max_agg_ab = max(max_agg_ab, agg_ab);
++      }
++
++      return max_agg_ab;
++}
++
++static uint64_t get_node_ib(struct msm_bus_node_device_type *bus_dev)
++{
++      int i;
++      int ctx;
++      uint64_t max_ib = 0;
++
++      for (ctx = 0; ctx < NUM_CTX; ctx++) {
++              for (i = 0; i < bus_dev->num_lnodes; i++)
++                      max_ib = max(max_ib,
++                              bus_dev->lnode_list[i].lnode_ib[ctx]);
++      }
++      return max_ib;
++}
++
++static int update_path(int src, int dest, uint64_t req_ib, uint64_t req_bw,
++                      uint64_t cur_ib, uint64_t cur_bw, int src_idx, int ctx)
++{
++      struct device *src_dev = NULL;
++      struct device *next_dev = NULL;
++      struct link_node *lnode = NULL;
++      struct msm_bus_node_device_type *dev_info = NULL;
++      int curr_idx;
++      int ret = 0;
++      int *dirty_nodes = NULL;
++      int num_dirty = 0;
++      struct rule_update_path_info *rule_node;
++      bool rules_registered = msm_rule_are_rules_registered();
++
++      src_dev = bus_find_device(&msm_bus_type, NULL,
++                              (void *) &src,
++                              msm_bus_device_match_adhoc);
++
++      if (!src_dev) {
++              MSM_BUS_ERR("%s: Can't find source device %d", __func__, src);
++              ret = -ENODEV;
++              goto exit_update_path;
++      }
++
++      next_dev = src_dev;
++
++      if (src_idx < 0) {
++              MSM_BUS_ERR("%s: Invalid lnode idx %d", __func__, src_idx);
++              ret = -ENXIO;
++              goto exit_update_path;
++      }
++      curr_idx = src_idx;
++
++      INIT_LIST_HEAD(&input_list);
++      INIT_LIST_HEAD(&apply_list);
++
++      while (next_dev) {
++              dev_info = next_dev->platform_data;
++
++              if (curr_idx >= dev_info->num_lnodes) {
++                      MSM_BUS_ERR("%s: Invalid lnode Idx %d num lnodes %d",
++                       __func__, curr_idx, dev_info->num_lnodes);
++                      ret = -ENXIO;
++                      goto exit_update_path;
++              }
++
++              lnode = &dev_info->lnode_list[curr_idx];
++              lnode->lnode_ib[ctx] = req_ib;
++              lnode->lnode_ab[ctx] = req_bw;
++
++              dev_info->cur_clk_hz[ctx] = arbitrate_bus_req(dev_info, ctx);
++
++              /* Start updating the clocks at the first hop.
++               * It's ok to figure out the aggregated
++               * request at this node.
++               */
++              if (src_dev != next_dev) {
++                      ret = msm_bus_update_clks(dev_info, ctx, &dirty_nodes,
++                                                              &num_dirty);
++                      if (ret) {
++                              MSM_BUS_ERR("%s: Failed to update clks dev %d",
++                                      __func__, dev_info->node_info->id);
++                              goto exit_update_path;
++                      }
++              }
++
++              ret = msm_bus_update_bw(dev_info, ctx, req_bw, &dirty_nodes,
++                                                              &num_dirty);
++              if (ret) {
++                      MSM_BUS_ERR("%s: Failed to update bw dev %d",
++                              __func__, dev_info->node_info->id);
++                      goto exit_update_path;
++              }
++
++              if (rules_registered) {
++                      rule_node = &dev_info->node_info->rule;
++                      rule_node->id = dev_info->node_info->id;
++                      rule_node->ib = get_node_ib(dev_info);
++                      rule_node->ab = get_node_aggab(dev_info);
++                      rule_node->clk = max(dev_info->cur_clk_hz[ACTIVE_CTX],
++                                              dev_info->cur_clk_hz[DUAL_CTX]);
++                      list_add_tail(&rule_node->link, &input_list);
++              }
++
++              next_dev = lnode->next_dev;
++              curr_idx = lnode->next;
++      }
++
++      if (rules_registered) {
++              msm_rules_update_path(&input_list, &apply_list);
++              msm_bus_apply_rules(&apply_list, false);
++      }
++
++      msm_bus_commit_data(dirty_nodes, ctx, num_dirty);
++
++      if (rules_registered) {
++              msm_bus_apply_rules(&apply_list, true);
++              del_inp_list(&input_list);
++              del_op_list(&apply_list);
++      }
++exit_update_path:
++      return ret;
++}
++
++static int remove_path(int src, int dst, uint64_t cur_ib, uint64_t cur_ab,
++                              int src_idx, int active_only)
++{
++      struct device *src_dev = NULL;
++      struct device *next_dev = NULL;
++      struct link_node *lnode = NULL;
++      struct msm_bus_node_device_type *dev_info = NULL;
++      int ret = 0;
++      int cur_idx = src_idx;
++      int next_idx;
++
++      /* Update the current path to zero out all requests from
++       * this client on all paths
++       */
++
++      ret = update_path(src, dst, 0, 0, cur_ib, cur_ab, src_idx,
++                                                      active_only);
++      if (ret) {
++              MSM_BUS_ERR("%s: Error zeroing out path ctx %d",
++                                      __func__, ACTIVE_CTX);
++              goto exit_remove_path;
++      }
++
++      src_dev = bus_find_device(&msm_bus_type, NULL,
++                              (void *) &src,
++                              msm_bus_device_match_adhoc);
++      if (!src_dev) {
++              MSM_BUS_ERR("%s: Can't find source device %d", __func__, src);
++              ret = -ENODEV;
++              goto exit_remove_path;
++      }
++
++      next_dev = src_dev;
++
++      while (next_dev) {
++              dev_info = next_dev->platform_data;
++              lnode = &dev_info->lnode_list[cur_idx];
++              next_idx = lnode->next;
++              next_dev = lnode->next_dev;
++              remove_lnode(dev_info, cur_idx);
++              cur_idx = next_idx;
++      }
++
++exit_remove_path:
++      return ret;
++}
++
++static void getpath_debug(int src, int curr, int active_only)
++{
++      struct device *dev_node;
++      struct device *dev_it;
++      unsigned int hop = 1;
++      int idx;
++      struct msm_bus_node_device_type *devinfo;
++      int i;
++
++      dev_node = bus_find_device(&msm_bus_type, NULL,
++                              (void *) &src,
++                              msm_bus_device_match_adhoc);
++
++      if (!dev_node) {
++              MSM_BUS_ERR("SRC NOT FOUND %d", src);
++              return;
++      }
++
++      idx = curr;
++      devinfo = dev_node->platform_data;
++      dev_it = dev_node;
++
++      MSM_BUS_ERR("Route list Src %d", src);
++      while (dev_it) {
++              struct msm_bus_node_device_type *busdev =
++                      devinfo->node_info->bus_device->platform_data;
++
++              MSM_BUS_ERR("Hop[%d] at Device %d ctx %d", hop,
++                                      devinfo->node_info->id, active_only);
++
++              for (i = 0; i < NUM_CTX; i++) {
++                      MSM_BUS_ERR("dev info sel ib %llu",
++                                              devinfo->cur_clk_hz[i]);
++                      MSM_BUS_ERR("dev info sel ab %llu",
++                                              devinfo->node_ab.ab[i]);
++              }
++
++              dev_it = devinfo->lnode_list[idx].next_dev;
++              idx = devinfo->lnode_list[idx].next;
++              if (dev_it)
++                      devinfo = dev_it->platform_data;
++
++              MSM_BUS_ERR("Bus Device %d", busdev->node_info->id);
++              MSM_BUS_ERR("Bus Clock %llu", busdev->clk[active_only].rate);
++
++              if (idx < 0)
++                      break;
++              hop++;
++      }
++}
++
++static void unregister_client_adhoc(uint32_t cl)
++{
++      int i;
++      struct msm_bus_scale_pdata *pdata;
++      int lnode, src, curr, dest;
++      uint64_t  cur_clk, cur_bw;
++      struct msm_bus_client *client;
++
++      mutex_lock(&msm_bus_adhoc_lock);
++      if (!cl) {
++              MSM_BUS_ERR("%s: Null cl handle passed to unregister\n",
++                              __func__);
++              goto exit_unregister_client;
++      }
++      client = handle_list.cl_list[cl];
++      pdata = client->pdata;
++      if (!pdata) {
++              MSM_BUS_ERR("%s: Null pdata passed to unregister\n",
++                              __func__);
++              goto exit_unregister_client;
++      }
++
++      curr = client->curr;
++      if (curr >= pdata->num_usecases) {
++              MSM_BUS_ERR("Invalid index, defaulting curr to 0");
++              curr = 0;
++      }
++
++      MSM_BUS_DBG("%s: Unregistering client %p", __func__, client);
++
++      for (i = 0; i < pdata->usecase->num_paths; i++) {
++              src = client->pdata->usecase[curr].vectors[i].src;
++              dest = client->pdata->usecase[curr].vectors[i].dst;
++
++              lnode = client->src_pnode[i];
++              cur_clk = client->pdata->usecase[curr].vectors[i].ib;
++              cur_bw = client->pdata->usecase[curr].vectors[i].ab;
++              remove_path(src, dest, cur_clk, cur_bw, lnode,
++                                              pdata->active_only);
++      }
++      msm_bus_dbg_client_data(client->pdata, MSM_BUS_DBG_UNREGISTER, cl);
++      kfree(client->src_pnode);
++      kfree(client);
++      handle_list.cl_list[cl] = NULL;
++exit_unregister_client:
++      mutex_unlock(&msm_bus_adhoc_lock);
++      return;
++}
++
++static int alloc_handle_lst(int size)
++{
++      int ret = 0;
++      struct msm_bus_client **t_cl_list;
++
++      if (!handle_list.num_entries) {
++              t_cl_list = kzalloc(sizeof(struct msm_bus_client *)
++                      * NUM_CL_HANDLES, GFP_KERNEL);
++              if (ZERO_OR_NULL_PTR(t_cl_list)) {
++                      ret = -ENOMEM;
++                      MSM_BUS_ERR("%s: Failed to allocate handles list",
++                                                              __func__);
++                      goto exit_alloc_handle_lst;
++              }
++              handle_list.cl_list = t_cl_list;
++              handle_list.num_entries += NUM_CL_HANDLES;
++      } else {
++              t_cl_list = krealloc(handle_list.cl_list,
++                              sizeof(struct msm_bus_client *) *
++                              (handle_list.num_entries + NUM_CL_HANDLES),
++                              GFP_KERNEL);
++              if (ZERO_OR_NULL_PTR(t_cl_list)) {
++                      ret = -ENOMEM;
++                      MSM_BUS_ERR("%s: Failed to allocate handles list",
++                                                              __func__);
++                      goto exit_alloc_handle_lst;
++              }
++
++              handle_list.cl_list = t_cl_list;
++              memset(&handle_list.cl_list[handle_list.num_entries], 0,
++                      NUM_CL_HANDLES * sizeof(struct msm_bus_client *));
++              handle_list.num_entries += NUM_CL_HANDLES;
++      }
++exit_alloc_handle_lst:
++      return ret;
++}
++
++static uint32_t gen_handle(struct msm_bus_client *client)
++{
++      uint32_t handle = 0;
++      int i;
++      int ret = 0;
++
++      for (i = 0; i < handle_list.num_entries; i++) {
++              if (i && !handle_list.cl_list[i]) {
++                      handle = i;
++                      break;
++              }
++      }
++
++      if (!handle) {
++              ret = alloc_handle_lst(NUM_CL_HANDLES);
++
++              if (ret) {
++                      MSM_BUS_ERR("%s: Failed to allocate handle list",
++                                                      __func__);
++                      goto exit_gen_handle;
++              }
++              handle = i + 1;
++      }
++      handle_list.cl_list[handle] = client;
++exit_gen_handle:
++      return handle;
++}
++
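++/*
++ * Allocate a client for @pdata, resolve a path via getpath() for every
++ * src/dst vector of the first usecase and remember the resulting lnode
++ * index per path, then hand out a handle through gen_handle(). Returns the
++ * handle, or 0 on failure.
++ */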
++static uint32_t register_client_adhoc(struct msm_bus_scale_pdata *pdata)
++{
++      int src, dest;
++      int i;
++      struct msm_bus_client *client = NULL;
++      int *lnode;
++      uint32_t handle = 0;
++
++      mutex_lock(&msm_bus_adhoc_lock);
++      client = kzalloc(sizeof(struct msm_bus_client), GFP_KERNEL);
++      if (!client) {
++              MSM_BUS_ERR("%s: Error allocating client data", __func__);
++              goto exit_register_client;
++      }
++      client->pdata = pdata;
++
++      lnode = kzalloc(pdata->usecase->num_paths * sizeof(int), GFP_KERNEL);
++      if (ZERO_OR_NULL_PTR(lnode)) {
++              MSM_BUS_ERR("%s: Error allocating pathnode ptr!", __func__);
++              goto exit_register_client;
++      }
++      client->src_pnode = lnode;
++
++      for (i = 0; i < pdata->usecase->num_paths; i++) {
++              src = pdata->usecase->vectors[i].src;
++              dest = pdata->usecase->vectors[i].dst;
++
++              if ((src < 0) || (dest < 0)) {
++                      MSM_BUS_ERR("%s:Invalid src/dst.src %d dest %d",
++                              __func__, src, dest);
++                      goto exit_register_client;
++              }
++
++              lnode[i] = getpath(src, dest);
++              if (lnode[i] < 0) {
++                      MSM_BUS_ERR("%s:Failed to find path.src %d dest %d",
++                              __func__, src, dest);
++                      goto exit_register_client;
++              }
++      }
++
++      handle = gen_handle(client);
++      msm_bus_dbg_client_data(client->pdata, MSM_BUS_DBG_REGISTER,
++                                      handle);
++      MSM_BUS_DBG("%s:Client handle %d %s", __func__, handle,
++                                              client->pdata->name);
++exit_register_client:
++      mutex_unlock(&msm_bus_adhoc_lock);
++      return handle;
++}
++
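++/*
++ * Switch client @cl to usecase @index: for each of the client's paths the
++ * new ib/ab request is applied via update_path() against the values of the
++ * previously selected usecase. Requesting the already-active index is a
++ * no-op.
++ */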
++static int update_request_adhoc(uint32_t cl, unsigned int index)
++{
++      int i, ret = 0;
++      struct msm_bus_scale_pdata *pdata;
++      int lnode, src, curr, dest;
++      uint64_t req_clk, req_bw, curr_clk, curr_bw;
++      struct msm_bus_client *client;
++      const char *test_cl = "Null";
++      bool log_transaction = false;
++
++      mutex_lock(&msm_bus_adhoc_lock);
++
++      if (!cl) {
++              MSM_BUS_ERR("%s: Invalid client handle %d", __func__, cl);
++              ret = -ENXIO;
++              goto exit_update_request;
++      }
++
++      client = handle_list.cl_list[cl];
++      pdata = client->pdata;
++      if (!pdata) {
++              MSM_BUS_ERR("%s: Client data Null.[client didn't register]",
++                              __func__);
++              ret = -ENXIO;
++              goto exit_update_request;
++      }
++
++      if (index >= pdata->num_usecases) {
++              MSM_BUS_ERR("Client %u passed invalid index: %d\n",
++                      cl, index);
++              ret = -ENXIO;
++              goto exit_update_request;
++      }
++
++      if (client->curr == index) {
++              MSM_BUS_DBG("%s: Not updating client request idx %d unchanged",
++                              __func__, index);
++              goto exit_update_request;
++      }
++
++      curr = client->curr;
++      client->curr = index;
++
++      if (!strcmp(test_cl, pdata->name))
++              log_transaction = true;
++
++      MSM_BUS_DBG("%s: cl: %u index: %d curr: %d num_paths: %d\n", __func__,
++              cl, index, client->curr, client->pdata->usecase->num_paths);
++
++      for (i = 0; i < pdata->usecase->num_paths; i++) {
++              src = client->pdata->usecase[index].vectors[i].src;
++              dest = client->pdata->usecase[index].vectors[i].dst;
++
++              lnode = client->src_pnode[i];
++              req_clk = client->pdata->usecase[index].vectors[i].ib;
++              req_bw = client->pdata->usecase[index].vectors[i].ab;
++              if (curr < 0) {
++                      curr_clk = 0;
++                      curr_bw = 0;
++              } else {
++                      curr_clk = client->pdata->usecase[curr].vectors[i].ib;
++                      curr_bw = client->pdata->usecase[curr].vectors[i].ab;
++                      MSM_BUS_DBG("%s:ab: %llu ib: %llu\n", __func__,
++                                      curr_bw, curr_clk);
++              }
++
++              ret = update_path(src, dest, req_clk, req_bw,
++                              curr_clk, curr_bw, lnode, pdata->active_only);
++
++              if (ret) {
++                      MSM_BUS_ERR("%s: Update path failed! %d ctx %d\n",
++                                      __func__, ret, ACTIVE_CTX);
++                      goto exit_update_request;
++              }
++
++              if (log_transaction)
++                      getpath_debug(src, lnode, pdata->active_only);
++      }
++      msm_bus_dbg_client_data(client->pdata, index, cl);
++exit_update_request:
++      mutex_unlock(&msm_bus_adhoc_lock);
++      return ret;
++}
++
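++/*
++ * Illustrative only - a minimal sketch of how a consumer is expected to
++ * drive these ops, assuming the usual msm_bus_scale_* wrappers dispatch to
++ * the arb_ops installed below (those wrappers are not part of this file):
++ *
++ *      u32 cl = msm_bus_scale_register_client(pdata);
++ *      if (cl)
++ *              msm_bus_scale_client_update_request(cl, 1);
++ *      ...
++ *      msm_bus_scale_unregister_client(cl);
++ */
++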
++/**
++ * msm_bus_arb_setops_adhoc() - Setup the bus arbitration ops
++ * @arb_ops: pointer to the arb ops.
++ */
++void msm_bus_arb_setops_adhoc(struct msm_bus_arb_ops *arb_ops)
++{
++      arb_ops->register_client = register_client_adhoc;
++      arb_ops->update_request = update_request_adhoc;
++      arb_ops->unregister_client = unregister_client_adhoc;
++}
+--- /dev/null
++++ b/drivers/bus/msm_bus/msm_bus_bimc.c
+@@ -0,0 +1,2112 @@
++/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 and
++ * only version 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ */
++
++#define pr_fmt(fmt) "AXI: BIMC: %s(): " fmt, __func__
++
++#include <linux/slab.h>
++#include <linux/io.h>
++#include "msm-bus-board.h"
++#include "msm_bus_core.h"
++#include "msm_bus_bimc.h"
++#include "msm_bus_adhoc.h"
++#include <trace/events/trace_msm_bus.h>
++
++enum msm_bus_bimc_slave_block {
++      SLAVE_BLOCK_RESERVED = 0,
++      SLAVE_BLOCK_SLAVE_WAY,
++      SLAVE_BLOCK_XPU,
++      SLAVE_BLOCK_ARBITER,
++      SLAVE_BLOCK_SCMO,
++};
++
++enum bke_sw {
++      BKE_OFF = 0,
++      BKE_ON = 1,
++};
++
++/* M_Generic */
++
++#define M_REG_BASE(b)         ((b) + 0x00008000)
++
++#define M_COMPONENT_INFO_ADDR(b, n) \
++              (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000000)
++enum bimc_m_component_info {
++      M_COMPONENT_INFO_RMSK           = 0xffffff,
++      M_COMPONENT_INFO_INSTANCE_BMSK  = 0xff0000,
++      M_COMPONENT_INFO_INSTANCE_SHFT  = 0x10,
++      M_COMPONENT_INFO_SUB_TYPE_BMSK  = 0xff00,
++      M_COMPONENT_INFO_SUB_TYPE_SHFT  = 0x8,
++      M_COMPONENT_INFO_TYPE_BMSK      = 0xff,
++      M_COMPONENT_INFO_TYPE_SHFT      = 0x0,
++};
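++
++/*
++ * Register access convention used throughout this file: *_ADDR(b, n) yields
++ * the register of port n relative to base b (masters sit at b + 0x8000 with
++ * a 0x4000 stride; the slave-side blocks below follow the same pattern with
++ * their own bases and strides), *_RMSK masks the readable bits, and fields
++ * are extracted as (val & FIELD_BMSK) >> FIELD_SHFT. Illustrative example,
++ * not used by the driver as such:
++ *
++ *      u32 v = readl_relaxed(M_COMPONENT_INFO_ADDR(base, 0));
++ *      u32 type = (v & M_COMPONENT_INFO_TYPE_BMSK) >>
++ *                      M_COMPONENT_INFO_TYPE_SHFT;
++ */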
++
++#define M_CONFIG_INFO_0_ADDR(b, n) \
++      (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000020)
++enum bimc_m_config_info_0 {
++      M_CONFIG_INFO_0_RMSK                    = 0xff00ffff,
++      M_CONFIG_INFO_0_SYNC_MODE_BMSK          = 0xff000000,
++      M_CONFIG_INFO_0_SYNC_MODE_SHFT          = 0x18,
++      M_CONFIG_INFO_0_CONNECTION_TYPE_BMSK    = 0xff00,
++      M_CONFIG_INFO_0_CONNECTION_TYPE_SHFT    = 0x8,
++      M_CONFIG_INFO_0_FUNC_BMSK               = 0xff,
++      M_CONFIG_INFO_0_FUNC_SHFT               = 0x0,
++};
++
++#define M_CONFIG_INFO_1_ADDR(b, n) \
++      (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000030)
++enum bimc_m_config_info_1 {
++      M_CONFIG_INFO_1_RMSK                    = 0xffffffff,
++      M_CONFIG_INFO_1_SWAY_CONNECTIVITY_BMSK  = 0xffffffff,
++      M_CONFIG_INFO_1_SWAY_CONNECTIVITY_SHFT  = 0x0,
++};
++
++#define M_CONFIG_INFO_2_ADDR(b, n) \
++      (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000040)
++enum bimc_m_config_info_2 {
++      M_CONFIG_INFO_2_RMSK                    = 0xffffffff,
++      M_CONFIG_INFO_2_M_DATA_WIDTH_BMSK       = 0xffff0000,
++      M_CONFIG_INFO_2_M_DATA_WIDTH_SHFT       = 0x10,
++      M_CONFIG_INFO_2_M_TID_WIDTH_BMSK        = 0xff00,
++      M_CONFIG_INFO_2_M_TID_WIDTH_SHFT        = 0x8,
++      M_CONFIG_INFO_2_M_MID_WIDTH_BMSK        = 0xff,
++      M_CONFIG_INFO_2_M_MID_WIDTH_SHFT        = 0x0,
++};
++
++#define M_CONFIG_INFO_3_ADDR(b, n) \
++      (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000050)
++enum bimc_m_config_info_3 {
++      M_CONFIG_INFO_3_RMSK                    = 0xffffffff,
++      M_CONFIG_INFO_3_RCH_DEPTH_BMSK          = 0xff000000,
++      M_CONFIG_INFO_3_RCH_DEPTH_SHFT          = 0x18,
++      M_CONFIG_INFO_3_BCH_DEPTH_BMSK          = 0xff0000,
++      M_CONFIG_INFO_3_BCH_DEPTH_SHFT          = 0x10,
++      M_CONFIG_INFO_3_WCH_DEPTH_BMSK          = 0xff00,
++      M_CONFIG_INFO_3_WCH_DEPTH_SHFT          = 0x8,
++      M_CONFIG_INFO_3_ACH_DEPTH_BMSK          = 0xff,
++      M_CONFIG_INFO_3_ACH_DEPTH_SHFT          = 0x0,
++};
++
++#define M_CONFIG_INFO_4_ADDR(b, n) \
++      (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000060)
++enum bimc_m_config_info_4 {
++      M_CONFIG_INFO_4_RMSK                    = 0xffff,
++      M_CONFIG_INFO_4_REORDER_BUF_DEPTH_BMSK  = 0xff00,
++      M_CONFIG_INFO_4_REORDER_BUF_DEPTH_SHFT  = 0x8,
++      M_CONFIG_INFO_4_REORDER_TABLE_DEPTH_BMSK        = 0xff,
++      M_CONFIG_INFO_4_REORDER_TABLE_DEPTH_SHFT        = 0x0,
++};
++
++#define M_CONFIG_INFO_5_ADDR(b, n) \
++      (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000070)
++enum bimc_m_config_info_5 {
++      M_CONFIG_INFO_5_RMSK                    = 0x111,
++      M_CONFIG_INFO_5_MP2ARB_PIPELINE_EN_BMSK = 0x100,
++      M_CONFIG_INFO_5_MP2ARB_PIPELINE_EN_SHFT = 0x8,
++      M_CONFIG_INFO_5_MPBUF_PIPELINE_EN_BMSK  = 0x10,
++      M_CONFIG_INFO_5_MPBUF_PIPELINE_EN_SHFT  = 0x4,
++      M_CONFIG_INFO_5_M2MP_PIPELINE_EN_BMSK   = 0x1,
++      M_CONFIG_INFO_5_M2MP_PIPELINE_EN_SHFT   = 0x0,
++};
++
++#define M_INT_STATUS_ADDR(b, n) \
++      (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000100)
++enum bimc_m_int_status {
++      M_INT_STATUS_RMSK                       = 0x3,
++};
++
++#define M_INT_CLR_ADDR(b, n) \
++      (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000108)
++enum bimc_m_int_clr {
++      M_INT_CLR_RMSK                  = 0x3,
++};
++
++#define M_INT_EN_ADDR(b, n) \
++      (M_REG_BASE(b) + (0x4000 * (n)) + 0x0000010c)
++enum bimc_m_int_en {
++      M_INT_EN_RMSK                   = 0x3,
++};
++
++#define M_CLK_CTRL_ADDR(b, n) \
++      (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000200)
++enum bimc_m_clk_ctrl {
++      M_CLK_CTRL_RMSK                         = 0x3,
++      M_CLK_CTRL_MAS_CLK_GATING_EN_BMSK       = 0x2,
++      M_CLK_CTRL_MAS_CLK_GATING_EN_SHFT       = 0x1,
++      M_CLK_CTRL_CORE_CLK_GATING_EN_BMSK      = 0x1,
++      M_CLK_CTRL_CORE_CLK_GATING_EN_SHFT      = 0x0,
++};
++
++#define M_MODE_ADDR(b, n) \
++              (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000210)
++enum bimc_m_mode {
++      M_MODE_RMSK                             = 0xf0000011,
++      M_MODE_WR_GATHER_BEATS_BMSK             = 0xf0000000,
++      M_MODE_WR_GATHER_BEATS_SHFT             = 0x1c,
++      M_MODE_NARROW_WR_BMSK                   = 0x10,
++      M_MODE_NARROW_WR_SHFT                   = 0x4,
++      M_MODE_ORDERING_MODEL_BMSK              = 0x1,
++      M_MODE_ORDERING_MODEL_SHFT              = 0x0,
++};
++
++#define M_PRIOLVL_OVERRIDE_ADDR(b, n) \
++      (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000230)
++enum bimc_m_priolvl_override {
++      M_PRIOLVL_OVERRIDE_RMSK                 = 0x301,
++      M_PRIOLVL_OVERRIDE_BMSK                 = 0x300,
++      M_PRIOLVL_OVERRIDE_SHFT                 = 0x8,
++      M_PRIOLVL_OVERRIDE_OVERRIDE_PRIOLVL_BMSK        = 0x1,
++      M_PRIOLVL_OVERRIDE_OVERRIDE_PRIOLVL_SHFT        = 0x0,
++};
++
++#define M_RD_CMD_OVERRIDE_ADDR(b, n) \
++      (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000240)
++enum bimc_m_read_command_override {
++      M_RD_CMD_OVERRIDE_RMSK                  = 0x3071f7f,
++      M_RD_CMD_OVERRIDE_AREQPRIO_BMSK         = 0x3000000,
++      M_RD_CMD_OVERRIDE_AREQPRIO_SHFT         = 0x18,
++      M_RD_CMD_OVERRIDE_AMEMTYPE_BMSK         = 0x70000,
++      M_RD_CMD_OVERRIDE_AMEMTYPE_SHFT         = 0x10,
++      M_RD_CMD_OVERRIDE_ATRANSIENT_BMSK               = 0x1000,
++      M_RD_CMD_OVERRIDE_ATRANSIENT_SHFT               = 0xc,
++      M_RD_CMD_OVERRIDE_ASHARED_BMSK          = 0x800,
++      M_RD_CMD_OVERRIDE_ASHARED_SHFT          = 0xb,
++      M_RD_CMD_OVERRIDE_AREDIRECT_BMSK                = 0x400,
++      M_RD_CMD_OVERRIDE_AREDIRECT_SHFT                = 0xa,
++      M_RD_CMD_OVERRIDE_AOOO_BMSK                     = 0x200,
++      M_RD_CMD_OVERRIDE_AOOO_SHFT                     = 0x9,
++      M_RD_CMD_OVERRIDE_AINNERSHARED_BMSK             = 0x100,
++      M_RD_CMD_OVERRIDE_AINNERSHARED_SHFT             = 0x8,
++      M_RD_CMD_OVERRIDE_OVERRIDE_AREQPRIO_BMSK        = 0x40,
++      M_RD_CMD_OVERRIDE_OVERRIDE_AREQPRIO_SHFT        = 0x6,
++      M_RD_CMD_OVERRIDE_OVERRIDE_ATRANSIENT_BMSK      = 0x20,
++      M_RD_CMD_OVERRIDE_OVERRIDE_ATRANSIENT_SHFT      = 0x5,
++      M_RD_CMD_OVERRIDE_OVERRIDE_AMEMTYPE_BMSK        = 0x10,
++      M_RD_CMD_OVERRIDE_OVERRIDE_AMEMTYPE_SHFT        = 0x4,
++      M_RD_CMD_OVERRIDE_OVERRIDE_ASHARED_BMSK = 0x8,
++      M_RD_CMD_OVERRIDE_OVERRIDE_ASHARED_SHFT = 0x3,
++      M_RD_CMD_OVERRIDE_OVERRIDE_AREDIRECT_BMSK       = 0x4,
++      M_RD_CMD_OVERRIDE_OVERRIDE_AREDIRECT_SHFT       = 0x2,
++      M_RD_CMD_OVERRIDE_OVERRIDE_AOOO_BMSK            = 0x2,
++      M_RD_CMD_OVERRIDE_OVERRIDE_AOOO_SHFT            = 0x1,
++      M_RD_CMD_OVERRIDE_OVERRIDE_AINNERSHARED_BMSK    = 0x1,
++      M_RD_CMD_OVERRIDE_OVERRIDE_AINNERSHARED_SHFT    = 0x0,
++};
++
++#define M_WR_CMD_OVERRIDE_ADDR(b, n) \
++      (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000250)
++enum bimc_m_write_command_override {
++      M_WR_CMD_OVERRIDE_RMSK                  = 0x3071f7f,
++      M_WR_CMD_OVERRIDE_AREQPRIO_BMSK         = 0x3000000,
++      M_WR_CMD_OVERRIDE_AREQPRIO_SHFT         = 0x18,
++      M_WR_CMD_OVERRIDE_AMEMTYPE_BMSK         = 0x70000,
++      M_WR_CMD_OVERRIDE_AMEMTYPE_SHFT         = 0x10,
++      M_WR_CMD_OVERRIDE_ATRANSIENT_BMSK       = 0x1000,
++      M_WR_CMD_OVERRIDE_ATRANSIENT_SHFT       = 0xc,
++      M_WR_CMD_OVERRIDE_ASHARED_BMSK          = 0x800,
++      M_WR_CMD_OVERRIDE_ASHARED_SHFT          = 0xb,
++      M_WR_CMD_OVERRIDE_AREDIRECT_BMSK                = 0x400,
++      M_WR_CMD_OVERRIDE_AREDIRECT_SHFT                = 0xa,
++      M_WR_CMD_OVERRIDE_AOOO_BMSK                     = 0x200,
++      M_WR_CMD_OVERRIDE_AOOO_SHFT                     = 0x9,
++      M_WR_CMD_OVERRIDE_AINNERSHARED_BMSK             = 0x100,
++      M_WR_CMD_OVERRIDE_AINNERSHARED_SHFT             = 0x8,
++      M_WR_CMD_OVERRIDE_OVERRIDE_AREQPRIO_BMSK        = 0x40,
++      M_WR_CMD_OVERRIDE_OVERRIDE_AREQPRIO_SHFT        = 0x6,
++      M_WR_CMD_OVERRIDE_OVERRIDE_ATRANSIENT_BMSK      = 0x20,
++      M_WR_CMD_OVERRIDE_OVERRIDE_ATRANSIENT_SHFT      = 0x5,
++      M_WR_CMD_OVERRIDE_OVERRIDE_AMEMTYPE_BMSK        = 0x10,
++      M_WR_CMD_OVERRIDE_OVERRIDE_AMEMTYPE_SHFT        = 0x4,
++      M_WR_CMD_OVERRIDE_OVERRIDE_ASHARED_BMSK = 0x8,
++      M_WR_CMD_OVERRIDE_OVERRIDE_ASHARED_SHFT = 0x3,
++      M_WR_CMD_OVERRIDE_OVERRIDE_AREDIRECT_BMSK       = 0x4,
++      M_WR_CMD_OVERRIDE_OVERRIDE_AREDIRECT_SHFT       = 0x2,
++      M_WR_CMD_OVERRIDE_OVERRIDE_AOOO_BMSK    = 0x2,
++      M_WR_CMD_OVERRIDE_OVERRIDE_AOOO_SHFT    = 0x1,
++      M_WR_CMD_OVERRIDE_OVERRIDE_AINNERSHARED_BMSK    = 0x1,
++      M_WR_CMD_OVERRIDE_OVERRIDE_AINNERSHARED_SHFT    = 0x0,
++};
++
++#define M_BKE_EN_ADDR(b, n) \
++      (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000300)
++enum bimc_m_bke_en {
++      M_BKE_EN_RMSK                   = 0x1,
++      M_BKE_EN_EN_BMSK                = 0x1,
++      M_BKE_EN_EN_SHFT                = 0x0,
++};
++
++/* Grant Period registers */
++#define M_BKE_GP_ADDR(b, n) \
++      (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000304)
++enum bimc_m_bke_grant_period {
++      M_BKE_GP_RMSK           = 0x3ff,
++      M_BKE_GP_GP_BMSK        = 0x3ff,
++      M_BKE_GP_GP_SHFT        = 0x0,
++};
++
++/* Grant count register.
++ * The grant count register holds a signed 16-bit value,
++ * range 0-0x7fff.
++ */
++#define M_BKE_GC_ADDR(b, n) \
++      (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000308)
++enum bimc_m_bke_grant_count {
++      M_BKE_GC_RMSK                   = 0xffff,
++      M_BKE_GC_GC_BMSK                = 0xffff,
++      M_BKE_GC_GC_SHFT                = 0x0,
++};
++
++/* Threshold High Registers */
++#define M_BKE_THH_ADDR(b, n) \
++      (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000320)
++enum bimc_m_bke_thresh_high {
++      M_BKE_THH_RMSK          = 0xffff,
++      M_BKE_THH_THRESH_BMSK   = 0xffff,
++      M_BKE_THH_THRESH_SHFT   = 0x0,
++};
++
++/* Threshold Medium Registers */
++#define M_BKE_THM_ADDR(b, n) \
++      (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000324)
++enum bimc_m_bke_thresh_medium {
++      M_BKE_THM_RMSK          = 0xffff,
++      M_BKE_THM_THRESH_BMSK   = 0xffff,
++      M_BKE_THM_THRESH_SHFT   = 0x0,
++};
++
++/* Threshold Low Registers */
++#define M_BKE_THL_ADDR(b, n) \
++      (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000328)
++enum bimc_m_bke_thresh_low {
++      M_BKE_THL_RMSK                  = 0xffff,
++      M_BKE_THL_THRESH_BMSK           = 0xffff,
++      M_BKE_THL_THRESH_SHFT           = 0x0,
++};
++
++#define M_BKE_HEALTH_0_CONFIG_ADDR(b, n) \
++      (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000340)
++enum bimc_m_bke_health_0 {
++      M_BKE_HEALTH_0_CONFIG_RMSK                      = 0x80000303,
++      M_BKE_HEALTH_0_CONFIG_LIMIT_CMDS_BMSK           = 0x80000000,
++      M_BKE_HEALTH_0_CONFIG_LIMIT_CMDS_SHFT           = 0x1f,
++      M_BKE_HEALTH_0_CONFIG_AREQPRIO_BMSK             = 0x300,
++      M_BKE_HEALTH_0_CONFIG_AREQPRIO_SHFT             = 0x8,
++      M_BKE_HEALTH_0_CONFIG_PRIOLVL_BMSK              = 0x3,
++      M_BKE_HEALTH_0_CONFIG_PRIOLVL_SHFT              = 0x0,
++};
++
++#define M_BKE_HEALTH_1_CONFIG_ADDR(b, n) \
++      (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000344)
++enum bimc_m_bke_health_1 {
++      M_BKE_HEALTH_1_CONFIG_RMSK                      = 0x80000303,
++      M_BKE_HEALTH_1_CONFIG_LIMIT_CMDS_BMSK           = 0x80000000,
++      M_BKE_HEALTH_1_CONFIG_LIMIT_CMDS_SHFT           = 0x1f,
++      M_BKE_HEALTH_1_CONFIG_AREQPRIO_BMSK             = 0x300,
++      M_BKE_HEALTH_1_CONFIG_AREQPRIO_SHFT             = 0x8,
++      M_BKE_HEALTH_1_CONFIG_PRIOLVL_BMSK              = 0x3,
++      M_BKE_HEALTH_1_CONFIG_PRIOLVL_SHFT              = 0x0,
++};
++
++#define M_BKE_HEALTH_2_CONFIG_ADDR(b, n) \
++      (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000348)
++enum bimc_m_bke_health_2 {
++      M_BKE_HEALTH_2_CONFIG_RMSK                      = 0x80000303,
++      M_BKE_HEALTH_2_CONFIG_LIMIT_CMDS_BMSK           = 0x80000000,
++      M_BKE_HEALTH_2_CONFIG_LIMIT_CMDS_SHFT           = 0x1f,
++      M_BKE_HEALTH_2_CONFIG_AREQPRIO_BMSK             = 0x300,
++      M_BKE_HEALTH_2_CONFIG_AREQPRIO_SHFT             = 0x8,
++      M_BKE_HEALTH_2_CONFIG_PRIOLVL_BMSK              = 0x3,
++      M_BKE_HEALTH_2_CONFIG_PRIOLVL_SHFT              = 0x0,
++};
++
++#define M_BKE_HEALTH_3_CONFIG_ADDR(b, n) \
++      (M_REG_BASE(b) + (0x4000 * (n)) + 0x0000034c)
++enum bimc_m_bke_health_3 {
++      M_BKE_HEALTH_3_CONFIG_RMSK                      = 0x303,
++      M_BKE_HEALTH_3_CONFIG_AREQPRIO_BMSK     = 0x300,
++      M_BKE_HEALTH_3_CONFIG_AREQPRIO_SHFT     = 0x8,
++      M_BKE_HEALTH_3_CONFIG_PRIOLVL_BMSK              = 0x3,
++      M_BKE_HEALTH_3_CONFIG_PRIOLVL_SHFT              = 0x0,
++};
++
++#define M_BUF_STATUS_ADDR(b, n) \
++              (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000400)
++enum bimc_m_buf_status {
++      M_BUF_STATUS_RMSK                       = 0xf03f030,
++      M_BUF_STATUS_RCH_DATA_WR_FULL_BMSK      = 0x8000000,
++      M_BUF_STATUS_RCH_DATA_WR_FULL_SHFT      = 0x1b,
++      M_BUF_STATUS_RCH_DATA_WR_EMPTY_BMSK     = 0x4000000,
++      M_BUF_STATUS_RCH_DATA_WR_EMPTY_SHFT     = 0x1a,
++      M_BUF_STATUS_RCH_CTRL_WR_FULL_BMSK      = 0x2000000,
++      M_BUF_STATUS_RCH_CTRL_WR_FULL_SHFT      = 0x19,
++      M_BUF_STATUS_RCH_CTRL_WR_EMPTY_BMSK     = 0x1000000,
++      M_BUF_STATUS_RCH_CTRL_WR_EMPTY_SHFT     = 0x18,
++      M_BUF_STATUS_BCH_WR_FULL_BMSK           = 0x20000,
++      M_BUF_STATUS_BCH_WR_FULL_SHFT           = 0x11,
++      M_BUF_STATUS_BCH_WR_EMPTY_BMSK          = 0x10000,
++      M_BUF_STATUS_BCH_WR_EMPTY_SHFT          = 0x10,
++      M_BUF_STATUS_WCH_DATA_RD_FULL_BMSK      = 0x8000,
++      M_BUF_STATUS_WCH_DATA_RD_FULL_SHFT      = 0xf,
++      M_BUF_STATUS_WCH_DATA_RD_EMPTY_BMSK     = 0x4000,
++      M_BUF_STATUS_WCH_DATA_RD_EMPTY_SHFT     = 0xe,
++      M_BUF_STATUS_WCH_CTRL_RD_FULL_BMSK      = 0x2000,
++      M_BUF_STATUS_WCH_CTRL_RD_FULL_SHFT      = 0xd,
++      M_BUF_STATUS_WCH_CTRL_RD_EMPTY_BMSK     = 0x1000,
++      M_BUF_STATUS_WCH_CTRL_RD_EMPTY_SHFT     = 0xc,
++      M_BUF_STATUS_ACH_RD_FULL_BMSK           = 0x20,
++      M_BUF_STATUS_ACH_RD_FULL_SHFT           = 0x5,
++      M_BUF_STATUS_ACH_RD_EMPTY_BMSK          = 0x10,
++      M_BUF_STATUS_ACH_RD_EMPTY_SHFT          = 0x4,
++};
++/* BIMC Generic */
++
++#define S_REG_BASE(b) ((b) + 0x00048000)
++
++#define S_COMPONENT_INFO_ADDR(b, n) \
++      (S_REG_BASE(b) + (0x8000 * (n)) + 0x00000000)
++enum bimc_s_component_info {
++      S_COMPONENT_INFO_RMSK                   = 0xffffff,
++      S_COMPONENT_INFO_INSTANCE_BMSK          = 0xff0000,
++      S_COMPONENT_INFO_INSTANCE_SHFT          = 0x10,
++      S_COMPONENT_INFO_SUB_TYPE_BMSK          = 0xff00,
++      S_COMPONENT_INFO_SUB_TYPE_SHFT          = 0x8,
++      S_COMPONENT_INFO_TYPE_BMSK              = 0xff,
++      S_COMPONENT_INFO_TYPE_SHFT              = 0x0,
++};
++
++#define S_HW_INFO_ADDR(b, n) \
++      (S_REG_BASE(b) + (0x80000 * (n)) + 0x00000010)
++enum bimc_s_hw_info {
++      S_HW_INFO_RMSK                          = 0xffffffff,
++      S_HW_INFO_MAJOR_BMSK                    = 0xff000000,
++      S_HW_INFO_MAJOR_SHFT                    = 0x18,
++      S_HW_INFO_BRANCH_BMSK                   = 0xff0000,
++      S_HW_INFO_BRANCH_SHFT                   = 0x10,
++      S_HW_INFO_MINOR_BMSK                    = 0xff00,
++      S_HW_INFO_MINOR_SHFT                    = 0x8,
++      S_HW_INFO_ECO_BMSK                      = 0xff,
++      S_HW_INFO_ECO_SHFT                      = 0x0,
++};
++
++
++/* S_SCMO_GENERIC */
++
++#define S_SCMO_REG_BASE(b)    ((b) + 0x00048000)
++
++#define S_SCMO_CONFIG_INFO_0_ADDR(b, n) \
++              (S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000020)
++enum bimc_s_scmo_config_info_0 {
++      S_SCMO_CONFIG_INFO_0_RMSK               = 0xffffffff,
++      S_SCMO_CONFIG_INFO_0_DATA_WIDTH_BMSK    = 0xffff0000,
++      S_SCMO_CONFIG_INFO_0_DATA_WIDTH_SHFT    = 0x10,
++      S_SCMO_CONFIG_INFO_0_TID_WIDTH_BMSK     = 0xff00,
++      S_SCMO_CONFIG_INFO_0_TID_WIDTH_SHFT     = 0x8,
++      S_SCMO_CONFIG_INFO_0_MID_WIDTH_BMSK     = 0xff,
++      S_SCMO_CONFIG_INFO_0_MID_WIDTH_SHFT     = 0x0,
++};
++
++#define S_SCMO_CONFIG_INFO_1_ADDR(b, n) \
++              (S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000030)
++enum bimc_s_scmo_config_info_1 {
++      S_SCMO_CONFIG_INFO_1_RMSK                       = 0xffffffff,
++      S_SCMO_CONFIG_INFO_1_MPORT_CONNECTIVITY_BMSK    = 0xffffffff,
++      S_SCMO_CONFIG_INFO_1_MPORT_CONNECTIVITY_SHFT    = 0x0,
++};
++
++#define S_SCMO_CONFIG_INFO_2_ADDR(b, n) \
++              (S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000040)
++enum bimc_s_scmo_config_info_2 {
++      S_SCMO_CONFIG_INFO_2_RMSK                       = 0xff00ff,
++      S_SCMO_CONFIG_INFO_2_NUM_GLOBAL_MONS_BMSK       = 0xff0000,
++      S_SCMO_CONFIG_INFO_2_NUM_GLOBAL_MONS_SHFT       = 0x10,
++      S_SCMO_CONFIG_INFO_2_VMID_WIDTH_BMSK    = 0xff,
++      S_SCMO_CONFIG_INFO_2_VMID_WIDTH_SHFT    = 0x0,
++};
++
++#define S_SCMO_CONFIG_INFO_3_ADDR(b, n) \
++              (S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000050)
++enum bimc_s_scmo_config_info_3 {
++      S_SCMO_CONFIG_INFO_3_RMSK                       = 0xffffffff,
++      S_SCMO_CONFIG_INFO_3_RCH0_CTRL_DEPTH_BMSK       = 0xff000000,
++      S_SCMO_CONFIG_INFO_3_RCH0_CTRL_DEPTH_SHFT       = 0x18,
++      S_SCMO_CONFIG_INFO_3_RCH0_DEPTH_BMSK            = 0xff0000,
++      S_SCMO_CONFIG_INFO_3_RCH0_DEPTH_SHFT            = 0x10,
++      S_SCMO_CONFIG_INFO_3_BCH_DEPTH_BMSK             = 0xff00,
++      S_SCMO_CONFIG_INFO_3_BCH_DEPTH_SHFT             = 0x8,
++      S_SCMO_CONFIG_INFO_3_WCH_DEPTH_BMSK             = 0xff,
++      S_SCMO_CONFIG_INFO_3_WCH_DEPTH_SHFT             = 0x0,
++};
++
++#define S_SCMO_CONFIG_INFO_4_ADDR(b, n) \
++              (S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000060)
++enum bimc_s_scmo_config_info_4 {
++      S_SCMO_CONFIG_INFO_4_RMSK                       = 0xffff,
++      S_SCMO_CONFIG_INFO_4_RCH1_CTRL_DEPTH_BMSK       = 0xff00,
++      S_SCMO_CONFIG_INFO_4_RCH1_CTRL_DEPTH_SHFT       = 0x8,
++      S_SCMO_CONFIG_INFO_4_RCH1_DEPTH_BMSK            = 0xff,
++      S_SCMO_CONFIG_INFO_4_RCH1_DEPTH_SHFT            = 0x0,
++};
++
++#define S_SCMO_CONFIG_INFO_5_ADDR(b, n) \
++              (S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000070)
++enum bimc_s_scmo_config_info_5 {
++      S_SCMO_CONFIG_INFO_5_RMSK                       = 0xffff,
++      S_SCMO_CONFIG_INFO_5_DPE_CQ_DEPTH_BMSK          = 0xff00,
++      S_SCMO_CONFIG_INFO_5_DPE_CQ_DEPTH_SHFT          = 0x8,
++      S_SCMO_CONFIG_INFO_5_DDR_BUS_WIDTH_BMSK         = 0xff,
++      S_SCMO_CONFIG_INFO_5_DDR_BUS_WIDTH_SHFT         = 0x0,
++};
++
++#define S_SCMO_CONFIG_INFO_6_ADDR(b, n) \
++              (S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000080)
++enum bimc_s_scmo_config_info_6 {
++      S_SCMO_CONFIG_INFO_6_RMSK                       = 0x1111,
++      S_SCMO_CONFIG_INFO_6_WBUFC_PIPE_BMSK            = 0x1000,
++      S_SCMO_CONFIG_INFO_6_WBUFC_PIPE_SHFT            = 0xc,
++      S_SCMO_CONFIG_INFO_6_RDOPT_PIPE_BMSK            = 0x100,
++      S_SCMO_CONFIG_INFO_6_RDOPT_PIPE_SHFT            = 0x8,
++      S_SCMO_CONFIG_INFO_6_ACHAN_INTF_PIPE_BMSK       = 0x10,
++      S_SCMO_CONFIG_INFO_6_ACHAN_INTF_PIPE_SHFT       = 0x4,
++      S_SCMO_CONFIG_INFO_6_ADDR_DECODE_HT_BMSK        = 0x1,
++      S_SCMO_CONFIG_INFO_6_ADDR_DECODE_HT_SHFT        = 0x0,
++};
++
++#define S_SCMO_INT_STATUS_ADDR(b, n) \
++      (S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000100)
++enum bimc_s_scmo_int_status {
++      S_SCMO_INT_STATUS_RMSK                  = 0x1,
++      S_SCMO_INT_STATUS_ERR_OCCURED_BMSK      = 0x1,
++      S_SCMO_INT_STATUS_ERR_OCCURED_SHFT      = 0x0,
++};
++
++#define S_SCMO_INT_CLR_ADDR(b, n) \
++      (S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000108)
++enum bimc_s_scmo_int_clr {
++      S_SCMO_INT_CLR_RMSK             = 0x1,
++      S_SCMO_INT_CLR_IRQ_CLR_BMSK     = 0x1,
++      S_SCMO_INT_CLR_IRQ_CLR_SHFT     = 0x0,
++};
++
++#define S_SCMO_INT_EN_ADDR(b, n) \
++      (S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x0000010c)
++enum bimc_s_scmo_int_en {
++      S_SCMO_INT_EN_RMSK              = 0x1,
++      S_SCMO_INT_EN_IRQ_EN_BMSK       = 0x1,
++      S_SCMO_INT_EN_IRQ_EN_SHFT       = 0x0,
++};
++
++#define S_SCMO_ESYN_ADDR_ADDR(b, n) \
++      (S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000120)
++enum bimc_s_scmo_esyn_addr {
++      S_SCMO_ESYN_ADDR_RMSK                           = 0xffffffff,
++      S_SCMO_ESYN_ADDR_ESYN_ADDR_ERR_ADDR_BMSK        = 0xffffffff,
++      S_SCMO_ESYN_ADDR_ESYN_ADDR_ERR_ADDR_SHFT        = 0x0,
++};
++
++#define S_SCMO_ESYN_APACKET_0_ADDR(b, n) \
++      (S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000128)
++enum bimc_s_scmo_esyn_apacket_0 {
++      S_SCMO_ESYN_APACKET_0_RMSK                      = 0xff1fffff,
++      S_SCMO_ESYN_APACKET_0_ERR_ATID_BMSK             = 0xff000000,
++      S_SCMO_ESYN_APACKET_0_ERR_ATID_SHFT             = 0x18,
++      S_SCMO_ESYN_APACKET_0_ERR_AVMID_BMSK            = 0x1f0000,
++      S_SCMO_ESYN_APACKET_0_ERR_AVMID_SHFT            = 0x10,
++      S_SCMO_ESYN_APACKET_0_ERR_AMID_BMSK             = 0xffff,
++      S_SCMO_ESYN_APACKET_0_ERR_AMID_SHFT             = 0x0,
++};
++
++#define S_SCMO_ESYN_APACKET_1_ADDR(b, n) \
++      (S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x0000012c)
++enum bimc_s_scmo_esyn_apacket_1 {
++      S_SCMO_ESYN_APACKET_1_RMSK                      = 0x10ff117,
++      S_SCMO_ESYN_APACKET_1_ERR_CODE_BMSK             = 0x1000000,
++      S_SCMO_ESYN_APACKET_1_ERR_CODE_SHFT             = 0x18,
++      S_SCMO_ESYN_APACKET_1_ERR_ALEN_BMSK             = 0xf0000,
++      S_SCMO_ESYN_APACKET_1_ERR_ALEN_SHFT             = 0x10,
++      S_SCMO_ESYN_APACKET_1_ERR_ASIZE_BMSK            = 0xe000,
++      S_SCMO_ESYN_APACKET_1_ERR_ASIZE_SHFT            = 0xd,
++      S_SCMO_ESYN_APACKET_1_ERR_ABURST_BMSK           = 0x1000,
++      S_SCMO_ESYN_APACKET_1_ERR_ABURST_SHFT           = 0xc,
++      S_SCMO_ESYN_APACKET_1_ERR_AEXCLUSIVE_BMSK       = 0x100,
++      S_SCMO_ESYN_APACKET_1_ERR_AEXCLUSIVE_SHFT       = 0x8,
++      S_SCMO_ESYN_APACKET_1_ERR_APRONTS_BMSK          = 0x10,
++      S_SCMO_ESYN_APACKET_1_ERR_APRONTS_SHFT          = 0x4,
++      S_SCMO_ESYN_APACKET_1_ERR_AOOORD_BMSK           = 0x4,
++      S_SCMO_ESYN_APACKET_1_ERR_AOOORD_SHFT           = 0x2,
++      S_SCMO_ESYN_APACKET_1_ERR_AOOOWR_BMSK           = 0x2,
++      S_SCMO_ESYN_APACKET_1_ERR_AOOOWR_SHFT           = 0x1,
++      S_SCMO_ESYN_APACKET_1_ERR_AWRITE_BMSK           = 0x1,
++      S_SCMO_ESYN_APACKET_1_ERR_AWRITE_SHFT           = 0x0,
++};
++
++#define S_SCMO_CLK_CTRL_ADDR(b, n) \
++      (S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000200)
++enum bimc_s_scmo_clk_ctrl {
++      S_SCMO_CLK_CTRL_RMSK                            = 0xffff1111,
++      S_SCMO_CLK_CTRL_PEN_CMD_CG_EN_BMSK              = 0x10000,
++      S_SCMO_CLK_CTRL_PEN_CMD_CG_EN_SHFT              = 0x10,
++      S_SCMO_CLK_CTRL_RCH_CG_EN_BMSK                  = 0x1000,
++      S_SCMO_CLK_CTRL_RCH_CG_EN_SHFT                  = 0xc,
++      S_SCMO_CLK_CTRL_FLUSH_CG_EN_BMSK                = 0x100,
++      S_SCMO_CLK_CTRL_FLUSH_CG_EN_SHFT                = 0x8,
++      S_SCMO_CLK_CTRL_WCH_CG_EN_BMSK                  = 0x10,
++      S_SCMO_CLK_CTRL_WCH_CG_EN_SHFT                  = 0x4,
++      S_SCMO_CLK_CTRL_ACH_CG_EN_BMSK                  = 0x1,
++      S_SCMO_CLK_CTRL_ACH_CG_EN_SHFT                  = 0x0,
++};
++
++#define S_SCMO_SLV_INTERLEAVE_CFG_ADDR(b, n) \
++      (S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000400)
++enum bimc_s_scmo_slv_interleave_cfg {
++      S_SCMO_SLV_INTERLEAVE_CFG_RMSK                  = 0xff,
++      S_SCMO_SLV_INTERLEAVE_CFG_INTERLEAVE_CS1_BMSK   = 0x10,
++      S_SCMO_SLV_INTERLEAVE_CFG_INTERLEAVE_CS1_SHFT   = 0x4,
++      S_SCMO_SLV_INTERLEAVE_CFG_INTERLEAVE_CS0_BMSK   = 0x1,
++      S_SCMO_SLV_INTERLEAVE_CFG_INTERLEAVE_CS0_SHFT   = 0x0,
++};
++
++#define S_SCMO_ADDR_BASE_CSn_ADDR(b, n, o)    \
++      (S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000410 + 0x4 * (o))
++enum bimc_s_scmo_addr_base_csn {
++      S_SCMO_ADDR_BASE_CSn_RMSK                       = 0xffff,
++      S_SCMO_ADDR_BASE_CSn_MAXn                       = 1,
++      S_SCMO_ADDR_BASE_CSn_ADDR_BASE_BMSK             = 0xfc,
++      S_SCMO_ADDR_BASE_CSn_ADDR_BASE_SHFT             = 0x2,
++};
++
++#define S_SCMO_ADDR_MAP_CSn_ADDR(b, n, o) \
++      (S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000420 + 0x4 * (o))
++enum bimc_s_scmo_addr_map_csn {
++      S_SCMO_ADDR_MAP_CSn_RMSK                = 0xffff,
++      S_SCMO_ADDR_MAP_CSn_MAXn                = 1,
++      S_SCMO_ADDR_MAP_CSn_RANK_EN_BMSK        = 0x8000,
++      S_SCMO_ADDR_MAP_CSn_RANK_EN_SHFT        = 0xf,
++      S_SCMO_ADDR_MAP_CSn_ADDR_MODE_BMSK      = 0x1000,
++      S_SCMO_ADDR_MAP_CSn_ADDR_MODE_SHFT      = 0xc,
++      S_SCMO_ADDR_MAP_CSn_BANK_SIZE_BMSK      = 0x100,
++      S_SCMO_ADDR_MAP_CSn_BANK_SIZE_SHFT      = 0x8,
++      S_SCMO_ADDR_MAP_CSn_ROW_SIZE_BMSK       = 0x30,
++      S_SCMO_ADDR_MAP_CSn_ROW_SIZE_SHFT       = 0x4,
++      S_SCMO_ADDR_MAP_CSn_COL_SIZE_BMSK       = 0x3,
++      S_SCMO_ADDR_MAP_CSn_COL_SIZE_SHFT       = 0x0,
++};
++
++#define S_SCMO_ADDR_MASK_CSn_ADDR(b, n, o) \
++      (S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000430 + 0x4 * (o))
++enum bimc_s_scmo_addr_mask_csn {
++      S_SCMO_ADDR_MASK_CSn_RMSK               = 0xffff,
++      S_SCMO_ADDR_MASK_CSn_MAXn               = 1,
++      S_SCMO_ADDR_MASK_CSn_ADDR_MASK_BMSK     = 0xfc,
++      S_SCMO_ADDR_MASK_CSn_ADDR_MASK_SHFT     = 0x2,
++};
++
++#define S_SCMO_SLV_STATUS_ADDR(b, n) \
++      (S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000450)
++enum bimc_s_scmo_slv_status {
++      S_SCMO_SLV_STATUS_RMSK                          = 0xff3,
++      S_SCMO_SLV_STATUS_GLOBAL_MONS_IN_USE_BMSK       = 0xff0,
++      S_SCMO_SLV_STATUS_GLOBAL_MONS_IN_USE_SHFT       = 0x4,
++      S_SCMO_SLV_STATUS_SLAVE_IDLE_BMSK               = 0x3,
++      S_SCMO_SLV_STATUS_SLAVE_IDLE_SHFT               = 0x0,
++};
++
++#define S_SCMO_CMD_BUF_CFG_ADDR(b, n) \
++      (S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000500)
++enum bimc_s_scmo_cmd_buf_cfg {
++      S_SCMO_CMD_BUF_CFG_RMSK                         = 0xf1f,
++      S_SCMO_CMD_BUF_CFG_CMD_ORDERING_BMSK            = 0x300,
++      S_SCMO_CMD_BUF_CFG_CMD_ORDERING_SHFT            = 0x8,
++      S_SCMO_CMD_BUF_CFG_HP_CMD_AREQPRIO_MAP_BMSK     = 0x10,
++      S_SCMO_CMD_BUF_CFG_HP_CMD_AREQPRIO_MAP_SHFT     = 0x4,
++      S_SCMO_CMD_BUF_CFG_HP_CMD_Q_DEPTH_BMSK          = 0x7,
++      S_SCMO_CMD_BUF_CFG_HP_CMD_Q_DEPTH_SHFT          = 0x0,
++};
++
++#define S_SCM_CMD_BUF_STATUS_ADDR(b, n) \
++      (S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000520)
++enum bimc_s_scm_cmd_buf_status {
++      S_SCMO_CMD_BUF_STATUS_RMSK                              = 0x77,
++      S_SCMO_CMD_BUF_STATUS_HP_CMD_BUF_ENTRIES_IN_USE_BMSK    = 0x70,
++      S_SCMO_CMD_BUF_STATUS_HP_CMD_BUF_ENTRIES_IN_USE_SHFT    = 0x4,
++      S_SCMO_CMD_BUF_STATUS_LP_CMD_BUF_ENTRIES_IN_USE_BMSK    = 0x7,
++      S_SCMO_CMD_BUF_STATUS_LP_CMD_BUF_ENTRIES_IN_USE_SHFT    = 0x0,
++};
++
++#define S_SCMO_RCH_SEL_ADDR(b, n) \
++      (S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000540)
++enum bimc_s_scmo_rch_sel {
++      S_SCMO_RCH_SEL_RMSK                     = 0xffffffff,
++      S_SCMO_CMD_BUF_STATUS_RCH_PORTS_BMSK    = 0xffffffff,
++      S_SCMO_CMD_BUF_STATUS_RCH_PORTS_SHFT    = 0x0,
++};
++
++#define S_SCMO_RCH_BKPR_CFG_ADDR(b, n) \
++              (S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000544)
++enum bimc_s_scmo_rch_bkpr_cfg {
++      S_SCMO_RCH_BKPR_CFG_RMSK                        = 0xffffffff,
++      S_SCMO_RCH_BKPR_CFG_RCH1_FIFO_BKPR_HI_TH_BMSK   = 0x3f000000,
++      S_SCMO_RCH_BKPR_CFG_RCH1_FIFO_BKPR_HI_TH_SHFT   = 0x18,
++      S_SCMO_RCH_BKPR_CFG_RCH1_FIFO_BKPR_LO_TH_BMSK   = 0x3f0000,
++      S_SCMO_RCH_BKPR_CFG_RCH1_FIFO_BKPR_LO_TH_SHFT   = 0x10,
++      S_SCMO_RCH_BKPR_CFG_RCH0_FIFO_BKPR_HI_TH_BMSK   = 0x3f00,
++      S_SCMO_RCH_BKPR_CFG_RCH0_FIFO_BKPR_HI_TH_SHFT   = 0x8,
++      S_SCMO_RCH_BKPR_CFG_RCH0_FIFO_BKPR_LO_TH_BMSK   = 0x3f,
++      S_SCMO_RCH_BKPR_CFG_RCH0_FIFO_BKPR_LO_TH_SHFT   = 0x0,
++};
++
++#define S_SCMO_RCH_STATUS_ADDR(b, n) \
++              (S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000560)
++enum bimc_s_scmo_rch_status {
++      S_SCMO_RCH_STATUS_RMSK                          = 0x33333,
++      S_SCMO_RCH_STATUS_PRQ_FIFO_FULL_BMSK            = 0x20000,
++      S_SCMO_RCH_STATUS_PRQ_FIFO_FULL_SHFT            = 0x11,
++      S_SCMO_RCH_STATUS_PRQ_FIFO_EMPTY_BMSK           = 0x10000,
++      S_SCMO_RCH_STATUS_PRQ_FIFO_EMPTY_SHFT           = 0x10,
++      S_SCMO_RCH_STATUS_RCH1_QUAL_FIFO_FULL_BMSK      = 0x2000,
++      S_SCMO_RCH_STATUS_RCH1_QUAL_FIFO_FULL_SHFT      = 0xd,
++      S_SCMO_RCH_STATUS_RCH1_QUAL_FIFO_EMPTY_BMSK     = 0x1000,
++      S_SCMO_RCH_STATUS_RCH1_QUAL_FIFO_EMPTY_SHFT     = 0xc,
++      S_SCMO_RCH_STATUS_RCH1_DATA_FIFO_FULL_BMSK      = 0x200,
++      S_SCMO_RCH_STATUS_RCH1_DATA_FIFO_FULL_SHFT      = 0x9,
++      S_SCMO_RCH_STATUS_RCH1_DATA_FIFO_EMPTY_BMSK     = 0x100,
++      S_SCMO_RCH_STATUS_RCH1_DATA_FIFO_EMPTY_SHFT     = 0x8,
++      S_SCMO_RCH_STATUS_RCH0_QUAL_FIFO_FULL_BMSK      = 0x20,
++      S_SCMO_RCH_STATUS_RCH0_QUAL_FIFO_FULL_SHFT      = 0x5,
++      S_SCMO_RCH_STATUS_RCH0_QUAL_FIFO_EMPTY_BMSK     = 0x10,
++      S_SCMO_RCH_STATUS_RCH0_QUAL_FIFO_EMPTY_SHFT     = 0x4,
++      S_SCMO_RCH_STATUS_RCH0_DATA_FIFO_FULL_BMSK      = 0x2,
++      S_SCMO_RCH_STATUS_RCH0_DATA_FIFO_FULL_SHFT      = 0x1,
++      S_SCMO_RCH_STATUS_RCH0_DATA_FIFO_EMPTY_BMSK     = 0x1,
++      S_SCMO_RCH_STATUS_RCH0_DATA_FIFO_EMPTY_SHFT     = 0x0,
++};
++
++#define S_SCMO_WCH_BUF_CFG_ADDR(b, n) \
++      (S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000580)
++enum bimc_s_scmo_wch_buf_cfg {
++      S_SCMO_WCH_BUF_CFG_RMSK                         = 0xff,
++      S_SCMO_WCH_BUF_CFG_WRITE_BLOCK_READ_BMSK        = 0x10,
++      S_SCMO_WCH_BUF_CFG_WRITE_BLOCK_READ_SHFT        = 0x4,
++      S_SCMO_WCH_BUF_CFG_COALESCE_EN_BMSK             = 0x1,
++      S_SCMO_WCH_BUF_CFG_COALESCE_EN_SHFT             = 0x0,
++};
++
++#define S_SCMO_WCH_STATUS_ADDR(b, n) \
++      (S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x000005a0)
++enum bimc_s_scmo_wch_status {
++      S_SCMO_WCH_STATUS_RMSK                          = 0x333,
++      S_SCMO_WCH_STATUS_BRESP_FIFO_FULL_BMSK          = 0x200,
++      S_SCMO_WCH_STATUS_BRESP_FIFO_FULL_SHFT          = 0x9,
++      S_SCMO_WCH_STATUS_BRESP_FIFO_EMPTY_BMSK         = 0x100,
++      S_SCMO_WCH_STATUS_BRESP_FIFO_EMPTY_SHFT         = 0x8,
++      S_SCMO_WCH_STATUS_WDATA_FIFO_FULL_BMSK          = 0x20,
++      S_SCMO_WCH_STATUS_WDATA_FIFO_FULL_SHFT          = 0x5,
++      S_SCMO_WCH_STATUS_WDATA_FIFO_EMPTY_BMSK         = 0x10,
++      S_SCMO_WCH_STATUS_WDATA_FIFO_EMPTY_SHFT         = 0x4,
++      S_SCMO_WCH_STATUS_WBUF_FULL_BMSK                = 0x2,
++      S_SCMO_WCH_STATUS_WBUF_FULL_SHFT                = 0x1,
++      S_SCMO_WCH_STATUS_WBUF_EMPTY_BMSK               = 0x1,
++      S_SCMO_WCH_STATUS_WBUF_EMPTY_SHFT               = 0x0,
++};
++
++#define S_SCMO_FLUSH_CFG_ADDR(b, n) \
++      (S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x000005c0)
++enum bimc_s_scmo_flush_cfg {
++      S_SCMO_FLUSH_CFG_RMSK                           = 0xffffffff,
++      S_SCMO_FLUSH_CFG_FLUSH_IN_ORDER_BMSK            = 0x10000000,
++      S_SCMO_FLUSH_CFG_FLUSH_IN_ORDER_SHFT            = 0x1c,
++      S_SCMO_FLUSH_CFG_FLUSH_IDLE_DELAY_BMSK          = 0x3ff0000,
++      S_SCMO_FLUSH_CFG_FLUSH_IDLE_DELAY_SHFT          = 0x10,
++      S_SCMO_FLUSH_CFG_FLUSH_UPPER_LIMIT_BMSK         = 0xf00,
++      S_SCMO_FLUSH_CFG_FLUSH_UPPER_LIMIT_SHFT         = 0x8,
++      S_SCMO_FLUSH_CFG_FLUSH_LOWER_LIMIT_BMSK         = 0xf,
++      S_SCMO_FLUSH_CFG_FLUSH_LOWER_LIMIT_SHFT         = 0x0,
++};
++
++#define S_SCMO_FLUSH_CMD_ADDR(b, n) \
++      (S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x000005c4)
++enum bimc_s_scmo_flush_cmd {
++      S_SCMO_FLUSH_CMD_RMSK                           = 0xf,
++      S_SCMO_FLUSH_CMD_FLUSH_ALL_BUF_BMSK             = 0x3,
++      S_SCMO_FLUSH_CMD_FLUSH_ALL_BUF_SHFT             = 0x0,
++};
++
++#define S_SCMO_CMD_OPT_CFG0_ADDR(b, n) \
++      (S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000700)
++enum bimc_s_scmo_cmd_opt_cfg0 {
++      S_SCMO_CMD_OPT_CFG0_RMSK                = 0xffffff,
++      S_SCMO_CMD_OPT_CFG0_IGNORE_BANK_UNAVL_BMSK      = 0x100000,
++      S_SCMO_CMD_OPT_CFG0_IGNORE_BANK_UNAVL_SHFT      = 0x14,
++      S_SCMO_CMD_OPT_CFG0_MASK_CMDOUT_PRI_BMSK        = 0x10000,
++      S_SCMO_CMD_OPT_CFG0_MASK_CMDOUT_PRI_SHFT        = 0x10,
++      S_SCMO_CMD_OPT_CFG0_DPE_CMD_REORDERING_BMSK     = 0x1000,
++      S_SCMO_CMD_OPT_CFG0_DPE_CMD_REORDERING_SHFT     = 0xc,
++      S_SCMO_CMD_OPT_CFG0_WR_OPT_EN_BMSK              = 0x100,
++      S_SCMO_CMD_OPT_CFG0_WR_OPT_EN_SHFT              = 0x8,
++      S_SCMO_CMD_OPT_CFG0_RD_OPT_EN_BMSK              = 0x10,
++      S_SCMO_CMD_OPT_CFG0_RD_OPT_EN_SHFT              = 0x4,
++      S_SCMO_CMD_OPT_CFG0_PAGE_MGMT_POLICY_BMSK       = 0x1,
++      S_SCMO_CMD_OPT_CFG0_PAGE_MGMT_POLICY_SHFT       = 0x0,
++};
++
++#define S_SCMO_CMD_OPT_CFG1_ADDR(b, n) \
++      (S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000704)
++enum bimc_s_scmo_cmd_opt_cfg1 {
++      S_SCMO_CMD_OPT_CFG1_RMSK                        = 0xffffffff,
++      S_SCMO_CMD_OPT_CFG1_HSTP_CMD_TIMEOUT_BMSK       = 0x1f000000,
++      S_SCMO_CMD_OPT_CFG1_HSTP_CMD_TIMEOUT_SHFT       = 0x18,
++      S_SCMO_CMD_OPT_CFG1_HP_CMD_TIMEOUT_BMSK         = 0x1f0000,
++      S_SCMO_CMD_OPT_CFG1_HP_CMD_TIMEOUT_SHFT         = 0x10,
++      S_SCMO_CMD_OPT_CFG1_MP_CMD_TIMEOUT_BMSK         = 0x1f00,
++      S_SCMO_CMD_OPT_CFG1_MP_CMD_TIMEOUT_SHFT         = 0x8,
++      S_SCMO_CMD_OPT_CFG1_LP_CMD_TIMEOUT_BMSK         = 0x1f,
++      S_SCMO_CMD_OPT_CFG1_LP_CMD_TIMEOUT_SHFT         = 0x0,
++};
++
++#define S_SCMO_CMD_OPT_CFG2_ADDR(b, n) \
++      (S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000708)
++enum bimc_s_scmo_cmd_opt_cfg2 {
++      S_SCMO_CMD_OPT_CFG2_RMSK                        = 0xff,
++      S_SCMO_CMD_OPT_CFG2_RWOPT_CMD_TIMEOUT_BMSK      = 0xf,
++      S_SCMO_CMD_OPT_CFG2_RWOPT_CMD_TIMEOUT_SHFT      = 0x0,
++};
++
++#define S_SCMO_CMD_OPT_CFG3_ADDR(b, n) \
++      (S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x0000070c)
++enum bimc_s_scmo_cmd_opt_cfg3 {
++      S_SCMO_CMD_OPT_CFG3_RMSK                        = 0xff,
++      S_SCMO_CMD_OPT_CFG3_FLUSH_CMD_TIMEOUT_BMSK      = 0xf,
++      S_SCMO_CMD_OPT_CFG3_FLUSH_CMD_TIMEOUT_SHFT      = 0x0,
++};
++
++/* S_SWAY_GENERIC */
++#define S_SWAY_REG_BASE(b)    ((b) + 0x00048000)
++
++#define S_SWAY_CONFIG_INFO_0_ADDR(b, n) \
++      (S_SWAY_REG_BASE(b) + (0x8000 * (n)) + 0x00000020)
++enum bimc_s_sway_config_info_0 {
++      S_SWAY_CONFIG_INFO_0_RMSK               = 0xff0000ff,
++      S_SWAY_CONFIG_INFO_0_SYNC_MODE_BMSK     = 0xff000000,
++      S_SWAY_CONFIG_INFO_0_SYNC_MODE_SHFT     = 0x18,
++      S_SWAY_CONFIG_INFO_0_FUNC_BMSK          = 0xff,
++      S_SWAY_CONFIG_INFO_0_FUNC_SHFT          = 0x0,
++};
++
++#define S_SWAY_CONFIG_INFO_1_ADDR(b, n) \
++      (S_SWAY_REG_BASE(b) + (0x8000 * (n)) + 0x00000030)
++enum bimc_s_sway_config_info_1 {
++      S_SWAY_CONFIG_INFO_1_RMSK                       = 0xffffffff,
++      S_SWAY_CONFIG_INFO_1_MPORT_CONNECTIVITY_BMSK    = 0xffffffff,
++      S_SWAY_CONFIG_INFO_1_MPORT_CONNECTIVITY_SHFT    = 0x0,
++};
++
++#define S_SWAY_CONFIG_INFO_2_ADDR(b, n) \
++      (S_SWAY_REG_BASE(b) + (0x8000 * (n)) + 0x00000040)
++enum bimc_s_sway_config_info_2 {
++      S_SWAY_CONFIG_INFO_2_RMSK                       = 0xffff0000,
++      S_SWAY_CONFIG_INFO_2_MPORT_CONNECTIVITY_BMSK    = 0xffff0000,
++      S_SWAY_CONFIG_INFO_2_MPORT_CONNECTIVITY_SHFT    = 0x10,
++};
++
++#define S_SWAY_CONFIG_INFO_3_ADDR(b, n) \
++      (S_SWAY_REG_BASE(b) + (0x8000 * (n)) + 0x00000050)
++enum bimc_s_sway_config_info_3 {
++      S_SWAY_CONFIG_INFO_3_RMSK                       = 0xffffffff,
++      S_SWAY_CONFIG_INFO_3_RCH0_DEPTH_BMSK            = 0xff000000,
++      S_SWAY_CONFIG_INFO_3_RCH0_DEPTH_SHFT            = 0x18,
++      S_SWAY_CONFIG_INFO_3_BCH_DEPTH_BMSK             = 0xff0000,
++      S_SWAY_CONFIG_INFO_3_BCH_DEPTH_SHFT             = 0x10,
++      S_SWAY_CONFIG_INFO_3_WCH_DEPTH_BMSK             = 0xff,
++      S_SWAY_CONFIG_INFO_3_WCH_DEPTH_SHFT             = 0x0,
++};
++
++#define S_SWAY_CONFIG_INFO_4_ADDR(b, n) \
++      (S_SWAY_REG_BASE(b) + (0x8000 * (n)) + 0x00000060)
++enum bimc_s_sway_config_info_4 {
++      S_SWAY_CONFIG_INFO_4_RMSK                       = 0x800000ff,
++      S_SWAY_CONFIG_INFO_4_DUAL_RCH_EN_BMSK           = 0x80000000,
++      S_SWAY_CONFIG_INFO_4_DUAL_RCH_EN_SHFT           = 0x1f,
++      S_SWAY_CONFIG_INFO_4_RCH1_DEPTH_BMSK            = 0xff,
++      S_SWAY_CONFIG_INFO_4_RCH1_DEPTH_SHFT            = 0x0,
++};
++
++#define S_SWAY_CONFIG_INFO_5_ADDR(b, n) \
++      (S_SWAY_REG_BASE(b) + (0x8000 * (n)) + 0x00000070)
++enum bimc_s_sway_config_info_5 {
++      S_SWAY_CONFIG_INFO_5_RMSK                       = 0x800000ff,
++      S_SWAY_CONFIG_INFO_5_QCH_EN_BMSK                = 0x80000000,
++      S_SWAY_CONFIG_INFO_5_QCH_EN_SHFT                = 0x1f,
++      S_SWAY_CONFIG_INFO_5_QCH_DEPTH_BMSK             = 0xff,
++      S_SWAY_CONFIG_INFO_5_QCH_DEPTH_SHFT             = 0x0,
++};
++
++#define S_SWAY_CONFIG_INFO_6_ADDR(b, n) \
++      (S_SWAY_REG_BASE(b) + (0x8000 * (n)) + 0x00000080)
++enum bimc_s_sway_config_info_6 {
++      S_SWAY_CONFIG_INFO_6_RMSK                       = 0x1,
++      S_SWAY_CONFIG_INFO_6_S2SW_PIPELINE_EN_BMSK      = 0x1,
++      S_SWAY_CONFIG_INFO_6_S2SW_PIPELINE_EN_SHFT      = 0x0,
++};
++
++#define S_SWAY_INT_STATUS_ADDR(b, n) \
++      (S_SWAY_REG_BASE(b) + (0x8000 * (n)) + 0x00000100)
++enum bimc_s_sway_int_status {
++      S_SWAY_INT_STATUS_RMSK          = 0x3,
++      S_SWAY_INT_STATUS_RFU_BMSK      = 0x3,
++      S_SWAY_INT_STATUS_RFU_SHFT      = 0x0,
++};
++
++#define S_SWAY_INT_CLR_ADDR(b, n) \
++      (S_SWAY_REG_BASE(b) + (0x8000 * (n)) + 0x00000108)
++enum bimc_s_sway_int_clr {
++      S_SWAY_INT_CLR_RMSK             = 0x3,
++      S_SWAY_INT_CLR_RFU_BMSK         = 0x3,
++      S_SWAY_INT_CLR_RFU_SHFT         = 0x0,
++};
++
++
++#define S_SWAY_INT_EN_ADDR(b, n) \
++      (S_SWAY_REG_BASE(b) + (0x8000 * (n)) + 0x0000010c)
++enum bimc_s_sway_int_en {
++      S_SWAY_INT_EN_RMSK              = 0x3,
++      S_SWAY_INT_EN_RFU_BMSK          = 0x3,
++      S_SWAY_INT_EN_RFU_SHFT          = 0x0,
++};
++
++#define S_SWAY_CLK_CTRL_ADDR(b, n) \
++      (S_SWAY_REG_BASE(b) + (0x8000 * (n)) + 0x00000200)
++enum bimc_s_sway_clk_ctrl {
++      S_SWAY_CLK_CTRL_RMSK                            = 0x3,
++      S_SWAY_CLK_CTRL_SLAVE_CLK_GATING_EN_BMSK        = 0x2,
++      S_SWAY_CLK_CTRL_SLAVE_CLK_GATING_EN_SHFT        = 0x1,
++      S_SWAY_CLK_CTRL_CORE_CLK_GATING_EN_BMSK         = 0x1,
++      S_SWAY_CLK_CTRL_CORE_CLK_GATING_EN_SHFT         = 0x0,
++};
++
++#define S_SWAY_RCH_SEL_ADDR(b, n) \
++      (S_SWAY_REG_BASE(b) + (0x8000 * (n)) + 0x00000210)
++enum bimc_s_sway_rch_sel {
++      S_SWAY_RCH_SEL_RMSK             = 0x7f,
++      S_SWAY_RCH_SEL_UNUSED_BMSK      = 0x7f,
++      S_SWAY_RCH_SEL_UNUSED_SHFT      = 0x0,
++};
++
++
++#define S_SWAY_MAX_OUTSTANDING_REQS_ADDR(b, n) \
++      (S_SWAY_REG_BASE(b) + (0x8000 * (n)) + 0x00000220)
++enum bimc_s_sway_max_outstanding_reqs {
++      S_SWAY_MAX_OUTSTANDING_REQS_RMSK        = 0xffff,
++      S_SWAY_MAX_OUTSTANDING_REQS_WRITE_BMSK  = 0xff00,
++      S_SWAY_MAX_OUTSTANDING_REQS_WRITE_SHFT  = 0x8,
++      S_SWAY_MAX_OUTSTANDING_REQS_READ_BMSK   = 0xff,
++      S_SWAY_MAX_OUTSTANDING_REQS_READ_SHFT   = 0x0,
++};
++
++
++#define S_SWAY_BUF_STATUS_0_ADDR(b, n) \
++      (S_SWAY_REG_BASE(b) + (0x8000 * (n)) + 0x00000400)
++enum bimc_s_sway_buf_status_0 {
++      S_SWAY_BUF_STATUS_0_RMSK                        = 0xf0300f03,
++      S_SWAY_BUF_STATUS_0_RCH0_DATA_RD_FULL_BMSK      = 0x80000000,
++      S_SWAY_BUF_STATUS_0_RCH0_DATA_RD_FULL_SHFT      = 0x1f,
++      S_SWAY_BUF_STATUS_0_RCH0_DATA_RD_EMPTY_BMSK     = 0x40000000,
++      S_SWAY_BUF_STATUS_0_RCH0_DATA_RD_EMPTY_SHFT     = 0x1e,
++      S_SWAY_BUF_STATUS_0_RCH0_CTRL_RD_FULL_BMSK      = 0x20000000,
++      S_SWAY_BUF_STATUS_0_RCH0_CTRL_RD_FULL_SHFT      = 0x1d,
++      S_SWAY_BUF_STATUS_0_RCH0_CTRL_RD_EMPTY_BMSK     = 0x10000000,
++      S_SWAY_BUF_STATUS_0_RCH0_CTRL_RD_EMPTY_SHFT     = 0x1c,
++      S_SWAY_BUF_STATUS_0_BCH_RD_FULL_BMSK            = 0x200000,
++      S_SWAY_BUF_STATUS_0_BCH_RD_FULL_SHFT            = 0x15,
++      S_SWAY_BUF_STATUS_0_BCH_RD_EMPTY_BMSK           = 0x100000,
++      S_SWAY_BUF_STATUS_0_BCH_RD_EMPTY_SHFT           = 0x14,
++      S_SWAY_BUF_STATUS_0_WCH_DATA_WR_FULL_BMSK       = 0x800,
++      S_SWAY_BUF_STATUS_0_WCH_DATA_WR_FULL_SHFT       = 0xb,
++      S_SWAY_BUF_STATUS_0_WCH_DATA_WR_EMPTY_BMSK      = 0x400,
++      S_SWAY_BUF_STATUS_0_WCH_DATA_WR_EMPTY_SHFT      = 0xa,
++      S_SWAY_BUF_STATUS_0_WCH_CTRL_WR_FULL_BMSK       = 0x200,
++      S_SWAY_BUF_STATUS_0_WCH_CTRL_WR_FULL_SHFT       = 0x9,
++      S_SWAY_BUF_STATUS_0_WCH_CTRL_WR_EMPTY_BMSK      = 0x100,
++      S_SWAY_BUF_STATUS_0_WCH_CTRL_WR_EMPTY_SHFT      = 0x8,
++      S_SWAY_BUF_STATUS_0_ACH_WR_FULL_BMSK            = 0x2,
++      S_SWAY_BUF_STATUS_0_ACH_WR_FULL_SHFT            = 0x1,
++      S_SWAY_BUF_STATUS_0_ACH_WR_EMPTY_BMSK           = 0x1,
++      S_SWAY_BUF_STATUS_0_ACH_WR_EMPTY_SHFT           = 0x0,
++};
++
++#define S_SWAY_BUF_STATUS_1_ADDR(b, n) \
++      (S_SWAY_REG_BASE(b) + (0x8000 * (n)) + 0x00000410)
++enum bimc_s_sway_buf_status_1 {
++      S_SWAY_BUF_STATUS_1_RMSK                        = 0xf0,
++      S_SWAY_BUF_STATUS_1_RCH1_DATA_RD_FULL_BMSK      = 0x80,
++      S_SWAY_BUF_STATUS_1_RCH1_DATA_RD_FULL_SHFT      = 0x7,
++      S_SWAY_BUF_STATUS_1_RCH1_DATA_RD_EMPTY_BMSK     = 0x40,
++      S_SWAY_BUF_STATUS_1_RCH1_DATA_RD_EMPTY_SHFT     = 0x6,
++      S_SWAY_BUF_STATUS_1_RCH1_CTRL_RD_FULL_BMSK      = 0x20,
++      S_SWAY_BUF_STATUS_1_RCH1_CTRL_RD_FULL_SHFT      = 0x5,
++      S_SWAY_BUF_STATUS_1_RCH1_CTRL_RD_EMPTY_BMSK     = 0x10,
++      S_SWAY_BUF_STATUS_1_RCH1_CTRL_RD_EMPTY_SHFT     = 0x4,
++};
++
++#define S_SWAY_BUF_STATUS_2_ADDR(b, n) \
++      (S_SWAY_REG_BASE(b) + (0x8000 * (n)) + 0x00000420)
++enum bimc_s_sway_buf_status_2 {
++      S_SWAY_BUF_STATUS_2_RMSK                = 0x30,
++      S_SWAY_BUF_STATUS_2_QCH_RD_FULL_BMSK    = 0x20,
++      S_SWAY_BUF_STATUS_2_QCH_RD_FULL_SHFT    = 0x5,
++      S_SWAY_BUF_STATUS_2_QCH_RD_EMPTY_BMSK   = 0x10,
++      S_SWAY_BUF_STATUS_2_QCH_RD_EMPTY_SHFT   = 0x4,
++};
++
++/* S_ARB_GENERIC */
++
++#define S_ARB_REG_BASE(b)     ((b) + 0x00049000)
++
++#define S_ARB_COMPONENT_INFO_ADDR(b, n) \
++      (S_SWAY_REG_BASE(b) + (0x8000 * (n)) + 0x00000000)
++enum bimc_s_arb_component_info {
++      S_ARB_COMPONENT_INFO_RMSK               = 0xffffff,
++      S_ARB_COMPONENT_INFO_INSTANCE_BMSK      = 0xff0000,
++      S_ARB_COMPONENT_INFO_INSTANCE_SHFT      = 0x10,
++      S_ARB_COMPONENT_INFO_SUB_TYPE_BMSK      = 0xff00,
++      S_ARB_COMPONENT_INFO_SUB_TYPE_SHFT      = 0x8,
++      S_ARB_COMPONENT_INFO_TYPE_BMSK          = 0xff,
++      S_ARB_COMPONENT_INFO_TYPE_SHFT          = 0x0,
++};
++
++#define S_ARB_CONFIG_INFO_0_ADDR(b, n) \
++              (S_ARB_REG_BASE(b) + (0x8000 * (n)) + 0x00000020)
++enum bimc_s_arb_config_info_0 {
++      S_ARB_CONFIG_INFO_0_RMSK                        = 0x800000ff,
++      S_ARB_CONFIG_INFO_0_ARB2SW_PIPELINE_EN_BMSK     = 0x80000000,
++      S_ARB_CONFIG_INFO_0_ARB2SW_PIPELINE_EN_SHFT     = 0x1f,
++      S_ARB_CONFIG_INFO_0_FUNC_BMSK                   = 0xff,
++      S_ARB_CONFIG_INFO_0_FUNC_SHFT                   = 0x0,
++};
++
++#define S_ARB_CONFIG_INFO_1_ADDR(b, n) \
++              (S_ARB_REG_BASE(b) + (0x8000 * (n)) + 0x00000030)
++enum bimc_s_arb_config_info_1 {
++      S_ARB_CONFIG_INFO_1_RMSK                        = 0xffffffff,
++      S_ARB_CONFIG_INFO_1_MPORT_CONNECTIVITY_BMSK     = 0xffffffff,
++      S_ARB_CONFIG_INFO_1_MPORT_CONNECTIVITY_SHFT     = 0x0,
++};
++
++#define S_ARB_CLK_CTRL_ADDR(b, n) \
++      (S_ARB_REG_BASE(b) + (0x8000 * (n)) + 0x00000200)
++enum bimc_s_arb_clk_ctrl {
++      S_ARB_CLK_CTRL_RMSK                             = 0x1,
++      S_ARB_CLK_CTRL_SLAVE_CLK_GATING_EN_BMSK         = 0x2,
++      S_ARB_CLK_CTRL_SLAVE_CLK_GATING_EN_SHFT         = 0x1,
++      S_ARB_CLK_CTRL_CORE_CLK_GATING_EN_BMSK          = 0x1,
++      S_ARB_CLK_CTRL_CORE_CLK_GATING_EN_SHFT          = 0x0,
++      S_ARB_CLK_CTRL_CLK_GATING_EN_BMSK               = 0x1,
++      S_ARB_CLK_CTRL_CLK_GATING_EN_SHFT               = 0x0,
++};
++
++#define S_ARB_MODE_ADDR(b, n) \
++      (S_ARB_REG_BASE(b) + (0x8000 * (n)) + 0x00000210)
++enum bimc_s_arb_mode {
++      S_ARB_MODE_RMSK                         = 0xf0000001,
++      S_ARB_MODE_WR_GRANTS_AHEAD_BMSK         = 0xf0000000,
++      S_ARB_MODE_WR_GRANTS_AHEAD_SHFT         = 0x1c,
++      S_ARB_MODE_PRIO_RR_EN_BMSK              = 0x1,
++      S_ARB_MODE_PRIO_RR_EN_SHFT              = 0x0,
++};
++
++#define BKE_HEALTH_MASK \
++      (M_BKE_HEALTH_0_CONFIG_LIMIT_CMDS_BMSK |\
++      M_BKE_HEALTH_0_CONFIG_AREQPRIO_BMSK |\
++      M_BKE_HEALTH_0_CONFIG_PRIOLVL_BMSK)
++
++#define BKE_HEALTH_VAL(limit, areq, plvl) \
++      ((((limit) << M_BKE_HEALTH_0_CONFIG_LIMIT_CMDS_SHFT) & \
++      M_BKE_HEALTH_0_CONFIG_LIMIT_CMDS_BMSK) | \
++      (((areq) << M_BKE_HEALTH_0_CONFIG_AREQPRIO_SHFT) & \
++      M_BKE_HEALTH_0_CONFIG_AREQPRIO_BMSK) | \
++      (((plvl) << M_BKE_HEALTH_0_CONFIG_PRIOLVL_SHFT) & \
++      M_BKE_HEALTH_0_CONFIG_PRIOLVL_BMSK))
++
++#define MAX_GRANT_PERIOD \
++      (M_BKE_GP_GP_BMSK >> \
++      M_BKE_GP_GP_SHFT)
++
++#define MAX_GC \
++      (M_BKE_GC_GC_BMSK >> \
++      (M_BKE_GC_GC_SHFT + 1))
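++
++/*
++ * Note: BKE_HEALTH_VAL() packs the limit-commands, areq-priority and
++ * priority-level fields into a single register value masked by
++ * BKE_HEALTH_MASK. MAX_GRANT_PERIOD and MAX_GC are the largest
++ * programmable values derived from the field masks; the extra "+ 1" in
++ * the MAX_GC shift limits it to roughly half of the field's full range.
++ */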
++
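++/*
++ * bimc_div() divides *a by b in place: do_div() stores the quotient back
++ * in *a and returns the remainder, e.g. *a = 2500000, b = 1000000 leaves
++ * *a == 2 and returns 500000. A positive *a smaller than b is simply
++ * forced to 0 and 1 is returned.
++ */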
++static int bimc_div(int64_t *a, uint32_t b)
++{
++      if ((*a > 0) && (*a < b)) {
++              *a = 0;
++              return 1;
++      } else {
++              return do_div(*a, b);
++      }
++}
++
++#define ENABLE(val) ((val) == 1 ? 1 : 0)
++void msm_bus_bimc_set_mas_clk_gate(struct msm_bus_bimc_info *binfo,
++      uint32_t mas_index, struct msm_bus_bimc_clk_gate *bgate)
++{
++      uint32_t val, mask, reg_val;
++      void __iomem *addr;
++
++      reg_val = readl_relaxed(M_CLK_CTRL_ADDR(binfo->base,
++                      mas_index)) & M_CLK_CTRL_RMSK;
++      addr = M_CLK_CTRL_ADDR(binfo->base, mas_index);
++      mask = (M_CLK_CTRL_MAS_CLK_GATING_EN_BMSK |
++              M_CLK_CTRL_CORE_CLK_GATING_EN_BMSK);
++      val = (bgate->core_clk_gate_en <<
++              M_CLK_CTRL_MAS_CLK_GATING_EN_SHFT) |
++              bgate->port_clk_gate_en;
++      writel_relaxed(((reg_val & (~mask)) | (val & mask)), addr);
++      /* Ensure clock gating enable mask is set before exiting */
++      wmb();
++}
++
++void msm_bus_bimc_arb_en(struct msm_bus_bimc_info *binfo,
++      uint32_t slv_index, bool en)
++{
++      uint32_t reg_val, reg_mask_val, enable, val;
++
++      reg_mask_val = (readl_relaxed(S_ARB_CONFIG_INFO_0_ADDR(binfo->
++              base, slv_index)) & S_ARB_CONFIG_INFO_0_FUNC_BMSK)
++              >> S_ARB_CONFIG_INFO_0_FUNC_SHFT;
++      enable = ENABLE(en);
++      val = enable << S_ARB_MODE_PRIO_RR_EN_SHFT;
++      if (reg_mask_val == BIMC_ARB_MODE_PRIORITY_RR) {
++              reg_val = readl_relaxed(S_ARB_CONFIG_INFO_0_ADDR(binfo->
++                      base, slv_index)) & S_ARB_MODE_RMSK;
++              writel_relaxed(((reg_val & (~(S_ARB_MODE_PRIO_RR_EN_BMSK))) |
++                      (val & S_ARB_MODE_PRIO_RR_EN_BMSK)),
++                      S_ARB_MODE_ADDR(binfo->base, slv_index));
++              /* Ensure arbitration mode is set before returning */
++              wmb();
++      }
++}
++
++static void set_qos_mode(void __iomem *baddr, uint32_t index, uint32_t val0,
++      uint32_t val1, uint32_t val2)
++{
++      uint32_t reg_val, val;
++
++      reg_val = readl_relaxed(M_PRIOLVL_OVERRIDE_ADDR(baddr,
++              index)) & M_PRIOLVL_OVERRIDE_RMSK;
++      val = val0 << M_PRIOLVL_OVERRIDE_OVERRIDE_PRIOLVL_SHFT;
++      writel_relaxed(((reg_val & ~(M_PRIOLVL_OVERRIDE_OVERRIDE_PRIOLVL_BMSK))
++              | (val & M_PRIOLVL_OVERRIDE_OVERRIDE_PRIOLVL_BMSK)),
++              M_PRIOLVL_OVERRIDE_ADDR(baddr, index));
++      reg_val = readl_relaxed(M_RD_CMD_OVERRIDE_ADDR(baddr, index)) &
++              M_RD_CMD_OVERRIDE_RMSK;
++      val = val1 << M_RD_CMD_OVERRIDE_OVERRIDE_AREQPRIO_SHFT;
++      writel_relaxed(((reg_val & ~(M_RD_CMD_OVERRIDE_OVERRIDE_AREQPRIO_BMSK
++              )) | (val & M_RD_CMD_OVERRIDE_OVERRIDE_AREQPRIO_BMSK)),
++              M_RD_CMD_OVERRIDE_ADDR(baddr, index));
++      reg_val = readl_relaxed(M_WR_CMD_OVERRIDE_ADDR(baddr, index)) &
++              M_WR_CMD_OVERRIDE_RMSK;
++      val = val2 << M_WR_CMD_OVERRIDE_OVERRIDE_AREQPRIO_SHFT;
++      writel_relaxed(((reg_val & ~(M_WR_CMD_OVERRIDE_OVERRIDE_AREQPRIO_BMSK
++              )) | (val & M_WR_CMD_OVERRIDE_OVERRIDE_AREQPRIO_BMSK)),
++              M_WR_CMD_OVERRIDE_ADDR(baddr, index));
++      /* Ensure the priority register writes go through */
++      wmb();
++}
++
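++/*
++ * Mode selection, as implemented below: FIXED and BYPASS disable the BKE
++ * and program the priority overrides (1/1/1 for fixed, 0/0/0 for bypass);
++ * REGULATOR and LIMITER clear the overrides and then enable the BKE.
++ */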
++static void msm_bus_bimc_set_qos_mode(void __iomem *base,
++      uint32_t mas_index, uint8_t qmode_sel)
++{
++      uint32_t reg_val, val;
++
++      switch (qmode_sel) {
++      case BIMC_QOS_MODE_FIXED:
++              reg_val = readl_relaxed(M_BKE_EN_ADDR(base,
++                      mas_index));
++              writel_relaxed((reg_val & (~M_BKE_EN_EN_BMSK)),
++                      M_BKE_EN_ADDR(base, mas_index));
++              /* Ensure that the book-keeping register writes
++               * go through before setting QoS mode.
++               * QoS mode registers might write beyond 1K
++               * boundary in future
++               */
++              wmb();
++              set_qos_mode(base, mas_index, 1, 1, 1);
++              break;
++
++      case BIMC_QOS_MODE_BYPASS:
++              reg_val = readl_relaxed(M_BKE_EN_ADDR(base,
++                      mas_index));
++              writel_relaxed((reg_val & (~M_BKE_EN_EN_BMSK)),
++                      M_BKE_EN_ADDR(base, mas_index));
++              /* Ensure that the book-keeping register writes
++               * go through before setting QoS mode.
++               * QoS mode registers might write beyond 1K
++               * boundary in future
++               */
++              wmb();
++              set_qos_mode(base, mas_index, 0, 0, 0);
++              break;
++
++      case BIMC_QOS_MODE_REGULATOR:
++      case BIMC_QOS_MODE_LIMITER:
++              set_qos_mode(base, mas_index, 0, 0, 0);
++              reg_val = readl_relaxed(M_BKE_EN_ADDR(base,
++                      mas_index));
++              val = 1 << M_BKE_EN_EN_SHFT;
++              /* Ensure that the book-keeping register writes
++               * go through before setting QoS mode.
++               * QoS mode registers might write beyond 1K
++               * boundary in future
++               */
++              wmb();
++              writel_relaxed(((reg_val & (~M_BKE_EN_EN_BMSK)) | (val &
++                      M_BKE_EN_EN_BMSK)), M_BKE_EN_ADDR(base,
++                      mas_index));
++              break;
++      default:
++              break;
++      }
++}
++
++static void set_qos_prio_rl(void __iomem *addr, uint32_t rmsk,
++      uint8_t index, struct msm_bus_bimc_qos_mode *qmode)
++{
++      uint32_t reg_val, val0, val;
++
++      /* Note, addr is already passed with right mas_index */
++      reg_val = readl_relaxed(addr) & rmsk;
++      val0 = BKE_HEALTH_VAL(qmode->rl.qhealth[index].limit_commands,
++              qmode->rl.qhealth[index].areq_prio,
++              qmode->rl.qhealth[index].prio_level);
++      val = ((reg_val & (~(BKE_HEALTH_MASK))) | (val0 & BKE_HEALTH_MASK));
++      writel_relaxed(val, addr);
++      /* Ensure that the priority for regulator/limiter modes is
++       * set before returning
++       */
++      wmb();
++}
++
++static void msm_bus_bimc_set_qos_prio(void __iomem *base,
++      uint32_t mas_index, uint8_t qmode_sel,
++      struct msm_bus_bimc_qos_mode *qmode)
++{
++      uint32_t reg_val, val;
++
++      switch (qmode_sel) {
++      case BIMC_QOS_MODE_FIXED:
++              reg_val = readl_relaxed(M_PRIOLVL_OVERRIDE_ADDR(
++                      base, mas_index)) & M_PRIOLVL_OVERRIDE_RMSK;
++              val =  qmode->fixed.prio_level <<
++                      M_PRIOLVL_OVERRIDE_SHFT;
++              writel_relaxed(((reg_val &
++                      ~(M_PRIOLVL_OVERRIDE_BMSK)) | (val
++                      & M_PRIOLVL_OVERRIDE_BMSK)),
++                      M_PRIOLVL_OVERRIDE_ADDR(base, mas_index));
++
++              reg_val = readl_relaxed(M_RD_CMD_OVERRIDE_ADDR(
++                      base, mas_index)) & M_RD_CMD_OVERRIDE_RMSK;
++              val =  qmode->fixed.areq_prio_rd <<
++                      M_RD_CMD_OVERRIDE_AREQPRIO_SHFT;
++              writel_relaxed(((reg_val & ~(M_RD_CMD_OVERRIDE_AREQPRIO_BMSK))
++                      | (val & M_RD_CMD_OVERRIDE_AREQPRIO_BMSK)),
++                      M_RD_CMD_OVERRIDE_ADDR(base, mas_index));
++
++              reg_val = readl_relaxed(M_WR_CMD_OVERRIDE_ADDR(
++                      base, mas_index)) & M_WR_CMD_OVERRIDE_RMSK;
++              val =  qmode->fixed.areq_prio_wr <<
++                      M_WR_CMD_OVERRIDE_AREQPRIO_SHFT;
++              writel_relaxed(((reg_val & ~(M_WR_CMD_OVERRIDE_AREQPRIO_BMSK))
++                      | (val & M_WR_CMD_OVERRIDE_AREQPRIO_BMSK)),
++                      M_WR_CMD_OVERRIDE_ADDR(base, mas_index));
++              /* Ensure that fixed mode register writes go through
++               * before returning
++               */
++              wmb();
++              break;
++
++      case BIMC_QOS_MODE_REGULATOR:
++      case BIMC_QOS_MODE_LIMITER:
++              set_qos_prio_rl(M_BKE_HEALTH_3_CONFIG_ADDR(base,
++                      mas_index), M_BKE_HEALTH_3_CONFIG_RMSK, 3, qmode);
++              set_qos_prio_rl(M_BKE_HEALTH_2_CONFIG_ADDR(base,
++                      mas_index), M_BKE_HEALTH_2_CONFIG_RMSK, 2, qmode);
++              set_qos_prio_rl(M_BKE_HEALTH_1_CONFIG_ADDR(base,
++                      mas_index), M_BKE_HEALTH_1_CONFIG_RMSK, 1, qmode);
++              set_qos_prio_rl(M_BKE_HEALTH_0_CONFIG_ADDR(base,
++                      mas_index), M_BKE_HEALTH_0_CONFIG_RMSK, 0, qmode);
++              break;
++      case BIMC_QOS_MODE_BYPASS:
++      default:
++              break;
++      }
++}
++
++static void set_qos_bw_regs(void __iomem *baddr, uint32_t mas_index,
++      int32_t th, int32_t tm, int32_t tl, uint32_t gp,
++      uint32_t gc)
++{
++      int32_t reg_val, val;
++      int32_t bke_reg_val;
++      int16_t val2;
++
++      /* Disable BKE before writing to registers as per spec */
++      bke_reg_val = readl_relaxed(M_BKE_EN_ADDR(baddr, mas_index));
++      writel_relaxed((bke_reg_val & ~(M_BKE_EN_EN_BMSK)),
++              M_BKE_EN_ADDR(baddr, mas_index));
++
++      /* Write values of registers calculated */
++      reg_val = readl_relaxed(M_BKE_GP_ADDR(baddr, mas_index))
++              & M_BKE_GP_RMSK;
++      val =  gp << M_BKE_GP_GP_SHFT;
++      writel_relaxed(((reg_val & ~(M_BKE_GP_GP_BMSK)) | (val &
++              M_BKE_GP_GP_BMSK)), M_BKE_GP_ADDR(baddr, mas_index));
++
++      reg_val = readl_relaxed(M_BKE_GC_ADDR(baddr, mas_index)) &
++              M_BKE_GC_RMSK;
++      val =  gc << M_BKE_GC_GC_SHFT;
++      writel_relaxed(((reg_val & ~(M_BKE_GC_GC_BMSK)) | (val &
++              M_BKE_GC_GC_BMSK)), M_BKE_GC_ADDR(baddr, mas_index));
++
++      reg_val = readl_relaxed(M_BKE_THH_ADDR(baddr, mas_index)) &
++              M_BKE_THH_RMSK;
++      val =  th << M_BKE_THH_THRESH_SHFT;
++      writel_relaxed(((reg_val & ~(M_BKE_THH_THRESH_BMSK)) | (val &
++              M_BKE_THH_THRESH_BMSK)), M_BKE_THH_ADDR(baddr, mas_index));
++
++      reg_val = readl_relaxed(M_BKE_THM_ADDR(baddr, mas_index)) &
++              M_BKE_THM_RMSK;
++      val2 =  tm << M_BKE_THM_THRESH_SHFT;
++      writel_relaxed(((reg_val & ~(M_BKE_THM_THRESH_BMSK)) | (val2 &
++              M_BKE_THM_THRESH_BMSK)), M_BKE_THM_ADDR(baddr, mas_index));
++
++      reg_val = readl_relaxed(M_BKE_THL_ADDR(baddr, mas_index)) &
++              M_BKE_THL_RMSK;
++      val2 =  tl << M_BKE_THL_THRESH_SHFT;
++      writel_relaxed(((reg_val & ~(M_BKE_THL_THRESH_BMSK)) |
++              (val2 & M_BKE_THL_THRESH_BMSK)), M_BKE_THL_ADDR(baddr,
++              mas_index));
++
++      /* Ensure that all bandwidth register writes have completed
++       * before returning
++       */
++      wmb();
++}
++
++static void msm_bus_bimc_set_qos_bw(void __iomem *base, uint32_t qos_freq,
++      uint32_t mas_index, struct msm_bus_bimc_qos_bw *qbw)
++{
++      uint32_t bke_en;
++
++      /* Validate QOS Frequency */
++      if (qos_freq == 0) {
++              MSM_BUS_DBG("Zero frequency\n");
++              return;
++      }
++
++      /* Get enable bit for BKE before programming the period */
++      bke_en = (readl_relaxed(M_BKE_EN_ADDR(base, mas_index)) &
++              M_BKE_EN_EN_BMSK) >> M_BKE_EN_EN_SHFT;
++
++      /* Only calculate if there's a requested bandwidth and window */
++      if (qbw->bw && qbw->ws) {
++              int64_t th, tm, tl;
++              uint32_t gp, gc;
++              int64_t gp_nominal, gp_required, gp_calc, data, temp;
++              int64_t win = qbw->ws * qos_freq;
++              temp = win;
++              /*
++               * Calculate nominal grant period defined by requested
++               * window size.
++               * Cap this value at the maximum grant period.
++               */
++              bimc_div(&temp, 1000000);
++              gp_nominal = min_t(uint64_t, MAX_GRANT_PERIOD, temp);
++              /*
++               * Calculate the largest grant period allowed by the
++               * bandwidth request at the maximum grant count.
++               */
++              gp_calc = MAX_GC * qos_freq * 1000;
++              gp_required = gp_calc;
++              bimc_div(&gp_required, qbw->bw);
++
++              /* Use the min of the two grant periods */
++              gp = min_t(int64_t, gp_nominal, gp_required);
++
++              /* Calculate bandwidth in grants and cap at the max grant count. */
++              temp = qbw->bw * gp;
++              data = qos_freq * 1000;
++              bimc_div(&temp, data);
++              gc = min_t(int64_t, MAX_GC, temp);
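++              /*
++               * Illustrative numbers (not taken from any board file):
++               * with ws = 10000 ns and qos_freq = 19200 KHz,
++               * win / 1000000 = 10000 * 19200 / 1000000 = 192 cycles.
++               * Taking gp = 192, a bw of 10^9 bytes/s gives
++               * temp = 10^9 * 192 / 19200000 = 10000, so
++               * gc = min(MAX_GC, 10000) bytes per grant period.
++               */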
++
++              /* Calculate thresholds */
++              th = qbw->bw - qbw->thh;
++              tm = qbw->bw - qbw->thm;
++              tl = qbw->bw - qbw->thl;
++
++              th = th * gp;
++              bimc_div(&th, data);
++              tm = tm * gp;
++              bimc_div(&tm, data);
++              tl = tl * gp;
++              bimc_div(&tl, data);
++
++              MSM_BUS_DBG("BIMC: BW: mas_index: %d, th: %llu tm: %llu\n",
++                      mas_index, th, tm);
++              MSM_BUS_DBG("BIMC: tl: %llu gp:%u gc: %u bke_en: %u\n",
++                      tl, gp, gc, bke_en);
++              set_qos_bw_regs(base, mas_index, th, tm, tl, gp, gc);
++      } else {
++              /* Clear bandwidth registers */
++              set_qos_bw_regs(base, mas_index, 0, 0, 0, 0, 0);
++      }
++}
++
++static int msm_bus_bimc_allocate_commit_data(struct msm_bus_fabric_registration
++      *fab_pdata, void **cdata, int ctx)
++{
++      struct msm_bus_bimc_commit **cd = (struct msm_bus_bimc_commit **)cdata;
++      struct msm_bus_bimc_info *binfo =
++              (struct msm_bus_bimc_info *)fab_pdata->hw_data;
++
++      MSM_BUS_DBG("Allocating BIMC commit data\n");
++      *cd = kzalloc(sizeof(struct msm_bus_bimc_commit), GFP_KERNEL);
++      if (!*cd) {
++              MSM_BUS_DBG("Couldn't alloc mem for cdata\n");
++              return -ENOMEM;
++      }
++
++      (*cd)->mas = binfo->cdata[ctx].mas;
++      (*cd)->slv = binfo->cdata[ctx].slv;
++
++      return 0;
++}
++
++static void *msm_bus_bimc_allocate_bimc_data(struct platform_device *pdev,
++      struct msm_bus_fabric_registration *fab_pdata)
++{
++      struct resource *bimc_mem;
++      struct resource *bimc_io;
++      struct msm_bus_bimc_info *binfo;
++      int i;
++
++      MSM_BUS_DBG("Allocating BIMC data\n");
++      binfo = kzalloc(sizeof(struct msm_bus_bimc_info), GFP_KERNEL);
++      if (!binfo) {
++              WARN(!binfo, "Couldn't alloc mem for bimc_info\n");
++              return NULL;
++      }
++
++      binfo->qos_freq = fab_pdata->qos_freq;
++
++      binfo->params.nmasters = fab_pdata->nmasters;
++      binfo->params.nslaves = fab_pdata->nslaves;
++      binfo->params.bus_id = fab_pdata->id;
++
++      for (i = 0; i < NUM_CTX; i++) {
++              binfo->cdata[i].mas = kzalloc(sizeof(struct
++                      msm_bus_node_hw_info) * fab_pdata->nmasters * 2,
++                      GFP_KERNEL);
++              if (!binfo->cdata[i].mas) {
++                      MSM_BUS_ERR("Couldn't alloc mem for bimc master hw\n");
++                      kfree(binfo);
++                      return NULL;
++              }
++
++              binfo->cdata[i].slv = kzalloc(sizeof(struct
++                      msm_bus_node_hw_info) * fab_pdata->nslaves * 2,
++                      GFP_KERNEL);
++              if (!binfo->cdata[i].slv) {
++                      MSM_BUS_DBG("Couldn't alloc mem for bimc slave hw\n");
++                      kfree(binfo->cdata[i].mas);
++                      kfree(binfo);
++                      return NULL;
++              }
++      }
++
++      if (fab_pdata->virt) {
++              MSM_BUS_DBG("Don't get memory regions for virtual fabric\n");
++              goto skip_mem;
++      }
++
++      bimc_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++      if (!bimc_mem) {
++              MSM_BUS_ERR("Cannot get BIMC Base address\n");
++              kfree(binfo);
++              return NULL;
++      }
++
++      bimc_io = request_mem_region(bimc_mem->start,
++                      resource_size(bimc_mem), pdev->name);
++      if (!bimc_io) {
++              MSM_BUS_ERR("BIMC memory unavailable\n");
++              kfree(binfo);
++              return NULL;
++      }
++
++      binfo->base = ioremap(bimc_mem->start, resource_size(bimc_mem));
++      if (!binfo->base) {
++              MSM_BUS_ERR("IOremap failed for BIMC!\n");
++              release_mem_region(bimc_mem->start, resource_size(bimc_mem));
++              kfree(binfo);
++              return NULL;
++      }
++
++skip_mem:
++      fab_pdata->hw_data = (void *)binfo;
++      return (void *)binfo;
++}
++
++static void free_commit_data(void *cdata)
++{
++      struct msm_bus_bimc_commit *cd = (struct msm_bus_bimc_commit *)cdata;
++
++      kfree(cd->mas);
++      kfree(cd->slv);
++      kfree(cd);
++}
++
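++/*
++ * Turn the BKE on or off for one QoS port. When switching it off for
++ * fixed mode the priority overrides are restored to 1/1/1 first; when
++ * switching it on the overrides are cleared afterwards. The register
++ * write is skipped when the enable bit already matches the request.
++ */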
++static void bke_switch(
++      void __iomem *baddr, uint32_t mas_index, bool req, int mode)
++{
++      uint32_t reg_val, val, cur_val;
++
++      val = req << M_BKE_EN_EN_SHFT;
++      reg_val = readl_relaxed(M_BKE_EN_ADDR(baddr, mas_index));
++      cur_val = reg_val & M_BKE_EN_RMSK;
++      if (val == cur_val)
++              return;
++
++      if (!req && mode == BIMC_QOS_MODE_FIXED)
++              set_qos_mode(baddr, mas_index, 1, 1, 1);
++
++      writel_relaxed(((reg_val & ~(M_BKE_EN_EN_BMSK)) | (val &
++              M_BKE_EN_EN_BMSK)), M_BKE_EN_ADDR(baddr, mas_index));
++      /* Make sure BKE on/off goes through before changing priorities */
++      wmb();
++
++      if (req)
++              set_qos_mode(baddr, mas_index, 0, 0, 0);
++}
++
++static void bimc_set_static_qos_bw(void __iomem *base, unsigned int qos_freq,
++      int mport, struct msm_bus_bimc_qos_bw *qbw)
++{
++      int32_t bw_mbps, thh = 0, thm, thl, gc;
++      int32_t gp;
++      u64 temp;
++
++      if (qos_freq == 0) {
++              MSM_BUS_DBG("No QoS Frequency.\n");
++              return;
++      }
++
++      if (!(qbw->bw && qbw->gp)) {
++              MSM_BUS_DBG("No QoS Bandwidth or Window size\n");
++              return;
++      }
++
++      /* Convert bandwidth to MBPS */
++      temp = qbw->bw;
++      bimc_div(&temp, 1000000);
++      bw_mbps = temp;
++
++      /* Grant period in clock cycles
++       * Grant period from bandwidth structure
++       * is in nano seconds, QoS freq is in KHz.
++       * Divide by 1000 to get clock cycles.
++       */
++      gp = (qos_freq * qbw->gp) / (1000 * NSEC_PER_USEC);
++
++      /* Grant count = BW in MBps * Grant period
++       * in micro seconds
++       */
++      gc = bw_mbps * (qbw->gp / NSEC_PER_USEC);
++      gc = min(gc, MAX_GC);
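++      /*
++       * Illustrative: with the 5000 ns default grant period used by
++       * msm_bus_bimc_set_bw() and an assumed qos_freq of 19200 KHz,
++       * gp = 19200 * 5000 / 1000000 = 96 clock cycles, and gc comes
++       * out in bytes (MB/s multiplied by microseconds).
++       */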
++
++      /* Medium threshold = -((Medium Threshold percentage *
++       * Grant count) / 100)
++       */
++      thm = -((qbw->thmp * gc) / 100);
++      qbw->thm = thm;
++
++      /* Low threshold = -(Grant count) */
++      thl = -gc;
++      qbw->thl = thl;
++
++      MSM_BUS_DBG("%s: BKE parameters: gp %d, gc %d, thm %d thl %d thh %d",
++                      __func__, gp, gc, thm, thl, thh);
++
++      trace_bus_bke_params(gc, gp, thl, thm, thl);
++      set_qos_bw_regs(base, mport, thh, thm, thl, gp, gc);
++}
++
++static void msm_bus_bimc_config_master(
++      struct msm_bus_fabric_registration *fab_pdata,
++      struct msm_bus_inode_info *info,
++      uint64_t req_clk, uint64_t req_bw)
++{
++      int mode, i, ports;
++      struct msm_bus_bimc_info *binfo;
++      uint64_t bw = 0;
++
++      binfo = (struct msm_bus_bimc_info *)fab_pdata->hw_data;
++      ports = info->node_info->num_mports;
++
++      /*
++       * Check the dual-configuration details here and take action based
++       * on the mode; check the thresholds when in limiter mode, etc.
++       */
++
++      if (req_clk <= info->node_info->th[0]) {
++              mode = info->node_info->mode;
++              bw = info->node_info->bimc_bw[0];
++      } else if ((info->node_info->num_thresh > 1) &&
++                      (req_clk <= info->node_info->th[1])) {
++              mode = info->node_info->mode;
++              bw = info->node_info->bimc_bw[1];
++      } else
++              mode = info->node_info->mode_thresh;
++
++      switch (mode) {
++      case BIMC_QOS_MODE_BYPASS:
++      case BIMC_QOS_MODE_FIXED:
++              for (i = 0; i < ports; i++)
++                      bke_switch(binfo->base, info->node_info->qport[i],
++                              BKE_OFF, mode);
++              break;
++      case BIMC_QOS_MODE_REGULATOR:
++      case BIMC_QOS_MODE_LIMITER:
++              for (i = 0; i < ports; i++) {
++                      /* If not in fixed mode, update bandwidth */
++                      if ((info->node_info->cur_lim_bw != bw)
++                                      && (mode != BIMC_QOS_MODE_FIXED)) {
++                              struct msm_bus_bimc_qos_bw qbw;
++                              qbw.ws = info->node_info->ws;
++                              qbw.bw = bw;
++                              qbw.gp = info->node_info->bimc_gp;
++                              qbw.thmp = info->node_info->bimc_thmp;
++                              bimc_set_static_qos_bw(binfo->base,
++                                      binfo->qos_freq,
++                                      info->node_info->qport[i], &qbw);
++                              info->node_info->cur_lim_bw = bw;
++                              MSM_BUS_DBG("%s: Qos is %d reqclk %llu bw %llu",
++                                              __func__, mode, req_clk, bw);
++                      }
++                      bke_switch(binfo->base, info->node_info->qport[i],
++                              BKE_ON, mode);
++              }
++              break;
++      default:
++              break;
++      }
++}
++
++static void msm_bus_bimc_update_bw(struct msm_bus_inode_info *hop,
++      struct msm_bus_inode_info *info,
++      struct msm_bus_fabric_registration *fab_pdata,
++      void *sel_cdata, int *master_tiers,
++      int64_t add_bw)
++{
++      struct msm_bus_bimc_info *binfo;
++      struct msm_bus_bimc_qos_bw qbw;
++      int i;
++      int64_t bw;
++      int ports = info->node_info->num_mports;
++      struct msm_bus_bimc_commit *sel_cd =
++              (struct msm_bus_bimc_commit *)sel_cdata;
++
++      MSM_BUS_DBG("BIMC: Update bw for ID %d, with IID: %d: %lld\n",
++              info->node_info->id, info->node_info->priv_id, add_bw);
++
++      binfo = (struct msm_bus_bimc_info *)fab_pdata->hw_data;
++
++      if (info->node_info->num_mports == 0) {
++              MSM_BUS_DBG("BIMC: Skip Master BW\n");
++              goto skip_mas_bw;
++      }
++
++      ports = info->node_info->num_mports;
++      bw = INTERLEAVED_BW(fab_pdata, add_bw, ports);
++
++      for (i = 0; i < ports; i++) {
++              sel_cd->mas[info->node_info->masterp[i]].bw += bw;
++              sel_cd->mas[info->node_info->masterp[i]].hw_id =
++                      info->node_info->mas_hw_id;
++              MSM_BUS_DBG("BIMC: Update mas_bw for ID: %d -> %llu\n",
++                      info->node_info->priv_id,
++                      sel_cd->mas[info->node_info->masterp[i]].bw);
++              if (info->node_info->hw_sel == MSM_BUS_RPM)
++                      sel_cd->mas[info->node_info->masterp[i]].dirty = 1;
++              else {
++                      if (!info->node_info->qport) {
++                              MSM_BUS_DBG("No qos ports to update!\n");
++                              break;
++                      }
++                      if (!((info->node_info->mode == BIMC_QOS_MODE_REGULATOR)
++                                      || (info->node_info->mode ==
++                                              BIMC_QOS_MODE_LIMITER))) {
++                              MSM_BUS_DBG("Skip QoS reg programming\n");
++                              break;
++                      }
++
++                      MSM_BUS_DBG("qport: %d\n", info->node_info->qport[i]);
++                      qbw.bw = sel_cd->mas[info->node_info->masterp[i]].bw;
++                      qbw.ws = info->node_info->ws;
++                      /* Threshold low = 90% of bw */
++                      qbw.thl = div_s64((90 * bw), 100);
++                      /* Threshold medium = bw */
++                      qbw.thm = bw;
++                      /* Threshold high = 10% more than bw */
++                      qbw.thh = div_s64((110 * bw), 100);
++                      /* Check if info is a shared master.
++                       * If it is, mark it dirty.
++                       * If it isn't, then set the QoS bandwidth.
++                       * Also, if dual-conf is set, don't program bw regs.
++                       */
++                      if (!info->node_info->dual_conf &&
++                      ((info->node_info->mode == BIMC_QOS_MODE_LIMITER) ||
++                      (info->node_info->mode == BIMC_QOS_MODE_REGULATOR)))
++                              msm_bus_bimc_set_qos_bw(binfo->base,
++                                      binfo->qos_freq,
++                                      info->node_info->qport[i], &qbw);
++              }
++      }
++
++skip_mas_bw:
++      ports = hop->node_info->num_sports;
++      MSM_BUS_DBG("BIMC: ID: %d, Sports: %d\n", hop->node_info->priv_id,
++              ports);
++
++      for (i = 0; i < ports; i++) {
++              sel_cd->slv[hop->node_info->slavep[i]].bw += add_bw;
++              sel_cd->slv[hop->node_info->slavep[i]].hw_id =
++                      hop->node_info->slv_hw_id;
++              MSM_BUS_DBG("BIMC: Update slave_bw: ID: %d -> %llu\n",
++                      hop->node_info->priv_id,
++                      sel_cd->slv[hop->node_info->slavep[i]].bw);
++              MSM_BUS_DBG("BIMC: Update slave_bw: index: %d\n",
++                      hop->node_info->slavep[i]);
++              /* Check if hop is a shared slave.
++               * If it is, mark it dirty.
++               * If it isn't, there is nothing to be done as the
++               * slaves are in bypass mode.
++               */
++              if (hop->node_info->hw_sel == MSM_BUS_RPM) {
++                      MSM_BUS_DBG("Slave dirty: %d, slavep: %d\n",
++                              hop->node_info->priv_id,
++                              hop->node_info->slavep[i]);
++                      sel_cd->slv[hop->node_info->slavep[i]].dirty = 1;
++              }
++      }
++}
++
++static int msm_bus_bimc_commit(struct msm_bus_fabric_registration
++      *fab_pdata, void *hw_data, void **cdata)
++{
++      MSM_BUS_DBG("\nReached BIMC Commit\n");
++      msm_bus_remote_hw_commit(fab_pdata, hw_data, cdata);
++      return 0;
++}
++
++static void msm_bus_bimc_config_limiter(
++      struct msm_bus_fabric_registration *fab_pdata,
++      struct msm_bus_inode_info *info)
++{
++      struct msm_bus_bimc_info *binfo;
++      int mode, i, ports;
++
++      binfo = (struct msm_bus_bimc_info *)fab_pdata->hw_data;
++      ports = info->node_info->num_mports;
++
++      if (!info->node_info->qport) {
++              MSM_BUS_DBG("No QoS Ports to init\n");
++              return;
++      }
++
++      if (info->cur_lim_bw)
++              mode = BIMC_QOS_MODE_LIMITER;
++      else
++              mode = info->node_info->mode;
++
++      switch (mode) {
++      case BIMC_QOS_MODE_BYPASS:
++      case BIMC_QOS_MODE_FIXED:
++              for (i = 0; i < ports; i++)
++                      bke_switch(binfo->base, info->node_info->qport[i],
++                              BKE_OFF, mode);
++              break;
++      case BIMC_QOS_MODE_REGULATOR:
++      case BIMC_QOS_MODE_LIMITER:
++              if (info->cur_lim_bw != info->cur_prg_bw) {
++                      MSM_BUS_DBG("Enabled BKE throttling node %d to %llu\n",
++                              info->node_info->id, info->cur_lim_bw);
++                      trace_bus_bimc_config_limiter(info->node_info->id,
++                              info->cur_lim_bw);
++                      for (i = 0; i < ports; i++) {
++                              /* If not in fixed mode, update bandwidth */
++                              struct msm_bus_bimc_qos_bw qbw;
++
++                              qbw.ws = info->node_info->ws;
++                              qbw.bw = info->cur_lim_bw;
++                              qbw.gp = info->node_info->bimc_gp;
++                              qbw.thmp = info->node_info->bimc_thmp;
++                              bimc_set_static_qos_bw(binfo->base,
++                                      binfo->qos_freq,
++                                      info->node_info->qport[i], &qbw);
++                              bke_switch(binfo->base,
++                                      info->node_info->qport[i],
++                                      BKE_ON, mode);
++                              info->cur_prg_bw = qbw.bw;
++                      }
++              }
++              break;
++      default:
++              break;
++      }
++}
++
++static void bimc_init_mas_reg(struct msm_bus_bimc_info *binfo,
++      struct msm_bus_inode_info *info,
++      struct msm_bus_bimc_qos_mode *qmode, int mode)
++{
++      int i;
++
++      switch (mode) {
++      case BIMC_QOS_MODE_FIXED:
++              qmode->fixed.prio_level = info->node_info->prio_lvl;
++              qmode->fixed.areq_prio_rd = info->node_info->prio_rd;
++              qmode->fixed.areq_prio_wr = info->node_info->prio_wr;
++              break;
++      case BIMC_QOS_MODE_LIMITER:
++              qmode->rl.qhealth[0].limit_commands = 1;
++              qmode->rl.qhealth[1].limit_commands = 0;
++              qmode->rl.qhealth[2].limit_commands = 0;
++              qmode->rl.qhealth[3].limit_commands = 0;
++              break;
++      default:
++              break;
++      }
++
++      if (!info->node_info->qport) {
++              MSM_BUS_DBG("No QoS Ports to init\n");
++              return;
++      }
++
++      for (i = 0; i < info->node_info->num_mports; i++) {
++              /* If not in bypass mode, update priority */
++              if (mode != BIMC_QOS_MODE_BYPASS) {
++                      msm_bus_bimc_set_qos_prio(binfo->base,
++                              info->node_info->
++                              qport[i], mode, qmode);
++
++                      /* If not in fixed mode, update bandwidth */
++                      if (mode != BIMC_QOS_MODE_FIXED) {
++                              struct msm_bus_bimc_qos_bw qbw;
++                              qbw.ws = info->node_info->ws;
++                              qbw.bw = info->node_info->bimc_bw[0];
++                              qbw.gp = info->node_info->bimc_gp;
++                              qbw.thmp = info->node_info->bimc_thmp;
++                              bimc_set_static_qos_bw(binfo->base,
++                                      binfo->qos_freq,
++                                      info->node_info->qport[i], &qbw);
++                      }
++              }
++
++              /* set mode */
++              msm_bus_bimc_set_qos_mode(binfo->base,
++                                      info->node_info->qport[i],
++                                      mode);
++      }
++}
++
++static void init_health_regs(struct msm_bus_bimc_info *binfo,
++                              struct msm_bus_inode_info *info,
++                              struct msm_bus_bimc_qos_mode *qmode,
++                              int mode)
++{
++      int i;
++
++      if (mode == BIMC_QOS_MODE_LIMITER) {
++              qmode->rl.qhealth[0].limit_commands = 1;
++              qmode->rl.qhealth[1].limit_commands = 0;
++              qmode->rl.qhealth[2].limit_commands = 0;
++              qmode->rl.qhealth[3].limit_commands = 0;
++
++              if (!info->node_info->qport) {
++                      MSM_BUS_DBG("No QoS Ports to init\n");
++                      return;
++              }
++
++              for (i = 0; i < info->node_info->num_mports; i++) {
++                      /* If not in bypass mode, update priority */
++                      if (mode != BIMC_QOS_MODE_BYPASS)
++                              msm_bus_bimc_set_qos_prio(binfo->base,
++                              info->node_info->qport[i], mode, qmode);
++              }
++      }
++}
++
++
++static int msm_bus_bimc_mas_init(struct msm_bus_bimc_info *binfo,
++      struct msm_bus_inode_info *info)
++{
++      struct msm_bus_bimc_qos_mode *qmode;
++      qmode = kzalloc(sizeof(struct msm_bus_bimc_qos_mode),
++              GFP_KERNEL);
++      if (!qmode) {
++              MSM_BUS_WARN("Couldn't alloc prio data for node: %d\n",
++                      info->node_info->id);
++              return -ENOMEM;
++      }
++
++      info->hw_data = (void *)qmode;
++
++      /*
++       * If the master supports dual configuration,
++       * configure registers for both modes
++       */
++      if (info->node_info->dual_conf)
++              bimc_init_mas_reg(binfo, info, qmode,
++                      info->node_info->mode_thresh);
++      else if (info->node_info->nr_lim)
++              init_health_regs(binfo, info, qmode, BIMC_QOS_MODE_LIMITER);
++
++      bimc_init_mas_reg(binfo, info, qmode, info->node_info->mode);
++      return 0;
++}
++
++static void msm_bus_bimc_node_init(void *hw_data,
++      struct msm_bus_inode_info *info)
++{
++      struct msm_bus_bimc_info *binfo =
++              (struct msm_bus_bimc_info *)hw_data;
++
++      if (!IS_SLAVE(info->node_info->priv_id) &&
++              (info->node_info->hw_sel != MSM_BUS_RPM))
++              msm_bus_bimc_mas_init(binfo, info);
++}
++
++static int msm_bus_bimc_port_halt(uint32_t haltid, uint8_t mport)
++{
++      return 0;
++}
++
++static int msm_bus_bimc_port_unhalt(uint32_t haltid, uint8_t mport)
++{
++      return 0;
++}
++
++static int msm_bus_bimc_limit_mport(struct msm_bus_node_device_type *info,
++                              void __iomem *qos_base, uint32_t qos_off,
++                              uint32_t qos_delta, uint32_t qos_freq,
++                              bool enable_lim, u64 lim_bw)
++{
++      int mode;
++      int i;
++
++      if (ZERO_OR_NULL_PTR(info->node_info->qport)) {
++              MSM_BUS_DBG("No QoS Ports to limit\n");
++              return 0;
++      }
++
++      if (enable_lim && lim_bw) {
++              mode =  BIMC_QOS_MODE_LIMITER;
++
++              if (!info->node_info->lim_bw) {
++                      struct msm_bus_bimc_qos_mode qmode;
++                      qmode.rl.qhealth[0].limit_commands = 1;
++                      qmode.rl.qhealth[1].limit_commands = 0;
++                      qmode.rl.qhealth[2].limit_commands = 0;
++                      qmode.rl.qhealth[3].limit_commands = 0;
++
++                      for (i = 0; i < info->node_info->num_qports; i++) {
++                              /* If not in bypass mode, update priority */
++                              if (mode != BIMC_QOS_MODE_BYPASS)
++                                      msm_bus_bimc_set_qos_prio(qos_base,
++                                      info->node_info->qport[i], mode,
++                                      &qmode);
++                      }
++              }
++
++              for (i = 0; i < info->node_info->num_qports; i++) {
++                      struct msm_bus_bimc_qos_bw qbw;
++                      /* If not in fixed mode, update bandwidth */
++                      if ((info->node_info->lim_bw != lim_bw)) {
++                              qbw.ws = info->node_info->qos_params.ws;
++                              qbw.bw = lim_bw;
++                              qbw.gp = info->node_info->qos_params.gp;
++                              qbw.thmp = info->node_info->qos_params.thmp;
++                              bimc_set_static_qos_bw(qos_base, qos_freq,
++                                      info->node_info->qport[i], &qbw);
++                      }
++                      bke_switch(qos_base, info->node_info->qport[i],
++                              BKE_ON, mode);
++              }
++              info->node_info->lim_bw = lim_bw;
++      } else {
++              mode = info->node_info->qos_params.mode;
++              for (i = 0; i < info->node_info->num_qports; i++) {
++                      bke_switch(qos_base, info->node_info->qport[i],
++                              BKE_OFF, mode);
++              }
++      }
++      info->node_info->qos_params.cur_mode = mode;
++      return 0;
++}
++
++static bool msm_bus_bimc_update_bw_reg(int mode)
++{
++      bool ret = false;
++
++      if ((mode == BIMC_QOS_MODE_LIMITER)
++              || (mode == BIMC_QOS_MODE_REGULATOR))
++              ret = true;
++
++      return ret;
++}
++
++static int msm_bus_bimc_qos_init(struct msm_bus_node_device_type *info,
++                              void __iomem *qos_base,
++                              uint32_t qos_off, uint32_t qos_delta,
++                              uint32_t qos_freq)
++{
++      int i;
++      struct msm_bus_bimc_qos_mode qmode;
++
++      switch (info->node_info->qos_params.mode) {
++      case BIMC_QOS_MODE_FIXED:
++              qmode.fixed.prio_level = info->node_info->qos_params.prio_lvl;
++              qmode.fixed.areq_prio_rd = info->node_info->qos_params.prio_rd;
++              qmode.fixed.areq_prio_wr = info->node_info->qos_params.prio_wr;
++              break;
++      case BIMC_QOS_MODE_LIMITER:
++              qmode.rl.qhealth[0].limit_commands = 1;
++              qmode.rl.qhealth[1].limit_commands = 0;
++              qmode.rl.qhealth[2].limit_commands = 0;
++              qmode.rl.qhealth[3].limit_commands = 0;
++              break;
++      default:
++              break;
++      }
++
++      if (ZERO_OR_NULL_PTR(info->node_info->qport)) {
++              MSM_BUS_DBG("No QoS Ports to init\n");
++              return 0;
++      }
++
++      for (i = 0; i < info->node_info->num_qports; i++) {
++              /* If not in bypass mode, update priority */
++              if (info->node_info->qos_params.mode != BIMC_QOS_MODE_BYPASS)
++                      msm_bus_bimc_set_qos_prio(qos_base, info->node_info->
++                              qport[i], info->node_info->qos_params.mode,
++                                                                      &qmode);
++
++              /* set mode */
++              if (info->node_info->qos_params.mode == BIMC_QOS_MODE_LIMITER)
++                      bke_switch(qos_base, info->node_info->qport[i],
++                              BKE_OFF, BIMC_QOS_MODE_FIXED);
++              else
++                     msm_bus_bimc_set_qos_mode(qos_base,
++                              info->node_info->qport[i],
++                              info->node_info->qos_params.mode);
++      }
++
++      return 0;
++}
++
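++/*
++ * For limiter/regulator masters this splits the dual-context ab vote
++ * across the QoS ports (plus any bw_buffer margin), defaulting the grant
++ * period to 5 us and the medium threshold to 50%. A zero vote falls back
++ * to fixed mode with the BKE switched off.
++ */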
++static int msm_bus_bimc_set_bw(struct msm_bus_node_device_type *dev,
++                              void __iomem *qos_base, uint32_t qos_off,
++                              uint32_t qos_delta, uint32_t qos_freq)
++{
++      struct msm_bus_bimc_qos_bw qbw;
++      int i;
++      int64_t bw = 0;
++      int ret = 0;
++      struct msm_bus_node_info_type *info = dev->node_info;
++
++      if (info && info->num_qports &&
++              ((info->qos_params.mode == BIMC_QOS_MODE_LIMITER) ||
++              (info->qos_params.mode == BIMC_QOS_MODE_REGULATOR))) {
++              bw = msm_bus_div64(info->num_qports,
++                              dev->node_ab.ab[DUAL_CTX]);
++
++              for (i = 0; i < info->num_qports; i++) {
++                      MSM_BUS_DBG("BIMC: Update mas_bw for ID: %d -> %llu\n",
++                              info->id, bw);
++
++                      if (!info->qport) {
++                              MSM_BUS_DBG("No qos ports to update!\n");
++                              break;
++                      }
++
++                      qbw.bw = bw + info->qos_params.bw_buffer;
++                      trace_bus_bimc_config_limiter(info->id, bw);
++
++                      /* Default to gp of 5us */
++                      qbw.gp = (info->qos_params.gp ?
++                                      info->qos_params.gp : 5000);
++                      /* Default to thmp of 50% */
++                      qbw.thmp = (info->qos_params.thmp ?
++                                      info->qos_params.thmp : 50);
++                      /*
++                       * If the BW vote is 0 then set the QoS mode to
++                       * Fixed.
++                       */
++                      if (bw) {
++                              bimc_set_static_qos_bw(qos_base, qos_freq,
++                                      info->qport[i], &qbw);
++                              bke_switch(qos_base, info->qport[i],
++                                      BKE_ON, info->qos_params.mode);
++                      } else {
++                              bke_switch(qos_base, info->qport[i],
++                                      BKE_OFF, BIMC_QOS_MODE_FIXED);
++                      }
++              }
++      }
++      return ret;
++}
++
++int msm_bus_bimc_hw_init(struct msm_bus_fabric_registration *pdata,
++      struct msm_bus_hw_algorithm *hw_algo)
++{
++      /* Set interleaving to true by default */
++      MSM_BUS_DBG("\nInitializing BIMC...\n");
++      pdata->il_flag = true;
++      hw_algo->allocate_commit_data = msm_bus_bimc_allocate_commit_data;
++      hw_algo->allocate_hw_data = msm_bus_bimc_allocate_bimc_data;
++      hw_algo->node_init = msm_bus_bimc_node_init;
++      hw_algo->free_commit_data = free_commit_data;
++      hw_algo->update_bw = msm_bus_bimc_update_bw;
++      hw_algo->commit = msm_bus_bimc_commit;
++      hw_algo->port_halt = msm_bus_bimc_port_halt;
++      hw_algo->port_unhalt = msm_bus_bimc_port_unhalt;
++      hw_algo->config_master = msm_bus_bimc_config_master;
++      hw_algo->config_limiter = msm_bus_bimc_config_limiter;
++      hw_algo->update_bw_reg = msm_bus_bimc_update_bw_reg;
++      /* BIMC slaves are shared. Slave registers are set through RPM */
++      if (!pdata->ahb)
++              pdata->rpm_enabled = 1;
++      return 0;
++}
++
++int msm_bus_bimc_set_ops(struct msm_bus_node_device_type *bus_dev)
++{
++      if (!bus_dev)
++              return -ENODEV;
++      else {
++              bus_dev->fabdev->noc_ops.qos_init = msm_bus_bimc_qos_init;
++              bus_dev->fabdev->noc_ops.set_bw = msm_bus_bimc_set_bw;
++              bus_dev->fabdev->noc_ops.limit_mport = msm_bus_bimc_limit_mport;
++              bus_dev->fabdev->noc_ops.update_bw_reg =
++                                              msm_bus_bimc_update_bw_reg;
++      }
++      return 0;
++}
++EXPORT_SYMBOL(msm_bus_bimc_set_ops);
+--- /dev/null
++++ b/drivers/bus/msm_bus/msm_bus_bimc.h
+@@ -0,0 +1,127 @@
++/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 and
++ * only version 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ */
++
++#ifndef _ARCH_ARM_MACH_MSM_BUS_BIMC_H
++#define _ARCH_ARM_MACH_MSM_BUS_BIMC_H
++
++struct msm_bus_bimc_params {
++      uint32_t bus_id;
++      uint32_t addr_width;
++      uint32_t data_width;
++      uint32_t nmasters;
++      uint32_t nslaves;
++};
++
++struct msm_bus_bimc_commit {
++      struct msm_bus_node_hw_info *mas;
++      struct msm_bus_node_hw_info *slv;
++};
++
++struct msm_bus_bimc_info {
++      void __iomem *base;
++      uint32_t base_addr;
++      uint32_t qos_freq;
++      struct msm_bus_bimc_params params;
++      struct msm_bus_bimc_commit cdata[NUM_CTX];
++};
++
++struct msm_bus_bimc_node {
++      uint32_t conn_mask;
++      uint32_t data_width;
++      uint8_t slv_arb_mode;
++};
++
++enum msm_bus_bimc_arb_mode {
++      BIMC_ARB_MODE_RR = 0,
++      BIMC_ARB_MODE_PRIORITY_RR,
++      BIMC_ARB_MODE_TIERED_RR,
++};
++
++
++enum msm_bus_bimc_interleave {
++      BIMC_INTERLEAVE_NONE = 0,
++      BIMC_INTERLEAVE_ODD,
++      BIMC_INTERLEAVE_EVEN,
++};
++
++struct msm_bus_bimc_slave_seg {
++      bool enable;
++      uint64_t start_addr;
++      uint64_t seg_size;
++      uint8_t interleave;
++};
++
++enum msm_bus_bimc_qos_mode_type {
++      BIMC_QOS_MODE_FIXED = 0,
++      BIMC_QOS_MODE_LIMITER,
++      BIMC_QOS_MODE_BYPASS,
++      BIMC_QOS_MODE_REGULATOR,
++};
++
++struct msm_bus_bimc_qos_health {
++      bool limit_commands;
++      uint32_t areq_prio;
++      uint32_t prio_level;
++};
++
++struct msm_bus_bimc_mode_fixed {
++      uint32_t prio_level;
++      uint32_t areq_prio_rd;
++      uint32_t areq_prio_wr;
++};
++
++struct msm_bus_bimc_mode_rl {
++      uint8_t qhealthnum;
++      struct msm_bus_bimc_qos_health qhealth[4];
++};
++
++struct msm_bus_bimc_qos_mode {
++      uint8_t mode;
++      struct msm_bus_bimc_mode_fixed fixed;
++      struct msm_bus_bimc_mode_rl rl;
++};
++
++struct msm_bus_bimc_qos_bw {
++      uint64_t bw;    /* bw is in Bytes/sec */
++      uint32_t ws;    /* Window size in nanoseconds */
++      int64_t thh;    /* Threshold high, bytes per second */
++      int64_t thm;    /* Threshold medium, bytes per second */
++      int64_t thl;    /* Threshold low, bytes per second */
++      u32 gp; /* Grant Period in micro seconds */
++      u32 thmp; /* Threshold medium in percentage */
++};
++
++struct msm_bus_bimc_clk_gate {
++      bool core_clk_gate_en;
++      bool arb_clk_gate_en;   /* For arbiter */
++      bool port_clk_gate_en;  /* For regs on BIMC core clock */
++};
++
++void msm_bus_bimc_set_slave_seg(struct msm_bus_bimc_info *binfo,
++      uint32_t slv_index, uint32_t seg_index,
++      struct msm_bus_bimc_slave_seg *bsseg);
++void msm_bus_bimc_set_slave_clk_gate(struct msm_bus_bimc_info *binfo,
++      uint32_t slv_index, struct msm_bus_bimc_clk_gate *bgate);
++void msm_bus_bimc_set_mas_clk_gate(struct msm_bus_bimc_info *binfo,
++      uint32_t mas_index, struct msm_bus_bimc_clk_gate *bgate);
++void msm_bus_bimc_arb_en(struct msm_bus_bimc_info *binfo,
++      uint32_t slv_index, bool en);
++void msm_bus_bimc_get_params(struct msm_bus_bimc_info *binfo,
++      struct msm_bus_bimc_params *params);
++void msm_bus_bimc_get_mas_params(struct msm_bus_bimc_info *binfo,
++      uint32_t mas_index, struct msm_bus_bimc_node *mparams);
++void msm_bus_bimc_get_slv_params(struct msm_bus_bimc_info *binfo,
++      uint32_t slv_index, struct msm_bus_bimc_node *sparams);
++bool msm_bus_bimc_get_arb_en(struct msm_bus_bimc_info *binfo,
++      uint32_t slv_index);
++
++#endif /*_ARCH_ARM_MACH_MSM_BUS_BIMC_H*/
+--- /dev/null
++++ b/drivers/bus/msm_bus/msm_bus_client_api.c
+@@ -0,0 +1,83 @@
++/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 and
++ * only version 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ */
++
++#define pr_fmt(fmt) "AXI: %s(): " fmt, __func__
++
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/list.h>
++#include <linux/module.h>
++#include <linux/slab.h>
++#include <linux/mutex.h>
++#include <linux/radix-tree.h>
++#include <linux/clk.h>
++#include "msm-bus.h"
++#include "msm_bus_core.h"
++
++struct msm_bus_arb_ops arb_ops;
++
++/**
++ * msm_bus_scale_register_client() - Register the clients with the msm bus
++ * driver
++ * @pdata: Platform data of the client, containing src, dest, ab, ib.
++ * Return non-zero value in case of success, 0 in case of failure.
++ *
++ * Client data contains the vectors specifying arbitrated bandwidth (ab)
++ * and instantaneous bandwidth (ib) requested between a particular
++ * src and dest.
++ */
++uint32_t msm_bus_scale_register_client(struct msm_bus_scale_pdata *pdata)
++{
++      if (arb_ops.register_client)
++              return arb_ops.register_client(pdata);
++      else {
++              pr_err("%s: Bus driver not ready.",
++                              __func__);
++              return 0;
++      }
++}
++EXPORT_SYMBOL(msm_bus_scale_register_client);
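++
++/*
++ * Typical client flow (an illustrative sketch, not taken from a real
++ * client; it only assumes a populated struct msm_bus_scale_pdata
++ * describing the use-case vectors):
++ *
++ *        u32 cl = msm_bus_scale_register_client(pdata);
++ *
++ *        if (cl)
++ *                msm_bus_scale_client_update_request(cl, 1);
++ *        ...
++ *        msm_bus_scale_unregister_client(cl);
++ */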
++
++/**
++ * msm_bus_scale_client_update_request() - Update the request for bandwidth
++ * from a particular client
++ *
++ * @cl: Handle to the client
++ * @index: Index into the vector to which the bw and clock values need to be
++ * updated
++ */
++int msm_bus_scale_client_update_request(uint32_t cl, unsigned int index)
++{
++      if (arb_ops.update_request)
++              return arb_ops.update_request(cl, index);
++      else {
++              pr_err("%s: Bus driver not ready.",
++                              __func__);
++              return -EPROBE_DEFER;
++      }
++}
++EXPORT_SYMBOL(msm_bus_scale_client_update_request);
++
++/**
++ * msm_bus_scale_unregister_client() - Unregister the client from the bus driver
++ * @cl: Handle to the client
++ */
++void msm_bus_scale_unregister_client(uint32_t cl)
++{
++      if (arb_ops.unregister_client)
++              arb_ops.unregister_client(cl);
++      else {
++              pr_err("%s: Bus driver not ready.",
++                              __func__);
++      }
++}
++EXPORT_SYMBOL(msm_bus_scale_unregister_client);
+--- /dev/null
++++ b/drivers/bus/msm_bus/msm_bus_core.c
+@@ -0,0 +1,125 @@
++/* Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 and
++ * only version 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ */
++
++#define pr_fmt(fmt) "AXI: %s(): " fmt, __func__
++
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/slab.h>
++#include <linux/mutex.h>
++#include <linux/radix-tree.h>
++#include <linux/clk.h>
++#include "msm-bus-board.h"
++#include "msm-bus.h"
++#include "msm_bus_core.h"
++
++static atomic_t num_fab = ATOMIC_INIT(0);
++
++int msm_bus_get_num_fab(void)
++{
++      return atomic_read(&num_fab);
++}
++
++int msm_bus_device_match(struct device *dev, void *id)
++{
++      struct msm_bus_fabric_device *fabdev = to_msm_bus_fabric_device(dev);
++
++      if (!fabdev) {
++              MSM_BUS_WARN("Fabric %p returning 0\n", fabdev);
++              return 0;
++      }
++      return fabdev->id == *(int *)id;
++}
++
++static void msm_bus_release(struct device *device)
++{
++}
++
++struct bus_type msm_bus_type = {
++      .name      = "msm-bus-type",
++};
++EXPORT_SYMBOL(msm_bus_type);
++
++/**
++ * msm_bus_get_fabric_device() - This function is used to search for
++ * the fabric device on the bus
++ * @fabid: Fabric id
++ * Function returns: Pointer to the fabric device
++ */
++struct msm_bus_fabric_device *msm_bus_get_fabric_device(int fabid)
++{
++      struct device *dev;
++      struct msm_bus_fabric_device *fabric;
++      dev = bus_find_device(&msm_bus_type, NULL, (void *)&fabid,
++              msm_bus_device_match);
++      if (!dev)
++              return NULL;
++      fabric = to_msm_bus_fabric_device(dev);
++      return fabric;
++}
++
++/**
++ * msm_bus_fabric_device_register() - Registers a fabric on msm bus
++ * @fabdev: Fabric device to be registered
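++ * Function returns: 0 on success, negative errno on failure.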
++ */
++int msm_bus_fabric_device_register(struct msm_bus_fabric_device *fabdev)
++{
++      int ret = 0;
++      fabdev->dev.bus = &msm_bus_type;
++      fabdev->dev.release = msm_bus_release;
++      ret = dev_set_name(&fabdev->dev, "%s", fabdev->name);
++      if (ret) {
++              MSM_BUS_ERR("error setting dev name\n");
++              goto err;
++      }
++
++      ret = device_register(&fabdev->dev);
++      if (ret < 0) {
++              MSM_BUS_ERR("error registering device%d %s\n",
++                              ret, fabdev->name);
++              goto err;
++      }
++      atomic_inc(&num_fab);
++err:
++      return ret;
++}
++
++/**
++ * msm_bus_fabric_device_unregister() - Unregisters the fabric
++ * devices from the msm bus
++ */
++void msm_bus_fabric_device_unregister(struct msm_bus_fabric_device *fabdev)
++{
++      device_unregister(&fabdev->dev);
++      atomic_dec(&num_fab);
++}
++
++static void __exit msm_bus_exit(void)
++{
++      bus_unregister(&msm_bus_type);
++}
++
++static int __init msm_bus_init(void)
++{
++      int retval = 0;
++      retval = bus_register(&msm_bus_type);
++      if (retval)
++              MSM_BUS_ERR("bus_register error! %d\n",
++                      retval);
++      return retval;
++}
++postcore_initcall(msm_bus_init);
++module_exit(msm_bus_exit);
++MODULE_LICENSE("GPL v2");
++MODULE_VERSION("0.2");
++MODULE_ALIAS("platform:msm_bus");
+--- /dev/null
++++ b/drivers/bus/msm_bus/msm_bus_core.h
+@@ -0,0 +1,375 @@
++/* Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 and
++ * only version 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ */
++
++#ifndef _ARCH_ARM_MACH_MSM_BUS_CORE_H
++#define _ARCH_ARM_MACH_MSM_BUS_CORE_H
++
++#include <linux/types.h>
++#include <linux/device.h>
++#include <linux/radix-tree.h>
++#include <linux/platform_device.h>
++#include "msm-bus-board.h"
++#include "msm-bus.h"
++
++#define MSM_BUS_DBG(msg, ...) \
++      pr_debug(msg, ## __VA_ARGS__)
++#define MSM_BUS_ERR(msg, ...) \
++      pr_err(msg, ## __VA_ARGS__)
++#define MSM_BUS_WARN(msg, ...) \
++      pr_warn(msg, ## __VA_ARGS__)
++#define MSM_FAB_ERR(msg, ...) \
++      dev_err(&fabric->fabdev.dev, msg, ## __VA_ARGS__)
++
++#define IS_MASTER_VALID(mas) \
++      (((mas >= MSM_BUS_MASTER_FIRST) && (mas <= MSM_BUS_MASTER_LAST)) \
++       ? 1 : 0)
++#define IS_SLAVE_VALID(slv) \
++      (((slv >= MSM_BUS_SLAVE_FIRST) && (slv <= MSM_BUS_SLAVE_LAST)) ? 1 : 0)
++
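++/*
++ * INTERLEAVED_BW() spreads a bandwidth vote across the given number of
++ * ports when the fabric has interleaving enabled (il_flag), preserving
++ * the sign of negative votes; KBTOB() converts KB/s votes to bytes/s.
++ */
++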
++#define INTERLEAVED_BW(fab_pdata, bw, ports) \
++      ((fab_pdata->il_flag) ? ((bw < 0) \
++      ? -msm_bus_div64((ports), (-bw)) : msm_bus_div64((ports), (bw))) : (bw))
++#define INTERLEAVED_VAL(fab_pdata, n) \
++      ((fab_pdata->il_flag) ? (n) : 1)
++#define KBTOB(a) (a * 1000ULL)
++
++enum msm_bus_dbg_op_type {
++      MSM_BUS_DBG_UNREGISTER = -2,
++      MSM_BUS_DBG_REGISTER,
++      MSM_BUS_DBG_OP = 1,
++};
++
++enum msm_bus_hw_sel {
++      MSM_BUS_RPM = 0,
++      MSM_BUS_NOC,
++      MSM_BUS_BIMC,
++};
++
++struct msm_bus_arb_ops {
++      uint32_t (*register_client)(struct msm_bus_scale_pdata *pdata);
++      int (*update_request)(uint32_t cl, unsigned int index);
++      void (*unregister_client)(uint32_t cl);
++};
++
++enum {
++      SLAVE_NODE,
++      MASTER_NODE,
++      CLK_NODE,
++      NR_LIM_NODE,
++};
++
++
++extern struct bus_type msm_bus_type;
++extern struct msm_bus_arb_ops arb_ops;
++extern void msm_bus_arb_setops_legacy(struct msm_bus_arb_ops *arb_ops);
++
++struct msm_bus_node_info {
++      unsigned int id;
++      unsigned int priv_id;
++      unsigned int mas_hw_id;
++      unsigned int slv_hw_id;
++      int gateway;
++      int *masterp;
++      int *qport;
++      int num_mports;
++      int *slavep;
++      int num_sports;
++      int *tier;
++      int num_tiers;
++      int ahb;
++      int hw_sel;
++      const char *slaveclk[NUM_CTX];
++      const char *memclk[NUM_CTX];
++      const char *iface_clk_node;
++      unsigned int buswidth;
++      unsigned int ws;
++      unsigned int mode;
++      unsigned int perm_mode;
++      unsigned int prio_lvl;
++      unsigned int prio_rd;
++      unsigned int prio_wr;
++      unsigned int prio1;
++      unsigned int prio0;
++      unsigned int num_thresh;
++      u64 *th;
++      u64 cur_lim_bw;
++      unsigned int mode_thresh;
++      bool dual_conf;
++      u64 *bimc_bw;
++      bool nr_lim;
++      u32 ff;
++      bool rt_mas;
++      u32 bimc_gp;
++      u32 bimc_thmp;
++      u64 floor_bw;
++      const char *name;
++};
++
++struct path_node {
++      uint64_t clk[NUM_CTX];
++      uint64_t bw[NUM_CTX];
++      uint64_t *sel_clk;
++      uint64_t *sel_bw;
++      int next;
++};
++
++struct msm_bus_link_info {
++      uint64_t clk[NUM_CTX];
++      uint64_t *sel_clk;
++      uint64_t memclk;
++      int64_t bw[NUM_CTX];
++      int64_t *sel_bw;
++      int *tier;
++      int num_tiers;
++};
++
++struct nodeclk {
++      struct clk *clk;
++      uint64_t rate;
++      bool dirty;
++      bool enable;
++};
++
++struct msm_bus_inode_info {
++      struct msm_bus_node_info *node_info;
++      uint64_t max_bw;
++      uint64_t max_clk;
++      uint64_t cur_lim_bw;
++      uint64_t cur_prg_bw;
++      struct msm_bus_link_info link_info;
++      int num_pnodes;
++      struct path_node *pnode;
++      int commit_index;
++      struct nodeclk nodeclk[NUM_CTX];
++      struct nodeclk memclk[NUM_CTX];
++      struct nodeclk iface_clk;
++      void *hw_data;
++};
++
++struct msm_bus_node_hw_info {
++      bool dirty;
++      unsigned int hw_id;
++      uint64_t bw;
++};
++
++struct msm_bus_hw_algorithm {
++      int (*allocate_commit_data)(struct msm_bus_fabric_registration
++              *fab_pdata, void **cdata, int ctx);
++      void *(*allocate_hw_data)(struct platform_device *pdev,
++              struct msm_bus_fabric_registration *fab_pdata);
++      void (*node_init)(void *hw_data, struct msm_bus_inode_info *info);
++      void (*free_commit_data)(void *cdata);
++      void (*update_bw)(struct msm_bus_inode_info *hop,
++              struct msm_bus_inode_info *info,
++              struct msm_bus_fabric_registration *fab_pdata,
++              void *sel_cdata, int *master_tiers,
++              int64_t add_bw);
++      void (*fill_cdata_buffer)(int *curr, char *buf, const int max_size,
++              void *cdata, int nmasters, int nslaves, int ntslaves);
++      int (*commit)(struct msm_bus_fabric_registration
++              *fab_pdata, void *hw_data, void **cdata);
++      int (*port_unhalt)(uint32_t haltid, uint8_t mport);
++      int (*port_halt)(uint32_t haltid, uint8_t mport);
++      void (*config_master)(struct msm_bus_fabric_registration *fab_pdata,
++              struct msm_bus_inode_info *info,
++              uint64_t req_clk, uint64_t req_bw);
++      void (*config_limiter)(struct msm_bus_fabric_registration *fab_pdata,
++              struct msm_bus_inode_info *info);
++      bool (*update_bw_reg)(int mode);
++};
++
++struct msm_bus_fabric_device {
++      int id;
++      const char *name;
++      struct device dev;
++      const struct msm_bus_fab_algorithm *algo;
++      const struct msm_bus_board_algorithm *board_algo;
++      struct msm_bus_hw_algorithm hw_algo;
++      int visited;
++      int num_nr_lim;
++      u64 nr_lim_thresh;
++      u32 eff_fact;
++};
++#define to_msm_bus_fabric_device(d) container_of(d, \
++              struct msm_bus_fabric_device, dev)
++
++struct msm_bus_fabric {
++      struct msm_bus_fabric_device fabdev;
++      int ahb;
++      void *cdata[NUM_CTX];
++      bool arb_dirty;
++      bool clk_dirty;
++      struct radix_tree_root fab_tree;
++      int num_nodes;
++      struct list_head gateways;
++      struct msm_bus_inode_info info;
++      struct msm_bus_fabric_registration *pdata;
++      void *hw_data;
++};
++#define to_msm_bus_fabric(d) container_of(d, \
++      struct msm_bus_fabric, fabdev)
++
++
++struct msm_bus_fab_algorithm {
++      int (*update_clks)(struct msm_bus_fabric_device *fabdev,
++              struct msm_bus_inode_info *pme, int index,
++              uint64_t curr_clk, uint64_t req_clk,
++              uint64_t bwsum, int flag, int ctx,
++              unsigned int cl_active_flag);
++      int (*port_halt)(struct msm_bus_fabric_device *fabdev, int portid);
++      int (*port_unhalt)(struct msm_bus_fabric_device *fabdev, int portid);
++      int (*commit)(struct msm_bus_fabric_device *fabdev);
++      struct msm_bus_inode_info *(*find_node)(struct msm_bus_fabric_device
++              *fabdev, int id);
++      struct msm_bus_inode_info *(*find_gw_node)(struct msm_bus_fabric_device
++              *fabdev, int id);
++      struct list_head *(*get_gw_list)(struct msm_bus_fabric_device *fabdev);
++      void (*update_bw)(struct msm_bus_fabric_device *fabdev, struct
++              msm_bus_inode_info * hop, struct msm_bus_inode_info *info,
++              int64_t add_bw, int *master_tiers, int ctx);
++      void (*config_master)(struct msm_bus_fabric_device *fabdev,
++              struct msm_bus_inode_info *info, uint64_t req_clk,
++              uint64_t req_bw);
++      void (*config_limiter)(struct msm_bus_fabric_device *fabdev,
++              struct msm_bus_inode_info *info);
++};
++
++struct msm_bus_board_algorithm {
++      int board_nfab;
++      void (*assign_iids)(struct msm_bus_fabric_registration *fabreg,
++              int fabid);
++      int (*get_iid)(int id);
++};
++
++/**
++ * Used to store the list of fabrics and other info to be
++ * maintained outside the fabric structure.
++ * Used while calculating path, and to find fabric ptrs
++ */
++struct msm_bus_fabnodeinfo {
++      struct list_head list;
++      struct msm_bus_inode_info *info;
++};
++
++struct msm_bus_client {
++      int id;
++      struct msm_bus_scale_pdata *pdata;
++      int *src_pnode;
++      int curr;
++};
++
++uint64_t msm_bus_div64(unsigned int width, uint64_t bw);
++int msm_bus_fabric_device_register(struct msm_bus_fabric_device *fabric);
++void msm_bus_fabric_device_unregister(struct msm_bus_fabric_device *fabric);
++struct msm_bus_fabric_device *msm_bus_get_fabric_device(int fabid);
++int msm_bus_get_num_fab(void);
++
++
++int msm_bus_hw_fab_init(struct msm_bus_fabric_registration *pdata,
++      struct msm_bus_hw_algorithm *hw_algo);
++void msm_bus_board_init(struct msm_bus_fabric_registration *pdata);
++void msm_bus_board_set_nfab(struct msm_bus_fabric_registration *pdata,
++      int nfab);
++#if defined(CONFIG_MSM_RPM_SMD)
++int msm_bus_rpm_hw_init(struct msm_bus_fabric_registration *pdata,
++      struct msm_bus_hw_algorithm *hw_algo);
++int msm_bus_remote_hw_commit(struct msm_bus_fabric_registration
++      *fab_pdata, void *hw_data, void **cdata);
++void msm_bus_rpm_fill_cdata_buffer(int *curr, char *buf, const int max_size,
++      void *cdata, int nmasters, int nslaves, int ntslaves);
++#else
++static inline int msm_bus_rpm_hw_init(struct msm_bus_fabric_registration *pdata,
++      struct msm_bus_hw_algorithm *hw_algo)
++{
++      return 0;
++}
++static inline int msm_bus_remote_hw_commit(struct msm_bus_fabric_registration
++      *fab_pdata, void *hw_data, void **cdata)
++{
++      return 0;
++}
++static inline void msm_bus_rpm_fill_cdata_buffer(int *curr, char *buf,
++      const int max_size, void *cdata, int nmasters, int nslaves,
++      int ntslaves)
++{
++}
++#endif
++
++int msm_bus_noc_hw_init(struct msm_bus_fabric_registration *pdata,
++      struct msm_bus_hw_algorithm *hw_algo);
++int msm_bus_bimc_hw_init(struct msm_bus_fabric_registration *pdata,
++      struct msm_bus_hw_algorithm *hw_algo);
++#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_MSM_BUS_SCALING)
++void msm_bus_dbg_client_data(struct msm_bus_scale_pdata *pdata, int index,
++      uint32_t cl);
++void msm_bus_dbg_commit_data(const char *fabname, void *cdata,
++      int nmasters, int nslaves, int ntslaves, int op);
++#else
++static inline void msm_bus_dbg_client_data(struct msm_bus_scale_pdata *pdata,
++      int index, uint32_t cl)
++{
++}
++static inline void msm_bus_dbg_commit_data(const char *fabname,
++      void *cdata, int nmasters, int nslaves, int ntslaves,
++      int op)
++{
++}
++#endif
++
++#ifdef CONFIG_CORESIGHT
++int msmbus_coresight_init(struct platform_device *pdev);
++void msmbus_coresight_remove(struct platform_device *pdev);
++int msmbus_coresight_init_adhoc(struct platform_device *pdev,
++              struct device_node *of_node);
++void msmbus_coresight_remove_adhoc(struct platform_device *pdev);
++#else
++static inline int msmbus_coresight_init(struct platform_device *pdev)
++{
++      return 0;
++}
++
++static inline void msmbus_coresight_remove(struct platform_device *pdev)
++{
++}
++
++static inline int msmbus_coresight_init_adhoc(struct platform_device *pdev,
++              struct device_node *of_node)
++{
++      return 0;
++}
++
++static inline void msmbus_coresight_remove_adhoc(struct platform_device *pdev)
++{
++}
++#endif
++
++
++#ifdef CONFIG_OF
++void msm_bus_of_get_nfab(struct platform_device *pdev,
++              struct msm_bus_fabric_registration *pdata);
++struct msm_bus_fabric_registration
++      *msm_bus_of_get_fab_data(struct platform_device *pdev);
++#else
++static inline void msm_bus_of_get_nfab(struct platform_device *pdev,
++              struct msm_bus_fabric_registration *pdata)
++{
++      return;
++}
++
++static inline struct msm_bus_fabric_registration
++      *msm_bus_of_get_fab_data(struct platform_device *pdev)
++{
++      return NULL;
++}
++#endif
++
++#endif /*_ARCH_ARM_MACH_MSM_BUS_CORE_H*/
+--- /dev/null
++++ b/drivers/bus/msm_bus/msm_bus_dbg.c
+@@ -0,0 +1,814 @@
++/* Copyright (c) 2010-2012, 2014, The Linux Foundation. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 and
++ * only version 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ */
++
++#define pr_fmt(fmt) "AXI: %s(): " fmt, __func__
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/seq_file.h>
++#include <linux/debugfs.h>
++#include <linux/slab.h>
++#include <linux/mutex.h>
++#include <linux/string.h>
++#include <linux/uaccess.h>
++#include <linux/hrtimer.h>
++#include "msm-bus-board.h"
++#include "msm-bus.h"
++#include "msm_bus_rules.h"
++#include "msm_bus_core.h"
++#include "msm_bus_adhoc.h"
++
++#define CREATE_TRACE_POINTS
++#include <trace/events/trace_msm_bus.h>
++
++#define MAX_BUFF_SIZE 4096
++#define FILL_LIMIT 128
++
++static struct dentry *clients;
++static struct dentry *dir;
++static DEFINE_MUTEX(msm_bus_dbg_fablist_lock);
++struct msm_bus_dbg_state {
++      uint32_t cl;
++      uint8_t enable;
++      uint8_t current_index;
++} clstate;
++
++struct msm_bus_cldata {
++      const struct msm_bus_scale_pdata *pdata;
++      int index;
++      uint32_t clid;
++      int size;
++      struct dentry *file;
++      struct list_head list;
++      char buffer[MAX_BUFF_SIZE];
++};
++
++struct msm_bus_fab_list {
++      const char *name;
++      int size;
++      struct dentry *file;
++      struct list_head list;
++      char buffer[MAX_BUFF_SIZE];
++};
++
++static char *rules_buf;
++
++LIST_HEAD(fabdata_list);
++LIST_HEAD(cl_list);
++
++/**
++ * The following structures and functions are used for
++ * the test-client which can be created at run-time.
++ */
++
++static struct msm_bus_vectors init_vectors[1];
++static struct msm_bus_vectors current_vectors[1];
++static struct msm_bus_vectors requested_vectors[1];
++
++static struct msm_bus_paths shell_client_usecases[] = {
++      {
++              .num_paths = ARRAY_SIZE(init_vectors),
++              .vectors = init_vectors,
++      },
++      {
++              .num_paths = ARRAY_SIZE(current_vectors),
++              .vectors = current_vectors,
++      },
++      {
++              .num_paths = ARRAY_SIZE(requested_vectors),
++              .vectors = requested_vectors,
++      },
++};
++
++static struct msm_bus_scale_pdata shell_client = {
++      .usecase = shell_client_usecases,
++      .num_usecases = ARRAY_SIZE(shell_client_usecases),
++      .name = "test-client",
++};
++
++static void msm_bus_dbg_init_vectors(void)
++{
++      init_vectors[0].src = -1;
++      init_vectors[0].dst = -1;
++      init_vectors[0].ab = 0;
++      init_vectors[0].ib = 0;
++      current_vectors[0].src = -1;
++      current_vectors[0].dst = -1;
++      current_vectors[0].ab = 0;
++      current_vectors[0].ib = 0;
++      requested_vectors[0].src = -1;
++      requested_vectors[0].dst = -1;
++      requested_vectors[0].ab = 0;
++      requested_vectors[0].ib = 0;
++      clstate.enable = 0;
++      clstate.current_index = 0;
++}
++
++static int msm_bus_dbg_update_cl_request(uint32_t cl)
++{
++      int ret = 0;
++
++      if (clstate.current_index < 2)
++              clstate.current_index = 2;
++      else {
++              clstate.current_index = 1;
++              current_vectors[0].ab = requested_vectors[0].ab;
++              current_vectors[0].ib = requested_vectors[0].ib;
++      }
++
++      if (clstate.enable) {
++              MSM_BUS_DBG("Updating request for shell client, index: %d\n",
++                      clstate.current_index);
++              ret = msm_bus_scale_client_update_request(clstate.cl,
++                      clstate.current_index);
++      } else
++              MSM_BUS_DBG("Enable bit not set. Skipping update request\n");
++
++      return ret;
++}
++
++static void msm_bus_dbg_unregister_client(uint32_t cl)
++{
++      MSM_BUS_DBG("Unregistering shell client\n");
++      msm_bus_scale_unregister_client(clstate.cl);
++      clstate.cl = 0;
++}
++
++static uint32_t msm_bus_dbg_register_client(void)
++{
++      int ret = 0;
++
++      if (init_vectors[0].src != requested_vectors[0].src) {
++              MSM_BUS_DBG("Shell client master changed. Unregistering\n");
++              msm_bus_dbg_unregister_client(clstate.cl);
++      }
++      if (init_vectors[0].dst != requested_vectors[0].dst) {
++              MSM_BUS_DBG("Shell client slave changed. Unregistering\n");
++              msm_bus_dbg_unregister_client(clstate.cl);
++      }
++
++      current_vectors[0].src = init_vectors[0].src;
++      requested_vectors[0].src = init_vectors[0].src;
++      current_vectors[0].dst = init_vectors[0].dst;
++      requested_vectors[0].dst = init_vectors[0].dst;
++
++      if (!clstate.enable) {
++              MSM_BUS_DBG("Enable bit not set, skipping registration: cl "
++                      "%d\n", clstate.cl);
++              return 0;
++      }
++
++      if (clstate.cl) {
++              MSM_BUS_DBG("Client registered, skipping registration\n");
++              return clstate.cl;
++      }
++
++      MSM_BUS_DBG("Registering shell client\n");
++      ret = msm_bus_scale_register_client(&shell_client);
++      return ret;
++}
++
++static int msm_bus_dbg_mas_get(void  *data, u64 *val)
++{
++      *val = init_vectors[0].src;
++      MSM_BUS_DBG("Get master: %llu\n", *val);
++      return 0;
++}
++
++static int msm_bus_dbg_mas_set(void  *data, u64 val)
++{
++      init_vectors[0].src = val;
++      MSM_BUS_DBG("Set master: %llu\n", val);
++      clstate.cl = msm_bus_dbg_register_client();
++      return 0;
++}
++DEFINE_SIMPLE_ATTRIBUTE(shell_client_mas_fops, msm_bus_dbg_mas_get,
++      msm_bus_dbg_mas_set, "%llu\n");
++
++static int msm_bus_dbg_slv_get(void  *data, u64 *val)
++{
++      *val = init_vectors[0].dst;
++      MSM_BUS_DBG("Get slave: %llu\n", *val);
++      return 0;
++}
++
++static int msm_bus_dbg_slv_set(void  *data, u64 val)
++{
++      init_vectors[0].dst = val;
++      MSM_BUS_DBG("Set slave: %llu\n", val);
++      clstate.cl = msm_bus_dbg_register_client();
++      return 0;
++}
++DEFINE_SIMPLE_ATTRIBUTE(shell_client_slv_fops, msm_bus_dbg_slv_get,
++      msm_bus_dbg_slv_set, "%llu\n");
++
++static int msm_bus_dbg_ab_get(void  *data, u64 *val)
++{
++      *val = requested_vectors[0].ab;
++      MSM_BUS_DBG("Get ab: %llu\n", *val);
++      return 0;
++}
++
++static int msm_bus_dbg_ab_set(void  *data, u64 val)
++{
++      requested_vectors[0].ab = val;
++      MSM_BUS_DBG("Set ab: %llu\n", val);
++      return 0;
++}
++DEFINE_SIMPLE_ATTRIBUTE(shell_client_ab_fops, msm_bus_dbg_ab_get,
++      msm_bus_dbg_ab_set, "%llu\n");
++
++static int msm_bus_dbg_ib_get(void  *data, u64 *val)
++{
++      *val = requested_vectors[0].ib;
++      MSM_BUS_DBG("Get ib: %llu\n", *val);
++      return 0;
++}
++
++static int msm_bus_dbg_ib_set(void  *data, u64 val)
++{
++      requested_vectors[0].ib = val;
++      MSM_BUS_DBG("Set ib: %llu\n", val);
++      return 0;
++}
++DEFINE_SIMPLE_ATTRIBUTE(shell_client_ib_fops, msm_bus_dbg_ib_get,
++      msm_bus_dbg_ib_set, "%llu\n");
++
++static int msm_bus_dbg_en_get(void  *data, u64 *val)
++{
++      *val = clstate.enable;
++      MSM_BUS_DBG("Get enable: %llu\n", *val);
++      return 0;
++}
++
++static int msm_bus_dbg_en_set(void  *data, u64 val)
++{
++      int ret = 0;
++
++      clstate.enable = val;
++      if (clstate.enable) {
++              if (!clstate.cl) {
++                      MSM_BUS_DBG("client: %u\n", clstate.cl);
++                      clstate.cl = msm_bus_dbg_register_client();
++                      if (clstate.cl)
++                              ret = msm_bus_dbg_update_cl_request(clstate.cl);
++              } else {
++                      MSM_BUS_DBG("update request for cl: %u\n", clstate.cl);
++                      ret = msm_bus_dbg_update_cl_request(clstate.cl);
++              }
++      }
++
++      MSM_BUS_DBG("Set enable: %llu\n", val);
++      return ret;
++}
++DEFINE_SIMPLE_ATTRIBUTE(shell_client_en_fops, msm_bus_dbg_en_get,
++      msm_bus_dbg_en_set, "%llu\n");
++
++/**
++ * The following functions are used for viewing the client data
++ * and changing the client request at run-time
++ */
++
++static ssize_t client_data_read(struct file *file, char __user *buf,
++      size_t count, loff_t *ppos)
++{
++      int bsize = 0;
++      uint32_t cl = (uint32_t)(uintptr_t)file->private_data;
++      struct msm_bus_cldata *cldata = NULL;
++      int found = 0;
++
++      list_for_each_entry(cldata, &cl_list, list) {
++              if (cldata->clid == cl) {
++                      found = 1;
++                      break;
++              }
++      }
++      if (!found)
++              return 0;
++
++      bsize = cldata->size;
++      return simple_read_from_buffer(buf, count, ppos,
++              cldata->buffer, bsize);
++}
++
++static int client_data_open(struct inode *inode, struct file *file)
++{
++      file->private_data = inode->i_private;
++      return 0;
++}
++
++static const struct file_operations client_data_fops = {
++      .open           = client_data_open,
++      .read           = client_data_read,
++};
++
++struct dentry *msm_bus_dbg_create(const char *name, mode_t mode,
++      struct dentry *dent, uint32_t clid)
++{
++      if (dent == NULL) {
++              MSM_BUS_DBG("debugfs not ready yet\n");
++              return NULL;
++      }
++      return debugfs_create_file(name, mode, dent, (void *)(uintptr_t)clid,
++              &client_data_fops);
++}
++
++#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_MSM_BUS_SCALING)
++static int msm_bus_dbg_record_client(const struct msm_bus_scale_pdata *pdata,
++      int index, uint32_t clid, struct dentry *file)
++{
++      struct msm_bus_cldata *cldata;
++
++      cldata = kmalloc(sizeof(struct msm_bus_cldata), GFP_KERNEL);
++      if (!cldata) {
++              MSM_BUS_DBG("Failed to allocate memory for client data\n");
++              return -ENOMEM;
++      }
++      cldata->pdata = pdata;
++      cldata->index = index;
++      cldata->clid = clid;
++      cldata->file = file;
++      cldata->size = 0;
++      list_add_tail(&cldata->list, &cl_list);
++      return 0;
++}
++
++static void msm_bus_dbg_free_client(uint32_t clid)
++{
++      struct msm_bus_cldata *cldata = NULL;
++
++      list_for_each_entry(cldata, &cl_list, list) {
++              if (cldata->clid == clid) {
++                      debugfs_remove(cldata->file);
++                      list_del(&cldata->list);
++                      kfree(cldata);
++                      break;
++              }
++      }
++}
++
++static int msm_bus_dbg_fill_cl_buffer(const struct msm_bus_scale_pdata *pdata,
++      int index, uint32_t clid)
++{
++      int i = 0, j;
++      char *buf = NULL;
++      struct msm_bus_cldata *cldata = NULL;
++      struct timespec ts;
++      int found = 0;
++
++      list_for_each_entry(cldata, &cl_list, list) {
++              if (cldata->clid == clid) {
++                      found = 1;
++                      break;
++              }
++      }
++
++      if (!found)
++              return -ENOENT;
++
++      if (cldata->file == NULL) {
++              if (pdata->name == NULL) {
++                      MSM_BUS_DBG("Client doesn't have a name\n");
++                      return -EINVAL;
++              }
++              cldata->file = msm_bus_dbg_create(pdata->name, S_IRUGO,
++                      clients, clid);
++      }
++
++      if (cldata->size < (MAX_BUFF_SIZE - FILL_LIMIT))
++              i = cldata->size;
++      else {
++              i = 0;
++              cldata->size = 0;
++      }
++      buf = cldata->buffer;
++      ts = ktime_to_timespec(ktime_get());
++      i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\n%d.%d\n",
++              (int)ts.tv_sec, (int)ts.tv_nsec);
++      i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "curr   : %d\n", index);
++      i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "masters: ");
++
++      for (j = 0; j < pdata->usecase->num_paths; j++)
++              i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "%d  ",
++                      pdata->usecase[index].vectors[j].src);
++      i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\nslaves : ");
++      for (j = 0; j < pdata->usecase->num_paths; j++)
++              i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "%d  ",
++                      pdata->usecase[index].vectors[j].dst);
++      i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\nab     : ");
++      for (j = 0; j < pdata->usecase->num_paths; j++)
++              i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "%llu  ",
++                      pdata->usecase[index].vectors[j].ab);
++      i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\nib     : ");
++      for (j = 0; j < pdata->usecase->num_paths; j++)
++              i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "%llu  ",
++                      pdata->usecase[index].vectors[j].ib);
++      i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\n");
++
++      for (j = 0; j < pdata->usecase->num_paths; j++)
++              trace_bus_update_request((int)ts.tv_sec, (int)ts.tv_nsec,
++              pdata->name, index,
++              pdata->usecase[index].vectors[j].src,
++              pdata->usecase[index].vectors[j].dst,
++              pdata->usecase[index].vectors[j].ab,
++              pdata->usecase[index].vectors[j].ib);
++
++      cldata->size = i;
++      return i;
++}
++#endif
++
++static int msm_bus_dbg_update_request(struct msm_bus_cldata *cldata, int index)
++{
++      int ret = 0;
++
++      if ((index < 0) || (index >= cldata->pdata->num_usecases)) {
++              MSM_BUS_DBG("Invalid index!\n");
++              return -EINVAL;
++      }
++      ret = msm_bus_scale_client_update_request(cldata->clid, index);
++      return ret;
++}
++
++static ssize_t  msm_bus_dbg_update_request_write(struct file *file,
++      const char __user *ubuf, size_t cnt, loff_t *ppos)
++{
++      struct msm_bus_cldata *cldata;
++      unsigned long index = 0;
++      int ret = 0;
++      char *chid;
++      char *buf = kmalloc((sizeof(char) * (cnt + 1)), GFP_KERNEL);
++      int found = 0;
++
++      if (!buf || IS_ERR(buf)) {
++              MSM_BUS_ERR("Memory allocation for buffer failed\n");
++              return -ENOMEM;
++      }
++      if (cnt == 0) {
++              kfree(buf);
++              return 0;
++      }
++      if (copy_from_user(buf, ubuf, cnt)) {
++              kfree(buf);
++              return -EFAULT;
++      }
++      buf[cnt] = '\0';
++      chid = buf;
++      MSM_BUS_DBG("buffer: %s\n size: %zu\n", buf, cnt);
++
++      list_for_each_entry(cldata, &cl_list, list) {
++              if (strnstr(chid, cldata->pdata->name, cnt)) {
++                      found = 1;
++                      strsep(&chid, " ");
++                      if (chid) {
++                              ret = kstrtoul(chid, 10, &index);
++                              if (ret) {
++                                      MSM_BUS_DBG("Index conversion"
++                                              " failed\n");
++                                      kfree(buf);
++                                      return -EFAULT;
++                              }
++                      } else {
++                              MSM_BUS_DBG("Error parsing input. Index not"
++                                      " found\n");
++                              found = 0;
++                      }
++                      break;
++              }
++      }
++
++      if (found)
++              msm_bus_dbg_update_request(cldata, index);
++      kfree(buf);
++      return cnt;
++}
++
++/**
++ * The following functions are used for viewing the commit data
++ * for each fabric
++ */
++static ssize_t fabric_data_read(struct file *file, char __user *buf,
++      size_t count, loff_t *ppos)
++{
++      struct msm_bus_fab_list *fablist = NULL;
++      int bsize = 0;
++      ssize_t ret;
++      const char *name = file->private_data;
++      int found = 0;
++
++      mutex_lock(&msm_bus_dbg_fablist_lock);
++      list_for_each_entry(fablist, &fabdata_list, list) {
++              if (strcmp(fablist->name, name) == 0) {
++                      found = 1;
++                      break;
++              }
++      }
++      if (!found) {
++              mutex_unlock(&msm_bus_dbg_fablist_lock);
++              return -ENOENT;
++      }
++      bsize = fablist->size;
++      ret = simple_read_from_buffer(buf, count, ppos,
++              fablist->buffer, bsize);
++      mutex_unlock(&msm_bus_dbg_fablist_lock);
++      return ret;
++}
++
++static const struct file_operations fabric_data_fops = {
++      .open           = client_data_open,
++      .read           = fabric_data_read,
++};
++
++static ssize_t rules_dbg_read(struct file *file, char __user *buf,
++      size_t count, loff_t *ppos)
++{
++      ssize_t ret;
++      memset(rules_buf, 0, MAX_BUFF_SIZE);
++      print_rules_buf(rules_buf, MAX_BUFF_SIZE);
++      ret = simple_read_from_buffer(buf, count, ppos,
++              rules_buf, MAX_BUFF_SIZE);
++      return ret;
++}
++
++static int rules_dbg_open(struct inode *inode, struct file *file)
++{
++      file->private_data = inode->i_private;
++      return 0;
++}
++
++static const struct file_operations rules_dbg_fops = {
++      .open           = rules_dbg_open,
++      .read           = rules_dbg_read,
++};
++
++#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_MSM_BUS_SCALING)
++static int msm_bus_dbg_record_fabric(const char *fabname, struct dentry *file)
++{
++      struct msm_bus_fab_list *fablist;
++      int ret = 0;
++
++      mutex_lock(&msm_bus_dbg_fablist_lock);
++      fablist = kmalloc(sizeof(struct msm_bus_fab_list), GFP_KERNEL);
++      if (!fablist) {
++              MSM_BUS_DBG("Failed to allocate memory for commit data\n");
++              ret =  -ENOMEM;
++              goto err;
++      }
++
++      fablist->name = fabname;
++      fablist->size = 0;
++      list_add_tail(&fablist->list, &fabdata_list);
++err:
++      mutex_unlock(&msm_bus_dbg_fablist_lock);
++      return ret;
++}
++
++static void msm_bus_dbg_free_fabric(const char *fabname)
++{
++      struct msm_bus_fab_list *fablist = NULL;
++
++      mutex_lock(&msm_bus_dbg_fablist_lock);
++      list_for_each_entry(fablist, &fabdata_list, list) {
++              if (strcmp(fablist->name, fabname) == 0) {
++                      debugfs_remove(fablist->file);
++                      list_del(&fablist->list);
++                      kfree(fablist);
++                      break;
++              }
++      }
++      mutex_unlock(&msm_bus_dbg_fablist_lock);
++}
++
++static int msm_bus_dbg_fill_fab_buffer(const char *fabname,
++      void *cdata, int nmasters, int nslaves,
++      int ntslaves)
++{
++      int i;
++      char *buf = NULL;
++      struct msm_bus_fab_list *fablist = NULL;
++      struct timespec ts;
++      int found = 0;
++
++      mutex_lock(&msm_bus_dbg_fablist_lock);
++      list_for_each_entry(fablist, &fabdata_list, list) {
++              if (strcmp(fablist->name, fabname) == 0) {
++                      found = 1;
++                      break;
++              }
++      }
++      if (!found) {
++              mutex_unlock(&msm_bus_dbg_fablist_lock);
++              return -ENOENT;
++      }
++
++      if (fablist->file == NULL) {
++              MSM_BUS_DBG("Fabric dbg entry does not exist\n");
++              mutex_unlock(&msm_bus_dbg_fablist_lock);
++              return -EFAULT;
++      }
++
++      if (fablist->size < MAX_BUFF_SIZE - 256)
++              i = fablist->size;
++      else {
++              i = 0;
++              fablist->size = 0;
++      }
++      buf = fablist->buffer;
++      mutex_unlock(&msm_bus_dbg_fablist_lock);
++      ts = ktime_to_timespec(ktime_get());
++      i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\n%d.%d\n",
++              (int)ts.tv_sec, (int)ts.tv_nsec);
++
++      msm_bus_rpm_fill_cdata_buffer(&i, buf, MAX_BUFF_SIZE, cdata,
++              nmasters, nslaves, ntslaves);
++      i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\n");
++      mutex_lock(&msm_bus_dbg_fablist_lock);
++      fablist->size = i;
++      mutex_unlock(&msm_bus_dbg_fablist_lock);
++      return 0;
++}
++#endif
++
++static const struct file_operations msm_bus_dbg_update_request_fops = {
++      .open = client_data_open,
++      .write = msm_bus_dbg_update_request_write,
++};
++
++#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_MSM_BUS_SCALING)
++/**
++ * msm_bus_dbg_client_data() - Add debug data for clients
++ * @pdata: Platform data of the client
++ * @index: The current index or operation to be performed
++ * @clid: Client handle obtained during registration
++ */
++void msm_bus_dbg_client_data(struct msm_bus_scale_pdata *pdata, int index,
++      uint32_t clid)
++{
++      struct dentry *file = NULL;
++
++      if (index == MSM_BUS_DBG_REGISTER) {
++              msm_bus_dbg_record_client(pdata, index, clid, file);
++              if (!pdata->name) {
++                      MSM_BUS_DBG("Cannot create debugfs entry. Null name\n");
++                      return;
++              }
++      } else if (index == MSM_BUS_DBG_UNREGISTER) {
++              msm_bus_dbg_free_client(clid);
++              MSM_BUS_DBG("Client %d unregistered\n", clid);
++      } else
++              msm_bus_dbg_fill_cl_buffer(pdata, index, clid);
++}
++EXPORT_SYMBOL(msm_bus_dbg_client_data);
++
++/**
++ * msm_bus_dbg_commit_data() - Add commit data from fabrics
++ * @fabname: Fabric name specified in platform data
++ * @cdata: Commit Data
++ * @nmasters: Number of masters attached to fabric
++ * @nslaves: Number of slaves attached to fabric
++ * @ntslaves: Number of tiered slaves attached to fabric
++ * @op: Operation to be performed
++ */
++void msm_bus_dbg_commit_data(const char *fabname, void *cdata,
++      int nmasters, int nslaves, int ntslaves, int op)
++{
++      struct dentry *file = NULL;
++
++      if (op == MSM_BUS_DBG_REGISTER)
++              msm_bus_dbg_record_fabric(fabname, file);
++      else if (op == MSM_BUS_DBG_UNREGISTER)
++              msm_bus_dbg_free_fabric(fabname);
++      else
++              msm_bus_dbg_fill_fab_buffer(fabname, cdata, nmasters,
++                      nslaves, ntslaves);
++}
++EXPORT_SYMBOL(msm_bus_dbg_commit_data);
++#endif
++
++static int __init msm_bus_debugfs_init(void)
++{
++      struct dentry *commit, *shell_client, *rules_dbg;
++      struct msm_bus_fab_list *fablist;
++      struct msm_bus_cldata *cldata = NULL;
++      uint64_t val = 0;
++
++      dir = debugfs_create_dir("msm-bus-dbg", NULL);
++      if ((!dir) || IS_ERR(dir)) {
++              MSM_BUS_ERR("Couldn't create msm-bus-dbg\n");
++              goto err;
++      }
++
++      clients = debugfs_create_dir("client-data", dir);
++      if ((!clients) || IS_ERR(clients)) {
++              MSM_BUS_ERR("Couldn't create clients\n");
++              goto err;
++      }
++
++      shell_client = debugfs_create_dir("shell-client", dir);
++      if ((!shell_client) || IS_ERR(shell_client)) {
++              MSM_BUS_ERR("Couldn't create shell-client\n");
++              goto err;
++      }
++
++      commit = debugfs_create_dir("commit-data", dir);
++      if ((!commit) || IS_ERR(commit)) {
++              MSM_BUS_ERR("Couldn't create commit\n");
++              goto err;
++      }
++
++      rules_dbg = debugfs_create_dir("rules-dbg", dir);
++      if ((!rules_dbg) || IS_ERR(rules_dbg)) {
++              MSM_BUS_ERR("Couldn't create rules-dbg\n");
++              goto err;
++      }
++
++      if (debugfs_create_file("print_rules", S_IRUGO | S_IWUSR,
++              rules_dbg, &val, &rules_dbg_fops) == NULL)
++              goto err;
++
++      if (debugfs_create_file("update_request", S_IRUGO | S_IWUSR,
++              shell_client, &val, &shell_client_en_fops) == NULL)
++              goto err;
++      if (debugfs_create_file("ib", S_IRUGO | S_IWUSR, shell_client, &val,
++              &shell_client_ib_fops) == NULL)
++              goto err;
++      if (debugfs_create_file("ab", S_IRUGO | S_IWUSR, shell_client, &val,
++              &shell_client_ab_fops) == NULL)
++              goto err;
++      if (debugfs_create_file("slv", S_IRUGO | S_IWUSR, shell_client,
++              &val, &shell_client_slv_fops) == NULL)
++              goto err;
++      if (debugfs_create_file("mas", S_IRUGO | S_IWUSR, shell_client,
++              &val, &shell_client_mas_fops) == NULL)
++              goto err;
++      if (debugfs_create_file("update-request", S_IRUGO | S_IWUSR,
++              clients, NULL, &msm_bus_dbg_update_request_fops) == NULL)
++              goto err;
++
++      rules_buf = kzalloc(MAX_BUFF_SIZE, GFP_KERNEL);
++      if (!rules_buf) {
++              MSM_BUS_ERR("Failed to alloc rules_buf");
++              goto err;
++      }
++
++      list_for_each_entry(cldata, &cl_list, list) {
++              if (cldata->pdata->name == NULL) {
++                      MSM_BUS_DBG("Client name not found\n");
++                      continue;
++              }
++              cldata->file = msm_bus_dbg_create(cldata->
++                      pdata->name, S_IRUGO, clients, cldata->clid);
++      }
++
++      mutex_lock(&msm_bus_dbg_fablist_lock);
++      list_for_each_entry(fablist, &fabdata_list, list) {
++              fablist->file = debugfs_create_file(fablist->name, S_IRUGO,
++                      commit, (void *)fablist->name, &fabric_data_fops);
++              if (fablist->file == NULL) {
++                      MSM_BUS_DBG("Cannot create files for commit data\n");
++                      kfree(rules_buf);
++                      goto err;
++              }
++      }
++      mutex_unlock(&msm_bus_dbg_fablist_lock);
++
++      msm_bus_dbg_init_vectors();
++      return 0;
++err:
++      debugfs_remove_recursive(dir);
++      return -ENODEV;
++}
++late_initcall(msm_bus_debugfs_init);
++
++static void __exit msm_bus_dbg_teardown(void)
++{
++      struct msm_bus_fab_list *fablist = NULL, *fablist_temp;
++      struct msm_bus_cldata *cldata = NULL, *cldata_temp;
++
++      debugfs_remove_recursive(dir);
++      list_for_each_entry_safe(cldata, cldata_temp, &cl_list, list) {
++              list_del(&cldata->list);
++              kfree(cldata);
++      }
++      mutex_lock(&msm_bus_dbg_fablist_lock);
++      list_for_each_entry_safe(fablist, fablist_temp, &fabdata_list, list) {
++              list_del(&fablist->list);
++              kfree(fablist);
++      }
++      kfree(rules_buf);
++      mutex_unlock(&msm_bus_dbg_fablist_lock);
++}
++module_exit(msm_bus_dbg_teardown);
++MODULE_DESCRIPTION("Debugfs for msm bus scaling client");
++MODULE_LICENSE("GPL v2");
++MODULE_AUTHOR("Gagan Mac <gmac@codeaurora.org>");
+--- /dev/null
++++ b/drivers/bus/msm_bus/msm_bus_fabric_adhoc.c
+@@ -0,0 +1,1281 @@
++/* Copyright (c) 2014, Linux Foundation. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 and
++ * only version 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ */
++
++#include <linux/clk.h>
++#include <linux/device.h>
++#include <linux/init.h>
++#include <linux/io.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/slab.h>
++#include "rpm-smd.h"
++#include "msm_bus_core.h"
++#include "msm_bus_adhoc.h"
++#include "msm_bus_noc.h"
++#include "msm_bus_bimc.h"
++
++ssize_t vrail_show(struct device *dev, struct device_attribute *attr,
++                        char *buf)
++{
++      struct msm_bus_node_info_type *node_info = NULL;
++      struct msm_bus_node_device_type *bus_node = NULL;
++
++      bus_node = dev->platform_data;
++      if (!bus_node)
++              return -EINVAL;
++      node_info = bus_node->node_info;
++
++      return snprintf(buf, PAGE_SIZE, "%u", node_info->vrail_comp);
++}
++
++ssize_t vrail_store(struct device *dev, struct device_attribute *attr,
++                         const char *buf, size_t count)
++{
++      struct msm_bus_node_info_type *node_info = NULL;
++      struct msm_bus_node_device_type *bus_node = NULL;
++      int ret = 0;
++
++      bus_node = dev->platform_data;
++      if (!bus_node)
++              return -EINVAL;
++      node_info = bus_node->node_info;
++
++      ret = sscanf(buf, "%u", &node_info->vrail_comp);
++      if (ret != 1)
++              return -EINVAL;
++      return count;
++}
++
++DEVICE_ATTR(vrail, 0600, vrail_show, vrail_store);
++
++struct static_rules_type {
++      int num_rules;
++      struct bus_rule_type *rules;
++};
++
++static struct static_rules_type static_rules;
++
++static int enable_nodeclk(struct nodeclk *nclk)
++{
++      int ret = 0;
++
++      if (!nclk->enable) {
++              ret = clk_prepare_enable(nclk->clk);
++
++              if (ret) {
++                      MSM_BUS_ERR("%s: failed to enable clk ", __func__);
++                      nclk->enable = false;
++              } else
++                      nclk->enable = true;
++      }
++      return ret;
++}
++
++static int disable_nodeclk(struct nodeclk *nclk)
++{
++      int ret = 0;
++
++      if (nclk->enable) {
++              clk_disable_unprepare(nclk->clk);
++              nclk->enable = false;
++      }
++      return ret;
++}
++
++static int setrate_nodeclk(struct nodeclk *nclk, long rate)
++{
++      int ret = 0;
++
++      ret = clk_set_rate(nclk->clk, rate);
++
++      if (ret)
++              MSM_BUS_ERR("%s: failed to setrate clk", __func__);
++      return ret;
++}
++
++static int msm_bus_agg_fab_clks(struct device *bus_dev, void *data)
++{
++      struct msm_bus_node_device_type *node = NULL;
++      int ret = 0;
++      int ctx = *(int *)data;
++
++      if (ctx >= NUM_CTX) {
++              MSM_BUS_ERR("%s: Invalid Context %d", __func__, ctx);
++              goto exit_agg_fab_clks;
++      }
++
++      node = bus_dev->platform_data;
++      if (!node) {
++              MSM_BUS_ERR("%s: Can't get device info", __func__);
++              goto exit_agg_fab_clks;
++      }
++
++      if (!node->node_info->is_fab_dev) {
++              struct msm_bus_node_device_type *bus_dev = NULL;
++
++              bus_dev = node->node_info->bus_device->platform_data;
++
++              if (node->cur_clk_hz[ctx] >= bus_dev->cur_clk_hz[ctx])
++                      bus_dev->cur_clk_hz[ctx] = node->cur_clk_hz[ctx];
++      }
++
++exit_agg_fab_clks:
++      return ret;
++}
++
++static int msm_bus_reset_fab_clks(struct device *bus_dev, void *data)
++{
++      struct msm_bus_node_device_type *node = NULL;
++      int ret = 0;
++      int ctx = *(int *)data;
++
++      if (ctx >= NUM_CTX) {
++              MSM_BUS_ERR("%s: Invalid Context %d", __func__, ctx);
++              goto exit_reset_fab_clks;
++      }
++
++      node = bus_dev->platform_data;
++      if (!node) {
++              MSM_BUS_ERR("%s: Can't get device info", __func__);
++              goto exit_reset_fab_clks;
++      }
++
++      if (node->node_info->is_fab_dev) {
++              node->cur_clk_hz[ctx] = 0;
++              MSM_BUS_DBG("Resetting for node %d", node->node_info->id);
++      }
++exit_reset_fab_clks:
++      return ret;
++}
++
++
++static int send_rpm_msg(struct device *device)
++{
++      int ret = 0;
++      int ctx;
++      int rsc_type;
++      struct msm_bus_node_device_type *ndev =
++                                      device->platform_data;
++      struct msm_rpm_kvp rpm_kvp;
++
++      if (!ndev) {
++              MSM_BUS_ERR("%s: Error getting node info.", __func__);
++              ret = -ENODEV;
++              goto exit_send_rpm_msg;
++      }
++
++      rpm_kvp.length = sizeof(uint64_t);
++      rpm_kvp.key = RPM_MASTER_FIELD_BW;
++
++      for (ctx = MSM_RPM_CTX_ACTIVE_SET; ctx <= MSM_RPM_CTX_SLEEP_SET;
++                                      ctx++) {
++              if (ctx == MSM_RPM_CTX_ACTIVE_SET)
++                      rpm_kvp.data =
++                      (uint8_t *)&ndev->node_ab.ab[MSM_RPM_CTX_ACTIVE_SET];
++              else {
++                      rpm_kvp.data =
++                      (uint8_t *) &ndev->node_ab.ab[MSM_RPM_CTX_SLEEP_SET];
++              }
++
++              if (ndev->node_info->mas_rpm_id != -1) {
++                      rsc_type = RPM_BUS_MASTER_REQ;
++                      ret = msm_rpm_send_message(ctx, rsc_type,
++                              ndev->node_info->mas_rpm_id, &rpm_kvp, 1);
++                      if (ret) {
++                              MSM_BUS_ERR("%s: Failed to send RPM message:",
++                                              __func__);
++                              MSM_BUS_ERR("%s:Node Id %d RPM id %d",
++                              __func__, ndev->node_info->id,
++                                       ndev->node_info->mas_rpm_id);
++                              goto exit_send_rpm_msg;
++                      }
++              }
++
++              if (ndev->node_info->slv_rpm_id != -1) {
++                      rsc_type = RPM_BUS_SLAVE_REQ;
++                      ret = msm_rpm_send_message(ctx, rsc_type,
++                              ndev->node_info->slv_rpm_id, &rpm_kvp, 1);
++                      if (ret) {
++                              MSM_BUS_ERR("%s: Failed to send RPM message:",
++                                                      __func__);
++                              MSM_BUS_ERR("%s: Node Id %d RPM id %d",
++                              __func__, ndev->node_info->id,
++                                      ndev->node_info->slv_rpm_id);
++                              goto exit_send_rpm_msg;
++                      }
++              }
++      }
++exit_send_rpm_msg:
++      return ret;
++}
++
++static int flush_bw_data(struct device *node_device, int ctx)
++{
++      struct msm_bus_node_device_type *node_info;
++      int ret = 0;
++
++      node_info = node_device->platform_data;
++      if (!node_info) {
++              MSM_BUS_ERR("%s: Unable to find bus device for device",
++                      __func__);
++              ret = -ENODEV;
++              goto exit_flush_bw_data;
++      }
++
++      if (node_info->node_ab.dirty) {
++              if (node_info->ap_owned) {
++                      struct msm_bus_node_device_type *bus_device =
++                              node_info->node_info->bus_device->platform_data;
++                      struct msm_bus_fab_device_type *fabdev =
++                                                      bus_device->fabdev;
++
++                      if (fabdev && fabdev->noc_ops.update_bw_reg &&
++                              fabdev->noc_ops.update_bw_reg
++                                      (node_info->node_info->qos_params.mode))
++                              ret = fabdev->noc_ops.set_bw(node_info,
++                                                      fabdev->qos_base,
++                                                      fabdev->base_offset,
++                                                      fabdev->qos_off,
++                                                      fabdev->qos_freq);
++              } else {
++                      ret = send_rpm_msg(node_device);
++
++                      if (ret)
++                              MSM_BUS_ERR("%s: Failed to send RPM msg for %d",
++                              __func__, node_info->node_info->id);
++              }
++              node_info->node_ab.dirty = false;
++      }
++
++exit_flush_bw_data:
++      return ret;
++
++}
++
++static int flush_clk_data(struct device *node_device, int ctx)
++{
++      struct msm_bus_node_device_type *node;
++      struct nodeclk *nodeclk = NULL;
++      int ret = 0;
++
++      node = node_device->platform_data;
++      if (!node) {
++              MSM_BUS_ERR("Unable to find bus device");
++              ret = -ENODEV;
++              goto exit_flush_clk_data;
++      }
++
++      nodeclk = &node->clk[ctx];
++      if (node->node_info->is_fab_dev) {
++              if (nodeclk->rate != node->cur_clk_hz[ctx]) {
++                      nodeclk->rate = node->cur_clk_hz[ctx];
++                      nodeclk->dirty = true;
++              }
++      }
++
++      if (nodeclk && nodeclk->clk && nodeclk->dirty) {
++              long rounded_rate;
++
++              if (nodeclk->rate) {
++                      rounded_rate = clk_round_rate(nodeclk->clk,
++                                                      nodeclk->rate);
++                      ret = setrate_nodeclk(nodeclk, rounded_rate);
++
++                      if (ret) {
++                              MSM_BUS_ERR("%s: Failed to set_rate %lu for %d",
++                                      __func__, rounded_rate,
++                                              node->node_info->id);
++                              ret = -ENODEV;
++                              goto exit_flush_clk_data;
++                      }
++
++                      ret = enable_nodeclk(nodeclk);
++              } else
++                      ret = disable_nodeclk(nodeclk);
++
++              if (ret) {
++                      MSM_BUS_ERR("%s: Failed to enable for %d", __func__,
++                                              node->node_info->id);
++                      ret = -ENODEV;
++                      goto exit_flush_clk_data;
++              }
++              MSM_BUS_DBG("%s: Updated %d clk to %llu", __func__,
++                              node->node_info->id, nodeclk->rate);
++
++      }
++exit_flush_clk_data:
++      /* Reset the aggregated clock rate for fab devices*/
++      if (node && node->node_info->is_fab_dev)
++              node->cur_clk_hz[ctx] = 0;
++
++      if (nodeclk)
++              nodeclk->dirty = 0;
++      return ret;
++}
++
++int msm_bus_commit_data(int *dirty_nodes, int ctx, int num_dirty)
++{
++      int ret = 0;
++      int i = 0;
++
++      /* Aggregate the bus clocks */
++      bus_for_each_dev(&msm_bus_type, NULL, (void *)&ctx,
++                              msm_bus_agg_fab_clks);
++
++      for (i = 0; i < num_dirty; i++) {
++              struct device *node_device =
++                                      bus_find_device(&msm_bus_type, NULL,
++                                              (void *)&dirty_nodes[i],
++                                              msm_bus_device_match_adhoc);
++
++              if (!node_device) {
++                      MSM_BUS_ERR("Can't find device for %d", dirty_nodes[i]);
++                      continue;
++              }
++
++              ret = flush_bw_data(node_device, ctx);
++              if (ret)
++                      MSM_BUS_ERR("%s: Error flushing bw data for node %d",
++                                      __func__, dirty_nodes[i]);
++
++              ret = flush_clk_data(node_device, ctx);
++              if (ret)
++                      MSM_BUS_ERR("%s: Error flushing clk data for node %d",
++                                      __func__, dirty_nodes[i]);
++      }
++      kfree(dirty_nodes);
++      /* Aggregate the bus clocks */
++      bus_for_each_dev(&msm_bus_type, NULL, (void *)&ctx,
++                              msm_bus_reset_fab_clks);
++      return ret;
++}
++
++void *msm_bus_realloc_devmem(struct device *dev, void *p, size_t old_size,
++                                      size_t new_size, gfp_t flags)
++{
++      void *ret;
++      size_t copy_size = old_size;
++
++      if (!new_size) {
++              devm_kfree(dev, p);
++              return ZERO_SIZE_PTR;
++      }
++
++      if (new_size < old_size)
++              copy_size = new_size;
++
++      ret = devm_kzalloc(dev, new_size, flags);
++      if (!ret) {
++              MSM_BUS_ERR("%s: Error Reallocating memory", __func__);
++              goto exit_realloc_devmem;
++      }
++
++      memcpy(ret, p, copy_size);
++      devm_kfree(dev, p);
++exit_realloc_devmem:
++      return ret;
++}
++
++
++static int add_dirty_node(int **dirty_nodes, int id, int *num_dirty)
++{
++      int i;
++      int found = 0;
++      int ret = 0;
++      int *dnode = NULL;
++
++      for (i = 0; i < *num_dirty; i++) {
++              if ((*dirty_nodes)[i] == id) {
++                      found = 1;
++                      break;
++              }
++      }
++
++      if (!found) {
++              (*num_dirty)++;
++              dnode =
++                      krealloc(*dirty_nodes, sizeof(int) * (*num_dirty),
++                                                              GFP_KERNEL);
++
++              if (ZERO_OR_NULL_PTR(dnode)) {
++                      MSM_BUS_ERR("%s: Failure allocating dirty nodes array",
++                                                               __func__);
++                      ret = -ENOMEM;
++              } else {
++                      *dirty_nodes = dnode;
++                      (*dirty_nodes)[(*num_dirty) - 1] = id;
++              }
++      }
++
++      return ret;
++}
++
++int msm_bus_update_bw(struct msm_bus_node_device_type *nodedev, int ctx,
++                      int64_t add_bw, int **dirty_nodes, int *num_dirty)
++{
++      int ret = 0;
++      int i, j;
++      uint64_t cur_ab_slp = 0;
++      uint64_t cur_ab_act = 0;
++
++      if (nodedev->node_info->virt_dev)
++              goto exit_update_bw;
++
++      for (i = 0; i < NUM_CTX; i++) {
++              for (j = 0; j < nodedev->num_lnodes; j++) {
++                      if (i == DUAL_CTX) {
++                              cur_ab_act +=
++                                      nodedev->lnode_list[j].lnode_ab[i];
++                              cur_ab_slp +=
++                                      nodedev->lnode_list[j].lnode_ab[i];
++                      } else
++                              cur_ab_act +=
++                                      nodedev->lnode_list[j].lnode_ab[i];
++              }
++      }
++
++      if (nodedev->node_ab.ab[MSM_RPM_CTX_ACTIVE_SET] != cur_ab_act) {
++              nodedev->node_ab.ab[MSM_RPM_CTX_ACTIVE_SET] = cur_ab_act;
++              nodedev->node_ab.ab[MSM_RPM_CTX_SLEEP_SET] = cur_ab_slp;
++              nodedev->node_ab.dirty = true;
++              ret = add_dirty_node(dirty_nodes, nodedev->node_info->id,
++                                                              num_dirty);
++
++              if (ret) {
++                      MSM_BUS_ERR("%s: Failed to add dirty node %d", __func__,
++                                              nodedev->node_info->id);
++                      goto exit_update_bw;
++              }
++      }
++
++exit_update_bw:
++      return ret;
++}
++
++int msm_bus_update_clks(struct msm_bus_node_device_type *nodedev,
++              int ctx, int **dirty_nodes, int *num_dirty)
++{
++      int status = 0;
++      struct nodeclk *nodeclk;
++      struct nodeclk *busclk;
++      struct msm_bus_node_device_type *bus_info = NULL;
++      uint64_t req_clk;
++
++      bus_info = nodedev->node_info->bus_device->platform_data;
++
++      if (!bus_info) {
++              MSM_BUS_ERR("%s: Unable to find bus device for device %d",
++                      __func__, nodedev->node_info->id);
++              status = -ENODEV;
++              goto exit_set_clks;
++      }
++
++      req_clk = nodedev->cur_clk_hz[ctx];
++      busclk = &bus_info->clk[ctx];
++
++      if (busclk->rate != req_clk) {
++              busclk->rate = req_clk;
++              busclk->dirty = 1;
++              MSM_BUS_DBG("%s: Modifying bus clk %d Rate %llu", __func__,
++                                      bus_info->node_info->id, req_clk);
++              status = add_dirty_node(dirty_nodes, bus_info->node_info->id,
++                                                              num_dirty);
++
++              if (status) {
++                      MSM_BUS_ERR("%s: Failed to add dirty node %d", __func__,
++                                              bus_info->node_info->id);
++                      goto exit_set_clks;
++              }
++      }
++
++      req_clk = nodedev->cur_clk_hz[ctx];
++      nodeclk = &nodedev->clk[ctx];
++
++      if (IS_ERR_OR_NULL(nodeclk))
++              goto exit_set_clks;
++
++      if (!nodeclk->dirty || (nodeclk->dirty && (nodeclk->rate < req_clk))) {
++              nodeclk->rate = req_clk;
++              nodeclk->dirty = 1;
++              MSM_BUS_DBG("%s: Modifying node clk %d Rate %llu", __func__,
++                                      nodedev->node_info->id, req_clk);
++              status = add_dirty_node(dirty_nodes, nodedev->node_info->id,
++                                                              num_dirty);
++              if (status) {
++                      MSM_BUS_ERR("%s: Failed to add dirty node %d", __func__,
++                                              nodedev->node_info->id);
++                      goto exit_set_clks;
++              }
++      }
++
++exit_set_clks:
++      return status;
++}
++
++static void msm_bus_fab_init_noc_ops(struct msm_bus_node_device_type *bus_dev)
++{
++      switch (bus_dev->fabdev->bus_type) {
++      case MSM_BUS_NOC:
++              msm_bus_noc_set_ops(bus_dev);
++              break;
++      case MSM_BUS_BIMC:
++              msm_bus_bimc_set_ops(bus_dev);
++              break;
++      default:
++              MSM_BUS_ERR("%s: Invalid Bus type", __func__);
++      }
++}
++
++static int msm_bus_qos_disable_clk(struct msm_bus_node_device_type *node,
++                              int disable_bus_qos_clk)
++{
++      struct msm_bus_node_device_type *bus_node = NULL;
++      int ret = 0;
++
++      if (!node) {
++              ret = -ENXIO;
++              goto exit_disable_qos_clk;
++      }
++
++      bus_node = node->node_info->bus_device->platform_data;
++
++      if (!bus_node) {
++              ret = -ENXIO;
++              goto exit_disable_qos_clk;
++      }
++
++      if (disable_bus_qos_clk)
++              ret = disable_nodeclk(&bus_node->clk[DUAL_CTX]);
++
++      if (ret) {
++              MSM_BUS_ERR("%s: Failed to disable bus clk, node %d",
++                      __func__, node->node_info->id);
++              goto exit_disable_qos_clk;
++      }
++
++      if (!IS_ERR_OR_NULL(node->qos_clk.clk)) {
++              ret = disable_nodeclk(&node->qos_clk);
++
++              if (ret) {
++                      MSM_BUS_ERR("%s: Failed to disable mas qos clk, node %d",
++                              __func__, node->node_info->id);
++                      goto exit_disable_qos_clk;
++              }
++      }
++
++exit_disable_qos_clk:
++      return ret;
++}
++
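++/*
++ * Enable the DUAL_CTX bus clock of the node's parent bus device (only if
++ * it has no rate set yet) and the node's own QoS clock. Returns 1 if the
++ * bus clock was enabled here, 0 if not, or a negative errno on failure;
++ * callers feed the result back into msm_bus_qos_disable_clk().
++ */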
++static int msm_bus_qos_enable_clk(struct msm_bus_node_device_type *node)
++{
++      struct msm_bus_node_device_type *bus_node = NULL;
++      long rounded_rate;
++      int ret = 0;
++      int bus_qos_enabled = 0;
++
++      if (!node) {
++              ret = -ENXIO;
++              goto exit_enable_qos_clk;
++      }
++
++      bus_node = node->node_info->bus_device->platform_data;
++
++      if (!bus_node) {
++              ret = -ENXIO;
++              goto exit_enable_qos_clk;
++      }
++
++      /* Check if the bus clk rate is already set before trying to set it.
++       * Do this only:
++       *      a. at bootup
++       *      b. for bus clocks
++       */
++      if (!clk_get_rate(bus_node->clk[DUAL_CTX].clk)) {
++              rounded_rate = clk_round_rate(bus_node->clk[DUAL_CTX].clk, 1);
++              ret = setrate_nodeclk(&bus_node->clk[DUAL_CTX], rounded_rate);
++              if (ret) {
++                      MSM_BUS_ERR("%s: Failed to set bus clk, node %d",
++                              __func__, node->node_info->id);
++                      goto exit_enable_qos_clk;
++              }
++
++              ret = enable_nodeclk(&bus_node->clk[DUAL_CTX]);
++              if (ret) {
++                      MSM_BUS_ERR("%s: Failed to enable bus clk, node %d",
++                              __func__, node->node_info->id);
++                      goto exit_enable_qos_clk;
++              }
++              bus_qos_enabled = 1;
++      }
++
++      if (!IS_ERR_OR_NULL(node->qos_clk.clk)) {
++              rounded_rate = clk_round_rate(node->qos_clk.clk, 1);
++              ret = setrate_nodeclk(&node->qos_clk, rounded_rate);
++              if (ret) {
++                      MSM_BUS_ERR("%s: Failed to enable mas qos clk, node %d",
++                              __func__, node->node_info->id);
++                      goto exit_enable_qos_clk;
++              }
++
++              ret = enable_nodeclk(&node->qos_clk);
++              if (ret) {
++                      MSM_BUS_ERR("Failed to enable mas qos clk, node %d ret %d",
++                              node->node_info->id, ret);
++                      goto exit_enable_qos_clk;
++              }
++      }
++      ret = bus_qos_enabled;
++
++exit_enable_qos_clk:
++      return ret;
++}
++
++int msm_bus_enable_limiter(struct msm_bus_node_device_type *node_dev,
++                              bool enable, uint64_t lim_bw)
++{
++      int ret = 0;
++      struct msm_bus_node_device_type *bus_node_dev;
++
++      if (!node_dev) {
++              MSM_BUS_ERR("No device specified");
++              ret = -ENXIO;
++              goto exit_enable_limiter;
++      }
++
++      if (!node_dev->ap_owned) {
++              MSM_BUS_ERR("Device is not AP owned %d.",
++                                              node_dev->node_info->id);
++              ret = -ENXIO;
++              goto exit_enable_limiter;
++      }
++
++      bus_node_dev = node_dev->node_info->bus_device->platform_data;
++      if (!bus_node_dev) {
++              MSM_BUS_ERR("Unable to get bus device info for %d",
++                      node_dev->node_info->id);
++              ret = -ENXIO;
++              goto exit_enable_limiter;
++      }
++      if (bus_node_dev->fabdev &&
++              bus_node_dev->fabdev->noc_ops.limit_mport) {
++              ret = msm_bus_qos_enable_clk(node_dev);
++              if (ret < 0) {
++                      MSM_BUS_ERR("Can't Enable QoS clk %d",
++                              node_dev->node_info->id);
++                      goto exit_enable_limiter;
++              }
++              bus_node_dev->fabdev->noc_ops.limit_mport(
++                              node_dev,
++                              bus_node_dev->fabdev->qos_base,
++                              bus_node_dev->fabdev->base_offset,
++                              bus_node_dev->fabdev->qos_off,
++                              bus_node_dev->fabdev->qos_freq,
++                              enable, lim_bw);
++              msm_bus_qos_disable_clk(node_dev, ret);
++      }
++
++exit_enable_limiter:
++      return ret;
++}
++
++static int msm_bus_dev_init_qos(struct device *dev, void *data)
++{
++      int ret = 0;
++      struct msm_bus_node_device_type *node_dev = NULL;
++
++      node_dev = dev->platform_data;
++
++      if (!node_dev) {
++              MSM_BUS_ERR("%s: Unable to get node device info", __func__);
++              ret = -ENXIO;
++              goto exit_init_qos;
++      }
++
++      MSM_BUS_DBG("Device = %d", node_dev->node_info->id);
++
++      if (node_dev->ap_owned) {
++              struct msm_bus_node_device_type *bus_node_info;
++
++              bus_node_info = node_dev->node_info->bus_device->platform_data;
++
++              if (!bus_node_info) {
++                      MSM_BUS_ERR("%s: Unable to get bus device info for %d",
++                              __func__,
++                              node_dev->node_info->id);
++                      ret = -ENXIO;
++                      goto exit_init_qos;
++              }
++
++              if (bus_node_info->fabdev &&
++                      bus_node_info->fabdev->noc_ops.qos_init) {
++                      int ret = 0;
++
++                      if (node_dev->ap_owned &&
++                              (node_dev->node_info->qos_params.mode) != -1) {
++
++                              if (bus_node_info->fabdev->bypass_qos_prg)
++                                      goto exit_init_qos;
++
++                              ret = msm_bus_qos_enable_clk(node_dev);
++                              if (ret < 0) {
++                                      MSM_BUS_ERR("Can't Enable QoS clk %d",
++                                      node_dev->node_info->id);
++                                      goto exit_init_qos;
++                              }
++
++                              bus_node_info->fabdev->noc_ops.qos_init(
++                                      node_dev,
++                                      bus_node_info->fabdev->qos_base,
++                                      bus_node_info->fabdev->base_offset,
++                                      bus_node_info->fabdev->qos_off,
++                                      bus_node_info->fabdev->qos_freq);
++                              msm_bus_qos_disable_clk(node_dev, ret);
++                      }
++              } else
++                      MSM_BUS_ERR("%s: Skipping QOS init for %d",
++                              __func__, node_dev->node_info->id);
++      }
++exit_init_qos:
++      return ret;
++}
++
++static int msm_bus_fabric_init(struct device *dev,
++                      struct msm_bus_node_device_type *pdata)
++{
++      struct msm_bus_fab_device_type *fabdev;
++      struct msm_bus_node_device_type *node_dev = NULL;
++      int ret = 0;
++
++      node_dev = dev->platform_data;
++      if (!node_dev) {
++              MSM_BUS_ERR("%s: Unable to get bus device info", __func__);
++              ret = -ENXIO;
++              goto exit_fabric_init;
++      }
++
++      if (node_dev->node_info->virt_dev) {
++              MSM_BUS_ERR("%s: Skip Fab init for virtual device %d", __func__,
++                                              node_dev->node_info->id);
++              goto exit_fabric_init;
++      }
++
++      fabdev = devm_kzalloc(dev, sizeof(struct msm_bus_fab_device_type),
++                                                              GFP_KERNEL);
++      if (!fabdev) {
++              MSM_BUS_ERR("Fabric alloc failed\n");
++              ret = -ENOMEM;
++              goto exit_fabric_init;
++      }
++
++      node_dev->fabdev = fabdev;
++      fabdev->pqos_base = pdata->fabdev->pqos_base;
++      fabdev->qos_range = pdata->fabdev->qos_range;
++      fabdev->base_offset = pdata->fabdev->base_offset;
++      fabdev->qos_off = pdata->fabdev->qos_off;
++      fabdev->qos_freq = pdata->fabdev->qos_freq;
++      fabdev->bus_type = pdata->fabdev->bus_type;
++      fabdev->bypass_qos_prg = pdata->fabdev->bypass_qos_prg;
++      fabdev->util_fact = pdata->fabdev->util_fact;
++      fabdev->vrail_comp = pdata->fabdev->vrail_comp;
++      msm_bus_fab_init_noc_ops(node_dev);
++
++      fabdev->qos_base = devm_ioremap(dev,
++                              fabdev->pqos_base, fabdev->qos_range);
++      if (!fabdev->qos_base) {
++              MSM_BUS_ERR("%s: Error remapping address 0x%zx: bus device %d",
++                      __func__,
++                       (size_t)fabdev->pqos_base, node_dev->node_info->id);
++              ret = -ENOMEM;
++              goto exit_fabric_init;
++      }
++
++      /*if (msmbus_coresight_init(pdev))
++              pr_warn("Coresight support absent for bus: %d\n", pdata->id);*/
++exit_fabric_init:
++      return ret;
++}
++
++static int msm_bus_init_clk(struct device *bus_dev,
++                              struct msm_bus_node_device_type *pdata)
++{
++      unsigned int ctx;
++      int ret = 0;
++      struct msm_bus_node_device_type *node_dev = bus_dev->platform_data;
++
++      for (ctx = 0; ctx < NUM_CTX; ctx++) {
++              if (!IS_ERR_OR_NULL(pdata->clk[ctx].clk)) {
++                      node_dev->clk[ctx].clk = pdata->clk[ctx].clk;
++                      node_dev->clk[ctx].enable = false;
++                      node_dev->clk[ctx].dirty = false;
++                      MSM_BUS_ERR("%s: Valid node clk node %d ctx %d",
++                              __func__, node_dev->node_info->id, ctx);
++              }
++      }
++
++      if (!IS_ERR_OR_NULL(pdata->qos_clk.clk)) {
++              node_dev->qos_clk.clk = pdata->qos_clk.clk;
++              node_dev->qos_clk.enable = false;
++              MSM_BUS_ERR("%s: Valid Iface clk node %d", __func__,
++                                              node_dev->node_info->id);
++      }
++
++      return ret;
++}
++
++static int msm_bus_copy_node_info(struct msm_bus_node_device_type *pdata,
++                              struct device *bus_dev)
++{
++      int ret = 0;
++      struct msm_bus_node_info_type *node_info = NULL;
++      struct msm_bus_node_info_type *pdata_node_info = NULL;
++      struct msm_bus_node_device_type *bus_node = NULL;
++
++      bus_node = bus_dev->platform_data;
++
++      if (!bus_node || !pdata) {
++              ret = -ENXIO;
++              MSM_BUS_ERR("%s: Invalid pointers pdata %p, bus_node %p",
++                      __func__, pdata, bus_node);
++              goto exit_copy_node_info;
++      }
++
++      node_info = bus_node->node_info;
++      pdata_node_info = pdata->node_info;
++
++      node_info->name = pdata_node_info->name;
++      node_info->id = pdata_node_info->id;
++      node_info->bus_device_id = pdata_node_info->bus_device_id;
++      node_info->mas_rpm_id = pdata_node_info->mas_rpm_id;
++      node_info->slv_rpm_id = pdata_node_info->slv_rpm_id;
++      node_info->num_connections = pdata_node_info->num_connections;
++      node_info->num_blist = pdata_node_info->num_blist;
++      node_info->num_qports = pdata_node_info->num_qports;
++      node_info->buswidth = pdata_node_info->buswidth;
++      node_info->virt_dev = pdata_node_info->virt_dev;
++      node_info->is_fab_dev = pdata_node_info->is_fab_dev;
++      node_info->qos_params.mode = pdata_node_info->qos_params.mode;
++      node_info->qos_params.prio1 = pdata_node_info->qos_params.prio1;
++      node_info->qos_params.prio0 = pdata_node_info->qos_params.prio0;
++      node_info->qos_params.prio_lvl = pdata_node_info->qos_params.prio_lvl;
++      node_info->qos_params.prio_rd = pdata_node_info->qos_params.prio_rd;
++      node_info->qos_params.prio_wr = pdata_node_info->qos_params.prio_wr;
++      node_info->qos_params.gp = pdata_node_info->qos_params.gp;
++      node_info->qos_params.thmp = pdata_node_info->qos_params.thmp;
++      node_info->qos_params.ws = pdata_node_info->qos_params.ws;
++      node_info->qos_params.bw_buffer = pdata_node_info->qos_params.bw_buffer;
++      node_info->util_fact = pdata_node_info->util_fact;
++      node_info->vrail_comp = pdata_node_info->vrail_comp;
++
++      node_info->dev_connections = devm_kzalloc(bus_dev,
++                      sizeof(struct device *) *
++                              pdata_node_info->num_connections,
++                      GFP_KERNEL);
++      if (!node_info->dev_connections) {
++              MSM_BUS_ERR("%s:Bus dev connections alloc failed\n", __func__);
++              ret = -ENOMEM;
++              goto exit_copy_node_info;
++      }
++
++      node_info->connections = devm_kzalloc(bus_dev,
++                      sizeof(int) * pdata_node_info->num_connections,
++                      GFP_KERNEL);
++      if (!node_info->connections) {
++              MSM_BUS_ERR("%s:Bus connections alloc failed\n", __func__);
++              devm_kfree(bus_dev, node_info->dev_connections);
++              ret = -ENOMEM;
++              goto exit_copy_node_info;
++      }
++
++      memcpy(node_info->connections,
++              pdata_node_info->connections,
++              sizeof(int) * pdata_node_info->num_connections);
++
++      node_info->black_connections = devm_kzalloc(bus_dev,
++                      sizeof(struct device *) *
++                              pdata_node_info->num_blist,
++                      GFP_KERNEL);
++      if (!node_info->black_connections) {
++              MSM_BUS_ERR("%s: Bus black connections alloc failed\n",
++                      __func__);
++              devm_kfree(bus_dev, node_info->dev_connections);
++              devm_kfree(bus_dev, node_info->connections);
++              ret = -ENOMEM;
++              goto exit_copy_node_info;
++      }
++
++      node_info->black_listed_connections = devm_kzalloc(bus_dev,
++                      pdata_node_info->num_blist * sizeof(int),
++                      GFP_KERNEL);
++      if (!node_info->black_listed_connections) {
++              MSM_BUS_ERR("%s:Bus black list connections alloc failed\n",
++                                      __func__);
++              devm_kfree(bus_dev, node_info->black_connections);
++              devm_kfree(bus_dev, node_info->dev_connections);
++              devm_kfree(bus_dev, node_info->connections);
++              ret = -ENOMEM;
++              goto exit_copy_node_info;
++      }
++
++      memcpy(node_info->black_listed_connections,
++              pdata_node_info->black_listed_connections,
++              sizeof(int) * pdata_node_info->num_blist);
++
++      node_info->qport = devm_kzalloc(bus_dev,
++                      sizeof(int) * pdata_node_info->num_qports,
++                      GFP_KERNEL);
++      if (!node_info->qport) {
++              MSM_BUS_ERR("%s:Bus qport allocation failed\n", __func__);
++              devm_kfree(bus_dev, node_info->dev_connections);
++              devm_kfree(bus_dev, node_info->connections);
++              devm_kfree(bus_dev, node_info->black_listed_connections);
++              ret = -ENOMEM;
++              goto exit_copy_node_info;
++      }
++
++      memcpy(node_info->qport,
++              pdata_node_info->qport,
++              sizeof(int) * pdata_node_info->num_qports);
++
++exit_copy_node_info:
++      return ret;
++}
++
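++/*
++ * Allocate and initialise a struct device for one bus node, copy the
++ * node info from the platform data and register the device on the
++ * msm_bus bus type. Returns the new device or NULL on failure.
++ */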
++static struct device *msm_bus_device_init(
++                      struct msm_bus_node_device_type *pdata)
++{
++      struct device *bus_dev = NULL;
++      struct msm_bus_node_device_type *bus_node = NULL;
++      struct msm_bus_node_info_type *node_info = NULL;
++      int ret = 0;
++
++      bus_dev = kzalloc(sizeof(struct device), GFP_KERNEL);
++      if (!bus_dev) {
++              MSM_BUS_ERR("%s:Device alloc failed\n", __func__);
++              bus_dev = NULL;
++              goto exit_device_init;
++      }
++      /*
++       * Init here so we can use devm calls
++       */
++      device_initialize(bus_dev);
++
++      bus_node = devm_kzalloc(bus_dev,
++                      sizeof(struct msm_bus_node_device_type), GFP_KERNEL);
++      if (!bus_node) {
++              MSM_BUS_ERR("%s:Bus node alloc failed\n", __func__);
++              kfree(bus_dev);
++              bus_dev = NULL;
++              goto exit_device_init;
++      }
++
++      node_info = devm_kzalloc(bus_dev,
++                      sizeof(struct msm_bus_node_info_type), GFP_KERNEL);
++      if (!node_info) {
++              MSM_BUS_ERR("%s:Bus node info alloc failed\n", __func__);
++              devm_kfree(bus_dev, bus_node);
++              kfree(bus_dev);
++              bus_dev = NULL;
++              goto exit_device_init;
++      }
++
++      bus_node->node_info = node_info;
++      bus_node->ap_owned = pdata->ap_owned;
++      bus_dev->platform_data = bus_node;
++
++      if (msm_bus_copy_node_info(pdata, bus_dev) < 0) {
++              devm_kfree(bus_dev, bus_node);
++              devm_kfree(bus_dev, node_info);
++              kfree(bus_dev);
++              bus_dev = NULL;
++              goto exit_device_init;
++      }
++
++      bus_dev->bus = &msm_bus_type;
++      dev_set_name(bus_dev, "%s", bus_node->node_info->name);
++
++      ret = device_add(bus_dev);
++      if (ret < 0) {
++              MSM_BUS_ERR("%s: Error registering device %d",
++                              __func__, pdata->node_info->id);
++              devm_kfree(bus_dev, bus_node);
++              devm_kfree(bus_dev, node_info->dev_connections);
++              devm_kfree(bus_dev, node_info->connections);
++              devm_kfree(bus_dev, node_info->black_connections);
++              devm_kfree(bus_dev, node_info->black_listed_connections);
++              devm_kfree(bus_dev, node_info);
++              kfree(bus_dev);
++              bus_dev = NULL;
++              goto exit_device_init;
++      }
++      device_create_file(bus_dev, &dev_attr_vrail);
++
++exit_device_init:
++      return bus_dev;
++}
++
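++/*
++ * Resolve the numeric ids of this node's parent bus, connections and
++ * blacklisted connections into device pointers by looking them up on
++ * the msm_bus bus type.
++ */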
++static int msm_bus_setup_dev_conn(struct device *bus_dev, void *data)
++{
++      struct msm_bus_node_device_type *bus_node = NULL;
++      int ret = 0;
++      int j;
++
++      bus_node = bus_dev->platform_data;
++      if (!bus_node) {
++              MSM_BUS_ERR("%s: Can't get device info", __func__);
++              ret = -ENODEV;
++              goto exit_setup_dev_conn;
++      }
++
++      /* Setup parent bus device for this node */
++      if (!bus_node->node_info->is_fab_dev) {
++              struct device *bus_parent_device =
++                      bus_find_device(&msm_bus_type, NULL,
++                              (void *)&bus_node->node_info->bus_device_id,
++                              msm_bus_device_match_adhoc);
++
++              if (!bus_parent_device) {
++                      MSM_BUS_ERR("%s: Error finding parent dev for node %d, parent %d",
++                              __func__,
++                              bus_node->node_info->id,
++                              bus_node->node_info->bus_device_id);
++                      ret = -ENXIO;
++                      goto exit_setup_dev_conn;
++              }
++              bus_node->node_info->bus_device = bus_parent_device;
++      }
++
++      bus_node->node_info->is_traversed = false;
++
++      for (j = 0; j < bus_node->node_info->num_connections; j++) {
++              bus_node->node_info->dev_connections[j] =
++                      bus_find_device(&msm_bus_type, NULL,
++                              (void *)&bus_node->node_info->connections[j],
++                              msm_bus_device_match_adhoc);
++
++              if (!bus_node->node_info->dev_connections[j]) {
++                      MSM_BUS_ERR("%s: Error finding conn %d for device %d",
++                              __func__, bus_node->node_info->connections[j],
++                               bus_node->node_info->id);
++                      ret = -ENODEV;
++                      goto exit_setup_dev_conn;
++              }
++      }
++
++      for (j = 0; j < bus_node->node_info->num_blist; j++) {
++              bus_node->node_info->black_connections[j] =
++                      bus_find_device(&msm_bus_type, NULL,
++                              (void *)&bus_node->node_info->
++                              black_listed_connections[j],
++                              msm_bus_device_match_adhoc);
++
++              if (!bus_node->node_info->black_connections[j]) {
++                      MSM_BUS_ERR("%s: Error finding conn %d for device %d\n",
++                              __func__, bus_node->node_info->
++                              black_listed_connections[j],
++                              bus_node->node_info->id);
++                      ret = -ENODEV;
++                      goto exit_setup_dev_conn;
++              }
++      }
++
++exit_setup_dev_conn:
++      return ret;
++}
++
++static int msm_bus_node_debug(struct device *bus_dev, void *data)
++{
++      int j;
++      int ret = 0;
++      struct msm_bus_node_device_type *bus_node = NULL;
++
++      bus_node = bus_dev->platform_data;
++      if (!bus_node) {
++              MSM_BUS_ERR("%s: Can't get device info", __func__);
++              ret = -ENODEV;
++              goto exit_node_debug;
++      }
++
++      MSM_BUS_DBG("Device = %d buswidth %u", bus_node->node_info->id,
++                              bus_node->node_info->buswidth);
++      for (j = 0; j < bus_node->node_info->num_connections; j++) {
++              struct msm_bus_node_device_type *bdev =
++                      (struct msm_bus_node_device_type *)
++                      bus_node->node_info->dev_connections[j]->platform_data;
++              MSM_BUS_DBG("\n\t Connection[%d] %d", j, bdev->node_info->id);
++      }
++
++exit_node_debug:
++      return ret;
++}
++
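++/*
++ * Probe: create a device for every node described in the device tree
++ * (or platform data), set up its clocks and fabric data, resolve the
++ * inter-node connections, apply the default QoS settings and register
++ * the adhoc arbitration ops.
++ */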
++static int msm_bus_device_probe(struct platform_device *pdev)
++{
++      unsigned int i;
++      int ret;
++      struct msm_bus_device_node_registration *pdata;
++
++      /* If possible, get pdata from device-tree */
++      if (pdev->dev.of_node)
++              pdata = msm_bus_of_to_pdata(pdev);
++      else {
++              pdata = (struct msm_bus_device_node_registration *)pdev->
++                      dev.platform_data;
++      }
++
++      if (IS_ERR_OR_NULL(pdata)) {
++              MSM_BUS_ERR("No platform data found");
++              ret = -ENODATA;
++              goto exit_device_probe;
++      }
++
++      for (i = 0; i < pdata->num_devices; i++) {
++              struct device *node_dev = NULL;
++
++              node_dev = msm_bus_device_init(&pdata->info[i]);
++
++              if (!node_dev) {
++                      MSM_BUS_ERR("%s: Error during dev init for %d",
++                              __func__, pdata->info[i].node_info->id);
++                      ret = -ENXIO;
++                      goto exit_device_probe;
++              }
++
++              ret = msm_bus_init_clk(node_dev, &pdata->info[i]);
++              /* Is this a fabric device? */
++              if (pdata->info[i].node_info->is_fab_dev) {
++                      MSM_BUS_DBG("%s: %d is a fab", __func__,
++                                              pdata->info[i].node_info->id);
++                      ret = msm_bus_fabric_init(node_dev, &pdata->info[i]);
++                      if (ret) {
++                              MSM_BUS_ERR("%s: Error initializing fab %d",
++                                      __func__, pdata->info[i].node_info->id);
++                              goto exit_device_probe;
++                      }
++              }
++      }
++
++      ret = bus_for_each_dev(&msm_bus_type, NULL, NULL,
++                                              msm_bus_setup_dev_conn);
++      if (ret) {
++              MSM_BUS_ERR("%s: Error setting up dev connections", __func__);
++              goto exit_device_probe;
++      }
++
++      ret = bus_for_each_dev(&msm_bus_type, NULL, NULL, msm_bus_dev_init_qos);
++      if (ret) {
++              MSM_BUS_ERR("%s: Error during qos init", __func__);
++              goto exit_device_probe;
++      }
++
++      bus_for_each_dev(&msm_bus_type, NULL, NULL, msm_bus_node_debug);
++
++      /* Register the arb layer ops */
++      msm_bus_arb_setops_adhoc(&arb_ops);
++      devm_kfree(&pdev->dev, pdata->info);
++      devm_kfree(&pdev->dev, pdata);
++exit_device_probe:
++      return ret;
++}
++
++static int msm_bus_device_rules_probe(struct platform_device *pdev)
++{
++      struct bus_rule_type *rule_data = NULL;
++      int num_rules = 0;
++
++      num_rules = msm_bus_of_get_static_rules(pdev, &rule_data);
++
++      if (!rule_data)
++              goto exit_rules_probe;
++
++      msm_rule_register(num_rules, rule_data, NULL);
++      static_rules.num_rules = num_rules;
++      static_rules.rules = rule_data;
++      pdev->dev.platform_data = &static_rules;
++
++exit_rules_probe:
++      return 0;
++}
++
++int msm_bus_device_rules_remove(struct platform_device *pdev)
++{
++      struct static_rules_type *static_rules = NULL;
++
++      static_rules = pdev->dev.platform_data;
++      if (static_rules)
++              msm_rule_unregister(static_rules->num_rules,
++                                      static_rules->rules, NULL);
++      return 0;
++}
++
++static int msm_bus_free_dev(struct device *dev, void *data)
++{
++      struct msm_bus_node_device_type *bus_node = NULL;
++
++      bus_node = dev->platform_data;
++
++      if (bus_node)
++              MSM_BUS_ERR("\n%s: Removing device %d", __func__,
++                                              bus_node->node_info->id);
++      device_unregister(dev);
++      return 0;
++}
++
++int msm_bus_device_remove(struct platform_device *pdev)
++{
++      bus_for_each_dev(&msm_bus_type, NULL, NULL, msm_bus_free_dev);
++      return 0;
++}
++
++static struct of_device_id rules_match[] = {
++      {.compatible = "qcom,msm-bus-static-bw-rules"},
++      {}
++};
++
++static struct platform_driver msm_bus_rules_driver = {
++      .probe = msm_bus_device_rules_probe,
++      .remove = msm_bus_device_rules_remove,
++      .driver = {
++              .name = "msm_bus_rules_device",
++              .owner = THIS_MODULE,
++              .of_match_table = rules_match,
++      },
++};
++
++static struct of_device_id fabric_match[] = {
++      {.compatible = "qcom,msm-bus-device"},
++      {}
++};
++
++static struct platform_driver msm_bus_device_driver = {
++      .probe = msm_bus_device_probe,
++      .remove = msm_bus_device_remove,
++      .driver = {
++              .name = "msm_bus_device",
++              .owner = THIS_MODULE,
++              .of_match_table = fabric_match,
++      },
++};
++
++int __init msm_bus_device_init_driver(void)
++{
++      int rc;
++
++      MSM_BUS_ERR("msm_bus_device_init_driver\n");
++      rc =  platform_driver_register(&msm_bus_device_driver);
++
++      if (rc) {
++              MSM_BUS_ERR("Failed to register bus device driver");
++              return rc;
++      }
++      return platform_driver_register(&msm_bus_rules_driver);
++}
++subsys_initcall(msm_bus_device_init_driver);
+--- /dev/null
++++ b/drivers/bus/msm_bus/msm_bus_id.c
+@@ -0,0 +1,94 @@
++/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 and
++ * only version 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ */
++
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/device.h>
++#include <linux/module.h>
++#include "msm-bus.h"
++#include "msm-bus-board.h"
++#include "msm_bus_core.h"
++#include "msm_bus_noc.h"
++#include "msm_bus_bimc.h"
++
++static uint32_t master_iids[MSM_BUS_MASTER_LAST];
++static uint32_t slave_iids[MSM_BUS_SLAVE_LAST - SLAVE_ID_KEY];
++
++static void msm_bus_assign_iids(struct msm_bus_fabric_registration
++      *fabreg, int fabid)
++{
++      int i;
++      for (i = 0; i < fabreg->len; i++) {
++              if (!fabreg->info[i].gateway) {
++                      fabreg->info[i].priv_id = fabid + fabreg->info[i].id;
++                      if (fabreg->info[i].id < SLAVE_ID_KEY) {
++                              if (fabreg->info[i].id >= MSM_BUS_MASTER_LAST) {
++                                      WARN(1, "id %d exceeds array size!\n",
++                                              fabreg->info[i].id);
++                                      continue;
++                              }
++
++                              master_iids[fabreg->info[i].id] =
++                                      fabreg->info[i].priv_id;
++                      } else {
++                              if ((fabreg->info[i].id - SLAVE_ID_KEY) >=
++                                      (MSM_BUS_SLAVE_LAST - SLAVE_ID_KEY)) {
++                                      WARN(1, "id %d exceeds array size!\n",
++                                              fabreg->info[i].id);
++                                      continue;
++                              }
++
++                              slave_iids[fabreg->info[i].id - (SLAVE_ID_KEY)]
++                                      = fabreg->info[i].priv_id;
++                      }
++              } else {
++                      fabreg->info[i].priv_id = fabreg->info[i].id;
++              }
++      }
++}
++
++static int msm_bus_get_iid(int id)
++{
++      if ((id < SLAVE_ID_KEY && id >= MSM_BUS_MASTER_LAST) ||
++              id >= MSM_BUS_SLAVE_LAST) {
++              MSM_BUS_ERR("Cannot get iid. Invalid id %d passed\n", id);
++              return -EINVAL;
++      }
++
++      return CHECK_ID(((id < SLAVE_ID_KEY) ? master_iids[id] :
++              slave_iids[id - SLAVE_ID_KEY]), id);
++}
++
++static struct msm_bus_board_algorithm msm_bus_id_algo = {
++      .get_iid = msm_bus_get_iid,
++      .assign_iids = msm_bus_assign_iids,
++};
++
++int msm_bus_board_rpm_get_il_ids(uint16_t *id)
++{
++      return -ENXIO;
++}
++
++void msm_bus_board_init(struct msm_bus_fabric_registration *pdata)
++{
++      pdata->board_algo = &msm_bus_id_algo;
++}
++
++void msm_bus_board_set_nfab(struct msm_bus_fabric_registration *pdata,
++      int nfab)
++{
++      if (nfab <= 0)
++              return;
++
++      msm_bus_id_algo.board_nfab = nfab;
++}
+--- /dev/null
++++ b/drivers/bus/msm_bus/msm_bus_noc.c
+@@ -0,0 +1,770 @@
++/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 and
++ * only version 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ */
++
++#define pr_fmt(fmt) "AXI: NOC: %s(): " fmt, __func__
++
++#include <linux/slab.h>
++#include <linux/io.h>
++#include "msm-bus-board.h"
++#include "msm_bus_core.h"
++#include "msm_bus_noc.h"
++#include "msm_bus_adhoc.h"
++
++/* NOC_QOS generic */
++#define __CLZ(x) ((8 * sizeof(uint32_t)) - 1 - __fls(x))
++#define SAT_SCALE 16  /* 16 bytes minimum for saturation */
++#define BW_SCALE  256 /* 1/256 byte per cycle unit */
++#define QOS_DEFAULT_BASEOFFSET                0x00003000
++#define QOS_DEFAULT_DELTA             0x80
++#define MAX_BW_FIELD (NOC_QOS_BWn_BW_BMSK >> NOC_QOS_BWn_BW_SHFT)
++#define MAX_SAT_FIELD (NOC_QOS_SATn_SAT_BMSK >> NOC_QOS_SATn_SAT_SHFT)
++
++#define NOC_QOS_REG_BASE(b, o)                ((b) + (o))
++
++#define NOC_QOS_ID_COREIDn_ADDR(b, o, n, d) \
++      (NOC_QOS_REG_BASE(b, o) + (d) * (n))
++enum noc_qos_id_coreidn {
++      NOC_QOS_ID_COREIDn_RMSK                 = 0xffffffff,
++      NOC_QOS_ID_COREIDn_MAXn                 = 32,
++      NOC_QOS_ID_COREIDn_CORECHSUM_BMSK       = 0xffffff00,
++      NOC_QOS_ID_COREIDn_CORECHSUM_SHFT       = 0x8,
++      NOC_QOS_ID_COREIDn_CORETYPEID_BMSK      = 0xff,
++      NOC_QOS_ID_COREIDn_CORETYPEID_SHFT      = 0x0,
++};
++
++#define NOC_QOS_ID_REVISIONIDn_ADDR(b, o, n, d) \
++      (NOC_QOS_REG_BASE(b, o) + 0x4 + (d) * (n))
++enum noc_qos_id_revisionidn {
++      NOC_QOS_ID_REVISIONIDn_RMSK             = 0xffffffff,
++      NOC_QOS_ID_REVISIONIDn_MAXn             = 32,
++      NOC_QOS_ID_REVISIONIDn_FLEXNOCID_BMSK   = 0xffffff00,
++      NOC_QOS_ID_REVISIONIDn_FLEXNOCID_SHFT   = 0x8,
++      NOC_QOS_ID_REVISIONIDn_USERID_BMSK      = 0xff,
++      NOC_QOS_ID_REVISIONIDn_USERID_SHFT      = 0x0,
++};
++
++#define NOC_QOS_PRIORITYn_ADDR(b, o, n, d)    \
++      (NOC_QOS_REG_BASE(b, o) + 0x8 + (d) * (n))
++enum noc_qos_id_priorityn {
++      NOC_QOS_PRIORITYn_RMSK          = 0x0000000f,
++      NOC_QOS_PRIORITYn_MAXn          = 32,
++      NOC_QOS_PRIORITYn_P1_BMSK       = 0xc,
++      NOC_QOS_PRIORITYn_P1_SHFT       = 0x2,
++      NOC_QOS_PRIORITYn_P0_BMSK       = 0x3,
++      NOC_QOS_PRIORITYn_P0_SHFT       = 0x0,
++};
++
++#define NOC_QOS_MODEn_ADDR(b, o, n, d) \
++      (NOC_QOS_REG_BASE(b, o) + 0xC + (d) * (n))
++enum noc_qos_id_moden_rmsk {
++      NOC_QOS_MODEn_RMSK              = 0x00000003,
++      NOC_QOS_MODEn_MAXn              = 32,
++      NOC_QOS_MODEn_MODE_BMSK         = 0x3,
++      NOC_QOS_MODEn_MODE_SHFT         = 0x0,
++};
++
++#define NOC_QOS_BWn_ADDR(b, o, n, d) \
++      (NOC_QOS_REG_BASE(b, o) + 0x10 + (d) * (n))
++enum noc_qos_id_bwn {
++      NOC_QOS_BWn_RMSK                = 0x0000ffff,
++      NOC_QOS_BWn_MAXn                = 32,
++      NOC_QOS_BWn_BW_BMSK             = 0xffff,
++      NOC_QOS_BWn_BW_SHFT             = 0x0,
++};
++
++/* QOS Saturation registers */
++#define NOC_QOS_SATn_ADDR(b, o, n, d) \
++      (NOC_QOS_REG_BASE(b, o) + 0x14 + (d) * (n))
++enum noc_qos_id_saturationn {
++      NOC_QOS_SATn_RMSK               = 0x000003ff,
++      NOC_QOS_SATn_MAXn               = 32,
++      NOC_QOS_SATn_SAT_BMSK           = 0x3ff,
++      NOC_QOS_SATn_SAT_SHFT           = 0x0,
++};
++
++static int noc_div(uint64_t *a, uint32_t b)
++{
++      if ((*a > 0) && (*a < b))
++              return 1;
++      else
++              return do_div(*a, b);
++}
++
++/*
++ * Calculate the bandwidth the hardware is using from the register value.
++ * The returned bw is in bytes/sec.
++ */
++static uint64_t noc_bw(uint32_t bw_field, uint32_t qos_freq)
++{
++      uint64_t res;
++      uint32_t rem, scale;
++
++      res = 2 * qos_freq * bw_field;
++      scale = BW_SCALE * 1000;
++      rem = noc_div(&res, scale);
++      MSM_BUS_DBG("NOC: Calculated bw: %llu\n", res * 1000000ULL);
++      return res * 1000000ULL;
++}
++
++static uint32_t noc_bw_ceil(long int bw_field, uint32_t qos_freq)
++{
++      uint64_t bw_temp = 2 * qos_freq * bw_field;
++      uint32_t scale = 1000 * BW_SCALE;
++      noc_div(&bw_temp, scale);
++      return bw_temp * 1000000;
++}
++#define MAX_BW(timebase) noc_bw_ceil(MAX_BW_FIELD, (timebase))
++
++/*
++ * Calculate the window size the hardware is using from the register values.
++ * The returned ws is in nanoseconds.
++ */
++static uint32_t noc_ws(uint64_t bw, uint32_t sat, uint32_t qos_freq)
++{
++      if (bw && qos_freq) {
++              uint32_t bwf = bw * qos_freq;
++              uint64_t scale = 1000000000000LL * BW_SCALE *
++                      SAT_SCALE * sat;
++              noc_div(&scale, bwf);
++              MSM_BUS_DBG("NOC: Calculated ws: %llu\n", scale);
++              return scale;
++      }
++
++      return 0;
++}
++#define MAX_WS(bw, timebase) noc_ws((bw), MAX_SAT_FIELD, (timebase))
++
++/* Calculate bandwidth field value for requested bandwidth  */
++static uint32_t noc_bw_field(uint64_t bw, uint32_t qos_freq)
++{
++      uint32_t bw_field = 0;
++
++      if (bw) {
++              uint32_t rem;
++              uint64_t bw_capped = min_t(uint64_t, bw, MAX_BW(qos_freq));
++              uint64_t bwc = bw_capped * BW_SCALE;
++              uint64_t qf = 2 * qos_freq * 1000;
++
++              rem = noc_div(&bwc, qf);
++              bw_field = (uint32_t)min_t(uint64_t, bwc, MAX_BW_FIELD);
++      }
++
++      MSM_BUS_DBG("NOC: bw_field: %u\n", bw_field);
++      return bw_field;
++}
++
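++/* Calculate saturation field value for requested bandwidth and window size */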
++static uint32_t noc_sat_field(uint64_t bw, uint32_t ws, uint32_t qos_freq)
++{
++      uint32_t sat_field = 0, win;
++
++      if (bw) {
++              /* Limit to max bw and scale bw to 100 KB increments */
++              uint64_t tbw, tscale;
++              uint64_t bw_scaled = min_t(uint64_t, bw, MAX_BW(qos_freq));
++              uint32_t rem = noc_div(&bw_scaled, 100000);
++
++              /**
++               * Calculate saturation from window size.
++               * WS must be at least one arb period.
++               * Saturation must not exceed max field size
++               *
++               * Bandwidth is in 100KB increments
++               * Window size is in ns
++               * qos_freq is in KHz
++               */
++              win = max(ws, 1000000 / qos_freq);
++              tbw = bw_scaled * win * qos_freq;
++              tscale = 10000000ULL * BW_SCALE * SAT_SCALE;
++              rem = noc_div(&tbw, tscale);
++              sat_field = (uint32_t)min_t(uint64_t, tbw, MAX_SAT_FIELD);
++      }
++
++      MSM_BUS_DBG("NOC: sat_field: %d\n", sat_field);
++      return sat_field;
++}
++
++static void noc_set_qos_mode(void __iomem *base, uint32_t qos_off,
++              uint32_t mport, uint32_t qos_delta, uint8_t mode,
++              uint8_t perm_mode)
++{
++      if (mode < NOC_QOS_MODE_MAX &&
++              ((1 << mode) & perm_mode)) {
++              uint32_t reg_val;
++
++              reg_val = readl_relaxed(NOC_QOS_MODEn_ADDR(base, qos_off,
++                      mport, qos_delta)) & NOC_QOS_MODEn_RMSK;
++              writel_relaxed(((reg_val & (~(NOC_QOS_MODEn_MODE_BMSK))) |
++                      (mode & NOC_QOS_MODEn_MODE_BMSK)),
++                      NOC_QOS_MODEn_ADDR(base, qos_off, mport, qos_delta));
++      }
++      /* Ensure qos mode is set before exiting */
++      wmb();
++}
++
++static void noc_set_qos_priority(void __iomem *base, uint32_t qos_off,
++              uint32_t mport, uint32_t qos_delta,
++              struct msm_bus_noc_qos_priority *priority)
++{
++      uint32_t reg_val, val;
++
++      reg_val = readl_relaxed(NOC_QOS_PRIORITYn_ADDR(base, qos_off, mport,
++              qos_delta)) & NOC_QOS_PRIORITYn_RMSK;
++      val = priority->p1 << NOC_QOS_PRIORITYn_P1_SHFT;
++      writel_relaxed(((reg_val & (~(NOC_QOS_PRIORITYn_P1_BMSK))) |
++              (val & NOC_QOS_PRIORITYn_P1_BMSK)),
++              NOC_QOS_PRIORITYn_ADDR(base, qos_off, mport, qos_delta));
++
++      reg_val = readl_relaxed(NOC_QOS_PRIORITYn_ADDR(base, qos_off, mport,
++                                                              qos_delta))
++              & NOC_QOS_PRIORITYn_RMSK;
++      writel_relaxed(((reg_val & (~(NOC_QOS_PRIORITYn_P0_BMSK))) |
++              (priority->p0 & NOC_QOS_PRIORITYn_P0_BMSK)),
++              NOC_QOS_PRIORITYn_ADDR(base, qos_off, mport, qos_delta));
++      /* Ensure qos priority is set before exiting */
++      wmb();
++}
++
++static void msm_bus_noc_set_qos_bw(void __iomem *base, uint32_t qos_off,
++              uint32_t qos_freq, uint32_t mport, uint32_t qos_delta,
++              uint8_t perm_mode, struct msm_bus_noc_qos_bw *qbw)
++{
++      uint32_t reg_val, val, mode;
++
++      if (!qos_freq) {
++              MSM_BUS_DBG("Zero QoS Freq\n");
++              return;
++      }
++
++      /* If Limiter or Regulator modes are not supported, bw is not available */
++      if (perm_mode & (NOC_QOS_PERM_MODE_LIMITER |
++              NOC_QOS_PERM_MODE_REGULATOR)) {
++              uint32_t bw_val = noc_bw_field(qbw->bw, qos_freq);
++              uint32_t sat_val = noc_sat_field(qbw->bw, qbw->ws,
++                      qos_freq);
++
++              MSM_BUS_DBG("NOC: BW: perm_mode: %d bw_val: %d, sat_val: %d\n",
++                      perm_mode, bw_val, sat_val);
++              /*
++               * If in Limiter/Regulator mode, first go to fixed mode.
++               * Clear QoS accumulator
++               **/
++              mode = readl_relaxed(NOC_QOS_MODEn_ADDR(base, qos_off,
++                      mport, qos_delta)) & NOC_QOS_MODEn_MODE_BMSK;
++              if (mode == NOC_QOS_MODE_REGULATOR || mode ==
++                      NOC_QOS_MODE_LIMITER) {
++                      reg_val = readl_relaxed(NOC_QOS_MODEn_ADDR(
++                              base, qos_off, mport, qos_delta));
++                      val = NOC_QOS_MODE_FIXED;
++                      writel_relaxed((reg_val & (~(NOC_QOS_MODEn_MODE_BMSK)))
++                              | (val & NOC_QOS_MODEn_MODE_BMSK),
++                              NOC_QOS_MODEn_ADDR(base, qos_off, mport,
++                                                              qos_delta));
++              }
++
++              reg_val = readl_relaxed(NOC_QOS_BWn_ADDR(base, qos_off, mport,
++                                                              qos_delta));
++              val = bw_val << NOC_QOS_BWn_BW_SHFT;
++              writel_relaxed(((reg_val & (~(NOC_QOS_BWn_BW_BMSK))) |
++                      (val & NOC_QOS_BWn_BW_BMSK)),
++                      NOC_QOS_BWn_ADDR(base, qos_off, mport, qos_delta));
++
++              MSM_BUS_DBG("NOC: BW: Wrote value: 0x%x\n", ((reg_val &
++                      (~NOC_QOS_BWn_BW_BMSK)) | (val &
++                      NOC_QOS_BWn_BW_BMSK)));
++
++              reg_val = readl_relaxed(NOC_QOS_SATn_ADDR(base, qos_off,
++                      mport, qos_delta));
++              val = sat_val << NOC_QOS_SATn_SAT_SHFT;
++              writel_relaxed(((reg_val & (~(NOC_QOS_SATn_SAT_BMSK))) |
++                      (val & NOC_QOS_SATn_SAT_BMSK)),
++                      NOC_QOS_SATn_ADDR(base, qos_off, mport, qos_delta));
++
++              MSM_BUS_DBG("NOC: SAT: Wrote value: 0x%x\n", ((reg_val &
++                      (~NOC_QOS_SATn_SAT_BMSK)) | (val &
++                      NOC_QOS_SATn_SAT_BMSK)));
++
++              /* Set mode back to what it was initially */
++              reg_val = readl_relaxed(NOC_QOS_MODEn_ADDR(base, qos_off,
++                      mport, qos_delta));
++              writel_relaxed((reg_val & (~(NOC_QOS_MODEn_MODE_BMSK)))
++                      | (mode & NOC_QOS_MODEn_MODE_BMSK),
++                      NOC_QOS_MODEn_ADDR(base, qos_off, mport, qos_delta));
++              /* Ensure that all writes for bandwidth registers have
++               * completed before returning
++               */
++              wmb();
++      }
++}
++
++uint8_t msm_bus_noc_get_qos_mode(void __iomem *base, uint32_t qos_off,
++      uint32_t mport, uint32_t qos_delta, uint32_t mode, uint32_t perm_mode)
++{
++      if (NOC_QOS_MODES_ALL_PERM == perm_mode)
++              return readl_relaxed(NOC_QOS_MODEn_ADDR(base, qos_off,
++                      mport, qos_delta)) & NOC_QOS_MODEn_MODE_BMSK;
++      else
++              return 31 - __CLZ(mode &
++                      NOC_QOS_MODES_ALL_PERM);
++}
++
++void msm_bus_noc_get_qos_priority(void __iomem *base, uint32_t qos_off,
++      uint32_t mport, uint32_t qos_delta,
++      struct msm_bus_noc_qos_priority *priority)
++{
++      priority->p1 = (readl_relaxed(NOC_QOS_PRIORITYn_ADDR(base, qos_off,
++              mport, qos_delta)) & NOC_QOS_PRIORITYn_P1_BMSK) >>
++              NOC_QOS_PRIORITYn_P1_SHFT;
++
++      priority->p0 = (readl_relaxed(NOC_QOS_PRIORITYn_ADDR(base, qos_off,
++              mport, qos_delta)) & NOC_QOS_PRIORITYn_P0_BMSK) >>
++              NOC_QOS_PRIORITYn_P0_SHFT;
++}
++
++void msm_bus_noc_get_qos_bw(void __iomem *base, uint32_t qos_off,
++      uint32_t qos_freq,
++      uint32_t mport, uint32_t qos_delta, uint8_t perm_mode,
++      struct msm_bus_noc_qos_bw *qbw)
++{
++      if (perm_mode & (NOC_QOS_PERM_MODE_LIMITER |
++              NOC_QOS_PERM_MODE_REGULATOR)) {
++              uint32_t bw_val = readl_relaxed(NOC_QOS_BWn_ADDR(
++                      base, qos_off, mport, qos_delta)) & NOC_QOS_BWn_BW_BMSK;
++              uint32_t sat = readl_relaxed(NOC_QOS_SATn_ADDR(
++                      base, qos_off, mport, qos_delta))
++                                              & NOC_QOS_SATn_SAT_BMSK;
++
++              qbw->bw = noc_bw(bw_val, qos_freq);
++              qbw->ws = noc_ws(qbw->bw, sat, qos_freq);
++      } else {
++              qbw->bw = 0;
++              qbw->ws = 0;
++      }
++}
++
++static int msm_bus_noc_mas_init(struct msm_bus_noc_info *ninfo,
++      struct msm_bus_inode_info *info)
++{
++      int i;
++      struct msm_bus_noc_qos_priority *prio;
++      prio = kzalloc(sizeof(struct msm_bus_noc_qos_priority),
++              GFP_KERNEL);
++      if (!prio) {
++              MSM_BUS_WARN("Couldn't alloc prio data for node: %d\n",
++                      info->node_info->id);
++              return -ENOMEM;
++      }
++
++      prio->read_prio = info->node_info->prio_rd;
++      prio->write_prio = info->node_info->prio_wr;
++      prio->p1 = info->node_info->prio1;
++      prio->p0 = info->node_info->prio0;
++      info->hw_data = (void *)prio;
++
++      if (!info->node_info->qport) {
++              MSM_BUS_DBG("No QoS Ports to init\n");
++              return 0;
++      }
++
++      for (i = 0; i < info->node_info->num_mports; i++) {
++              if (info->node_info->mode != NOC_QOS_MODE_BYPASS) {
++                      noc_set_qos_priority(ninfo->base, ninfo->qos_baseoffset,
++                              info->node_info->qport[i], ninfo->qos_delta,
++                              prio);
++
++                      if (info->node_info->mode != NOC_QOS_MODE_FIXED) {
++                              struct msm_bus_noc_qos_bw qbw;
++                              qbw.ws = info->node_info->ws;
++                              qbw.bw = 0;
++                              msm_bus_noc_set_qos_bw(ninfo->base,
++                                      ninfo->qos_baseoffset,
++                                      ninfo->qos_freq, info->node_info->
++                                      qport[i], ninfo->qos_delta,
++                                      info->node_info->perm_mode,
++                                      &qbw);
++                      }
++              }
++
++              noc_set_qos_mode(ninfo->base, ninfo->qos_baseoffset,
++                      info->node_info->qport[i], ninfo->qos_delta,
++                      info->node_info->mode,
++                      info->node_info->perm_mode);
++      }
++
++      return 0;
++}
++
++static void msm_bus_noc_node_init(void *hw_data,
++      struct msm_bus_inode_info *info)
++{
++      struct msm_bus_noc_info *ninfo =
++              (struct msm_bus_noc_info *)hw_data;
++
++      if (!IS_SLAVE(info->node_info->priv_id))
++              if (info->node_info->hw_sel != MSM_BUS_RPM)
++                      msm_bus_noc_mas_init(ninfo, info);
++}
++
++static int msm_bus_noc_allocate_commit_data(struct msm_bus_fabric_registration
++      *fab_pdata, void **cdata, int ctx)
++{
++      struct msm_bus_noc_commit **cd = (struct msm_bus_noc_commit **)cdata;
++      struct msm_bus_noc_info *ninfo =
++              (struct msm_bus_noc_info *)fab_pdata->hw_data;
++
++      *cd = kzalloc(sizeof(struct msm_bus_noc_commit), GFP_KERNEL);
++      if (!*cd) {
++              MSM_BUS_DBG("Couldn't alloc mem for cdata\n");
++              return -ENOMEM;
++      }
++
++      (*cd)->mas = ninfo->cdata[ctx].mas;
++      (*cd)->slv = ninfo->cdata[ctx].slv;
++
++      return 0;
++}
++
++static void *msm_bus_noc_allocate_noc_data(struct platform_device *pdev,
++      struct msm_bus_fabric_registration *fab_pdata)
++{
++      struct resource *noc_mem;
++      struct resource *noc_io;
++      struct msm_bus_noc_info *ninfo;
++      int i;
++
++      ninfo = kzalloc(sizeof(struct msm_bus_noc_info), GFP_KERNEL);
++      if (!ninfo) {
++              MSM_BUS_DBG("Couldn't alloc mem for noc info\n");
++              return NULL;
++      }
++
++      ninfo->nmasters = fab_pdata->nmasters;
++      ninfo->nqos_masters = fab_pdata->nmasters;
++      ninfo->nslaves = fab_pdata->nslaves;
++      ninfo->qos_freq = fab_pdata->qos_freq;
++
++      if (!fab_pdata->qos_baseoffset)
++              ninfo->qos_baseoffset = QOS_DEFAULT_BASEOFFSET;
++      else
++              ninfo->qos_baseoffset = fab_pdata->qos_baseoffset;
++
++      if (!fab_pdata->qos_delta)
++              ninfo->qos_delta = QOS_DEFAULT_DELTA;
++      else
++              ninfo->qos_delta = fab_pdata->qos_delta;
++
++      ninfo->mas_modes = kzalloc(sizeof(uint32_t) * fab_pdata->nmasters,
++              GFP_KERNEL);
++      if (!ninfo->mas_modes) {
++              MSM_BUS_DBG("Couldn't alloc mem for noc master-modes\n");
++              kfree(ninfo);
++              return NULL;
++      }
++
++      for (i = 0; i < NUM_CTX; i++) {
++              ninfo->cdata[i].mas = kzalloc(sizeof(struct
++                      msm_bus_node_hw_info) * fab_pdata->nmasters * 2,
++                      GFP_KERNEL);
++              if (!ninfo->cdata[i].mas) {
++                      MSM_BUS_DBG("Couldn't alloc mem for noc master-bw\n");
++                      kfree(ninfo->mas_modes);
++                      kfree(ninfo);
++                      return NULL;
++              }
++
++              ninfo->cdata[i].slv = kzalloc(sizeof(struct
++                      msm_bus_node_hw_info) * fab_pdata->nslaves * 2,
++                      GFP_KERNEL);
++              if (!ninfo->cdata[i].slv) {
++                      MSM_BUS_DBG("Couldn't alloc mem for noc slave-bw\n");
++                      kfree(ninfo->cdata[i].mas);
++                      goto err;
++              }
++      }
++
++      /* If it's a virtual fabric, don't get memory info */
++      if (fab_pdata->virt)
++              goto skip_mem;
++
++      noc_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++      if (!noc_mem && !fab_pdata->virt) {
++              MSM_BUS_ERR("Cannot get NoC Base address\n");
++              goto err;
++      }
++
++      noc_io = request_mem_region(noc_mem->start,
++                      resource_size(noc_mem), pdev->name);
++      if (!noc_io) {
++              MSM_BUS_ERR("NoC memory unavailable\n");
++              goto err;
++      }
++
++      ninfo->base = ioremap(noc_mem->start, resource_size(noc_mem));
++      if (!ninfo->base) {
++              MSM_BUS_ERR("IOremap failed for NoC!\n");
++              release_mem_region(noc_mem->start, resource_size(noc_mem));
++              goto err;
++      }
++
++skip_mem:
++      fab_pdata->hw_data = (void *)ninfo;
++      return (void *)ninfo;
++
++err:
++      kfree(ninfo->mas_modes);
++      kfree(ninfo);
++      return NULL;
++}
++
++static void free_commit_data(void *cdata)
++{
++      struct msm_bus_noc_commit *cd = (struct msm_bus_noc_commit *)cdata;
++
++      kfree(cd->mas);
++      kfree(cd->slv);
++      kfree(cd);
++}
++
++static bool msm_bus_noc_update_bw_reg(int mode)
++{
++      bool ret = false;
++
++      if ((mode == NOC_QOS_MODE_LIMITER) ||
++                      (mode == NOC_QOS_MODE_REGULATOR))
++              ret = true;
++
++      return ret;
++}
++
++static void msm_bus_noc_update_bw(struct msm_bus_inode_info *hop,
++      struct msm_bus_inode_info *info,
++      struct msm_bus_fabric_registration *fab_pdata,
++      void *sel_cdata, int *master_tiers,
++      int64_t add_bw)
++{
++      struct msm_bus_noc_info *ninfo;
++      struct msm_bus_noc_qos_bw qos_bw;
++      int i, ports;
++      int64_t bw;
++      struct msm_bus_noc_commit *sel_cd =
++              (struct msm_bus_noc_commit *)sel_cdata;
++
++      ninfo = (struct msm_bus_noc_info *)fab_pdata->hw_data;
++      if (!ninfo->qos_freq) {
++              MSM_BUS_DBG("NOC: No qos frequency to update bw\n");
++              return;
++      }
++
++      if (info->node_info->num_mports == 0) {
++              MSM_BUS_DBG("NOC: Skip Master BW\n");
++              goto skip_mas_bw;
++      }
++
++      ports = info->node_info->num_mports;
++      bw = INTERLEAVED_BW(fab_pdata, add_bw, ports);
++
++      MSM_BUS_DBG("NOC: Update bw for: %d: %lld\n",
++              info->node_info->priv_id, add_bw);
++      for (i = 0; i < ports; i++) {
++              sel_cd->mas[info->node_info->masterp[i]].bw += bw;
++              sel_cd->mas[info->node_info->masterp[i]].hw_id =
++                      info->node_info->mas_hw_id;
++              MSM_BUS_DBG("NOC: Update mas_bw: ID: %d, BW: %llu ports:%d\n",
++                      info->node_info->priv_id,
++                      sel_cd->mas[info->node_info->masterp[i]].bw,
++                      ports);
++              /* Check if info is a shared master.
++               * If it is, mark it dirty
++               * If it isn't, then set QOS Bandwidth
++               **/
++              if (info->node_info->hw_sel == MSM_BUS_RPM)
++                      sel_cd->mas[info->node_info->masterp[i]].dirty = 1;
++              else {
++                      if (!info->node_info->qport) {
++                              MSM_BUS_DBG("No qos ports to update!\n");
++                              break;
++                      }
++
++                      if (!((info->node_info->mode == NOC_QOS_MODE_REGULATOR)
++                                      || (info->node_info->mode ==
++                                              NOC_QOS_MODE_LIMITER))) {
++                              MSM_BUS_DBG("Skip QoS reg programming\n");
++                              break;
++                      }
++                      qos_bw.bw = sel_cd->mas[info->node_info->masterp[i]].
++                              bw;
++                      qos_bw.ws = info->node_info->ws;
++                      msm_bus_noc_set_qos_bw(ninfo->base,
++                              ninfo->qos_baseoffset,
++                              ninfo->qos_freq,
++                              info->node_info->qport[i], ninfo->qos_delta,
++                              info->node_info->perm_mode, &qos_bw);
++                      MSM_BUS_DBG("NOC: QoS: Update mas_bw: ws: %u\n",
++                              qos_bw.ws);
++              }
++      }
++
++skip_mas_bw:
++      ports = hop->node_info->num_sports;
++      for (i = 0; i < ports; i++) {
++              sel_cd->slv[hop->node_info->slavep[i]].bw += add_bw;
++              sel_cd->slv[hop->node_info->slavep[i]].hw_id =
++                      hop->node_info->slv_hw_id;
++              MSM_BUS_DBG("NOC: Update slave_bw for ID: %d -> %llu\n",
++                      hop->node_info->priv_id,
++                      sel_cd->slv[hop->node_info->slavep[i]].bw);
++              MSM_BUS_DBG("NOC: Update slave_bw for hw_id: %d, index: %d\n",
++                      hop->node_info->slv_hw_id, hop->node_info->slavep[i]);
++              /* Check if hop is a shared slave.
++               * If it is, mark it dirty
++               * If it isn't, then nothing to be done as the
++               * slaves are in bypass mode.
++               **/
++              if (hop->node_info->hw_sel == MSM_BUS_RPM)
++                      sel_cd->slv[hop->node_info->slavep[i]].dirty = 1;
++      }
++}
++
++static int msm_bus_noc_commit(struct msm_bus_fabric_registration
++      *fab_pdata, void *hw_data, void **cdata)
++{
++      MSM_BUS_DBG("\nReached NOC Commit\n");
++      msm_bus_remote_hw_commit(fab_pdata, hw_data, cdata);
++      return 0;
++}
++
++static int msm_bus_noc_port_halt(uint32_t haltid, uint8_t mport)
++{
++      return 0;
++}
++
++static int msm_bus_noc_port_unhalt(uint32_t haltid, uint8_t mport)
++{
++      return 0;
++}
++
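++/*
++ * Program the QoS priority, mode and, for non-fixed modes, the
++ * bandwidth/saturation registers for each QoS port of a master node.
++ */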
++static int msm_bus_noc_qos_init(struct msm_bus_node_device_type *info,
++                              void __iomem *qos_base,
++                              uint32_t qos_off, uint32_t qos_delta,
++                              uint32_t qos_freq)
++{
++      struct msm_bus_noc_qos_priority prio;
++      int ret = 0;
++      int i;
++
++      prio.p1 = info->node_info->qos_params.prio1;
++      prio.p0 = info->node_info->qos_params.prio0;
++
++      if (!info->node_info->qport) {
++              MSM_BUS_DBG("No QoS Ports to init\n");
++              ret = 0;
++              goto err_qos_init;
++      }
++
++      for (i = 0; i < info->node_info->num_qports; i++) {
++              if (info->node_info->qos_params.mode != NOC_QOS_MODE_BYPASS) {
++                      noc_set_qos_priority(qos_base, qos_off,
++                                      info->node_info->qport[i], qos_delta,
++                                      &prio);
++
++                      if (info->node_info->qos_params.mode !=
++                                                      NOC_QOS_MODE_FIXED) {
++                              struct msm_bus_noc_qos_bw qbw;
++                              qbw.ws = info->node_info->qos_params.ws;
++                              qbw.bw = 0;
++                              msm_bus_noc_set_qos_bw(qos_base, qos_off,
++                                      qos_freq,
++                                      info->node_info->qport[i],
++                                      qos_delta,
++                                      info->node_info->qos_params.mode,
++                                      &qbw);
++                      }
++              }
++
++              noc_set_qos_mode(qos_base, qos_off, info->node_info->qport[i],
++                              qos_delta, info->node_info->qos_params.mode,
++                              (1 << info->node_info->qos_params.mode));
++      }
++err_qos_init:
++      return ret;
++}
++
++static int msm_bus_noc_set_bw(struct msm_bus_node_device_type *dev,
++                              void __iomem *qos_base,
++                              uint32_t qos_off, uint32_t qos_delta,
++                              uint32_t qos_freq)
++{
++      int ret = 0;
++      uint64_t bw = 0;
++      int i;
++      struct msm_bus_node_info_type *info = dev->node_info;
++
++      if (info && info->num_qports &&
++              ((info->qos_params.mode == NOC_QOS_MODE_REGULATOR) ||
++              (info->qos_params.mode ==
++                      NOC_QOS_MODE_LIMITER))) {
++              struct msm_bus_noc_qos_bw qos_bw;
++
++              bw = msm_bus_div64(info->num_qports,
++                              dev->node_ab.ab[DUAL_CTX]);
++
++              for (i = 0; i < info->num_qports; i++) {
++                      if (!info->qport) {
++                              MSM_BUS_DBG("No qos ports to update!\n");
++                              break;
++                      }
++
++                      qos_bw.bw = bw;
++                      qos_bw.ws = info->qos_params.ws;
++                      msm_bus_noc_set_qos_bw(qos_base, qos_off, qos_freq,
++                              info->qport[i], qos_delta,
++                              info->qos_params.mode, &qos_bw);
++                      MSM_BUS_DBG("NOC: QoS: Update mas_bw: ws: %u\n",
++                              qos_bw.ws);
++              }
++      }
++      return ret;
++}
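
msm_bus_noc_set_bw() above splits the node's aggregated dual-context bandwidth evenly across its QoS ports before programming each port through msm_bus_noc_set_qos_bw(). A minimal standalone sketch of that split, assuming msm_bus_div64(n, x) computes x / n (its definition is not part of this patch) and using made-up numbers:

    /*
     * Per-port bandwidth split as done in msm_bus_noc_set_bw():
     * the aggregated bandwidth (bytes/s) is divided evenly across
     * the node's QoS ports. msm_bus_div64(n, x) is assumed to be x / n.
     */
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t ab = 600000000;        /* hypothetical aggregated bandwidth, B/s */
            uint32_t num_qports = 3;
            uint64_t per_port_bw = ab / num_qports; /* msm_bus_div64(num_qports, ab) */

            printf("per-port bw: %" PRIu64 " B/s\n", per_port_bw);
            return 0;
    }
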
++int msm_bus_noc_hw_init(struct msm_bus_fabric_registration *pdata,
++      struct msm_bus_hw_algorithm *hw_algo)
++{
++      /* Set interleaving to true by default */
++      pdata->il_flag = true;
++      hw_algo->allocate_commit_data = msm_bus_noc_allocate_commit_data;
++      hw_algo->allocate_hw_data = msm_bus_noc_allocate_noc_data;
++      hw_algo->node_init = msm_bus_noc_node_init;
++      hw_algo->free_commit_data = free_commit_data;
++      hw_algo->update_bw = msm_bus_noc_update_bw;
++      hw_algo->commit = msm_bus_noc_commit;
++      hw_algo->port_halt = msm_bus_noc_port_halt;
++      hw_algo->port_unhalt = msm_bus_noc_port_unhalt;
++      hw_algo->update_bw_reg = msm_bus_noc_update_bw_reg;
++      hw_algo->config_master = NULL;
++      hw_algo->config_limiter = NULL;
++
++      return 0;
++}
++
++int msm_bus_noc_set_ops(struct msm_bus_node_device_type *bus_dev)
++{
++      if (!bus_dev)
++              return -ENODEV;
++      else {
++              bus_dev->fabdev->noc_ops.qos_init = msm_bus_noc_qos_init;
++              bus_dev->fabdev->noc_ops.set_bw = msm_bus_noc_set_bw;
++              bus_dev->fabdev->noc_ops.limit_mport = NULL;
++              bus_dev->fabdev->noc_ops.update_bw_reg =
++                                              msm_bus_noc_update_bw_reg;
++      }
++      return 0;
++}
++EXPORT_SYMBOL(msm_bus_noc_set_ops);
+--- /dev/null
++++ b/drivers/bus/msm_bus/msm_bus_noc.h
+@@ -0,0 +1,76 @@
++/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 and
++ * only version 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ */
++
++#ifndef _ARCH_ARM_MACH_MSM_BUS_NOC_H
++#define _ARCH_ARM_MACH_MSM_BUS_NOC_H
++
++enum msm_bus_noc_qos_mode_type {
++      NOC_QOS_MODE_FIXED = 0,
++      NOC_QOS_MODE_LIMITER,
++      NOC_QOS_MODE_BYPASS,
++      NOC_QOS_MODE_REGULATOR,
++      NOC_QOS_MODE_MAX,
++};
++
++enum msm_bus_noc_qos_mode_perm {
++      NOC_QOS_PERM_MODE_FIXED = (1 << NOC_QOS_MODE_FIXED),
++      NOC_QOS_PERM_MODE_LIMITER = (1 << NOC_QOS_MODE_LIMITER),
++      NOC_QOS_PERM_MODE_BYPASS = (1 << NOC_QOS_MODE_BYPASS),
++      NOC_QOS_PERM_MODE_REGULATOR = (1 << NOC_QOS_MODE_REGULATOR),
++};
++
++#define NOC_QOS_MODES_ALL_PERM (NOC_QOS_PERM_MODE_FIXED | \
++      NOC_QOS_PERM_MODE_LIMITER | NOC_QOS_PERM_MODE_BYPASS | \
++      NOC_QOS_PERM_MODE_REGULATOR)
++
++struct msm_bus_noc_commit {
++      struct msm_bus_node_hw_info *mas;
++      struct msm_bus_node_hw_info *slv;
++};
++
++struct msm_bus_noc_info {
++      void __iomem *base;
++      uint32_t base_addr;
++      uint32_t nmasters;
++      uint32_t nqos_masters;
++      uint32_t nslaves;
++      uint32_t qos_freq; /* QOS Clock in KHz */
++      uint32_t qos_baseoffset;
++      uint32_t qos_delta;
++      uint32_t *mas_modes;
++      struct msm_bus_noc_commit cdata[NUM_CTX];
++};
++
++struct msm_bus_noc_qos_priority {
++      uint32_t high_prio;
++      uint32_t low_prio;
++      uint32_t read_prio;
++      uint32_t write_prio;
++      uint32_t p1;
++      uint32_t p0;
++};
++
++struct msm_bus_noc_qos_bw {
++      uint64_t bw; /* Bandwidth in bytes per second */
++      uint32_t ws; /* Window size in nano seconds */
++};
++
++void msm_bus_noc_init(struct msm_bus_noc_info *ninfo);
++uint8_t msm_bus_noc_get_qos_mode(void __iomem *base, uint32_t qos_off,
++      uint32_t mport, uint32_t qos_delta, uint32_t mode, uint32_t perm_mode);
++void msm_bus_noc_get_qos_priority(void __iomem *base, uint32_t qos_off,
++      uint32_t mport, uint32_t qos_delta,
++      struct msm_bus_noc_qos_priority *qprio);
++void msm_bus_noc_get_qos_bw(void __iomem *base, uint32_t qos_off,
++      uint32_t qos_freq, uint32_t mport, uint32_t qos_delta,
++      uint8_t perm_mode, struct msm_bus_noc_qos_bw *qbw);
++#endif /* _ARCH_ARM_MACH_MSM_BUS_NOC_H */
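
The two enums above are tied together by bit position: a permitted-mode mask is built as 1 << mode, which is what the NoC QoS code passes to noc_set_qos_mode() as (1 << qos_params.mode) and what msm_bus_of.c stores in perm_mode. A standalone sketch of that mapping, assuming nothing beyond the enum values in this header:

    /* mode -> permitted-mode bit, as used by the NoC QoS code above */
    #include <stdio.h>

    enum msm_bus_noc_qos_mode_type {
            NOC_QOS_MODE_FIXED = 0,
            NOC_QOS_MODE_LIMITER,
            NOC_QOS_MODE_BYPASS,
            NOC_QOS_MODE_REGULATOR,
            NOC_QOS_MODE_MAX,
    };

    int main(void)
    {
            unsigned int mode;

            for (mode = 0; mode < NOC_QOS_MODE_MAX; mode++)
                    printf("mode %u -> perm bit 0x%x\n", mode, 1u << mode);

            /* prints 0x1, 0x2, 0x4, 0x8; their OR equals NOC_QOS_MODES_ALL_PERM */
            return 0;
    }
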
+--- /dev/null
++++ b/drivers/bus/msm_bus/msm_bus_of.c
+@@ -0,0 +1,705 @@
++/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 and
++ * only version 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ */
++
++#define pr_fmt(fmt) "AXI: %s(): " fmt, __func__
++
++#include <linux/module.h>
++#include <linux/slab.h>
++#include <linux/string.h>
++#include <linux/of.h>
++#include <linux/of_device.h>
++#include <linux/platform_device.h>
++#include "msm-bus.h"
++#include "msm-bus-board.h"
++#include "msm_bus_core.h"
++
++static const char * const hw_sel_name[] = {"RPM", "NoC", "BIMC", NULL};
++static const char * const mode_sel_name[] = {"Fixed", "Limiter", "Bypass",
++                                              "Regulator", NULL};
++
++static int get_num(const char *const str[], const char *name)
++{
++      int i = 0;
++
++      do {
++              if (!strcmp(name, str[i]))
++                      return i;
++
++              i++;
++      } while (str[i] != NULL);
++
++      pr_err("Error: string %s not found\n", name);
++      return -EINVAL;
++}
++
++#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_MSM_BUS_SCALING)
++static struct msm_bus_scale_pdata *get_pdata(struct platform_device *pdev,
++      struct device_node *of_node)
++{
++      struct msm_bus_scale_pdata *pdata = NULL;
++      struct msm_bus_paths *usecase = NULL;
++      int i = 0, j, ret, num_usecases = 0, num_paths, len;
++      const uint32_t *vec_arr = NULL;
++      bool mem_err = false;
++
++      if (!pdev) {
++              pr_err("Error: Null Platform device\n");
++              return NULL;
++      }
++
++      pdata = devm_kzalloc(&pdev->dev, sizeof(struct msm_bus_scale_pdata),
++              GFP_KERNEL);
++      if (!pdata) {
++              pr_err("Error: Memory allocation for pdata failed\n");
++              mem_err = true;
++              goto err;
++      }
++
++      ret = of_property_read_string(of_node, "qcom,msm-bus,name",
++              &pdata->name);
++      if (ret) {
++              pr_err("Error: Client name not found\n");
++              goto err;
++      }
++
++      ret = of_property_read_u32(of_node, "qcom,msm-bus,num-cases",
++              &num_usecases);
++      if (ret) {
++              pr_err("Error: num-usecases not found\n");
++              goto err;
++      }
++
++      pdata->num_usecases = num_usecases;
++
++      if (of_property_read_bool(of_node, "qcom,msm-bus,active-only"))
++              pdata->active_only = 1;
++      else {
++              pr_debug("active_only flag absent.\n");
++              pr_debug("Using dual context by default\n");
++      }
++
++      usecase = devm_kzalloc(&pdev->dev, (sizeof(struct msm_bus_paths) *
++              pdata->num_usecases), GFP_KERNEL);
++      if (!usecase) {
++              pr_err("Error: Memory allocation for paths failed\n");
++              mem_err = true;
++              goto err;
++      }
++
++      ret = of_property_read_u32(of_node, "qcom,msm-bus,num-paths",
++              &num_paths);
++      if (ret) {
++              pr_err("Error: num_paths not found\n");
++              goto err;
++      }
++
++      vec_arr = of_get_property(of_node, "qcom,msm-bus,vectors-KBps", &len);
++      if (vec_arr == NULL) {
++              pr_err("Error: Vector array not found\n");
++              goto err;
++      }
++
++      if (len != num_usecases * num_paths * sizeof(uint32_t) * 4) {
++              pr_err("Error: Length-error on getting vectors\n");
++              goto err;
++      }
++
++      for (i = 0; i < num_usecases; i++) {
++              usecase[i].num_paths = num_paths;
++              usecase[i].vectors = devm_kzalloc(&pdev->dev, num_paths *
++                      sizeof(struct msm_bus_vectors), GFP_KERNEL);
++              if (!usecase[i].vectors) {
++                      mem_err = true;
++                      pr_err("Error: Mem alloc failure in vectors\n");
++                      goto err;
++              }
++
++              for (j = 0; j < num_paths; j++) {
++                      int index = ((i * num_paths) + j) * 4;
++                      usecase[i].vectors[j].src = be32_to_cpu(vec_arr[index]);
++                      usecase[i].vectors[j].dst =
++                              be32_to_cpu(vec_arr[index + 1]);
++                      usecase[i].vectors[j].ab = (uint64_t)
++                              KBTOB(be32_to_cpu(vec_arr[index + 2]));
++                      usecase[i].vectors[j].ib = (uint64_t)
++                              KBTOB(be32_to_cpu(vec_arr[index + 3]));
++              }
++      }
++
++      pdata->usecase = usecase;
++      return pdata;
++err:
++      if (mem_err) {
++              for (; i > 0; i--)
++                      devm_kfree(&pdev->dev, usecase[i-1].vectors);
++
++              if (usecase)
++                      devm_kfree(&pdev->dev, usecase);
++              if (pdata)
++                      devm_kfree(&pdev->dev, pdata);
++      }
++
++      return NULL;
++}
++
++/**
++ * msm_bus_cl_get_pdata() - Generate bus client data from device tree
++ * provided by clients.
++ *
++ * of_node: Device tree node to extract information from
++ *
++ * The function returns a valid pointer to the allocated bus-scale-pdata
++ * if the vectors were correctly read from the client's device node.
++ * Any error in reading or parsing the device node will return NULL
++ * to the caller.
++ */
++struct msm_bus_scale_pdata *msm_bus_cl_get_pdata(struct platform_device *pdev)
++{
++      struct device_node *of_node;
++      struct msm_bus_scale_pdata *pdata = NULL;
++
++      if (!pdev) {
++              pr_err("Error: Null Platform device\n");
++              return NULL;
++      }
++
++      of_node = pdev->dev.of_node;
++      pdata = get_pdata(pdev, of_node);
++      if (!pdata) {
++              pr_err("Error: client device tree is missing required bus-scaling entries\n");
++              return NULL;
++      }
++
++      return pdata;
++}
++EXPORT_SYMBOL(msm_bus_cl_get_pdata);
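
get_pdata() above expects "qcom,msm-bus,vectors-KBps" to be a flat array with four u32 cells per path per use case, in the order src, dst, ab, ib, where ab and ib are given in KB/s and scaled with KBTOB(). A host-side sketch of that index arithmetic; the values are made up, KBTOB() is assumed to multiply by 1000, and the be32_to_cpu() conversion done by the driver is omitted:

    /* Layout of "qcom,msm-bus,vectors-KBps" as parsed by get_pdata() */
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define KBTOB(a) ((uint64_t)(a) * 1000) /* assumed KB/s -> B/s scaling */

    int main(void)
    {
            const uint32_t vec_arr[] = {
                    1, 512, 0, 0,                   /* use case 0, path 0: idle */
                    1, 512, 100000, 200000,         /* use case 1, path 0: active */
            };
            const int num_usecases = 2, num_paths = 1;
            int i, j;

            for (i = 0; i < num_usecases; i++)
                    for (j = 0; j < num_paths; j++) {
                            int index = ((i * num_paths) + j) * 4;

                            printf("uc %d path %d: src %" PRIu32 " dst %" PRIu32
                                   " ab %" PRIu64 " ib %" PRIu64 "\n",
                                   i, j, vec_arr[index], vec_arr[index + 1],
                                   KBTOB(vec_arr[index + 2]),
                                   KBTOB(vec_arr[index + 3]));
                    }
            return 0;
    }
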
++
++/**
++ * msm_bus_cl_pdata_from_node() - Generate bus client data from device tree
++ * node provided by clients. This function should be used when a client
++ * driver needs to register multiple bus-clients from a single device-tree
++ * node associated with the platform-device.
++ *
++ * of_node: The subnode containing information about the bus scaling
++ * data
++ *
++ * pdev: Platform device associated with the device-tree node
++ *
++ * The function returns a valid pointer to the allocated bus-scale-pdata
++ * if the vectors were correctly read from the client's device node.
++ * Any error in reading or parsing the device node will return NULL
++ * to the caller.
++ */
++struct msm_bus_scale_pdata *msm_bus_pdata_from_node(
++              struct platform_device *pdev, struct device_node *of_node)
++{
++      struct msm_bus_scale_pdata *pdata = NULL;
++
++      if (!pdev) {
++              pr_err("Error: Null Platform device\n");
++              return NULL;
++      }
++
++      if (!of_node) {
++              pr_err("Error: Null of_node passed to bus driver\n");
++              return NULL;
++      }
++
++      pdata = get_pdata(pdev, of_node);
++      if (!pdata) {
++              pr_err("Error: client device tree is missing required bus-scaling entries\n");
++              return NULL;
++      }
++
++      return pdata;
++}
++EXPORT_SYMBOL(msm_bus_pdata_from_node);
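
As the comment above notes, msm_bus_pdata_from_node() is for drivers that keep several bus-scaling descriptions under one device node. A hedged in-kernel sketch of such a caller; the subnode layout and what each pdata is used for afterwards are assumptions, not part of this patch:

    /*
     * Illustrative caller only: walks the driver's own subnodes and parses
     * one bus-scale pdata per subnode via msm_bus_pdata_from_node().
     */
    static int example_parse_bus_clients(struct platform_device *pdev)
    {
            struct device_node *sub;
            struct msm_bus_scale_pdata *pdata;

            for_each_child_of_node(pdev->dev.of_node, sub) {
                    pdata = msm_bus_pdata_from_node(pdev, sub);
                    if (!pdata) {
                            of_node_put(sub);
                            return -EINVAL;
                    }
                    /* hand pdata to the bus-scaling registration API here */
            }
            return 0;
    }
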
++
++/**
++ * msm_bus_cl_clear_pdata() - Clear pdata allocated from device-tree
++ * of_node: Device tree node to extract information from
++ */
++void msm_bus_cl_clear_pdata(struct msm_bus_scale_pdata *pdata)
++{
++      int i;
++
++      for (i = 0; i < pdata->num_usecases; i++)
++              kfree(pdata->usecase[i].vectors);
++
++      kfree(pdata->usecase);
++      kfree(pdata);
++}
++EXPORT_SYMBOL(msm_bus_cl_clear_pdata);
++#endif
++
++static int *get_arr(struct platform_device *pdev,
++              const struct device_node *node, const char *prop,
++              int *nports)
++{
++      int size = 0, ret;
++      int *arr = NULL;
++
++      if (of_get_property(node, prop, &size)) {
++              *nports = size / sizeof(int);
++      } else {
++              pr_debug("Property %s not available\n", prop);
++              *nports = 0;
++              return NULL;
++      }
++
++      if (!size) {
++              *nports = 0;
++              return NULL;
++      }
++
++      arr = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
++      if (ZERO_OR_NULL_PTR(arr)) {
++              pr_err("Error: Failed to alloc mem for %s\n", prop);
++              return NULL;
++      }
++
++      ret = of_property_read_u32_array(node, prop, (u32 *)arr, *nports);
++      if (ret) {
++              pr_err("Error in reading property: %s\n", prop);
++              goto err;
++      }
++
++      return arr;
++err:
++      devm_kfree(&pdev->dev, arr);
++      return NULL;
++}
++
++static u64 *get_th_params(struct platform_device *pdev,
++              const struct device_node *node, const char *prop,
++              int *nports)
++{
++      int size = 0, ret;
++      u64 *ret_arr = NULL;
++      int *arr = NULL;
++      int i;
++
++      if (of_get_property(node, prop, &size)) {
++              *nports = size / sizeof(int);
++      } else {
++              pr_debug("Property %s not available\n", prop);
++              *nports = 0;
++              return NULL;
++      }
++
++      if (!size) {
++              *nports = 0;
++              return NULL;
++      }
++
++      ret_arr = devm_kzalloc(&pdev->dev, (*nports * sizeof(u64)),
++                                                      GFP_KERNEL);
++      if (ZERO_OR_NULL_PTR(ret_arr)) {
++              pr_err("Error: Failed to alloc mem for ret arr %s\n", prop);
++              return NULL;
++      }
++
++      arr = kzalloc(size, GFP_KERNEL);
++      if ((ZERO_OR_NULL_PTR(arr))) {
++              pr_err("Error: Failed to alloc temp mem for %s\n", prop);
++              return NULL;
++      }
++
++      ret = of_property_read_u32_array(node, prop, (u32 *)arr, *nports);
++      if (ret) {
++              pr_err("Error in reading property: %s\n", prop);
++              goto err;
++      }
++
++      for (i = 0; i < *nports; i++)
++              ret_arr[i] = (uint64_t)KBTOB(arr[i]);
++
++      MSM_BUS_DBG("%s: num entries %d prop %s", __func__, *nports, prop);
++
++      for (i = 0; i < *nports; i++)
++              MSM_BUS_DBG("Th %d val %llu", i, ret_arr[i]);
++
++      kfree(arr);
++      return ret_arr;
++err:
++      kfree(arr);
++      devm_kfree(&pdev->dev, ret_arr);
++      return NULL;
++}
++
++static struct msm_bus_node_info *get_nodes(struct device_node *of_node,
++      struct platform_device *pdev,
++      struct msm_bus_fabric_registration *pdata)
++{
++      struct msm_bus_node_info *info;
++      struct device_node *child_node = NULL;
++      int i = 0, ret;
++      int num_bw = 0;
++      u32 temp;
++
++      for_each_child_of_node(of_node, child_node) {
++              i++;
++      }
++
++      pdata->len = i;
++      info = (struct msm_bus_node_info *)
++              devm_kzalloc(&pdev->dev, sizeof(struct msm_bus_node_info) *
++                      pdata->len, GFP_KERNEL);
++      if (ZERO_OR_NULL_PTR(info)) {
++              pr_err("Failed to alloc memory for nodes: %d\n", pdata->len);
++              goto err;
++      }
++
++      i = 0;
++      child_node = NULL;
++      for_each_child_of_node(of_node, child_node) {
++              const char *sel_str;
++
++              ret = of_property_read_string(child_node, "label",
++                      &info[i].name);
++              if (ret)
++                      pr_err("Error reading node label\n");
++
++              ret = of_property_read_u32(child_node, "cell-id", &info[i].id);
++              if (ret) {
++                      pr_err("Error reading node id\n");
++                      goto err;
++              }
++
++              if (of_property_read_bool(child_node, "qcom,gateway"))
++                      info[i].gateway = 1;
++
++              of_property_read_u32(child_node, "qcom,mas-hw-id",
++                      &info[i].mas_hw_id);
++
++              of_property_read_u32(child_node, "qcom,slv-hw-id",
++                      &info[i].slv_hw_id);
++              info[i].masterp = get_arr(pdev, child_node,
++                                      "qcom,masterp", &info[i].num_mports);
++              /* No need to store number of qports */
++              info[i].qport = get_arr(pdev, child_node,
++                                      "qcom,qport", &ret);
++              pdata->nmasters += info[i].num_mports;
++
++
++              info[i].slavep = get_arr(pdev, child_node,
++                                      "qcom,slavep", &info[i].num_sports);
++              pdata->nslaves += info[i].num_sports;
++
++
++              info[i].tier = get_arr(pdev, child_node,
++                                      "qcom,tier", &info[i].num_tiers);
++
++              if (of_property_read_bool(child_node, "qcom,ahb"))
++                      info[i].ahb = 1;
++
++              ret = of_property_read_string(child_node, "qcom,hw-sel",
++                      &sel_str);
++              if (ret)
++                      info[i].hw_sel = 0;
++              else {
++                      ret =  get_num(hw_sel_name, sel_str);
++                      if (ret < 0) {
++                              pr_err("Invalid hw-sel\n");
++                              goto err;
++                      }
++
++                      info[i].hw_sel = ret;
++              }
++
++              of_property_read_u32(child_node, "qcom,buswidth",
++                      &info[i].buswidth);
++              of_property_read_u32(child_node, "qcom,ws", &info[i].ws);
++
++              info[i].dual_conf =
++                      of_property_read_bool(child_node, "qcom,dual-conf");
++
++
++              info[i].th = get_th_params(pdev, child_node, "qcom,thresh",
++                                              &info[i].num_thresh);
++
++              info[i].bimc_bw = get_th_params(pdev, child_node,
++                                              "qcom,bimc,bw", &num_bw);
++
++              if (num_bw != info[i].num_thresh) {
++                      pr_err("%s:num_bw %d must equal num_thresh %d",
++                              __func__, num_bw, info[i].num_thresh);
++                      pr_err("%s:Err setting up dual conf for %s",
++                              __func__, info[i].name);
++                      goto err;
++              }
++
++              of_property_read_u32(child_node, "qcom,bimc,gp",
++                      &info[i].bimc_gp);
++              of_property_read_u32(child_node, "qcom,bimc,thmp",
++                      &info[i].bimc_thmp);
++
++              ret = of_property_read_string(child_node, "qcom,mode-thresh",
++                      &sel_str);
++              if (ret)
++                      info[i].mode_thresh = 0;
++              else {
++                      ret = get_num(mode_sel_name, sel_str);
++                      if (ret < 0) {
++                              pr_err("Unknown mode :%s\n", sel_str);
++                              goto err;
++                      }
++
++                      info[i].mode_thresh = ret;
++                      MSM_BUS_DBG("AXI: Threshold mode set: %d\n",
++                                      info[i].mode_thresh);
++              }
++
++              ret = of_property_read_string(child_node, "qcom,mode",
++                              &sel_str);
++
++              if (ret)
++                      info[i].mode = 0;
++              else {
++                      ret = get_num(mode_sel_name, sel_str);
++                      if (ret < 0) {
++                              pr_err("Unknown mode :%s\n", sel_str);
++                              goto err;
++                      }
++
++                      info[i].mode = ret;
++              }
++
++              info[i].nr_lim =
++                      of_property_read_bool(child_node, "qcom,nr-lim");
++
++              ret = of_property_read_u32(child_node, "qcom,ff",
++                                                      &info[i].ff);
++              if (ret) {
++                      pr_debug("fudge factor not present %d", info[i].id);
++                      info[i].ff = 0;
++              }
++
++              ret = of_property_read_u32(child_node, "qcom,floor-bw",
++                                              &temp);
++              if (ret) {
++                      pr_debug("fabdev floor bw not present %d", info[i].id);
++                      info[i].floor_bw = 0;
++              } else {
++                      info[i].floor_bw = KBTOB(temp);
++              }
++
++              info[i].rt_mas =
++                      of_property_read_bool(child_node, "qcom,rt-mas");
++
++              ret = of_property_read_string(child_node, "qcom,perm-mode",
++                      &sel_str);
++              if (ret)
++                      info[i].perm_mode = 0;
++              else {
++                      ret = get_num(mode_sel_name, sel_str);
++                      if (ret < 0)
++                              goto err;
++
++                      info[i].perm_mode = 1 << ret;
++              }
++
++              of_property_read_u32(child_node, "qcom,prio-lvl",
++                      &info[i].prio_lvl);
++              of_property_read_u32(child_node, "qcom,prio-rd",
++                      &info[i].prio_rd);
++              of_property_read_u32(child_node, "qcom,prio-wr",
++                      &info[i].prio_wr);
++              of_property_read_u32(child_node, "qcom,prio0", &info[i].prio0);
++              of_property_read_u32(child_node, "qcom,prio1", &info[i].prio1);
++              ret = of_property_read_string(child_node, "qcom,slaveclk-dual",
++                      &info[i].slaveclk[DUAL_CTX]);
++              if (!ret)
++                      pr_debug("Got slaveclk_dual: %s\n",
++                              info[i].slaveclk[DUAL_CTX]);
++              else
++                      info[i].slaveclk[DUAL_CTX] = NULL;
++
++              ret = of_property_read_string(child_node,
++                      "qcom,slaveclk-active", &info[i].slaveclk[ACTIVE_CTX]);
++              if (!ret)
++                      pr_debug("Got slaveclk_active\n");
++              else
++                      info[i].slaveclk[ACTIVE_CTX] = NULL;
++
++              ret = of_property_read_string(child_node, "qcom,memclk-dual",
++                      &info[i].memclk[DUAL_CTX]);
++              if (!ret)
++                      pr_debug("Got memclk_dual\n");
++              else
++                      info[i].memclk[DUAL_CTX] = NULL;
++
++              ret = of_property_read_string(child_node, "qcom,memclk-active",
++                      &info[i].memclk[ACTIVE_CTX]);
++              if (!ret)
++                      pr_debug("Got memclk_active\n");
++              else
++                      info[i].memclk[ACTIVE_CTX] = NULL;
++
++              ret = of_property_read_string(child_node, "qcom,iface-clk-node",
++                      &info[i].iface_clk_node);
++              if (!ret)
++                      pr_debug("Got iface_clk_node\n");
++              else
++                      info[i].iface_clk_node = NULL;
++
++              pr_debug("Node name: %s\n", info[i].name);
++              of_node_put(child_node);
++              i++;
++      }
++
++      pr_debug("Bus %d added: %d masters\n", pdata->id, pdata->nmasters);
++      pr_debug("Bus %d added: %d slaves\n", pdata->id, pdata->nslaves);
++      return info;
++err:
++      return NULL;
++}
++
++void msm_bus_of_get_nfab(struct platform_device *pdev,
++              struct msm_bus_fabric_registration *pdata)
++{
++      struct device_node *of_node;
++      int ret, nfab = 0;
++
++      if (!pdev) {
++              pr_err("Error: Null platform device\n");
++              return;
++      }
++
++      of_node = pdev->dev.of_node;
++      ret = of_property_read_u32(of_node, "qcom,nfab",
++              &nfab);
++      if (!ret)
++              pr_debug("Fab_of: Read number of buses: %u\n", nfab);
++
++      msm_bus_board_set_nfab(pdata, nfab);
++}
++
++struct msm_bus_fabric_registration
++      *msm_bus_of_get_fab_data(struct platform_device *pdev)
++{
++      struct device_node *of_node;
++      struct msm_bus_fabric_registration *pdata;
++      bool mem_err = false;
++      int ret = 0;
++      const char *sel_str;
++      u32 temp;
++
++      if (!pdev) {
++              pr_err("Error: Null platform device\n");
++              return NULL;
++      }
++
++      of_node = pdev->dev.of_node;
++      pdata = devm_kzalloc(&pdev->dev,
++                      sizeof(struct msm_bus_fabric_registration), GFP_KERNEL);
++      if (!pdata) {
++              pr_err("Error: Memory allocation for pdata failed\n");
++              mem_err = true;
++              goto err;
++      }
++
++      ret = of_property_read_string(of_node, "label", &pdata->name);
++      if (ret) {
++              pr_err("Error: label not found\n");
++              goto err;
++      }
++      pr_debug("Fab_of: Read name: %s\n", pdata->name);
++
++      ret = of_property_read_u32(of_node, "cell-id",
++              &pdata->id);
++      if (ret) {
++              pr_err("Error: cell-id not found\n");
++              goto err;
++      }
++      pr_debug("Fab_of: Read id: %u\n", pdata->id);
++
++      if (of_property_read_bool(of_node, "qcom,ahb"))
++              pdata->ahb = 1;
++
++      ret = of_property_read_string(of_node, "qcom,fabclk-dual",
++              &pdata->fabclk[DUAL_CTX]);
++      if (ret) {
++              pr_debug("fabclk_dual not available\n");
++              pdata->fabclk[DUAL_CTX] = NULL;
++      } else
++              pr_debug("Fab_of: Read clk dual ctx: %s\n",
++                      pdata->fabclk[DUAL_CTX]);
++      ret = of_property_read_string(of_node, "qcom,fabclk-active",
++              &pdata->fabclk[ACTIVE_CTX]);
++      if (ret) {
++              pr_debug("Error: fabclk_active not available\n");
++              pdata->fabclk[ACTIVE_CTX] = NULL;
++      } else
++              pr_debug("Fab_of: Read clk act ctx: %s\n",
++                      pdata->fabclk[ACTIVE_CTX]);
++
++      ret = of_property_read_u32(of_node, "qcom,ntieredslaves",
++              &pdata->ntieredslaves);
++      if (ret) {
++              pr_err("Error: ntieredslaves not found\n");
++              goto err;
++      }
++
++      ret = of_property_read_u32(of_node, "qcom,qos-freq", &pdata->qos_freq);
++      if (ret)
++              pr_debug("qos_freq not available\n");
++
++      ret = of_property_read_string(of_node, "qcom,hw-sel", &sel_str);
++      if (ret) {
++              pr_err("Error: hw_sel not found\n");
++              goto err;
++      } else {
++              ret = get_num(hw_sel_name, sel_str);
++              if (ret < 0)
++                      goto err;
++
++              pdata->hw_sel = ret;
++      }
++
++      if (of_property_read_bool(of_node, "qcom,virt"))
++              pdata->virt = true;
++
++      ret = of_property_read_u32(of_node, "qcom,qos-baseoffset",
++                                              &pdata->qos_baseoffset);
++      if (ret)
++              pr_debug("%s:qos_baseoffset not available\n", __func__);
++
++      ret = of_property_read_u32(of_node, "qcom,qos-delta",
++                                              &pdata->qos_delta);
++      if (ret)
++              pr_debug("%s:qos_delta not available\n", __func__);
++
++      if (of_property_read_bool(of_node, "qcom,rpm-en"))
++              pdata->rpm_enabled = 1;
++
++      ret = of_property_read_u32(of_node, "qcom,nr-lim-thresh",
++                                              &temp);
++
++      if (ret) {
++              pr_err("nr-lim threshold not specified");
++              pdata->nr_lim_thresh = 0;
++      } else {
++              pdata->nr_lim_thresh = KBTOB(temp);
++      }
++
++      ret = of_property_read_u32(of_node, "qcom,eff-fact",
++                                              &pdata->eff_fact);
++      if (ret) {
++              pr_err("Fab eff-factor not present");
++              pdata->eff_fact = 0;
++      }
++
++      pdata->info = get_nodes(of_node, pdev, pdata);
++      return pdata;
++err:
++      return NULL;
++}
++EXPORT_SYMBOL(msm_bus_of_get_fab_data);
+--- /dev/null
++++ b/drivers/bus/msm_bus/msm_bus_of_adhoc.c
+@@ -0,0 +1,641 @@
++/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 and
++ * only version 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ */
++
++#define pr_fmt(fmt) "AXI: %s(): " fmt, __func__
++
++#include <linux/clk.h>
++#include <linux/device.h>
++#include <linux/module.h>
++#include <linux/slab.h>
++#include <linux/string.h>
++#include <linux/of.h>
++#include <linux/of_device.h>
++#include <linux/platform_device.h>
++#include "msm-bus.h"
++#include "msm-bus-board.h"
++#include "msm_bus_rules.h"
++#include "msm_bus_core.h"
++#include "msm_bus_adhoc.h"
++
++#define DEFAULT_QOS_FREQ      19200
++#define DEFAULT_UTIL_FACT     100
++#define DEFAULT_VRAIL_COMP    100
++
++static int get_qos_mode(struct platform_device *pdev,
++                      struct device_node *node, const char *qos_mode)
++{
++      const char *qos_names[] = {"fixed", "limiter", "bypass", "regulator"};
++      int i = 0;
++      int ret = -1;
++
++      if (!qos_mode)
++              goto exit_get_qos_mode;
++
++      for (i = 0; i < ARRAY_SIZE(qos_names); i++) {
++              if (!strcmp(qos_mode, qos_names[i]))
++                      break;
++      }
++      if (i == ARRAY_SIZE(qos_names))
++              dev_err(&pdev->dev, "Cannot match QoS mode %s, using bypass\n",
++                              qos_mode);
++      else
++              ret = i;
++
++exit_get_qos_mode:
++      return ret;
++}
++
++static int *get_arr(struct platform_device *pdev,
++              struct device_node *node, const char *prop,
++              int *nports)
++{
++      int size = 0, ret;
++      int *arr = NULL;
++
++      if (of_get_property(node, prop, &size)) {
++              *nports = size / sizeof(int);
++      } else {
++              dev_dbg(&pdev->dev, "Property %s not available\n", prop);
++              *nports = 0;
++              return NULL;
++      }
++
++      arr = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
++      if ((size > 0) && ZERO_OR_NULL_PTR(arr)) {
++              dev_err(&pdev->dev, "Error: Failed to alloc mem for %s\n",
++                              prop);
++              return NULL;
++      }
++
++      ret = of_property_read_u32_array(node, prop, (u32 *)arr, *nports);
++      if (ret) {
++              dev_err(&pdev->dev, "Error in reading property: %s\n", prop);
++              goto arr_err;
++      }
++
++      return arr;
++arr_err:
++      devm_kfree(&pdev->dev, arr);
++      return NULL;
++}
++
++static struct msm_bus_fab_device_type *get_fab_device_info(
++              struct device_node *dev_node,
++              struct platform_device *pdev)
++{
++      struct msm_bus_fab_device_type *fab_dev;
++      unsigned int ret;
++      struct resource *res;
++      const char *base_name;
++
++      fab_dev = devm_kzalloc(&pdev->dev,
++                      sizeof(struct msm_bus_fab_device_type),
++                      GFP_KERNEL);
++      if (!fab_dev) {
++              dev_err(&pdev->dev,
++                      "Error: Unable to allocate memory for fab_dev\n");
++              return NULL;
++      }
++
++      ret = of_property_read_string(dev_node, "qcom,base-name", &base_name);
++      if (ret) {
++              dev_err(&pdev->dev, "Error: Unable to get base address name\n");
++              goto fab_dev_err;
++      }
++
++      res = platform_get_resource_byname(pdev, IORESOURCE_MEM, base_name);
++      if (!res) {
++              dev_err(&pdev->dev, "Error getting qos base addr %s\n",
++                                                              base_name);
++              goto fab_dev_err;
++      }
++      fab_dev->pqos_base = res->start;
++      fab_dev->qos_range = resource_size(res);
++      fab_dev->bypass_qos_prg = of_property_read_bool(dev_node,
++                                              "qcom,bypass-qos-prg");
++
++      ret = of_property_read_u32(dev_node, "qcom,base-offset",
++                      &fab_dev->base_offset);
++      if (ret)
++              dev_dbg(&pdev->dev, "Bus base offset is missing\n");
++
++      ret = of_property_read_u32(dev_node, "qcom,qos-off",
++                      &fab_dev->qos_off);
++      if (ret)
++              dev_dbg(&pdev->dev, "Bus qos off is missing\n");
++
++
++      ret = of_property_read_u32(dev_node, "qcom,bus-type",
++                                              &fab_dev->bus_type);
++      if (ret) {
++              dev_warn(&pdev->dev, "Bus type is missing\n");
++              goto fab_dev_err;
++      }
++
++      ret = of_property_read_u32(dev_node, "qcom,qos-freq",
++                                              &fab_dev->qos_freq);
++      if (ret) {
++              dev_dbg(&pdev->dev, "Bus qos freq is missing\n");
++              fab_dev->qos_freq = DEFAULT_QOS_FREQ;
++      }
++
++      ret = of_property_read_u32(dev_node, "qcom,util-fact",
++                                              &fab_dev->util_fact);
++      if (ret) {
++              dev_info(&pdev->dev, "Util-fact is missing, default to %d\n",
++                              DEFAULT_UTIL_FACT);
++              fab_dev->util_fact = DEFAULT_UTIL_FACT;
++      }
++
++      ret = of_property_read_u32(dev_node, "qcom,vrail-comp",
++                                              &fab_dev->vrail_comp);
++      if (ret) {
++              dev_info(&pdev->dev, "Vrail-comp is missing, default to %d\n",
++                              DEFAULT_VRAIL_COMP);
++              fab_dev->vrail_comp = DEFAULT_VRAIL_COMP;
++      }
++
++      return fab_dev;
++
++fab_dev_err:
++      devm_kfree(&pdev->dev, fab_dev);
++      fab_dev = NULL;
++      return NULL;
++}
++
++static void get_qos_params(
++              struct device_node * const dev_node,
++              struct platform_device * const pdev,
++              struct msm_bus_node_info_type *node_info)
++{
++      const char *qos_mode = NULL;
++      unsigned int ret;
++      unsigned int temp;
++
++      ret = of_property_read_string(dev_node, "qcom,qos-mode", &qos_mode);
++
++      if (ret)
++              node_info->qos_params.mode = -1;
++      else
++              node_info->qos_params.mode = get_qos_mode(pdev, dev_node,
++                                                              qos_mode);
++
++      of_property_read_u32(dev_node, "qcom,prio-lvl",
++                                      &node_info->qos_params.prio_lvl);
++
++      of_property_read_u32(dev_node, "qcom,prio1",
++                                              &node_info->qos_params.prio1);
++
++      of_property_read_u32(dev_node, "qcom,prio0",
++                                              &node_info->qos_params.prio0);
++
++      of_property_read_u32(dev_node, "qcom,prio-rd",
++                                      &node_info->qos_params.prio_rd);
++
++      of_property_read_u32(dev_node, "qcom,prio-wr",
++                                              &node_info->qos_params.prio_wr);
++
++      of_property_read_u32(dev_node, "qcom,gp",
++                                              &node_info->qos_params.gp);
++
++      of_property_read_u32(dev_node, "qcom,thmp",
++                                              &node_info->qos_params.thmp);
++
++      of_property_read_u32(dev_node, "qcom,ws",
++                                              &node_info->qos_params.ws);
++
++      ret = of_property_read_u32(dev_node, "qcom,bw_buffer", &temp);
++
++      if (ret)
++              node_info->qos_params.bw_buffer = 0;
++      else
++              node_info->qos_params.bw_buffer = KBTOB(temp);
++
++}
++
++
++static struct msm_bus_node_info_type *get_node_info_data(
++              struct device_node * const dev_node,
++              struct platform_device * const pdev)
++{
++      struct msm_bus_node_info_type *node_info;
++      unsigned int ret;
++      int size;
++      int i;
++      struct device_node *con_node;
++      struct device_node *bus_dev;
++
++      node_info = devm_kzalloc(&pdev->dev,
++                      sizeof(struct msm_bus_node_info_type),
++                      GFP_KERNEL);
++      if (!node_info) {
++              dev_err(&pdev->dev,
++                      "Error: Unable to allocate memory for node_info\n");
++              return NULL;
++      }
++
++      ret = of_property_read_u32(dev_node, "cell-id", &node_info->id);
++      if (ret) {
++              dev_warn(&pdev->dev, "Bus node is missing cell-id\n");
++              goto node_info_err;
++      }
++      ret = of_property_read_string(dev_node, "label", &node_info->name);
++      if (ret) {
++              dev_warn(&pdev->dev, "Bus node is missing name\n");
++              goto node_info_err;
++      }
++      node_info->qport = get_arr(pdev, dev_node, "qcom,qport",
++                      &node_info->num_qports);
++
++      if (of_get_property(dev_node, "qcom,connections", &size)) {
++              node_info->num_connections = size / sizeof(int);
++              node_info->connections = devm_kzalloc(&pdev->dev, size,
++                              GFP_KERNEL);
++      } else {
++              node_info->num_connections = 0;
++              node_info->connections = 0;
++      }
++
++      for (i = 0; i < node_info->num_connections; i++) {
++              con_node = of_parse_phandle(dev_node, "qcom,connections", i);
++              if (IS_ERR_OR_NULL(con_node))
++                      goto node_info_err;
++
++              if (of_property_read_u32(con_node, "cell-id",
++                              &node_info->connections[i]))
++                      goto node_info_err;
++              of_node_put(con_node);
++      }
++
++      if (of_get_property(dev_node, "qcom,blacklist", &size)) {
++              node_info->num_blist = size/sizeof(u32);
++              node_info->black_listed_connections = devm_kzalloc(&pdev->dev,
++              size, GFP_KERNEL);
++      } else {
++              node_info->num_blist = 0;
++              node_info->black_listed_connections = 0;
++      }
++
++      for (i = 0; i < node_info->num_blist; i++) {
++              con_node = of_parse_phandle(dev_node, "qcom,blacklist", i);
++              if (IS_ERR_OR_NULL(con_node))
++                      goto node_info_err;
++
++              if (of_property_read_u32(con_node, "cell-id",
++                              &node_info->black_listed_connections[i]))
++                      goto node_info_err;
++              of_node_put(con_node);
++      }
++
++      bus_dev = of_parse_phandle(dev_node, "qcom,bus-dev", 0);
++      if (!IS_ERR_OR_NULL(bus_dev)) {
++              if (of_property_read_u32(bus_dev, "cell-id",
++                      &node_info->bus_device_id)) {
++                      dev_err(&pdev->dev, "Can't find bus device. Node %d",
++                                      node_info->id);
++                      goto node_info_err;
++              }
++
++              of_node_put(bus_dev);
++      } else
++              dev_dbg(&pdev->dev, "Can't find bdev phandle for %d",
++                                      node_info->id);
++
++      node_info->is_fab_dev = of_property_read_bool(dev_node, "qcom,fab-dev");
++      node_info->virt_dev = of_property_read_bool(dev_node, "qcom,virt-dev");
++
++      ret = of_property_read_u32(dev_node, "qcom,buswidth",
++                                              &node_info->buswidth);
++      if (ret) {
++              dev_dbg(&pdev->dev, "Using default 8 bytes %d", node_info->id);
++              node_info->buswidth = 8;
++      }
++
++      ret = of_property_read_u32(dev_node, "qcom,mas-rpm-id",
++                                              &node_info->mas_rpm_id);
++      if (ret) {
++              dev_dbg(&pdev->dev, "mas rpm id is missing\n");
++              node_info->mas_rpm_id = -1;
++      }
++
++      ret = of_property_read_u32(dev_node, "qcom,slv-rpm-id",
++                                              &node_info->slv_rpm_id);
++      if (ret) {
++              dev_dbg(&pdev->dev, "slv rpm id is missing\n");
++              node_info->slv_rpm_id = -1;
++      }
++      ret = of_property_read_u32(dev_node, "qcom,util-fact",
++                                              &node_info->util_fact);
++      if (ret)
++              node_info->util_fact = 0;
++      ret = of_property_read_u32(dev_node, "qcom,vrail-comp",
++                                              &node_info->vrail_comp);
++      if (ret)
++              node_info->vrail_comp = 0;
++      get_qos_params(dev_node, pdev, node_info);
++
++      return node_info;
++
++node_info_err:
++      devm_kfree(&pdev->dev, node_info);
++      node_info = NULL;
++      return NULL;
++}
++
++static int get_bus_node_device_data(
++              struct device_node * const dev_node,
++              struct platform_device * const pdev,
++              struct msm_bus_node_device_type * const node_device)
++{
++      node_device->node_info = get_node_info_data(dev_node, pdev);
++      if (IS_ERR_OR_NULL(node_device->node_info)) {
++              dev_err(&pdev->dev, "Error: Node info missing\n");
++              return -ENODATA;
++      }
++      node_device->ap_owned = of_property_read_bool(dev_node,
++                                                      "qcom,ap-owned");
++
++      if (node_device->node_info->is_fab_dev) {
++              dev_dbg(&pdev->dev, "Dev %d\n", node_device->node_info->id);
++
++              if (!node_device->node_info->virt_dev) {
++                      node_device->fabdev =
++                              get_fab_device_info(dev_node, pdev);
++                      if (IS_ERR_OR_NULL(node_device->fabdev)) {
++                              dev_err(&pdev->dev,
++                                      "Error: Fabric device info missing\n");
++                              devm_kfree(&pdev->dev, node_device->node_info);
++                              return -ENODATA;
++                      }
++              }
++              node_device->clk[DUAL_CTX].clk = of_clk_get_by_name(dev_node,
++                                                      "bus_clk");
++
++              if (IS_ERR_OR_NULL(node_device->clk[DUAL_CTX].clk))
++                      dev_dbg(&pdev->dev,
++                              "%s:Failed to get bus clk for bus%d ctx%d",
++                              __func__, node_device->node_info->id,
++                                                              DUAL_CTX);
++
++              node_device->clk[ACTIVE_CTX].clk = of_clk_get_by_name(dev_node,
++                                                      "bus_a_clk");
++              if (IS_ERR_OR_NULL(node_device->clk[ACTIVE_CTX].clk))
++                      dev_err(&pdev->dev,
++                              "Failed to get bus clk for bus%d ctx%d",
++                               node_device->node_info->id, ACTIVE_CTX);
++              if (msmbus_coresight_init_adhoc(pdev, dev_node))
++                      dev_warn(&pdev->dev,
++                               "Coresight support absent for bus: %d\n",
++                                node_device->node_info->id);
++      } else {
++              node_device->qos_clk.clk = of_clk_get_by_name(dev_node,
++                                                      "bus_qos_clk");
++
++              if (IS_ERR_OR_NULL(node_device->qos_clk.clk))
++                      dev_dbg(&pdev->dev,
++                              "%s:Failed to get bus qos clk for mas%d",
++                              __func__, node_device->node_info->id);
++
++              node_device->clk[DUAL_CTX].clk = of_clk_get_by_name(dev_node,
++                                                      "node_clk");
++
++              if (IS_ERR_OR_NULL(node_device->clk[DUAL_CTX].clk))
++                      dev_dbg(&pdev->dev,
++                              "%s:Failed to get bus clk for bus%d ctx%d",
++                              __func__, node_device->node_info->id,
++                                                              DUAL_CTX);
++
++      }
++      return 0;
++}
++
++struct msm_bus_device_node_registration
++      *msm_bus_of_to_pdata(struct platform_device *pdev)
++{
++      struct device_node *of_node, *child_node;
++      struct msm_bus_device_node_registration *pdata;
++      unsigned int i = 0, j;
++      int ret;
++
++      if (!pdev) {
++              pr_err("Error: Null platform device\n");
++              return NULL;
++      }
++
++      of_node = pdev->dev.of_node;
++
++      pdata = devm_kzalloc(&pdev->dev,
++                      sizeof(struct msm_bus_device_node_registration),
++                      GFP_KERNEL);
++      if (!pdata) {
++              dev_err(&pdev->dev,
++                              "Error: Memory allocation for pdata failed\n");
++              return NULL;
++      }
++
++      pdata->num_devices = of_get_child_count(of_node);
++
++      pdata->info = devm_kzalloc(&pdev->dev,
++                      sizeof(struct msm_bus_node_device_type) *
++                      pdata->num_devices, GFP_KERNEL);
++
++      if (!pdata->info) {
++              dev_err(&pdev->dev,
++                      "Error: Memory allocation for pdata->info failed\n");
++              goto node_reg_err;
++      }
++
++      ret = 0;
++      for_each_child_of_node(of_node, child_node) {
++              ret = get_bus_node_device_data(child_node, pdev,
++                              &pdata->info[i]);
++              if (ret) {
++                      dev_err(&pdev->dev, "Error: unable to initialize bus nodes\n");
++                      goto node_reg_err_1;
++              }
++              i++;
++      }
++
++      dev_dbg(&pdev->dev, "bus topology:\n");
++      for (i = 0; i < pdata->num_devices; i++) {
++              dev_dbg(&pdev->dev, "id %d\nnum_qports %d\nnum_connections %d",
++                              pdata->info[i].node_info->id,
++                              pdata->info[i].node_info->num_qports,
++                              pdata->info[i].node_info->num_connections);
++              dev_dbg(&pdev->dev, "\nbus_device_id %d\n buswidth %d\n",
++                              pdata->info[i].node_info->bus_device_id,
++                              pdata->info[i].node_info->buswidth);
++              for (j = 0; j < pdata->info[i].node_info->num_connections;
++                                                                      j++) {
++                      dev_dbg(&pdev->dev, "connection[%d]: %d\n", j,
++                              pdata->info[i].node_info->connections[j]);
++              }
++              for (j = 0; j < pdata->info[i].node_info->num_blist;
++                                                                       j++) {
++                      dev_dbg(&pdev->dev, "black_listed_node[%d]: %d\n", j,
++                              pdata->info[i].node_info->
++                              black_listed_connections[j]);
++              }
++              if (pdata->info[i].fabdev)
++                      dev_dbg(&pdev->dev, "base_addr %zu\nbus_type %d\n",
++                                      (size_t)pdata->info[i].
++                                              fabdev->pqos_base,
++                                      pdata->info[i].fabdev->bus_type);
++      }
++      return pdata;
++
++node_reg_err_1:
++      devm_kfree(&pdev->dev, pdata->info);
++node_reg_err:
++      devm_kfree(&pdev->dev, pdata);
++      pdata = NULL;
++      return NULL;
++}
++
++static int msm_bus_of_get_ids(struct platform_device *pdev,
++                      struct device_node *dev_node, int **dev_ids,
++                      int *num_ids, char *prop_name)
++{
++      int ret = 0;
++      int size, i;
++      struct device_node *rule_node;
++      int *ids = NULL;
++
++      if (of_get_property(dev_node, prop_name, &size)) {
++              *num_ids = size / sizeof(int);
++              ids = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
++      } else {
++              dev_err(&pdev->dev, "No rule nodes, skipping node");
++              ret = -ENXIO;
++              goto exit_get_ids;
++      }
++
++      *dev_ids = ids;
++      for (i = 0; i < *num_ids; i++) {
++              rule_node = of_parse_phandle(dev_node, prop_name, i);
++              if (IS_ERR_OR_NULL(rule_node)) {
++                      dev_err(&pdev->dev, "Can't get rule node id");
++                      ret = -ENXIO;
++                      goto err_get_ids;
++              }
++
++              if (of_property_read_u32(rule_node, "cell-id",
++                              &ids[i])) {
++                      dev_err(&pdev->dev, "Can't get rule node id");
++                      ret = -ENXIO;
++                      goto err_get_ids;
++              }
++              of_node_put(rule_node);
++      }
++exit_get_ids:
++      return ret;
++err_get_ids:
++      devm_kfree(&pdev->dev, ids);
++      of_node_put(rule_node);
++      *dev_ids = NULL;
++      return ret;
++}
++
++int msm_bus_of_get_static_rules(struct platform_device *pdev,
++                                      struct bus_rule_type **static_rules)
++{
++      int ret = 0;
++      struct device_node *of_node, *child_node;
++      int num_rules = 0;
++      int rule_idx = 0;
++      int bw_fld = 0;
++      int i;
++      struct bus_rule_type *static_rule = NULL;
++
++      of_node = pdev->dev.of_node;
++      num_rules = of_get_child_count(of_node);
++      static_rule = devm_kzalloc(&pdev->dev,
++                              sizeof(struct bus_rule_type) * num_rules,
++                              GFP_KERNEL);
++
++      if (IS_ERR_OR_NULL(static_rule)) {
++              ret = -ENOMEM;
++              goto exit_static_rules;
++      }
++
++      *static_rules = static_rule;
++      for_each_child_of_node(of_node, child_node) {
++              ret = msm_bus_of_get_ids(pdev, child_node,
++                      &static_rule[rule_idx].src_id,
++                      &static_rule[rule_idx].num_src,
++                      "qcom,src-nodes");
++
++              ret = msm_bus_of_get_ids(pdev, child_node,
++                      &static_rule[rule_idx].dst_node,
++                      &static_rule[rule_idx].num_dst,
++                      "qcom,dest-node");
++
++              ret = of_property_read_u32(child_node, "qcom,src-field",
++                              &static_rule[rule_idx].src_field);
++              if (ret) {
++                      dev_err(&pdev->dev, "src-field missing");
++                      ret = -ENXIO;
++                      goto err_static_rules;
++              }
++
++              ret = of_property_read_u32(child_node, "qcom,src-op",
++                              &static_rule[rule_idx].op);
++              if (ret) {
++                      dev_err(&pdev->dev, "src-op missing");
++                      ret = -ENXIO;
++                      goto err_static_rules;
++              }
++
++              ret = of_property_read_u32(child_node, "qcom,mode",
++                              &static_rule[rule_idx].mode);
++              if (ret) {
++                      dev_err(&pdev->dev, "mode missing");
++                      ret = -ENXIO;
++                      goto err_static_rules;
++              }
++
++              ret = of_property_read_u32(child_node, "qcom,thresh", &bw_fld);
++              if (ret) {
++                      dev_err(&pdev->dev, "thresh missing");
++                      ret = -ENXIO;
++                      goto err_static_rules;
++              } else
++                      static_rule[rule_idx].thresh = KBTOB(bw_fld);
++
++              ret = of_property_read_u32(child_node, "qcom,dest-bw",
++                                                              &bw_fld);
++              if (ret)
++                      static_rule[rule_idx].dst_bw = 0;
++              else
++                      static_rule[rule_idx].dst_bw = KBTOB(bw_fld);
++
++              rule_idx++;
++      }
++      ret = rule_idx;
++exit_static_rules:
++      return ret;
++err_static_rules:
++      for (i = 0; i < num_rules; i++) {
++              if (!IS_ERR_OR_NULL(static_rule[i].src_id))
++                      devm_kfree(&pdev->dev,
++                                      static_rule[i].src_id);
++              if (!IS_ERR_OR_NULL(static_rule[i].dst_node))
++                      devm_kfree(&pdev->dev,
++                                      static_rule[i].dst_node);
++      }
++      devm_kfree(&pdev->dev, static_rule);
++      *static_rules = NULL;
++      return ret;
++}
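
Each child node parsed above becomes one static rule: source and destination node-id lists, the field to watch, a comparison operator, a mode, and thresholds that arrive in KB/s and are converted with KBTOB(). A standalone sketch of just that conversion, using a simplified stand-in struct rather than the real bus_rule_type from msm_bus_rules.h:

    /* Threshold scaling for a static rule; struct and values are illustrative */
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define KBTOB(a) ((uint64_t)(a) * 1000) /* assumed KB/s -> B/s scaling */

    struct example_rule {
            uint64_t thresh;        /* trigger threshold, bytes/s */
            uint64_t dst_bw;        /* bandwidth applied to the destination, bytes/s */
    };

    int main(void)
    {
            uint32_t thresh_kbps = 1600000; /* hypothetical "qcom,thresh" cell */
            uint32_t dest_bw_kbps = 800000; /* hypothetical "qcom,dest-bw" cell */
            struct example_rule rule = {
                    .thresh = KBTOB(thresh_kbps),
                    .dst_bw = KBTOB(dest_bw_kbps),
            };

            printf("thresh %" PRIu64 " B/s, dst_bw %" PRIu64 " B/s\n",
                   rule.thresh, rule.dst_bw);
            return 0;
    }
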
+--- /dev/null
++++ b/drivers/bus/msm_bus/msm_bus_rules.c
+@@ -0,0 +1,624 @@
++/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 and
++ * only version 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ */
++
++#include <linux/list_sort.h>
++#include <linux/slab.h>
++#include <linux/types.h>
++#include "msm-bus-board.h"
++#include "msm_bus_rules.h"
++#include <trace/events/trace_msm_bus.h>
++
++struct node_vote_info {
++      int id;
++      u64 ib;
++      u64 ab;
++      u64 clk;
++};
++
++struct rules_def {
++      int rule_id;
++      int num_src;
++      int state;
++      struct node_vote_info *src_info;
++      struct bus_rule_type rule_ops;
++      bool state_change;
++      struct list_head link;
++};
++
++struct rule_node_info {
++      int id;
++      void *data;
++      struct raw_notifier_head rule_notify_list;
++      int cur_rule;
++      int num_rules;
++      struct list_head node_rules;
++      struct list_head link;
++      struct rule_apply_rcm_info apply;
++};
++
++DEFINE_MUTEX(msm_bus_rules_lock);
++static LIST_HEAD(node_list);
++static struct rule_node_info *get_node(u32 id, void *data);
++
++#define LE(op1, op2)  (op1 <= op2)
++#define LT(op1, op2)  (op1 < op2)
++#define GE(op1, op2)  (op1 >= op2)
++#define GT(op1, op2)  (op1 > op2)
++#define NB_ID         (0x201)
++
++static struct rule_node_info *get_node(u32 id, void *data)
++{
++      struct rule_node_info *node_it = NULL;
++      struct rule_node_info *node_match = NULL;
++
++      list_for_each_entry(node_it, &node_list, link) {
++              if (node_it->id == id) {
++                      if (id == NB_ID) {
++                              if (node_it->data == data) {
++                                      node_match = node_it;
++                                      break;
++                              }
++                      } else {
++                              node_match = node_it;
++                              break;
++                      }
++              }
++      }
++      return node_match;
++}
++
++static struct rule_node_info *gen_node(u32 id, void *data)
++{
++      struct rule_node_info *node_it = NULL;
++      struct rule_node_info *node_match = NULL;
++
++      list_for_each_entry(node_it, &node_list, link) {
++              if (node_it->id == id) {
++                      node_match = node_it;
++                      break;
++              }
++      }
++
++      if (!node_match) {
++              node_match = kzalloc(sizeof(struct rule_node_info), GFP_KERNEL);
++              if (!node_match) {
++                      pr_err("%s: Cannot allocate memory", __func__);
++                      goto exit_node_match;
++              }
++
++              node_match->id = id;
++              node_match->cur_rule = -1;
++              node_match->num_rules = 0;
++              node_match->data = data;
++              list_add_tail(&node_match->link, &node_list);
++              INIT_LIST_HEAD(&node_match->node_rules);
++              RAW_INIT_NOTIFIER_HEAD(&node_match->rule_notify_list);
++              pr_debug("Added new node %d to list\n", id);
++      }
++exit_node_match:
++      return node_match;
++}
++
++static bool do_compare_op(u64 op1, u64 op2, int op)
++{
++      bool ret = false;
++
++      switch (op) {
++      case OP_LE:
++              ret = LE(op1, op2);
++              break;
++      case OP_LT:
++              ret = LT(op1, op2);
++              break;
++      case OP_GT:
++              ret = GT(op1, op2);
++              break;
++      case OP_GE:
++              ret = GE(op1, op2);
++              break;
++      case OP_NOOP:
++              ret = true;
++              break;
++      default:
++              pr_info("Invalid OP %d", op);
++              break;
++      }
++      return ret;
++}
++
++static void update_src_id_vote(struct rule_update_path_info *inp_node,
++                              struct rule_node_info *rule_node)
++{
++      struct rules_def *rule;
++      int i;
++
++      list_for_each_entry(rule, &rule_node->node_rules, link) {
++              for (i = 0; i < rule->num_src; i++) {
++                      if (rule->src_info[i].id == inp_node->id) {
++                              rule->src_info[i].ib = inp_node->ib;
++                              rule->src_info[i].ab = inp_node->ab;
++                              rule->src_info[i].clk = inp_node->clk;
++                      }
++              }
++      }
++}
++
++static u64 get_field(struct rules_def *rule, int src_id)
++{
++      u64 field = 0;
++      int i;
++
++      for (i = 0; i < rule->num_src; i++) {
++              switch (rule->rule_ops.src_field) {
++              case FLD_IB:
++                      field += rule->src_info[i].ib;
++                      break;
++              case FLD_AB:
++                      field += rule->src_info[i].ab;
++                      break;
++              case FLD_CLK:
++                      field += rule->src_info[i].clk;
++                      break;
++              }
++      }
++
++      return field;
++}
++
++static bool check_rule(struct rules_def *rule,
++                      struct rule_update_path_info *inp)
++{
++      bool ret = false;
++
++      if (!rule)
++              return ret;
++
++      switch (rule->rule_ops.op) {
++      case OP_LE:
++      case OP_LT:
++      case OP_GT:
++      case OP_GE:
++      {
++              u64 src_field = get_field(rule, inp->id);
++              if (!src_field)
++                      ret = false;
++              else
++                      ret = do_compare_op(src_field, rule->rule_ops.thresh,
++                                                      rule->rule_ops.op);
++              break;
++      }
++      default:
++              pr_err("Unsupported op %d", rule->rule_ops.op);
++              break;
++      }
++      return ret;
++}
++
++static void match_rule(struct rule_update_path_info *inp_node,
++                      struct rule_node_info *node)
++{
++      struct rules_def *rule;
++      int i;
++
++      list_for_each_entry(rule, &node->node_rules, link) {
++              for (i = 0; i < rule->num_src; i++) {
++                      if (rule->src_info[i].id == inp_node->id) {
++                              if (check_rule(rule, inp_node)) {
++                                      trace_bus_rules_matches(node->cur_rule,
++                                              inp_node->id, inp_node->ab,
++                                              inp_node->ib, inp_node->clk);
++                                      if (rule->state ==
++                                              RULE_STATE_NOT_APPLIED)
++                                              rule->state_change = true;
++                                      rule->state = RULE_STATE_APPLIED;
++                              } else {
++                                      if (rule->state ==
++                                              RULE_STATE_APPLIED)
++                                              rule->state_change = true;
++                                      rule->state = RULE_STATE_NOT_APPLIED;
++                              }
++                      }
++              }
++      }
++}
++
++static void apply_rule(struct rule_node_info *node,
++                      struct list_head *output_list)
++{
++      struct rules_def *rule;
++
++      node->cur_rule = -1;
++      list_for_each_entry(rule, &node->node_rules, link) {
++              if ((rule->state == RULE_STATE_APPLIED) &&
++                                              (node->cur_rule == -1))
++                      node->cur_rule = rule->rule_id;
++
++              if (node->id == NB_ID) {
++                      if (rule->state_change) {
++                              rule->state_change = false;
++                              raw_notifier_call_chain(&node->rule_notify_list,
++                                      rule->state, (void *)&rule->rule_ops);
++                      }
++              } else {
++                      if ((rule->state == RULE_STATE_APPLIED) &&
++                              (node->cur_rule == rule->rule_id)) {
++                              node->apply.id = rule->rule_ops.dst_node[0];
++                              node->apply.throttle = rule->rule_ops.mode;
++                              node->apply.lim_bw = rule->rule_ops.dst_bw;
++                              list_add_tail(&node->apply.link, output_list);
++                      }
++                      rule->state_change = false;
++              }
++      }
++
++}
++
++int msm_rules_update_path(struct list_head *input_list,
++                      struct list_head *output_list)
++{
++      int ret = 0;
++      struct rule_update_path_info  *inp_node;
++      struct rule_node_info *node_it = NULL;
++
++      mutex_lock(&msm_bus_rules_lock);
++      list_for_each_entry(inp_node, input_list, link) {
++              list_for_each_entry(node_it, &node_list, link) {
++                      update_src_id_vote(inp_node, node_it);
++                      match_rule(inp_node, node_it);
++              }
++      }
++
++      list_for_each_entry(node_it, &node_list, link)
++              apply_rule(node_it, output_list);
++
++      mutex_unlock(&msm_bus_rules_lock);
++      return ret;
++}
++
++static bool ops_equal(int op1, int op2)
++{
++      bool ret = false;
++
++      switch (op1) {
++      case OP_GT:
++      case OP_GE:
++      case OP_LT:
++      case OP_LE:
++              if (abs(op1 - op2) <= 1)
++                      ret = true;
++              break;
++      default:
++              ret = (op1 == op2);
++      }
++
++      return ret;
++}
++
++static int node_rules_compare(void *priv, struct list_head *a,
++                                      struct list_head *b)
++{
++      struct rules_def *ra = container_of(a, struct rules_def, link);
++      struct rules_def *rb = container_of(b, struct rules_def, link);
++      int ret = -1;
++      int64_t th_diff = 0;
++
++      if (ra->rule_ops.mode == rb->rule_ops.mode) {
++              if (ops_equal(ra->rule_ops.op, rb->rule_ops.op)) {
++                      if ((ra->rule_ops.op == OP_LT) ||
++                              (ra->rule_ops.op == OP_LE)) {
++                              th_diff = ra->rule_ops.thresh -
++                                              rb->rule_ops.thresh;
++                              if (th_diff > 0)
++                                      ret = 1;
++                               else
++                                      ret = -1;
++                      } else if ((ra->rule_ops.op == OP_GT) ||
++                                      (ra->rule_ops.op == OP_GE)) {
++                              th_diff = rb->rule_ops.thresh -
++                                                      ra->rule_ops.thresh;
++                              if (th_diff > 0)
++                                      ret = 1;
++                               else
++                                      ret = -1;
++                      }
++              } else
++                      ret = ra->rule_ops.op - rb->rule_ops.op;
++      } else if ((ra->rule_ops.mode == THROTTLE_OFF) &&
++              (rb->rule_ops.mode == THROTTLE_ON)) {
++              ret = 1;
++      } else if ((ra->rule_ops.mode == THROTTLE_ON) &&
++              (rb->rule_ops.mode == THROTTLE_OFF)) {
++              ret = -1;
++      }
++
++      return ret;
++}
++
++static void print_rules(struct rule_node_info *node_it)
++{
++      struct rules_def *node_rule = NULL;
++      int i;
++
++      if (!node_it) {
++              pr_err("%s: no node found", __func__);
++              return;
++      }
++
++      pr_info("\n Now printing rules for Node %d  cur rule %d\n",
++                                              node_it->id, node_it->cur_rule);
++      list_for_each_entry(node_rule, &node_it->node_rules, link) {
++              pr_info("\n num Rules %d  rule Id %d\n",
++                              node_it->num_rules, node_rule->rule_id);
++              pr_info("Rule: src_field %d\n", node_rule->rule_ops.src_field);
++              for (i = 0; i < node_rule->rule_ops.num_src; i++)
++                      pr_info("Rule: src %d\n",
++                                      node_rule->rule_ops.src_id[i]);
++              for (i = 0; i < node_rule->rule_ops.num_dst; i++)
++                      pr_info("Rule: dst %d dst_bw %llu\n",
++                                              node_rule->rule_ops.dst_node[i],
++                                              node_rule->rule_ops.dst_bw);
++              pr_info("Rule: thresh %llu op %d mode %d State %d\n",
++                                      node_rule->rule_ops.thresh,
++                                      node_rule->rule_ops.op,
++                                      node_rule->rule_ops.mode,
++                                      node_rule->state);
++      }
++}
++
++void print_all_rules(void)
++{
++      struct rule_node_info *node_it = NULL;
++
++      list_for_each_entry(node_it, &node_list, link)
++              print_rules(node_it);
++}
++
++void print_rules_buf(char *buf, int max_buf)
++{
++      struct rule_node_info *node_it = NULL;
++      struct rules_def *node_rule = NULL;
++      int i;
++      int cnt = 0;
++
++      list_for_each_entry(node_it, &node_list, link) {
++              cnt += scnprintf(buf + cnt, max_buf - cnt,
++                                      "\n Now printing rules for Node %d cur_rule %d\n",
++                                      node_it->id, node_it->cur_rule);
++              list_for_each_entry(node_rule, &node_it->node_rules, link) {
++                      cnt += scnprintf(buf + cnt, max_buf - cnt,
++                              "\nNum Rules:%d ruleId %d STATE:%d change:%d\n",
++                              node_it->num_rules, node_rule->rule_id,
++                              node_rule->state, node_rule->state_change);
++                      cnt += scnprintf(buf + cnt, max_buf - cnt,
++                              "Src_field %d\n",
++                              node_rule->rule_ops.src_field);
++                      for (i = 0; i < node_rule->rule_ops.num_src; i++)
++                              cnt += scnprintf(buf + cnt, max_buf - cnt,
++                                      "Src %d Cur Ib %llu Ab %llu\n",
++                                      node_rule->rule_ops.src_id[i],
++                                      node_rule->src_info[i].ib,
++                                      node_rule->src_info[i].ab);
++                      for (i = 0; i < node_rule->rule_ops.num_dst; i++)
++                              cnt += scnprintf(buf + cnt, max_buf - cnt,
++                                      "Dst %d dst_bw %llu\n",
++                                      node_rule->rule_ops.dst_node[i],
++                                      node_rule->rule_ops.dst_bw);
++                      cnt += scnprintf(buf + cnt, max_buf - cnt,
++                                      "Thresh %llu op %d mode %d\n",
++                                      node_rule->rule_ops.thresh,
++                                      node_rule->rule_ops.op,
++                                      node_rule->rule_ops.mode);
++              }
++      }
++}
++
++static int copy_rule(struct bus_rule_type *src, struct rules_def *node_rule,
++                      struct notifier_block *nb)
++{
++      int i;
++      int ret = 0;
++
++      memcpy(&node_rule->rule_ops, src,
++                              sizeof(struct bus_rule_type));
++      node_rule->rule_ops.src_id = kzalloc(
++                      (sizeof(int) * node_rule->rule_ops.num_src),
++                                                      GFP_KERNEL);
++      if (!node_rule->rule_ops.src_id) {
++              pr_err("%s:Failed to allocate for src_id",
++                                      __func__);
++              return -ENOMEM;
++      }
++      memcpy(node_rule->rule_ops.src_id, src->src_id,
++                              sizeof(int) * src->num_src);
++
++      if (!nb) {
++              node_rule->rule_ops.dst_node = kzalloc(
++                      (sizeof(int) * node_rule->rule_ops.num_dst),
++                                              GFP_KERNEL);
++              if (!node_rule->rule_ops.dst_node) {
++                      pr_err("%s:Failed to allocate for dst_node",
++                                                      __func__);
++                      return -ENOMEM;
++              }
++              memcpy(node_rule->rule_ops.dst_node, src->dst_node,
++                                              sizeof(int) * src->num_dst);
++      }
++
++      node_rule->num_src = src->num_src;
++      node_rule->src_info = kzalloc(
++              (sizeof(struct node_vote_info) * node_rule->rule_ops.num_src),
++                                                      GFP_KERNEL);
++      if (!node_rule->src_info) {
++              pr_err("%s:Failed to allocate for src_info",
++                                              __func__);
++              return -ENOMEM;
++      }
++      for (i = 0; i < src->num_src; i++)
++              node_rule->src_info[i].id = src->src_id[i];
++
++      return ret;
++}
++
++void msm_rule_register(int num_rules, struct bus_rule_type *rule,
++                                      struct notifier_block *nb)
++{
++      struct rule_node_info *node = NULL;
++      int i, j;
++      struct rules_def *node_rule = NULL;
++      int num_dst = 0;
++
++      if (!rule)
++              return;
++
++      mutex_lock(&msm_bus_rules_lock);
++      for (i = 0; i < num_rules; i++) {
++              if (nb)
++                      num_dst = 1;
++              else
++                      num_dst = rule[i].num_dst;
++
++              for (j = 0; j < num_dst; j++) {
++                      int id = 0;
++
++                      if (nb)
++                              id = NB_ID;
++                      else
++                              id = rule[i].dst_node[j];
++
++                      node = gen_node(id, nb);
++                      if (!node) {
++                              pr_err("Error getting rule");
++                              goto exit_rule_register;
++                      }
++                      node_rule = kzalloc(sizeof(struct rules_def),
++                                              GFP_KERNEL);
++                      if (!node_rule) {
++                              pr_err("%s: Failed to allocate for rule",
++                                                              __func__);
++                              goto exit_rule_register;
++                      }
++
++                      if (copy_rule(&rule[i], node_rule, nb)) {
++                              pr_err("Error copying rule");
++                              goto exit_rule_register;
++                      }
++
++                      node_rule->rule_id = node->num_rules++;
++                      if (nb)
++                              node->data = nb;
++
++                      list_add_tail(&node_rule->link, &node->node_rules);
++              }
++      }
++      list_sort(NULL, &node->node_rules, node_rules_compare);
++
++      if (nb)
++              raw_notifier_chain_register(&node->rule_notify_list, nb);
++exit_rule_register:
++      mutex_unlock(&msm_bus_rules_lock);
++      return;
++}
++
++static int comp_rules(struct bus_rule_type *rulea, struct bus_rule_type *ruleb)
++{
++      int ret = 1;
++
++      if (rulea->num_src == ruleb->num_src)
++              ret = memcmp(rulea->src_id, ruleb->src_id,
++                              (sizeof(int) * rulea->num_src));
++      if (!ret && (rulea->num_dst == ruleb->num_dst))
++              ret = memcmp(rulea->dst_node, ruleb->dst_node,
++                              (sizeof(int) * rulea->num_dst));
++      if (!ret && (rulea->dst_bw == ruleb->dst_bw) &&
++              (rulea->op == ruleb->op) && (rulea->thresh == ruleb->thresh))
++              ret = 0;
++
++      return ret;
++}
++
++void msm_rule_unregister(int num_rules, struct bus_rule_type *rule,
++                                      struct notifier_block *nb)
++{
++      int i;
++      struct rule_node_info *node = NULL;
++      struct rule_node_info *node_tmp = NULL;
++      struct rules_def *node_rule;
++      struct rules_def *node_rule_tmp;
++      bool match_found = false;
++
++      if (!rule)
++              return;
++
++      mutex_lock(&msm_bus_rules_lock);
++      if (nb) {
++              node = get_node(NB_ID, nb);
++              if (!node) {
++                      pr_err("%s: Can't find node", __func__);
++                      goto exit_unregister_rule;
++              }
++
++              list_for_each_entry_safe(node_rule, node_rule_tmp,
++                                      &node->node_rules, link) {
++                      list_del(&node_rule->link);
++                      kfree(node_rule);
++                      node->num_rules--;
++              }
++              raw_notifier_chain_unregister(&node->rule_notify_list, nb);
++      } else {
++              for (i = 0; i < num_rules; i++) {
++                      match_found = false;
++
++                      list_for_each_entry(node, &node_list, link) {
++                              list_for_each_entry_safe(node_rule,
++                              node_rule_tmp, &node->node_rules, link) {
++                                      if (comp_rules(&node_rule->rule_ops,
++                                              &rule[i]) == 0) {
++                                              list_del(&node_rule->link);
++                                              kfree(node_rule);
++                                              match_found = true;
++                                              node->num_rules--;
++                                              list_sort(NULL,
++                                                      &node->node_rules,
++                                                      node_rules_compare);
++                                              break;
++                                      }
++                              }
++                      }
++              }
++      }
++
++      list_for_each_entry_safe(node, node_tmp,
++                                      &node_list, link) {
++              if (!node->num_rules) {
++                      pr_debug("Deleting Rule node %d", node->id);
++                      list_del(&node->link);
++                      kfree(node);
++              }
++      }
++exit_unregister_rule:
++      mutex_unlock(&msm_bus_rules_lock);
++}
++
++bool msm_rule_are_rules_registered(void)
++{
++      return !list_empty(&node_list);
++}
++
+--- /dev/null
++++ b/drivers/bus/msm_bus/msm_bus_rules.h
+@@ -0,0 +1,77 @@
++/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 and
++ * only version 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ */
++
++#ifndef _ARCH_ARM_MACH_MSM_BUS_RULES_H
++#define _ARCH_ARM_MACH_MSM_BUS_RULES_H
++
++#include <linux/types.h>
++#include <linux/list.h>
++#include <linux/notifier.h>
++#include <dt-bindings/msm/msm-bus-rule-ops.h>
++
++#define MAX_NODES             (5)
++
++struct rule_update_path_info {
++      u32 id;
++      u64 ab;
++      u64 ib;
++      u64 clk;
++      struct list_head link;
++};
++
++struct rule_apply_rcm_info {
++      u32 id;
++      u64 lim_bw;
++      int throttle;
++      bool after_clk_commit;
++      struct list_head link;
++};
++
++struct bus_rule_type {
++      int num_src;
++      int *src_id;
++      int src_field;
++      int op;
++      u64 thresh;
++      int num_dst;
++      int *dst_node;
++      u64 dst_bw;
++      int mode;
++      void *client_data;
++};
++
++#if (defined(CONFIG_BUS_TOPOLOGY_ADHOC))
++void msm_rule_register(int num_rules, struct bus_rule_type *rule,
++                              struct notifier_block *nb);
++void msm_rule_unregister(int num_rules, struct bus_rule_type *rule,
++                                              struct notifier_block *nb);
++void print_rules_buf(char *buf, int count);
++bool msm_rule_are_rules_registered(void);
++#else
++static inline void msm_rule_register(int num_rules, struct bus_rule_type *rule,
++                              struct notifier_block *nb)
++{
++}
++static inline void msm_rule_unregister(int num_rules,
++                                      struct bus_rule_type *rule,
++                                      struct notifier_block *nb)
++{
++}
++static inline void print_rules_buf(char *buf, int count)
++{
++}
++static inline bool msm_rule_are_rules_registered(void)
++{
++      return false;
++}
++#endif /* defined(CONFIG_BUS_TOPOLOGY_ADHOC) */
++#endif /* _ARCH_ARM_MACH_MSM_BUS_RULES_H */
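For orientation, here is a minimal client sketch of the interface declared above (illustrative only, not part of the patch). It assumes the OP_*, FLD_*, THROTTLE_* and RULE_STATE_* constants come from the dt-bindings header included by msm_bus_rules.h, and all node ids and thresholds are invented. With a notifier block the rule is tracked on the internal NB_ID node and the callback fires on every state change, receiving the rule state as the notifier action and a pointer to the matching bus_rule_type as data (see raw_notifier_call_chain() in msm_bus_rules.c above); with nb == NULL the rules engine instead queues a throttle action towards dst_node whenever the rule applies.

/* Hypothetical client -- node ids and threshold are made up. */
#include <linux/kernel.h>
#include <linux/notifier.h>
#include "msm_bus_rules.h"

static int example_rule_cb(struct notifier_block *nb,
			   unsigned long state, void *data)
{
	struct bus_rule_type *rule = data;	/* rule that changed state */

	pr_info("bus rule %s, thresh=%llu\n",
		state == RULE_STATE_APPLIED ? "applied" : "released",
		rule->thresh);
	return NOTIFY_OK;
}

static struct notifier_block example_nb = {
	.notifier_call = example_rule_cb,
};

static int example_src[] = { 1 };	/* source node ids (invented) */
static int example_dst[] = { 512 };	/* destination node id (invented) */

static struct bus_rule_type example_rule = {
	.num_src	= ARRAY_SIZE(example_src),
	.src_id		= example_src,
	.src_field	= FLD_AB,	/* compare the summed ab votes */
	.op		= OP_GE,
	.thresh		= 1600000000ULL,	/* bytes/s */
	.num_dst	= ARRAY_SIZE(example_dst),
	.dst_node	= example_dst,
	.mode		= THROTTLE_ON,
};

static void example_register(void)
{
	msm_rule_register(1, &example_rule, &example_nb);
}

A matching msm_rule_unregister(1, &example_rule, &example_nb) removes the rule again.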
+--- /dev/null
++++ b/drivers/bus/msm_bus/msm_buspm_coresight_adhoc.c
+@@ -0,0 +1,189 @@
++/* Copyright (c) 2014 The Linux Foundation. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 and
++ * only version 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ */
++
++#include <linux/clk.h>
++#include <linux/module.h>
++#include <linux/device.h>
++#include <linux/platform_device.h>
++#include <linux/err.h>
++#include <linux/slab.h>
++#include <linux/errno.h>
++#include <linux/uaccess.h>
++#include <linux/miscdevice.h>
++#include <linux/of_coresight.h>
++#include <linux/coresight.h>
++#include <linux/io.h>
++#include <linux/of.h>
++#include <linux/list.h>
++
++struct msmbus_coresight_adhoc_clock_drvdata {
++      int                              id;
++      struct clk                      *clk;
++      struct list_head                 list;
++};
++
++struct msmbus_coresight_adhoc_drvdata {
++      struct device                   *dev;
++      struct coresight_device         *csdev;
++      struct coresight_desc           *desc;
++      struct list_head                 clocks;
++};
++
++static int msmbus_coresight_enable_adhoc(struct coresight_device *csdev)
++{
++      struct msmbus_coresight_adhoc_clock_drvdata *clk;
++      struct msmbus_coresight_adhoc_drvdata *drvdata =
++              dev_get_drvdata(csdev->dev.parent);
++      long rate;
++
++      list_for_each_entry(clk, &drvdata->clocks, list) {
++              if (clk->id == csdev->id) {
++                      rate = clk_round_rate(clk->clk, 1L);
++                      clk_set_rate(clk->clk, rate);
++                      return clk_prepare_enable(clk->clk);
++              }
++      }
++
++      return -ENOENT;
++}
++
++static void msmbus_coresight_disable_adhoc(struct coresight_device *csdev)
++{
++      struct msmbus_coresight_adhoc_clock_drvdata *clk;
++      struct msmbus_coresight_adhoc_drvdata *drvdata =
++              dev_get_drvdata(csdev->dev.parent);
++
++      list_for_each_entry(clk, &drvdata->clocks, list) {
++              if (clk->id == csdev->id)
++                      clk_disable_unprepare(clk->clk);
++      }
++}
++
++static const struct coresight_ops_source msmbus_coresight_adhoc_source_ops = {
++      .enable         = msmbus_coresight_enable_adhoc,
++      .disable        = msmbus_coresight_disable_adhoc,
++};
++
++static const struct coresight_ops msmbus_coresight_cs_ops = {
++      .source_ops     = &msmbus_coresight_adhoc_source_ops,
++};
++
++void msmbus_coresight_remove_adhoc(struct platform_device *pdev)
++{
++      struct msmbus_coresight_adhoc_clock_drvdata *clk, *next_clk;
++      struct msmbus_coresight_adhoc_drvdata *drvdata =
++              platform_get_drvdata(pdev);
++
++      msmbus_coresight_disable_adhoc(drvdata->csdev);
++      coresight_unregister(drvdata->csdev);
++      list_for_each_entry_safe(clk, next_clk, &drvdata->clocks, list) {
++              list_del(&clk->list);
++              devm_kfree(&pdev->dev, clk);
++      }
++      devm_kfree(&pdev->dev, drvdata->desc);
++      devm_kfree(&pdev->dev, drvdata);
++      platform_set_drvdata(pdev, NULL);
++}
++EXPORT_SYMBOL(msmbus_coresight_remove_adhoc);
++
++static int buspm_of_get_clk_adhoc(struct device_node *of_node,
++      struct msmbus_coresight_adhoc_drvdata *drvdata, int id)
++{
++      struct msmbus_coresight_adhoc_clock_drvdata *clk;
++
++      clk = devm_kzalloc(drvdata->dev, sizeof(*clk), GFP_KERNEL);
++      if (!clk)
++              return -ENOMEM;
++
++      clk->id = id;
++
++      clk->clk = of_clk_get_by_name(of_node, "bus_clk");
++      if (IS_ERR(clk->clk)) {
++              pr_err("Error: unable to get clock for coresight node %d\n",
++                      id);
++              goto err;
++      }
++
++      list_add(&clk->list, &drvdata->clocks);
++      return 0;
++
++err:
++      devm_kfree(drvdata->dev, clk);
++      return -EINVAL;
++}
++
++int msmbus_coresight_init_adhoc(struct platform_device *pdev,
++              struct device_node *of_node)
++{
++      int ret;
++      struct device *dev = &pdev->dev;
++      struct coresight_platform_data *pdata;
++      struct msmbus_coresight_adhoc_drvdata *drvdata;
++      struct coresight_desc *desc;
++
++      pdata = of_get_coresight_platform_data(dev, of_node);
++      if (IS_ERR(pdata))
++              return PTR_ERR(pdata);
++
++      drvdata = platform_get_drvdata(pdev);
++      if (IS_ERR_OR_NULL(drvdata)) {
++              drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
++              if (!drvdata) {
++                      pr_err("coresight: Alloc for drvdata failed\n");
++                      return -ENOMEM;
++              }
++              INIT_LIST_HEAD(&drvdata->clocks);
++              drvdata->dev = &pdev->dev;
++              platform_set_drvdata(pdev, drvdata);
++      }
++      ret = buspm_of_get_clk_adhoc(of_node, drvdata, pdata->id);
++      if (ret) {
++              pr_err("Error getting clocks\n");
++              ret = -ENXIO;
++              goto err1;
++      }
++
++      desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
++      if (!desc) {
++              pr_err("coresight: Error allocating memory\n");
++              ret = -ENOMEM;
++              goto err1;
++      }
++
++      desc->type = CORESIGHT_DEV_TYPE_SOURCE;
++      desc->subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_BUS;
++      desc->ops = &msmbus_coresight_cs_ops;
++      desc->pdata = pdata;
++      desc->dev = &pdev->dev;
++      desc->owner = THIS_MODULE;
++      drvdata->desc = desc;
++      drvdata->csdev = coresight_register(desc);
++      if (IS_ERR(drvdata->csdev)) {
++              pr_err("coresight: Coresight register failed\n");
++              ret = PTR_ERR(drvdata->csdev);
++              goto err0;
++      }
++
++      dev_info(dev, "msmbus_coresight initialized\n");
++
++      return 0;
++err0:
++      devm_kfree(dev, desc);
++err1:
++      devm_kfree(dev, drvdata);
++      platform_set_drvdata(pdev, NULL);
++      return ret;
++}
++EXPORT_SYMBOL(msmbus_coresight_init_adhoc);
++
++MODULE_LICENSE("GPL v2");
++MODULE_DESCRIPTION("MSM BusPM Adhoc CoreSight Driver");
+--- /dev/null
++++ b/drivers/bus/msm_bus/rpm-smd.h
+@@ -0,0 +1,268 @@
++/* Copyright (c) 2012, 2014, The Linux Foundation. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 and
++ * only version 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ */
++
++#ifndef __ARCH_ARM_MACH_MSM_RPM_SMD_H
++#define __ARCH_ARM_MACH_MSM_RPM_SMD_H
++
++/**
++ * enum msm_rpm_set - RPM enumerations for sleep/active set
++ * %MSM_RPM_CTX_ACTIVE_SET: Set resource parameters for active mode.
++ * %MSM_RPM_CTX_SLEEP_SET: Set resource parameters for sleep.
++ */
++enum msm_rpm_set {
++      MSM_RPM_CTX_ACTIVE_SET,
++      MSM_RPM_CTX_SLEEP_SET,
++};
++
++struct msm_rpm_request;
++
++struct msm_rpm_kvp {
++      uint32_t key;
++      uint32_t length;
++      uint8_t *data;
++};
++#ifdef CONFIG_MSM_RPM_SMD
++/**
++ * msm_rpm_create_request() - Creates a parent element to identify the
++ * resource on the RPM, that stores the KVPs for different fields modified
++ * for a hardware resource
++ *
++ * @set: if the device is setting the active/sleep set parameter
++ * for the resource
++ * @rsc_type: unsigned 32 bit integer that identifies the type of the resource
++ * @rsc_id: unsigned 32 bit that uniquely identifies a resource within a type
++ * @num_elements: number of KVP pairs associated with the resource
++ *
++ * returns pointer to a msm_rpm_request on success, NULL on error
++ */
++struct msm_rpm_request *msm_rpm_create_request(
++              enum msm_rpm_set set, uint32_t rsc_type,
++              uint32_t rsc_id, int num_elements);
++
++/**
++ * msm_rpm_create_request_noirq() - Creates a parent element to identify the
++ * resource on the RPM, that stores the KVPs for different fields modified
++ * for a hardware resource. This function is similar to msm_rpm_create_request
++ * except that it has to be called with interrupts masked.
++ *
++ * @set: if the device is setting the active/sleep set parameter
++ * for the resource
++ * @rsc_type: unsigned 32 bit integer that identifies the type of the resource
++ * @rsc_id: unsigned 32 bit that uniquely identifies a resource within a type
++ * @num_elements: number of KVP pairs associated with the resource
++ *
++ * returns pointer to a msm_rpm_request on success, NULL on error
++ */
++struct msm_rpm_request *msm_rpm_create_request_noirq(
++              enum msm_rpm_set set, uint32_t rsc_type,
++              uint32_t rsc_id, int num_elements);
++
++/**
++ * msm_rpm_add_kvp_data() - Adds a key-value pair to an existing RPM resource.
++ *
++ * @handle: RPM resource handle to which the data should be appended
++ * @key:  unsigned integer identifying the parameter being modified
++ * @data: byte array that contains the value corresponding to key.
++ * @size:   size of data in bytes.
++ *
++ * returns 0 on success or errno
++ */
++int msm_rpm_add_kvp_data(struct msm_rpm_request *handle,
++              uint32_t key, const uint8_t *data, int size);
++
++/**
++ * msm_rpm_add_kvp_data_noirq() - Adds a key-value pair to an existing RPM
++ * resource. This function is similar to msm_rpm_add_kvp_data except that it
++ * has to be called with interrupts masked.
++ *
++ * @handle: RPM resource handle to which the data should be appended
++ * @key:  unsigned integer identifying the parameter being modified
++ * @data: byte array that contains the value corresponding to key.
++ * @size:   size of data in bytes.
++ *
++ * returns 0 on success or errno
++ */
++int msm_rpm_add_kvp_data_noirq(struct msm_rpm_request *handle,
++              uint32_t key, const uint8_t *data, int size);
++
++/**
++ * msm_rpm_free_request() - clean up the RPM request handle created with
++ * msm_rpm_create_request
++ *
++ * @handle: RPM resource handle to be cleared.
++ */
++void msm_rpm_free_request(struct msm_rpm_request *handle);
++
++/**
++ * msm_rpm_send_request() - Send the RPM messages using SMD. The function
++ * assigns a message id before sending the data out to the RPM. RPM hardware
++ * uses the message id to acknowledge the messages.
++ *
++ * @handle: pointer to the msm_rpm_request for the resource being modified.
++ *
++ * returns non-zero message id on success and zero on a failed transaction.
++ * The drivers use message id to wait for ACK from RPM.
++ */
++int msm_rpm_send_request(struct msm_rpm_request *handle);
++
++/**
++ * msm_rpm_send_request_noirq() - Send the RPM messages using SMD. The
++ * function assigns a message id before sending the data out to the RPM.
++ * RPM hardware uses the message id to acknowledge the messages. This function
++ * is similar to msm_rpm_send_request except that it has to be called with
++ * interrupts masked.
++ *
++ * @handle: pointer to the msm_rpm_request for the resource being modified.
++ *
++ * returns non-zero message id on success and zero on a failed transaction.
++ * The drivers use message id to wait for ACK from RPM.
++ */
++int msm_rpm_send_request_noirq(struct msm_rpm_request *handle);
++
++/**
++ * msm_rpm_wait_for_ack() - A blocking call that waits for acknowledgment of
++ * a message from RPM.
++ *
++ * @msg_id: the return from msm_rpm_send_request
++ *
++ * returns 0 on success or errno
++ */
++int msm_rpm_wait_for_ack(uint32_t msg_id);
++
++/**
++ * msm_rpm_wait_for_ack_noirq() - A blocking call that waits for acknowledgment
++ * of a message from RPM. This function is similar to msm_rpm_wait_for_ack
++ * except that it has to be called with interrupts masked.
++ *
++ * @msg_id: the return from msm_rpm_send_request
++ *
++ * returns 0 on success or errno
++ */
++int msm_rpm_wait_for_ack_noirq(uint32_t msg_id);
++
++/**
++ * msm_rpm_send_message() - Wrapper function for clients to send data given an
++ * array of key value pairs.
++ *
++ * @set: if the device is setting the active/sleep set parameter
++ * for the resource
++ * @rsc_type: unsigned 32 bit integer that identifies the type of the resource
++ * @rsc_id: unsigned 32 bit that uniquely identifies a resource within a type
++ * @kvp: array of KVP data.
++ * @nelems: number of KVP pairs associated with the message.
++ *
++ * returns  0 on success and errno on failure.
++ */
++int msm_rpm_send_message(enum msm_rpm_set set, uint32_t rsc_type,
++              uint32_t rsc_id, struct msm_rpm_kvp *kvp, int nelems);
++
++/**
++ * msm_rpm_send_message_noirq() - Wrapper function for clients to send data
++ * given an array of key value pairs. This function is similar to the
++ * msm_rpm_send_message() except that it has to be called with interrupts
++ * disabled. Clients should choose the irq version when possible for system
++ * performance.
++ *
++ * @set: if the device is setting the active/sleep set parameter
++ * for the resource
++ * @rsc_type: unsigned 32 bit integer that identifies the type of the resource
++ * @rsc_id: unsigned 32 bit that uniquely identifies a resource within a type
++ * @kvp: array of KVP data.
++ * @nelems: number of KVP pairs associated with the message.
++ *
++ * returns  0 on success and errno on failure.
++ */
++int msm_rpm_send_message_noirq(enum msm_rpm_set set, uint32_t rsc_type,
++              uint32_t rsc_id, struct msm_rpm_kvp *kvp, int nelems);
++
++/**
++ * msm_rpm_driver_init() - Initialization function that registers for a
++ * rpm platform driver.
++ *
++ * returns 0 on success.
++ */
++int __init msm_rpm_driver_init(void);
++
++#else
++
++static inline struct msm_rpm_request *msm_rpm_create_request(
++              enum msm_rpm_set set, uint32_t rsc_type,
++              uint32_t rsc_id, int num_elements)
++{
++      return NULL;
++}
++
++static inline struct msm_rpm_request *msm_rpm_create_request_noirq(
++              enum msm_rpm_set set, uint32_t rsc_type,
++              uint32_t rsc_id, int num_elements)
++{
++      return NULL;
++
++}
++static inline uint32_t msm_rpm_add_kvp_data(struct msm_rpm_request *handle,
++              uint32_t key, const uint8_t *data, int count)
++{
++      return 0;
++}
++static inline uint32_t msm_rpm_add_kvp_data_noirq(
++              struct msm_rpm_request *handle, uint32_t key,
++              const uint8_t *data, int count)
++{
++      return 0;
++}
++
++static inline void msm_rpm_free_request(struct msm_rpm_request *handle)
++{
++      return;
++}
++
++static inline int msm_rpm_send_request(struct msm_rpm_request *handle)
++{
++      return 0;
++}
++
++static inline int msm_rpm_send_request_noirq(struct msm_rpm_request *handle)
++{
++      return 0;
++
++}
++
++static inline int msm_rpm_send_message(enum msm_rpm_set set, uint32_t rsc_type,
++              uint32_t rsc_id, struct msm_rpm_kvp *kvp, int nelems)
++{
++      return 0;
++}
++
++static inline int msm_rpm_send_message_noirq(enum msm_rpm_set set,
++              uint32_t rsc_type, uint32_t rsc_id, struct msm_rpm_kvp *kvp,
++              int nelems)
++{
++      return 0;
++}
++
++static inline int msm_rpm_wait_for_ack(uint32_t msg_id)
++{
++      return 0;
++
++}
++static inline int msm_rpm_wait_for_ack_noirq(uint32_t msg_id)
++{
++      return 0;
++}
++
++static inline int __init msm_rpm_driver_init(void)
++{
++      return 0;
++}
++#endif
++#endif /*__ARCH_ARM_MACH_MSM_RPM_SMD_H*/
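Purely as a usage illustration of the API documented above (a sketch under assumptions, not code from this patch; the resource type, resource id and key below are invented), the intended flow is: create a request handle for one resource, attach the key/value pairs, send it, wait for the RPM to acknowledge the returned message id, then free the handle.

/* Hypothetical caller -- rsc_type, rsc_id and key values are made up. */
#include <linux/errno.h>
#include <linux/types.h>
#include "rpm-smd.h"

static int example_rpm_vote(void)
{
	struct msm_rpm_request *req;
	uint32_t value = 100000;
	int msg_id, ret;

	req = msm_rpm_create_request(MSM_RPM_CTX_ACTIVE_SET,
				     0x12345678 /* rsc_type */, 0 /* rsc_id */,
				     1 /* one KVP */);
	if (!req)
		return -ENOMEM;

	ret = msm_rpm_add_kvp_data(req, 0x6b657930 /* key */,
				   (const uint8_t *)&value, sizeof(value));
	if (ret)
		goto out;

	msg_id = msm_rpm_send_request(req);	/* zero means the send failed */
	if (!msg_id) {
		ret = -EIO;
		goto out;
	}
	ret = msm_rpm_wait_for_ack(msg_id);	/* block until the RPM acks */
out:
	msm_rpm_free_request(req);
	return ret;
}

msm_rpm_send_message() collapses the same sequence into a single call for an array of struct msm_rpm_kvp, and the *_noirq variants cover paths that run with interrupts masked.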
+--- /dev/null
++++ b/include/trace/events/trace_msm_bus.h
+@@ -0,0 +1,163 @@
++/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 and
++ * only version 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ */
++
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM msm_bus
++
++#if !defined(_TRACE_MSM_BUS_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_MSM_BUS_H
++
++#include <linux/tracepoint.h>
++
++TRACE_EVENT(bus_update_request,
++
++      TP_PROTO(int sec, int nsec, const char *name, unsigned int index,
++              int src, int dest, unsigned long long ab,
++              unsigned long long ib),
++
++      TP_ARGS(sec, nsec, name, index, src, dest, ab, ib),
++
++      TP_STRUCT__entry(
++              __field(int, sec)
++              __field(int, nsec)
++              __string(name, name)
++              __field(u32, index)
++              __field(int, src)
++              __field(int, dest)
++              __field(u64, ab)
++              __field(u64, ib)
++      ),
++
++      TP_fast_assign(
++              __entry->sec = sec;
++              __entry->nsec = nsec;
++              __assign_str(name, name);
++              __entry->index = index;
++              __entry->src = src;
++              __entry->dest = dest;
++              __entry->ab = ab;
++              __entry->ib = ib;
++      ),
++
++      TP_printk("time= %d.%d name=%s index=%u src=%d dest=%d ab=%llu ib=%llu",
++              __entry->sec,
++              __entry->nsec,
++              __get_str(name),
++              (unsigned int)__entry->index,
++              __entry->src,
++              __entry->dest,
++              (unsigned long long)__entry->ab,
++              (unsigned long long)__entry->ib)
++);
++
++TRACE_EVENT(bus_bimc_config_limiter,
++
++      TP_PROTO(int mas_id, unsigned long long cur_lim_bw),
++
++      TP_ARGS(mas_id, cur_lim_bw),
++
++      TP_STRUCT__entry(
++              __field(int, mas_id)
++              __field(u64, cur_lim_bw)
++      ),
++
++      TP_fast_assign(
++              __entry->mas_id = mas_id;
++              __entry->cur_lim_bw = cur_lim_bw;
++      ),
++
++      TP_printk("Master=%d cur_lim_bw=%llu",
++              __entry->mas_id,
++              (unsigned long long)__entry->cur_lim_bw)
++);
++
++TRACE_EVENT(bus_avail_bw,
++
++      TP_PROTO(unsigned long long cur_bimc_bw, unsigned long long cur_mdp_bw),
++
++      TP_ARGS(cur_bimc_bw, cur_mdp_bw),
++
++      TP_STRUCT__entry(
++              __field(u64, cur_bimc_bw)
++              __field(u64, cur_mdp_bw)
++      ),
++
++      TP_fast_assign(
++              __entry->cur_bimc_bw = cur_bimc_bw;
++              __entry->cur_mdp_bw = cur_mdp_bw;
++      ),
++
++      TP_printk("cur_bimc_bw = %llu cur_mdp_bw = %llu",
++              (unsigned long long)__entry->cur_bimc_bw,
++              (unsigned long long)__entry->cur_mdp_bw)
++);
++
++TRACE_EVENT(bus_rules_matches,
++
++      TP_PROTO(int node_id, int rule_id, unsigned long long node_ab,
++              unsigned long long node_ib, unsigned long long node_clk),
++
++      TP_ARGS(node_id, rule_id, node_ab, node_ib, node_clk),
++
++      TP_STRUCT__entry(
++              __field(int, node_id)
++              __field(int, rule_id)
++              __field(u64, node_ab)
++              __field(u64, node_ib)
++              __field(u64, node_clk)
++      ),
++
++      TP_fast_assign(
++              __entry->node_id = node_id;
++              __entry->rule_id = rule_id;
++              __entry->node_ab = node_ab;
++              __entry->node_ib = node_ib;
++              __entry->node_clk = node_clk;
++      ),
++
++      TP_printk("Rule match node%d rule%d node-ab%llu:ib%llu:clk%llu",
++              __entry->node_id, __entry->rule_id,
++              (unsigned long long)__entry->node_ab,
++              (unsigned long long)__entry->node_ib,
++              (unsigned long long)__entry->node_clk)
++);
++
++TRACE_EVENT(bus_bke_params,
++
++      TP_PROTO(u32 gc, u32 gp, u32 thl, u32 thm, u32 thh),
++
++      TP_ARGS(gc, gp, thl, thm, thh),
++
++      TP_STRUCT__entry(
++              __field(u32, gc)
++              __field(u32, gp)
++              __field(u32, thl)
++              __field(u32, thm)
++              __field(u32, thh)
++      ),
++
++      TP_fast_assign(
++              __entry->gc = gc;
++              __entry->gp = gp;
++              __entry->thl = thl;
++              __entry->thm = thm;
++              __entry->thh = thh;
++      ),
++
++      TP_printk("BKE Params GC=0x%x GP=0x%x THL=0x%x THM=0x%x THH=0x%x",
++              __entry->gc, __entry->gp, __entry->thl, __entry->thm,
++                      __entry->thh)
++);
++
++#endif
++#define TRACE_INCLUDE_FILE trace_msm_bus
++#include <trace/define_trace.h>
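For orientation (illustrative, not part of the patch): each TRACE_EVENT() above expands into a trace_<name>() helper with the TP_PROTO signature, which the bus code calls at the matching point; msm_bus_rules.c above already emits trace_bus_rules_matches(). A hedged call-site sketch for bus_update_request, with invented values:

/* Hypothetical call site -- all numbers are made up. */
#define CREATE_TRACE_POINTS	/* defined in exactly one .c file */
#include <trace/events/trace_msm_bus.h>

static void example_trace_vote(void)
{
	/* emits one msm_bus:bus_update_request event */
	trace_bus_update_request(12, 500000000, "example-client", 0,
				 1, 512, 800000ULL, 1600000ULL);
}

At runtime the events appear under the msm_bus group, e.g. echo 1 > /sys/kernel/debug/tracing/events/msm_bus/bus_update_request/enable.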
diff --git a/target/linux/ipq40xx/patches-4.14/605-net-IPQ4019-needs-rfs-vlan_tag-callbacks-in.patch b/target/linux/ipq40xx/patches-4.14/605-net-IPQ4019-needs-rfs-vlan_tag-callbacks-in.patch
new file mode 100644 (file)
index 0000000..a52fe28
--- /dev/null
@@ -0,0 +1,53 @@
+From 7c129254adb1093d10a62ed7bf7b956fcc6ffe34 Mon Sep 17 00:00:00 2001
+From: Rakesh Nair <ranair@codeaurora.org>
+Date: Wed, 20 Jul 2016 15:02:01 +0530
+Subject: [PATCH] net: IPQ4019 needs rfs/vlan_tag callbacks in
+ netdev_ops
+
+Add callback support to get default vlan tag and register
+receive flow steering filter.
+
+Used by IPQ4019 ess-edma driver.
+
+BUG=chrome-os-partner:33096
+TEST=none
+
+Change-Id: I266070e4a0fbe4a0d9966fe79a71e50ec4f26c75
+Signed-off-by: Rakesh Nair <ranair@codeaurora.org>
+Reviewed-on: https://chromium-review.googlesource.com/362203
+Commit-Ready: Grant Grundler <grundler@chromium.org>
+Tested-by: Grant Grundler <grundler@chromium.org>
+Reviewed-by: Grant Grundler <grundler@chromium.org>
+---
+ include/linux/netdevice.h | 13 +++++++++++++
+ 1 file changed, 13 insertions(+)
+
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -713,6 +713,16 @@ struct xps_map {
+ #define XPS_MIN_MAP_ALLOC ((L1_CACHE_ALIGN(offsetof(struct xps_map, queues[1])) \
+        - sizeof(struct xps_map)) / sizeof(u16))
++#ifdef CONFIG_RFS_ACCEL
++typedef int (*set_rfs_filter_callback_t)(struct net_device *dev,
++                                     __be32 src,
++                                     __be32 dst,
++                                     __be16 sport,
++                                     __be16 dport,
++                                     u8 proto,
++                                     u16 rxq_index,
++                                     u32 action);
++#endif
+ /*
+  * This structure holds all XPS maps for device.  Maps are indexed by CPU.
+  */
+@@ -1239,6 +1249,9 @@ struct net_device_ops {
+                                                    const struct sk_buff *skb,
+                                                    u16 rxq_index,
+                                                    u32 flow_id);
++        int                     (*ndo_register_rfs_filter)(struct net_device *dev,
++                                                              set_rfs_filter_callback_t set_filter);
++        int                     (*ndo_get_default_vlan_tag)(struct net_device *net);
+ #endif
+       int                     (*ndo_add_slave)(struct net_device *dev,
+                                                struct net_device *slave_dev);
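To show what the two new hooks look like from a driver's point of view, here is a hypothetical sketch (not taken from the ess-edma driver; every name below is invented): the MAC driver exposes ndo_register_rfs_filter so other code can hand it a set_rfs_filter_callback_t used to program hardware flow steering, and ndo_get_default_vlan_tag reports the port's default VLAN id.

/* Hypothetical ethernet driver wiring up the new callbacks. */
#include <linux/netdevice.h>

#ifdef CONFIG_RFS_ACCEL
static set_rfs_filter_callback_t example_rfs_cb;

static int example_register_rfs_filter(struct net_device *dev,
					set_rfs_filter_callback_t set_filter)
{
	/* remember the callback; the RX path would invoke it per flow */
	example_rfs_cb = set_filter;
	return 0;
}

static int example_get_default_vlan_tag(struct net_device *dev)
{
	return 1;	/* default VID of this port (invented) */
}
#endif

static const struct net_device_ops example_netdev_ops = {
#ifdef CONFIG_RFS_ACCEL
	.ndo_register_rfs_filter	= example_register_rfs_filter,
	.ndo_get_default_vlan_tag	= example_get_default_vlan_tag,
#endif
	/* ...plus the usual ndo_open/ndo_stop/ndo_start_xmit... */
};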
diff --git a/target/linux/ipq40xx/patches-4.14/700-net-add-qualcomm-mdio-and-phy.patch b/target/linux/ipq40xx/patches-4.14/700-net-add-qualcomm-mdio-and-phy.patch
new file mode 100644 (file)
index 0000000..c6e7155
--- /dev/null
@@ -0,0 +1,2690 @@
+From 5a71a2005a2e1e6bbe36f00386c495ad6626beb2 Mon Sep 17 00:00:00 2001
+From: Christian Lamparter <chunkeey@googlemail.com>
+Date: Thu, 19 Jan 2017 01:59:43 +0100
+Subject: [PATCH 30/38] NET: add qualcomm mdio and PHY
+
+---
+ drivers/net/phy/Kconfig  | 14 ++++++++++++++
+ drivers/net/phy/Makefile |  2 ++
+ 2 files changed, 16 insertions(+)
+
+--- a/drivers/net/phy/Kconfig
++++ b/drivers/net/phy/Kconfig
+@@ -481,6 +481,20 @@ config XILINX_GMII2RGMII
+         the Reduced Gigabit Media Independent Interface(RGMII) between
+         Ethernet physical media devices and the Gigabit Ethernet controller.
++config MDIO_IPQ40XX
++      tristate "Qualcomm Atheros ipq40xx MDIO interface"
++      depends on HAS_IOMEM && OF
++      ---help---
++        This driver supports the MDIO interface found in Qualcomm
++        Atheros ipq40xx SoC.
++
++config AR40XX_PHY
++      tristate "Driver for Qualcomm Atheros IPQ40XX switches"
++      depends on HAS_IOMEM && OF
++      select SWCONFIG
++      ---help---
++         This is the driver for Qualcomm Atheros IPQ40XX ESS switches.
++
+ endif # PHYLIB
+ config MICREL_KS8995MA
+--- a/drivers/net/phy/Makefile
++++ b/drivers/net/phy/Makefile
+@@ -48,6 +48,7 @@ obj-$(CONFIG_MDIO_CAVIUM)    += mdio-cavium
+ obj-$(CONFIG_MDIO_GPIO)               += mdio-gpio.o
+ obj-$(CONFIG_MDIO_HISI_FEMAC) += mdio-hisi-femac.o
+ obj-$(CONFIG_MDIO_I2C)                += mdio-i2c.o
++obj-$(CONFIG_MDIO_IPQ40XX)    += mdio-ipq40xx.o
+ obj-$(CONFIG_MDIO_MOXART)     += mdio-moxart.o
+ obj-$(CONFIG_MDIO_OCTEON)     += mdio-octeon.o
+ obj-$(CONFIG_MDIO_SUN4I)      += mdio-sun4i.o
+@@ -60,6 +61,7 @@ obj-y                                += $(sfp-obj-y) $(sfp-obj-m)
+ obj-$(CONFIG_AMD_PHY)         += amd.o
+ obj-$(CONFIG_AQUANTIA_PHY)    += aquantia.o
++obj-$(CONFIG_AR40XX_PHY)      += ar40xx.o
+ obj-$(CONFIG_AT803X_PHY)      += at803x.o
+ obj-$(CONFIG_BCM63XX_PHY)     += bcm63xx.o
+ obj-$(CONFIG_BCM7XXX_PHY)     += bcm7xxx.o
+--- /dev/null
++++ b/drivers/net/phy/ar40xx.c
+@@ -0,0 +1,2090 @@
++/*
++ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
++ *
++ * Permission to use, copy, modify, and/or distribute this software for
++ * any purpose with or without fee is hereby granted, provided that the
++ * above copyright notice and this permission notice appear in all copies.
++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
++ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
++ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
++ * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
++ */
++
++#include <linux/module.h>
++#include <linux/list.h>
++#include <linux/bitops.h>
++#include <linux/switch.h>
++#include <linux/delay.h>
++#include <linux/phy.h>
++#include <linux/clk.h>
++#include <linux/reset.h>
++#include <linux/lockdep.h>
++#include <linux/workqueue.h>
++#include <linux/of_device.h>
++#include <linux/of_address.h>
++#include <linux/mdio.h>
++#include <linux/gpio.h>
++
++#include "ar40xx.h"
++
++static struct ar40xx_priv *ar40xx_priv;
++
++#define MIB_DESC(_s, _o, _n) \
++      {                       \
++              .size = (_s),   \
++              .offset = (_o), \
++              .name = (_n),   \
++      }
++
++static const struct ar40xx_mib_desc ar40xx_mibs[] = {
++      MIB_DESC(1, AR40XX_STATS_RXBROAD, "RxBroad"),
++      MIB_DESC(1, AR40XX_STATS_RXPAUSE, "RxPause"),
++      MIB_DESC(1, AR40XX_STATS_RXMULTI, "RxMulti"),
++      MIB_DESC(1, AR40XX_STATS_RXFCSERR, "RxFcsErr"),
++      MIB_DESC(1, AR40XX_STATS_RXALIGNERR, "RxAlignErr"),
++      MIB_DESC(1, AR40XX_STATS_RXRUNT, "RxRunt"),
++      MIB_DESC(1, AR40XX_STATS_RXFRAGMENT, "RxFragment"),
++      MIB_DESC(1, AR40XX_STATS_RX64BYTE, "Rx64Byte"),
++      MIB_DESC(1, AR40XX_STATS_RX128BYTE, "Rx128Byte"),
++      MIB_DESC(1, AR40XX_STATS_RX256BYTE, "Rx256Byte"),
++      MIB_DESC(1, AR40XX_STATS_RX512BYTE, "Rx512Byte"),
++      MIB_DESC(1, AR40XX_STATS_RX1024BYTE, "Rx1024Byte"),
++      MIB_DESC(1, AR40XX_STATS_RX1518BYTE, "Rx1518Byte"),
++      MIB_DESC(1, AR40XX_STATS_RXMAXBYTE, "RxMaxByte"),
++      MIB_DESC(1, AR40XX_STATS_RXTOOLONG, "RxTooLong"),
++      MIB_DESC(2, AR40XX_STATS_RXGOODBYTE, "RxGoodByte"),
++      MIB_DESC(2, AR40XX_STATS_RXBADBYTE, "RxBadByte"),
++      MIB_DESC(1, AR40XX_STATS_RXOVERFLOW, "RxOverFlow"),
++      MIB_DESC(1, AR40XX_STATS_FILTERED, "Filtered"),
++      MIB_DESC(1, AR40XX_STATS_TXBROAD, "TxBroad"),
++      MIB_DESC(1, AR40XX_STATS_TXPAUSE, "TxPause"),
++      MIB_DESC(1, AR40XX_STATS_TXMULTI, "TxMulti"),
++      MIB_DESC(1, AR40XX_STATS_TXUNDERRUN, "TxUnderRun"),
++      MIB_DESC(1, AR40XX_STATS_TX64BYTE, "Tx64Byte"),
++      MIB_DESC(1, AR40XX_STATS_TX128BYTE, "Tx128Byte"),
++      MIB_DESC(1, AR40XX_STATS_TX256BYTE, "Tx256Byte"),
++      MIB_DESC(1, AR40XX_STATS_TX512BYTE, "Tx512Byte"),
++      MIB_DESC(1, AR40XX_STATS_TX1024BYTE, "Tx1024Byte"),
++      MIB_DESC(1, AR40XX_STATS_TX1518BYTE, "Tx1518Byte"),
++      MIB_DESC(1, AR40XX_STATS_TXMAXBYTE, "TxMaxByte"),
++      MIB_DESC(1, AR40XX_STATS_TXOVERSIZE, "TxOverSize"),
++      MIB_DESC(2, AR40XX_STATS_TXBYTE, "TxByte"),
++      MIB_DESC(1, AR40XX_STATS_TXCOLLISION, "TxCollision"),
++      MIB_DESC(1, AR40XX_STATS_TXABORTCOL, "TxAbortCol"),
++      MIB_DESC(1, AR40XX_STATS_TXMULTICOL, "TxMultiCol"),
++      MIB_DESC(1, AR40XX_STATS_TXSINGLECOL, "TxSingleCol"),
++      MIB_DESC(1, AR40XX_STATS_TXEXCDEFER, "TxExcDefer"),
++      MIB_DESC(1, AR40XX_STATS_TXDEFER, "TxDefer"),
++      MIB_DESC(1, AR40XX_STATS_TXLATECOL, "TxLateCol"),
++};
++
++static u32
++ar40xx_read(struct ar40xx_priv *priv, int reg)
++{
++      return readl(priv->hw_addr + reg);
++}
++
++static u32
++ar40xx_psgmii_read(struct ar40xx_priv *priv, int reg)
++{
++      return readl(priv->psgmii_hw_addr + reg);
++}
++
++static void
++ar40xx_write(struct ar40xx_priv *priv, int reg, u32 val)
++{
++      writel(val, priv->hw_addr + reg);
++}
++
++static u32
++ar40xx_rmw(struct ar40xx_priv *priv, int reg, u32 mask, u32 val)
++{
++      u32 ret;
++
++      ret = ar40xx_read(priv, reg);
++      ret &= ~mask;
++      ret |= val;
++      ar40xx_write(priv, reg, ret);
++      return ret;
++}
++
++static void
++ar40xx_psgmii_write(struct ar40xx_priv *priv, int reg, u32 val)
++{
++      writel(val, priv->psgmii_hw_addr + reg);
++}
++
++static void
++ar40xx_phy_dbg_write(struct ar40xx_priv *priv, int phy_addr,
++                   u16 dbg_addr, u16 dbg_data)
++{
++      struct mii_bus *bus = priv->mii_bus;
++
++      mutex_lock(&bus->mdio_lock);
++      bus->write(bus, phy_addr, AR40XX_MII_ATH_DBG_ADDR, dbg_addr);
++      bus->write(bus, phy_addr, AR40XX_MII_ATH_DBG_DATA, dbg_data);
++      mutex_unlock(&bus->mdio_lock);
++}
++
++static void
++ar40xx_phy_dbg_read(struct ar40xx_priv *priv, int phy_addr,
++                  u16 dbg_addr, u16 *dbg_data)
++{
++      struct mii_bus *bus = priv->mii_bus;
++
++      mutex_lock(&bus->mdio_lock);
++      bus->write(bus, phy_addr, AR40XX_MII_ATH_DBG_ADDR, dbg_addr);
++      *dbg_data = bus->read(bus, phy_addr, AR40XX_MII_ATH_DBG_DATA);
++      mutex_unlock(&bus->mdio_lock);
++}
++
++static void
++ar40xx_phy_mmd_write(struct ar40xx_priv *priv, u32 phy_id,
++                   u16 mmd_num, u16 reg_id, u16 reg_val)
++{
++      struct mii_bus *bus = priv->mii_bus;
++
++      mutex_lock(&bus->mdio_lock);
++      bus->write(bus, phy_id,
++                      AR40XX_MII_ATH_MMD_ADDR, mmd_num);
++      bus->write(bus, phy_id,
++                      AR40XX_MII_ATH_MMD_DATA, reg_id);
++      bus->write(bus, phy_id,
++                      AR40XX_MII_ATH_MMD_ADDR,
++                      0x4000 | mmd_num);
++      bus->write(bus, phy_id,
++              AR40XX_MII_ATH_MMD_DATA, reg_val);
++      mutex_unlock(&bus->mdio_lock);
++}
++
++static u16
++ar40xx_phy_mmd_read(struct ar40xx_priv *priv, u32 phy_id,
++                  u16 mmd_num, u16 reg_id)
++{
++      u16 value;
++      struct mii_bus *bus = priv->mii_bus;
++
++      mutex_lock(&bus->mdio_lock);
++      bus->write(bus, phy_id,
++                      AR40XX_MII_ATH_MMD_ADDR, mmd_num);
++      bus->write(bus, phy_id,
++                      AR40XX_MII_ATH_MMD_DATA, reg_id);
++      bus->write(bus, phy_id,
++                      AR40XX_MII_ATH_MMD_ADDR,
++                      0x4000 | mmd_num);
++      value = bus->read(bus, phy_id, AR40XX_MII_ATH_MMD_DATA);
++      mutex_unlock(&bus->mdio_lock);
++      return value;
++}
++
++/* Start of swconfig support */
++
++static void
++ar40xx_phy_poll_reset(struct ar40xx_priv *priv)
++{
++      u32 i, in_reset, retries = 500;
++      struct mii_bus *bus = priv->mii_bus;
++
++      /* Assume RESET was recently issued to some or all of the phys */
++      in_reset = GENMASK(AR40XX_NUM_PHYS - 1, 0);
++
++      while (retries--) {
++              /* 1ms should be plenty of time.
++               * 802.3 spec allows for a max wait time of 500ms
++               */
++              usleep_range(1000, 2000);
++
++              for (i = 0; i < AR40XX_NUM_PHYS; i++) {
++                      int val;
++
++                      /* skip devices which have completed reset */
++                      if (!(in_reset & BIT(i)))
++                              continue;
++
++                      val = mdiobus_read(bus, i, MII_BMCR);
++                      if (val < 0)
++                              continue;
++
++                      /* mark when phy is no longer in reset state */
++                      if (!(val & BMCR_RESET))
++                              in_reset &= ~BIT(i);
++              }
++
++              if (!in_reset)
++                      return;
++      }
++
++      dev_warn(&bus->dev, "Failed to reset all phys! (in_reset: 0x%x)\n",
++               in_reset);
++}
++
++static void
++ar40xx_phy_init(struct ar40xx_priv *priv)
++{
++      int i;
++      struct mii_bus *bus;
++      u16 val;
++
++      bus = priv->mii_bus;
++      for (i = 0; i < AR40XX_NUM_PORTS - 1; i++) {
++              ar40xx_phy_dbg_read(priv, i, AR40XX_PHY_DEBUG_0, &val);
++              val &= ~AR40XX_PHY_MANU_CTRL_EN;
++              ar40xx_phy_dbg_write(priv, i, AR40XX_PHY_DEBUG_0, val);
++              mdiobus_write(bus, i,
++                            MII_ADVERTISE, ADVERTISE_ALL |
++                            ADVERTISE_PAUSE_CAP |
++                            ADVERTISE_PAUSE_ASYM);
++              mdiobus_write(bus, i, MII_CTRL1000, ADVERTISE_1000FULL);
++              mdiobus_write(bus, i, MII_BMCR, BMCR_RESET | BMCR_ANENABLE);
++      }
++
++      ar40xx_phy_poll_reset(priv);
++}
++
++static void
++ar40xx_port_phy_linkdown(struct ar40xx_priv *priv)
++{
++      struct mii_bus *bus;
++      int i;
++      u16 val;
++
++      bus = priv->mii_bus;
++      for (i = 0; i < AR40XX_NUM_PORTS - 1; i++) {
++              mdiobus_write(bus, i, MII_CTRL1000, 0);
++              mdiobus_write(bus, i, MII_ADVERTISE, 0);
++              mdiobus_write(bus, i, MII_BMCR, BMCR_RESET | BMCR_ANENABLE);
++              ar40xx_phy_dbg_read(priv, i, AR40XX_PHY_DEBUG_0, &val);
++              val |= AR40XX_PHY_MANU_CTRL_EN;
++              ar40xx_phy_dbg_write(priv, i, AR40XX_PHY_DEBUG_0, val);
++              /* disable transmit */
++              ar40xx_phy_dbg_read(priv, i, AR40XX_PHY_DEBUG_2, &val);
++              val &= 0xf00f;
++              ar40xx_phy_dbg_write(priv, i, AR40XX_PHY_DEBUG_2, val);
++      }
++}
++
++static void
++ar40xx_set_mirror_regs(struct ar40xx_priv *priv)
++{
++      int port;
++
++      /* reset all mirror registers */
++      ar40xx_rmw(priv, AR40XX_REG_FWD_CTRL0,
++                 AR40XX_FWD_CTRL0_MIRROR_PORT,
++                 (0xF << AR40XX_FWD_CTRL0_MIRROR_PORT_S));
++      for (port = 0; port < AR40XX_NUM_PORTS; port++) {
++              ar40xx_rmw(priv, AR40XX_REG_PORT_LOOKUP(port),
++                         AR40XX_PORT_LOOKUP_ING_MIRROR_EN, 0);
++
++              ar40xx_rmw(priv, AR40XX_REG_PORT_HOL_CTRL1(port),
++                         AR40XX_PORT_HOL_CTRL1_EG_MIRROR_EN, 0);
++      }
++
++      /* now enable mirroring if necessary */
++      if (priv->source_port >= AR40XX_NUM_PORTS ||
++          priv->monitor_port >= AR40XX_NUM_PORTS ||
++          priv->source_port == priv->monitor_port) {
++              return;
++      }
++
++      ar40xx_rmw(priv, AR40XX_REG_FWD_CTRL0,
++                 AR40XX_FWD_CTRL0_MIRROR_PORT,
++                 (priv->monitor_port << AR40XX_FWD_CTRL0_MIRROR_PORT_S));
++
++      if (priv->mirror_rx)
++              ar40xx_rmw(priv, AR40XX_REG_PORT_LOOKUP(priv->source_port), 0,
++                         AR40XX_PORT_LOOKUP_ING_MIRROR_EN);
++
++      if (priv->mirror_tx)
++              ar40xx_rmw(priv, AR40XX_REG_PORT_HOL_CTRL1(priv->source_port),
++                         0, AR40XX_PORT_HOL_CTRL1_EG_MIRROR_EN);
++}
++
++static int
++ar40xx_sw_get_ports(struct switch_dev *dev, struct switch_val *val)
++{
++      struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
++      u8 ports = priv->vlan_table[val->port_vlan];
++      int i;
++
++      val->len = 0;
++      for (i = 0; i < dev->ports; i++) {
++              struct switch_port *p;
++
++              if (!(ports & BIT(i)))
++                      continue;
++
++              p = &val->value.ports[val->len++];
++              p->id = i;
++              if ((priv->vlan_tagged & BIT(i)) ||
++                  (priv->pvid[i] != val->port_vlan))
++                      p->flags = BIT(SWITCH_PORT_FLAG_TAGGED);
++              else
++                      p->flags = 0;
++      }
++      return 0;
++}
++
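++/* Update the software VLAN table from swconfig: a port added untagged also
++ * takes this VLAN as its PVID, while a port added tagged is only recorded
++ * as tagged when this VLAN matches its current PVID.
++ */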
++static int
++ar40xx_sw_set_ports(struct switch_dev *dev, struct switch_val *val)
++{
++      struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
++      u8 *vt = &priv->vlan_table[val->port_vlan];
++      int i;
++
++      *vt = 0;
++      for (i = 0; i < val->len; i++) {
++              struct switch_port *p = &val->value.ports[i];
++
++              if (p->flags & BIT(SWITCH_PORT_FLAG_TAGGED)) {
++                      if (val->port_vlan == priv->pvid[p->id])
++                              priv->vlan_tagged |= BIT(p->id);
++              } else {
++                      priv->vlan_tagged &= ~BIT(p->id);
++                      priv->pvid[p->id] = val->port_vlan;
++              }
++
++              *vt |= BIT(p->id);
++      }
++      return 0;
++}
++
++static int
++ar40xx_reg_wait(struct ar40xx_priv *priv, u32 reg, u32 mask, u32 val,
++              unsigned timeout)
++{
++      int i;
++
++      for (i = 0; i < timeout; i++) {
++              u32 t;
++
++              t = ar40xx_read(priv, reg);
++              if ((t & mask) == val)
++                      return 0;
++
++              usleep_range(1000, 2000);
++      }
++
++      return -ETIMEDOUT;
++}
++
++static int
++ar40xx_mib_op(struct ar40xx_priv *priv, u32 op)
++{
++      int ret;
++
++      lockdep_assert_held(&priv->mib_lock);
++
++      /* Trigger the requested MIB operation (capture or flush) for all ports */
++      ar40xx_rmw(priv, AR40XX_REG_MIB_FUNC,
++                 AR40XX_MIB_FUNC, (op << AR40XX_MIB_FUNC_S));
++
++      /* Wait for the operation to complete. */
++      ret = ar40xx_reg_wait(priv, AR40XX_REG_MIB_FUNC,
++                            AR40XX_MIB_BUSY, 0, 10);
++
++      return ret;
++}
++
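++/* Accumulate (or, when flush is set, clear) the captured MIB counters of
++ * one port into the software statistics; 64-bit counters are read as two
++ * consecutive 32-bit registers.
++ */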
++static void
++ar40xx_mib_fetch_port_stat(struct ar40xx_priv *priv, int port, bool flush)
++{
++      unsigned int base;
++      u64 *mib_stats;
++      int i;
++      u32 num_mibs = ARRAY_SIZE(ar40xx_mibs);
++
++      WARN_ON(port >= priv->dev.ports);
++
++      lockdep_assert_held(&priv->mib_lock);
++
++      base = AR40XX_REG_PORT_STATS_START +
++             AR40XX_REG_PORT_STATS_LEN * port;
++
++      mib_stats = &priv->mib_stats[port * num_mibs];
++      if (flush) {
++              u32 len;
++
++              len = num_mibs * sizeof(*mib_stats);
++              memset(mib_stats, 0, len);
++              return;
++      }
++      for (i = 0; i < num_mibs; i++) {
++              const struct ar40xx_mib_desc *mib;
++              u64 t;
++
++              mib = &ar40xx_mibs[i];
++              t = ar40xx_read(priv, base + mib->offset);
++              if (mib->size == 2) {
++                      u64 hi;
++
++                      hi = ar40xx_read(priv, base + mib->offset + 4);
++                      t |= hi << 32;
++              }
++
++              mib_stats[i] += t;
++      }
++}
++
++static int
++ar40xx_mib_capture(struct ar40xx_priv *priv)
++{
++      return ar40xx_mib_op(priv, AR40XX_MIB_FUNC_CAPTURE);
++}
++
++static int
++ar40xx_mib_flush(struct ar40xx_priv *priv)
++{
++      return ar40xx_mib_op(priv, AR40XX_MIB_FUNC_FLUSH);
++}
++
++static int
++ar40xx_sw_set_reset_mibs(struct switch_dev *dev,
++                       const struct switch_attr *attr,
++                       struct switch_val *val)
++{
++      struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
++      unsigned int len;
++      int ret;
++      u32 num_mibs = ARRAY_SIZE(ar40xx_mibs);
++
++      mutex_lock(&priv->mib_lock);
++
++      len = priv->dev.ports * num_mibs * sizeof(*priv->mib_stats);
++      memset(priv->mib_stats, 0, len);
++      ret = ar40xx_mib_flush(priv);
++
++      mutex_unlock(&priv->mib_lock);
++      return ret;
++}
++
++static int
++ar40xx_sw_set_vlan(struct switch_dev *dev, const struct switch_attr *attr,
++                 struct switch_val *val)
++{
++      struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
++
++      priv->vlan = !!val->value.i;
++      return 0;
++}
++
++static int
++ar40xx_sw_get_vlan(struct switch_dev *dev, const struct switch_attr *attr,
++                 struct switch_val *val)
++{
++      struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
++
++      val->value.i = priv->vlan;
++      return 0;
++}
++
++static int
++ar40xx_sw_set_mirror_rx_enable(struct switch_dev *dev,
++                             const struct switch_attr *attr,
++                             struct switch_val *val)
++{
++      struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
++
++      mutex_lock(&priv->reg_mutex);
++      priv->mirror_rx = !!val->value.i;
++      ar40xx_set_mirror_regs(priv);
++      mutex_unlock(&priv->reg_mutex);
++
++      return 0;
++}
++
++static int
++ar40xx_sw_get_mirror_rx_enable(struct switch_dev *dev,
++                             const struct switch_attr *attr,
++                             struct switch_val *val)
++{
++      struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
++
++      mutex_lock(&priv->reg_mutex);
++      val->value.i = priv->mirror_rx;
++      mutex_unlock(&priv->reg_mutex);
++      return 0;
++}
++
++static int
++ar40xx_sw_set_mirror_tx_enable(struct switch_dev *dev,
++                             const struct switch_attr *attr,
++                             struct switch_val *val)
++{
++      struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
++
++      mutex_lock(&priv->reg_mutex);
++      priv->mirror_tx = !!val->value.i;
++      ar40xx_set_mirror_regs(priv);
++      mutex_unlock(&priv->reg_mutex);
++
++      return 0;
++}
++
++static int
++ar40xx_sw_get_mirror_tx_enable(struct switch_dev *dev,
++                             const struct switch_attr *attr,
++                             struct switch_val *val)
++{
++      struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
++
++      mutex_lock(&priv->reg_mutex);
++      val->value.i = priv->mirror_tx;
++      mutex_unlock(&priv->reg_mutex);
++      return 0;
++}
++
++static int
++ar40xx_sw_set_mirror_monitor_port(struct switch_dev *dev,
++                                const struct switch_attr *attr,
++                                struct switch_val *val)
++{
++      struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
++
++      mutex_lock(&priv->reg_mutex);
++      priv->monitor_port = val->value.i;
++      ar40xx_set_mirror_regs(priv);
++      mutex_unlock(&priv->reg_mutex);
++
++      return 0;
++}
++
++static int
++ar40xx_sw_get_mirror_monitor_port(struct switch_dev *dev,
++                                const struct switch_attr *attr,
++                                struct switch_val *val)
++{
++      struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
++
++      mutex_lock(&priv->reg_mutex);
++      val->value.i = priv->monitor_port;
++      mutex_unlock(&priv->reg_mutex);
++      return 0;
++}
++
++static int
++ar40xx_sw_set_mirror_source_port(struct switch_dev *dev,
++                               const struct switch_attr *attr,
++                               struct switch_val *val)
++{
++      struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
++
++      mutex_lock(&priv->reg_mutex);
++      priv->source_port = val->value.i;
++      ar40xx_set_mirror_regs(priv);
++      mutex_unlock(&priv->reg_mutex);
++
++      return 0;
++}
++
++static int
++ar40xx_sw_get_mirror_source_port(struct switch_dev *dev,
++                               const struct switch_attr *attr,
++                               struct switch_val *val)
++{
++      struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
++
++      mutex_lock(&priv->reg_mutex);
++      val->value.i = priv->source_port;
++      mutex_unlock(&priv->reg_mutex);
++      return 0;
++}
++
++static int
++ar40xx_sw_set_linkdown(struct switch_dev *dev,
++                     const struct switch_attr *attr,
++                     struct switch_val *val)
++{
++      struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
++
++      if (val->value.i == 1)
++              ar40xx_port_phy_linkdown(priv);
++      else
++              ar40xx_phy_init(priv);
++
++      return 0;
++}
++
++static int
++ar40xx_sw_set_port_reset_mib(struct switch_dev *dev,
++                           const struct switch_attr *attr,
++                           struct switch_val *val)
++{
++      struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
++      int port;
++      int ret;
++
++      port = val->port_vlan;
++      if (port >= dev->ports)
++              return -EINVAL;
++
++      mutex_lock(&priv->mib_lock);
++      ret = ar40xx_mib_capture(priv);
++      if (ret)
++              goto unlock;
++
++      ar40xx_mib_fetch_port_stat(priv, port, true);
++
++unlock:
++      mutex_unlock(&priv->mib_lock);
++      return ret;
++}
++
++static int
++ar40xx_sw_get_port_mib(struct switch_dev *dev,
++                     const struct switch_attr *attr,
++                     struct switch_val *val)
++{
++      struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
++      u64 *mib_stats;
++      int port;
++      int ret;
++      char *buf = priv->buf;
++      int i, len = 0;
++      u32 num_mibs = ARRAY_SIZE(ar40xx_mibs);
++
++      port = val->port_vlan;
++      if (port >= dev->ports)
++              return -EINVAL;
++
++      mutex_lock(&priv->mib_lock);
++      ret = ar40xx_mib_capture(priv);
++      if (ret)
++              goto unlock;
++
++      ar40xx_mib_fetch_port_stat(priv, port, false);
++
++      len += snprintf(buf + len, sizeof(priv->buf) - len,
++                      "Port %d MIB counters\n",
++                      port);
++
++      mib_stats = &priv->mib_stats[port * num_mibs];
++      for (i = 0; i < num_mibs; i++)
++              len += snprintf(buf + len, sizeof(priv->buf) - len,
++                              "%-12s: %llu\n",
++                              ar40xx_mibs[i].name,
++                              mib_stats[i]);
++
++      val->value.s = buf;
++      val->len = len;
++
++unlock:
++      mutex_unlock(&priv->mib_lock);
++      return ret;
++}
++
++static int
++ar40xx_sw_set_vid(struct switch_dev *dev, const struct switch_attr *attr,
++                struct switch_val *val)
++{
++      struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
++
++      priv->vlan_id[val->port_vlan] = val->value.i;
++      return 0;
++}
++
++static int
++ar40xx_sw_get_vid(struct switch_dev *dev, const struct switch_attr *attr,
++                struct switch_val *val)
++{
++      struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
++
++      val->value.i = priv->vlan_id[val->port_vlan];
++      return 0;
++}
++
++static int
++ar40xx_sw_get_pvid(struct switch_dev *dev, int port, int *vlan)
++{
++      struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
++      *vlan = priv->pvid[port];
++      return 0;
++}
++
++static int
++ar40xx_sw_set_pvid(struct switch_dev *dev, int port, int vlan)
++{
++      struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
++
++      /* make sure no invalid PVIDs get set */
++      if (vlan >= dev->vlans)
++              return -EINVAL;
++
++      priv->pvid[port] = vlan;
++      return 0;
++}
++
++static void
++ar40xx_read_port_link(struct ar40xx_priv *priv, int port,
++                    struct switch_port_link *link)
++{
++      u32 status;
++      u32 speed;
++
++      memset(link, 0, sizeof(*link));
++
++      status = ar40xx_read(priv, AR40XX_REG_PORT_STATUS(port));
++
++      link->aneg = !!(status & AR40XX_PORT_AUTO_LINK_EN);
++      if (link->aneg || (port != AR40XX_PORT_CPU))
++              link->link = !!(status & AR40XX_PORT_STATUS_LINK_UP);
++      else
++              link->link = true;
++
++      if (!link->link)
++              return;
++
++      link->duplex = !!(status & AR40XX_PORT_DUPLEX);
++      link->tx_flow = !!(status & AR40XX_PORT_STATUS_TXFLOW);
++      link->rx_flow = !!(status & AR40XX_PORT_STATUS_RXFLOW);
++
++      speed = (status & AR40XX_PORT_SPEED) >>
++               AR40XX_PORT_STATUS_SPEED_S;
++
++      switch (speed) {
++      case AR40XX_PORT_SPEED_10M:
++              link->speed = SWITCH_PORT_SPEED_10;
++              break;
++      case AR40XX_PORT_SPEED_100M:
++              link->speed = SWITCH_PORT_SPEED_100;
++              break;
++      case AR40XX_PORT_SPEED_1000M:
++              link->speed = SWITCH_PORT_SPEED_1000;
++              break;
++      default:
++              link->speed = SWITCH_PORT_SPEED_UNKNOWN;
++              break;
++      }
++}
++
++static int
++ar40xx_sw_get_port_link(struct switch_dev *dev, int port,
++                      struct switch_port_link *link)
++{
++      struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
++
++      ar40xx_read_port_link(priv, port, link);
++      return 0;
++}
++
++static const struct switch_attr ar40xx_sw_attr_globals[] = {
++      {
++              .type = SWITCH_TYPE_INT,
++              .name = "enable_vlan",
++              .description = "Enable VLAN mode",
++              .set = ar40xx_sw_set_vlan,
++              .get = ar40xx_sw_get_vlan,
++              .max = 1
++      },
++      {
++              .type = SWITCH_TYPE_NOVAL,
++              .name = "reset_mibs",
++              .description = "Reset all MIB counters",
++              .set = ar40xx_sw_set_reset_mibs,
++      },
++      {
++              .type = SWITCH_TYPE_INT,
++              .name = "enable_mirror_rx",
++              .description = "Enable mirroring of RX packets",
++              .set = ar40xx_sw_set_mirror_rx_enable,
++              .get = ar40xx_sw_get_mirror_rx_enable,
++              .max = 1
++      },
++      {
++              .type = SWITCH_TYPE_INT,
++              .name = "enable_mirror_tx",
++              .description = "Enable mirroring of TX packets",
++              .set = ar40xx_sw_set_mirror_tx_enable,
++              .get = ar40xx_sw_get_mirror_tx_enable,
++              .max = 1
++      },
++      {
++              .type = SWITCH_TYPE_INT,
++              .name = "mirror_monitor_port",
++              .description = "Mirror monitor port",
++              .set = ar40xx_sw_set_mirror_monitor_port,
++              .get = ar40xx_sw_get_mirror_monitor_port,
++              .max = AR40XX_NUM_PORTS - 1
++      },
++      {
++              .type = SWITCH_TYPE_INT,
++              .name = "mirror_source_port",
++              .description = "Mirror source port",
++              .set = ar40xx_sw_set_mirror_source_port,
++              .get = ar40xx_sw_get_mirror_source_port,
++              .max = AR40XX_NUM_PORTS - 1
++      },
++      {
++              .type = SWITCH_TYPE_INT,
++              .name = "linkdown",
++              .description = "Link down all the PHYs",
++              .set = ar40xx_sw_set_linkdown,
++              .max = 1
++      },
++};
++
++static const struct switch_attr ar40xx_sw_attr_port[] = {
++      {
++              .type = SWITCH_TYPE_NOVAL,
++              .name = "reset_mib",
++              .description = "Reset single port MIB counters",
++              .set = ar40xx_sw_set_port_reset_mib,
++      },
++      {
++              .type = SWITCH_TYPE_STRING,
++              .name = "mib",
++              .description = "Get port's MIB counters",
++              .set = NULL,
++              .get = ar40xx_sw_get_port_mib,
++      },
++};
++
++const struct switch_attr ar40xx_sw_attr_vlan[] = {
++      {
++              .type = SWITCH_TYPE_INT,
++              .name = "vid",
++              .description = "VLAN ID (0-4094)",
++              .set = ar40xx_sw_set_vid,
++              .get = ar40xx_sw_get_vid,
++              .max = 4094,
++      },
++};
++
++/* End of swconfig support */
++
++static int
++ar40xx_wait_bit(struct ar40xx_priv *priv, int reg, u32 mask, u32 val)
++{
++      int timeout = 20;
++      u32 t;
++
++      while (1) {
++              t = ar40xx_read(priv, reg);
++              if ((t & mask) == val)
++                      return 0;
++
++              if (timeout-- <= 0)
++                      break;
++
++              usleep_range(10, 20);
++      }
++
++      pr_err("ar40xx: timeout for reg %08x: %08x & %08x != %08x\n",
++             (unsigned int)reg, t, mask, val);
++      return -ETIMEDOUT;
++}
++
++static int
++ar40xx_atu_flush(struct ar40xx_priv *priv)
++{
++      int ret;
++
++      ret = ar40xx_wait_bit(priv, AR40XX_REG_ATU_FUNC,
++                            AR40XX_ATU_FUNC_BUSY, 0);
++      if (!ret)
++              ar40xx_write(priv, AR40XX_REG_ATU_FUNC,
++                           AR40XX_ATU_FUNC_OP_FLUSH |
++                           AR40XX_ATU_FUNC_BUSY);
++
++      return ret;
++}
++
++static void
++ar40xx_ess_reset(struct ar40xx_priv *priv)
++{
++      reset_control_assert(priv->ess_rst);
++      mdelay(10);
++      reset_control_deassert(priv->ess_rst);
++      /* Wait for all internal tables to finish initialization.
++       * This takes 5~10 ms.
++       */
++      mdelay(10);
++
++      pr_info("ESS reset ok!\n");
++}
++
++/* Start of psgmii self test */
++
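++/* The self test generates test packets from each PHY's MMD7 packet
++ * generator, loops them back at the switch MAC over the PSGMII link and
++ * checks the MMD7 good/error counters; if any PHY reports errors, the
++ * Malibu PHY and the ESS are reset and the whole test is retried,
++ * presumably to recalibrate the PSGMII link.
++ */
++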
++static void
++ar40xx_malibu_psgmii_ess_reset(struct ar40xx_priv *priv)
++{
++      u32 n;
++      struct mii_bus *bus = priv->mii_bus;
++      /* fix phy psgmii RX 20bit */
++      mdiobus_write(bus, 5, 0x0, 0x005b);
++      /* reset phy psgmii */
++      mdiobus_write(bus, 5, 0x0, 0x001b);
++      /* release reset phy psgmii */
++      mdiobus_write(bus, 5, 0x0, 0x005b);
++
++      for (n = 0; n < AR40XX_PSGMII_CALB_NUM; n++) {
++              u16 status;
++
++              status = ar40xx_phy_mmd_read(priv, 5, 1, 0x28);
++              if (status & BIT(0))
++                      break;
++              /* Polling interval to check whether the PSGMII PLL in the
++               * Malibu PHY is ready. The worst case is 8.67 ms for a
++               * 25 MHz reference clock:
++               * [512+(128+2048)*49]*80ns+100us
++               */
++              mdelay(2);
++      }
++
++      /* end of malibu psgmii calibration check */
++
++      /* freeze phy psgmii RX CDR */
++      mdiobus_write(bus, 5, 0x1a, 0x2230);
++
++      ar40xx_ess_reset(priv);
++
++      /* start of dakota psgmii calibration check */
++      for (n = 0; n < AR40XX_PSGMII_CALB_NUM; n++) {
++              u32 status;
++
++              status = ar40xx_psgmii_read(priv, 0xa0);
++              if (status & BIT(0))
++                      break;
++              /* Polling interval to check PSGMII PLL in ESS is ready */
++              mdelay(2);
++      }
++
++      /* end of dakota psgmii calibration check */
++
++      /* release phy psgmii RX CDR */
++      mdiobus_write(bus, 5, 0x1a, 0x3230);
++      /* release phy psgmii RX 20bit */
++      mdiobus_write(bus, 5, 0x0, 0x005f);
++}
++
++static void
++ar40xx_psgmii_single_phy_testing(struct ar40xx_priv *priv, int phy)
++{
++      int j;
++      u32 tx_ok, tx_error;
++      u32 rx_ok, rx_error;
++      u32 tx_ok_high16;
++      u32 rx_ok_high16;
++      u32 tx_all_ok, rx_all_ok;
++      struct mii_bus *bus = priv->mii_bus;
++
++      mdiobus_write(bus, phy, 0x0, 0x9000);
++      mdiobus_write(bus, phy, 0x0, 0x4140);
++
++      for (j = 0; j < AR40XX_PSGMII_CALB_NUM; j++) {
++              u16 status;
++
++              status = mdiobus_read(bus, phy, 0x11);
++              if (status & AR40XX_PHY_SPEC_STATUS_LINK)
++                      break;
++              /* Polling interval to check whether the PHY link is up.
++               * maxwait_timer: 750 ms +/- 10 ms
++               * minwait_timer: 1 us +/- 0.1 us
++               * The wait time falls between minwait_timer and maxwait_timer;
++               * see IEEE 802.3 section 40.4.5.2.
++               */
++              mdelay(8);
++      }
++
++      /* enable check */
++      ar40xx_phy_mmd_write(priv, phy, 7, 0x8029, 0x0000);
++      ar40xx_phy_mmd_write(priv, phy, 7, 0x8029, 0x0003);
++
++      /* start traffic */
++      ar40xx_phy_mmd_write(priv, phy, 7, 0x8020, 0xa000);
++      /* Wait for all traffic to end:
++       * 4096 (pkt num) * 1524 (size) * 8 ns (125 MHz) = 49.9 ms
++       */
++      mdelay(50);
++
++      /* check counter */
++      tx_ok = ar40xx_phy_mmd_read(priv, phy, 7, 0x802e);
++      tx_ok_high16 = ar40xx_phy_mmd_read(priv, phy, 7, 0x802d);
++      tx_error = ar40xx_phy_mmd_read(priv, phy, 7, 0x802f);
++      rx_ok = ar40xx_phy_mmd_read(priv, phy, 7, 0x802b);
++      rx_ok_high16 = ar40xx_phy_mmd_read(priv, phy, 7, 0x802a);
++      rx_error = ar40xx_phy_mmd_read(priv, phy, 7, 0x802c);
++      tx_all_ok = tx_ok + (tx_ok_high16 << 16);
++      rx_all_ok = rx_ok + (rx_ok_high16 << 16);
++      if (tx_all_ok == 0x1000 && tx_error == 0) {
++              /* success */
++              priv->phy_t_status &= (~BIT(phy));
++      } else {
++              pr_info("PHY %d single-PHY PSGMII test failed!\n", phy);
++              priv->phy_t_status |= BIT(phy);
++      }
++
++      mdiobus_write(bus, phy, 0x0, 0x1840);
++}
++
++static void
++ar40xx_psgmii_all_phy_testing(struct ar40xx_priv *priv)
++{
++      int phy, j;
++      struct mii_bus *bus = priv->mii_bus;
++
++      mdiobus_write(bus, 0x1f, 0x0, 0x9000);
++      mdiobus_write(bus, 0x1f, 0x0, 0x4140);
++
++      for (j = 0; j < AR40XX_PSGMII_CALB_NUM; j++) {
++              for (phy = 0; phy < AR40XX_NUM_PORTS - 1; phy++) {
++                      u16 status;
++
++                      status = mdiobus_read(bus, phy, 0x11);
++                      if (!(status & BIT(10)))
++                              break;
++              }
++
++              if (phy >= (AR40XX_NUM_PORTS - 1))
++                      break;
++              /* Polling interval to check whether the PHY link is up */
++              mdelay(8);
++      }
++      /* enable check */
++      ar40xx_phy_mmd_write(priv, 0x1f, 7, 0x8029, 0x0000);
++      ar40xx_phy_mmd_write(priv, 0x1f, 7, 0x8029, 0x0003);
++
++      /* start traffic */
++      ar40xx_phy_mmd_write(priv, 0x1f, 7, 0x8020, 0xa000);
++      /* Wait for all traffic to end:
++       * 4096 (pkt num) * 1524 (size) * 8 ns (125 MHz) = 49.9 ms
++       */
++      mdelay(50);
++
++      for (phy = 0; phy < AR40XX_NUM_PORTS - 1; phy++) {
++              u32 tx_ok, tx_error;
++              u32 rx_ok, rx_error;
++              u32 tx_ok_high16;
++              u32 rx_ok_high16;
++              u32 tx_all_ok, rx_all_ok;
++
++              /* check counter */
++              tx_ok = ar40xx_phy_mmd_read(priv, phy, 7, 0x802e);
++              tx_ok_high16 = ar40xx_phy_mmd_read(priv, phy, 7, 0x802d);
++              tx_error = ar40xx_phy_mmd_read(priv, phy, 7, 0x802f);
++              rx_ok = ar40xx_phy_mmd_read(priv, phy, 7, 0x802b);
++              rx_ok_high16 = ar40xx_phy_mmd_read(priv, phy, 7, 0x802a);
++              rx_error = ar40xx_phy_mmd_read(priv, phy, 7, 0x802c);
++              tx_all_ok = tx_ok + (tx_ok_high16<<16);
++              rx_all_ok = rx_ok + (rx_ok_high16<<16);
++              if (tx_all_ok == 0x1000 && tx_error == 0) {
++                      /* success */
++                      priv->phy_t_status &= ~BIT(phy + 8);
++              } else {
++                      pr_info("PHY %d all-PHY PSGMII test failed!\n", phy);
++                      priv->phy_t_status |= BIT(phy + 8);
++              }
++      }
++
++      pr_debug("PHY all-test status: 0x%x\n", priv->phy_t_status);
++}
++
++void
++ar40xx_psgmii_self_test(struct ar40xx_priv *priv)
++{
++      u32 i, phy;
++      struct mii_bus *bus = priv->mii_bus;
++
++      ar40xx_malibu_psgmii_ess_reset(priv);
++
++      /* switch to access MII reg for copper */
++      mdiobus_write(bus, 4, 0x1f, 0x8500);
++      for (phy = 0; phy < AR40XX_NUM_PORTS - 1; phy++) {
++              /* enable phy mdio broadcast write */
++              ar40xx_phy_mmd_write(priv, phy, 7, 0x8028, 0x801f);
++      }
++      /* force no link by power down */
++      mdiobus_write(bus, 0x1f, 0x0, 0x1840);
++      /* packet number */
++      ar40xx_phy_mmd_write(priv, 0x1f, 7, 0x8021, 0x1000);
++      ar40xx_phy_mmd_write(priv, 0x1f, 7, 0x8062, 0x05e0);
++
++      /* fix mdi status */
++      mdiobus_write(bus, 0x1f, 0x10, 0x6800);
++      for (i = 0; i < AR40XX_PSGMII_CALB_NUM; i++) {
++              priv->phy_t_status = 0;
++
++              for (phy = 0; phy < AR40XX_NUM_PORTS - 1; phy++) {
++                      ar40xx_rmw(priv, AR40XX_REG_PORT_LOOKUP(phy + 1),
++                              AR40XX_PORT_LOOKUP_LOOPBACK,
++                              AR40XX_PORT_LOOKUP_LOOPBACK);
++              }
++
++              for (phy = 0; phy < AR40XX_NUM_PORTS - 1; phy++)
++                      ar40xx_psgmii_single_phy_testing(priv, phy);
++
++              ar40xx_psgmii_all_phy_testing(priv);
++
++              if (priv->phy_t_status)
++                      ar40xx_malibu_psgmii_ess_reset(priv);
++              else
++                      break;
++      }
++
++      if (i >= AR40XX_PSGMII_CALB_NUM)
++              pr_info("PSGMII cannot recover\n");
++      else
++              pr_debug("PSGMII recovered after %d resets\n", i);
++
++      /* configuration recover */
++      /* packet number */
++      ar40xx_phy_mmd_write(priv, 0x1f, 7, 0x8021, 0x0);
++      /* disable check */
++      ar40xx_phy_mmd_write(priv, 0x1f, 7, 0x8029, 0x0);
++      /* disable traffic */
++      ar40xx_phy_mmd_write(priv, 0x1f, 7, 0x8020, 0x0);
++}
++
++void
++ar40xx_psgmii_self_test_clean(struct ar40xx_priv *priv)
++{
++      int phy;
++      struct mii_bus *bus = priv->mii_bus;
++
++      /* disable phy internal loopback */
++      mdiobus_write(bus, 0x1f, 0x10, 0x6860);
++      mdiobus_write(bus, 0x1f, 0x0, 0x9040);
++
++      for (phy = 0; phy < AR40XX_NUM_PORTS - 1; phy++) {
++              /* disable mac loop back */
++              ar40xx_rmw(priv, AR40XX_REG_PORT_LOOKUP(phy + 1),
++                              AR40XX_PORT_LOOKUP_LOOPBACK, 0);
++              /* disable phy mdio broadcast write */
++              ar40xx_phy_mmd_write(priv, phy, 7, 0x8028, 0x001f);
++      }
++
++      /* clear fdb entry */
++      ar40xx_atu_flush(priv);
++}
++
++/* End of psgmii self test */
++
++static void
++ar40xx_mac_mode_init(struct ar40xx_priv *priv, u32 mode)
++{
++      if (mode == PORT_WRAPPER_PSGMII) {
++              ar40xx_psgmii_write(priv, AR40XX_PSGMII_MODE_CONTROL, 0x2200);
++              ar40xx_psgmii_write(priv, AR40XX_PSGMIIPHY_TX_CONTROL, 0x8380);
++      }
++}
++
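++/* Force the CPU port (port 0) to 1000M full duplex with flow control,
++ * then enable its MAC TX/RX; the CPU port has no attached PHY, so
++ * auto-negotiation does not apply here.
++ */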
++static
++int ar40xx_cpuport_setup(struct ar40xx_priv *priv)
++{
++      u32 t;
++
++      t = AR40XX_PORT_STATUS_TXFLOW |
++           AR40XX_PORT_STATUS_RXFLOW |
++           AR40XX_PORT_TXHALF_FLOW |
++           AR40XX_PORT_DUPLEX |
++           AR40XX_PORT_SPEED_1000M;
++      ar40xx_write(priv, AR40XX_REG_PORT_STATUS(0), t);
++      usleep_range(10, 20);
++
++      t |= AR40XX_PORT_TX_EN |
++             AR40XX_PORT_RX_EN;
++      ar40xx_write(priv, AR40XX_REG_PORT_STATUS(0), t);
++
++      return 0;
++}
++
++static void
++ar40xx_init_port(struct ar40xx_priv *priv, int port)
++{
++      u32 t;
++
++      ar40xx_rmw(priv, AR40XX_REG_PORT_STATUS(port),
++                      AR40XX_PORT_AUTO_LINK_EN, 0);
++
++      ar40xx_write(priv, AR40XX_REG_PORT_HEADER(port), 0);
++
++      ar40xx_write(priv, AR40XX_REG_PORT_VLAN0(port), 0);
++
++      t = AR40XX_PORT_VLAN1_OUT_MODE_UNTOUCH << AR40XX_PORT_VLAN1_OUT_MODE_S;
++      ar40xx_write(priv, AR40XX_REG_PORT_VLAN1(port), t);
++
++      t = AR40XX_PORT_LOOKUP_LEARN;
++      t |= AR40XX_PORT_STATE_FORWARD << AR40XX_PORT_LOOKUP_STATE_S;
++      ar40xx_write(priv, AR40XX_REG_PORT_LOOKUP(port), t);
++}
++
++void
++ar40xx_init_globals(struct ar40xx_priv *priv)
++{
++      u32 t;
++
++      /* enable CPU port and disable mirror port */
++      t = AR40XX_FWD_CTRL0_CPU_PORT_EN |
++          AR40XX_FWD_CTRL0_MIRROR_PORT;
++      ar40xx_write(priv, AR40XX_REG_FWD_CTRL0, t);
++
++      /* flood unknown unicast, multicast and broadcast frames to all ports */
++      t = (AR40XX_PORTS_ALL << AR40XX_FWD_CTRL1_UC_FLOOD_S) |
++          (AR40XX_PORTS_ALL << AR40XX_FWD_CTRL1_MC_FLOOD_S) |
++          (AR40XX_PORTS_ALL << AR40XX_FWD_CTRL1_BC_FLOOD_S);
++      ar40xx_write(priv, AR40XX_REG_FWD_CTRL1, t);
++
++      /* enable jumbo frames */
++      ar40xx_rmw(priv, AR40XX_REG_MAX_FRAME_SIZE,
++                 AR40XX_MAX_FRAME_SIZE_MTU, 9018 + 8 + 2);
++
++      /* Enable MIB counters */
++      ar40xx_rmw(priv, AR40XX_REG_MODULE_EN, 0,
++                 AR40XX_MODULE_EN_MIB);
++
++      /* Disable EEE (802.3az) */
++      ar40xx_write(priv, AR40XX_REG_EEE_CTRL, 0);
++
++      /* set flow control thresholds for the CPU port */
++      t = (AR40XX_PORT0_FC_THRESH_ON_DFLT << 16) |
++            AR40XX_PORT0_FC_THRESH_OFF_DFLT;
++      ar40xx_write(priv, AR40XX_REG_PORT_FLOWCTRL_THRESH(0), t);
++}
++
++static void
++ar40xx_malibu_init(struct ar40xx_priv *priv)
++{
++      int i;
++      struct mii_bus *bus;
++      u16 val;
++
++      bus = priv->mii_bus;
++
++      /* workaround to enable EEE (802.3az) transmit capability */
++      ar40xx_phy_mmd_write(priv, AR40XX_PSGMII_ID, 1,
++                           AR40XX_MALIBU_PSGMII_MODE_CTRL,
++                           AR40XX_MALIBU_PHY_PSGMII_MODE_CTRL_ADJUST_VAL);
++      for (i = 0; i < AR40XX_NUM_PORTS - 1; i++) {
++              /* change malibu control_dac */
++              val = ar40xx_phy_mmd_read(priv, i, 7,
++                                        AR40XX_MALIBU_PHY_MMD7_DAC_CTRL);
++              val &= ~AR40XX_MALIBU_DAC_CTRL_MASK;
++              val |= AR40XX_MALIBU_DAC_CTRL_VALUE;
++              ar40xx_phy_mmd_write(priv, i, 7,
++                                   AR40XX_MALIBU_PHY_MMD7_DAC_CTRL, val);
++              if (i == AR40XX_MALIBU_PHY_LAST_ADDR) {
++                      /* prevent the PHY from going into hibernation */
++                      val = ar40xx_phy_mmd_read(priv, i, 3,
++                                                AR40XX_MALIBU_PHY_RLP_CTRL);
++                      val &= (~(1<<1));
++                      ar40xx_phy_mmd_write(priv, i, 3,
++                                           AR40XX_MALIBU_PHY_RLP_CTRL, val);
++              }
++      }
++
++      /* adjust psgmii serdes tx amp */
++      mdiobus_write(bus, AR40XX_PSGMII_ID, AR40XX_PSGMII_TX_DRIVER_1_CTRL,
++                    AR40XX_MALIBU_PHY_PSGMII_REDUCE_SERDES_TX_AMP);
++}
++
++static int
++ar40xx_hw_init(struct ar40xx_priv *priv)
++{
++      u32 i;
++
++      ar40xx_ess_reset(priv);
++
++      if (priv->mii_bus)
++              ar40xx_malibu_init(priv);
++      else
++              return -1;
++
++      ar40xx_psgmii_self_test(priv);
++      ar40xx_psgmii_self_test_clean(priv);
++
++      ar40xx_mac_mode_init(priv, priv->mac_mode);
++
++      for (i = 0; i < priv->dev.ports; i++)
++              ar40xx_init_port(priv, i);
++
++      ar40xx_init_globals(priv);
++
++      return 0;
++}
++
++/* Start of qm error WAR */
++
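++/* Workaround for queue manager (QM) buffer errors: the polling task below
++ * watches each port's PHY link state, switches the MAC to forced mode on a
++ * link-down event, waits for the port's QM buffer to drain before forcing
++ * 1G full duplex again, and restores the negotiated speed/duplex together
++ * with link auto-detection once the link comes back up.
++ */
++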
++static
++int ar40xx_force_1g_full(struct ar40xx_priv *priv, u32 port_id)
++{
++      u32 reg;
++
++      if (port_id < 0 || port_id > 6)
++              return -1;
++
++      reg = AR40XX_REG_PORT_STATUS(port_id);
++      return ar40xx_rmw(priv, reg, AR40XX_PORT_SPEED,
++                      (AR40XX_PORT_SPEED_1000M | AR40XX_PORT_DUPLEX));
++}
++
++static
++int ar40xx_get_qm_status(struct ar40xx_priv *priv,
++                       u32 port_id, u32 *qm_buffer_err)
++{
++      u32 reg;
++      u32 qm_val;
++
++      if (port_id < 1 || port_id > 5) {
++              *qm_buffer_err = 0;
++              return -1;
++      }
++
++      if (port_id < 4) {
++              reg = AR40XX_REG_QM_PORT0_3_QNUM;
++              ar40xx_write(priv, AR40XX_REG_QM_DEBUG_ADDR, reg);
++              qm_val = ar40xx_read(priv, AR40XX_REG_QM_DEBUG_VALUE);
++              /* every 8 bits for each port */
++              *qm_buffer_err = (qm_val >> (port_id * 8)) & 0xFF;
++      } else {
++              reg = AR40XX_REG_QM_PORT4_6_QNUM;
++              ar40xx_write(priv, AR40XX_REG_QM_DEBUG_ADDR, reg);
++              qm_val = ar40xx_read(priv, AR40XX_REG_QM_DEBUG_VALUE);
++              /* every 8 bits for each port */
++              *qm_buffer_err = (qm_val >> ((port_id-4) * 8)) & 0xFF;
++      }
++
++      return 0;
++}
++
++static void
++ar40xx_sw_mac_polling_task(struct ar40xx_priv *priv)
++{
++      static int task_count;
++      u32 i;
++      u32 reg, value;
++      u32 link, speed, duplex;
++      u32 qm_buffer_err;
++      u16 port_phy_status[AR40XX_NUM_PORTS];
++      static u32 qm_err_cnt[AR40XX_NUM_PORTS] = {0, 0, 0, 0, 0, 0};
++      static u32 link_cnt[AR40XX_NUM_PORTS] = {0, 0, 0, 0, 0, 0};
++      struct mii_bus *bus = NULL;
++
++      if (!priv || !priv->mii_bus)
++              return;
++
++      bus = priv->mii_bus;
++
++      ++task_count;
++
++      for (i = 1; i < AR40XX_NUM_PORTS; ++i) {
++              port_phy_status[i] =
++                      mdiobus_read(bus, i-1, AR40XX_PHY_SPEC_STATUS);
++              speed = link = duplex = port_phy_status[i];
++              speed &= AR40XX_PHY_SPEC_STATUS_SPEED;
++              speed >>= 14;
++              link &= AR40XX_PHY_SPEC_STATUS_LINK;
++              link >>= 10;
++              duplex &= AR40XX_PHY_SPEC_STATUS_DUPLEX;
++              duplex >>= 13;
++
++              if (link != priv->ar40xx_port_old_link[i]) {
++                      ++link_cnt[i];
++                      /* Up --> Down */
++                      if ((priv->ar40xx_port_old_link[i] ==
++                                      AR40XX_PORT_LINK_UP) &&
++                          (link == AR40XX_PORT_LINK_DOWN)) {
++                              /* disable LINK_EN (MAC force mode) */
++                              reg = AR40XX_REG_PORT_STATUS(i);
++                              ar40xx_rmw(priv, reg,
++                                              AR40XX_PORT_AUTO_LINK_EN, 0);
++
++                              /* Check queue buffer */
++                              qm_err_cnt[i] = 0;
++                              ar40xx_get_qm_status(priv, i, &qm_buffer_err);
++                              if (qm_buffer_err) {
++                                      priv->ar40xx_port_qm_buf[i] =
++                                              AR40XX_QM_NOT_EMPTY;
++                              } else {
++                                      u16 phy_val = 0;
++
++                                      priv->ar40xx_port_qm_buf[i] =
++                                              AR40XX_QM_EMPTY;
++                                      ar40xx_force_1g_full(priv, i);
++                                      /* Ref: QCA8337 datasheet. Clearing
++                                       * MANU_CTRL_EN prevents the PHY from
++                                       * getting stuck in 100BT mode when
++                                       * bringing up the link.
++                                       */
++                                      ar40xx_phy_dbg_read(priv, i-1,
++                                                          AR40XX_PHY_DEBUG_0,
++                                                          &phy_val);
++                                      phy_val &= (~AR40XX_PHY_MANU_CTRL_EN);
++                                      ar40xx_phy_dbg_write(priv, i-1,
++                                                           AR40XX_PHY_DEBUG_0,
++                                                           phy_val);
++                              }
++                              priv->ar40xx_port_old_link[i] = link;
++                      } else if ((priv->ar40xx_port_old_link[i] ==
++                                              AR40XX_PORT_LINK_DOWN) &&
++                                      (link == AR40XX_PORT_LINK_UP)) {
++                              /* Down --> Up */
++                              if (priv->port_link_up[i] < 1) {
++                                      ++priv->port_link_up[i];
++                              } else {
++                                      /* Change port status */
++                                      reg = AR40XX_REG_PORT_STATUS(i);
++                                      value = ar40xx_read(priv, reg);
++                                      priv->port_link_up[i] = 0;
++
++                                      value &= ~(AR40XX_PORT_DUPLEX |
++                                                 AR40XX_PORT_SPEED);
++                                      value |= speed | (duplex ? BIT(6) : 0);
++                                      ar40xx_write(priv, reg, value);
++                                      /* The clock switch needs this much
++                                       * time to avoid glitches.
++                                       */
++                                      usleep_range(100, 200);
++
++                                      value |= AR40XX_PORT_AUTO_LINK_EN;
++                                      ar40xx_write(priv, reg, value);
++                                      /* The HW needs this much time to make
++                                       * sure the link is stable before
++                                       * enabling the MAC.
++                                       */
++                                      usleep_range(100, 200);
++
++                                      if (speed == AR40XX_PORT_SPEED_100M) {
++                                              u16 phy_val = 0;
++                                              /* Enable at 100M; if the link
++                                               * later drops to 10M the
++                                               * clock changes smoothly.
++                                               */
++                                              ar40xx_phy_dbg_read(priv, i-1,
++                                                                  0,
++                                                                  &phy_val);
++                                              phy_val |=
++                                                      AR40XX_PHY_MANU_CTRL_EN;
++                                              ar40xx_phy_dbg_write(priv, i-1,
++                                                                   0,
++                                                                   phy_val);
++                                      }
++                                      priv->ar40xx_port_old_link[i] = link;
++                              }
++                      }
++              }
++
++              if (priv->ar40xx_port_qm_buf[i] == AR40XX_QM_NOT_EMPTY) {
++                      /* Check QM */
++                      ar40xx_get_qm_status(priv, i, &qm_buffer_err);
++                      if (qm_buffer_err) {
++                              ++qm_err_cnt[i];
++                      } else {
++                              priv->ar40xx_port_qm_buf[i] =
++                                              AR40XX_QM_EMPTY;
++                              qm_err_cnt[i] = 0;
++                              ar40xx_force_1g_full(priv, i);
++                      }
++              }
++      }
++}
++
++static void
++ar40xx_qm_err_check_work_task(struct work_struct *work)
++{
++      struct ar40xx_priv *priv = container_of(work, struct ar40xx_priv,
++                                      qm_dwork.work);
++
++      mutex_lock(&priv->qm_lock);
++
++      ar40xx_sw_mac_polling_task(priv);
++
++      mutex_unlock(&priv->qm_lock);
++
++      schedule_delayed_work(&priv->qm_dwork,
++                            msecs_to_jiffies(AR40XX_QM_WORK_DELAY));
++}
++
++static int
++ar40xx_qm_err_check_work_start(struct ar40xx_priv *priv)
++{
++      mutex_init(&priv->qm_lock);
++
++      INIT_DELAYED_WORK(&priv->qm_dwork, ar40xx_qm_err_check_work_task);
++
++      schedule_delayed_work(&priv->qm_dwork,
++                            msecs_to_jiffies(AR40XX_QM_WORK_DELAY));
++
++      return 0;
++}
++
++/* End of qm error WAR */
++
++static int
++ar40xx_vlan_init(struct ar40xx_priv *priv)
++{
++      int port;
++      unsigned long bmp;
++
++      /* Enable VLAN by default */
++      priv->vlan = 1;
++      priv->vlan_table[AR40XX_LAN_VLAN] = priv->cpu_bmp | priv->lan_bmp;
++      priv->vlan_table[AR40XX_WAN_VLAN] = priv->cpu_bmp | priv->wan_bmp;
++      priv->vlan_tagged = priv->cpu_bmp;
++      bmp = priv->lan_bmp;
++      for_each_set_bit(port, &bmp, AR40XX_NUM_PORTS)
++              priv->pvid[port] = AR40XX_LAN_VLAN;
++
++      bmp = priv->wan_bmp;
++      for_each_set_bit(port, &bmp, AR40XX_NUM_PORTS)
++              priv->pvid[port] = AR40XX_WAN_VLAN;
++
++      return 0;
++}
++
++static void
++ar40xx_mib_work_func(struct work_struct *work)
++{
++      struct ar40xx_priv *priv;
++      int err;
++
++      priv = container_of(work, struct ar40xx_priv, mib_work.work);
++
++      mutex_lock(&priv->mib_lock);
++
++      err = ar40xx_mib_capture(priv);
++      if (err)
++              goto next_port;
++
++      ar40xx_mib_fetch_port_stat(priv, priv->mib_next_port, false);
++
++next_port:
++      priv->mib_next_port++;
++      if (priv->mib_next_port >= priv->dev.ports)
++              priv->mib_next_port = 0;
++
++      mutex_unlock(&priv->mib_lock);
++
++      schedule_delayed_work(&priv->mib_work,
++                            msecs_to_jiffies(AR40XX_MIB_WORK_DELAY));
++}
++
++static void
++ar40xx_setup_port(struct ar40xx_priv *priv, int port, u32 members)
++{
++      u32 t;
++      u32 egress, ingress;
++      u32 pvid = priv->vlan_id[priv->pvid[port]];
++
++      if (priv->vlan) {
++              egress = AR40XX_PORT_VLAN1_OUT_MODE_UNMOD;
++              ingress = AR40XX_IN_SECURE;
++      } else {
++              egress = AR40XX_PORT_VLAN1_OUT_MODE_UNTOUCH;
++              ingress = AR40XX_IN_PORT_ONLY;
++      }
++
++      t = pvid << AR40XX_PORT_VLAN0_DEF_SVID_S;
++      t |= pvid << AR40XX_PORT_VLAN0_DEF_CVID_S;
++      ar40xx_write(priv, AR40XX_REG_PORT_VLAN0(port), t);
++
++      t = AR40XX_PORT_VLAN1_PORT_VLAN_PROP;
++      t |= egress << AR40XX_PORT_VLAN1_OUT_MODE_S;
++      ar40xx_write(priv, AR40XX_REG_PORT_VLAN1(port), t);
++
++      t = members;
++      t |= AR40XX_PORT_LOOKUP_LEARN;
++      t |= ingress << AR40XX_PORT_LOOKUP_IN_MODE_S;
++      t |= AR40XX_PORT_STATE_FORWARD << AR40XX_PORT_LOOKUP_STATE_S;
++      ar40xx_write(priv, AR40XX_REG_PORT_LOOKUP(port), t);
++}
++
++static void
++ar40xx_vtu_op(struct ar40xx_priv *priv, u32 op, u32 val)
++{
++      if (ar40xx_wait_bit(priv, AR40XX_REG_VTU_FUNC1,
++                          AR40XX_VTU_FUNC1_BUSY, 0))
++              return;
++
++      if ((op & AR40XX_VTU_FUNC1_OP) == AR40XX_VTU_FUNC1_OP_LOAD)
++              ar40xx_write(priv, AR40XX_REG_VTU_FUNC0, val);
++
++      op |= AR40XX_VTU_FUNC1_BUSY;
++      ar40xx_write(priv, AR40XX_REG_VTU_FUNC1, op);
++}
++
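++/* Load one VLAN entry into the VTU: ports outside the member mask are
++ * marked as non-members; member ports egress tagged unless this VLAN is
++ * their untagged PVID, and with VLAN mode disabled tags are left untouched.
++ */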
++static void
++ar40xx_vtu_load_vlan(struct ar40xx_priv *priv, u32 vid, u32 port_mask)
++{
++      u32 op;
++      u32 val;
++      int i;
++
++      op = AR40XX_VTU_FUNC1_OP_LOAD | (vid << AR40XX_VTU_FUNC1_VID_S);
++      val = AR40XX_VTU_FUNC0_VALID | AR40XX_VTU_FUNC0_IVL;
++      for (i = 0; i < AR40XX_NUM_PORTS; i++) {
++              u32 mode;
++
++              if ((port_mask & BIT(i)) == 0)
++                      mode = AR40XX_VTU_FUNC0_EG_MODE_NOT;
++              else if (priv->vlan == 0)
++                      mode = AR40XX_VTU_FUNC0_EG_MODE_KEEP;
++              else if ((priv->vlan_tagged & BIT(i)) ||
++                       (priv->vlan_id[priv->pvid[i]] != vid))
++                      mode = AR40XX_VTU_FUNC0_EG_MODE_TAG;
++              else
++                      mode = AR40XX_VTU_FUNC0_EG_MODE_UNTAG;
++
++              val |= mode << AR40XX_VTU_FUNC0_EG_MODE_S(i);
++      }
++      ar40xx_vtu_op(priv, op, val);
++}
++
++static void
++ar40xx_vtu_flush(struct ar40xx_priv *priv)
++{
++      ar40xx_vtu_op(priv, AR40XX_VTU_FUNC1_OP_FLUSH, 0);
++}
++
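++/* Apply the swconfig configuration: flush the VTU, reload every non-empty
++ * VLAN, and derive each port's destination (lookup) mask from the union of
++ * the VLANs it belongs to; with VLAN mode disabled, all ports are simply
++ * bridged to the CPU port instead.
++ */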
++static int
++ar40xx_sw_hw_apply(struct switch_dev *dev)
++{
++      struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
++      u8 portmask[AR40XX_NUM_PORTS];
++      int i, j;
++
++      mutex_lock(&priv->reg_mutex);
++      /* flush all vlan entries */
++      ar40xx_vtu_flush(priv);
++
++      memset(portmask, 0, sizeof(portmask));
++      if (priv->vlan) {
++              for (j = 0; j < AR40XX_MAX_VLANS; j++) {
++                      u8 vp = priv->vlan_table[j];
++
++                      if (!vp)
++                              continue;
++
++                      for (i = 0; i < dev->ports; i++) {
++                              u8 mask = BIT(i);
++
++                              if (vp & mask)
++                                      portmask[i] |= vp & ~mask;
++                      }
++
++                      ar40xx_vtu_load_vlan(priv, priv->vlan_id[j],
++                                           priv->vlan_table[j]);
++              }
++      } else {
++              /* 8021q vlan disabled */
++              for (i = 0; i < dev->ports; i++) {
++                      if (i == AR40XX_PORT_CPU)
++                              continue;
++
++                      portmask[i] = BIT(AR40XX_PORT_CPU);
++                      portmask[AR40XX_PORT_CPU] |= BIT(i);
++              }
++      }
++
++      /* update the port destination mask registers and tag settings */
++      for (i = 0; i < dev->ports; i++)
++              ar40xx_setup_port(priv, i, portmask[i]);
++
++      ar40xx_set_mirror_regs(priv);
++
++      mutex_unlock(&priv->reg_mutex);
++      return 0;
++}
++
++static int
++ar40xx_sw_reset_switch(struct switch_dev *dev)
++{
++      struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
++      int i, rv;
++
++      mutex_lock(&priv->reg_mutex);
++      memset(&priv->vlan, 0, sizeof(struct ar40xx_priv) -
++              offsetof(struct ar40xx_priv, vlan));
++
++      for (i = 0; i < AR40XX_MAX_VLANS; i++)
++              priv->vlan_id[i] = i;
++
++      ar40xx_vlan_init(priv);
++
++      priv->mirror_rx = false;
++      priv->mirror_tx = false;
++      priv->source_port = 0;
++      priv->monitor_port = 0;
++
++      mutex_unlock(&priv->reg_mutex);
++
++      rv = ar40xx_sw_hw_apply(dev);
++      return rv;
++}
++
++static int
++ar40xx_start(struct ar40xx_priv *priv)
++{
++      int ret;
++
++      ret = ar40xx_hw_init(priv);
++      if (ret)
++              return ret;
++
++      ret = ar40xx_sw_reset_switch(&priv->dev);
++      if (ret)
++              return ret;
++
++      /* finally, set up the CPU port */
++      ret = ar40xx_cpuport_setup(priv);
++      if (ret)
++              return ret;
++
++      schedule_delayed_work(&priv->mib_work,
++                            msecs_to_jiffies(AR40XX_MIB_WORK_DELAY));
++
++      ar40xx_qm_err_check_work_start(priv);
++
++      return 0;
++}
++
++static const struct switch_dev_ops ar40xx_sw_ops = {
++      .attr_global = {
++              .attr = ar40xx_sw_attr_globals,
++              .n_attr = ARRAY_SIZE(ar40xx_sw_attr_globals),
++      },
++      .attr_port = {
++              .attr = ar40xx_sw_attr_port,
++              .n_attr = ARRAY_SIZE(ar40xx_sw_attr_port),
++      },
++      .attr_vlan = {
++              .attr = ar40xx_sw_attr_vlan,
++              .n_attr = ARRAY_SIZE(ar40xx_sw_attr_vlan),
++      },
++      .get_port_pvid = ar40xx_sw_get_pvid,
++      .set_port_pvid = ar40xx_sw_set_pvid,
++      .get_vlan_ports = ar40xx_sw_get_ports,
++      .set_vlan_ports = ar40xx_sw_set_ports,
++      .apply_config = ar40xx_sw_hw_apply,
++      .reset_switch = ar40xx_sw_reset_switch,
++      .get_port_link = ar40xx_sw_get_port_link,
++};
++
++/* Start of phy driver support */
++
++static const u32 ar40xx_phy_ids[] = {
++      0x004dd0b1,
++      0x004dd0b2, /* AR40xx */
++};
++
++static bool
++ar40xx_phy_match(u32 phy_id)
++{
++      int i;
++
++      for (i = 0; i < ARRAY_SIZE(ar40xx_phy_ids); i++)
++              if (phy_id == ar40xx_phy_ids[i])
++                      return true;
++
++      return false;
++}
++
++static bool
++is_ar40xx_phy(struct mii_bus *bus)
++{
++      unsigned i;
++
++      for (i = 0; i < 4; i++) {
++              u32 phy_id;
++
++              phy_id = mdiobus_read(bus, i, MII_PHYSID1) << 16;
++              phy_id |= mdiobus_read(bus, i, MII_PHYSID2);
++              if (!ar40xx_phy_match(phy_id))
++                      return false;
++      }
++
++      return true;
++}
++
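++/* The PHY driver binds to all Malibu PHYs on the bus, but only the PHY at
++ * MDIO address 0 anchors the switch state; the remaining PHYs fall back to
++ * the generic status and auto-negotiation handlers.
++ */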
++static int
++ar40xx_phy_probe(struct phy_device *phydev)
++{
++      if (!is_ar40xx_phy(phydev->mdio.bus))
++              return -ENODEV;
++
++      ar40xx_priv->mii_bus = phydev->mdio.bus;
++      phydev->priv = ar40xx_priv;
++      if (phydev->mdio.addr == 0)
++              ar40xx_priv->phy = phydev;
++
++      phydev->supported |= SUPPORTED_1000baseT_Full;
++      phydev->advertising |= ADVERTISED_1000baseT_Full;
++      return 0;
++}
++
++static void
++ar40xx_phy_remove(struct phy_device *phydev)
++{
++      ar40xx_priv->mii_bus = NULL;
++      phydev->priv = NULL;
++}
++
++static int
++ar40xx_phy_config_init(struct phy_device *phydev)
++{
++      return 0;
++}
++
++static int
++ar40xx_phy_read_status(struct phy_device *phydev)
++{
++      if (phydev->mdio.addr != 0)
++              return genphy_read_status(phydev);
++
++      return 0;
++}
++
++static int
++ar40xx_phy_config_aneg(struct phy_device *phydev)
++{
++      if (phydev->mdio.addr == 0)
++              return 0;
++
++      return genphy_config_aneg(phydev);
++}
++
++static struct phy_driver ar40xx_phy_driver = {
++      .phy_id         = 0x004d0000,
++      .name           = "QCA Malibu",
++      .phy_id_mask    = 0xffff0000,
++      .features       = PHY_BASIC_FEATURES,
++      .probe          = ar40xx_phy_probe,
++      .remove         = ar40xx_phy_remove,
++      .config_init    = ar40xx_phy_config_init,
++      .config_aneg    = ar40xx_phy_config_aneg,
++      .read_status    = ar40xx_phy_read_status,
++};
++
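++/* The switch also exposes a small output-only GPIO controller backed by
++ * PHY MMD7 registers: each of the five PHYs contributes four "GPIOs"
++ * mapped to registers 0x8074-0x8077, intended e.g. for driving LEDs.
++ */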
++static uint16_t ar40xx_gpio_get_phy(unsigned int offset)
++{
++      return offset / 4;
++}
++
++static uint16_t ar40xx_gpio_get_reg(unsigned int offset)
++{
++      return 0x8074 + offset % 4;
++}
++
++static void ar40xx_gpio_set(struct gpio_chip *gc, unsigned int offset,
++                          int value)
++{
++      struct ar40xx_priv *priv = gpiochip_get_data(gc);
++
++      ar40xx_phy_mmd_write(priv, ar40xx_gpio_get_phy(offset), 0x7,
++                           ar40xx_gpio_get_reg(offset),
++                           value ? 0xA000 : 0x8000);
++}
++
++static int ar40xx_gpio_get(struct gpio_chip *gc, unsigned offset)
++{
++      struct ar40xx_priv *priv = gpiochip_get_data(gc);
++
++      return ar40xx_phy_mmd_read(priv, ar40xx_gpio_get_phy(offset), 0x7,
++                                 ar40xx_gpio_get_reg(offset)) == 0xA000;
++}
++
++static int ar40xx_gpio_get_dir(struct gpio_chip *gc, unsigned offset)
++{
++      return 0; /* only out direction */
++}
++
++static int ar40xx_gpio_dir_out(struct gpio_chip *gc, unsigned offset,
++                             int value)
++{
++      /*
++       * The direction-out value sets the initial output value.
++       * leds-gpio.c requires this callback to be implemented.
++       */
++      ar40xx_gpio_set(gc, offset, value);
++      return 0;
++}
++
++static void ar40xx_register_gpio(struct device *pdev,
++                               struct ar40xx_priv *priv,
++                               struct device_node *switch_node)
++{
++      struct gpio_chip *gc;
++      int err;
++
++      gc = devm_kzalloc(pdev, sizeof(*gc), GFP_KERNEL);
++      if (!gc)
++              return;
++
++      gc->label = "ar40xx_gpio";
++      gc->base = -1;
++      gc->ngpio = 5 /* mmd 0 - 4 */ * 4 /* 0x8074 - 0x8077 */;
++      gc->parent = pdev;
++      gc->owner = THIS_MODULE;
++
++      gc->get_direction = ar40xx_gpio_get_dir;
++      gc->direction_output = ar40xx_gpio_dir_out;
++      gc->get = ar40xx_gpio_get;
++      gc->set = ar40xx_gpio_set;
++      gc->can_sleep = true;
++      gc->label = priv->dev.name;
++      gc->of_node = switch_node;
++
++      err = devm_gpiochip_add_data(pdev, gc, priv);
++      if (err != 0)
++              dev_err(pdev, "Failed to register gpio %d.\n", err);
++}
++
++/* End of phy driver support */
++
++/* Platform driver probe function */
++
++static int ar40xx_probe(struct platform_device *pdev)
++{
++      struct device_node *switch_node;
++      struct device_node *psgmii_node;
++      const __be32 *mac_mode;
++      struct clk *ess_clk;
++      struct switch_dev *swdev;
++      struct ar40xx_priv *priv;
++      u32 len;
++      u32 num_mibs;
++      struct resource psgmii_base = {0};
++      struct resource switch_base = {0};
++      int ret;
++
++      priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
++      if (!priv)
++              return -ENOMEM;
++
++      platform_set_drvdata(pdev, priv);
++      ar40xx_priv = priv;
++
++      switch_node = of_node_get(pdev->dev.of_node);
++      if (of_address_to_resource(switch_node, 0, &switch_base) != 0)
++              return -EIO;
++
++      priv->hw_addr = devm_ioremap_resource(&pdev->dev, &switch_base);
++      if (IS_ERR(priv->hw_addr)) {
++              dev_err(&pdev->dev, "Failed to ioremap switch_base!\n");
++              return PTR_ERR(priv->hw_addr);
++      }
++
++      /* get the psgmii node from the device tree */
++      psgmii_node = of_find_node_by_name(NULL, "ess-psgmii");
++      if (!psgmii_node) {
++              dev_err(&pdev->dev, "Failed to find ess-psgmii node!\n");
++              return -EINVAL;
++      }
++
++      if (of_address_to_resource(psgmii_node, 0, &psgmii_base) != 0)
++              return -EIO;
++
++      priv->psgmii_hw_addr = devm_ioremap_resource(&pdev->dev, &psgmii_base);
++      if (IS_ERR(priv->psgmii_hw_addr)) {
++              dev_err(&pdev->dev, "psgmii ioremap fail!\n");
++              return PTR_ERR(priv->psgmii_hw_addr);
++      }
++
++      mac_mode = of_get_property(switch_node, "switch_mac_mode", &len);
++      if (!mac_mode) {
++              dev_err(&pdev->dev, "Failed to read switch_mac_mode\n");
++              return -EINVAL;
++      }
++      priv->mac_mode = be32_to_cpup(mac_mode);
++
++      ess_clk = of_clk_get_by_name(switch_node, "ess_clk");
++      if (ess_clk)
++              clk_prepare_enable(ess_clk);
++
++      priv->ess_rst = devm_reset_control_get(&pdev->dev, "ess_rst");
++      if (IS_ERR(priv->ess_rst)) {
++              dev_err(&pdev->dev, "Failed to get ess_rst control!\n");
++              return PTR_ERR(priv->ess_rst);
++      }
++
++      if (of_property_read_u32(switch_node, "switch_cpu_bmp",
++                               &priv->cpu_bmp) ||
++          of_property_read_u32(switch_node, "switch_lan_bmp",
++                               &priv->lan_bmp) ||
++          of_property_read_u32(switch_node, "switch_wan_bmp",
++                               &priv->wan_bmp)) {
++              dev_err(&pdev->dev, "Failed to read port properties\n");
++              return -EIO;
++      }
++
++      ret = phy_driver_register(&ar40xx_phy_driver, THIS_MODULE);
++      if (ret) {
++              dev_err(&pdev->dev, "Failed to register ar40xx phy driver!\n");
++              return -EIO;
++      }
++
++      mutex_init(&priv->reg_mutex);
++      mutex_init(&priv->mib_lock);
++      INIT_DELAYED_WORK(&priv->mib_work, ar40xx_mib_work_func);
++
++      /* register switch */
++      swdev = &priv->dev;
++
++      swdev->alias = dev_name(&priv->mii_bus->dev);
++
++      swdev->cpu_port = AR40XX_PORT_CPU;
++      swdev->name = "QCA AR40xx";
++      swdev->vlans = AR40XX_MAX_VLANS;
++      swdev->ports = AR40XX_NUM_PORTS;
++      swdev->ops = &ar40xx_sw_ops;
++      ret = register_switch(swdev, NULL);
++      if (ret)
++              goto err_unregister_phy;
++
++      num_mibs = ARRAY_SIZE(ar40xx_mibs);
++      len = priv->dev.ports * num_mibs *
++            sizeof(*priv->mib_stats);
++      priv->mib_stats = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
++      if (!priv->mib_stats) {
++              ret = -ENOMEM;
++              goto err_unregister_switch;
++      }
++
++      ar40xx_start(priv);
++
++      if (of_property_read_bool(switch_node, "gpio-controller"))
++              ar40xx_register_gpio(&pdev->dev, ar40xx_priv, switch_node);
++
++      return 0;
++
++err_unregister_switch:
++      unregister_switch(&priv->dev);
++err_unregister_phy:
++      phy_driver_unregister(&ar40xx_phy_driver);
++      platform_set_drvdata(pdev, NULL);
++      return ret;
++}
++
++static int ar40xx_remove(struct platform_device *pdev)
++{
++      struct ar40xx_priv *priv = platform_get_drvdata(pdev);
++
++      cancel_delayed_work_sync(&priv->qm_dwork);
++      cancel_delayed_work_sync(&priv->mib_work);
++
++      unregister_switch(&priv->dev);
++
++      phy_driver_unregister(&ar40xx_phy_driver);
++
++      return 0;
++}
++
++static const struct of_device_id ar40xx_of_mtable[] = {
++      {.compatible = "qcom,ess-switch" },
++      {}
++};
++
++struct platform_driver ar40xx_drv = {
++      .probe = ar40xx_probe,
++      .remove = ar40xx_remove,
++      .driver = {
++              .name    = "ar40xx",
++              .of_match_table = ar40xx_of_mtable,
++      },
++};
++
++module_platform_driver(ar40xx_drv);
++
++MODULE_DESCRIPTION("IPQ40XX ESS driver");
++MODULE_LICENSE("Dual BSD/GPL");
+--- /dev/null
++++ b/drivers/net/phy/ar40xx.h
+@@ -0,0 +1,337 @@
++/*
++ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
++ *
++ * Permission to use, copy, modify, and/or distribute this software for
++ * any purpose with or without fee is hereby granted, provided that the
++ * above copyright notice and this permission notice appear in all copies.
++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
++ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
++ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
++ * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
++ */
++
++#ifndef __AR40XX_H
++#define __AR40XX_H
++
++#define AR40XX_MAX_VLANS      128
++#define AR40XX_NUM_PORTS      6
++#define AR40XX_NUM_PHYS       5
++
++#define BITS(_s, _n)  (((1UL << (_n)) - 1) << _s)
++
++struct ar40xx_priv {
++      struct switch_dev dev;
++
++      u8  __iomem      *hw_addr;
++      u8  __iomem      *psgmii_hw_addr;
++      u32 mac_mode;
++      struct reset_control *ess_rst;
++      u32 cpu_bmp;
++      u32 lan_bmp;
++      u32 wan_bmp;
++
++      struct mii_bus *mii_bus;
++      struct phy_device *phy;
++
++      /* mutex for qm task */
++      struct mutex qm_lock;
++      struct delayed_work qm_dwork;
++      u32 port_link_up[AR40XX_NUM_PORTS];
++      u32 ar40xx_port_old_link[AR40XX_NUM_PORTS];
++      u32 ar40xx_port_qm_buf[AR40XX_NUM_PORTS];
++
++      u32 phy_t_status;
++
++      /* mutex for switch reg access */
++      struct mutex reg_mutex;
++
++      /* mutex for mib task */
++      struct mutex mib_lock;
++      struct delayed_work mib_work;
++      int mib_next_port;
++      u64 *mib_stats;
++
++      char buf[2048];
++
++      /* all fields below will be cleared on reset */
++      bool vlan;
++      u16 vlan_id[AR40XX_MAX_VLANS];
++      u8 vlan_table[AR40XX_MAX_VLANS];
++      u8 vlan_tagged;
++      u16 pvid[AR40XX_NUM_PORTS];
++
++      /* mirror */
++      bool mirror_rx;
++      bool mirror_tx;
++      int source_port;
++      int monitor_port;
++};
++
++#define AR40XX_PORT_LINK_UP 1
++#define AR40XX_PORT_LINK_DOWN 0
++#define AR40XX_QM_NOT_EMPTY  1
++#define AR40XX_QM_EMPTY  0
++
++#define AR40XX_LAN_VLAN       1
++#define AR40XX_WAN_VLAN       2
++
++enum ar40xx_port_wrapper_cfg {
++      PORT_WRAPPER_PSGMII = 0,
++};
++
++struct ar40xx_mib_desc {
++      u32 size;
++      u32 offset;
++      const char *name;
++};
++
++#define AR40XX_PORT_CPU       0
++
++#define AR40XX_PSGMII_MODE_CONTROL    0x1b4
++#define   AR40XX_PSGMII_ATHR_CSCO_MODE_25M    BIT(0)
++
++#define AR40XX_PSGMIIPHY_TX_CONTROL    0x288
++
++#define AR40XX_MII_ATH_MMD_ADDR               0x0d
++#define AR40XX_MII_ATH_MMD_DATA               0x0e
++#define AR40XX_MII_ATH_DBG_ADDR               0x1d
++#define AR40XX_MII_ATH_DBG_DATA               0x1e
++
++#define AR40XX_STATS_RXBROAD          0x00
++#define AR40XX_STATS_RXPAUSE          0x04
++#define AR40XX_STATS_RXMULTI          0x08
++#define AR40XX_STATS_RXFCSERR         0x0c
++#define AR40XX_STATS_RXALIGNERR               0x10
++#define AR40XX_STATS_RXRUNT           0x14
++#define AR40XX_STATS_RXFRAGMENT               0x18
++#define AR40XX_STATS_RX64BYTE         0x1c
++#define AR40XX_STATS_RX128BYTE                0x20
++#define AR40XX_STATS_RX256BYTE                0x24
++#define AR40XX_STATS_RX512BYTE                0x28
++#define AR40XX_STATS_RX1024BYTE               0x2c
++#define AR40XX_STATS_RX1518BYTE               0x30
++#define AR40XX_STATS_RXMAXBYTE                0x34
++#define AR40XX_STATS_RXTOOLONG                0x38
++#define AR40XX_STATS_RXGOODBYTE               0x3c
++#define AR40XX_STATS_RXBADBYTE                0x44
++#define AR40XX_STATS_RXOVERFLOW               0x4c
++#define AR40XX_STATS_FILTERED         0x50
++#define AR40XX_STATS_TXBROAD          0x54
++#define AR40XX_STATS_TXPAUSE          0x58
++#define AR40XX_STATS_TXMULTI          0x5c
++#define AR40XX_STATS_TXUNDERRUN               0x60
++#define AR40XX_STATS_TX64BYTE         0x64
++#define AR40XX_STATS_TX128BYTE                0x68
++#define AR40XX_STATS_TX256BYTE                0x6c
++#define AR40XX_STATS_TX512BYTE                0x70
++#define AR40XX_STATS_TX1024BYTE               0x74
++#define AR40XX_STATS_TX1518BYTE               0x78
++#define AR40XX_STATS_TXMAXBYTE                0x7c
++#define AR40XX_STATS_TXOVERSIZE               0x80
++#define AR40XX_STATS_TXBYTE           0x84
++#define AR40XX_STATS_TXCOLLISION      0x8c
++#define AR40XX_STATS_TXABORTCOL               0x90
++#define AR40XX_STATS_TXMULTICOL               0x94
++#define AR40XX_STATS_TXSINGLECOL      0x98
++#define AR40XX_STATS_TXEXCDEFER               0x9c
++#define AR40XX_STATS_TXDEFER          0xa0
++#define AR40XX_STATS_TXLATECOL                0xa4
++
++#define AR40XX_REG_MODULE_EN                  0x030
++#define   AR40XX_MODULE_EN_MIB                        BIT(0)
++
++#define AR40XX_REG_MIB_FUNC                   0x034
++#define   AR40XX_MIB_BUSY             BIT(17)
++#define   AR40XX_MIB_CPU_KEEP                 BIT(20)
++#define   AR40XX_MIB_FUNC             BITS(24, 3)
++#define   AR40XX_MIB_FUNC_S           24
++#define   AR40XX_MIB_FUNC_NO_OP               0x0
++#define   AR40XX_MIB_FUNC_FLUSH               0x1
++
++#define AR40XX_REG_PORT_STATUS(_i)            (0x07c + (_i) * 4)
++#define   AR40XX_PORT_SPEED                   BITS(0, 2)
++#define   AR40XX_PORT_STATUS_SPEED_S  0
++#define   AR40XX_PORT_TX_EN                   BIT(2)
++#define   AR40XX_PORT_RX_EN                   BIT(3)
++#define   AR40XX_PORT_STATUS_TXFLOW   BIT(4)
++#define   AR40XX_PORT_STATUS_RXFLOW   BIT(5)
++#define   AR40XX_PORT_DUPLEX                  BIT(6)
++#define   AR40XX_PORT_TXHALF_FLOW             BIT(7)
++#define   AR40XX_PORT_STATUS_LINK_UP  BIT(8)
++#define   AR40XX_PORT_AUTO_LINK_EN            BIT(9)
++#define   AR40XX_PORT_STATUS_FLOW_CONTROL  BIT(12)
++
++#define AR40XX_REG_MAX_FRAME_SIZE             0x078
++#define   AR40XX_MAX_FRAME_SIZE_MTU           BITS(0, 14)
++
++#define AR40XX_REG_PORT_HEADER(_i)            (0x09c + (_i) * 4)
++
++#define AR40XX_REG_EEE_CTRL                   0x100
++#define   AR40XX_EEE_CTRL_DISABLE_PHY(_i)     BIT(4 + (_i) * 2)
++
++#define AR40XX_REG_PORT_VLAN0(_i)             (0x420 + (_i) * 0x8)
++#define   AR40XX_PORT_VLAN0_DEF_SVID          BITS(0, 12)
++#define   AR40XX_PORT_VLAN0_DEF_SVID_S                0
++#define   AR40XX_PORT_VLAN0_DEF_CVID          BITS(16, 12)
++#define   AR40XX_PORT_VLAN0_DEF_CVID_S                16
++
++#define AR40XX_REG_PORT_VLAN1(_i)             (0x424 + (_i) * 0x8)
++#define   AR40XX_PORT_VLAN1_PORT_VLAN_PROP    BIT(6)
++#define   AR40XX_PORT_VLAN1_OUT_MODE          BITS(12, 2)
++#define   AR40XX_PORT_VLAN1_OUT_MODE_S                12
++#define   AR40XX_PORT_VLAN1_OUT_MODE_UNMOD    0
++#define   AR40XX_PORT_VLAN1_OUT_MODE_UNTAG    1
++#define   AR40XX_PORT_VLAN1_OUT_MODE_TAG              2
++#define   AR40XX_PORT_VLAN1_OUT_MODE_UNTOUCH  3
++
++#define AR40XX_REG_VTU_FUNC0                  0x0610
++#define   AR40XX_VTU_FUNC0_EG_MODE            BITS(4, 14)
++#define   AR40XX_VTU_FUNC0_EG_MODE_S(_i)      (4 + (_i) * 2)
++#define   AR40XX_VTU_FUNC0_EG_MODE_KEEP               0
++#define   AR40XX_VTU_FUNC0_EG_MODE_UNTAG      1
++#define   AR40XX_VTU_FUNC0_EG_MODE_TAG                2
++#define   AR40XX_VTU_FUNC0_EG_MODE_NOT                3
++#define   AR40XX_VTU_FUNC0_IVL                        BIT(19)
++#define   AR40XX_VTU_FUNC0_VALID              BIT(20)
++
++#define AR40XX_REG_VTU_FUNC1                  0x0614
++#define   AR40XX_VTU_FUNC1_OP                 BITS(0, 3)
++#define   AR40XX_VTU_FUNC1_OP_NOOP            0
++#define   AR40XX_VTU_FUNC1_OP_FLUSH           1
++#define   AR40XX_VTU_FUNC1_OP_LOAD            2
++#define   AR40XX_VTU_FUNC1_OP_PURGE           3
++#define   AR40XX_VTU_FUNC1_OP_REMOVE_PORT     4
++#define   AR40XX_VTU_FUNC1_OP_GET_NEXT                5
++#define   AR40XX7_VTU_FUNC1_OP_GET_ONE                6
++#define   AR40XX_VTU_FUNC1_FULL                       BIT(4)
++#define   AR40XX_VTU_FUNC1_PORT                       BITS(8, 4)
++#define   AR40XX_VTU_FUNC1_PORT_S             8
++#define   AR40XX_VTU_FUNC1_VID                        BITS(16, 12)
++#define   AR40XX_VTU_FUNC1_VID_S              16
++#define   AR40XX_VTU_FUNC1_BUSY                       BIT(31)
++
++#define AR40XX_REG_FWD_CTRL0                  0x620
++#define   AR40XX_FWD_CTRL0_CPU_PORT_EN                BIT(10)
++#define   AR40XX_FWD_CTRL0_MIRROR_PORT                BITS(4, 4)
++#define   AR40XX_FWD_CTRL0_MIRROR_PORT_S      4
++
++#define AR40XX_REG_FWD_CTRL1                  0x624
++#define   AR40XX_FWD_CTRL1_UC_FLOOD           BITS(0, 7)
++#define   AR40XX_FWD_CTRL1_UC_FLOOD_S         0
++#define   AR40XX_FWD_CTRL1_MC_FLOOD           BITS(8, 7)
++#define   AR40XX_FWD_CTRL1_MC_FLOOD_S         8
++#define   AR40XX_FWD_CTRL1_BC_FLOOD           BITS(16, 7)
++#define   AR40XX_FWD_CTRL1_BC_FLOOD_S         16
++#define   AR40XX_FWD_CTRL1_IGMP                       BITS(24, 7)
++#define   AR40XX_FWD_CTRL1_IGMP_S             24
++
++#define AR40XX_REG_PORT_LOOKUP(_i)            (0x660 + (_i) * 0xc)
++#define   AR40XX_PORT_LOOKUP_MEMBER           BITS(0, 7)
++#define   AR40XX_PORT_LOOKUP_IN_MODE          BITS(8, 2)
++#define   AR40XX_PORT_LOOKUP_IN_MODE_S                8
++#define   AR40XX_PORT_LOOKUP_STATE            BITS(16, 3)
++#define   AR40XX_PORT_LOOKUP_STATE_S          16
++#define   AR40XX_PORT_LOOKUP_LEARN            BIT(20)
++#define   AR40XX_PORT_LOOKUP_LOOPBACK         BIT(21)
++#define   AR40XX_PORT_LOOKUP_ING_MIRROR_EN    BIT(25)
++
++#define AR40XX_REG_ATU_FUNC                   0x60c
++#define   AR40XX_ATU_FUNC_OP                  BITS(0, 4)
++#define   AR40XX_ATU_FUNC_OP_NOOP             0x0
++#define   AR40XX_ATU_FUNC_OP_FLUSH            0x1
++#define   AR40XX_ATU_FUNC_OP_LOAD             0x2
++#define   AR40XX_ATU_FUNC_OP_PURGE            0x3
++#define   AR40XX_ATU_FUNC_OP_FLUSH_LOCKED     0x4
++#define   AR40XX_ATU_FUNC_OP_FLUSH_UNICAST    0x5
++#define   AR40XX_ATU_FUNC_OP_GET_NEXT         0x6
++#define   AR40XX_ATU_FUNC_OP_SEARCH_MAC               0x7
++#define   AR40XX_ATU_FUNC_OP_CHANGE_TRUNK     0x8
++#define   AR40XX_ATU_FUNC_BUSY                        BIT(31)
++
++#define AR40XX_REG_QM_DEBUG_ADDR              0x820
++#define AR40XX_REG_QM_DEBUG_VALUE             0x824
++#define   AR40XX_REG_QM_PORT0_3_QNUM          0x1d
++#define   AR40XX_REG_QM_PORT4_6_QNUM          0x1e
++
++#define AR40XX_REG_PORT_HOL_CTRL1(_i)         (0x974 + (_i) * 0x8)
++#define   AR40XX_PORT_HOL_CTRL1_EG_MIRROR_EN  BIT(16)
++
++#define AR40XX_REG_PORT_FLOWCTRL_THRESH(_i)   (0x9b0 + (_i) * 0x4)
++#define   AR40XX_PORT0_FC_THRESH_ON_DFLT      0x60
++#define   AR40XX_PORT0_FC_THRESH_OFF_DFLT     0x90
++
++#define AR40XX_PHY_DEBUG_0   0
++#define AR40XX_PHY_MANU_CTRL_EN  BIT(12)
++
++#define AR40XX_PHY_DEBUG_2   2
++
++#define AR40XX_PHY_SPEC_STATUS 0x11
++#define   AR40XX_PHY_SPEC_STATUS_LINK         BIT(10)
++#define   AR40XX_PHY_SPEC_STATUS_DUPLEX               BIT(13)
++#define   AR40XX_PHY_SPEC_STATUS_SPEED                BITS(14, 2)
++
++/* port forwarding state */
++enum {
++      AR40XX_PORT_STATE_DISABLED = 0,
++      AR40XX_PORT_STATE_BLOCK = 1,
++      AR40XX_PORT_STATE_LISTEN = 2,
++      AR40XX_PORT_STATE_LEARN = 3,
++      AR40XX_PORT_STATE_FORWARD = 4
++};
++
++/* ingress 802.1q mode */
++enum {
++      AR40XX_IN_PORT_ONLY = 0,
++      AR40XX_IN_PORT_FALLBACK = 1,
++      AR40XX_IN_VLAN_ONLY = 2,
++      AR40XX_IN_SECURE = 3
++};
++
++/* egress 802.1q mode */
++enum {
++      AR40XX_OUT_KEEP = 0,
++      AR40XX_OUT_STRIP_VLAN = 1,
++      AR40XX_OUT_ADD_VLAN = 2
++};
++
++/* port speed */
++enum {
++      AR40XX_PORT_SPEED_10M = 0,
++      AR40XX_PORT_SPEED_100M = 1,
++      AR40XX_PORT_SPEED_1000M = 2,
++      AR40XX_PORT_SPEED_ERR = 3,
++};
++
++#define AR40XX_MIB_WORK_DELAY 2000 /* msecs */
++
++#define AR40XX_QM_WORK_DELAY    100
++
++#define   AR40XX_MIB_FUNC_CAPTURE     0x3
++
++#define AR40XX_REG_PORT_STATS_START   0x1000
++#define AR40XX_REG_PORT_STATS_LEN             0x100
++
++#define AR40XX_PORTS_ALL      0x3f
++
++#define AR40XX_PSGMII_ID      5
++#define AR40XX_PSGMII_CALB_NUM        100
++#define AR40XX_MALIBU_PSGMII_MODE_CTRL        0x6d
++#define AR40XX_MALIBU_PHY_PSGMII_MODE_CTRL_ADJUST_VAL 0x220c
++#define AR40XX_MALIBU_PHY_MMD7_DAC_CTRL       0x801a
++#define AR40XX_MALIBU_DAC_CTRL_MASK   0x380
++#define AR40XX_MALIBU_DAC_CTRL_VALUE  0x280
++#define AR40XX_MALIBU_PHY_RLP_CTRL       0x805a
++#define AR40XX_PSGMII_TX_DRIVER_1_CTRL        0xb
++#define AR40XX_MALIBU_PHY_PSGMII_REDUCE_SERDES_TX_AMP 0x8a
++#define AR40XX_MALIBU_PHY_LAST_ADDR   4
++
++static inline struct ar40xx_priv *
++swdev_to_ar40xx(struct switch_dev *swdev)
++{
++      return container_of(swdev, struct ar40xx_priv, dev);
++}
++
++#endif
+--- /dev/null
++++ b/drivers/net/phy/mdio-ipq40xx.c
+@@ -0,0 +1,203 @@
++/*
++ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
++ *
++ * Permission to use, copy, modify, and/or distribute this software for
++ * any purpose with or without fee is hereby granted, provided that the
++ * above copyright notice and this permission notice appear in all copies.
++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
++ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
++ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
++ * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
++ */
++
++#include <linux/delay.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/mutex.h>
++#include <linux/io.h>
++#include <linux/of_address.h>
++#include <linux/of_mdio.h>
++#include <linux/phy.h>
++#include <linux/platform_device.h>
++
++#define MDIO_CTRL_0_REG               0x40
++#define MDIO_CTRL_1_REG               0x44
++#define MDIO_CTRL_2_REG               0x48
++#define MDIO_CTRL_3_REG               0x4c
++#define MDIO_CTRL_4_REG               0x50
++#define MDIO_CTRL_4_ACCESS_BUSY               BIT(16)
++#define MDIO_CTRL_4_ACCESS_START              BIT(8)
++#define MDIO_CTRL_4_ACCESS_CODE_READ          0
++#define MDIO_CTRL_4_ACCESS_CODE_WRITE 1
++#define CTRL_0_REG_DEFAULT_VALUE      0x150FF
++
++#define IPQ40XX_MDIO_RETRY    1000
++#define IPQ40XX_MDIO_DELAY    10
++
++struct ipq40xx_mdio_data {
++      struct mii_bus  *mii_bus;
++      void __iomem    *membase;
++      int             phy_irq[PHY_MAX_ADDR];
++      struct device   *dev;
++};
++
++static int ipq40xx_mdio_wait_busy(struct ipq40xx_mdio_data *am)
++{
++      int i;
++
++      for (i = 0; i < IPQ40XX_MDIO_RETRY; i++) {
++              unsigned int busy;
++
++              busy = readl(am->membase + MDIO_CTRL_4_REG) &
++                      MDIO_CTRL_4_ACCESS_BUSY;
++              if (!busy)
++                      return 0;
++
++              /* BUSY may take 15~20 loop iterations to be cleared */
++              udelay(IPQ40XX_MDIO_DELAY);
++      }
++
++      dev_err(am->dev, "%s: MDIO operation timed out\n", am->mii_bus->name);
++
++      return -ETIMEDOUT;
++}
++
++static int ipq40xx_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
++{
++      struct ipq40xx_mdio_data *am = bus->priv;
++      int value = 0;
++      unsigned int cmd = 0;
++
++      lockdep_assert_held(&bus->mdio_lock);
++
++      if (ipq40xx_mdio_wait_busy(am))
++              return -ETIMEDOUT;
++
++      /* issue the phy address and reg */
++      writel((mii_id << 8) | regnum, am->membase + MDIO_CTRL_1_REG);
++
++      cmd = MDIO_CTRL_4_ACCESS_START|MDIO_CTRL_4_ACCESS_CODE_READ;
++
++      /* issue read command */
++      writel(cmd, am->membase + MDIO_CTRL_4_REG);
++
++      /* Wait read complete */
++      if (ipq40xx_mdio_wait_busy(am))
++              return -ETIMEDOUT;
++
++      /* Read data */
++      value = readl(am->membase + MDIO_CTRL_3_REG);
++
++      return value;
++}
++
++static int ipq40xx_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
++                          u16 value)
++{
++      struct ipq40xx_mdio_data *am = bus->priv;
++      unsigned int cmd = 0;
++
++      lockdep_assert_held(&bus->mdio_lock);
++
++      if (ipq40xx_mdio_wait_busy(am))
++              return -ETIMEDOUT;
++
++      /* issue the phy address and reg */
++      writel((mii_id << 8) | regnum, am->membase + MDIO_CTRL_1_REG);
++
++      /* issue write data */
++      writel(value, am->membase + MDIO_CTRL_2_REG);
++
++      cmd = MDIO_CTRL_4_ACCESS_START|MDIO_CTRL_4_ACCESS_CODE_WRITE;
++      /* issue write command */
++      writel(cmd, am->membase + MDIO_CTRL_4_REG);
++
++      /* Wait write complete */
++      if (ipq40xx_mdio_wait_busy(am))
++              return -ETIMEDOUT;
++
++      return 0;
++}
++
++static int ipq40xx_mdio_probe(struct platform_device *pdev)
++{
++      struct ipq40xx_mdio_data *am;
++      struct resource *res;
++      int i;
++
++      am = devm_kzalloc(&pdev->dev, sizeof(*am), GFP_KERNEL);
++      if (!am)
++              return -ENOMEM;
++
++      res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++      if (!res) {
++              dev_err(&pdev->dev, "no iomem resource found\n");
++              return -ENXIO;
++      }
++
++      am->membase = devm_ioremap_resource(&pdev->dev, res);
++      if (IS_ERR(am->membase)) {
++              dev_err(&pdev->dev, "unable to ioremap registers\n");
++              return PTR_ERR(am->membase);
++      }
++
++      am->mii_bus = devm_mdiobus_alloc(&pdev->dev);
++      if (!am->mii_bus)
++              return  -ENOMEM;
++
++      writel(CTRL_0_REG_DEFAULT_VALUE, am->membase + MDIO_CTRL_0_REG);
++
++      am->mii_bus->name = "ipq40xx_mdio";
++      am->mii_bus->read = ipq40xx_mdio_read;
++      am->mii_bus->write = ipq40xx_mdio_write;
++      memcpy(am->mii_bus->irq, am->phy_irq, sizeof(am->phy_irq));
++      am->mii_bus->priv = am;
++      am->mii_bus->parent = &pdev->dev;
++      snprintf(am->mii_bus->id, MII_BUS_ID_SIZE, "%s", dev_name(&pdev->dev));
++
++      for (i = 0; i < PHY_MAX_ADDR; i++)
++              am->phy_irq[i] = PHY_POLL;
++
++      am->dev = &pdev->dev;
++      platform_set_drvdata(pdev, am);
++
++      /* edma_axi_probe() uses the "am" drvdata, so
++       * ipq40xx_mdio_probe() must be called first.
++       */
++      return of_mdiobus_register(am->mii_bus, pdev->dev.of_node);
++}
++
++static int ipq40xx_mdio_remove(struct platform_device *pdev)
++{
++      struct ipq40xx_mdio_data *am = platform_get_drvdata(pdev);
++
++      mdiobus_unregister(am->mii_bus);
++      return 0;
++}
++
++static const struct of_device_id ipq40xx_mdio_dt_ids[] = {
++      { .compatible = "qcom,ipq4019-mdio" },
++      { }
++};
++MODULE_DEVICE_TABLE(of, ipq40xx_mdio_dt_ids);
++
++static struct platform_driver ipq40xx_mdio_driver = {
++      .probe = ipq40xx_mdio_probe,
++      .remove = ipq40xx_mdio_remove,
++      .driver = {
++              .name = "ipq40xx-mdio",
++              .of_match_table = ipq40xx_mdio_dt_ids,
++      },
++};
++
++module_platform_driver(ipq40xx_mdio_driver);
++
++#define DRV_VERSION     "1.0"
++
++MODULE_DESCRIPTION("IPQ40XX MDIO interface driver");
++MODULE_AUTHOR("Qualcomm Atheros");
++MODULE_VERSION(DRV_VERSION);
++MODULE_LICENSE("Dual BSD/GPL");
diff --git a/target/linux/ipq40xx/patches-4.14/701-dts-ipq4019-add-mdio-node.patch b/target/linux/ipq40xx/patches-4.14/701-dts-ipq4019-add-mdio-node.patch
new file mode 100644 (file)
index 0000000..112c921
--- /dev/null
@@ -0,0 +1,52 @@
+From 09ed737593f71bcca08a537a6c15264a1a6add08 Mon Sep 17 00:00:00 2001
+From: Christian Lamparter <chunkeey@gmail.com>
+Date: Sun, 20 Nov 2016 01:10:33 +0100
+Subject: [PATCH] dts: ipq4019: add mdio node for ethernet
+
+This patch adds the mdio device-tree node.
+This is where the switch is connected, so it is needed
+for the ethernet interfaces.
+
+Note: The driver isn't anywhere close to being upstream,
+so the info might change.
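+
+For illustration, a board .dts would then enable this bus; a rough,
+hypothetical sketch (the "mdio" label is an assumption and is not
+added by this patch):
+
+	&mdio {
+		status = "okay";
+	};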
+---
+ arch/arm/boot/dts/qcom-ipq4019.dtsi | 28 ++++++++++++++++++++++++++++
+ 1 file changed, 28 insertions(+)
+
+--- a/arch/arm/boot/dts/qcom-ipq4019.dtsi
++++ b/arch/arm/boot/dts/qcom-ipq4019.dtsi
+@@ -539,6 +539,34 @@
+                       status = "disabled";
+               };
++              mdio@90000 {
++                      #address-cells = <1>;
++                      #size-cells = <0>;
++                      compatible = "qcom,ipq4019-mdio";
++                      reg = <0x90000 0x64>;
++                      status = "disabled";
++
++                      ethernet-phy@0 {
++                              reg = <0>;
++                      };
++
++                      ethernet-phy@1 {
++                              reg = <1>;
++                      };
++
++                      ethernet-phy@2 {
++                              reg = <2>;
++                      };
++
++                      ethernet-phy@3 {
++                              reg = <3>;
++                      };
++
++                      ethernet-phy@4 {
++                              reg = <4>;
++                      };
++              };
++
+               usb3_ss_phy: ssphy@9a000 {
+                       compatible = "qca,uni-ssphy";
+                       reg = <0x9a000 0x800>;
diff --git a/target/linux/ipq40xx/patches-4.14/702-dts-ipq4019-add-PHY-switch-nodes.patch b/target/linux/ipq40xx/patches-4.14/702-dts-ipq4019-add-PHY-switch-nodes.patch
new file mode 100644 (file)
index 0000000..7ad9edb
--- /dev/null
@@ -0,0 +1,46 @@
+From 9deeec35dd3b628b95624e41d4e04acf728991ba Mon Sep 17 00:00:00 2001
+From: Christian Lamparter <chunkeey@gmail.com>
+Date: Sun, 20 Nov 2016 02:20:54 +0100
+Subject: [PATCH] dts: ipq4019: add PHY/switch nodes
+
+This patch adds both the "qcom,ess-switch" and "qcom,ess-psgmii"
+nodes which are needed for the ar40xx.c driver to initialize the
+switch.
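+
+For illustration, a board .dts would enable both nodes before the
+ar40xx driver can bind; a rough, hypothetical sketch (the "switch" and
+"psgmii" labels are assumptions and are not added by this patch):
+
+	&switch {
+		status = "okay";
+	};
+
+	&psgmii {
+		status = "okay";
+	};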
+
+Signed-off-by: Christian Lamparter <chunkeey@gmail.com>
+---
+ arch/arm/boot/dts/qcom-ipq4019.dtsi | 23 +++++++++++++++++++++++
+ 1 file changed, 23 insertions(+)
+
+--- a/arch/arm/boot/dts/qcom-ipq4019.dtsi
++++ b/arch/arm/boot/dts/qcom-ipq4019.dtsi
+@@ -567,6 +567,29 @@
+                       };
+               };
++              ess-switch@c000000 {
++                      compatible = "qcom,ess-switch";
++                      reg = <0xc000000 0x80000>;
++                      switch_access_mode = "local bus";
++                      resets = <&gcc ESS_RESET>;
++                      reset-names = "ess_rst";
++                      clocks = <&gcc GCC_ESS_CLK>;
++                      clock-names = "ess_clk";
++                      switch_cpu_bmp = <0x1>;
++                      switch_lan_bmp = <0x1e>;
++                      switch_wan_bmp = <0x20>;
++                      switch_mac_mode = <0>; /* PORT_WRAPPER_PSGMII */
++                      switch_initvlas = <0x7c 0x54>;
++                      status = "disabled";
++              };
++
++              ess-psgmii@98000 {
++                      compatible = "qcom,ess-psgmii";
++                      reg = <0x98000 0x800>;
++                      psgmii_access_mode = "local bus";
++                      status = "disabled";
++              };
++
+               usb3_ss_phy: ssphy@9a000 {
+                       compatible = "qca,uni-ssphy";
+                       reg = <0x9a000 0x800>;
diff --git a/target/linux/ipq40xx/patches-4.14/710-net-add-qualcomm-essedma-ethernet-driver.patch b/target/linux/ipq40xx/patches-4.14/710-net-add-qualcomm-essedma-ethernet-driver.patch
new file mode 100644 (file)
index 0000000..e304911
--- /dev/null
@@ -0,0 +1,4578 @@
+From 12e9319da1adacac92930c899c99f0e1970cac11 Mon Sep 17 00:00:00 2001
+From: Christian Lamparter <chunkeey@googlemail.com>
+Date: Thu, 19 Jan 2017 02:01:31 +0100
+Subject: [PATCH 33/38] NET: add qualcomm essedma ethernet driver
+
+Signed-off-by: Christian Lamparter <chunkeey@gmail.com>
+---
+ drivers/net/ethernet/qualcomm/Kconfig  | 9 +++++++++
+ drivers/net/ethernet/qualcomm/Makefile | 1 +
+ 2 files changed, 10 insertions(+)
+
+--- a/drivers/net/ethernet/qualcomm/Kconfig
++++ b/drivers/net/ethernet/qualcomm/Kconfig
+@@ -61,4 +61,13 @@ config QCOM_EMAC
+ source "drivers/net/ethernet/qualcomm/rmnet/Kconfig"
++config ESSEDMA
++      tristate "Qualcomm Atheros ESS Edma support"
++      ---help---
++        This driver supports the Qualcomm Atheros ESS EDMA ethernet adapter.
++        Say Y to build this driver.
++
++        To compile this driver as a module, choose M here. The module
++        will be called essedma.ko.
++
+ endif # NET_VENDOR_QUALCOMM
+--- a/drivers/net/ethernet/qualcomm/Makefile
++++ b/drivers/net/ethernet/qualcomm/Makefile
+@@ -10,5 +10,6 @@ obj-$(CONFIG_QCA7000_UART) += qcauart.o
+ qcauart-objs := qca_uart.o
+ obj-y += emac/
++obj-$(CONFIG_ESSEDMA) += essedma/
+ obj-$(CONFIG_RMNET) += rmnet/
+--- /dev/null
++++ b/drivers/net/ethernet/qualcomm/essedma/Makefile
+@@ -0,0 +1,9 @@
++#
++## Makefile for the Qualcomm Atheros ethernet edma driver
++#
++
++
++obj-$(CONFIG_ESSEDMA) += essedma.o
++
++essedma-objs := edma_axi.o edma.o edma_ethtool.o
++
+--- /dev/null
++++ b/drivers/net/ethernet/qualcomm/essedma/edma.c
+@@ -0,0 +1,2143 @@
++/*
++ * Copyright (c) 2014 - 2016, The Linux Foundation. All rights reserved.
++ *
++ * Permission to use, copy, modify, and/or distribute this software for
++ * any purpose with or without fee is hereby granted, provided that the
++ * above copyright notice and this permission notice appear in all copies.
++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
++ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
++ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
++ * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
++ */
++
++#include <linux/platform_device.h>
++#include <linux/if_vlan.h>
++#include "ess_edma.h"
++#include "edma.h"
++
++extern struct net_device *edma_netdev[EDMA_MAX_PORTID_SUPPORTED];
++bool edma_stp_rstp;
++u16 edma_ath_eth_type;
++
++/* edma_skb_priority_offset()
++ *    get edma skb priority
++ */
++static unsigned int edma_skb_priority_offset(struct sk_buff *skb)
++{
++      return (skb->priority >> 2) & 1;
++}
++
++/* edma_alloc_tx_ring()
++ *    Allocate Tx descriptors ring
++ */
++static int edma_alloc_tx_ring(struct edma_common_info *edma_cinfo,
++                            struct edma_tx_desc_ring *etdr)
++{
++      struct platform_device *pdev = edma_cinfo->pdev;
++
++      /* Initialize ring */
++      etdr->size = sizeof(struct edma_sw_desc) * etdr->count;
++      etdr->sw_next_to_fill = 0;
++      etdr->sw_next_to_clean = 0;
++
++      /* Allocate SW descriptors */
++      etdr->sw_desc = vzalloc(etdr->size);
++      if (!etdr->sw_desc) {
++              dev_err(&pdev->dev, "buffer alloc of tx ring failed=%p", etdr);
++              return -ENOMEM;
++      }
++
++      /* Allocate HW descriptors */
++      etdr->hw_desc = dma_alloc_coherent(&pdev->dev, etdr->size, &etdr->dma,
++                                        GFP_KERNEL);
++      if (!etdr->hw_desc) {
++              dev_err(&pdev->dev, "descriptor allocation for tx ring failed");
++              vfree(etdr->sw_desc);
++              return -ENOMEM;
++      }
++
++      return 0;
++}
++
++/* edma_free_tx_ring()
++ *    Free tx ring allocated by edma_alloc_tx_ring()
++ */
++static void edma_free_tx_ring(struct edma_common_info *edma_cinfo,
++                            struct edma_tx_desc_ring *etdr)
++{
++      struct platform_device *pdev = edma_cinfo->pdev;
++
++      if (likely(etdr->dma))
++              dma_free_coherent(&pdev->dev, etdr->size, etdr->hw_desc,
++                               etdr->dma);
++
++      vfree(etdr->sw_desc);
++      etdr->sw_desc = NULL;
++}
++
++/* edma_alloc_rx_ring()
++ *    allocate rx descriptor ring
++ */
++static int edma_alloc_rx_ring(struct edma_common_info *edma_cinfo,
++                           struct edma_rfd_desc_ring *erxd)
++{
++      struct platform_device *pdev = edma_cinfo->pdev;
++
++      erxd->size = sizeof(struct edma_sw_desc) * erxd->count;
++      erxd->sw_next_to_fill = 0;
++      erxd->sw_next_to_clean = 0;
++
++      /* Allocate SW descriptors */
++      erxd->sw_desc = vzalloc(erxd->size);
++      if (!erxd->sw_desc)
++              return -ENOMEM;
++
++      /* Alloc HW descriptors */
++      erxd->hw_desc = dma_alloc_coherent(&pdev->dev, erxd->size, &erxd->dma,
++                      GFP_KERNEL);
++      if (!erxd->hw_desc) {
++              vfree(erxd->sw_desc);
++              return -ENOMEM;
++      }
++
++      return 0;
++}
++
++/* edma_free_rx_ring()
++ *    Free rx ring allocated by edma_alloc_rx_ring()
++ */
++static void edma_free_rx_ring(struct edma_common_info *edma_cinfo,
++                           struct edma_rfd_desc_ring *rxdr)
++{
++      struct platform_device *pdev = edma_cinfo->pdev;
++
++      if (likely(rxdr->dma))
++              dma_free_coherent(&pdev->dev, rxdr->size, rxdr->hw_desc,
++                               rxdr->dma);
++
++      vfree(rxdr->sw_desc);
++      rxdr->sw_desc = NULL;
++}
++
++/* edma_configure_tx()
++ *    Configure transmission control data
++ */
++static void edma_configure_tx(struct edma_common_info *edma_cinfo)
++{
++      u32 txq_ctrl_data;
++
++      txq_ctrl_data = (EDMA_TPD_BURST << EDMA_TXQ_NUM_TPD_BURST_SHIFT);
++      txq_ctrl_data |= EDMA_TXQ_CTRL_TPD_BURST_EN;
++      txq_ctrl_data |= (EDMA_TXF_BURST << EDMA_TXQ_TXF_BURST_NUM_SHIFT);
++      edma_write_reg(EDMA_REG_TXQ_CTRL, txq_ctrl_data);
++}
++
++
++/* edma_configure_rx()
++ *    configure reception control data
++ */
++static void edma_configure_rx(struct edma_common_info *edma_cinfo)
++{
++      struct edma_hw *hw = &edma_cinfo->hw;
++      u32 rss_type, rx_desc1, rxq_ctrl_data;
++
++      /* Set RSS type */
++      rss_type = hw->rss_type;
++      edma_write_reg(EDMA_REG_RSS_TYPE, rss_type);
++
++      /* Set RFD burst number */
++      rx_desc1 = (EDMA_RFD_BURST << EDMA_RXQ_RFD_BURST_NUM_SHIFT);
++
++      /* Set RFD prefetch threshold */
++      rx_desc1 |= (EDMA_RFD_THR << EDMA_RXQ_RFD_PF_THRESH_SHIFT);
++
++      /* Set RFD low threshold in host ring to generate interrupt */
++      rx_desc1 |= (EDMA_RFD_LTHR << EDMA_RXQ_RFD_LOW_THRESH_SHIFT);
++      edma_write_reg(EDMA_REG_RX_DESC1, rx_desc1);
++
++      /* Set Rx FIFO threshold to start DMA of data to the host */
++      rxq_ctrl_data = EDMA_FIFO_THRESH_128_BYTE;
++
++      /* Set RX remove vlan bit */
++      rxq_ctrl_data |= EDMA_RXQ_CTRL_RMV_VLAN;
++
++      edma_write_reg(EDMA_REG_RXQ_CTRL, rxq_ctrl_data);
++}
++
++/* edma_alloc_rx_buf()
++ *    does skb allocation for the received packets.
++ */
++static int edma_alloc_rx_buf(struct edma_common_info
++                           *edma_cinfo,
++                           struct edma_rfd_desc_ring *erdr,
++                           int cleaned_count, int queue_id)
++{
++      struct platform_device *pdev = edma_cinfo->pdev;
++      struct edma_rx_free_desc *rx_desc;
++      struct edma_sw_desc *sw_desc;
++      struct sk_buff *skb;
++      unsigned int i;
++      u16 prod_idx, length;
++      u32 reg_data;
++
++      if (cleaned_count > erdr->count) {
++              dev_err(&pdev->dev, "Incorrect cleaned_count %d",
++                     cleaned_count);
++              return -1;
++      }
++
++      i = erdr->sw_next_to_fill;
++
++      while (cleaned_count) {
++              sw_desc = &erdr->sw_desc[i];
++              length = edma_cinfo->rx_head_buffer_len;
++
++              if (sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_REUSE) {
++                      skb = sw_desc->skb;
++              } else {
++                      /* alloc skb */
++                      skb = netdev_alloc_skb(edma_netdev[0], length);
++                      if (!skb) {
++                              /* Better luck next round */
++                              break;
++                      }
++              }
++
++              if (edma_cinfo->page_mode) {
++                      struct page *pg = alloc_page(GFP_ATOMIC);
++
++                      if (!pg) {
++                              dev_kfree_skb_any(skb);
++                              break;
++                      }
++
++                      sw_desc->dma = dma_map_page(&pdev->dev, pg, 0,
++                                                 edma_cinfo->rx_page_buffer_len,
++                                                 DMA_FROM_DEVICE);
++                      if (dma_mapping_error(&pdev->dev,
++                                  sw_desc->dma)) {
++                              __free_page(pg);
++                              dev_kfree_skb_any(skb);
++                              break;
++                      }
++
++                      skb_fill_page_desc(skb, 0, pg, 0,
++                                         edma_cinfo->rx_page_buffer_len);
++                      sw_desc->flags = EDMA_SW_DESC_FLAG_SKB_FRAG;
++                      sw_desc->length = edma_cinfo->rx_page_buffer_len;
++              } else {
++                      sw_desc->dma = dma_map_single(&pdev->dev, skb->data,
++                                                   length, DMA_FROM_DEVICE);
++                      if (dma_mapping_error(&pdev->dev,
++                         sw_desc->dma)) {
++                              dev_kfree_skb_any(skb);
++                              break;
++                      }
++
++                      sw_desc->flags = EDMA_SW_DESC_FLAG_SKB_HEAD;
++                      sw_desc->length = length;
++              }
++
++              /* Update the buffer info */
++              sw_desc->skb = skb;
++              rx_desc = (&((struct edma_rx_free_desc *)(erdr->hw_desc))[i]);
++              rx_desc->buffer_addr = cpu_to_le64(sw_desc->dma);
++              if (++i == erdr->count)
++                      i = 0;
++              cleaned_count--;
++      }
++
++      erdr->sw_next_to_fill = i;
++
++      if (i == 0)
++              prod_idx = erdr->count - 1;
++      else
++              prod_idx = i - 1;
++
++      /* Update the producer index */
++      edma_read_reg(EDMA_REG_RFD_IDX_Q(queue_id), &reg_data);
++      reg_data &= ~EDMA_RFD_PROD_IDX_BITS;
++      reg_data |= prod_idx;
++      edma_write_reg(EDMA_REG_RFD_IDX_Q(queue_id), reg_data);
++      return cleaned_count;
++}
++
++/* edma_init_desc()
++ *    update descriptor ring size, buffer and producer/consumer index
++ */
++static void edma_init_desc(struct edma_common_info *edma_cinfo)
++{
++      struct edma_rfd_desc_ring *rfd_ring;
++      struct edma_tx_desc_ring *etdr;
++      int i = 0, j = 0;
++      u32 data = 0;
++      u16 hw_cons_idx = 0;
++
++      /* Set the base address of every TPD ring. */
++      for (i = 0; i < edma_cinfo->num_tx_queues; i++) {
++              etdr = edma_cinfo->tpd_ring[i];
++
++              /* Update descriptor ring base address */
++              edma_write_reg(EDMA_REG_TPD_BASE_ADDR_Q(i), (u32)etdr->dma);
++              edma_read_reg(EDMA_REG_TPD_IDX_Q(i), &data);
++
++              /* Calculate hardware consumer index */
++              hw_cons_idx = (data >> EDMA_TPD_CONS_IDX_SHIFT) & 0xffff;
++              etdr->sw_next_to_fill = hw_cons_idx;
++              etdr->sw_next_to_clean = hw_cons_idx;
++              data &= ~(EDMA_TPD_PROD_IDX_MASK << EDMA_TPD_PROD_IDX_SHIFT);
++              data |= hw_cons_idx;
++
++              /* update producer index */
++              edma_write_reg(EDMA_REG_TPD_IDX_Q(i), data);
++
++              /* update SW consumer index register */
++              edma_write_reg(EDMA_REG_TX_SW_CONS_IDX_Q(i), hw_cons_idx);
++
++              /* Set TPD ring size */
++              edma_write_reg(EDMA_REG_TPD_RING_SIZE,
++                             edma_cinfo->tx_ring_count &
++                                  EDMA_TPD_RING_SIZE_MASK);
++      }
++
++      for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) {
++              rfd_ring = edma_cinfo->rfd_ring[j];
++              /* Update Receive Free descriptor ring base address */
++              edma_write_reg(EDMA_REG_RFD_BASE_ADDR_Q(j),
++                      (u32)(rfd_ring->dma));
++              j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
++      }
++
++      data = edma_cinfo->rx_head_buffer_len;
++      if (edma_cinfo->page_mode)
++              data = edma_cinfo->rx_page_buffer_len;
++
++      data &= EDMA_RX_BUF_SIZE_MASK;
++      data <<= EDMA_RX_BUF_SIZE_SHIFT;
++
++      /* Update RFD ring size and RX buffer size */
++      data |= (edma_cinfo->rx_ring_count & EDMA_RFD_RING_SIZE_MASK)
++              << EDMA_RFD_RING_SIZE_SHIFT;
++
++      edma_write_reg(EDMA_REG_RX_DESC0, data);
++
++      /* Disable TX FIFO low watermark and high watermark */
++      edma_write_reg(EDMA_REG_TXF_WATER_MARK, 0);
++
++      /* Load all of the base addresses configured above */
++      edma_read_reg(EDMA_REG_TX_SRAM_PART, &data);
++      data |= 1 << EDMA_LOAD_PTR_SHIFT;
++      edma_write_reg(EDMA_REG_TX_SRAM_PART, data);
++}
++
++/* edma_receive_checksum
++ *    API to check the checksum of received packets
++ */
++static void edma_receive_checksum(struct edma_rx_return_desc *rd,
++                                               struct sk_buff *skb)
++{
++      skb_checksum_none_assert(skb);
++
++      /* check the RRD IP/L4 checksum bit to see if
++       * it is set, which in turn indicates checksum
++       * failure.
++       */
++      if (rd->rrd6 & EDMA_RRD_CSUM_FAIL_MASK)
++              return;
++
++      skb->ip_summed = CHECKSUM_UNNECESSARY;
++}
++
++/* edma_clean_rfd()
++ *    clean up rx resources on error
++ */
++static void edma_clean_rfd(struct edma_rfd_desc_ring *erdr, u16 index)
++{
++      struct edma_rx_free_desc *rx_desc;
++      struct edma_sw_desc *sw_desc;
++
++      rx_desc = (&((struct edma_rx_free_desc *)(erdr->hw_desc))[index]);
++      sw_desc = &erdr->sw_desc[index];
++      if (sw_desc->skb) {
++              dev_kfree_skb_any(sw_desc->skb);
++              sw_desc->skb = NULL;
++      }
++
++      memset(rx_desc, 0, sizeof(struct edma_rx_free_desc));
++}
++
++/* edma_rx_complete_stp_rstp()
++ *    Complete Rx processing for stp/rstp packets (prepend the
++ *    Atheros header to BPDU frames)
++ */
++static void edma_rx_complete_stp_rstp(struct sk_buff *skb, int port_id, struct edma_rx_return_desc *rd)
++{
++      int i;
++      u32 priority;
++      u16 port_type;
++      u8 mac_addr[EDMA_ETH_HDR_LEN];
++
++      port_type = (rd->rrd1 >> EDMA_RRD_PORT_TYPE_SHIFT)
++                              & EDMA_RRD_PORT_TYPE_MASK;
++      /* only proceed with the stp/rstp handling
++       * if the port type is 0x4
++       */
++      if (port_type == EDMA_RX_ATH_HDR_RSTP_PORT_TYPE) {
++              u8 bpdu_mac[6] = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x00};
++
++              /* calculate the frame priority */
++              priority = (rd->rrd1 >> EDMA_RRD_PRIORITY_SHIFT)
++                      & EDMA_RRD_PRIORITY_MASK;
++
++              for (i = 0; i < EDMA_ETH_HDR_LEN; i++)
++                      mac_addr[i] = skb->data[i];
++
++              /* Check if destination mac addr is bpdu addr */
++              if (!memcmp(mac_addr, bpdu_mac, 6)) {
++                      /* destination mac address is BPDU
++                       * destination mac address, then add
++                       * atheros header to the packet.
++                       */
++                      u16 athr_hdr = (EDMA_RX_ATH_HDR_VERSION << EDMA_RX_ATH_HDR_VERSION_SHIFT) |
++                              (priority << EDMA_RX_ATH_HDR_PRIORITY_SHIFT) |
++                              (EDMA_RX_ATH_HDR_RSTP_PORT_TYPE << EDMA_RX_ATH_PORT_TYPE_SHIFT) | port_id;
++                      skb_push(skb, 4);
++                      memcpy(skb->data, mac_addr, EDMA_ETH_HDR_LEN);
++                      *(uint16_t *)&skb->data[12] = htons(edma_ath_eth_type);
++                      *(uint16_t *)&skb->data[14] = htons(athr_hdr);
++              }
++      }
++}
++
++/*
++ * edma_rx_complete_fraglist()
++ *    Complete Rx processing for fraglist skbs
++ */
++static int edma_rx_complete_fraglist(struct sk_buff *skb, u16 num_rfds, u16 length, u32 sw_next_to_clean,
++                                      u16 *cleaned_count, struct edma_rfd_desc_ring *erdr, struct edma_common_info *edma_cinfo)
++{
++      struct platform_device *pdev = edma_cinfo->pdev;
++      struct edma_hw *hw = &edma_cinfo->hw;
++      struct sk_buff *skb_temp;
++      struct edma_sw_desc *sw_desc;
++      int i;
++      u16 size_remaining;
++
++      skb->data_len = 0;
++      skb->tail += (hw->rx_head_buff_size - 16);
++      skb->len = skb->truesize = length;
++      size_remaining = length - (hw->rx_head_buff_size - 16);
++
++      /* clean-up all related sw_descs */
++      for (i = 1; i < num_rfds; i++) {
++              struct sk_buff *skb_prev;
++              sw_desc = &erdr->sw_desc[sw_next_to_clean];
++              skb_temp = sw_desc->skb;
++
++              dma_unmap_single(&pdev->dev, sw_desc->dma,
++                      sw_desc->length, DMA_FROM_DEVICE);
++
++              if (size_remaining < hw->rx_head_buff_size)
++                      skb_put(skb_temp, size_remaining);
++              else
++                      skb_put(skb_temp, hw->rx_head_buff_size);
++
++              /*
++               * If we are processing the first rfd, we link
++               * skb->frag_list to the skb corresponding to the
++               * first RFD
++               */
++              if (i == 1)
++                      skb_shinfo(skb)->frag_list = skb_temp;
++              else
++                      skb_prev->next = skb_temp;
++              skb_prev = skb_temp;
++              skb_temp->next = NULL;
++
++              skb->data_len += skb_temp->len;
++              size_remaining -= skb_temp->len;
++
++              /* Increment SW index */
++              sw_next_to_clean = (sw_next_to_clean + 1) & (erdr->count - 1);
++              (*cleaned_count)++;
++      }
++
++      return sw_next_to_clean;
++}
++
++/* edma_rx_complete_paged()
++ *    Complete Rx processing for paged skbs
++ */
++static int edma_rx_complete_paged(struct sk_buff *skb, u16 num_rfds, u16 length, u32 sw_next_to_clean,
++                                      u16 *cleaned_count, struct edma_rfd_desc_ring *erdr, struct edma_common_info *edma_cinfo)
++{
++      struct platform_device *pdev = edma_cinfo->pdev;
++      struct sk_buff *skb_temp;
++      struct edma_sw_desc *sw_desc;
++      int i;
++      u16 size_remaining;
++
++      skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
++
++      /* Setup skbuff fields */
++      skb->len = length;
++
++      if (likely(num_rfds <= 1)) {
++              skb->data_len = length;
++              skb->truesize += edma_cinfo->rx_page_buffer_len;
++              skb_fill_page_desc(skb, 0, skb_frag_page(frag),
++                              16, length);
++      } else {
++              frag->size -= 16;
++              skb->data_len = frag->size;
++              skb->truesize += edma_cinfo->rx_page_buffer_len;
++              size_remaining = length - frag->size;
++
++              skb_fill_page_desc(skb, 0, skb_frag_page(frag),
++                              16, frag->size);
++
++              /* clean-up all related sw_descs */
++              for (i = 1; i < num_rfds; i++) {
++                      sw_desc = &erdr->sw_desc[sw_next_to_clean];
++                      skb_temp = sw_desc->skb;
++                      frag = &skb_shinfo(skb_temp)->frags[0];
++                      dma_unmap_page(&pdev->dev, sw_desc->dma,
++                              sw_desc->length, DMA_FROM_DEVICE);
++
++                      if (size_remaining < edma_cinfo->rx_page_buffer_len)
++                              frag->size = size_remaining;
++
++                      skb_fill_page_desc(skb, i, skb_frag_page(frag),
++                                      0, frag->size);
++
++                      skb_shinfo(skb_temp)->nr_frags = 0;
++                      dev_kfree_skb_any(skb_temp);
++
++                      skb->data_len += frag->size;
++                      skb->truesize += edma_cinfo->rx_page_buffer_len;
++                      size_remaining -= frag->size;
++
++                      /* Increment SW index */
++                      sw_next_to_clean = (sw_next_to_clean + 1) & (erdr->count - 1);
++                      (*cleaned_count)++;
++              }
++      }
++
++      return sw_next_to_clean;
++}
++
++/*
++ * edma_rx_complete()
++ *    Main api called from the poll function to process rx packets.
++ */
++static void edma_rx_complete(struct edma_common_info *edma_cinfo,
++                          int *work_done, int work_to_do, int queue_id,
++                          struct napi_struct *napi)
++{
++      struct platform_device *pdev = edma_cinfo->pdev;
++      struct edma_rfd_desc_ring *erdr = edma_cinfo->rfd_ring[queue_id];
++      struct net_device *netdev;
++      struct edma_adapter *adapter;
++      struct edma_sw_desc *sw_desc;
++      struct sk_buff *skb;
++      struct edma_rx_return_desc *rd;
++      u16 hash_type, rrd[8], cleaned_count = 0, length = 0, num_rfds = 1,
++          sw_next_to_clean, hw_next_to_clean = 0, vlan = 0, ret_count = 0;
++      u32 data = 0;
++      u8 *vaddr;
++      int port_id, i, drop_count = 0;
++      u32 priority;
++      u16 count = erdr->count, rfd_avail;
++      u8 queue_to_rxid[8] = {0, 0, 1, 1, 2, 2, 3, 3};
++
++      sw_next_to_clean = erdr->sw_next_to_clean;
++
++      edma_read_reg(EDMA_REG_RFD_IDX_Q(queue_id), &data);
++      hw_next_to_clean = (data >> EDMA_RFD_CONS_IDX_SHIFT) &
++                         EDMA_RFD_CONS_IDX_MASK;
++
++      do {
++              while (sw_next_to_clean != hw_next_to_clean) {
++                      if (!work_to_do)
++                              break;
++
++                      sw_desc = &erdr->sw_desc[sw_next_to_clean];
++                      skb = sw_desc->skb;
++
++                      /* Unmap the allocated buffer */
++                      if (likely(sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_HEAD))
++                              dma_unmap_single(&pdev->dev, sw_desc->dma,
++                                              sw_desc->length, DMA_FROM_DEVICE);
++                      else
++                              dma_unmap_page(&pdev->dev, sw_desc->dma,
++                                            sw_desc->length, DMA_FROM_DEVICE);
++
++                      /* Get RRD */
++                      if (edma_cinfo->page_mode) {
++                              vaddr = kmap_atomic(skb_frag_page(&skb_shinfo(skb)->frags[0]));
++                              memcpy((uint8_t *)&rrd[0], vaddr, 16);
++                              rd = (struct edma_rx_return_desc *)rrd;
++                              kunmap_atomic(vaddr);
++                      } else {
++                              rd = (struct edma_rx_return_desc *)skb->data;
++                      }
++
++                      /* Check if RRD is valid */
++                      if (!(rd->rrd7 & EDMA_RRD_DESC_VALID)) {
++                              edma_clean_rfd(erdr, sw_next_to_clean);
++                              sw_next_to_clean = (sw_next_to_clean + 1) &
++                                                 (erdr->count - 1);
++                              cleaned_count++;
++                              continue;
++                      }
++
++                      /* Get the number of RFDs from RRD */
++                      num_rfds = rd->rrd1 & EDMA_RRD_NUM_RFD_MASK;
++
++                      /* Get Rx port ID from switch */
++                      port_id = (rd->rrd1 >> EDMA_PORT_ID_SHIFT) & EDMA_PORT_ID_MASK;
++                      if ((!port_id) || (port_id > EDMA_MAX_PORTID_SUPPORTED)) {
++                              dev_err(&pdev->dev, "Invalid RRD source port bit set");
++                              for (i = 0; i < num_rfds; i++) {
++                                      edma_clean_rfd(erdr, sw_next_to_clean);
++                                      sw_next_to_clean = (sw_next_to_clean + 1) & (erdr->count - 1);
++                                      cleaned_count++;
++                              }
++                              continue;
++                      }
++
++                      /* check if we have a sink for the data we receive.
++                       * If the interface isn't set up, we have to drop the
++                       * incoming data for now.
++                       */
++                      netdev = edma_cinfo->portid_netdev_lookup_tbl[port_id];
++                      if (!netdev) {
++                              edma_clean_rfd(erdr, sw_next_to_clean);
++                              sw_next_to_clean = (sw_next_to_clean + 1) &
++                                                 (erdr->count - 1);
++                              cleaned_count++;
++                              continue;
++                      }
++                      adapter = netdev_priv(netdev);
++
++                      /* This handles the case where a high priority stream
++                       * and a low priority stream are received simultaneously.
++                       * The problem occurs if one of the Rx rings is full and
++                       * the corresponding core is busy with other work; the
++                       * ESS CPU port then backpressures all incoming traffic,
++                       * including the high priority stream. We monitor the
++                       * free descriptor count on each CPU and, whenever it
++                       * drops below the threshold (< 80), we drop low priority
++                       * traffic and let only high priority traffic through,
++                       * which keeps the ESS CPU port from backpressuring the
++                       * high priority stream.
++                       */
++                      priority = (rd->rrd1 >> EDMA_RRD_PRIORITY_SHIFT)
++                              & EDMA_RRD_PRIORITY_MASK;
++                      if (likely(!priority && !edma_cinfo->page_mode && (num_rfds <= 1))) {
++                              rfd_avail = (count + sw_next_to_clean - hw_next_to_clean - 1) & (count - 1);
++                              if (rfd_avail < EDMA_RFD_AVAIL_THR) {
++                                      sw_desc->flags = EDMA_SW_DESC_FLAG_SKB_REUSE;
++                                      sw_next_to_clean = (sw_next_to_clean + 1) & (erdr->count - 1);
++                                      adapter->stats.rx_dropped++;
++                                      cleaned_count++;
++                                      drop_count++;
++                                      if (drop_count == 3) {
++                                              work_to_do--;
++                                              (*work_done)++;
++                                              drop_count = 0;
++                                      }
++                                      if (cleaned_count == EDMA_RX_BUFFER_WRITE) {
++                                              /* If buffer clean count reaches 16, we replenish HW buffers. */
++                                              ret_count = edma_alloc_rx_buf(edma_cinfo, erdr, cleaned_count, queue_id);
++                                              edma_write_reg(EDMA_REG_RX_SW_CONS_IDX_Q(queue_id),
++                                                            sw_next_to_clean);
++                                              cleaned_count = ret_count;
++                                      }
++                                      continue;
++                              }
++                      }
++
++                      work_to_do--;
++                      (*work_done)++;
++
++                      /* Increment SW index */
++                      sw_next_to_clean = (sw_next_to_clean + 1) &
++                                         (erdr->count - 1);
++
++                      cleaned_count++;
++
++                      /* Get the packet size and allocate buffer */
++                      length = rd->rrd6 & EDMA_RRD_PKT_SIZE_MASK;
++
++                      if (edma_cinfo->page_mode) {
++                              /* paged skb */
++                              sw_next_to_clean = edma_rx_complete_paged(skb, num_rfds, length, sw_next_to_clean, &cleaned_count, erdr, edma_cinfo);
++                              if (!pskb_may_pull(skb, ETH_HLEN)) {
++                                      dev_kfree_skb_any(skb);
++                                      continue;
++                              }
++                      } else {
++                              /* single or fraglist skb */
++
++                              /* The first 16 bytes of the packet hold the
++                               * RRD descriptor, so the actual data starts
++                               * at an offset of 16 bytes.
++                               */
++                              skb_reserve(skb, 16);
++                              if (likely((num_rfds <= 1) || !edma_cinfo->fraglist_mode)) {
++                                      skb_put(skb, length);
++                              } else {
++                                      sw_next_to_clean = edma_rx_complete_fraglist(skb, num_rfds, length, sw_next_to_clean, &cleaned_count, erdr, edma_cinfo);
++                              }
++                      }
++
++                      if (edma_stp_rstp) {
++                              edma_rx_complete_stp_rstp(skb, port_id, rd);
++                      }
++
++                      skb->protocol = eth_type_trans(skb, netdev);
++
++                      /* Record Rx queue for RFS/RPS and fill flow hash from HW */
++                      skb_record_rx_queue(skb, queue_to_rxid[queue_id]);
++                      if (netdev->features & NETIF_F_RXHASH) {
++                              hash_type = (rd->rrd5 >> EDMA_HASH_TYPE_SHIFT);
++                              if ((hash_type > EDMA_HASH_TYPE_START) && (hash_type < EDMA_HASH_TYPE_END))
++                                      skb_set_hash(skb, rd->rrd2, PKT_HASH_TYPE_L4);
++                      }
++
++#ifdef CONFIG_NF_FLOW_COOKIE
++                      skb->flow_cookie = rd->rrd3 & EDMA_RRD_FLOW_COOKIE_MASK;
++#endif
++                      edma_receive_checksum(rd, skb);
++
++                      /* Process VLAN HW acceleration indication provided by HW */
++                      if (unlikely(adapter->default_vlan_tag != rd->rrd4)) {
++                              vlan = rd->rrd4;
++                              if (likely(rd->rrd7 & EDMA_RRD_CVLAN))
++                                      __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan);
++                              else if (rd->rrd1 & EDMA_RRD_SVLAN)
++                                      __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD), vlan);
++                      }
++
++                      /* Update rx statistics */
++                      adapter->stats.rx_packets++;
++                      adapter->stats.rx_bytes += length;
++
++                      /* Check if we reached refill threshold */
++                      if (cleaned_count == EDMA_RX_BUFFER_WRITE) {
++                              ret_count = edma_alloc_rx_buf(edma_cinfo, erdr, cleaned_count, queue_id);
++                              edma_write_reg(EDMA_REG_RX_SW_CONS_IDX_Q(queue_id),
++                                            sw_next_to_clean);
++                              cleaned_count = ret_count;
++                      }
++
++                      /* At this point skb should go to stack */
++                      napi_gro_receive(napi, skb);
++              }
++
++              /* Check if we still have NAPI budget */
++              if (!work_to_do)
++                      break;
++
++              /* Read index once again since we still have NAPI budget */
++              edma_read_reg(EDMA_REG_RFD_IDX_Q(queue_id), &data);
++              hw_next_to_clean = (data >> EDMA_RFD_CONS_IDX_SHIFT) &
++                      EDMA_RFD_CONS_IDX_MASK;
++      } while (hw_next_to_clean != sw_next_to_clean);
++
++      erdr->sw_next_to_clean = sw_next_to_clean;
++
++      /* Refill here in case refill threshold wasn't reached */
++      if (likely(cleaned_count)) {
++              ret_count = edma_alloc_rx_buf(edma_cinfo, erdr, cleaned_count, queue_id);
++              if (ret_count)
++                      dev_dbg(&pdev->dev, "Not all buffers were reallocated\n");
++              edma_write_reg(EDMA_REG_RX_SW_CONS_IDX_Q(queue_id),
++                            erdr->sw_next_to_clean);
++      }
++}
++
++/* edma_delete_rfs_filter()
++ *    Remove RFS filter from switch
++ */
++static int edma_delete_rfs_filter(struct edma_adapter *adapter,
++                               struct edma_rfs_filter_node *filter_node)
++{
++      int res = -1;
++
++      struct flow_keys *keys = &filter_node->keys;
++
++      if (likely(adapter->set_rfs_rule))
++              res = (*adapter->set_rfs_rule)(adapter->netdev,
++                      flow_get_u32_src(keys), flow_get_u32_dst(keys),
++                      keys->ports.src, keys->ports.dst,
++                      keys->basic.ip_proto, filter_node->rq_id, 0);
++
++      return res;
++}
++
++/* edma_add_rfs_filter()
++ *    Add RFS filter to switch
++ */
++static int edma_add_rfs_filter(struct edma_adapter *adapter,
++                             struct flow_keys *keys, u16 rq,
++                             struct edma_rfs_filter_node *filter_node)
++{
++      int res = -1;
++
++      struct flow_keys *dest_keys = &filter_node->keys;
++
++      memcpy(dest_keys, keys, sizeof(*dest_keys));
++
++      /* Call callback registered by ESS driver */
++      if (likely(adapter->set_rfs_rule))
++              res = (*adapter->set_rfs_rule)(adapter->netdev, flow_get_u32_src(keys),
++                    flow_get_u32_dst(keys), keys->ports.src, keys->ports.dst,
++                    keys->basic.ip_proto, rq, 1);
++
++      return res;
++}
++
++/* edma_rfs_key_search()
++ *    Look for existing RFS entry
++ */
++static struct edma_rfs_filter_node *edma_rfs_key_search(struct hlist_head *h,
++                                                     struct flow_keys *key)
++{
++      struct edma_rfs_filter_node *p;
++
++      hlist_for_each_entry(p, h, node)
++              if (flow_get_u32_src(&p->keys) == flow_get_u32_src(key) &&
++                  flow_get_u32_dst(&p->keys) == flow_get_u32_dst(key) &&
++                  p->keys.ports.src == key->ports.src &&
++                  p->keys.ports.dst == key->ports.dst &&
++                  p->keys.basic.ip_proto == key->basic.ip_proto)
++                      return p;
++      return NULL;
++}
++
++/* edma_initialise_rfs_flow_table()
++ *    Initialise EDMA RFS flow table
++ */
++static void edma_initialise_rfs_flow_table(struct edma_adapter *adapter)
++{
++      int i;
++
++      spin_lock_init(&adapter->rfs.rfs_ftab_lock);
++
++      /* Initialize EDMA flow hash table */
++      for (i = 0; i < EDMA_RFS_FLOW_ENTRIES; i++)
++              INIT_HLIST_HEAD(&adapter->rfs.hlist_head[i]);
++
++      adapter->rfs.max_num_filter = EDMA_RFS_FLOW_ENTRIES;
++      adapter->rfs.filter_available = adapter->rfs.max_num_filter;
++      adapter->rfs.hashtoclean = 0;
++
++      /* Add timer to get periodic RFS updates from OS */
++      init_timer(&adapter->rfs.expire_rfs);
++      adapter->rfs.expire_rfs.function = edma_flow_may_expire;
++      adapter->rfs.expire_rfs.data = (unsigned long)adapter;
++      mod_timer(&adapter->rfs.expire_rfs, jiffies + HZ / 4);
++}
++
++/* edma_free_rfs_flow_table()
++ *    Free EDMA RFS flow table
++ */
++static void edma_free_rfs_flow_table(struct edma_adapter *adapter)
++{
++      int i;
++
++      /* Remove sync timer */
++      del_timer_sync(&adapter->rfs.expire_rfs);
++      spin_lock_bh(&adapter->rfs.rfs_ftab_lock);
++
++      /* Free EDMA RFS table entries */
++      adapter->rfs.filter_available = 0;
++
++      /* Clean-up EDMA flow hash table */
++      for (i = 0; i < EDMA_RFS_FLOW_ENTRIES; i++) {
++              struct hlist_head *hhead;
++              struct hlist_node *tmp;
++              struct edma_rfs_filter_node *filter_node;
++              int res;
++
++              hhead = &adapter->rfs.hlist_head[i];
++              hlist_for_each_entry_safe(filter_node, tmp, hhead, node) {
++                      res  = edma_delete_rfs_filter(adapter, filter_node);
++                      if (res < 0)
++                              dev_warn(&adapter->netdev->dev,
++                                      "EDMA going down but RFS entry %d not allowed to be flushed by Switch",
++                                      filter_node->flow_id);
++                      hlist_del(&filter_node->node);
++                      kfree(filter_node);
++              }
++      }
++      spin_unlock_bh(&adapter->rfs.rfs_ftab_lock);
++}
++
++/* edma_tx_unmap_and_free()
++ *    clean TX buffer
++ */
++static inline void edma_tx_unmap_and_free(struct platform_device *pdev,
++                                       struct edma_sw_desc *sw_desc)
++{
++      struct sk_buff *skb = sw_desc->skb;
++
++      if (likely((sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_HEAD) ||
++                      (sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_FRAGLIST)))
++              /* unmap_single for skb head area */
++              dma_unmap_single(&pdev->dev, sw_desc->dma,
++                              sw_desc->length, DMA_TO_DEVICE);
++      else if (sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_FRAG)
++              /* unmap page for paged fragments */
++              dma_unmap_page(&pdev->dev, sw_desc->dma,
++                            sw_desc->length, DMA_TO_DEVICE);
++
++      if (likely(sw_desc->flags & EDMA_SW_DESC_FLAG_LAST))
++              dev_kfree_skb_any(skb);
++
++      sw_desc->flags = 0;
++}
++
++/* edma_tx_complete()
++ *    Clean the tx queue and update the consumer index register
++ */
++static void edma_tx_complete(struct edma_common_info *edma_cinfo, int queue_id)
++{
++      struct edma_tx_desc_ring *etdr = edma_cinfo->tpd_ring[queue_id];
++      struct edma_sw_desc *sw_desc;
++      struct platform_device *pdev = edma_cinfo->pdev;
++      int i;
++
++      u16 sw_next_to_clean = etdr->sw_next_to_clean;
++      u16 hw_next_to_clean;
++      u32 data = 0;
++
++      edma_read_reg(EDMA_REG_TPD_IDX_Q(queue_id), &data);
++      hw_next_to_clean = (data >> EDMA_TPD_CONS_IDX_SHIFT) & EDMA_TPD_CONS_IDX_MASK;
++
++      /* clean the buffer here */
++      while (sw_next_to_clean != hw_next_to_clean) {
++              sw_desc = &etdr->sw_desc[sw_next_to_clean];
++              edma_tx_unmap_and_free(pdev, sw_desc);
++              sw_next_to_clean = (sw_next_to_clean + 1) & (etdr->count - 1);
++      }
++
++      etdr->sw_next_to_clean = sw_next_to_clean;
++
++      /* update the TPD consumer index register */
++      edma_write_reg(EDMA_REG_TX_SW_CONS_IDX_Q(queue_id), sw_next_to_clean);
++
++      /* Wake the queue if queue is stopped and netdev link is up */
++      for (i = 0; i < EDMA_MAX_NETDEV_PER_QUEUE && etdr->nq[i] ; i++) {
++              if (netif_tx_queue_stopped(etdr->nq[i])) {
++                      if ((etdr->netdev[i]) && netif_carrier_ok(etdr->netdev[i]))
++                              netif_tx_wake_queue(etdr->nq[i]);
++              }
++      }
++}
++
++/* edma_get_tx_buffer()
++ *    Get sw_desc corresponding to the TPD
++ */
++static struct edma_sw_desc *edma_get_tx_buffer(struct edma_common_info *edma_cinfo,
++                                             struct edma_tx_desc *tpd, int queue_id)
++{
++      struct edma_tx_desc_ring *etdr = edma_cinfo->tpd_ring[queue_id];
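++      /* The pointer difference (tpd - hw_desc) is the descriptor's index
++       * in the ring, which doubles as the index into the parallel
++       * sw_desc[] bookkeeping array.
++       */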
++      return &etdr->sw_desc[tpd - (struct edma_tx_desc *)etdr->hw_desc];
++}
++
++/* edma_get_next_tpd()
++ *    Return a TPD descriptor for transfer
++ */
++static struct edma_tx_desc *edma_get_next_tpd(struct edma_common_info *edma_cinfo,
++                                           int queue_id)
++{
++      struct edma_tx_desc_ring *etdr = edma_cinfo->tpd_ring[queue_id];
++      u16 sw_next_to_fill = etdr->sw_next_to_fill;
++      struct edma_tx_desc *tpd_desc =
++              (&((struct edma_tx_desc *)(etdr->hw_desc))[sw_next_to_fill]);
++
++      etdr->sw_next_to_fill = (etdr->sw_next_to_fill + 1) & (etdr->count - 1);
++
++      return tpd_desc;
++}
++
++/* edma_tpd_available()
++ *    Check number of free TPDs
++ */
++static inline u16 edma_tpd_available(struct edma_common_info *edma_cinfo,
++                                  int queue_id)
++{
++      struct edma_tx_desc_ring *etdr = edma_cinfo->tpd_ring[queue_id];
++
++      u16 sw_next_to_fill;
++      u16 sw_next_to_clean;
++      u16 count = 0;
++
++      sw_next_to_clean = etdr->sw_next_to_clean;
++      sw_next_to_fill = etdr->sw_next_to_fill;
++
++      if (likely(sw_next_to_clean <= sw_next_to_fill))
++              count = etdr->count;
++
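++      /* Illustrative: with count = 128, sw_next_to_clean = 10 and
++       * sw_next_to_fill = 120 this returns 128 + 10 - 120 - 1 = 17 free
++       * TPDs; one slot is always left unused so that fill == clean can
++       * only mean an empty ring.
++       */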
++      return count + sw_next_to_clean - sw_next_to_fill - 1;
++}
++
++/* edma_tx_queue_get()
++ *    Get the hardware transmit queue for the skb
++ */
++static inline int edma_tx_queue_get(struct edma_adapter *adapter,
++                                 struct sk_buff *skb, int txq_id)
++{
++      /* skb->priority is used as an index into the skb priority table;
++       * the corresponding hardware queue is chosen based on the packet
++       * priority.
++       */
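++      /* Illustrative (queue layout assumed): with
++       * tx_start_offset[txq_id] = 8 and a priority offset of 1, the
++       * frame is placed on hardware TPD ring 9.
++       */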
++      return adapter->tx_start_offset[txq_id] + edma_skb_priority_offset(skb);
++}
++
++/* edma_tx_update_hw_idx()
++ *    Update the TPD producer index for the given ring
++ */
++static void edma_tx_update_hw_idx(struct edma_common_info *edma_cinfo,
++                               struct sk_buff *skb, int queue_id)
++{
++      struct edma_tx_desc_ring *etdr = edma_cinfo->tpd_ring[queue_id];
++      u32 tpd_idx_data;
++
++      /* Read and update the producer index */
++      edma_read_reg(EDMA_REG_TPD_IDX_Q(queue_id), &tpd_idx_data);
++      tpd_idx_data &= ~EDMA_TPD_PROD_IDX_BITS;
++      tpd_idx_data |= (etdr->sw_next_to_fill & EDMA_TPD_PROD_IDX_MASK)
++              << EDMA_TPD_PROD_IDX_SHIFT;
++
++      edma_write_reg(EDMA_REG_TPD_IDX_Q(queue_id), tpd_idx_data);
++}
++
++/* edma_rollback_tx()
++ *    Reclaim tx descriptors and buffers in case of error
++ */
++static void edma_rollback_tx(struct edma_adapter *adapter,
++                          struct edma_tx_desc *start_tpd, int queue_id)
++{
++      struct edma_tx_desc_ring *etdr = adapter->edma_cinfo->tpd_ring[queue_id];
++      struct edma_sw_desc *sw_desc;
++      struct edma_tx_desc *tpd = NULL;
++      u16 start_index, index;
++
++      start_index = start_tpd - (struct edma_tx_desc *)(etdr->hw_desc);
++
++      index = start_index;
++      while (index != etdr->sw_next_to_fill) {
++              tpd = (&((struct edma_tx_desc *)(etdr->hw_desc))[index]);
++              sw_desc = &etdr->sw_desc[index];
++              edma_tx_unmap_and_free(adapter->pdev, sw_desc);
++              memset(tpd, 0, sizeof(struct edma_tx_desc));
++              if (++index == etdr->count)
++                      index = 0;
++      }
++      etdr->sw_next_to_fill = start_index;
++}
++
++/* edma_tx_map_and_fill()
++ *    Called from edma_xmit()
++ *
++ * This is where the buffers to be transmitted are DMA mapped and the
++ * TPD descriptors are filled in.
++ */
++static int edma_tx_map_and_fill(struct edma_common_info *edma_cinfo,
++                             struct edma_adapter *adapter, struct sk_buff *skb, int queue_id,
++                             unsigned int flags_transmit, u16 from_cpu, u16 dp_bitmap,
++                             bool packet_is_rstp, int nr_frags)
++{
++      struct edma_sw_desc *sw_desc = NULL;
++      struct platform_device *pdev = edma_cinfo->pdev;
++      struct edma_tx_desc *tpd = NULL, *start_tpd = NULL;
++      struct sk_buff *iter_skb;
++      int i = 0;
++      u32 word1 = 0, word3 = 0, lso_word1 = 0, svlan_tag = 0;
++      u16 buf_len, lso_desc_len = 0;
++
++      /* It should either be a nr_frags skb or fraglist skb but not both */
++      BUG_ON(nr_frags && skb_has_frag_list(skb));
++
++      if (skb_is_gso(skb)) {
++              /* TODO: What additional checks need to be performed here */
++              if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
++                      lso_word1 |= EDMA_TPD_IPV4_EN;
++                      ip_hdr(skb)->check = 0;
++                      tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
++                              ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
++              } else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
++                      lso_word1 |= EDMA_TPD_LSO_V2_EN;
++                      ipv6_hdr(skb)->payload_len = 0;
++                      tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
++                              &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
++              } else
++                      return -EINVAL;
++
++              lso_word1 |= EDMA_TPD_LSO_EN | ((skb_shinfo(skb)->gso_size & EDMA_TPD_MSS_MASK) << EDMA_TPD_MSS_SHIFT) |
++                              (skb_transport_offset(skb) << EDMA_TPD_HDR_SHIFT);
++      } else if (flags_transmit & EDMA_HW_CHECKSUM) {
++                      u8 css, cso;
++                      cso = skb_checksum_start_offset(skb);
++                      css = cso  + skb->csum_offset;
++
++                      word1 |= (EDMA_TPD_CUSTOM_CSUM_EN);
++                      word1 |= (cso >> 1) << EDMA_TPD_HDR_SHIFT;
++                      word1 |= ((css >> 1) << EDMA_TPD_CUSTOM_CSUM_SHIFT);
++      }
++
++      if (skb->protocol == htons(ETH_P_PPP_SES))
++              word1 |= EDMA_TPD_PPPOE_EN;
++
++      if (flags_transmit & EDMA_VLAN_TX_TAG_INSERT_FLAG) {
++              switch(skb->vlan_proto) {
++              case htons(ETH_P_8021Q):
++                      word3 |= (1 << EDMA_TX_INS_CVLAN);
++                      word3 |= skb_vlan_tag_get(skb) << EDMA_TX_CVLAN_TAG_SHIFT;
++                      break;
++              case htons(ETH_P_8021AD):
++                      word1 |= (1 << EDMA_TX_INS_SVLAN);
++                      svlan_tag = skb_vlan_tag_get(skb) << EDMA_TX_SVLAN_TAG_SHIFT;
++                      break;
++              default:
++                      dev_err(&pdev->dev, "no ctag or stag present\n");
++                      goto vlan_tag_error;
++              }
++      } else if (flags_transmit & EDMA_VLAN_TX_TAG_INSERT_DEFAULT_FLAG) {
++              word3 |= (1 << EDMA_TX_INS_CVLAN);
++              word3 |= (adapter->default_vlan_tag) << EDMA_TX_CVLAN_TAG_SHIFT;
++      }
++
++      if (packet_is_rstp) {
++              word3 |= dp_bitmap << EDMA_TPD_PORT_BITMAP_SHIFT;
++              word3 |= from_cpu << EDMA_TPD_FROM_CPU_SHIFT;
++      } else {
++              word3 |= adapter->dp_bitmap << EDMA_TPD_PORT_BITMAP_SHIFT;
++      }
++
++      buf_len = skb_headlen(skb);
++
++      if (lso_word1) {
++              if (lso_word1 & EDMA_TPD_LSO_V2_EN) {
++
++                      /* IPv6 LSOv2 descriptor */
++                      start_tpd = tpd = edma_get_next_tpd(edma_cinfo, queue_id);
++                      sw_desc = edma_get_tx_buffer(edma_cinfo, tpd, queue_id);
++                      sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_NONE;
++
++                      /* LSOv2 descriptor overrides addr field to pass length */
++                      tpd->addr = cpu_to_le16(skb->len);
++                      tpd->svlan_tag = svlan_tag;
++                      tpd->word1 = word1 | lso_word1;
++                      tpd->word3 = word3;
++              }
++
++              tpd = edma_get_next_tpd(edma_cinfo, queue_id);
++              if (!start_tpd)
++                      start_tpd = tpd;
++              sw_desc = edma_get_tx_buffer(edma_cinfo, tpd, queue_id);
++
++              /* The last buffer info contains the skb address,
++               * so the skb will be freed after unmap
++               */
++              sw_desc->length = lso_desc_len;
++              sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_HEAD;
++
++              sw_desc->dma = dma_map_single(&adapter->pdev->dev,
++                                      skb->data, buf_len, DMA_TO_DEVICE);
++              if (dma_mapping_error(&pdev->dev, sw_desc->dma))
++                      goto dma_error;
++
++              tpd->addr = cpu_to_le32(sw_desc->dma);
++              tpd->len  = cpu_to_le16(buf_len);
++
++              tpd->svlan_tag = svlan_tag;
++              tpd->word1 = word1 | lso_word1;
++              tpd->word3 = word3;
++
++              /* The last buffer info contains the skb address,
++               * so it will be freed after unmap
++               */
++              sw_desc->length = lso_desc_len;
++              sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_HEAD;
++
++              buf_len = 0;
++      }
++
++      if (likely(buf_len)) {
++
++              /* TODO Do not dequeue descriptor if there is a potential error */
++              tpd = edma_get_next_tpd(edma_cinfo, queue_id);
++
++              if (!start_tpd)
++                      start_tpd = tpd;
++
++              sw_desc = edma_get_tx_buffer(edma_cinfo, tpd, queue_id);
++
++              /* The last buffer info contains the skb address,
++               * so it will be freed after unmap
++               */
++              sw_desc->length = buf_len;
++              sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_HEAD;
++              sw_desc->dma = dma_map_single(&adapter->pdev->dev,
++                      skb->data, buf_len, DMA_TO_DEVICE);
++              if (dma_mapping_error(&pdev->dev, sw_desc->dma))
++                      goto dma_error;
++
++              tpd->addr = cpu_to_le32(sw_desc->dma);
++              tpd->len  = cpu_to_le16(buf_len);
++
++              tpd->svlan_tag = svlan_tag;
++              tpd->word1 = word1 | lso_word1;
++              tpd->word3 = word3;
++      }
++
++      /* Walk through all paged fragments */
++      while (nr_frags--) {
++              skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
++              buf_len = skb_frag_size(frag);
++              tpd = edma_get_next_tpd(edma_cinfo, queue_id);
++              sw_desc = edma_get_tx_buffer(edma_cinfo, tpd, queue_id);
++              sw_desc->length = buf_len;
++              sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_FRAG;
++
++              sw_desc->dma = skb_frag_dma_map(&pdev->dev, frag, 0, buf_len, DMA_TO_DEVICE);
++
++              if (dma_mapping_error(&pdev->dev, sw_desc->dma))
++                      goto dma_error;
++
++              tpd->addr = cpu_to_le32(sw_desc->dma);
++              tpd->len  = cpu_to_le16(buf_len);
++
++              tpd->svlan_tag = svlan_tag;
++              tpd->word1 = word1 | lso_word1;
++              tpd->word3 = word3;
++              i++;
++      }
++
++      /* Walk through all fraglist skbs */
++      skb_walk_frags(skb, iter_skb) {
++              buf_len = iter_skb->len;
++              tpd = edma_get_next_tpd(edma_cinfo, queue_id);
++              sw_desc = edma_get_tx_buffer(edma_cinfo, tpd, queue_id);
++              sw_desc->length = buf_len;
++              sw_desc->dma =  dma_map_single(&adapter->pdev->dev,
++                              iter_skb->data, buf_len, DMA_TO_DEVICE);
++
++              if (dma_mapping_error(&pdev->dev, sw_desc->dma))
++                      goto dma_error;
++
++              tpd->addr = cpu_to_le32(sw_desc->dma);
++              tpd->len  = cpu_to_le16(buf_len);
++              tpd->svlan_tag = svlan_tag;
++              tpd->word1 = word1 | lso_word1;
++              tpd->word3 = word3;
++              sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_FRAGLIST;
++      }
++
++      if (tpd)
++              tpd->word1 |= 1 << EDMA_TPD_EOP_SHIFT;
++
++      sw_desc->skb = skb;
++      sw_desc->flags |= EDMA_SW_DESC_FLAG_LAST;
++
++      return 0;
++
++dma_error:
++      edma_rollback_tx(adapter, start_tpd, queue_id);
++      dev_err(&pdev->dev, "TX DMA map failed\n");
++vlan_tag_error:
++      return -ENOMEM;
++}
++
++/* edma_check_link()
++ *    check Link status
++ */
++static int edma_check_link(struct edma_adapter *adapter)
++{
++      struct phy_device *phydev = adapter->phydev;
++
++      if (!(adapter->poll_required))
++              return __EDMA_LINKUP;
++
++      if (phydev->link)
++              return __EDMA_LINKUP;
++
++      return __EDMA_LINKDOWN;
++}
++
++/* edma_adjust_link()
++ *    Adjust the carrier state based on the edma link status
++ */
++void edma_adjust_link(struct net_device *netdev)
++{
++      int status;
++      struct edma_adapter *adapter = netdev_priv(netdev);
++      struct phy_device *phydev = adapter->phydev;
++
++      if (!test_bit(__EDMA_UP, &adapter->state_flags))
++              return;
++
++      status = edma_check_link(adapter);
++
++      if (status == __EDMA_LINKUP && adapter->link_state == __EDMA_LINKDOWN) {
++              dev_info(&adapter->pdev->dev, "%s: GMAC Link is up with phy_speed=%d\n", netdev->name, phydev->speed);
++              adapter->link_state = __EDMA_LINKUP;
++              netif_carrier_on(netdev);
++              if (netif_running(netdev))
++                      netif_tx_wake_all_queues(netdev);
++      } else if (status == __EDMA_LINKDOWN && adapter->link_state == __EDMA_LINKUP) {
++              dev_info(&adapter->pdev->dev, "%s: GMAC Link is down\n", netdev->name);
++              adapter->link_state = __EDMA_LINKDOWN;
++              netif_carrier_off(netdev);
++              netif_tx_stop_all_queues(netdev);
++      }
++}
++
++/* edma_get_stats()
++ *    Statistics API used to retrieve the tx/rx statistics
++ */
++struct net_device_stats *edma_get_stats(struct net_device *netdev)
++{
++      struct edma_adapter *adapter = netdev_priv(netdev);
++
++      return &adapter->stats;
++}
++
++/* edma_xmit()
++ *    Main API called by the core for packet transmission
++ */
++netdev_tx_t edma_xmit(struct sk_buff *skb,
++                   struct net_device *net_dev)
++{
++      struct edma_adapter *adapter = netdev_priv(net_dev);
++      struct edma_common_info *edma_cinfo = adapter->edma_cinfo;
++      struct edma_tx_desc_ring *etdr;
++      u16 from_cpu, dp_bitmap, txq_id;
++      int ret, nr_frags = 0, num_tpds_needed = 1, queue_id;
++      unsigned int flags_transmit = 0;
++      bool packet_is_rstp = false;
++      struct netdev_queue *nq = NULL;
++
++      if (skb_shinfo(skb)->nr_frags) {
++              nr_frags = skb_shinfo(skb)->nr_frags;
++              num_tpds_needed += nr_frags;
++      } else if (skb_has_frag_list(skb)) {
++              struct sk_buff *iter_skb;
++
++              skb_walk_frags(skb, iter_skb)
++                      num_tpds_needed++;
++      }
++
++      if (num_tpds_needed > EDMA_MAX_SKB_FRAGS) {
++              dev_err(&net_dev->dev,
++                      "skb received with fragments %d which is more than %lu",
++                      num_tpds_needed, EDMA_MAX_SKB_FRAGS);
++              dev_kfree_skb_any(skb);
++              adapter->stats.tx_errors++;
++              return NETDEV_TX_OK;
++      }
++
++      if (edma_stp_rstp) {
++              u16 ath_hdr, ath_eth_type;
++              u8 mac_addr[EDMA_ETH_HDR_LEN];
++              ath_eth_type = ntohs(*(uint16_t *)&skb->data[12]);
++              if (ath_eth_type == edma_ath_eth_type) {
++                      packet_is_rstp = true;
++                      ath_hdr = htons(*(uint16_t *)&skb->data[14]);
++                      dp_bitmap = ath_hdr & EDMA_TX_ATH_HDR_PORT_BITMAP_MASK;
++                      from_cpu = (ath_hdr & EDMA_TX_ATH_HDR_FROM_CPU_MASK) >> EDMA_TX_ATH_HDR_FROM_CPU_SHIFT;
++                      memcpy(mac_addr, skb->data, EDMA_ETH_HDR_LEN);
++
++                      skb_pull(skb, 4);
++
++                      memcpy(skb->data, mac_addr, EDMA_ETH_HDR_LEN);
++              }
++      }
++
++      /* This will be one of the 4 TX queues exposed to the Linux kernel */
++      txq_id = skb_get_queue_mapping(skb);
++      queue_id = edma_tx_queue_get(adapter, skb, txq_id);
++      etdr = edma_cinfo->tpd_ring[queue_id];
++      nq = netdev_get_tx_queue(net_dev, txq_id);
++
++      local_bh_disable();
++      /* Tx is not handled in bottom half context. Hence, we need to protect
++       * Tx from tasks and bottom half
++       */
++
++      if (num_tpds_needed > edma_tpd_available(edma_cinfo, queue_id)) {
++              /* not enough descriptors, just stop the queue */
++              netif_tx_stop_queue(nq);
++              local_bh_enable();
++              dev_dbg(&net_dev->dev, "Not enough descriptors available");
++              edma_cinfo->edma_ethstats.tx_desc_error++;
++              return NETDEV_TX_BUSY;
++      }
++
++      /* Check and mark VLAN tag offload */
++      if (skb_vlan_tag_present(skb))
++              flags_transmit |= EDMA_VLAN_TX_TAG_INSERT_FLAG;
++      else if (adapter->default_vlan_tag)
++              flags_transmit |= EDMA_VLAN_TX_TAG_INSERT_DEFAULT_FLAG;
++
++      /* Check and mark checksum offload */
++      if (likely(skb->ip_summed == CHECKSUM_PARTIAL))
++              flags_transmit |= EDMA_HW_CHECKSUM;
++
++      /* Map and fill descriptor for Tx */
++      ret = edma_tx_map_and_fill(edma_cinfo, adapter, skb, queue_id,
++              flags_transmit, from_cpu, dp_bitmap, packet_is_rstp, nr_frags);
++      if (ret) {
++              dev_kfree_skb_any(skb);
++              adapter->stats.tx_errors++;
++              goto netdev_okay;
++      }
++
++      /* Update SW producer index */
++      edma_tx_update_hw_idx(edma_cinfo, skb, queue_id);
++
++      /* update tx statistics */
++      adapter->stats.tx_packets++;
++      adapter->stats.tx_bytes += skb->len;
++
++netdev_okay:
++      local_bh_enable();
++      return NETDEV_TX_OK;
++}
++
++/*
++ * edma_flow_may_expire()
++ *    Timer function called periodically to expire stale RFS flow entries
++ */
++void edma_flow_may_expire(unsigned long data)
++{
++      struct edma_adapter *adapter = (struct edma_adapter *)data;
++      int j;
++
++      spin_lock_bh(&adapter->rfs.rfs_ftab_lock);
++      for (j = 0; j < EDMA_RFS_EXPIRE_COUNT_PER_CALL; j++) {
++              struct hlist_head *hhead;
++              struct hlist_node *tmp;
++              struct edma_rfs_filter_node *n;
++              bool res;
++
++              hhead = &adapter->rfs.hlist_head[adapter->rfs.hashtoclean++];
++              hlist_for_each_entry_safe(n, tmp, hhead, node) {
++                      res = rps_may_expire_flow(adapter->netdev, n->rq_id,
++                                      n->flow_id, n->filter_id);
++                      if (res) {
++                              int ret;
++                              ret = edma_delete_rfs_filter(adapter, n);
++                              if (ret < 0)
++                                      dev_dbg(&adapter->netdev->dev,
++                                                      "RFS entry %d not allowed to be flushed by Switch",
++                                                      n->flow_id);
++                              else {
++                                      hlist_del(&n->node);
++                                      kfree(n);
++                                      adapter->rfs.filter_available++;
++                              }
++                      }
++              }
++      }
++
++      adapter->rfs.hashtoclean = adapter->rfs.hashtoclean & (EDMA_RFS_FLOW_ENTRIES - 1);
++      spin_unlock_bh(&adapter->rfs.rfs_ftab_lock);
++      mod_timer(&adapter->rfs.expire_rfs, jiffies + HZ / 4);
++}
++
++/* edma_rx_flow_steer()
++ *    Called by the core to steer the flow to a CPU rx queue
++ */
++int edma_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
++                     u16 rxq, u32 flow_id)
++{
++      struct flow_keys keys;
++      struct edma_rfs_filter_node *filter_node;
++      struct edma_adapter *adapter = netdev_priv(dev);
++      u16 hash_tblid;
++      int res;
++
++      if (skb->protocol == htons(ETH_P_IPV6)) {
++              dev_err(&adapter->pdev->dev, "IPv6 not supported\n");
++              res = -EINVAL;
++              goto no_protocol_err;
++      }
++
++      /* Dissect flow parameters
++       * We only support IPv4 + TCP/UDP
++       */
++      res = skb_flow_dissect_flow_keys(skb, &keys, 0);
++      if (!((keys.basic.ip_proto == IPPROTO_TCP) || (keys.basic.ip_proto == IPPROTO_UDP))) {
++              res = -EPROTONOSUPPORT;
++              goto no_protocol_err;
++      }
++
++      /* Check if table entry exists */
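++      /* EDMA_RFS_FLOW_ENTRIES is assumed to be a power of two (the
++       * '& (EDMA_RFS_FLOW_ENTRIES - 1)' wrap in edma_flow_may_expire()
++       * relies on this), so the mask keeps the low-order bits of the
++       * raw skb hash as the bucket index.
++       */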
++      hash_tblid = skb_get_hash_raw(skb) & EDMA_RFS_FLOW_ENTRIES_MASK;
++
++      spin_lock_bh(&adapter->rfs.rfs_ftab_lock);
++      filter_node = edma_rfs_key_search(&adapter->rfs.hlist_head[hash_tblid], &keys);
++
++      if (filter_node) {
++              if (rxq == filter_node->rq_id) {
++                      res = -EEXIST;
++                      goto out;
++              } else {
++                      res = edma_delete_rfs_filter(adapter, filter_node);
++                      if (res < 0)
++                              dev_warn(&adapter->netdev->dev,
++                                              "Cannot steer flow %d to different queue",
++                                              filter_node->flow_id);
++                      else {
++                              adapter->rfs.filter_available++;
++                              res = edma_add_rfs_filter(adapter, &keys, rxq, filter_node);
++                              if (res < 0) {
++                                      dev_warn(&adapter->netdev->dev,
++                                                      "Cannot steer flow %d to different queue",
++                                                      filter_node->flow_id);
++                              } else {
++                                      adapter->rfs.filter_available--;
++                                      filter_node->rq_id = rxq;
++                                      filter_node->filter_id = res;
++                              }
++                      }
++              }
++      } else {
++              if (adapter->rfs.filter_available == 0) {
++                      res = -EBUSY;
++                      goto out;
++              }
++
++              filter_node = kmalloc(sizeof(*filter_node), GFP_ATOMIC);
++              if (!filter_node) {
++                      res = -ENOMEM;
++                      goto out;
++              }
++
++              res = edma_add_rfs_filter(adapter, &keys, rxq, filter_node);
++              if (res < 0) {
++                      kfree(filter_node);
++                      goto out;
++              }
++
++              adapter->rfs.filter_available--;
++              filter_node->rq_id = rxq;
++              filter_node->filter_id = res;
++              filter_node->flow_id = flow_id;
++              filter_node->keys = keys;
++              INIT_HLIST_NODE(&filter_node->node);
++              hlist_add_head(&filter_node->node, &adapter->rfs.hlist_head[hash_tblid]);
++      }
++
++out:
++      spin_unlock_bh(&adapter->rfs.rfs_ftab_lock);
++no_protocol_err:
++      return res;
++}
++
++/* edma_register_rfs_filter()
++ *    Add RFS filter callback
++ */
++int edma_register_rfs_filter(struct net_device *netdev,
++                          set_rfs_filter_callback_t set_filter)
++{
++      struct edma_adapter *adapter = netdev_priv(netdev);
++
++      spin_lock_bh(&adapter->rfs.rfs_ftab_lock);
++
++      if (adapter->set_rfs_rule) {
++              spin_unlock_bh(&adapter->rfs.rfs_ftab_lock);
++              return -1;
++      }
++
++      adapter->set_rfs_rule = set_filter;
++      spin_unlock_bh(&adapter->rfs.rfs_ftab_lock);
++
++      return 0;
++}
++
++/* edma_alloc_tx_rings()
++ *    Allocate tx rings
++ */
++int edma_alloc_tx_rings(struct edma_common_info *edma_cinfo)
++{
++      struct platform_device *pdev = edma_cinfo->pdev;
++      int i, err = 0;
++
++      for (i = 0; i < edma_cinfo->num_tx_queues; i++) {
++              err = edma_alloc_tx_ring(edma_cinfo, edma_cinfo->tpd_ring[i]);
++              if (err) {
++                      dev_err(&pdev->dev, "Tx Queue alloc %u failed\n", i);
++                      return err;
++              }
++      }
++
++      return 0;
++}
++
++/* edma_free_tx_rings()
++ *    Free tx rings
++ */
++void edma_free_tx_rings(struct edma_common_info *edma_cinfo)
++{
++      int i;
++
++      for (i = 0; i < edma_cinfo->num_tx_queues; i++)
++              edma_free_tx_ring(edma_cinfo, edma_cinfo->tpd_ring[i]);
++}
++
++/* edma_free_tx_resources()
++ *    Free buffers associated with tx rings
++ */
++void edma_free_tx_resources(struct edma_common_info *edma_cinfo)
++{
++      struct edma_tx_desc_ring *etdr;
++      struct edma_sw_desc *sw_desc;
++      struct platform_device *pdev = edma_cinfo->pdev;
++      int i, j;
++
++      for (i = 0; i < edma_cinfo->num_tx_queues; i++) {
++              etdr = edma_cinfo->tpd_ring[i];
++              for (j = 0; j < EDMA_TX_RING_SIZE; j++) {
++                      sw_desc = &etdr->sw_desc[j];
++                      if (sw_desc->flags & (EDMA_SW_DESC_FLAG_SKB_HEAD |
++                              EDMA_SW_DESC_FLAG_SKB_FRAG | EDMA_SW_DESC_FLAG_SKB_FRAGLIST))
++                              edma_tx_unmap_and_free(pdev, sw_desc);
++              }
++      }
++}
++
++/* edma_alloc_rx_rings()
++ *    Allocate rx rings
++ */
++int edma_alloc_rx_rings(struct edma_common_info *edma_cinfo)
++{
++      struct platform_device *pdev = edma_cinfo->pdev;
++      int i, j, err = 0;
++
++      for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) {
++              err = edma_alloc_rx_ring(edma_cinfo, edma_cinfo->rfd_ring[j]);
++              if (err) {
++                      dev_err(&pdev->dev, "Rx Queue alloc %u failed\n", i);
++                      return err;
++              }
++              j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
++      }
++
++      return 0;
++}
++
++/* edma_free_rx_rings()
++ *    free rx rings
++ */
++void edma_free_rx_rings(struct edma_common_info *edma_cinfo)
++{
++      int i, j;
++
++      for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) {
++              edma_free_rx_ring(edma_cinfo, edma_cinfo->rfd_ring[j]);
++              j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
++      }
++}
++
++/* edma_free_queues()
++ *    Free the allocated queues
++ */
++void edma_free_queues(struct edma_common_info *edma_cinfo)
++{
++      int i, j;
++
++      for (i = 0; i < edma_cinfo->num_tx_queues; i++) {
++              if (edma_cinfo->tpd_ring[i])
++                      kfree(edma_cinfo->tpd_ring[i]);
++              edma_cinfo->tpd_ring[i] = NULL;
++      }
++
++      for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) {
++              if (edma_cinfo->rfd_ring[j])
++                      kfree(edma_cinfo->rfd_ring[j]);
++              edma_cinfo->rfd_ring[j] = NULL;
++              j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
++      }
++
++      edma_cinfo->num_rx_queues = 0;
++      edma_cinfo->num_tx_queues = 0;
++}
++
++/* edma_free_rx_resources()
++ *    Free buffers associated with rx rings
++ */
++void edma_free_rx_resources(struct edma_common_info *edma_cinfo)
++{
++      struct edma_rfd_desc_ring *erdr;
++      struct edma_sw_desc *sw_desc;
++      struct platform_device *pdev = edma_cinfo->pdev;
++      int i, j, k;
++
++      for (i = 0, k = 0; i < edma_cinfo->num_rx_queues; i++) {
++              erdr = edma_cinfo->rfd_ring[k];
++              for (j = 0; j < EDMA_RX_RING_SIZE; j++) {
++                      sw_desc = &erdr->sw_desc[j];
++                      if (likely(sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_HEAD)) {
++                              dma_unmap_single(&pdev->dev, sw_desc->dma,
++                                      sw_desc->length, DMA_FROM_DEVICE);
++                              edma_clean_rfd(erdr, j);
++                      } else if ((sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_FRAG)) {
++                              dma_unmap_page(&pdev->dev, sw_desc->dma,
++                                      sw_desc->length, DMA_FROM_DEVICE);
++                              edma_clean_rfd(erdr, j);
++                      }
++              }
++              k += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
++
++      }
++}
++
++/* edma_alloc_queues_tx()
++ *    Allocate memory for the tx ring structures
++ */
++int edma_alloc_queues_tx(struct edma_common_info *edma_cinfo)
++{
++      int i;
++
++      for (i = 0; i < edma_cinfo->num_tx_queues; i++) {
++              struct edma_tx_desc_ring *etdr;
++              etdr = kzalloc(sizeof(struct edma_tx_desc_ring), GFP_KERNEL);
++              if (!etdr)
++                      goto err;
++              etdr->count = edma_cinfo->tx_ring_count;
++              edma_cinfo->tpd_ring[i] = etdr;
++      }
++
++      return 0;
++err:
++      edma_free_queues(edma_cinfo);
++      return -1;
++}
++
++/* edma_alloc_queues_rx()
++ *    Allocate memory for all rings
++ *    Allocate memory for the rx ring structures
++int edma_alloc_queues_rx(struct edma_common_info *edma_cinfo)
++{
++      int i, j;
++
++      for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) {
++              struct edma_rfd_desc_ring *rfd_ring;
++              rfd_ring = kzalloc(sizeof(struct edma_rfd_desc_ring),
++                              GFP_KERNEL);
++              if (!rfd_ring)
++                      goto err;
++              rfd_ring->count = edma_cinfo->rx_ring_count;
++              edma_cinfo->rfd_ring[j] = rfd_ring;
++              j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
++      }
++      return 0;
++err:
++      edma_free_queues(edma_cinfo);
++      return -1;
++}
++
++/* edma_clear_irq_status()
++ *    Clear interrupt status
++ */
++void edma_clear_irq_status(void)
++{
++      edma_write_reg(EDMA_REG_RX_ISR, 0xff);
++      edma_write_reg(EDMA_REG_TX_ISR, 0xffff);
++      edma_write_reg(EDMA_REG_MISC_ISR, 0x1fff);
++      edma_write_reg(EDMA_REG_WOL_ISR, 0x1);
++}
++
++/* edma_configure()
++ *    Configure edma interrupts, rx buffers and control registers.
++ */
++int edma_configure(struct edma_common_info *edma_cinfo)
++{
++      struct edma_hw *hw = &edma_cinfo->hw;
++      u32 intr_modrt_data;
++      u32 intr_ctrl_data = 0;
++      int i, j, ret_count;
++
++      edma_read_reg(EDMA_REG_INTR_CTRL, &intr_ctrl_data);
++      intr_ctrl_data &= ~(1 << EDMA_INTR_SW_IDX_W_TYP_SHIFT);
++      intr_ctrl_data |= hw->intr_sw_idx_w << EDMA_INTR_SW_IDX_W_TYP_SHIFT;
++      edma_write_reg(EDMA_REG_INTR_CTRL, intr_ctrl_data);
++
++      edma_clear_irq_status();
++
++      /* Clear any WOL status */
++      edma_write_reg(EDMA_REG_WOL_CTRL, 0);
++      intr_modrt_data = (EDMA_TX_IMT << EDMA_IRQ_MODRT_TX_TIMER_SHIFT);
++      intr_modrt_data |= (EDMA_RX_IMT << EDMA_IRQ_MODRT_RX_TIMER_SHIFT);
++      edma_write_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT, intr_modrt_data);
++      edma_configure_tx(edma_cinfo);
++      edma_configure_rx(edma_cinfo);
++
++      /* Allocate the RX buffer */
++      for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) {
++              struct edma_rfd_desc_ring *ring = edma_cinfo->rfd_ring[j];
++              ret_count = edma_alloc_rx_buf(edma_cinfo, ring, ring->count, j);
++              if (ret_count) {
++                      dev_dbg(&edma_cinfo->pdev->dev, "not all rx buffers allocated\n");
++              }
++              j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
++      }
++
++      /* Configure descriptor Ring */
++      edma_init_desc(edma_cinfo);
++      return 0;
++}
++
++/* edma_irq_enable()
++ *    Enable default interrupt generation settings
++ */
++void edma_irq_enable(struct edma_common_info *edma_cinfo)
++{
++      struct edma_hw *hw = &edma_cinfo->hw;
++      int i, j;
++
++      edma_write_reg(EDMA_REG_RX_ISR, 0xff);
++      for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) {
++              edma_write_reg(EDMA_REG_RX_INT_MASK_Q(j), hw->rx_intr_mask);
++              j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
++      }
++      edma_write_reg(EDMA_REG_TX_ISR, 0xffff);
++      for (i = 0; i < edma_cinfo->num_tx_queues; i++)
++              edma_write_reg(EDMA_REG_TX_INT_MASK_Q(i), hw->tx_intr_mask);
++}
++
++/* edma_irq_disable()
++ *    Disable Interrupt
++ */
++void edma_irq_disable(struct edma_common_info *edma_cinfo)
++{
++      int i;
++
++      for (i = 0; i < EDMA_MAX_RECEIVE_QUEUE; i++)
++              edma_write_reg(EDMA_REG_RX_INT_MASK_Q(i), 0x0);
++
++      for (i = 0; i < EDMA_MAX_TRANSMIT_QUEUE; i++)
++              edma_write_reg(EDMA_REG_TX_INT_MASK_Q(i), 0x0);
++      edma_write_reg(EDMA_REG_MISC_IMR, 0);
++      edma_write_reg(EDMA_REG_WOL_IMR, 0);
++}
++
++/* edma_free_irqs()
++ *    Free All IRQs
++ */
++void edma_free_irqs(struct edma_adapter *adapter)
++{
++      struct edma_common_info *edma_cinfo = adapter->edma_cinfo;
++      int i, j;
++      int k = ((edma_cinfo->num_rx_queues == 4) ? 1 : 2);
++
++      for (i = 0; i < CONFIG_NR_CPUS; i++) {
++              for (j = edma_cinfo->edma_percpu_info[i].tx_start; j < (edma_cinfo->edma_percpu_info[i].tx_start + 4); j++)
++                      free_irq(edma_cinfo->tx_irq[j], &edma_cinfo->edma_percpu_info[i]);
++
++              for (j = edma_cinfo->edma_percpu_info[i].rx_start; j < (edma_cinfo->edma_percpu_info[i].rx_start + k); j++)
++                      free_irq(edma_cinfo->rx_irq[j], &edma_cinfo->edma_percpu_info[i]);
++      }
++}
++
++/* edma_enable_rx_ctrl()
++ *    Enable RX queue control
++ */
++void edma_enable_rx_ctrl(struct edma_hw *hw)
++{
++      u32 data;
++
++      edma_read_reg(EDMA_REG_RXQ_CTRL, &data);
++      data |= EDMA_RXQ_CTRL_EN;
++      edma_write_reg(EDMA_REG_RXQ_CTRL, data);
++}
++
++
++/* edma_enable_tx_ctrl()
++ *    Enable TX queue control
++ */
++void edma_enable_tx_ctrl(struct edma_hw *hw)
++{
++      u32 data;
++
++      edma_read_reg(EDMA_REG_TXQ_CTRL, &data);
++      data |= EDMA_TXQ_CTRL_TXQ_EN;
++      edma_write_reg(EDMA_REG_TXQ_CTRL, data);
++}
++
++/* edma_stop_rx_tx()
++ *    Disable RX/TX queue control
++ */
++void edma_stop_rx_tx(struct edma_hw *hw)
++{
++      u32 data;
++
++      edma_read_reg(EDMA_REG_RXQ_CTRL, &data);
++      data &= ~EDMA_RXQ_CTRL_EN;
++      edma_write_reg(EDMA_REG_RXQ_CTRL, data);
++      edma_read_reg(EDMA_REG_TXQ_CTRL, &data);
++      data &= ~EDMA_TXQ_CTRL_TXQ_EN;
++      edma_write_reg(EDMA_REG_TXQ_CTRL, data);
++}
++
++/* edma_reset()
++ *    Reset the EDMA
++ */
++int edma_reset(struct edma_common_info *edma_cinfo)
++{
++      struct edma_hw *hw = &edma_cinfo->hw;
++
++      edma_irq_disable(edma_cinfo);
++
++      edma_clear_irq_status();
++
++      edma_stop_rx_tx(hw);
++
++      return 0;
++}
++
++/* edma_fill_netdev()
++ *    Fill netdev for each etdr
++ */
++int edma_fill_netdev(struct edma_common_info *edma_cinfo, int queue_id,
++                  int dev, int txq_id)
++{
++      struct edma_tx_desc_ring *etdr;
++      int i = 0;
++
++      etdr = edma_cinfo->tpd_ring[queue_id];
++
++      while (etdr->netdev[i])
++              i++;
++
++      if (i >= EDMA_MAX_NETDEV_PER_QUEUE)
++              return -1;
++
++      /* Populate the netdev associated with the tpd ring */
++      etdr->netdev[i] = edma_netdev[dev];
++      etdr->nq[i] = netdev_get_tx_queue(edma_netdev[dev], txq_id);
++
++      return 0;
++}
++
++/* edma_set_mac_addr()
++ *    Change the Ethernet address of the NIC
++ */
++int edma_set_mac_addr(struct net_device *netdev, void *p)
++{
++      struct sockaddr *addr = p;
++
++      if (!is_valid_ether_addr(addr->sa_data))
++              return -EINVAL;
++
++      if (netif_running(netdev))
++              return -EBUSY;
++
++      memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
++      return 0;
++}
++
++/* edma_set_stp_rstp()
++ *    set stp/rstp
++ */
++void edma_set_stp_rstp(bool rstp)
++{
++      edma_stp_rstp = rstp;
++}
++
++/* edma_assign_ath_hdr_type()
++ *    assign atheros header eth type
++ */
++void edma_assign_ath_hdr_type(int eth_type)
++{
++      edma_ath_eth_type = eth_type & EDMA_ETH_TYPE_MASK;
++}
++
++/* edma_get_default_vlan_tag()
++ *    Used by other modules to get the default vlan tag
++ */
++int edma_get_default_vlan_tag(struct net_device *netdev)
++{
++      struct edma_adapter *adapter = netdev_priv(netdev);
++
++      if (adapter->default_vlan_tag)
++              return adapter->default_vlan_tag;
++
++      return 0;
++}
++
++/* edma_open()
++ *    Called when the netdevice is brought up; starts the queues.
++ */
++int edma_open(struct net_device *netdev)
++{
++      struct edma_adapter *adapter = netdev_priv(netdev);
++      struct platform_device *pdev = adapter->edma_cinfo->pdev;
++
++      netif_tx_start_all_queues(netdev);
++      edma_initialise_rfs_flow_table(adapter);
++      set_bit(__EDMA_UP, &adapter->state_flags);
++
++      /* If link polling is enabled (in our case for the WAN port),
++       * start the PHY; otherwise always report the link as up
++       */
++      if (adapter->poll_required) {
++              if (!IS_ERR(adapter->phydev)) {
++                      phy_start(adapter->phydev);
++                      phy_start_aneg(adapter->phydev);
++                      adapter->link_state = __EDMA_LINKDOWN;
++              } else {
++                      dev_dbg(&pdev->dev, "Invalid PHY device for a link polled interface\n");
++              }
++      } else {
++              adapter->link_state = __EDMA_LINKUP;
++              netif_carrier_on(netdev);
++      }
++
++      return 0;
++}
++
++
++/* edma_close()
++ *    Called when the netdevice is brought down; stops the queues.
++ */
++int edma_close(struct net_device *netdev)
++{
++      struct edma_adapter *adapter = netdev_priv(netdev);
++
++      edma_free_rfs_flow_table(adapter);
++      netif_carrier_off(netdev);
++      netif_tx_stop_all_queues(netdev);
++
++      if (adapter->poll_required) {
++              if (!IS_ERR(adapter->phydev))
++                      phy_stop(adapter->phydev);
++      }
++
++      adapter->link_state = __EDMA_LINKDOWN;
++
++      /* Clear the UP state so that edma_adjust_link() ignores any
++       * further link changes
++       */
++      clear_bit(__EDMA_UP, &adapter->state_flags);
++
++      return 0;
++}
++
++/* edma_poll()
++ *    Polling function that gets called when NAPI is scheduled.
++ *
++ * The main sequence of tasks performed here is:
++ * read irq status -> complete tx -> complete rx -> clear irq status ->
++ * re-enable interrupts.
++ */
++int edma_poll(struct napi_struct *napi, int budget)
++{
++      struct edma_per_cpu_queues_info *edma_percpu_info = container_of(napi,
++              struct edma_per_cpu_queues_info, napi);
++      struct edma_common_info *edma_cinfo = edma_percpu_info->edma_cinfo;
++      u32 reg_data;
++      u32 shadow_rx_status, shadow_tx_status;
++      int queue_id;
++      int i, work_done = 0;
++
++      /* Latch the Rx/Tx status by ANDing it with the
++       * per-CPU RX/TX mask
++       */
++      edma_read_reg(EDMA_REG_RX_ISR, &reg_data);
++      edma_percpu_info->rx_status |= reg_data & edma_percpu_info->rx_mask;
++      shadow_rx_status = edma_percpu_info->rx_status;
++      edma_read_reg(EDMA_REG_TX_ISR, &reg_data);
++      edma_percpu_info->tx_status |= reg_data & edma_percpu_info->tx_mask;
++      shadow_tx_status = edma_percpu_info->tx_status;
++
++      /* Every core has a starting tx queue, computed at probe time and
++       * stored in edma_percpu_info->tx_start. The per-CPU mask applied
++       * above leaves only the status bits of the queues owned by this
++       * core, and since there are 4 tx queues per core we loop until
++       * every set bit has been cleared.
++       */
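++      /* Illustrative (assuming the probe assigns four consecutive tx
++       * queues per core): core 1 gets tx_start = 4 and a tx_mask
++       * covering bits 4-7, so ffs() on the masked status directly
++       * yields the absolute queue number to complete.
++       */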
++      while (edma_percpu_info->tx_status) {
++              queue_id = ffs(edma_percpu_info->tx_status) - 1;
++              edma_tx_complete(edma_cinfo, queue_id);
++              edma_percpu_info->tx_status &= ~(1 << queue_id);
++      }
++
++      /* Likewise, each core has a starting rx queue, computed at probe
++       * time and stored in edma_percpu_info->rx_start. Since there are
++       * 2 rx queues per core, we loop until every set bit has been
++       * processed or the NAPI budget is exhausted.
++       */
++      while (edma_percpu_info->rx_status) {
++              queue_id = ffs(edma_percpu_info->rx_status) - 1;
++              edma_rx_complete(edma_cinfo, &work_done,
++                              budget, queue_id, napi);
++
++              if (likely(work_done < budget))
++                      edma_percpu_info->rx_status &= ~(1 << queue_id);
++              else
++                      break;
++      }
++
++      /* Clear the status register to keep the interrupts from firing
++       * again. This is done only here, once the producer/consumer
++       * indices have been updated to reflect that the packet
++       * transmission/reception completed successfully.
++       */
++      edma_write_reg(EDMA_REG_RX_ISR, shadow_rx_status);
++      edma_write_reg(EDMA_REG_TX_ISR, shadow_tx_status);
++
++      /* If budget not fully consumed, exit the polling mode */
++      if (likely(work_done < budget)) {
++              napi_complete(napi);
++
++              /* re-enable the interrupts */
++              for (i = 0; i < edma_cinfo->num_rxq_per_core; i++)
++                      edma_write_reg(EDMA_REG_RX_INT_MASK_Q(edma_percpu_info->rx_start + i), 0x1);
++              for (i = 0; i < edma_cinfo->num_txq_per_core; i++)
++                      edma_write_reg(EDMA_REG_TX_INT_MASK_Q(edma_percpu_info->tx_start + i), 0x1);
++      }
++
++      return work_done;
++}
++
++/* edma_interrupt()
++ *    Interrupt handler
++ */
++irqreturn_t edma_interrupt(int irq, void *dev)
++{
++      struct edma_per_cpu_queues_info *edma_percpu_info = (struct edma_per_cpu_queues_info *) dev;
++      struct edma_common_info *edma_cinfo = edma_percpu_info->edma_cinfo;
++      int i;
++
++      /* Mask (disable) the TX/RX interrupts for this core; they are
++       * re-enabled from edma_poll() once the NAPI run completes
++       */
++      for (i = 0; i < edma_cinfo->num_rxq_per_core; i++)
++              edma_write_reg(EDMA_REG_RX_INT_MASK_Q(edma_percpu_info->rx_start + i), 0x0);
++
++      for (i = 0; i < edma_cinfo->num_txq_per_core; i++)
++              edma_write_reg(EDMA_REG_TX_INT_MASK_Q(edma_percpu_info->tx_start + i), 0x0);
++
++      napi_schedule(&edma_percpu_info->napi);
++
++      return IRQ_HANDLED;
++}
+--- /dev/null
++++ b/drivers/net/ethernet/qualcomm/essedma/edma.h
+@@ -0,0 +1,446 @@
++/*
++ * Copyright (c) 2014 - 2016, The Linux Foundation. All rights reserved.
++ *
++ * Permission to use, copy, modify, and/or distribute this software for
++ * any purpose with or without fee is hereby granted, provided that the
++ * above copyright notice and this permission notice appear in all copies.
++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
++ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
++ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
++ * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
++ */
++
++#ifndef _EDMA_H_
++#define _EDMA_H_
++
++#include <linux/init.h>
++#include <linux/interrupt.h>
++#include <linux/types.h>
++#include <linux/errno.h>
++#include <linux/module.h>
++#include <linux/netdevice.h>
++#include <linux/etherdevice.h>
++#include <linux/skbuff.h>
++#include <linux/io.h>
++#include <linux/vmalloc.h>
++#include <linux/pagemap.h>
++#include <linux/smp.h>
++#include <linux/platform_device.h>
++#include <linux/of.h>
++#include <linux/of_device.h>
++#include <linux/kernel.h>
++#include <linux/device.h>
++#include <linux/sysctl.h>
++#include <linux/phy.h>
++#include <linux/of_net.h>
++#include <net/checksum.h>
++#include <net/ip6_checksum.h>
++#include <asm-generic/bug.h>
++#include "ess_edma.h"
++
++#define EDMA_CPU_CORES_SUPPORTED 4
++#define EDMA_MAX_PORTID_SUPPORTED 5
++#define EDMA_MAX_VLAN_SUPPORTED  EDMA_MAX_PORTID_SUPPORTED
++#define EDMA_MAX_PORTID_BITMAP_INDEX (EDMA_MAX_PORTID_SUPPORTED + 1)
++#define EDMA_MAX_PORTID_BITMAP_SUPPORTED 0x1f /* 0001_1111 = 0x1f */
++#define EDMA_MAX_NETDEV_PER_QUEUE 4 /* 3 netdevs per queue, 1 slot reserved for indexing */
++
++#define EDMA_MAX_RECEIVE_QUEUE 8
++#define EDMA_MAX_TRANSMIT_QUEUE 16
++
++/* WAN/LAN adapter number */
++#define EDMA_WAN 0
++#define EDMA_LAN 1
++
++/* VLAN tag */
++#define EDMA_LAN_DEFAULT_VLAN 1
++#define EDMA_WAN_DEFAULT_VLAN 2
++
++#define EDMA_DEFAULT_GROUP1_VLAN 1
++#define EDMA_DEFAULT_GROUP2_VLAN 2
++#define EDMA_DEFAULT_GROUP3_VLAN 3
++#define EDMA_DEFAULT_GROUP4_VLAN 4
++#define EDMA_DEFAULT_GROUP5_VLAN 5
++
++/* Queues exposed to linux kernel */
++#define EDMA_NETDEV_TX_QUEUE 4
++#define EDMA_NETDEV_RX_QUEUE 4
++
++/* Number of queues per core */
++#define EDMA_NUM_TXQ_PER_CORE 4
++#define EDMA_NUM_RXQ_PER_CORE 2
++
++#define EDMA_TPD_EOP_SHIFT 31
++
++#define EDMA_PORT_ID_SHIFT 12
++#define EDMA_PORT_ID_MASK 0x7
++
++/* tpd word 3 bit 18-28 */
++#define EDMA_TPD_PORT_BITMAP_SHIFT 18
++
++#define EDMA_TPD_FROM_CPU_SHIFT 25
++
++#define EDMA_FROM_CPU_MASK 0x80
++#define EDMA_SKB_PRIORITY_MASK 0x38
++
++/* TX/RX descriptor ring count */
++/* should be a power of 2 */
++#define EDMA_RX_RING_SIZE 128
++#define EDMA_TX_RING_SIZE 128
++
++/* Rx head buffer sizes used in paged/non-paged mode */
++#define EDMA_RX_HEAD_BUFF_SIZE_JUMBO 256
++#define EDMA_RX_HEAD_BUFF_SIZE 1540
++
++/* MAX frame size supported by switch */
++#define EDMA_MAX_JUMBO_FRAME_SIZE 9216
++
++/* Configurations */
++#define EDMA_INTR_CLEAR_TYPE 0
++#define EDMA_INTR_SW_IDX_W_TYPE 0
++#define EDMA_FIFO_THRESH_TYPE 0
++#define EDMA_RSS_TYPE 0
++#define EDMA_RX_IMT 0x0020
++#define EDMA_TX_IMT 0x0050
++#define EDMA_TPD_BURST 5
++#define EDMA_TXF_BURST 0x100
++#define EDMA_RFD_BURST 8
++#define EDMA_RFD_THR 16
++#define EDMA_RFD_LTHR 0
++
++/* RX/TX per CPU based mask/shift */
++#define EDMA_TX_PER_CPU_MASK 0xF
++#define EDMA_RX_PER_CPU_MASK 0x3
++#define EDMA_TX_PER_CPU_MASK_SHIFT 0x2
++#define EDMA_RX_PER_CPU_MASK_SHIFT 0x1
++#define EDMA_TX_CPU_START_SHIFT 0x2
++#define EDMA_RX_CPU_START_SHIFT 0x1
++
++/* Flags used in transmit direction */
++#define EDMA_HW_CHECKSUM 0x00000001
++#define EDMA_VLAN_TX_TAG_INSERT_FLAG 0x00000002
++#define EDMA_VLAN_TX_TAG_INSERT_DEFAULT_FLAG 0x00000004
++
++#define EDMA_SW_DESC_FLAG_LAST 0x1
++#define EDMA_SW_DESC_FLAG_SKB_HEAD 0x2
++#define EDMA_SW_DESC_FLAG_SKB_FRAG 0x4
++#define EDMA_SW_DESC_FLAG_SKB_FRAGLIST 0x8
++#define EDMA_SW_DESC_FLAG_SKB_NONE 0x10
++#define EDMA_SW_DESC_FLAG_SKB_REUSE 0x20
++
++
++#define EDMA_MAX_SKB_FRAGS (MAX_SKB_FRAGS + 1)
++
++/* Ethtool specific list of EDMA supported features */
++#define EDMA_SUPPORTED_FEATURES (SUPPORTED_10baseT_Half \
++                                      | SUPPORTED_10baseT_Full \
++                                      | SUPPORTED_100baseT_Half \
++                                      | SUPPORTED_100baseT_Full \
++                                      | SUPPORTED_1000baseT_Full)
++
++/* Receive side Atheros header */
++#define EDMA_RX_ATH_HDR_VERSION 0x2
++#define EDMA_RX_ATH_HDR_VERSION_SHIFT 14
++#define EDMA_RX_ATH_HDR_PRIORITY_SHIFT 11
++#define EDMA_RX_ATH_PORT_TYPE_SHIFT 6
++#define EDMA_RX_ATH_HDR_RSTP_PORT_TYPE 0x4
++
++/* Transmit side Atheros header */
++#define EDMA_TX_ATH_HDR_PORT_BITMAP_MASK 0x7F
++#define EDMA_TX_ATH_HDR_FROM_CPU_MASK 0x80
++#define EDMA_TX_ATH_HDR_FROM_CPU_SHIFT 7
++
++#define EDMA_TXQ_START_CORE0 8
++#define EDMA_TXQ_START_CORE1 12
++#define EDMA_TXQ_START_CORE2 0
++#define EDMA_TXQ_START_CORE3 4
++
++#define EDMA_TXQ_IRQ_MASK_CORE0 0x0F00
++#define EDMA_TXQ_IRQ_MASK_CORE1 0xF000
++#define EDMA_TXQ_IRQ_MASK_CORE2 0x000F
++#define EDMA_TXQ_IRQ_MASK_CORE3 0x00F0
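++
++/* With 4 tx queues per core the tables above map core 0 to queues
++ * 8-11, core 1 to queues 12-15, core 2 to queues 0-3 and core 3 to
++ * queues 4-7; the IRQ masks mirror that mapping bit-for-bit.
++ */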
++
++#define EDMA_ETH_HDR_LEN 12
++#define EDMA_ETH_TYPE_MASK 0xFFFF
++
++#define EDMA_RX_BUFFER_WRITE 16
++#define EDMA_RFD_AVAIL_THR 80
++
++#define EDMA_GMAC_NO_MDIO_PHY PHY_MAX_ADDR
++
++extern int ssdk_rfs_ipct_rule_set(__be32 ip_src, __be32 ip_dst,
++                                __be16 sport, __be16 dport,
++                                uint8_t proto, u16 loadbalance, bool action);
++struct edma_ethtool_statistics {
++      u32 tx_q0_pkt;
++      u32 tx_q1_pkt;
++      u32 tx_q2_pkt;
++      u32 tx_q3_pkt;
++      u32 tx_q4_pkt;
++      u32 tx_q5_pkt;
++      u32 tx_q6_pkt;
++      u32 tx_q7_pkt;
++      u32 tx_q8_pkt;
++      u32 tx_q9_pkt;
++      u32 tx_q10_pkt;
++      u32 tx_q11_pkt;
++      u32 tx_q12_pkt;
++      u32 tx_q13_pkt;
++      u32 tx_q14_pkt;
++      u32 tx_q15_pkt;
++      u32 tx_q0_byte;
++      u32 tx_q1_byte;
++      u32 tx_q2_byte;
++      u32 tx_q3_byte;
++      u32 tx_q4_byte;
++      u32 tx_q5_byte;
++      u32 tx_q6_byte;
++      u32 tx_q7_byte;
++      u32 tx_q8_byte;
++      u32 tx_q9_byte;
++      u32 tx_q10_byte;
++      u32 tx_q11_byte;
++      u32 tx_q12_byte;
++      u32 tx_q13_byte;
++      u32 tx_q14_byte;
++      u32 tx_q15_byte;
++      u32 rx_q0_pkt;
++      u32 rx_q1_pkt;
++      u32 rx_q2_pkt;
++      u32 rx_q3_pkt;
++      u32 rx_q4_pkt;
++      u32 rx_q5_pkt;
++      u32 rx_q6_pkt;
++      u32 rx_q7_pkt;
++      u32 rx_q0_byte;
++      u32 rx_q1_byte;
++      u32 rx_q2_byte;
++      u32 rx_q3_byte;
++      u32 rx_q4_byte;
++      u32 rx_q5_byte;
++      u32 rx_q6_byte;
++      u32 rx_q7_byte;
++      u32 tx_desc_error;
++};
++
++struct edma_mdio_data {
++      struct mii_bus  *mii_bus;
++      void __iomem    *membase;
++      int phy_irq[PHY_MAX_ADDR];
++};
++
++/* EDMA LINK state */
++enum edma_link_state {
++      __EDMA_LINKUP, /* Indicate link is UP */
++      __EDMA_LINKDOWN /* Indicate link is down */
++};
++
++/* EDMA GMAC state */
++enum edma_gmac_state {
++      __EDMA_UP /* used to indicate GMAC is up */
++};
++
++/* edma transmit descriptor */
++struct edma_tx_desc {
++      __le16  len; /* full packet including CRC */
++      __le16  svlan_tag; /* vlan tag */
++      __le32  word1; /* byte 4-7 */
++      __le32  addr; /* address of buffer */
++      __le32  word3; /* byte 12 */
++};
++
++/* edma receive return descriptor */
++struct edma_rx_return_desc {
++      u16 rrd0;
++      u16 rrd1;
++      u16 rrd2;
++      u16 rrd3;
++      u16 rrd4;
++      u16 rrd5;
++      u16 rrd6;
++      u16 rrd7;
++};
++
++/* RFD descriptor */
++struct edma_rx_free_desc {
++      __le32  buffer_addr; /* buffer address */
++};
++
++/* edma hw specific data */
++struct edma_hw {
++      u32  __iomem *hw_addr; /* inner register address */
++      struct edma_adapter *adapter; /* netdevice adapter */
++      u32 rx_intr_mask; /* rx interrupt mask */
++      u32 tx_intr_mask; /* tx interrupt mask */
++      u32 misc_intr_mask; /* misc interrupt mask */
++      u32 wol_intr_mask; /* wake on lan interrupt mask */
++      bool intr_clear_type; /* interrupt clear */
++      bool intr_sw_idx_w; /* interrupt software index */
++      u32 rx_head_buff_size; /* Rx buffer size */
++      u8 rss_type; /* rss protocol type */
++};
++
++/* edma_sw_desc stores software descriptor
++ * Each SW descriptor has a 1:1 mapping with a HW descriptor
++ */
++struct edma_sw_desc {
++      struct sk_buff *skb;
++      dma_addr_t dma; /* dma address */
++      u16 length; /* Tx/Rx buffer length */
++      u32 flags;
++};
++
++/* per core related information */
++struct edma_per_cpu_queues_info {
++      struct napi_struct napi; /* napi associated with the core */
++      u32 tx_mask; /* tx interrupt mask */
++      u32 rx_mask; /* rx interrupt mask */
++      u32 tx_status; /* tx interrupt status */
++      u32 rx_status; /* rx interrupt status */
++      u32 tx_start; /* tx queue start */
++      u32 rx_start; /* rx queue start */
++      struct edma_common_info *edma_cinfo; /* edma common info */
++};
++
++/* edma specific common info */
++struct edma_common_info {
++      struct edma_tx_desc_ring *tpd_ring[16]; /* 16 Tx queues */
++      struct edma_rfd_desc_ring *rfd_ring[8]; /* 8 Rx queues */
++      struct platform_device *pdev; /* device structure */
++      struct net_device *netdev[EDMA_MAX_PORTID_SUPPORTED];
++      struct net_device *portid_netdev_lookup_tbl[EDMA_MAX_PORTID_BITMAP_INDEX];
++      struct ctl_table_header *edma_ctl_table_hdr;
++      int num_gmac;
++      struct edma_ethtool_statistics edma_ethstats; /* ethtool stats */
++      int num_rx_queues; /* number of rx queues */
++      u32 num_tx_queues; /* number of tx queues */
++      u32 tx_irq[16]; /* tx interrupt numbers */
++      u32 rx_irq[8]; /* rx interrupt numbers */
++      u32 from_cpu; /* from CPU TPD field */
++      u32 num_rxq_per_core; /* Rx queues per core */
++      u32 num_txq_per_core; /* Tx queues per core */
++      u16 tx_ring_count; /* Tx ring count */
++      u16 rx_ring_count; /* Rx ring count */
++      u16 rx_head_buffer_len; /* rx buffer length */
++      u16 rx_page_buffer_len; /* rx buffer length */
++      u32 page_mode; /* Jumbo frame supported flag */
++      u32 fraglist_mode; /* fraglist supported flag */
++      struct edma_hw hw; /* edma hw specific structure */
++      struct edma_per_cpu_queues_info edma_percpu_info[CONFIG_NR_CPUS]; /* per cpu information */
++      spinlock_t stats_lock; /* protect the edma stats area from concurrent updates */
++};
++
++/* transmit packet descriptor (tpd) ring */
++struct edma_tx_desc_ring {
++      struct netdev_queue *nq[EDMA_MAX_NETDEV_PER_QUEUE]; /* Linux queue index */
++      struct net_device *netdev[EDMA_MAX_NETDEV_PER_QUEUE];
++                      /* Array of netdevs associated with the tpd ring */
++      void *hw_desc; /* descriptor ring virtual address */
++      struct edma_sw_desc *sw_desc; /* buffer associated with ring */
++      int netdev_bmp; /* Bitmap for per-ring netdevs */
++      u32 size; /* descriptor ring length in bytes */
++      u16 count; /* number of descriptors in the ring */
++      dma_addr_t dma; /* descriptor ring physical address */
++      u16 sw_next_to_fill; /* next Tx descriptor to fill */
++      u16 sw_next_to_clean; /* next Tx descriptor to clean */
++};
++
++/* receive free descriptor (rfd) ring */
++struct edma_rfd_desc_ring {
++      void *hw_desc; /* descriptor ring virtual address */
++      struct edma_sw_desc *sw_desc; /* buffer associated with ring */
++      u16 size; /* bytes allocated to sw_desc */
++      u16 count; /* number of descriptors in the ring */
++      dma_addr_t dma; /* descriptor ring physical address */
++      u16 sw_next_to_fill; /* next descriptor to fill */
++      u16 sw_next_to_clean; /* next descriptor to clean */
++};
++
++/* edma_rfs_filter_node - rfs filter node in hash table */
++struct edma_rfs_filter_node {
++      struct flow_keys keys;
++      u32 flow_id; /* flow_id of filter provided by kernel */
++      u16 filter_id; /* filter id of filter returned by adapter */
++      u16 rq_id; /* desired rq index */
++      struct hlist_node node; /* edma rfs list node */
++};
++
++/* edma_rfs_flow_table - rfs flow table */
++struct edma_rfs_flow_table {
++      u16 max_num_filter; /* Maximum number of filters edma supports */
++      u16 hashtoclean; /* hash table index to clean next */
++      int filter_available; /* Number of free filters available */
++      struct hlist_head hlist_head[EDMA_RFS_FLOW_ENTRIES];
++      spinlock_t rfs_ftab_lock;
++      struct timer_list expire_rfs; /* timer function for edma_rps_may_expire_flow */
++};
++
++/* EDMA net device structure */
++struct edma_adapter {
++      struct net_device *netdev; /* netdevice */
++      struct platform_device *pdev; /* platform device */
++      struct edma_common_info *edma_cinfo; /* edma common info */
++      struct phy_device *phydev; /* Phy device */
++      struct edma_rfs_flow_table rfs; /* edma rfs flow table */
++      struct net_device_stats stats; /* netdev statistics */
++      set_rfs_filter_callback_t set_rfs_rule;
++      u32 flags; /* status flags */
++      unsigned long state_flags; /* GMAC up/down flags */
++      u32 forced_speed; /* link force speed */
++      u32 forced_duplex; /* link force duplex */
++      u32 link_state; /* phy link state */
++      u32 phy_mdio_addr; /* PHY device address on MII interface */
++      u32 poll_required; /* check if link polling is required */
++      u32 tx_start_offset[CONFIG_NR_CPUS]; /* tx queue start */
++      u32 default_vlan_tag; /* vlan tag */
++      u32 dp_bitmap;
++      uint8_t phy_id[MII_BUS_ID_SIZE + 3];
++};
++
++int edma_alloc_queues_tx(struct edma_common_info *edma_cinfo);
++int edma_alloc_queues_rx(struct edma_common_info *edma_cinfo);
++int edma_open(struct net_device *netdev);
++int edma_close(struct net_device *netdev);
++void edma_free_tx_resources(struct edma_common_info *edma_c_info);
++void edma_free_rx_resources(struct edma_common_info *edma_c_info);
++int edma_alloc_tx_rings(struct edma_common_info *edma_cinfo);
++int edma_alloc_rx_rings(struct edma_common_info *edma_cinfo);
++void edma_free_tx_rings(struct edma_common_info *edma_cinfo);
++void edma_free_rx_rings(struct edma_common_info *edma_cinfo);
++void edma_free_queues(struct edma_common_info *edma_cinfo);
++void edma_irq_disable(struct edma_common_info *edma_cinfo);
++int edma_reset(struct edma_common_info *edma_cinfo);
++int edma_poll(struct napi_struct *napi, int budget);
++netdev_tx_t edma_xmit(struct sk_buff *skb,
++              struct net_device *netdev);
++int edma_configure(struct edma_common_info *edma_cinfo);
++void edma_irq_enable(struct edma_common_info *edma_cinfo);
++void edma_enable_tx_ctrl(struct edma_hw *hw);
++void edma_enable_rx_ctrl(struct edma_hw *hw);
++void edma_stop_rx_tx(struct edma_hw *hw);
++void edma_free_irqs(struct edma_adapter *adapter);
++irqreturn_t edma_interrupt(int irq, void *dev);
++void edma_write_reg(u16 reg_addr, u32 reg_value);
++void edma_read_reg(u16 reg_addr, volatile u32 *reg_value);
++struct net_device_stats *edma_get_stats(struct net_device *netdev);
++int edma_set_mac_addr(struct net_device *netdev, void *p);
++int edma_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
++              u16 rxq, u32 flow_id);
++int edma_register_rfs_filter(struct net_device *netdev,
++              set_rfs_filter_callback_t set_filter);
++void edma_flow_may_expire(unsigned long data);
++void edma_set_ethtool_ops(struct net_device *netdev);
++void edma_set_stp_rstp(bool tag);
++void edma_assign_ath_hdr_type(int tag);
++int edma_get_default_vlan_tag(struct net_device *netdev);
++void edma_adjust_link(struct net_device *netdev);
++int edma_fill_netdev(struct edma_common_info *edma_cinfo, int qid, int num, int txq_id);
++void edma_read_append_stats(struct edma_common_info *edma_cinfo);
++void edma_change_tx_coalesce(int usecs);
++void edma_change_rx_coalesce(int usecs);
++void edma_get_tx_rx_coalesce(u32 *reg_val);
++void edma_clear_irq_status(void);
++#endif /* _EDMA_H_ */
+--- /dev/null
++++ b/drivers/net/ethernet/qualcomm/essedma/edma_axi.c
+@@ -0,0 +1,1220 @@
++/*
++ * Copyright (c) 2014 - 2016, The Linux Foundation. All rights reserved.
++ *
++ * Permission to use, copy, modify, and/or distribute this software for
++ * any purpose with or without fee is hereby granted, provided that the
++ * above copyright notice and this permission notice appear in all copies.
++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
++ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
++ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
++ * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
++ */
++
++#include <linux/cpu_rmap.h>
++#include <linux/of.h>
++#include <linux/of_net.h>
++#include <linux/timer.h>
++#include "edma.h"
++#include "ess_edma.h"
++
++/* Weight round robin and virtual QID mask */
++#define EDMA_WRR_VID_SCTL_MASK 0xffff
++
++/* Weight round robin and virtual QID shift */
++#define EDMA_WRR_VID_SCTL_SHIFT 16
++
++char edma_axi_driver_name[] = "ess_edma";
++static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
++      NETIF_MSG_LINK | NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP;
++
++static u32 edma_hw_addr;
++
++struct timer_list edma_stats_timer;
++
++char edma_tx_irq[16][64];
++char edma_rx_irq[8][64];
++struct net_device *edma_netdev[EDMA_MAX_PORTID_SUPPORTED];
++static u16 tx_start[4] = {EDMA_TXQ_START_CORE0, EDMA_TXQ_START_CORE1,
++                      EDMA_TXQ_START_CORE2, EDMA_TXQ_START_CORE3};
++static u32 tx_mask[4] = {EDMA_TXQ_IRQ_MASK_CORE0, EDMA_TXQ_IRQ_MASK_CORE1,
++                      EDMA_TXQ_IRQ_MASK_CORE2, EDMA_TXQ_IRQ_MASK_CORE3};
++
++static u32 edma_default_ltag  __read_mostly = EDMA_LAN_DEFAULT_VLAN;
++static u32 edma_default_wtag  __read_mostly = EDMA_WAN_DEFAULT_VLAN;
++static u32 edma_default_group1_vtag  __read_mostly = EDMA_DEFAULT_GROUP1_VLAN;
++static u32 edma_default_group2_vtag  __read_mostly = EDMA_DEFAULT_GROUP2_VLAN;
++static u32 edma_default_group3_vtag  __read_mostly = EDMA_DEFAULT_GROUP3_VLAN;
++static u32 edma_default_group4_vtag  __read_mostly = EDMA_DEFAULT_GROUP4_VLAN;
++static u32 edma_default_group5_vtag  __read_mostly = EDMA_DEFAULT_GROUP5_VLAN;
++static u32 edma_rss_idt_val = EDMA_RSS_IDT_VALUE;
++static u32 edma_rss_idt_idx;
++
++static int edma_weight_assigned_to_q __read_mostly;
++static int edma_queue_to_virtual_q __read_mostly;
++static bool edma_enable_rstp  __read_mostly;
++static int edma_athr_hdr_eth_type __read_mostly;
++
++static int page_mode;
++module_param(page_mode, int, 0);
++MODULE_PARM_DESC(page_mode, "enable page mode");
++
++static int overwrite_mode;
++module_param(overwrite_mode, int, 0);
++MODULE_PARM_DESC(overwrite_mode, "overwrite default page_mode setting");
++
++static int jumbo_mru = EDMA_RX_HEAD_BUFF_SIZE;
++module_param(jumbo_mru, int, 0);
++MODULE_PARM_DESC(jumbo_mru, "jumbo MRU size; a non-zero value enables fraglist support");
++
++static int num_rxq = 4;
++module_param(num_rxq, int, 0);
++MODULE_PARM_DESC(num_rxq, "change the number of rx queues");
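++
++/* Usage sketch (assuming the driver object is built as a module named
++ * ess_edma): loading it with "page_mode=1 overwrite_mode=1 num_rxq=8"
++ * forces page (jumbo) mode regardless of the devicetree setting and
++ * uses eight rx queues instead of the default four; edma_axi_probe()
++ * rejects any num_rxq value other than 4 or 8.
++ */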
++
++void edma_write_reg(u16 reg_addr, u32 reg_value)
++{
++      writel(reg_value, ((void __iomem *)(edma_hw_addr + reg_addr)));
++}
++
++void edma_read_reg(u16 reg_addr, volatile u32 *reg_value)
++{
++      *reg_value = readl((void __iomem *)(edma_hw_addr + reg_addr));
++}
++
++/* edma_change_tx_coalesce()
++ *    change tx interrupt moderation timer
++ */
++void edma_change_tx_coalesce(int usecs)
++{
++      u32 reg_value;
++
++      /* Right shift the value from the user by 1 because the IMT
++       * timer resolution is 2 usecs: one count of this register
++       * corresponds to 2 usecs.
++       */
++      edma_read_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT, &reg_value);
++      reg_value = ((reg_value & 0xffff) | ((usecs >> 1) << 16));
++      edma_write_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT, reg_value);
++}
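++
++/* Worked example for the conversion above: a user request of 80 usecs
++ * becomes 80 >> 1 = 40 timer counts (2 usecs per count), which is
++ * placed in the upper 16 bits of EDMA_REG_IRQ_MODRT_TIMER_INIT while
++ * the rx moderation value in the lower 16 bits is preserved.
++ */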
++
++/* edma_change_rx_coalesce()
++ *    change rx interrupt moderation timer
++ */
++void edma_change_rx_coalesce(int usecs)
++{
++      u32 reg_value;
++
++      /* Right shift the value from the user by 1 because the IMT
++       * timer resolution is 2 usecs: one count of this register
++       * corresponds to 2 usecs.
++       */
++      edma_read_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT, &reg_value);
++      reg_value = ((reg_value & 0xffff0000) | (usecs >> 1));
++      edma_write_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT, reg_value);
++}
++
++/* edma_get_tx_rx_coalesce()
++ *    Get tx/rx interrupt moderation value
++ */
++void edma_get_tx_rx_coalesce(u32 *reg_val)
++{
++      edma_read_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT, reg_val);
++}
++
++void edma_read_append_stats(struct edma_common_info *edma_cinfo)
++{
++      uint32_t *p;
++      int i;
++      u32 stat;
++
++      spin_lock(&edma_cinfo->stats_lock);
++      p = (uint32_t *)&(edma_cinfo->edma_ethstats);
++
++      for (i = 0; i < EDMA_MAX_TRANSMIT_QUEUE; i++) {
++              edma_read_reg(EDMA_REG_TX_STAT_PKT_Q(i), &stat);
++              *p += stat;
++              p++;
++      }
++
++      for (i = 0; i < EDMA_MAX_TRANSMIT_QUEUE; i++) {
++              edma_read_reg(EDMA_REG_TX_STAT_BYTE_Q(i), &stat);
++              *p += stat;
++              p++;
++      }
++
++      for (i = 0; i < EDMA_MAX_RECEIVE_QUEUE; i++) {
++              edma_read_reg(EDMA_REG_RX_STAT_PKT_Q(i), &stat);
++              *p += stat;
++              p++;
++      }
++
++      for (i = 0; i < EDMA_MAX_RECEIVE_QUEUE; i++) {
++              edma_read_reg(EDMA_REG_RX_STAT_BYTE_Q(i), &stat);
++              *p += stat;
++              p++;
++      }
++
++      spin_unlock(&edma_cinfo->stats_lock);
++}
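++
++/* Note: the loop above walks struct edma_ethtool_statistics as a flat
++ * array of u32 counters, so the member order in edma.h (16 tx packet
++ * counters, 16 tx byte counters, 8 rx packet counters, 8 rx byte
++ * counters) must match the register read order used here.
++ */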
++
++static void edma_statistics_timer(unsigned long data)
++{
++      struct edma_common_info *edma_cinfo = (struct edma_common_info *)data;
++
++      edma_read_append_stats(edma_cinfo);
++
++      mod_timer(&edma_stats_timer, jiffies + 1*HZ);
++}
++
++static int edma_enable_stp_rstp(struct ctl_table *table, int write,
++                              void __user *buffer, size_t *lenp,
++                              loff_t *ppos)
++{
++      int ret;
++
++      ret = proc_dointvec(table, write, buffer, lenp, ppos);
++      if (write)
++              edma_set_stp_rstp(edma_enable_rstp);
++
++      return ret;
++}
++
++static int edma_ath_hdr_eth_type(struct ctl_table *table, int write,
++                               void __user *buffer, size_t *lenp,
++                               loff_t *ppos)
++{
++      int ret;
++
++      ret = proc_dointvec(table, write, buffer, lenp, ppos);
++      if (write)
++              edma_assign_ath_hdr_type(edma_athr_hdr_eth_type);
++
++      return ret;
++}
++
++static int edma_change_default_lan_vlan(struct ctl_table *table, int write,
++                                      void __user *buffer, size_t *lenp,
++                                      loff_t *ppos)
++{
++      struct edma_adapter *adapter;
++      int ret;
++
++      if (!edma_netdev[1]) {
++              pr_err("Netdevice for default_lan does not exist\n");
++              return -1;
++      }
++
++      adapter = netdev_priv(edma_netdev[1]);
++
++      ret = proc_dointvec(table, write, buffer, lenp, ppos);
++
++      if (write)
++              adapter->default_vlan_tag = edma_default_ltag;
++
++      return ret;
++}
++
++static int edma_change_default_wan_vlan(struct ctl_table *table, int write,
++                                      void __user *buffer, size_t *lenp,
++                                      loff_t *ppos)
++{
++      struct edma_adapter *adapter;
++      int ret;
++
++      if (!edma_netdev[0]) {
++              pr_err("Netdevice for default_wan does not exist\n");
++              return -1;
++      }
++
++      adapter = netdev_priv(edma_netdev[0]);
++
++      ret = proc_dointvec(table, write, buffer, lenp, ppos);
++
++      if (write)
++              adapter->default_vlan_tag = edma_default_wtag;
++
++      return ret;
++}
++
++static int edma_change_group1_vtag(struct ctl_table *table, int write,
++                                 void __user *buffer, size_t *lenp,
++                                 loff_t *ppos)
++{
++      struct edma_adapter *adapter;
++      struct edma_common_info *edma_cinfo;
++      int ret;
++
++      if (!edma_netdev[0]) {
++              pr_err("Netdevice for Group 1 does not exist\n");
++              return -1;
++      }
++
++      adapter = netdev_priv(edma_netdev[0]);
++      edma_cinfo = adapter->edma_cinfo;
++
++      ret = proc_dointvec(table, write, buffer, lenp, ppos);
++
++      if (write)
++              adapter->default_vlan_tag = edma_default_group1_vtag;
++
++      return ret;
++}
++
++static int edma_change_group2_vtag(struct ctl_table *table, int write,
++                                 void __user *buffer, size_t *lenp,
++                                 loff_t *ppos)
++{
++      struct edma_adapter *adapter;
++      struct edma_common_info *edma_cinfo;
++      int ret;
++
++      if (!edma_netdev[1]) {
++              pr_err("Netdevice for Group 2 does not exist\n");
++              return -1;
++      }
++
++      adapter = netdev_priv(edma_netdev[1]);
++      edma_cinfo = adapter->edma_cinfo;
++
++      ret = proc_dointvec(table, write, buffer, lenp, ppos);
++
++      if (write)
++              adapter->default_vlan_tag = edma_default_group2_vtag;
++
++      return ret;
++}
++
++static int edma_change_group3_vtag(struct ctl_table *table, int write,
++                                 void __user *buffer, size_t *lenp,
++                                 loff_t *ppos)
++{
++      struct edma_adapter *adapter;
++      struct edma_common_info *edma_cinfo;
++      int ret;
++
++      if (!edma_netdev[2]) {
++              pr_err("Netdevice for Group 3 does not exist\n");
++              return -1;
++      }
++
++      adapter = netdev_priv(edma_netdev[2]);
++      edma_cinfo = adapter->edma_cinfo;
++
++      ret = proc_dointvec(table, write, buffer, lenp, ppos);
++
++      if (write)
++              adapter->default_vlan_tag = edma_default_group3_vtag;
++
++      return ret;
++}
++
++static int edma_change_group4_vtag(struct ctl_table *table, int write,
++                                 void __user *buffer, size_t *lenp,
++                                 loff_t *ppos)
++{
++      struct edma_adapter *adapter;
++      struct edma_common_info *edma_cinfo;
++      int ret;
++
++      if (!edma_netdev[3]) {
++              pr_err("Netdevice for Group 4 does not exist\n");
++              return -1;
++      }
++
++      adapter = netdev_priv(edma_netdev[3]);
++      edma_cinfo = adapter->edma_cinfo;
++
++      ret = proc_dointvec(table, write, buffer, lenp, ppos);
++
++      if (write)
++              adapter->default_vlan_tag = edma_default_group4_vtag;
++
++      return ret;
++}
++
++static int edma_change_group5_vtag(struct ctl_table *table, int write,
++                                 void __user *buffer, size_t *lenp,
++                                 loff_t *ppos)
++{
++      struct edma_adapter *adapter;
++      struct edma_common_info *edma_cinfo;
++      int ret;
++
++      if (!edma_netdev[4]) {
++              pr_err("Netdevice for Group 5 does not exist\n");
++              return -1;
++      }
++
++      adapter = netdev_priv(edma_netdev[4]);
++      edma_cinfo = adapter->edma_cinfo;
++
++      ret = proc_dointvec(table, write, buffer, lenp, ppos);
++
++      if (write)
++              adapter->default_vlan_tag = edma_default_group5_vtag;
++
++      return ret;
++}
++
++static int edma_set_rss_idt_value(struct ctl_table *table, int write,
++                                void __user *buffer, size_t *lenp,
++                                loff_t *ppos)
++{
++      int ret;
++
++      ret = proc_dointvec(table, write, buffer, lenp, ppos);
++      if (write && !ret)
++              edma_write_reg(EDMA_REG_RSS_IDT(edma_rss_idt_idx),
++                             edma_rss_idt_val);
++      return ret;
++}
++
++static int edma_set_rss_idt_idx(struct ctl_table *table, int write,
++                              void __user *buffer, size_t *lenp,
++                              loff_t *ppos)
++{
++      int ret;
++      u32 old_value = edma_rss_idt_idx;
++
++      ret = proc_dointvec(table, write, buffer, lenp, ppos);
++      if (!write || ret)
++              return ret;
++
++      if (edma_rss_idt_idx >= EDMA_NUM_IDT) {
++              pr_err("Invalid RSS indirection table index %d\n",
++                     edma_rss_idt_idx);
++              edma_rss_idt_idx = old_value;
++              return -EINVAL;
++      }
++      return ret;
++}
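++
++/* The two handlers above work as a pair: edma_rss_idt_idx selects
++ * which of the EDMA_NUM_IDT indirection table registers the next
++ * write to edma_rss_idt_value is applied to.
++ */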
++
++static int edma_weight_assigned_to_queues(struct ctl_table *table, int write,
++                                        void __user *buffer, size_t *lenp,
++                                        loff_t *ppos)
++{
++      int ret, queue_id, weight;
++      u32 reg_data, data, reg_addr;
++
++      ret = proc_dointvec(table, write, buffer, lenp, ppos);
++      if (write) {
++              queue_id = edma_weight_assigned_to_q & EDMA_WRR_VID_SCTL_MASK;
++              if (queue_id < 0 || queue_id > 15) {
++                      pr_err("queue_id not within desired range\n");
++                      return -EINVAL;
++              }
++
++              weight = edma_weight_assigned_to_q >> EDMA_WRR_VID_SCTL_SHIFT;
++              if (weight < 0 || weight > 0xF) {
++                      pr_err("weight not within desired range\n");
++                      return -EINVAL;
++              }
++
++              data = weight << EDMA_WRR_SHIFT(queue_id);
++
++              reg_addr = EDMA_REG_WRR_CTRL_Q0_Q3 + (queue_id & ~0x3);
++              edma_read_reg(reg_addr, &reg_data);
++              reg_data &= ~(1 << EDMA_WRR_SHIFT(queue_id));
++              edma_write_reg(reg_addr, data | reg_data);
++      }
++
++      return ret;
++}
++
++static int edma_queue_to_virtual_queue_map(struct ctl_table *table, int write,
++                                         void __user *buffer, size_t *lenp,
++                                         loff_t *ppos)
++{
++      int ret, queue_id, virtual_qid;
++      u32 reg_data, data, reg_addr;
++
++      ret = proc_dointvec(table, write, buffer, lenp, ppos);
++      if (write) {
++              queue_id = edma_queue_to_virtual_q & EDMA_WRR_VID_SCTL_MASK;
++              if (queue_id < 0 || queue_id > 15) {
++                      pr_err("queue_id not within desired range\n");
++                      return -EINVAL;
++              }
++
++              virtual_qid = edma_queue_to_virtual_q >>
++                      EDMA_WRR_VID_SCTL_SHIFT;
++              if (virtual_qid < 0 || virtual_qid > 8) {
++                      pr_err("virtual queue id not within desired range\n");
++                      return -EINVAL;
++              }
++
++              data = virtual_qid << EDMA_VQ_ID_SHIFT(queue_id);
++
++              reg_addr = EDMA_REG_VQ_CTRL0 + (queue_id & ~0x3);
++              edma_read_reg(reg_addr, &reg_data);
++              reg_data &= ~(1 << EDMA_VQ_ID_SHIFT(queue_id));
++              edma_write_reg(reg_addr, data | reg_data);
++      }
++
++      return ret;
++}
++
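++/* Usage sketch for the two handlers above (values are packed as
++ * described by EDMA_WRR_VID_SCTL_MASK/SHIFT: queue id in the low 16
++ * bits, weight or virtual queue id in the high 16 bits). Assuming the
++ * sysctl root registered below ("net/edma"), assigning weight 3 to
++ * tx queue 5 could look like:
++ *
++ *   echo $(( (3 << 16) | 5 )) > /proc/sys/net/edma/weight_assigned_to_queues
++ */
++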
++static struct ctl_table edma_table[] = {
++      {
++              .procname       = "default_lan_tag",
++              .data           = &edma_default_ltag,
++              .maxlen         = sizeof(int),
++              .mode           = 0644,
++              .proc_handler   = edma_change_default_lan_vlan
++      },
++      {
++              .procname       = "default_wan_tag",
++              .data           = &edma_default_wtag,
++              .maxlen         = sizeof(int),
++              .mode           = 0644,
++              .proc_handler   = edma_change_default_wan_vlan
++      },
++      {
++              .procname       = "weight_assigned_to_queues",
++              .data           = &edma_weight_assigned_to_q,
++              .maxlen         = sizeof(int),
++              .mode           = 0644,
++              .proc_handler   = edma_weight_assigned_to_queues
++      },
++      {
++              .procname       = "queue_to_virtual_queue_map",
++              .data           = &edma_queue_to_virtual_q,
++              .maxlen         = sizeof(int),
++              .mode           = 0644,
++              .proc_handler   = edma_queue_to_virtual_queue_map
++      },
++      {
++              .procname       = "enable_stp_rstp",
++              .data           = &edma_enable_rstp,
++              .maxlen         = sizeof(int),
++              .mode           = 0644,
++              .proc_handler   = edma_enable_stp_rstp
++      },
++      {
++              .procname       = "athr_hdr_eth_type",
++              .data           = &edma_athr_hdr_eth_type,
++              .maxlen         = sizeof(int),
++              .mode           = 0644,
++              .proc_handler   = edma_ath_hdr_eth_type
++      },
++      {
++              .procname       = "default_group1_vlan_tag",
++              .data           = &edma_default_group1_vtag,
++              .maxlen         = sizeof(int),
++              .mode           = 0644,
++              .proc_handler   = edma_change_group1_vtag
++      },
++      {
++              .procname       = "default_group2_vlan_tag",
++              .data           = &edma_default_group2_vtag,
++              .maxlen         = sizeof(int),
++              .mode           = 0644,
++              .proc_handler   = edma_change_group2_vtag
++      },
++      {
++              .procname       = "default_group3_vlan_tag",
++              .data           = &edma_default_group3_vtag,
++              .maxlen         = sizeof(int),
++              .mode           = 0644,
++              .proc_handler   = edma_change_group3_vtag
++      },
++      {
++              .procname       = "default_group4_vlan_tag",
++              .data           = &edma_default_group4_vtag,
++              .maxlen         = sizeof(int),
++              .mode           = 0644,
++              .proc_handler   = edma_change_group4_vtag
++      },
++      {
++              .procname       = "default_group5_vlan_tag",
++              .data           = &edma_default_group5_vtag,
++              .maxlen         = sizeof(int),
++              .mode           = 0644,
++              .proc_handler   = edma_change_group5_vtag
++      },
++      {
++              .procname       = "edma_rss_idt_value",
++              .data           = &edma_rss_idt_val,
++              .maxlen         = sizeof(int),
++              .mode           = 0644,
++              .proc_handler   = edma_set_rss_idt_value
++      },
++      {
++              .procname       = "edma_rss_idt_idx",
++              .data           = &edma_rss_idt_idx,
++              .maxlen         = sizeof(int),
++              .mode           = 0644,
++              .proc_handler   = edma_set_rss_idt_idx
++      },
++      {}
++};
++
++/* edma_axi_netdev_ops
++ *    Describe the operations supported by registered netdevices
++ */
++static const struct net_device_ops edma_axi_netdev_ops = {
++      .ndo_open               = edma_open,
++      .ndo_stop               = edma_close,
++      .ndo_start_xmit         = edma_xmit,
++      .ndo_set_mac_address    = edma_set_mac_addr,
++#ifdef CONFIG_RFS_ACCEL
++      .ndo_rx_flow_steer      = edma_rx_flow_steer,
++      .ndo_register_rfs_filter = edma_register_rfs_filter,
++      .ndo_get_default_vlan_tag = edma_get_default_vlan_tag,
++#endif
++      .ndo_get_stats          = edma_get_stats,
++};
++
++/* edma_axi_probe()
++ *    Initialise an adapter identified by a platform_device structure.
++ *
++ * The OS initialization, configuring of the adapter private structure,
++ * and a hardware reset occur in the probe.
++ */
++static int edma_axi_probe(struct platform_device *pdev)
++{
++      struct edma_common_info *edma_cinfo;
++      struct edma_hw *hw;
++      struct edma_adapter *adapter[EDMA_MAX_PORTID_SUPPORTED];
++      struct resource *res;
++      struct device_node *np = pdev->dev.of_node;
++      struct device_node *pnp;
++      struct device_node *mdio_node = NULL;
++      struct platform_device *mdio_plat = NULL;
++      struct mii_bus *miibus = NULL;
++      struct edma_mdio_data *mdio_data = NULL;
++      int i, j, k, err = 0;
++      int portid_bmp;
++      int idx = 0, idx_mac = 0;
++
++      if (CONFIG_NR_CPUS != EDMA_CPU_CORES_SUPPORTED) {
++              dev_err(&pdev->dev, "Invalid CPU Cores\n");
++              return -EINVAL;
++      }
++
++      if ((num_rxq != 4) && (num_rxq != 8)) {
++              dev_err(&pdev->dev, "Invalid RX queue, edma probe failed\n");
++              return -EINVAL;
++      }
++      edma_cinfo = kzalloc(sizeof(struct edma_common_info), GFP_KERNEL);
++      if (!edma_cinfo) {
++              err = -ENOMEM;
++              goto err_alloc;
++      }
++
++      edma_cinfo->pdev = pdev;
++
++      of_property_read_u32(np, "qcom,num_gmac", &edma_cinfo->num_gmac);
++      if (edma_cinfo->num_gmac > EDMA_MAX_PORTID_SUPPORTED) {
++              pr_err("Invalid DTSI Entry for qcom,num_gmac\n");
++              err = -EINVAL;
++              goto err_cinfo;
++      }
++
++      /* Initialize the netdev array before allocation
++       * to avoid double free
++       */
++      for (i = 0 ; i < edma_cinfo->num_gmac ; i++)
++              edma_netdev[i] = NULL;
++
++      for (i = 0 ; i < edma_cinfo->num_gmac ; i++) {
++              edma_netdev[i] = alloc_etherdev_mqs(sizeof(struct edma_adapter),
++                      EDMA_NETDEV_TX_QUEUE, EDMA_NETDEV_RX_QUEUE);
++
++              if (!edma_netdev[i]) {
++                      dev_err(&pdev->dev,
++                              "net device alloc fails for index=%d\n", i);
++                      err = -ENODEV;
++                      goto err_ioremap;
++              }
++
++              SET_NETDEV_DEV(edma_netdev[i], &pdev->dev);
++              platform_set_drvdata(pdev, edma_netdev[i]);
++              edma_cinfo->netdev[i] = edma_netdev[i];
++      }
++
++      /* Fill ring details */
++      edma_cinfo->num_tx_queues = EDMA_MAX_TRANSMIT_QUEUE;
++      edma_cinfo->num_txq_per_core = (EDMA_MAX_TRANSMIT_QUEUE / 4);
++      edma_cinfo->tx_ring_count = EDMA_TX_RING_SIZE;
++
++      /* Update num rx queues based on module parameter */
++      edma_cinfo->num_rx_queues = num_rxq;
++      edma_cinfo->num_rxq_per_core = ((num_rxq == 4) ? 1 : 2);
++
++      edma_cinfo->rx_ring_count = EDMA_RX_RING_SIZE;
++
++      hw = &edma_cinfo->hw;
++
++      /* Fill HW defaults */
++      hw->tx_intr_mask = EDMA_TX_IMR_NORMAL_MASK;
++      hw->rx_intr_mask = EDMA_RX_IMR_NORMAL_MASK;
++
++      of_property_read_u32(np, "qcom,page-mode", &edma_cinfo->page_mode);
++      of_property_read_u32(np, "qcom,rx_head_buf_size",
++                           &hw->rx_head_buff_size);
++
++      if (overwrite_mode) {
++              dev_info(&pdev->dev, "page mode overwritten");
++              edma_cinfo->page_mode = page_mode;
++      }
++
++      if (jumbo_mru)
++              edma_cinfo->fraglist_mode = 1;
++
++      if (edma_cinfo->page_mode)
++              hw->rx_head_buff_size = EDMA_RX_HEAD_BUFF_SIZE_JUMBO;
++      else if (edma_cinfo->fraglist_mode)
++              hw->rx_head_buff_size = jumbo_mru;
++      else if (!hw->rx_head_buff_size)
++              hw->rx_head_buff_size = EDMA_RX_HEAD_BUFF_SIZE;
++
++      hw->misc_intr_mask = 0;
++      hw->wol_intr_mask = 0;
++
++      hw->intr_clear_type = EDMA_INTR_CLEAR_TYPE;
++      hw->intr_sw_idx_w = EDMA_INTR_SW_IDX_W_TYPE;
++
++      /* Configure the RSS type for the different protocols that can
++       * be supported
++       */
++      hw->rss_type = EDMA_RSS_TYPE_IPV4TCP | EDMA_RSS_TYPE_IPV6_TCP |
++              EDMA_RSS_TYPE_IPV4_UDP | EDMA_RSS_TYPE_IPV6UDP |
++              EDMA_RSS_TYPE_IPV4 | EDMA_RSS_TYPE_IPV6;
++
++      res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++
++      edma_cinfo->hw.hw_addr = devm_ioremap_resource(&pdev->dev, res);
++      if (IS_ERR(edma_cinfo->hw.hw_addr)) {
++              err = PTR_ERR(edma_cinfo->hw.hw_addr);
++              goto err_ioremap;
++      }
++
++      edma_hw_addr = (u32)edma_cinfo->hw.hw_addr;
++
++      /* Parse tx queue interrupt numbers from device tree */
++      for (i = 0; i < edma_cinfo->num_tx_queues; i++)
++              edma_cinfo->tx_irq[i] = platform_get_irq(pdev, i);
++
++      /* Parse rx queue interrupt numbers from device tree.
++       * Here we set j to the point where tx interrupt parsing
++       * left off (i.e. 16) and run the loop from 0 to 7 to parse
++       * the rx interrupt numbers.
++       */
++      for (i = 0, j = edma_cinfo->num_tx_queues, k = 0;
++                      i < edma_cinfo->num_rx_queues; i++) {
++              edma_cinfo->rx_irq[k] = platform_get_irq(pdev, j);
++              k += ((num_rxq == 4) ?  2 : 1);
++              j += ((num_rxq == 4) ?  2 : 1);
++      }
++
++      edma_cinfo->rx_head_buffer_len = edma_cinfo->hw.rx_head_buff_size;
++      edma_cinfo->rx_page_buffer_len = PAGE_SIZE;
++
++      err = edma_alloc_queues_tx(edma_cinfo);
++      if (err) {
++              dev_err(&pdev->dev, "Allocation of TX queue failed\n");
++              goto err_tx_qinit;
++      }
++
++      err = edma_alloc_queues_rx(edma_cinfo);
++      if (err) {
++              dev_err(&pdev->dev, "Allocation of RX queue failed\n");
++              goto err_rx_qinit;
++      }
++
++      err = edma_alloc_tx_rings(edma_cinfo);
++      if (err) {
++              dev_err(&pdev->dev, "Allocation of TX resources failed\n");
++              goto err_tx_rinit;
++      }
++
++      err = edma_alloc_rx_rings(edma_cinfo);
++      if (err) {
++              dev_err(&pdev->dev, "Allocation of RX resources failed\n");
++              goto err_rx_rinit;
++      }
++
++      /* Initialize netdev and netdev bitmap for transmit descriptor rings */
++      for (i = 0; i < edma_cinfo->num_tx_queues; i++) {
++              struct edma_tx_desc_ring *etdr =  edma_cinfo->tpd_ring[i];
++              int j;
++
++              etdr->netdev_bmp = 0;
++              for (j = 0; j < EDMA_MAX_NETDEV_PER_QUEUE; j++) {
++                      etdr->netdev[j] = NULL;
++                      etdr->nq[j] = NULL;
++              }
++      }
++
++      if (of_property_read_bool(np, "qcom,mdio_supported")) {
++              mdio_node = of_find_compatible_node(NULL, NULL,
++                                                  "qcom,ipq4019-mdio");
++              if (!mdio_node) {
++                      dev_err(&pdev->dev, "cannot find mdio node by phandle");
++                      err = -EIO;
++                      goto err_mdiobus_init_fail;
++              }
++
++              mdio_plat = of_find_device_by_node(mdio_node);
++              if (!mdio_plat) {
++                      dev_err(&pdev->dev,
++                              "cannot find platform device from mdio node");
++                      of_node_put(mdio_node);
++                      err = -EIO;
++                      goto err_mdiobus_init_fail;
++              }
++
++              mdio_data = dev_get_drvdata(&mdio_plat->dev);
++              if (!mdio_data) {
++                      dev_err(&pdev->dev,
++                              "cannot get mii bus reference from device data");
++                      of_node_put(mdio_node);
++                      err = -EIO;
++                      goto err_mdiobus_init_fail;
++              }
++
++              miibus = mdio_data->mii_bus;
++      }
++
++      for_each_available_child_of_node(np, pnp) {
++              const char *mac_addr;
++
++              /* this check is needed if the parent and daughter dts
++               * have a different number of gmac nodes
++               */
++              if (idx_mac == edma_cinfo->num_gmac) {
++                      of_node_put(np);
++                      break;
++              }
++
++              mac_addr = of_get_mac_address(pnp);
++              if (mac_addr)
++                      memcpy(edma_netdev[idx_mac]->dev_addr, mac_addr, ETH_ALEN);
++
++              idx_mac++;
++      }
++
++      /* Populate the adapter structure and register the netdevice */
++      for (i = 0; i < edma_cinfo->num_gmac; i++) {
++              int k, m;
++
++              adapter[i] = netdev_priv(edma_netdev[i]);
++              adapter[i]->netdev = edma_netdev[i];
++              adapter[i]->pdev = pdev;
++              for (j = 0; j < CONFIG_NR_CPUS; j++) {
++                      m = i % 2;
++                      adapter[i]->tx_start_offset[j] =
++                              ((j << EDMA_TX_CPU_START_SHIFT) + (m << 1));
++                      /* Share the queues with the available net-devices.
++                       * For instance, with 5 net-devices
++                       * eth0/eth2/eth4 will share q0,q1,q4,q5,q8,q9,q12,q13
++                       * and eth1/eth3 will get the remaining queues.
++                       */
++                      for (k = adapter[i]->tx_start_offset[j]; k <
++                           (adapter[i]->tx_start_offset[j] + 2); k++) {
++                              if (edma_fill_netdev(edma_cinfo, k, i, j)) {
++                                      pr_err("Netdev overflow Error\n");
++                                      goto err_register;
++                              }
++                      }
++              }
++
++              adapter[i]->edma_cinfo = edma_cinfo;
++              edma_netdev[i]->netdev_ops = &edma_axi_netdev_ops;
++              edma_netdev[i]->max_mtu = 9000;
++              edma_netdev[i]->features = NETIF_F_HW_CSUM | NETIF_F_RXCSUM
++                                    | NETIF_F_HW_VLAN_CTAG_TX
++                                    | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_SG |
++                                    NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GRO;
++              edma_netdev[i]->hw_features = NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
++                              NETIF_F_HW_VLAN_CTAG_RX
++                              | NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
++                              NETIF_F_GRO;
++              edma_netdev[i]->vlan_features = NETIF_F_HW_CSUM | NETIF_F_SG |
++                                         NETIF_F_TSO | NETIF_F_TSO6 |
++                                         NETIF_F_GRO;
++              edma_netdev[i]->wanted_features = NETIF_F_HW_CSUM | NETIF_F_SG |
++                                           NETIF_F_TSO | NETIF_F_TSO6 |
++                                           NETIF_F_GRO;
++
++#ifdef CONFIG_RFS_ACCEL
++              edma_netdev[i]->features |=  NETIF_F_RXHASH | NETIF_F_NTUPLE;
++              edma_netdev[i]->hw_features |=  NETIF_F_RXHASH | NETIF_F_NTUPLE;
++              edma_netdev[i]->vlan_features |= NETIF_F_RXHASH | NETIF_F_NTUPLE;
++              edma_netdev[i]->wanted_features |= NETIF_F_RXHASH | NETIF_F_NTUPLE;
++#endif
++              edma_set_ethtool_ops(edma_netdev[i]);
++
++              /* This just fills in a random default MAC address if a
++               * valid one was not provided
++               */
++              if (!is_valid_ether_addr(edma_netdev[i]->dev_addr)) {
++                      random_ether_addr(edma_netdev[i]->dev_addr);
++                      pr_info("EDMA using random MAC address - ");
++                      pr_info("%02x:%02x:%02x:%02x:%02x:%02x\n",
++                      *(edma_netdev[i]->dev_addr),
++                      *(edma_netdev[i]->dev_addr + 1),
++                      *(edma_netdev[i]->dev_addr + 2),
++                      *(edma_netdev[i]->dev_addr + 3),
++                      *(edma_netdev[i]->dev_addr + 4),
++                      *(edma_netdev[i]->dev_addr + 5));
++              }
++
++              err = register_netdev(edma_netdev[i]);
++              if (err)
++                      goto err_register;
++
++              /* carrier off reporting is important to
++               * ethtool even BEFORE open
++               */
++              netif_carrier_off(edma_netdev[i]);
++
++              /* Allocate reverse irq cpu mapping structure for
++               * receive queues
++               */
++#ifdef CONFIG_RFS_ACCEL
++              edma_netdev[i]->rx_cpu_rmap =
++                      alloc_irq_cpu_rmap(EDMA_NETDEV_RX_QUEUE);
++              if (!edma_netdev[i]->rx_cpu_rmap) {
++                      err = -ENOMEM;
++                      goto err_rmap_alloc_fail;
++              }
++#endif
++      }
++
++      for (i = 0; i < EDMA_MAX_PORTID_BITMAP_INDEX; i++)
++              edma_cinfo->portid_netdev_lookup_tbl[i] = NULL;
++
++      for_each_available_child_of_node(np, pnp) {
++              const uint32_t *vlan_tag = NULL;
++              int len;
++
++              /* this check is needed if the parent and daughter dts
++               * have a different number of gmac nodes
++               */
++              if (idx == edma_cinfo->num_gmac)
++                      break;
++
++              /* Populate port-id to netdev lookup table */
++              vlan_tag = of_get_property(pnp, "vlan_tag", &len);
++              if (!vlan_tag) {
++                      pr_err("Vlan tag parsing Failed.\n");
++                      goto err_rmap_alloc_fail;
++              }
++
++              adapter[idx]->default_vlan_tag = of_read_number(vlan_tag, 1);
++              vlan_tag++;
++              portid_bmp = of_read_number(vlan_tag, 1);
++              adapter[idx]->dp_bitmap = portid_bmp;
++
++              portid_bmp = portid_bmp >> 1; /* We ignore CPU Port bit 0 */
++              while (portid_bmp) {
++                      int port_bit = ffs(portid_bmp);
++
++                      if (port_bit > EDMA_MAX_PORTID_SUPPORTED)
++                              goto err_rmap_alloc_fail;
++                      edma_cinfo->portid_netdev_lookup_tbl[port_bit] =
++                              edma_netdev[idx];
++                      portid_bmp &= ~(1 << (port_bit - 1));
++              }
++
++              if (!of_property_read_u32(pnp, "qcom,poll_required",
++                                        &adapter[idx]->poll_required)) {
++                      if (adapter[idx]->poll_required) {
++                              of_property_read_u32(pnp, "qcom,phy_mdio_addr",
++                                                   &adapter[idx]->phy_mdio_addr);
++                              of_property_read_u32(pnp, "qcom,forced_speed",
++                                                   &adapter[idx]->forced_speed);
++                              of_property_read_u32(pnp, "qcom,forced_duplex",
++                                                   &adapter[idx]->forced_duplex);
++
++                              /* create a phyid using MDIO bus id
++                               * and MDIO bus address
++                               */
++                              snprintf(adapter[idx]->phy_id,
++                                       MII_BUS_ID_SIZE + 3, PHY_ID_FMT,
++                                       miibus->id,
++                                       adapter[idx]->phy_mdio_addr);
++                      }
++              } else {
++                      adapter[idx]->poll_required = 0;
++                      adapter[idx]->forced_speed = SPEED_1000;
++                      adapter[idx]->forced_duplex = DUPLEX_FULL;
++              }
++
++              idx++;
++      }
++
++      edma_cinfo->edma_ctl_table_hdr = register_net_sysctl(&init_net,
++                                                           "net/edma",
++                                                           edma_table);
++      if (!edma_cinfo->edma_ctl_table_hdr) {
++              dev_err(&pdev->dev, "edma sysctl table hdr not registered\n");
++              goto err_unregister_sysctl_tbl;
++      }
++
++      /* Disable all 16 Tx and 8 rx irqs */
++      edma_irq_disable(edma_cinfo);
++
++      err = edma_reset(edma_cinfo);
++      if (err) {
++              err = -EIO;
++              goto err_reset;
++      }
++
++      /* Populate per_core_info, do a netif_napi_add, request 16 TX
++       * irqs and 8 RX irqs, then do a napi enable
++       */
++      for (i = 0; i < CONFIG_NR_CPUS; i++) {
++              u8 rx_start;
++
++              edma_cinfo->edma_percpu_info[i].napi.state = 0;
++
++              netif_napi_add(edma_netdev[0],
++                             &edma_cinfo->edma_percpu_info[i].napi,
++                             edma_poll, 64);
++              napi_enable(&edma_cinfo->edma_percpu_info[i].napi);
++              edma_cinfo->edma_percpu_info[i].tx_mask = tx_mask[i];
++              edma_cinfo->edma_percpu_info[i].rx_mask = EDMA_RX_PER_CPU_MASK
++                              << (i << EDMA_RX_PER_CPU_MASK_SHIFT);
++              edma_cinfo->edma_percpu_info[i].tx_start = tx_start[i];
++              edma_cinfo->edma_percpu_info[i].rx_start =
++                      i << EDMA_RX_CPU_START_SHIFT;
++              rx_start = i << EDMA_RX_CPU_START_SHIFT;
++              edma_cinfo->edma_percpu_info[i].tx_status = 0;
++              edma_cinfo->edma_percpu_info[i].rx_status = 0;
++              edma_cinfo->edma_percpu_info[i].edma_cinfo = edma_cinfo;
++
++              /* Request irq per core */
++              for (j = edma_cinfo->edma_percpu_info[i].tx_start;
++                   j < tx_start[i] + 4; j++) {
++                      sprintf(&edma_tx_irq[j][0], "edma_eth_tx%d", j);
++                      err = request_irq(edma_cinfo->tx_irq[j],
++                                        edma_interrupt,
++                                        0,
++                                        &edma_tx_irq[j][0],
++                                        &edma_cinfo->edma_percpu_info[i]);
++                      if (err)
++                              goto err_reset;
++              }
++
++              for (j = edma_cinfo->edma_percpu_info[i].rx_start;
++                   j < (rx_start +
++                   ((edma_cinfo->num_rx_queues == 4) ? 1 : 2));
++                   j++) {
++                      sprintf(&edma_rx_irq[j][0], "edma_eth_rx%d", j);
++                      err = request_irq(edma_cinfo->rx_irq[j],
++                                        edma_interrupt,
++                                        0,
++                                        &edma_rx_irq[j][0],
++                                        &edma_cinfo->edma_percpu_info[i]);
++                      if (err)
++                              goto err_reset;
++              }
++
++#ifdef CONFIG_RFS_ACCEL
++              for (j = edma_cinfo->edma_percpu_info[i].rx_start;
++                   j < rx_start + 2; j += 2) {
++                      err = irq_cpu_rmap_add(edma_netdev[0]->rx_cpu_rmap,
++                                             edma_cinfo->rx_irq[j]);
++                      if (err)
++                              goto err_rmap_add_fail;
++              }
++#endif
++      }
++
++      /* Used to clear the interrupt status, allocate rx buffers and
++       * configure the edma descriptor registers
++       */
++      err = edma_configure(edma_cinfo);
++      if (err) {
++              err = -EIO;
++              goto err_configure;
++      }
++
++      /* Configure the RSS indirection table.
++       * The 128 hash entries will be configured in the following
++       * pattern: hash{0,1,2,3} = {Q0,Q2,Q4,Q6} respectively,
++       * and so on
++       */
++      for (i = 0; i < EDMA_NUM_IDT; i++)
++              edma_write_reg(EDMA_REG_RSS_IDT(i), EDMA_RSS_IDT_VALUE);
++
++      /* Configure the load balance mapping table.
++       * 4 table entries will be configured according to the
++       * following pattern: load_balance{0,1,2,3} = {Q0,Q1,Q3,Q4}
++       * respectively.
++       */
++      edma_write_reg(EDMA_REG_LB_RING, EDMA_LB_REG_VALUE);
++
++      /* Configure Virtual queue for Tx rings
++       * User can also change this value runtime through
++       * a sysctl
++       */
++      edma_write_reg(EDMA_REG_VQ_CTRL0, EDMA_VQ_REG_VALUE);
++      edma_write_reg(EDMA_REG_VQ_CTRL1, EDMA_VQ_REG_VALUE);
++
++      /* Configure max AXI burst write size to 128 bytes */
++      edma_write_reg(EDMA_REG_AXIW_CTRL_MAXWRSIZE,
++                     EDMA_AXIW_MAXWRSIZE_VALUE);
++
++      /* Enable all 16 tx and 8 rx irq masks */
++      edma_irq_enable(edma_cinfo);
++      edma_enable_tx_ctrl(&edma_cinfo->hw);
++      edma_enable_rx_ctrl(&edma_cinfo->hw);
++
++      for (i = 0; i < edma_cinfo->num_gmac; i++) {
++              if (adapter[i]->poll_required) {
++                      adapter[i]->phydev =
++                              phy_connect(edma_netdev[i],
++                                          (const char *)adapter[i]->phy_id,
++                                          &edma_adjust_link,
++                                          PHY_INTERFACE_MODE_SGMII);
++                      if (IS_ERR(adapter[i]->phydev)) {
++                              dev_dbg(&pdev->dev, "PHY attach FAIL\n");
++                              err = -EIO;
++                              goto edma_phy_attach_fail;
++                      } else {
++                              adapter[i]->phydev->advertising |=
++                                      ADVERTISED_Pause |
++                                      ADVERTISED_Asym_Pause;
++                              adapter[i]->phydev->supported |=
++                                      SUPPORTED_Pause |
++                                      SUPPORTED_Asym_Pause;
++                      }
++              } else {
++                      adapter[i]->phydev = NULL;
++              }
++      }
++
++      spin_lock_init(&edma_cinfo->stats_lock);
++
++      init_timer(&edma_stats_timer);
++      edma_stats_timer.expires = jiffies + 1*HZ;
++      edma_stats_timer.data = (unsigned long)edma_cinfo;
++      edma_stats_timer.function = edma_statistics_timer; /* timer handler */
++      add_timer(&edma_stats_timer);
++
++      return 0;
++
++edma_phy_attach_fail:
++      miibus = NULL;
++err_configure:
++#ifdef CONFIG_RFS_ACCEL
++      for (i = 0; i < edma_cinfo->num_gmac; i++) {
++              free_irq_cpu_rmap(adapter[i]->netdev->rx_cpu_rmap);
++              adapter[i]->netdev->rx_cpu_rmap = NULL;
++      }
++#endif
++err_rmap_add_fail:
++      edma_free_irqs(adapter[0]);
++      for (i = 0; i < CONFIG_NR_CPUS; i++)
++              napi_disable(&edma_cinfo->edma_percpu_info[i].napi);
++err_reset:
++err_unregister_sysctl_tbl:
++err_rmap_alloc_fail:
++      for (i = 0; i < edma_cinfo->num_gmac; i++)
++              unregister_netdev(edma_netdev[i]);
++err_register:
++err_mdiobus_init_fail:
++      edma_free_rx_rings(edma_cinfo);
++err_rx_rinit:
++      edma_free_tx_rings(edma_cinfo);
++err_tx_rinit:
++      edma_free_queues(edma_cinfo);
++err_rx_qinit:
++err_tx_qinit:
++      iounmap(edma_cinfo->hw.hw_addr);
++err_ioremap:
++      for (i = 0; i < edma_cinfo->num_gmac; i++) {
++              if (edma_netdev[i])
++                      free_netdev(edma_netdev[i]);
++      }
++err_cinfo:
++      kfree(edma_cinfo);
++err_alloc:
++      return err;
++}
++
++/* edma_axi_remove()
++ *    Device Removal Routine
++ *
++ * edma_axi_remove is called by the platform subsystem to alert the driver
++ * that it should release a platform device.
++ */
++static int edma_axi_remove(struct platform_device *pdev)
++{
++      struct edma_adapter *adapter = netdev_priv(edma_netdev[0]);
++      struct edma_common_info *edma_cinfo = adapter->edma_cinfo;
++      struct edma_hw *hw = &edma_cinfo->hw;
++      int i;
++
++      for (i = 0; i < edma_cinfo->num_gmac; i++)
++              unregister_netdev(edma_netdev[i]);
++
++      edma_stop_rx_tx(hw);
++      for (i = 0; i < CONFIG_NR_CPUS; i++)
++              napi_disable(&edma_cinfo->edma_percpu_info[i].napi);
++
++      edma_irq_disable(edma_cinfo);
++      edma_write_reg(EDMA_REG_RX_ISR, 0xff);
++      edma_write_reg(EDMA_REG_TX_ISR, 0xffff);
++#ifdef CONFIG_RFS_ACCEL
++      for (i = 0; i < edma_cinfo->num_gmac; i++) {
++              free_irq_cpu_rmap(edma_netdev[i]->rx_cpu_rmap);
++              edma_netdev[i]->rx_cpu_rmap = NULL;
++      }
++#endif
++
++      for (i = 0; i < edma_cinfo->num_gmac; i++) {
++              struct edma_adapter *adapter = netdev_priv(edma_netdev[i]);
++
++              if (adapter->phydev)
++                      phy_disconnect(adapter->phydev);
++      }
++
++      del_timer_sync(&edma_stats_timer);
++      edma_free_irqs(adapter);
++      unregister_net_sysctl_table(edma_cinfo->edma_ctl_table_hdr);
++      edma_free_tx_resources(edma_cinfo);
++      edma_free_rx_resources(edma_cinfo);
++      edma_free_tx_rings(edma_cinfo);
++      edma_free_rx_rings(edma_cinfo);
++      edma_free_queues(edma_cinfo);
++      for (i = 0; i < edma_cinfo->num_gmac; i++)
++              free_netdev(edma_netdev[i]);
++
++      kfree(edma_cinfo);
++
++      return 0;
++}
++
++static const struct of_device_id edma_of_mtable[] = {
++      {.compatible = "qcom,ess-edma" },
++      {}
++};
++MODULE_DEVICE_TABLE(of, edma_of_mtable);
++
++static struct platform_driver edma_axi_driver = {
++      .driver = {
++              .name    = edma_axi_driver_name,
++              .of_match_table = edma_of_mtable,
++      },
++      .probe    = edma_axi_probe,
++      .remove   = edma_axi_remove,
++};
++
++module_platform_driver(edma_axi_driver);
++
++MODULE_AUTHOR("Qualcomm Atheros Inc");
++MODULE_DESCRIPTION("QCA ESS EDMA driver");
++MODULE_LICENSE("GPL");
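The probe path above programs the sixteen EDMA_REG_RSS_IDT(i) registers with EDMA_RSS_IDT_VALUE and only summarizes the resulting hash-to-queue mapping in a comment. As a side note (not part of the patch), a standalone user-space sketch of that mapping, assuming each 32-bit IDT register packs eight 4-bit RX queue IDs starting at the least significant nibble, which is what the comment and the register defines suggest:

#include <stdio.h>
#include <stdint.h>

#define EDMA_NUM_IDT		16
#define EDMA_RSS_IDT_VALUE	0x64206420

int main(void)
{
	/* Walk all 16 indirection registers; each carries 8 entries. */
	for (int i = 0; i < EDMA_NUM_IDT; i++) {
		uint32_t reg = EDMA_RSS_IDT_VALUE;

		for (int n = 0; n < 8; n++) {
			unsigned int hash = i * 8 + n;
			unsigned int queue = (reg >> (n * 4)) & 0xf;

			printf("hash %3u -> RX queue %u\n", hash, queue);
		}
	}
	return 0;
}

For the value used here this prints the hash{0,1,2,3} = {Q0,Q2,Q4,Q6} pattern repeated across all 128 entries, which matches the comment in edma_axi_probe().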
+--- /dev/null
++++ b/drivers/net/ethernet/qualcomm/essedma/edma_ethtool.c
+@@ -0,0 +1,374 @@
++/*
++ * Copyright (c) 2015 - 2016, The Linux Foundation. All rights reserved.
++ *
++ * Permission to use, copy, modify, and/or distribute this software for
++ * any purpose with or without fee is hereby granted, provided that the
++ * above copyright notice and this permission notice appear in all copies.
++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
++ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
++ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
++ * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
++ */
++
++#include <linux/ethtool.h>
++#include <linux/netdevice.h>
++#include <linux/string.h>
++#include "edma.h"
++
++struct edma_ethtool_stats {
++      uint8_t stat_string[ETH_GSTRING_LEN];
++      uint32_t stat_offset;
++};
++
++#define EDMA_STAT(m)    offsetof(struct edma_ethtool_statistics, m)
++#define DRVINFO_LEN   32
++
++/* Array of strings describing statistics
++ */
++static const struct edma_ethtool_stats edma_gstrings_stats[] = {
++      {"tx_q0_pkt", EDMA_STAT(tx_q0_pkt)},
++      {"tx_q1_pkt", EDMA_STAT(tx_q1_pkt)},
++      {"tx_q2_pkt", EDMA_STAT(tx_q2_pkt)},
++      {"tx_q3_pkt", EDMA_STAT(tx_q3_pkt)},
++      {"tx_q4_pkt", EDMA_STAT(tx_q4_pkt)},
++      {"tx_q5_pkt", EDMA_STAT(tx_q5_pkt)},
++      {"tx_q6_pkt", EDMA_STAT(tx_q6_pkt)},
++      {"tx_q7_pkt", EDMA_STAT(tx_q7_pkt)},
++      {"tx_q8_pkt", EDMA_STAT(tx_q8_pkt)},
++      {"tx_q9_pkt", EDMA_STAT(tx_q9_pkt)},
++      {"tx_q10_pkt", EDMA_STAT(tx_q10_pkt)},
++      {"tx_q11_pkt", EDMA_STAT(tx_q11_pkt)},
++      {"tx_q12_pkt", EDMA_STAT(tx_q12_pkt)},
++      {"tx_q13_pkt", EDMA_STAT(tx_q13_pkt)},
++      {"tx_q14_pkt", EDMA_STAT(tx_q14_pkt)},
++      {"tx_q15_pkt", EDMA_STAT(tx_q15_pkt)},
++      {"tx_q0_byte", EDMA_STAT(tx_q0_byte)},
++      {"tx_q1_byte", EDMA_STAT(tx_q1_byte)},
++      {"tx_q2_byte", EDMA_STAT(tx_q2_byte)},
++      {"tx_q3_byte", EDMA_STAT(tx_q3_byte)},
++      {"tx_q4_byte", EDMA_STAT(tx_q4_byte)},
++      {"tx_q5_byte", EDMA_STAT(tx_q5_byte)},
++      {"tx_q6_byte", EDMA_STAT(tx_q6_byte)},
++      {"tx_q7_byte", EDMA_STAT(tx_q7_byte)},
++      {"tx_q8_byte", EDMA_STAT(tx_q8_byte)},
++      {"tx_q9_byte", EDMA_STAT(tx_q9_byte)},
++      {"tx_q10_byte", EDMA_STAT(tx_q10_byte)},
++      {"tx_q11_byte", EDMA_STAT(tx_q11_byte)},
++      {"tx_q12_byte", EDMA_STAT(tx_q12_byte)},
++      {"tx_q13_byte", EDMA_STAT(tx_q13_byte)},
++      {"tx_q14_byte", EDMA_STAT(tx_q14_byte)},
++      {"tx_q15_byte", EDMA_STAT(tx_q15_byte)},
++      {"rx_q0_pkt", EDMA_STAT(rx_q0_pkt)},
++      {"rx_q1_pkt", EDMA_STAT(rx_q1_pkt)},
++      {"rx_q2_pkt", EDMA_STAT(rx_q2_pkt)},
++      {"rx_q3_pkt", EDMA_STAT(rx_q3_pkt)},
++      {"rx_q4_pkt", EDMA_STAT(rx_q4_pkt)},
++      {"rx_q5_pkt", EDMA_STAT(rx_q5_pkt)},
++      {"rx_q6_pkt", EDMA_STAT(rx_q6_pkt)},
++      {"rx_q7_pkt", EDMA_STAT(rx_q7_pkt)},
++      {"rx_q0_byte", EDMA_STAT(rx_q0_byte)},
++      {"rx_q1_byte", EDMA_STAT(rx_q1_byte)},
++      {"rx_q2_byte", EDMA_STAT(rx_q2_byte)},
++      {"rx_q3_byte", EDMA_STAT(rx_q3_byte)},
++      {"rx_q4_byte", EDMA_STAT(rx_q4_byte)},
++      {"rx_q5_byte", EDMA_STAT(rx_q5_byte)},
++      {"rx_q6_byte", EDMA_STAT(rx_q6_byte)},
++      {"rx_q7_byte", EDMA_STAT(rx_q7_byte)},
++      {"tx_desc_error", EDMA_STAT(tx_desc_error)},
++};
++
++#define EDMA_STATS_LEN ARRAY_SIZE(edma_gstrings_stats)
++
++/* edma_get_strset_count()
++ *    Get strset count
++ */
++static int edma_get_strset_count(struct net_device *netdev,
++                               int sset)
++{
++      switch (sset) {
++      case ETH_SS_STATS:
++              return EDMA_STATS_LEN;
++      default:
++              netdev_dbg(netdev, "%s: Invalid string set\n", __func__);
++              return -EOPNOTSUPP;
++      }
++}
++
++/* edma_get_strings()
++ *    get stats string
++ */
++static void edma_get_strings(struct net_device *netdev, uint32_t stringset,
++                           uint8_t *data)
++{
++      uint8_t *p = data;
++      uint32_t i;
++
++      switch (stringset) {
++      case ETH_SS_STATS:
++              for (i = 0; i < EDMA_STATS_LEN; i++) {
++                      memcpy(p, edma_gstrings_stats[i].stat_string,
++                              min((size_t)ETH_GSTRING_LEN,
++                                  strlen(edma_gstrings_stats[i].stat_string)
++                                  + 1));
++                      p += ETH_GSTRING_LEN;
++              }
++              break;
++      }
++}
++
++/* edma_get_ethtool_stats()
++ *    Get ethtool statistics
++ */
++static void edma_get_ethtool_stats(struct net_device *netdev,
++                                 struct ethtool_stats *stats, uint64_t *data)
++{
++      struct edma_adapter *adapter = netdev_priv(netdev);
++      struct edma_common_info *edma_cinfo = adapter->edma_cinfo;
++      int i;
++      uint8_t *p = NULL;
++
++      edma_read_append_stats(edma_cinfo);
++
++      for (i = 0; i < EDMA_STATS_LEN; i++) {
++              p = (uint8_t *)&(edma_cinfo->edma_ethstats) +
++                      edma_gstrings_stats[i].stat_offset;
++              data[i] = *(uint32_t *)p;
++      }
++}
++
++/* edma_get_drvinfo()
++ *    get edma driver info
++ */
++static void edma_get_drvinfo(struct net_device *dev,
++                           struct ethtool_drvinfo *info)
++{
++      strlcpy(info->driver, "ess_edma", DRVINFO_LEN);
++      strlcpy(info->bus_info, "axi", ETHTOOL_BUSINFO_LEN);
++}
++
++/* edma_nway_reset()
++ *    Reset the phy, if available.
++ */
++static int edma_nway_reset(struct net_device *netdev)
++{
++      return -EINVAL;
++}
++
++/* edma_get_wol()
++ *    get wake on lan info
++ */
++static void edma_get_wol(struct net_device *netdev,
++                       struct ethtool_wolinfo *wol)
++{
++      wol->supported = 0;
++      wol->wolopts = 0;
++}
++
++/* edma_get_msglevel()
++ *    get message level.
++ */
++static uint32_t edma_get_msglevel(struct net_device *netdev)
++{
++      return 0;
++}
++
++/* edma_get_settings()
++ *    Get edma settings
++ */
++static int edma_get_settings(struct net_device *netdev,
++                           struct ethtool_cmd *ecmd)
++{
++      struct edma_adapter *adapter = netdev_priv(netdev);
++
++      if (adapter->poll_required) {
++              struct phy_device *phydev = NULL;
++              uint16_t phyreg;
++
++              if ((adapter->forced_speed != SPEED_UNKNOWN)
++                      && !(adapter->poll_required))
++                      return -EPERM;
++
++              phydev = adapter->phydev;
++
++              ecmd->advertising = phydev->advertising;
++              ecmd->autoneg = phydev->autoneg;
++
++                      ecmd->speed = SPEED_UNKNOWN;
++                      ecmd->speed =  SPEED_UNKNOWN;
++                      ecmd->duplex = DUPLEX_UNKNOWN;
++              } else {
++                      ecmd->speed = phydev->speed;
++                      ecmd->duplex = phydev->duplex;
++              }
++
++              ecmd->phy_address = adapter->phy_mdio_addr;
++
++              phyreg = (uint16_t)phy_read(adapter->phydev, MII_LPA);
++              if (phyreg & LPA_10HALF)
++                      ecmd->lp_advertising |= ADVERTISED_10baseT_Half;
++
++              if (phyreg & LPA_10FULL)
++                      ecmd->lp_advertising |= ADVERTISED_10baseT_Full;
++
++              if (phyreg & LPA_100HALF)
++                      ecmd->lp_advertising |= ADVERTISED_100baseT_Half;
++
++              if (phyreg & LPA_100FULL)
++                      ecmd->lp_advertising |= ADVERTISED_100baseT_Full;
++
++              phyreg = (uint16_t)phy_read(adapter->phydev, MII_STAT1000);
++              if (phyreg & LPA_1000HALF)
++                      ecmd->lp_advertising |= ADVERTISED_1000baseT_Half;
++
++              if (phyreg & LPA_1000FULL)
++                      ecmd->lp_advertising |= ADVERTISED_1000baseT_Full;
++      } else {
++              /* If the speed/duplex for this GMAC is forced and we
++               * are not polling for link state changes, return the
++               * values as specified by platform. This will be true
++               * for GMACs connected to switch, and interfaces that
++               * do not use a PHY.
++               */
++              if (!(adapter->poll_required)) {
++                      if (adapter->forced_speed != SPEED_UNKNOWN) {
++                              /* set speed and duplex */
++                              ethtool_cmd_speed_set(ecmd, SPEED_1000);
++                              ecmd->duplex = DUPLEX_FULL;
++
++                              /* Populate capabilities advertised by self */
++                              ecmd->advertising = 0;
++                              ecmd->autoneg = 0;
++                              ecmd->port = PORT_TP;
++                              ecmd->transceiver = XCVR_EXTERNAL;
++                      } else {
++                              /* non link polled and non
++                               * forced speed/duplex interface
++                               */
++                              return -EIO;
++                      }
++              }
++      }
++
++      return 0;
++}
++
++/* edma_set_settings()
++ *    Set EDMA settings
++ */
++static int edma_set_settings(struct net_device *netdev,
++                           struct ethtool_cmd *ecmd)
++{
++      struct edma_adapter *adapter = netdev_priv(netdev);
++      struct phy_device *phydev = NULL;
++
++      if ((adapter->forced_speed != SPEED_UNKNOWN) &&
++           !adapter->poll_required)
++              return -EPERM;
++
++      phydev = adapter->phydev;
++      phydev->advertising = ecmd->advertising;
++      phydev->autoneg = ecmd->autoneg;
++      phydev->speed = ethtool_cmd_speed(ecmd);
++      phydev->duplex = ecmd->duplex;
++
++      genphy_config_aneg(phydev);
++
++      return 0;
++}
++
++/* edma_get_coalesce
++ *    get interrupt mitigation
++ */
++static int edma_get_coalesce(struct net_device *netdev,
++                           struct ethtool_coalesce *ec)
++{
++      u32 reg_val;
++
++      edma_get_tx_rx_coalesce(&reg_val);
++
++      /* We read the Interrupt Moderation Timer (IMT) register value and
++       * use the lower 16 bits for Rx and the upper 16 bits for Tx. We do a
++       * left shift by 1 because the IMT timer resolution is 2 usecs.
++       * Hence the value given by the register is multiplied by 2 to
++       * get the actual time in usecs.
++       */
++      ec->tx_coalesce_usecs = (((reg_val >> 16) & 0xffff) << 1);
++      ec->rx_coalesce_usecs = ((reg_val & 0xffff) << 1);
++
++      return 0;
++}
++
++/* edma_set_coalesce
++ *    set interrupt mitigation
++ */
++static int edma_set_coalesce(struct net_device *netdev,
++                           struct ethtool_coalesce *ec)
++{
++      if (ec->tx_coalesce_usecs)
++              edma_change_tx_coalesce(ec->tx_coalesce_usecs);
++      if (ec->rx_coalesce_usecs)
++              edma_change_rx_coalesce(ec->rx_coalesce_usecs);
++
++      return 0;
++}
++
++/* edma_set_priv_flags()
++ *    Set EDMA private flags
++ */
++static int edma_set_priv_flags(struct net_device *netdev, u32 flags)
++{
++      return 0;
++}
++
++/* edma_get_priv_flags()
++ *    get edma driver flags
++ */
++static u32 edma_get_priv_flags(struct net_device *netdev)
++{
++      return 0;
++}
++
++/* edma_get_ringparam()
++ *    get ring size
++ */
++static void edma_get_ringparam(struct net_device *netdev,
++                             struct ethtool_ringparam *ring)
++{
++      struct edma_adapter *adapter = netdev_priv(netdev);
++      struct edma_common_info *edma_cinfo = adapter->edma_cinfo;
++
++      ring->tx_max_pending = edma_cinfo->tx_ring_count;
++      ring->rx_max_pending = edma_cinfo->rx_ring_count;
++}
++
++/* Ethtool operations
++ */
++static const struct ethtool_ops edma_ethtool_ops = {
++      .get_drvinfo = &edma_get_drvinfo,
++      .get_link = &ethtool_op_get_link,
++      .get_msglevel = &edma_get_msglevel,
++      .nway_reset = &edma_nway_reset,
++      .get_wol = &edma_get_wol,
++      .get_settings = &edma_get_settings,
++      .set_settings = &edma_set_settings,
++      .get_strings = &edma_get_strings,
++      .get_sset_count = &edma_get_strset_count,
++      .get_ethtool_stats = &edma_get_ethtool_stats,
++      .get_coalesce = &edma_get_coalesce,
++      .set_coalesce = &edma_set_coalesce,
++      .get_priv_flags = edma_get_priv_flags,
++      .set_priv_flags = edma_set_priv_flags,
++      .get_ringparam = edma_get_ringparam,
++};
++
++/* edma_set_ethtool_ops
++ *    Set ethtool operations
++ */
++void edma_set_ethtool_ops(struct net_device *netdev)
++{
++      netdev->ethtool_ops = &edma_ethtool_ops;
++}
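edma_get_coalesce() above derives the ethtool coalesce values by splitting the Interrupt Moderation Timer register into 16-bit Rx/Tx halves and doubling each field, since the timer resolution is 2 usecs. A minimal standalone sketch of that conversion (the register value passed in is purely hypothetical, chosen only for illustration):

#include <stdio.h>
#include <stdint.h>

/* Mirrors the conversion in edma_get_coalesce(): Rx timer in bits 0..15,
 * Tx timer in bits 16..31, both counted in units of 2 microseconds.
 */
static void imt_to_usecs(uint32_t reg, unsigned int *rx_usecs,
			 unsigned int *tx_usecs)
{
	*rx_usecs = (reg & 0xffff) << 1;
	*tx_usecs = ((reg >> 16) & 0xffff) << 1;
}

int main(void)
{
	unsigned int rx, tx;

	imt_to_usecs(0x00c80064, &rx, &tx);	/* hypothetical IMT value */
	printf("rx coalesce: %u usecs, tx coalesce: %u usecs\n", rx, tx);
	return 0;
}

With the hypothetical value 0x00c80064 this reports 200 usecs for Rx and 400 usecs for Tx, the same numbers ethtool -c would show through the get_coalesce hook.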
+--- /dev/null
++++ b/drivers/net/ethernet/qualcomm/essedma/ess_edma.h
+@@ -0,0 +1,332 @@
++/*
++ * Copyright (c) 2014 - 2016, The Linux Foundation. All rights reserved.
++ *
++ * Permission to use, copy, modify, and/or distribute this software for
++ * any purpose with or without fee is hereby granted, provided that the
++ * above copyright notice and this permission notice appear in all copies.
++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
++ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
++ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
++ * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
++ */
++
++#ifndef _ESS_EDMA_H_
++#define _ESS_EDMA_H_
++
++#include <linux/types.h>
++
++struct edma_adapter;
++struct edma_hw;
++
++/* register definition */
++#define EDMA_REG_MAS_CTRL 0x0
++#define EDMA_REG_TIMEOUT_CTRL 0x004
++#define EDMA_REG_DBG0 0x008
++#define EDMA_REG_DBG1 0x00C
++#define EDMA_REG_SW_CTRL0 0x100
++#define EDMA_REG_SW_CTRL1 0x104
++
++/* Interrupt Status Register */
++#define EDMA_REG_RX_ISR 0x200
++#define EDMA_REG_TX_ISR 0x208
++#define EDMA_REG_MISC_ISR 0x210
++#define EDMA_REG_WOL_ISR 0x218
++
++#define EDMA_MISC_ISR_RX_URG_Q(x) (1 << x)
++
++#define EDMA_MISC_ISR_AXIR_TIMEOUT 0x00000100
++#define EDMA_MISC_ISR_AXIR_ERR 0x00000200
++#define EDMA_MISC_ISR_TXF_DEAD 0x00000400
++#define EDMA_MISC_ISR_AXIW_ERR 0x00000800
++#define EDMA_MISC_ISR_AXIW_TIMEOUT 0x00001000
++
++#define EDMA_WOL_ISR 0x00000001
++
++/* Interrupt Mask Register */
++#define EDMA_REG_MISC_IMR 0x214
++#define EDMA_REG_WOL_IMR 0x218
++
++#define EDMA_RX_IMR_NORMAL_MASK 0x1
++#define EDMA_TX_IMR_NORMAL_MASK 0x1
++#define EDMA_MISC_IMR_NORMAL_MASK 0x80001FFF
++#define EDMA_WOL_IMR_NORMAL_MASK 0x1
++
++/* Edma receive consumer index */
++#define EDMA_REG_RX_SW_CONS_IDX_Q(x) (0x220 + ((x) << 2)) /* x is the queue id */
++/* Edma transmit consumer index */
++#define EDMA_REG_TX_SW_CONS_IDX_Q(x) (0x240 + ((x) << 2)) /* x is the queue id */
++
++/* IRQ Moderator Initial Timer Register */
++#define EDMA_REG_IRQ_MODRT_TIMER_INIT 0x280
++#define EDMA_IRQ_MODRT_TIMER_MASK 0xFFFF
++#define EDMA_IRQ_MODRT_RX_TIMER_SHIFT 0
++#define EDMA_IRQ_MODRT_TX_TIMER_SHIFT 16
++
++/* Interrupt Control Register */
++#define EDMA_REG_INTR_CTRL 0x284
++#define EDMA_INTR_CLR_TYP_SHIFT 0
++#define EDMA_INTR_SW_IDX_W_TYP_SHIFT 1
++#define EDMA_INTR_CLEAR_TYPE_W1 0
++#define EDMA_INTR_CLEAR_TYPE_R 1
++
++/* RX Interrupt Mask Register */
++#define EDMA_REG_RX_INT_MASK_Q(x) (0x300 + ((x) << 2)) /* x = queue id */
++
++/* TX Interrupt mask register */
++#define EDMA_REG_TX_INT_MASK_Q(x) (0x340 + ((x) << 2)) /* x = queue id */
++
++/* Load Ptr Register
++ * Software sets this bit after the initialization of the head and tail
++ */
++#define EDMA_REG_TX_SRAM_PART 0x400
++#define EDMA_LOAD_PTR_SHIFT 16
++
++/* TXQ Control Register */
++#define EDMA_REG_TXQ_CTRL 0x404
++#define EDMA_TXQ_CTRL_IP_OPTION_EN 0x10
++#define EDMA_TXQ_CTRL_TXQ_EN 0x20
++#define EDMA_TXQ_CTRL_ENH_MODE 0x40
++#define EDMA_TXQ_CTRL_LS_8023_EN 0x80
++#define EDMA_TXQ_CTRL_TPD_BURST_EN 0x100
++#define EDMA_TXQ_CTRL_LSO_BREAK_EN 0x200
++#define EDMA_TXQ_NUM_TPD_BURST_MASK 0xF
++#define EDMA_TXQ_TXF_BURST_NUM_MASK 0xFFFF
++#define EDMA_TXQ_NUM_TPD_BURST_SHIFT 0
++#define EDMA_TXQ_TXF_BURST_NUM_SHIFT 16
++
++#define EDMA_REG_TXF_WATER_MARK 0x408 /* In 8-bytes */
++#define EDMA_TXF_WATER_MARK_MASK 0x0FFF
++#define EDMA_TXF_LOW_WATER_MARK_SHIFT 0
++#define EDMA_TXF_HIGH_WATER_MARK_SHIFT 16
++#define EDMA_TXQ_CTRL_BURST_MODE_EN 0x80000000
++
++/* WRR Control Register */
++#define EDMA_REG_WRR_CTRL_Q0_Q3 0x40c
++#define EDMA_REG_WRR_CTRL_Q4_Q7 0x410
++#define EDMA_REG_WRR_CTRL_Q8_Q11 0x414
++#define EDMA_REG_WRR_CTRL_Q12_Q15 0x418
++
++/* Weighted round robin (WRR): takes the queue as input and computes
++ * the starting bit at which the weight for that particular queue
++ * is written
++ */
++#define EDMA_WRR_SHIFT(x) (((x) * 5) % 20)
++
++/* Tx Descriptor Control Register */
++#define EDMA_REG_TPD_RING_SIZE 0x41C
++#define EDMA_TPD_RING_SIZE_SHIFT 0
++#define EDMA_TPD_RING_SIZE_MASK 0xFFFF
++
++/* Transmit descriptor base address */
++#define EDMA_REG_TPD_BASE_ADDR_Q(x) (0x420 + ((x) << 2)) /* x = queue id */
++
++/* TPD Index Register */
++#define EDMA_REG_TPD_IDX_Q(x) (0x460 + ((x) << 2)) /* x = queue id */
++
++#define EDMA_TPD_PROD_IDX_BITS 0x0000FFFF
++#define EDMA_TPD_CONS_IDX_BITS 0xFFFF0000
++#define EDMA_TPD_PROD_IDX_MASK 0xFFFF
++#define EDMA_TPD_CONS_IDX_MASK 0xFFFF
++#define EDMA_TPD_PROD_IDX_SHIFT 0
++#define EDMA_TPD_CONS_IDX_SHIFT 16
++
++/* TX Virtual Queue Mapping Control Register */
++#define EDMA_REG_VQ_CTRL0 0x4A0
++#define EDMA_REG_VQ_CTRL1 0x4A4
++
++/* Virtual QID shift: takes the queue as input and computes the
++ * virtual QID position in the virtual QID control register
++ */
++#define EDMA_VQ_ID_SHIFT(i) (((i) * 3) % 24)
++
++/* Virtual Queue Default Value */
++#define EDMA_VQ_REG_VALUE 0x240240
++
++/* Tx side Port Interface Control Register */
++#define EDMA_REG_PORT_CTRL 0x4A8
++#define EDMA_PAD_EN_SHIFT 15
++
++/* Tx side VLAN Configuration Register */
++#define EDMA_REG_VLAN_CFG 0x4AC
++
++#define EDMA_TX_CVLAN 16
++#define EDMA_TX_INS_CVLAN 17
++#define EDMA_TX_CVLAN_TAG_SHIFT 0
++
++#define EDMA_TX_SVLAN 14
++#define EDMA_TX_INS_SVLAN 15
++#define EDMA_TX_SVLAN_TAG_SHIFT 16
++
++/* Tx Queue Packet Statistic Register */
++#define EDMA_REG_TX_STAT_PKT_Q(x) (0x700 + ((x) << 3)) /* x = queue id */
++
++#define EDMA_TX_STAT_PKT_MASK 0xFFFFFF
++
++/* Tx Queue Byte Statistic Register */
++#define EDMA_REG_TX_STAT_BYTE_Q(x) (0x704 + ((x) << 3)) /* x = queue id */
++
++/* Load Balance Based Ring Offset Register */
++#define EDMA_REG_LB_RING 0x800
++#define EDMA_LB_RING_ENTRY_MASK 0xff
++#define EDMA_LB_RING_ID_MASK 0x7
++#define EDMA_LB_RING_PROFILE_ID_MASK 0x3
++#define EDMA_LB_RING_ENTRY_BIT_OFFSET 8
++#define EDMA_LB_RING_ID_OFFSET 0
++#define EDMA_LB_RING_PROFILE_ID_OFFSET 3
++#define EDMA_LB_REG_VALUE 0x6040200
++
++/* Load Balance Priority Mapping Register */
++#define EDMA_REG_LB_PRI_START 0x804
++#define EDMA_REG_LB_PRI_END 0x810
++#define EDMA_LB_PRI_REG_INC 4
++#define EDMA_LB_PRI_ENTRY_BIT_OFFSET 4
++#define EDMA_LB_PRI_ENTRY_MASK 0xf
++
++/* RSS Priority Mapping Register */
++#define EDMA_REG_RSS_PRI 0x820
++#define EDMA_RSS_PRI_ENTRY_MASK 0xf
++#define EDMA_RSS_RING_ID_MASK 0x7
++#define EDMA_RSS_PRI_ENTRY_BIT_OFFSET 4
++
++/* RSS Indirection Register */
++#define EDMA_REG_RSS_IDT(x) (0x840 + ((x) << 2)) /* x = No. of indirection table */
++#define EDMA_NUM_IDT 16
++#define EDMA_RSS_IDT_VALUE 0x64206420
++
++/* Default RSS Ring Register */
++#define EDMA_REG_DEF_RSS 0x890
++#define EDMA_DEF_RSS_MASK 0x7
++
++/* RSS Hash Function Type Register */
++#define EDMA_REG_RSS_TYPE 0x894
++#define EDMA_RSS_TYPE_NONE 0x01
++#define EDMA_RSS_TYPE_IPV4TCP 0x02
++#define EDMA_RSS_TYPE_IPV6_TCP 0x04
++#define EDMA_RSS_TYPE_IPV4_UDP 0x08
++#define EDMA_RSS_TYPE_IPV6UDP 0x10
++#define EDMA_RSS_TYPE_IPV4 0x20
++#define EDMA_RSS_TYPE_IPV6 0x40
++#define EDMA_RSS_HASH_MODE_MASK 0x7f
++
++#define EDMA_REG_RSS_HASH_VALUE 0x8C0
++
++#define EDMA_REG_RSS_TYPE_RESULT 0x8C4
++
++#define EDMA_HASH_TYPE_START 0
++#define EDMA_HASH_TYPE_END 5
++#define EDMA_HASH_TYPE_SHIFT 12
++
++#define EDMA_RFS_FLOW_ENTRIES 1024
++#define EDMA_RFS_FLOW_ENTRIES_MASK (EDMA_RFS_FLOW_ENTRIES - 1)
++#define EDMA_RFS_EXPIRE_COUNT_PER_CALL 128
++
++/* RFD Base Address Register */
++#define EDMA_REG_RFD_BASE_ADDR_Q(x) (0x950 + ((x) << 2)) /* x = queue id */
++
++/* RFD Index Register */
++#define EDMA_REG_RFD_IDX_Q(x) (0x9B0 + ((x) << 2))
++
++#define EDMA_RFD_PROD_IDX_BITS 0x00000FFF
++#define EDMA_RFD_CONS_IDX_BITS 0x0FFF0000
++#define EDMA_RFD_PROD_IDX_MASK 0xFFF
++#define EDMA_RFD_CONS_IDX_MASK 0xFFF
++#define EDMA_RFD_PROD_IDX_SHIFT 0
++#define EDMA_RFD_CONS_IDX_SHIFT 16
++
++/* Rx Descriptor Control Register */
++#define EDMA_REG_RX_DESC0 0xA10
++#define EDMA_RFD_RING_SIZE_MASK 0xFFF
++#define EDMA_RX_BUF_SIZE_MASK 0xFFFF
++#define EDMA_RFD_RING_SIZE_SHIFT 0
++#define EDMA_RX_BUF_SIZE_SHIFT 16
++
++#define EDMA_REG_RX_DESC1 0xA14
++#define EDMA_RXQ_RFD_BURST_NUM_MASK 0x3F
++#define EDMA_RXQ_RFD_PF_THRESH_MASK 0x1F
++#define EDMA_RXQ_RFD_LOW_THRESH_MASK 0xFFF
++#define EDMA_RXQ_RFD_BURST_NUM_SHIFT 0
++#define EDMA_RXQ_RFD_PF_THRESH_SHIFT 8
++#define EDMA_RXQ_RFD_LOW_THRESH_SHIFT 16
++
++/* RXQ Control Register */
++#define EDMA_REG_RXQ_CTRL 0xA18
++#define EDMA_FIFO_THRESH_TYPE_SHIF 0
++#define EDMA_FIFO_THRESH_128_BYTE 0x0
++#define EDMA_FIFO_THRESH_64_BYTE 0x1
++#define EDMA_RXQ_CTRL_RMV_VLAN 0x00000002
++#define EDMA_RXQ_CTRL_EN 0x0000FF00
++
++/* AXI Burst Size Config */
++#define EDMA_REG_AXIW_CTRL_MAXWRSIZE 0xA1C
++#define EDMA_AXIW_MAXWRSIZE_VALUE 0x0
++
++/* Rx Statistics Register */
++#define EDMA_REG_RX_STAT_BYTE_Q(x) (0xA30 + ((x) << 2)) /* x = queue id */
++#define EDMA_REG_RX_STAT_PKT_Q(x) (0xA50 + ((x) << 2)) /* x = queue id */
++
++/* WoL Pattern Length Register */
++#define EDMA_REG_WOL_PATTERN_LEN0 0xC00
++#define EDMA_WOL_PT_LEN_MASK 0xFF
++#define EDMA_WOL_PT0_LEN_SHIFT 0
++#define EDMA_WOL_PT1_LEN_SHIFT 8
++#define EDMA_WOL_PT2_LEN_SHIFT 16
++#define EDMA_WOL_PT3_LEN_SHIFT 24
++
++#define EDMA_REG_WOL_PATTERN_LEN1 0xC04
++#define EDMA_WOL_PT4_LEN_SHIFT 0
++#define EDMA_WOL_PT5_LEN_SHIFT 8
++#define EDMA_WOL_PT6_LEN_SHIFT 16
++
++/* WoL Control Register */
++#define EDMA_REG_WOL_CTRL 0xC08
++#define EDMA_WOL_WK_EN 0x00000001
++#define EDMA_WOL_MG_EN 0x00000002
++#define EDMA_WOL_PT0_EN 0x00000004
++#define EDMA_WOL_PT1_EN 0x00000008
++#define EDMA_WOL_PT2_EN 0x00000010
++#define EDMA_WOL_PT3_EN 0x00000020
++#define EDMA_WOL_PT4_EN 0x00000040
++#define EDMA_WOL_PT5_EN 0x00000080
++#define EDMA_WOL_PT6_EN 0x00000100
++
++/* MAC Control Register */
++#define EDMA_REG_MAC_CTRL0 0xC20
++#define EDMA_REG_MAC_CTRL1 0xC24
++
++/* WoL Pattern Register */
++#define EDMA_REG_WOL_PATTERN_START 0x5000
++#define EDMA_PATTERN_PART_REG_OFFSET 0x40
++
++/* TX descriptor fields */
++#define EDMA_TPD_HDR_SHIFT 0
++#define EDMA_TPD_PPPOE_EN 0x00000100
++#define EDMA_TPD_IP_CSUM_EN 0x00000200
++#define EDMA_TPD_TCP_CSUM_EN 0x00000400
++#define EDMA_TPD_UDP_CSUM_EN 0x00000800
++#define EDMA_TPD_CUSTOM_CSUM_EN 0x00000C00
++#define EDMA_TPD_LSO_EN 0x00001000
++#define EDMA_TPD_LSO_V2_EN 0x00002000
++#define EDMA_TPD_IPV4_EN 0x00010000
++#define EDMA_TPD_MSS_MASK 0x1FFF
++#define EDMA_TPD_MSS_SHIFT 18
++#define EDMA_TPD_CUSTOM_CSUM_SHIFT 18
++
++/* RRD descriptor fields */
++#define EDMA_RRD_NUM_RFD_MASK 0x000F
++#define EDMA_RRD_SVLAN 0x8000
++#define EDMA_RRD_FLOW_COOKIE_MASK 0x07FF
++
++#define EDMA_RRD_PKT_SIZE_MASK 0x3FFF
++#define EDMA_RRD_CSUM_FAIL_MASK 0xC000
++#define EDMA_RRD_CVLAN 0x0001
++#define EDMA_RRD_DESC_VALID 0x8000
++
++#define EDMA_RRD_PRIORITY_SHIFT 4
++#define EDMA_RRD_PRIORITY_MASK 0x7
++#define EDMA_RRD_PORT_TYPE_SHIFT 7
++#define EDMA_RRD_PORT_TYPE_MASK 0x1F
++#endif /* _ESS_EDMA_H_ */
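The WRR comment above only states that EDMA_WRR_SHIFT() yields the starting bit of a queue's weight. A standalone sketch of the layout, assuming (as the consecutive EDMA_REG_WRR_CTRL_Q0_Q3..Q12_Q15 defines suggest) that each 32-bit register carries four 5-bit weights for four consecutive TX queues:

#include <stdio.h>

#define EDMA_REG_WRR_CTRL_Q0_Q3	0x40c
#define EDMA_WRR_SHIFT(x)	(((x) * 5) % 20)

int main(void)
{
	for (int q = 0; q < 16; q++) {
		/* Registers for Q0-Q3, Q4-Q7, Q8-Q11, Q12-Q15 are 4 bytes apart. */
		unsigned int reg = EDMA_REG_WRR_CTRL_Q0_Q3 + (q / 4) * 4;

		printf("TX queue %2d: weight at reg 0x%03x, bits %d..%d\n",
		       q, reg, EDMA_WRR_SHIFT(q), EDMA_WRR_SHIFT(q) + 4);
	}
	return 0;
}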
diff --git a/target/linux/ipq40xx/patches-4.14/711-dts-ipq4019-add-ethernet-essedma-node.patch b/target/linux/ipq40xx/patches-4.14/711-dts-ipq4019-add-ethernet-essedma-node.patch
new file mode 100644
index 0000000..285cafd
--- /dev/null
@@ -0,0 +1,92 @@
+From c611d3780fa101662a822d10acf8feb04ca97409 Mon Sep 17 00:00:00 2001
+From: Christian Lamparter <chunkeey@gmail.com>
+Date: Sun, 20 Nov 2016 01:01:10 +0100
+Subject: [PATCH] dts: ipq4019: add ethernet essedma node
+
+This patch adds the device-tree node for the ethernet
+interfaces.
+
+Note: The driver isn't anywhere close to being upstream,
+so the info might change.
+
+Signed-off-by: Christian Lamparter <chunkeey@gmail.com>
+---
+ arch/arm/boot/dts/qcom-ipq4019.dtsi | 60 +++++++++++++++++++++++++++++++++++++
+ 1 file changed, 60 insertions(+)
+
+--- a/arch/arm/boot/dts/qcom-ipq4019.dtsi
++++ b/arch/arm/boot/dts/qcom-ipq4019.dtsi
+@@ -28,6 +28,8 @@
+               spi1 = &spi_1;
+               i2c0 = &i2c_0;
+               i2c1 = &i2c_1;
++              ethernet0 = &gmac0;
++              ethernet1 = &gmac1;
+       };
+       cpus {
+@@ -590,6 +592,64 @@
+                       status = "disabled";
+               };
++              edma@c080000 {
++                      compatible = "qcom,ess-edma";
++                      reg = <0xc080000 0x8000>;
++                      qcom,page-mode = <0>;
++                      qcom,rx_head_buf_size = <1540>;
++                      qcom,mdio_supported;
++                      qcom,poll_required = <1>;
++                      qcom,num_gmac = <2>;
++                      interrupts = <0  65 IRQ_TYPE_EDGE_RISING
++                                    0  66 IRQ_TYPE_EDGE_RISING
++                                    0  67 IRQ_TYPE_EDGE_RISING
++                                    0  68 IRQ_TYPE_EDGE_RISING
++                                    0  69 IRQ_TYPE_EDGE_RISING
++                                    0  70 IRQ_TYPE_EDGE_RISING
++                                    0  71 IRQ_TYPE_EDGE_RISING
++                                    0  72 IRQ_TYPE_EDGE_RISING
++                                    0  73 IRQ_TYPE_EDGE_RISING
++                                    0  74 IRQ_TYPE_EDGE_RISING
++                                    0  75 IRQ_TYPE_EDGE_RISING
++                                    0  76 IRQ_TYPE_EDGE_RISING
++                                    0  77 IRQ_TYPE_EDGE_RISING
++                                    0  78 IRQ_TYPE_EDGE_RISING
++                                    0  79 IRQ_TYPE_EDGE_RISING
++                                    0  80 IRQ_TYPE_EDGE_RISING
++                                    0 240 IRQ_TYPE_EDGE_RISING
++                                    0 241 IRQ_TYPE_EDGE_RISING
++                                    0 242 IRQ_TYPE_EDGE_RISING
++                                    0 243 IRQ_TYPE_EDGE_RISING
++                                    0 244 IRQ_TYPE_EDGE_RISING
++                                    0 245 IRQ_TYPE_EDGE_RISING
++                                    0 246 IRQ_TYPE_EDGE_RISING
++                                    0 247 IRQ_TYPE_EDGE_RISING
++                                    0 248 IRQ_TYPE_EDGE_RISING
++                                    0 249 IRQ_TYPE_EDGE_RISING
++                                    0 250 IRQ_TYPE_EDGE_RISING
++                                    0 251 IRQ_TYPE_EDGE_RISING
++                                    0 252 IRQ_TYPE_EDGE_RISING
++                                    0 253 IRQ_TYPE_EDGE_RISING
++                                    0 254 IRQ_TYPE_EDGE_RISING
++                                    0 255 IRQ_TYPE_EDGE_RISING>;
++
++                      status = "disabled";
++
++                      gmac0: gmac0 {
++                              local-mac-address = [00 00 00 00 00 00];
++                              vlan_tag = <1 0x1f>;
++                      };
++
++                      gmac1: gmac1 {
++                              local-mac-address = [00 00 00 00 00 00];
++                              qcom,phy_mdio_addr = <4>;
++                              qcom,poll_required = <1>;
++                              qcom,forced_speed = <1000>;
++                              qcom,forced_duplex = <1>;
++                              vlan_tag = <2 0x20>;
++                      };
++              };
++
+               usb3_ss_phy: ssphy@9a000 {
+                       compatible = "qca,uni-ssphy";
+                       reg = <0x9a000 0x800>;
diff --git a/target/linux/ipq40xx/patches-4.14/820-qcom-ipq4019-Add-IPQ4019-USB-HS-SS-PHY-drivers.patch b/target/linux/ipq40xx/patches-4.14/820-qcom-ipq4019-Add-IPQ4019-USB-HS-SS-PHY-drivers.patch
new file mode 100644
index 0000000..47291fe
--- /dev/null
@@ -0,0 +1,429 @@
+From e73682ec4455c34f3f3edc7f40d90ed297521012 Mon Sep 17 00:00:00 2001
+From: Senthilkumar N L <snlakshm@codeaurora.org>
+Date: Tue, 6 Jan 2015 12:52:23 +0530
+Subject: [PATCH] qcom: ipq4019: Add IPQ4019 USB HS/SS PHY drivers
+
+These drivers handle control and configuration of the HS
+and SS USB PHY transceivers.
+
+Signed-off-by: Senthilkumar N L <snlakshm@codeaurora.org>
+Signed-off-by: Christian Lamparter <chunkeey@gmail.com>
+
+---
+Changed:
+       - replaced spaces with tabs
+       - remove emulation and host variables
+---
+ drivers/usb/phy/Kconfig          |  11 ++
+ drivers/usb/phy/Makefile         |   2 +
+ drivers/usb/phy/phy-qca-baldur.c | 233 +++++++++++++++++++++++++++++++++++++++
+ drivers/usb/phy/phy-qca-uniphy.c | 141 +++++++++++++++++++++++
+ 4 files changed, 387 insertions(+)
+ create mode 100644 drivers/usb/phy/phy-qca-baldur.c
+ create mode 100644 drivers/usb/phy/phy-qca-uniphy.c
+
+--- a/drivers/usb/phy/Kconfig
++++ b/drivers/usb/phy/Kconfig
+@@ -188,6 +188,17 @@ config USB_MXS_PHY
+         MXS Phy is used by some of the i.MX SoCs, for example imx23/28/6x.
++config USB_IPQ4019_PHY
++      tristate "IPQ4019 PHY wrappers support"
++      depends on (USB || USB_GADGET) && ARCH_QCOM
++      select USB_PHY
++      help
++        Enable this to support the USB PHY transceivers on QCA961x chips.
++        It handles PHY initialization, clock management required after
++        resetting the hardware and power management.
++        This driver is required even for peripheral only or host only
++        mode configurations.
++
+ config USB_ULPI
+       bool "Generic ULPI Transceiver Driver"
+       depends on ARM || ARM64
+--- a/drivers/usb/phy/Makefile
++++ b/drivers/usb/phy/Makefile
+@@ -21,6 +21,8 @@ obj-$(CONFIG_USB_GPIO_VBUS)          += phy-gpio
+ obj-$(CONFIG_USB_ISP1301)             += phy-isp1301.o
+ obj-$(CONFIG_USB_MSM_OTG)             += phy-msm-usb.o
+ obj-$(CONFIG_USB_QCOM_8X16_PHY)       += phy-qcom-8x16-usb.o
++obj-$(CONFIG_USB_IPQ4019_PHY)         += phy-qca-baldur.o
++obj-$(CONFIG_USB_IPQ4019_PHY)         += phy-qca-uniphy.o
+ obj-$(CONFIG_USB_MV_OTG)              += phy-mv-usb.o
+ obj-$(CONFIG_USB_MXS_PHY)             += phy-mxs-usb.o
+ obj-$(CONFIG_USB_ULPI)                        += phy-ulpi.o
+--- /dev/null
++++ b/drivers/usb/phy/phy-qca-baldur.c
+@@ -0,0 +1,233 @@
++/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
++ *
++ * Permission to use, copy, modify, and/or distribute this software for any
++ * purpose with or without fee is hereby granted, provided that the above
++ * copyright notice and this permission notice appear in all copies.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
++ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
++ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
++ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
++ *
++ */
++
++#include <linux/clk.h>
++#include <linux/err.h>
++#include <linux/io.h>
++#include <linux/module.h>
++#include <linux/of.h>
++#include <linux/platform_device.h>
++#include <linux/regulator/consumer.h>
++#include <linux/usb/phy.h>
++#include <linux/reset.h>
++#include <linux/of_device.h>
++
++/**
++ *  USB Hardware registers
++ */
++#define PHY_CTRL0_ADDR        0x000
++#define PHY_CTRL1_ADDR        0x004
++#define PHY_CTRL2_ADDR        0x008
++#define PHY_CTRL3_ADDR        0x00C
++#define PHY_CTRL4_ADDR        0x010
++#define PHY_MISC_ADDR 0x024
++#define PHY_IPG_ADDR  0x030
++
++#define PHY_CTRL0_VAL 0xA4600015
++#define PHY_CTRL1_VAL 0x09500000
++#define PHY_CTRL2_VAL 0x00058180
++#define PHY_CTRL3_VAL 0x6DB6DCD6
++#define PHY_CTRL4_VAL 0x836DB6DB
++#define PHY_MISC_VAL  0x3803FB0C
++#define PHY_IPG_VAL   0x47323232
++
++#define USB30_HS_PHY_HOST_MODE        (0x01 << 21)
++#define USB20_HS_PHY_HOST_MODE        (0x01 << 5)
++
++/* used to differentiate between USB3 HS and USB2 HS PHY */
++struct qca_baldur_hs_data {
++      unsigned int usb3_hs_phy;
++      unsigned int phy_config_offset;
++};
++
++struct qca_baldur_hs_phy {
++      struct device *dev;
++      struct usb_phy phy;
++
++      void __iomem *base;
++      void __iomem *qscratch_base;
++
++      struct reset_control *por_rst;
++      struct reset_control *srif_rst;
++
++      const struct qca_baldur_hs_data *data;
++};
++
++#define phy_to_dw_phy(x) container_of((x), struct qca_baldur_hs_phy, phy)
++
++static int qca_baldur_phy_read(struct usb_phy *x, u32 reg)
++{
++      struct qca_baldur_hs_phy *phy = phy_to_dw_phy(x);
++
++      return readl(phy->base + reg);
++}
++
++static int qca_baldur_phy_write(struct usb_phy *x, u32 val, u32 reg)
++{
++      struct qca_baldur_hs_phy *phy = phy_to_dw_phy(x);
++
++      writel(val, phy->base + reg);
++      return 0;
++}
++
++static int qca_baldur_hs_phy_init(struct usb_phy *x)
++{
++      struct qca_baldur_hs_phy *phy = phy_to_dw_phy(x);
++
++      /* assert HS PHY POR reset */
++      reset_control_assert(phy->por_rst);
++      msleep(10);
++
++      /* assert HS PHY SRIF reset */
++      reset_control_assert(phy->srif_rst);
++      msleep(10);
++
++      /* deassert HS PHY SRIF reset and program HS PHY registers */
++      reset_control_deassert(phy->srif_rst);
++      msleep(10);
++
++      /* perform PHY register writes */
++      writel(PHY_CTRL0_VAL, phy->base + PHY_CTRL0_ADDR);
++      writel(PHY_CTRL1_VAL, phy->base + PHY_CTRL1_ADDR);
++      writel(PHY_CTRL2_VAL, phy->base + PHY_CTRL2_ADDR);
++      writel(PHY_CTRL3_VAL, phy->base + PHY_CTRL3_ADDR);
++      writel(PHY_CTRL4_VAL, phy->base + PHY_CTRL4_ADDR);
++      writel(PHY_MISC_VAL, phy->base + PHY_MISC_ADDR);
++      writel(PHY_IPG_VAL, phy->base + PHY_IPG_ADDR);
++
++      msleep(10);
++
++      /* de-assert USB3 HS PHY POR reset */
++      reset_control_deassert(phy->por_rst);
++
++      return 0;
++}
++
++static int qca_baldur_hs_get_resources(struct qca_baldur_hs_phy *phy)
++{
++      struct platform_device *pdev = to_platform_device(phy->dev);
++      struct resource *res;
++
++      res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++      phy->base = devm_ioremap_resource(phy->dev, res);
++      if (IS_ERR(phy->base))
++              return PTR_ERR(phy->base);
++
++      phy->por_rst = devm_reset_control_get(phy->dev, "por_rst");
++      if (IS_ERR(phy->por_rst))
++              return PTR_ERR(phy->por_rst);
++
++      phy->srif_rst = devm_reset_control_get(phy->dev, "srif_rst");
++      if (IS_ERR(phy->srif_rst))
++              return PTR_ERR(phy->srif_rst);
++
++      return 0;
++}
++
++static void qca_baldur_hs_put_resources(struct qca_baldur_hs_phy *phy)
++{
++      reset_control_assert(phy->srif_rst);
++      reset_control_assert(phy->por_rst);
++}
++
++static int qca_baldur_hs_remove(struct platform_device *pdev)
++{
++      struct qca_baldur_hs_phy *phy = platform_get_drvdata(pdev);
++
++      usb_remove_phy(&phy->phy);
++      return 0;
++}
++
++static void qca_baldur_hs_phy_shutdown(struct usb_phy *x)
++{
++      struct qca_baldur_hs_phy *phy = phy_to_dw_phy(x);
++
++      qca_baldur_hs_put_resources(phy);
++}
++
++static struct usb_phy_io_ops qca_baldur_io_ops = {
++      .read = qca_baldur_phy_read,
++      .write = qca_baldur_phy_write,
++};
++
++static const struct qca_baldur_hs_data usb3_hs_data = {
++      .usb3_hs_phy = 1,
++      .phy_config_offset = USB30_HS_PHY_HOST_MODE,
++};
++
++static const struct qca_baldur_hs_data usb2_hs_data = {
++      .usb3_hs_phy = 0,
++      .phy_config_offset = USB20_HS_PHY_HOST_MODE,
++};
++
++static const struct of_device_id qca_baldur_hs_id_table[] = {
++      { .compatible = "qca,baldur-usb3-hsphy", .data = &usb3_hs_data },
++      { .compatible = "qca,baldur-usb2-hsphy", .data = &usb2_hs_data },
++      { /* Sentinel */ }
++};
++MODULE_DEVICE_TABLE(of, qca_baldur_hs_id_table);
++
++static int qca_baldur_hs_probe(struct platform_device *pdev)
++{
++      const struct of_device_id *match;
++      struct qca_baldur_hs_phy *phy;
++      int err;
++
++      match = of_match_device(qca_baldur_hs_id_table, &pdev->dev);
++      if (!match)
++              return -ENODEV;
++
++      phy = devm_kzalloc(&pdev->dev, sizeof(*phy), GFP_KERNEL);
++      if (!phy)
++              return -ENOMEM;
++
++      platform_set_drvdata(pdev, phy);
++      phy->dev = &pdev->dev;
++
++      phy->data = match->data;
++
++      err = qca_baldur_hs_get_resources(phy);
++      if (err < 0) {
++              dev_err(&pdev->dev, "failed to request resources: %d\n", err);
++              return err;
++      }
++
++      phy->phy.dev = phy->dev;
++      phy->phy.label = "qca-baldur-hsphy";
++      phy->phy.init = qca_baldur_hs_phy_init;
++      phy->phy.shutdown = qca_baldur_hs_phy_shutdown;
++      phy->phy.type = USB_PHY_TYPE_USB2;
++      phy->phy.io_ops = &qca_baldur_io_ops;
++
++      err = usb_add_phy_dev(&phy->phy);
++      return err;
++}
++
++static struct platform_driver qca_baldur_hs_driver = {
++      .probe          = qca_baldur_hs_probe,
++      .remove         = qca_baldur_hs_remove,
++      .driver         = {
++              .name   = "qca-baldur-hsphy",
++              .owner  = THIS_MODULE,
++              .of_match_table = qca_baldur_hs_id_table,
++      },
++};
++
++module_platform_driver(qca_baldur_hs_driver);
++
++MODULE_ALIAS("platform:qca-baldur-hsphy");
++MODULE_LICENSE("Dual BSD/GPL");
++MODULE_DESCRIPTION("USB3 QCA BALDUR HSPHY driver");
+--- /dev/null
++++ b/drivers/usb/phy/phy-qca-uniphy.c
+@@ -0,0 +1,135 @@
++/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
++ *
++ * Permission to use, copy, modify, and/or distribute this software for any
++ * purpose with or without fee is hereby granted, provided that the above
++ * copyright notice and this permission notice appear in all copies.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
++ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
++ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
++ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
++ *
++ */
++
++#include <linux/clk.h>
++#include <linux/err.h>
++#include <linux/io.h>
++#include <linux/module.h>
++#include <linux/of.h>
++#include <linux/platform_device.h>
++#include <linux/regulator/consumer.h>
++#include <linux/usb/phy.h>
++#include <linux/reset.h>
++#include <linux/of_device.h>
++
++struct qca_uni_ss_phy {
++      struct usb_phy phy;
++      struct device *dev;
++
++      void __iomem *base;
++
++      struct reset_control *por_rst;
++};
++
++#define phy_to_dw_phy(x) container_of((x), struct qca_uni_ss_phy, phy)
++
++static void qca_uni_ss_phy_shutdown(struct usb_phy *x)
++{
++      struct qca_uni_ss_phy *phy = phy_to_dw_phy(x);
++
++      /* assert SS PHY POR reset */
++      reset_control_assert(phy->por_rst);
++}
++
++static int qca_uni_ss_phy_init(struct usb_phy *x)
++{
++      struct qca_uni_ss_phy *phy = phy_to_dw_phy(x);
++
++      /* assert SS PHY POR reset */
++      reset_control_assert(phy->por_rst);
++
++      msleep(20);
++
++      /* deassert SS PHY POR reset */
++      reset_control_deassert(phy->por_rst);
++
++      return 0;
++}
++
++static int qca_uni_ss_get_resources(struct platform_device *pdev,
++              struct qca_uni_ss_phy *phy)
++{
++      struct resource *res;
++
++      res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++      phy->base = devm_ioremap_resource(phy->dev, res);
++      if (IS_ERR(phy->base))
++              return PTR_ERR(phy->base);
++
++      phy->por_rst = devm_reset_control_get(phy->dev, "por_rst");
++      if (IS_ERR(phy->por_rst))
++              return PTR_ERR(phy->por_rst);
++
++      return 0;
++}
++
++static int qca_uni_ss_remove(struct platform_device *pdev)
++{
++      struct qca_uni_ss_phy *phy = platform_get_drvdata(pdev);
++
++      usb_remove_phy(&phy->phy);
++      return 0;
++}
++
++static const struct of_device_id qca_uni_ss_id_table[] = {
++      { .compatible = "qca,uni-ssphy" },
++      { /* Sentinel */ }
++};
++MODULE_DEVICE_TABLE(of, qca_uni_ss_id_table);
++
++static int qca_uni_ss_probe(struct platform_device *pdev)
++{
++      struct qca_uni_ss_phy *phy;
++      int ret;
++
++      phy = devm_kzalloc(&pdev->dev, sizeof(*phy), GFP_KERNEL);
++      if (!phy)
++              return -ENOMEM;
++
++      platform_set_drvdata(pdev, phy);
++      phy->dev = &pdev->dev;
++
++      ret = qca_uni_ss_get_resources(pdev, phy);
++      if (ret < 0) {
++              dev_err(&pdev->dev, "failed to request resources: %d\n", ret);
++              return ret;
++      }
++
++      phy->phy.dev = phy->dev;
++      phy->phy.label = "qca-uni-ssphy";
++      phy->phy.init = qca_uni_ss_phy_init;
++      phy->phy.shutdown = qca_uni_ss_phy_shutdown;
++      phy->phy.type = USB_PHY_TYPE_USB3;
++
++      ret = usb_add_phy_dev(&phy->phy);
++      return ret;
++}
++
++static struct platform_driver qca_uni_ss_driver = {
++      .probe = qca_uni_ss_probe,
++      .remove = qca_uni_ss_remove,
++      .driver = {
++              .name = "qca-uni-ssphy",
++              .owner = THIS_MODULE,
++              .of_match_table = qca_uni_ss_id_table,
++      },
++};
++
++module_platform_driver(qca_uni_ss_driver);
++
++MODULE_ALIAS("platform:qca-uni-ssphy");
++MODULE_LICENSE("Dual BSD/GPL");
++MODULE_DESCRIPTION("USB3 QCA UNI SSPHY driver");
diff --git a/target/linux/ipq40xx/patches-4.14/830-usb-dwc3-register-qca-ipq4019-dwc3-in-dwc3-of-simple.patch b/target/linux/ipq40xx/patches-4.14/830-usb-dwc3-register-qca-ipq4019-dwc3-in-dwc3-of-simple.patch
new file mode 100644
index 0000000..e9b3344
--- /dev/null
@@ -0,0 +1,25 @@
+From 08c18ab774368feb610d1eb952957bb1bb35129f Mon Sep 17 00:00:00 2001
+From: Christian Lamparter <chunkeey@gmail.com>
+Date: Sat, 19 Nov 2016 00:52:35 +0100
+Subject: [PATCH 37/38] usb: dwc3: register qca,ipq4019-dwc3 in dwc3-of-simple
+
+For host mode, the dwc3 found in the IPQ4019 can be driven
+by the dwc3-of-simple module. It will get more tricky for
+OTG, since that will require enabling VBUS and reconfiguring
+the registers.
+
+Signed-off-by: Christian Lamparter <chunkeey@gmail.com>
+---
+ drivers/usb/dwc3/dwc3-of-simple.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/usb/dwc3/dwc3-of-simple.c
++++ b/drivers/usb/dwc3/dwc3-of-simple.c
+@@ -176,6 +176,7 @@ static const struct dev_pm_ops dwc3_of_s
+ static const struct of_device_id of_dwc3_simple_match[] = {
+       { .compatible = "qcom,dwc3" },
++      { .compatible = "qca,ipq4019-dwc3" },
+       { .compatible = "rockchip,rk3399-dwc3" },
+       { .compatible = "xlnx,zynqmp-dwc3" },
+       { .compatible = "cavium,octeon-7130-usb-uctl" },
diff --git a/target/linux/ipq40xx/patches-4.14/850-soc-add-qualcomm-syscon.patch b/target/linux/ipq40xx/patches-4.14/850-soc-add-qualcomm-syscon.patch
new file mode 100644
index 0000000..59e277c
--- /dev/null
@@ -0,0 +1,177 @@
+From: Christian Lamparter <chunkeey@googlemail.com>
+Subject: SoC: add qualcomm syscon
+--- a/drivers/soc/qcom/Makefile
++++ b/drivers/soc/qcom/Makefile
+@@ -9,3 +9,4 @@ obj-$(CONFIG_QCOM_SMEM_STATE) += smem_st
+ obj-$(CONFIG_QCOM_SMP2P)      += smp2p.o
+ obj-$(CONFIG_QCOM_SMSM)       += smsm.o
+ obj-$(CONFIG_QCOM_WCNSS_CTRL) += wcnss_ctrl.o
++obj-$(CONFIG_QCOM_TCSR)        += qcom_tcsr.o
+--- a/drivers/soc/qcom/Kconfig
++++ b/drivers/soc/qcom/Kconfig
+@@ -78,6 +78,13 @@ config QCOM_SMSM
+         Say yes here to support the Qualcomm Shared Memory State Machine.
+         The state machine is represented by bits in shared memory.
++config QCOM_TCSR
++      tristate "QCOM Top Control and Status Registers"
++      depends on ARCH_QCOM
++      help
++        Say y here to enable TCSR support.  The TCSR provides control
++        functions for various peripherals.
++
+ config QCOM_WCNSS_CTRL
+       tristate "Qualcomm WCNSS control driver"
+       depends on ARCH_QCOM
+--- /dev/null
++++ b/drivers/soc/qcom/qcom_tcsr.c
+@@ -0,0 +1,98 @@
++/*
++ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License rev 2 and
++ * only rev 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ */
++
++#include <linux/clk.h>
++#include <linux/err.h>
++#include <linux/io.h>
++#include <linux/module.h>
++#include <linux/of.h>
++#include <linux/of_platform.h>
++#include <linux/platform_device.h>
++
++#define TCSR_USB_PORT_SEL     0xb0
++#define TCSR_USB_HSPHY_CONFIG 0xC
++
++#define TCSR_ESS_INTERFACE_SEL_OFFSET   0x0
++#define TCSR_ESS_INTERFACE_SEL_MASK     0xf
++
++#define TCSR_WIFI0_GLB_CFG_OFFSET     0x0
++#define TCSR_WIFI1_GLB_CFG_OFFSET     0x4
++#define TCSR_PNOC_SNOC_MEMTYPE_M0_M2  0x4
++
++static int tcsr_probe(struct platform_device *pdev)
++{
++      struct resource *res;
++      const struct device_node *node = pdev->dev.of_node;
++      void __iomem *base;
++      u32 val;
++
++      res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++      base = devm_ioremap_resource(&pdev->dev, res);
++      if (IS_ERR(base))
++              return PTR_ERR(base);
++
++      if (!of_property_read_u32(node, "qcom,usb-ctrl-select", &val)) {
++              dev_info(&pdev->dev, "setting usb port select = %d\n", val);
++              writel(val, base + TCSR_USB_PORT_SEL);
++      }
++
++      if (!of_property_read_u32(node, "qcom,usb-hsphy-mode-select", &val)) {
++              dev_info(&pdev->dev, "setting usb hs phy mode select = %x\n", val);
++              writel(val, base + TCSR_USB_HSPHY_CONFIG);
++      }
++
++      if (!of_property_read_u32(node, "qcom,ess-interface-select", &val)) {
++              u32 tmp = 0;
++              dev_info(&pdev->dev, "setting ess interface select = %x\n", val);
++              tmp = readl(base + TCSR_ESS_INTERFACE_SEL_OFFSET);
++              tmp = tmp & (~TCSR_ESS_INTERFACE_SEL_MASK);
++              tmp = tmp | (val & TCSR_ESS_INTERFACE_SEL_MASK);
++              writel(tmp, base + TCSR_ESS_INTERFACE_SEL_OFFSET);
++      }
++
++      if (!of_property_read_u32(node, "qcom,wifi_glb_cfg", &val)) {
++              dev_info(&pdev->dev, "setting wifi_glb_cfg = %x\n", val);
++              writel(val, base + TCSR_WIFI0_GLB_CFG_OFFSET);
++              writel(val, base + TCSR_WIFI1_GLB_CFG_OFFSET);
++      }
++
++      if (!of_property_read_u32(node, "qcom,wifi_noc_memtype_m0_m2", &val)) {
++              dev_info(&pdev->dev,
++                      "setting wifi_noc_memtype_m0_m2 = %x\n", val);
++              writel(val, base + TCSR_PNOC_SNOC_MEMTYPE_M0_M2);
++      }
++
++      return 0;
++}
++
++static const struct of_device_id tcsr_dt_match[] = {
++      { .compatible = "qcom,tcsr", },
++      { },
++};
++
++MODULE_DEVICE_TABLE(of, tcsr_dt_match);
++
++static struct platform_driver tcsr_driver = {
++      .driver = {
++              .name           = "tcsr",
++              .owner          = THIS_MODULE,
++              .of_match_table = tcsr_dt_match,
++      },
++      .probe = tcsr_probe,
++};
++
++module_platform_driver(tcsr_driver);
++
++MODULE_AUTHOR("Andy Gross <agross@codeaurora.org>");
++MODULE_DESCRIPTION("QCOM TCSR driver");
++MODULE_LICENSE("GPL v2");
+--- /dev/null
++++ b/include/dt-bindings/soc/qcom,tcsr.h
+@@ -0,0 +1,48 @@
++/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 and
++ * only version 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ */
++#ifndef __DT_BINDINGS_QCOM_TCSR_H
++#define __DT_BINDINGS_QCOM_TCSR_H
++
++#define TCSR_USB_SELECT_USB3_P0               0x1
++#define TCSR_USB_SELECT_USB3_P1               0x2
++#define TCSR_USB_SELECT_USB3_DUAL     0x3
++
++/* IPQ40xx HS PHY Mode Select */
++#define TCSR_USB_HSPHY_HOST_MODE      0x00E700E7
++#define TCSR_USB_HSPHY_DEVICE_MODE    0x00C700E7
++
++/* IPQ40xx ess interface mode select */
++#define TCSR_ESS_PSGMII              0
++#define TCSR_ESS_PSGMII_RGMII5       1
++#define TCSR_ESS_PSGMII_RMII0        2
++#define TCSR_ESS_PSGMII_RMII1        4
++#define TCSR_ESS_PSGMII_RMII0_RMII1  6
++#define TCSR_ESS_PSGMII_RGMII4       9
++
++/*
++ * IPQ40xx WiFi Global Config
++ * Bit 30: AXID_EN
++ * Enable AXI master bus AXID translation to ensure all transactions are submitted in order
++ * Bit 24: Use locally generated socslv_wxi_bvalid
++ * 1:  use locally generated socslv_wxi_bvalid for performance.
++ * 0:  use SNOC socslv_wxi_bvalid.
++ */
++#define TCSR_WIFI_GLB_CFG             0x41000000
++
++/* IPQ40xx MEM_TYPE_SEL_M0_M2 Select Bit 26:24 - 2 NORMAL */
++#define TCSR_WIFI_NOC_MEMTYPE_M0_M2   0x02222222
++
++/* TCSR A/B REG */
++#define IPQ806X_TCSR_REG_A_ADM_CRCI_MUX_SEL     0
++#define IPQ806X_TCSR_REG_B_ADM_CRCI_MUX_SEL     1
++
++#endif
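The comment block above describes TCSR_WIFI_GLB_CFG in terms of two individual bits (bit 30 AXID_EN, bit 24 local socslv_wxi_bvalid). A small standalone check, not part of the patch, confirming that the header constant is exactly the OR of those two bits:

#include <stdio.h>
#include <stdint.h>

#define TCSR_WIFI_GLB_CFG	0x41000000

int main(void)
{
	uint32_t axid_en = 1u << 30;		/* bit 30: AXID_EN */
	uint32_t local_bvalid = 1u << 24;	/* bit 24: locally generated socslv_wxi_bvalid */
	uint32_t expected = axid_en | local_bvalid;

	printf("expected 0x%08x, header value 0x%08x -> %s\n",
	       expected, (uint32_t)TCSR_WIFI_GLB_CFG,
	       expected == TCSR_WIFI_GLB_CFG ? "match" : "mismatch");
	return 0;
}

This prints "match", i.e. 0x41000000 = (1 << 30) | (1 << 24), so the value programmed into the TCSR_WIFI0/1_GLB_CFG offsets by qcom_tcsr.c enables both features described in the comment.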
diff --git a/target/linux/ipq40xx/patches-4.14/864-03-dts-ipq4019-ap-dk01-add-tcsr-config-to-dtsi.patch b/target/linux/ipq40xx/patches-4.14/864-03-dts-ipq4019-ap-dk01-add-tcsr-config-to-dtsi.patch
new file mode 100644
index 0000000..f171760
--- /dev/null
@@ -0,0 +1,42 @@
+--- a/arch/arm/boot/dts/qcom-ipq4019-ap.dk01.1.dtsi
++++ b/arch/arm/boot/dts/qcom-ipq4019-ap.dk01.1.dtsi
+@@ -15,12 +15,39 @@
+  */
+ #include "qcom-ipq4019.dtsi"
++#include <dt-bindings/soc/qcom,tcsr.h>
+ / {
+       model = "Qualcomm Technologies, Inc. IPQ4019/AP-DK01.1";
+       compatible = "qcom,ipq4019";
+       soc {
++              tcsr@194b000 {
++                      /* select hostmode */
++                      compatible = "qcom,tcsr";
++                      reg = <0x194b000 0x100>;
++                      qcom,usb-hsphy-mode-select = <TCSR_USB_HSPHY_HOST_MODE>;
++                      status = "ok";
++              };
++
++              ess_tcsr@1953000 {
++                      compatible = "qcom,tcsr";
++                      reg = <0x1953000 0x1000>;
++                      qcom,ess-interface-select = <TCSR_ESS_PSGMII>;
++              };
++
++              tcsr@1949000 {
++                      compatible = "qcom,tcsr";
++                      reg = <0x1949000 0x100>;
++                      qcom,wifi_glb_cfg = <TCSR_WIFI_GLB_CFG>;
++              };
++
++              tcsr@1957000 {
++                      compatible = "qcom,tcsr";
++                      reg = <0x1957000 0x100>;
++                      qcom,wifi_noc_memtype_m0_m2 = <TCSR_WIFI_NOC_MEMTYPE_M0_M2>;
++              };
++
+               rng@22000 {
+                       status = "ok";
+               };
diff --git a/target/linux/ipq40xx/patches-4.14/864-05-dts-ipq4019-ap-dk01-remove-spi-chip-node-from-dtsi.patch b/target/linux/ipq40xx/patches-4.14/864-05-dts-ipq4019-ap-dk01-remove-spi-chip-node-from-dtsi.patch
new file mode 100644
index 0000000..5cbb79c
--- /dev/null
+++ b/target/linux/ipq40xx/patches-4.14/864-05-dts-ipq4019-ap-dk01-remove-spi-chip-node-from-dtsi.patch
@@ -0,0 +1,17 @@
+--- a/arch/arm/boot/dts/qcom-ipq4019-ap.dk01.1.dtsi
++++ b/arch/arm/boot/dts/qcom-ipq4019-ap.dk01.1.dtsi
+@@ -93,14 +93,6 @@
+                       pinctrl-names = "default";
+                       status = "ok";
+                       cs-gpios = <&tlmm 54 0>;
+-
+-                      mx25l25635e@0 {
+-                              #address-cells = <1>;
+-                              #size-cells = <1>;
+-                              reg = <0>;
+-                              compatible = "mx25l25635e";
+-                              spi-max-frequency = <24000000>;
+-                      };
+               };
+               serial@78af000 {
diff --git a/target/linux/ipq40xx/patches-4.14/864-07-dts-ipq4019-ap-dk01.1-c1-add-spi-and-ram-nodes.patch b/target/linux/ipq40xx/patches-4.14/864-07-dts-ipq4019-ap-dk01.1-c1-add-spi-and-ram-nodes.patch
new file mode 100644
index 0000000..e9d2620
--- /dev/null
+++ b/target/linux/ipq40xx/patches-4.14/864-07-dts-ipq4019-ap-dk01.1-c1-add-spi-and-ram-nodes.patch
@@ -0,0 +1,115 @@
+--- a/arch/arm/boot/dts/qcom-ipq4019-ap.dk01.1-c1.dts
++++ b/arch/arm/boot/dts/qcom-ipq4019-ap.dk01.1-c1.dts
+@@ -19,4 +19,112 @@
+ / {
+       model = "Qualcomm Technologies, Inc. IPQ40xx/AP-DK01.1-C1";
++      memory {
++              device_type = "memory";
++              reg = <0x80000000 0x10000000>;
++      };
++
++      reserved-memory {
++              #address-cells = <0x1>;
++              #size-cells = <0x1>;
++              ranges;
++
++              apps_bl@87000000 {
++                      reg = <0x87000000 0x400000>;
++                      no-map;
++              };
++
++              sbl@87400000 {
++                      reg = <0x87400000 0x100000>;
++                      no-map;
++              };
++
++              cnss_debug@87500000 {
++                      reg = <0x87500000 0x600000>;
++                      no-map;
++              };
++
++              cpu_context_dump@87b00000 {
++                      reg = <0x87b00000 0x080000>;
++                      no-map;
++              };
++
++              tz_apps@87b80000 {
++                      reg = <0x87b80000 0x280000>;
++                      no-map;
++              };
++
++              smem@87e00000 {
++                      reg = <0x87e00000 0x080000>;
++                      no-map;
++              };
++
++              tz@87e80000 {
++                      reg = <0x87e80000 0x180000>;
++                      no-map;
++              };
++      };
++};
++
++&spi_0 {
++      mx25l25635f@0 {
++              compatible = "mx25l25635f", "jedec,spi-nor";
++              #address-cells = <1>;
++              #size-cells = <1>;
++              reg = <0>;
++              spi-max-frequency = <24000000>;
++
++              SBL1@0 {
++                      label = "SBL1";
++                      reg = <0x0 0x40000>;
++                      read-only;
++              };
++              MIBIB@40000 {
++                      label = "MIBIB";
++                      reg = <0x40000 0x20000>;
++                      read-only;
++              };
++              QSEE@60000 {
++                      label = "QSEE";
++                      reg = <0x60000 0x60000>;
++                      read-only;
++              };
++              CDT@c0000 {
++                      label = "CDT";
++                      reg = <0xc0000 0x10000>;
++                      read-only;
++              };
++              DDRPARAMS@d0000 {
++                      label = "DDRPARAMS";
++                      reg = <0xd0000 0x10000>;
++                      read-only;
++              };
++              APPSBLENV@e0000 {
++                      label = "APPSBLENV";
++                      reg = <0xe0000 0x10000>;
++                      read-only;
++              };
++              APPSBL@f0000 {
++                      label = "APPSBL";
++                      reg = <0xf0000 0x80000>;
++                      read-only;
++              };
++              ART@170000 {
++                      label = "ART";
++                      reg = <0x170000 0x10000>;
++                      read-only;
++              };
++              kernel@180000 {
++                      label = "kernel";
++                      reg = <0x180000 0x400000>;
++              };
++              rootfs@580000 {
++                      label = "rootfs";
++                      reg = <0x580000 0x1600000>;
++              };
++              firmware@180000 {
++                      label = "firmware";
++                      reg = <0x180000 0x1a00000>;
++              };
++      };
+ };
diff --git a/target/linux/ipq40xx/patches-4.14/864-08-dts-ipq4019-ap-dk01.1-c1-add-compatible-string.patch b/target/linux/ipq40xx/patches-4.14/864-08-dts-ipq4019-ap-dk01.1-c1-add-compatible-string.patch
new file mode 100644
index 0000000..2d4ff31
--- /dev/null
+++ b/target/linux/ipq40xx/patches-4.14/864-08-dts-ipq4019-ap-dk01.1-c1-add-compatible-string.patch
@@ -0,0 +1,10 @@
+--- a/arch/arm/boot/dts/qcom-ipq4019-ap.dk01.1-c1.dts
++++ b/arch/arm/boot/dts/qcom-ipq4019-ap.dk01.1-c1.dts
+@@ -18,6 +18,7 @@
+ / {
+       model = "Qualcomm Technologies, Inc. IPQ40xx/AP-DK01.1-C1";
++      compatible = "qcom,ap-dk01.1-c1", "qcom,ap-dk01.2-c1", "qcom,ipq4019";
+       memory {
+               device_type = "memory";
diff --git a/target/linux/ipq40xx/profiles/00-default.mk b/target/linux/ipq40xx/profiles/00-default.mk
new file mode 100644
index 0000000..f6ded85
--- /dev/null
+++ b/target/linux/ipq40xx/profiles/00-default.mk
@@ -0,0 +1,9 @@
+define Profile/Default
+  NAME:=Default Profile
+  PRIORITY:=1
+endef
+
+define Profile/Default/Description
+       Default package set compatible with most boards.
+endef
+$(eval $(call Profile,Default))
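
For illustration only: a board-specific profile would reuse the same Profile/<name> macro convention as the default profile added above. The profile name and package list in the sketch below are hypothetical assumptions and are not part of this commit, which ships only the default profile.

# Hypothetical sketch only -- not part of this commit.
# A board-specific profile follows the same convention as Profile/Default;
# the board name and PACKAGES list here are illustrative assumptions.
define Profile/EXAMPLE_BOARD
  NAME:=Example Board
  PACKAGES:=kmod-usb3
endef

define Profile/EXAMPLE_BOARD/Description
	Package set tailored to the hypothetical Example Board.
endef
$(eval $(call Profile,EXAMPLE_BOARD))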