Merge pull request #1472 from danielboulby-arm/db/Reclaim
author: danh-arm <dan.handley@arm.com>
Wed, 25 Jul 2018 14:16:24 +0000 (15:16 +0100)
committer: GitHub <noreply@github.com>
Wed, 25 Jul 2018 14:16:24 +0000 (15:16 +0100)
Rework page table setup for varying number of mem regions

236 files changed:
.gitignore
Makefile
acknowledgements.rst
bl1/aarch64/bl1_exceptions.S
bl1/bl1.ld.S
bl1/bl1_main.c
bl2/aarch64/bl2_el3_exceptions.S
bl2/bl2.ld.S
bl2/bl2_el3.ld.S
bl2u/bl2u.ld.S
bl31/aarch64/ea_delegate.S [new file with mode: 0644]
bl31/aarch64/runtime_exceptions.S
bl31/bl31.ld.S
bl31/bl31.mk
bl32/sp_min/sp_min.ld.S
bl32/sp_min/sp_min_main.c
bl32/tsp/aarch64/tsp_exceptions.S
bl32/tsp/tsp.ld.S
common/aarch64/early_exceptions.S
docs/marvell/build.txt [new file with mode: 0644]
docs/marvell/misc/mvebu-a8k-addr-map.txt [new file with mode: 0644]
docs/marvell/misc/mvebu-amb.txt [new file with mode: 0644]
docs/marvell/misc/mvebu-ccu.txt [new file with mode: 0644]
docs/marvell/misc/mvebu-io-win.txt [new file with mode: 0644]
docs/marvell/misc/mvebu-iob.txt [new file with mode: 0644]
docs/marvell/porting.txt [new file with mode: 0644]
docs/plat/rpi3.rst
docs/porting-guide.rst
docs/user-guide.rst
drivers/io/io_fip.c
drivers/io/io_memmap.c
drivers/io/io_semihosting.c
drivers/io/io_storage.c
drivers/marvell/amb_adec.c [new file with mode: 0644]
drivers/marvell/cache_llc.c [new file with mode: 0644]
drivers/marvell/ccu.c [new file with mode: 0644]
drivers/marvell/comphy.h [new file with mode: 0644]
drivers/marvell/comphy/comphy-cp110.h [new file with mode: 0644]
drivers/marvell/comphy/phy-comphy-cp110.c [new file with mode: 0644]
drivers/marvell/comphy/phy-comphy-cp110.h [new file with mode: 0644]
drivers/marvell/gwin.c [new file with mode: 0644]
drivers/marvell/i2c/a8k_i2c.c [new file with mode: 0644]
drivers/marvell/io_win.c [new file with mode: 0644]
drivers/marvell/iob.c [new file with mode: 0644]
drivers/marvell/mci.c [new file with mode: 0644]
drivers/marvell/mochi/ap807_setup.c [new file with mode: 0644]
drivers/marvell/mochi/apn806_setup.c [new file with mode: 0644]
drivers/marvell/mochi/cp110_setup.c [new file with mode: 0644]
drivers/marvell/thermal.c [new file with mode: 0644]
drivers/mmc/mmc.c [new file with mode: 0644]
include/common/aarch64/asm_macros.S
include/common/asm_macros_common.S
include/common/debug.h
include/drivers/marvell/a8k_i2c.h [new file with mode: 0644]
include/drivers/marvell/addr_map.h [new file with mode: 0644]
include/drivers/marvell/amb_adec.h [new file with mode: 0644]
include/drivers/marvell/aro.h [new file with mode: 0644]
include/drivers/marvell/cache_llc.h [new file with mode: 0644]
include/drivers/marvell/ccu.h [new file with mode: 0644]
include/drivers/marvell/gwin.h [new file with mode: 0644]
include/drivers/marvell/i2c.h [new file with mode: 0644]
include/drivers/marvell/io_win.h [new file with mode: 0644]
include/drivers/marvell/iob.h [new file with mode: 0644]
include/drivers/marvell/mci.h [new file with mode: 0644]
include/drivers/marvell/mochi/ap_setup.h [new file with mode: 0644]
include/drivers/marvell/mochi/cp110_setup.h [new file with mode: 0644]
include/drivers/marvell/thermal.h [new file with mode: 0644]
include/drivers/mmc.h [new file with mode: 0644]
include/lib/aarch32/arch.h
include/lib/aarch64/arch.h
include/lib/cpus/aarch32/cpu_macros.S
include/lib/cpus/aarch64/cortex_a72.h
include/lib/cpus/aarch64/cortex_deimos.h [new file with mode: 0644]
include/lib/cpus/aarch64/cortex_helios.h [new file with mode: 0644]
include/lib/cpus/aarch64/cpu_macros.S
include/lib/extensions/ras_arch.h
include/lib/utils.h
include/lib/xlat_tables/xlat_mmu_helpers.h
include/lib/xlat_tables/xlat_tables_defs.h
include/lib/xlat_tables/xlat_tables_v2.h
include/lib/xlat_tables/xlat_tables_v2_helpers.h
include/plat/arm/common/aarch64/arm_macros.S
include/plat/arm/common/arm_common.ld.S
include/plat/arm/common/plat_arm.h
include/plat/marvell/a8k/common/a8k_common.h [new file with mode: 0644]
include/plat/marvell/a8k/common/board_marvell_def.h [new file with mode: 0644]
include/plat/marvell/a8k/common/marvell_def.h [new file with mode: 0644]
include/plat/marvell/a8k/common/plat_marvell.h [new file with mode: 0644]
include/plat/marvell/a8k/common/plat_pm_trace.h [new file with mode: 0644]
include/plat/marvell/common/aarch64/cci_macros.S [new file with mode: 0644]
include/plat/marvell/common/aarch64/marvell_macros.S [new file with mode: 0644]
include/plat/marvell/common/marvell_plat_priv.h [new file with mode: 0644]
include/plat/marvell/common/marvell_pm.h [new file with mode: 0644]
include/plat/marvell/common/mvebu.h [new file with mode: 0644]
lib/cpus/aarch64/cortex_a76.S
lib/cpus/aarch64/cortex_deimos.S [new file with mode: 0644]
lib/cpus/aarch64/cortex_helios.S [new file with mode: 0644]
lib/cpus/aarch64/denver.S
lib/cpus/aarch64/wa_cve_2017_5715_bpiall.S
lib/cpus/aarch64/wa_cve_2017_5715_mmu.S
lib/utils/mem_region.c
lib/xlat_tables/xlat_tables_common.c
lib/xlat_tables_v2/aarch32/enable_mmu.S
lib/xlat_tables_v2/aarch32/xlat_tables_arch.c
lib/xlat_tables_v2/aarch32/xlat_tables_arch_private.h [deleted file]
lib/xlat_tables_v2/aarch64/enable_mmu.S
lib/xlat_tables_v2/aarch64/xlat_tables_arch.c
lib/xlat_tables_v2/aarch64/xlat_tables_arch_private.h [deleted file]
lib/xlat_tables_v2/xlat_tables.mk
lib/xlat_tables_v2/xlat_tables_context.c
lib/xlat_tables_v2/xlat_tables_core.c
lib/xlat_tables_v2/xlat_tables_private.h
lib/xlat_tables_v2/xlat_tables_utils.c
maintainers.rst
make_helpers/build_macros.mk
plat/arm/board/fvp/fvp_bl2u_setup.c
plat/arm/board/fvp/platform.mk
plat/arm/common/arm_bl1_setup.c
plat/arm/common/arm_bl2_setup.c
plat/arm/common/arm_bl2u_setup.c
plat/arm/common/arm_bl31_setup.c
plat/arm/common/arm_common.c
plat/arm/common/arm_common.mk
plat/arm/common/arm_image_load.c
plat/arm/common/arm_pm.c
plat/arm/css/common/css_common.mk
plat/arm/css/drivers/mhu/css_mhu_doorbell.c
plat/arm/css/drivers/scmi/scmi.h
plat/arm/css/drivers/scmi/scmi_ap_core_proto.c [new file with mode: 0644]
plat/arm/css/drivers/scmi/scmi_private.h
plat/arm/css/drivers/scp/css_pm_scmi.c
plat/arm/css/drivers/scp/css_pm_scpi.c
plat/arm/css/drivers/scp/css_sds.c
plat/arm/css/drivers/sds/sds.h
plat/common/aarch64/platform_helpers.S
plat/common/plat_bl1_common.c
plat/hisilicon/hikey/hikey_io_storage.c
plat/hisilicon/hikey/hikey_rotpk.S [new file with mode: 0644]
plat/hisilicon/hikey/hikey_tbbr.c [new file with mode: 0644]
plat/hisilicon/hikey/include/hikey_layout.h
plat/hisilicon/hikey/include/platform_def.h
plat/hisilicon/hikey/platform.mk
plat/imx/common/imx8_psci.c [new file with mode: 0644]
plat/imx/common/imx8_topology.c
plat/imx/common/include/plat_imx8.h
plat/imx/imx8qm/imx8qm_psci.c
plat/imx/imx8qm/platform.mk
plat/imx/imx8qx/imx8qx_psci.c
plat/imx/imx8qx/include/platform_def.h
plat/imx/imx8qx/platform.mk
plat/marvell/a8k/a70x0/board/dram_port.c [new file with mode: 0644]
plat/marvell/a8k/a70x0/board/marvell_plat_config.c [new file with mode: 0644]
plat/marvell/a8k/a70x0/mvebu_def.h [new file with mode: 0644]
plat/marvell/a8k/a70x0/platform.mk [new file with mode: 0644]
plat/marvell/a8k/a70x0_amc/board/dram_port.c [new file with mode: 0644]
plat/marvell/a8k/a70x0_amc/board/marvell_plat_config.c [new file with mode: 0644]
plat/marvell/a8k/a70x0_amc/mvebu_def.h [new file with mode: 0644]
plat/marvell/a8k/a70x0_amc/platform.mk [new file with mode: 0644]
plat/marvell/a8k/a80x0/board/dram_port.c [new file with mode: 0644]
plat/marvell/a8k/a80x0/board/marvell_plat_config.c [new file with mode: 0644]
plat/marvell/a8k/a80x0/mvebu_def.h [new file with mode: 0644]
plat/marvell/a8k/a80x0/platform.mk [new file with mode: 0644]
plat/marvell/a8k/a80x0_mcbin/board/dram_port.c [new file with mode: 0644]
plat/marvell/a8k/a80x0_mcbin/board/marvell_plat_config.c [new file with mode: 0644]
plat/marvell/a8k/a80x0_mcbin/mvebu_def.h [new file with mode: 0644]
plat/marvell/a8k/a80x0_mcbin/platform.mk [new file with mode: 0644]
plat/marvell/a8k/common/a8k_common.mk [new file with mode: 0644]
plat/marvell/a8k/common/aarch64/a8k_common.c [new file with mode: 0644]
plat/marvell/a8k/common/aarch64/plat_arch_config.c [new file with mode: 0644]
plat/marvell/a8k/common/aarch64/plat_helpers.S [new file with mode: 0644]
plat/marvell/a8k/common/include/a8k_plat_def.h [new file with mode: 0644]
plat/marvell/a8k/common/include/ddr_info.h [new file with mode: 0644]
plat/marvell/a8k/common/include/plat_macros.S [new file with mode: 0644]
plat/marvell/a8k/common/include/platform_def.h [new file with mode: 0644]
plat/marvell/a8k/common/mss/mss_a8k.mk [new file with mode: 0644]
plat/marvell/a8k/common/mss/mss_bl2_setup.c [new file with mode: 0644]
plat/marvell/a8k/common/mss/mss_pm_ipc.c [new file with mode: 0644]
plat/marvell/a8k/common/mss/mss_pm_ipc.h [new file with mode: 0644]
plat/marvell/a8k/common/plat_bl1_setup.c [new file with mode: 0644]
plat/marvell/a8k/common/plat_bl31_setup.c [new file with mode: 0644]
plat/marvell/a8k/common/plat_ble_setup.c [new file with mode: 0644]
plat/marvell/a8k/common/plat_pm.c [new file with mode: 0644]
plat/marvell/a8k/common/plat_pm_trace.c [new file with mode: 0644]
plat/marvell/a8k/common/plat_thermal.c [new file with mode: 0644]
plat/marvell/common/aarch64/marvell_common.c [new file with mode: 0644]
plat/marvell/common/aarch64/marvell_helpers.S [new file with mode: 0644]
plat/marvell/common/marvell_bl1_setup.c [new file with mode: 0644]
plat/marvell/common/marvell_bl2_setup.c [new file with mode: 0644]
plat/marvell/common/marvell_bl31_setup.c [new file with mode: 0644]
plat/marvell/common/marvell_cci.c [new file with mode: 0644]
plat/marvell/common/marvell_common.mk [new file with mode: 0644]
plat/marvell/common/marvell_ddr_info.c [new file with mode: 0644]
plat/marvell/common/marvell_gicv2.c [new file with mode: 0644]
plat/marvell/common/marvell_io_storage.c [new file with mode: 0644]
plat/marvell/common/marvell_pm.c [new file with mode: 0644]
plat/marvell/common/marvell_topology.c [new file with mode: 0644]
plat/marvell/common/mrvl_sip_svc.c [new file with mode: 0644]
plat/marvell/common/mss/mss_common.mk [new file with mode: 0644]
plat/marvell/common/mss/mss_ipc_drv.c [new file with mode: 0644]
plat/marvell/common/mss/mss_ipc_drv.h [new file with mode: 0644]
plat/marvell/common/mss/mss_mem.h [new file with mode: 0644]
plat/marvell/common/mss/mss_scp_bl2_format.h [new file with mode: 0644]
plat/marvell/common/mss/mss_scp_bootloader.c [new file with mode: 0644]
plat/marvell/common/mss/mss_scp_bootloader.h [new file with mode: 0644]
plat/marvell/common/plat_delay_timer.c [new file with mode: 0644]
plat/marvell/marvell.mk [new file with mode: 0644]
plat/marvell/version.mk [new file with mode: 0644]
plat/mediatek/mt6795/bl31.ld.S
plat/mediatek/mt6795/power_tracer.c
plat/mediatek/mt8173/power_tracer.c
plat/nvidia/tegra/common/tegra_bl31_setup.c
plat/rockchip/common/pmusram/pmu_sram_cpus_on.S
plat/rockchip/rk3399/drivers/pmu/plat_pmu_macros.S
plat/rpi3/aarch64/plat_helpers.S
plat/rpi3/include/platform_def.h
plat/rpi3/platform.mk
plat/rpi3/rpi3_bl1_setup.c
plat/rpi3/rpi3_bl31_setup.c
plat/rpi3/rpi3_common.c
plat/rpi3/rpi3_hw.h
plat/rpi3/rpi3_mbox.c [new file with mode: 0644]
plat/rpi3/rpi3_pm.c
plat/rpi3/rpi3_private.h
plat/rpi3/rpi3_rng.c [new file with mode: 0644]
plat/rpi3/rpi3_stack_protector.c [new file with mode: 0644]
plat/socionext/synquacer/platform.mk
plat/socionext/synquacer/sq_bl31_setup.c
services/spd/opteed/opteed_main.c
services/spd/opteed/opteed_private.h
services/std_svc/sdei/sdei_main.c
services/std_svc/spm/aarch64/spm_shim_exceptions.S
services/std_svc/spm/sp_setup.c
tools/cert_create/Makefile
tools/doimage/Makefile [new file with mode: 0644]
tools/doimage/doimage.c [new file with mode: 0644]
tools/doimage/doimage.mk [new file with mode: 0644]

index 4ece189ab25b664a651b34e2d005d1c11341218a..7f8642e3447e0495b72bb1d15db00cb972ff339d 100644 (file)
@@ -18,6 +18,7 @@ tools/cert_create/src/*.o
 tools/cert_create/src/**/*.o
 tools/cert_create/cert_create
 tools/cert_create/cert_create.exe
+tools/doimage/doimage
 
 # GNU GLOBAL files
 GPATH
index 180c5584208deff7f2361a027bb3cd03f7d38c38..533cb8ab1e4d1b5d9c8c0c3aa1916f2b7a873c24 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -85,7 +85,13 @@ $(eval $(call add_define,DEBUG))
 ifneq (${DEBUG}, 0)
         BUILD_TYPE     :=      debug
         TF_CFLAGS      +=      -g
-        ASFLAGS                +=      -g -Wa,--gdwarf-2
+
+        ifneq ($(findstring clang,$(notdir $(CC))),)
+             ASFLAGS           +=      -g
+        else
+             ASFLAGS           +=      -g -Wa,--gdwarf-2
+        endif
+
         # Use LOG_LEVEL_INFO by default for debug builds
         LOG_LEVEL      :=      40
 else
@@ -119,7 +125,7 @@ CC                  :=      ${CROSS_COMPILE}gcc
 CPP                    :=      ${CROSS_COMPILE}cpp
 AS                     :=      ${CROSS_COMPILE}gcc
 AR                     :=      ${CROSS_COMPILE}ar
-LD                     :=      ${CROSS_COMPILE}ld
+LINKER                 :=      ${CROSS_COMPILE}ld
 OC                     :=      ${CROSS_COMPILE}objcopy
 OD                     :=      ${CROSS_COMPILE}objdump
 NM                     :=      ${CROSS_COMPILE}nm
@@ -128,8 +134,8 @@ DTC                 :=      dtc
 
 # Use ${LD}.bfd instead if it exists (as absolute path or together with $PATH).
 ifneq ($(strip $(wildcard ${LD}.bfd) \
-       $(foreach dir,$(subst :, ,${PATH}),$(wildcard ${dir}/${LD}.bfd))),)
-LD                     :=      ${LD}.bfd
+       $(foreach dir,$(subst :, ,${PATH}),$(wildcard ${dir}/${LINKER}.bfd))),)
+LINKER                 :=      ${LINKER}.bfd
 endif
 
 ifeq (${ARM_ARCH_MAJOR},7)
@@ -143,12 +149,21 @@ endif
 ifeq ($(notdir $(CC)),armclang)
 TF_CFLAGS_aarch32      =       -target arm-arm-none-eabi $(march32-directive)
 TF_CFLAGS_aarch64      =       -target aarch64-arm-none-eabi -march=armv8-a
+LD                     =       $(LINKER)
+AS                     =       $(CC) -c -x assembler-with-cpp $(TF_CFLAGS_$(ARCH))
+CPP                    =       $(CC) -E $(TF_CFLAGS_$(ARCH))
+PP                     =       $(CC) -E $(TF_CFLAGS_$(ARCH))
 else ifneq ($(findstring clang,$(notdir $(CC))),)
 TF_CFLAGS_aarch32      =       $(target32-directive)
 TF_CFLAGS_aarch64      =       -target aarch64-elf
+LD                     =       $(LINKER)
+AS                     =       $(CC) -c -x assembler-with-cpp $(TF_CFLAGS_$(ARCH))
+CPP                    =       $(CC) -E
+PP                     =       $(CC) -E
 else
 TF_CFLAGS_aarch32      =       $(march32-directive)
 TF_CFLAGS_aarch64      =       -march=armv8-a
+LD                     =       $(LINKER)
 endif
 
 TF_CFLAGS_aarch32      +=      -mno-unaligned-access
index 9b81b6c5e97169808eef0d8b14170dd2c8be9b53..5686a580daaaa639366ec09de1c0637bac6e76ab 100644 (file)
@@ -14,5 +14,7 @@ Xilinx, Inc.
 
 NXP Semiconductors
 
+Marvell International Ltd.
+
 Individuals
 -----------
index 7ac028a5c8bbd6897d350b6b071d158f55218fb3..cf8a6a79df0e23e952dabcdc323fc7f9e325c407 100644 (file)
@@ -26,25 +26,25 @@ vector_entry SynchronousExceptionSP0
        mov     x0, #SYNC_EXCEPTION_SP_EL0
        bl      plat_report_exception
        no_ret  plat_panic_handler
-       check_vector_size SynchronousExceptionSP0
+end_vector_entry SynchronousExceptionSP0
 
 vector_entry IrqSP0
        mov     x0, #IRQ_SP_EL0
        bl      plat_report_exception
        no_ret  plat_panic_handler
-       check_vector_size IrqSP0
+end_vector_entry IrqSP0
 
 vector_entry FiqSP0
        mov     x0, #FIQ_SP_EL0
        bl      plat_report_exception
        no_ret  plat_panic_handler
-       check_vector_size FiqSP0
+end_vector_entry FiqSP0
 
 vector_entry SErrorSP0
        mov     x0, #SERROR_SP_EL0
        bl      plat_report_exception
        no_ret  plat_panic_handler
-       check_vector_size SErrorSP0
+end_vector_entry SErrorSP0
 
        /* -----------------------------------------------------
         * Current EL with SPx: 0x200 - 0x400
@@ -54,25 +54,25 @@ vector_entry SynchronousExceptionSPx
        mov     x0, #SYNC_EXCEPTION_SP_ELX
        bl      plat_report_exception
        no_ret  plat_panic_handler
-       check_vector_size SynchronousExceptionSPx
+end_vector_entry SynchronousExceptionSPx
 
 vector_entry IrqSPx
        mov     x0, #IRQ_SP_ELX
        bl      plat_report_exception
        no_ret  plat_panic_handler
-       check_vector_size IrqSPx
+end_vector_entry IrqSPx
 
 vector_entry FiqSPx
        mov     x0, #FIQ_SP_ELX
        bl      plat_report_exception
        no_ret  plat_panic_handler
-       check_vector_size FiqSPx
+end_vector_entry FiqSPx
 
 vector_entry SErrorSPx
        mov     x0, #SERROR_SP_ELX
        bl      plat_report_exception
        no_ret  plat_panic_handler
-       check_vector_size SErrorSPx
+end_vector_entry SErrorSPx
 
        /* -----------------------------------------------------
         * Lower EL using AArch64 : 0x400 - 0x600
@@ -91,25 +91,25 @@ vector_entry SynchronousExceptionA64
        b.ne    unexpected_sync_exception
 
        b       smc_handler64
-       check_vector_size SynchronousExceptionA64
+end_vector_entry SynchronousExceptionA64
 
 vector_entry IrqA64
        mov     x0, #IRQ_AARCH64
        bl      plat_report_exception
        no_ret  plat_panic_handler
-       check_vector_size IrqA64
+end_vector_entry IrqA64
 
 vector_entry FiqA64
        mov     x0, #FIQ_AARCH64
        bl      plat_report_exception
        no_ret  plat_panic_handler
-       check_vector_size FiqA64
+end_vector_entry FiqA64
 
 vector_entry SErrorA64
        mov     x0, #SERROR_AARCH64
        bl      plat_report_exception
        no_ret  plat_panic_handler
-       check_vector_size SErrorA64
+end_vector_entry SErrorA64
 
        /* -----------------------------------------------------
         * Lower EL using AArch32 : 0x600 - 0x800
@@ -119,25 +119,25 @@ vector_entry SynchronousExceptionA32
        mov     x0, #SYNC_EXCEPTION_AARCH32
        bl      plat_report_exception
        no_ret  plat_panic_handler
-       check_vector_size SynchronousExceptionA32
+end_vector_entry SynchronousExceptionA32
 
 vector_entry IrqA32
        mov     x0, #IRQ_AARCH32
        bl      plat_report_exception
        no_ret  plat_panic_handler
-       check_vector_size IrqA32
+end_vector_entry IrqA32
 
 vector_entry FiqA32
        mov     x0, #FIQ_AARCH32
        bl      plat_report_exception
        no_ret  plat_panic_handler
-       check_vector_size FiqA32
+end_vector_entry FiqA32
 
 vector_entry SErrorA32
        mov     x0, #SERROR_AARCH32
        bl      plat_report_exception
        no_ret  plat_panic_handler
-       check_vector_size SErrorA32
+end_vector_entry SErrorA32
 
 
 func smc_handler64
index 26c0ae4b34cbf4d41021cec46f9eaba098a9533b..fabe3ef66a217dd0ebd76b3aa814785e4bf59677 100644 (file)
@@ -28,10 +28,19 @@ SECTIONS
         *bl1_entrypoint.o(.text*)
         *(.text*)
         *(.vectors)
-        . = NEXT(PAGE_SIZE);
+        . = ALIGN(PAGE_SIZE);
         __TEXT_END__ = .;
      } >ROM
 
+     /* .ARM.extab and .ARM.exidx are only added because Clang need them */
+     .ARM.extab . : {
+        *(.ARM.extab* .gnu.linkonce.armextab.*)
+     } >ROM
+
+     .ARM.exidx . : {
+        *(.ARM.exidx* .gnu.linkonce.armexidx.*)
+     } >ROM
+
     .rodata . : {
         __RODATA_START__ = .;
         *(.rodata*)
@@ -152,7 +161,7 @@ SECTIONS
          * as device memory.  No other unexpected data must creep in.
          * Ensure the rest of the current memory page is unused.
          */
-        . = NEXT(PAGE_SIZE);
+        . = ALIGN(PAGE_SIZE);
         __COHERENT_RAM_END__ = .;
     } >RAM
 #endif
index 64b363c5f0be8b470a713281d3319618d4d14014..047cd6fb86da8e46c91afd669e62b8b9c9c7ffc0 100644 (file)
@@ -74,8 +74,8 @@ void bl1_calc_bl2_mem_layout(const meminfo_t *bl1_mem_layout,
  * populates a new memory layout for BL2 that ensures that BL1's data sections
  * resident in secure RAM are not visible to BL2.
  ******************************************************************************/
-void bl1_init_bl2_mem_layout(const meminfo_t *bl1_mem_layout,
-                            meminfo_t *bl2_mem_layout)
+void bl1_init_bl2_mem_layout(const struct meminfo *bl1_mem_layout,
+                            struct meminfo *bl2_mem_layout)
 {
        bl1_calc_bl2_mem_layout(bl1_mem_layout, bl2_mem_layout);
 }
index 987f6e35bf9828e5c0fdb52167a77162c7a69058..07d104062691615f409b9231b2f310607c4b3f26 100644 (file)
@@ -26,25 +26,25 @@ vector_entry SynchronousExceptionSP0
        mov     x0, #SYNC_EXCEPTION_SP_EL0
        bl      plat_report_exception
        no_ret  plat_panic_handler
-       check_vector_size SynchronousExceptionSP0
+end_vector_entry SynchronousExceptionSP0
 
 vector_entry IrqSP0
        mov     x0, #IRQ_SP_EL0
        bl      plat_report_exception
        no_ret  plat_panic_handler
-       check_vector_size IrqSP0
+end_vector_entry IrqSP0
 
 vector_entry FiqSP0
        mov     x0, #FIQ_SP_EL0
        bl      plat_report_exception
        no_ret  plat_panic_handler
-       check_vector_size FiqSP0
+end_vector_entry FiqSP0
 
 vector_entry SErrorSP0
        mov     x0, #SERROR_SP_EL0
        bl      plat_report_exception
        no_ret  plat_panic_handler
-       check_vector_size SErrorSP0
+end_vector_entry SErrorSP0
 
        /* -----------------------------------------------------
         * Current EL with SPx: 0x200 - 0x400
@@ -54,25 +54,25 @@ vector_entry SynchronousExceptionSPx
        mov     x0, #SYNC_EXCEPTION_SP_ELX
        bl      plat_report_exception
        no_ret  plat_panic_handler
-       check_vector_size SynchronousExceptionSPx
+end_vector_entry SynchronousExceptionSPx
 
 vector_entry IrqSPx
        mov     x0, #IRQ_SP_ELX
        bl      plat_report_exception
        no_ret  plat_panic_handler
-       check_vector_size IrqSPx
+end_vector_entry IrqSPx
 
 vector_entry FiqSPx
        mov     x0, #FIQ_SP_ELX
        bl      plat_report_exception
        no_ret  plat_panic_handler
-       check_vector_size FiqSPx
+end_vector_entry FiqSPx
 
 vector_entry SErrorSPx
        mov     x0, #SERROR_SP_ELX
        bl      plat_report_exception
        no_ret  plat_panic_handler
-       check_vector_size SErrorSPx
+end_vector_entry SErrorSPx
 
        /* -----------------------------------------------------
         * Lower EL using AArch64 : 0x400 - 0x600
@@ -82,25 +82,25 @@ vector_entry SynchronousExceptionA64
        mov     x0, #SYNC_EXCEPTION_AARCH64
        bl      plat_report_exception
        no_ret  plat_panic_handler
-       check_vector_size SynchronousExceptionA64
+end_vector_entry SynchronousExceptionA64
 
 vector_entry IrqA64
        mov     x0, #IRQ_AARCH64
        bl      plat_report_exception
        no_ret  plat_panic_handler
-       check_vector_size IrqA64
+end_vector_entry IrqA64
 
 vector_entry FiqA64
        mov     x0, #FIQ_AARCH64
        bl      plat_report_exception
        no_ret  plat_panic_handler
-       check_vector_size FiqA64
+end_vector_entry FiqA64
 
 vector_entry SErrorA64
        mov     x0, #SERROR_AARCH64
        bl      plat_report_exception
        no_ret  plat_panic_handler
-       check_vector_size SErrorA64
+end_vector_entry SErrorA64
 
        /* -----------------------------------------------------
         * Lower EL using AArch32 : 0x600 - 0x800
@@ -110,22 +110,22 @@ vector_entry SynchronousExceptionA32
        mov     x0, #SYNC_EXCEPTION_AARCH32
        bl      plat_report_exception
        no_ret  plat_panic_handler
-       check_vector_size SynchronousExceptionA32
+end_vector_entry SynchronousExceptionA32
 
 vector_entry IrqA32
        mov     x0, #IRQ_AARCH32
        bl      plat_report_exception
        no_ret  plat_panic_handler
-       check_vector_size IrqA32
+end_vector_entry IrqA32
 
 vector_entry FiqA32
        mov     x0, #FIQ_AARCH32
        bl      plat_report_exception
        no_ret  plat_panic_handler
-       check_vector_size FiqA32
+end_vector_entry FiqA32
 
 vector_entry SErrorA32
        mov     x0, #SERROR_AARCH32
        bl      plat_report_exception
        no_ret  plat_panic_handler
-       check_vector_size SErrorA32
+end_vector_entry SErrorA32
index 69c22eb328a72452e1d2397aba72164bc5dae31c..6d26cdb226855a8cab44ea40180b76c4644d6a2e 100644 (file)
@@ -28,10 +28,19 @@ SECTIONS
         *bl2_entrypoint.o(.text*)
         *(.text*)
         *(.vectors)
-        . = NEXT(PAGE_SIZE);
+        . = ALIGN(PAGE_SIZE);
         __TEXT_END__ = .;
      } >RAM
 
+     /* .ARM.extab and .ARM.exidx are only added because Clang need them */
+     .ARM.extab . : {
+        *(.ARM.extab* .gnu.linkonce.armextab.*)
+     } >RAM
+
+     .ARM.exidx . : {
+        *(.ARM.exidx* .gnu.linkonce.armexidx.*)
+     } >RAM
+
     .rodata . : {
         __RODATA_START__ = .;
         *(.rodata*)
@@ -42,7 +51,7 @@ SECTIONS
         KEEP(*(.img_parser_lib_descs))
         __PARSER_LIB_DESCS_END__ = .;
 
-        . = NEXT(PAGE_SIZE);
+        . = ALIGN(PAGE_SIZE);
         __RODATA_END__ = .;
     } >RAM
 #else
@@ -65,7 +74,7 @@ SECTIONS
          * read-only, executable.  No RW data from the next section must
          * creep in.  Ensure the rest of the current memory page is unused.
          */
-        . = NEXT(PAGE_SIZE);
+        . = ALIGN(PAGE_SIZE);
         __RO_END__ = .;
     } >RAM
 #endif
@@ -131,7 +140,7 @@ SECTIONS
          * as device memory.  No other unexpected data must creep in.
          * Ensure the rest of the current memory page is unused.
          */
-        . = NEXT(PAGE_SIZE);
+        . = ALIGN(PAGE_SIZE);
         __COHERENT_RAM_END__ = .;
     } >RAM
 #endif
index 0f91edc952e720c93ece12b0994247f22bb126de..82ab427d1716723fc42d38ecf7564fc36a7cd5bf 100644 (file)
@@ -42,7 +42,7 @@ SECTIONS
        __TEXT_RESIDENT_END__ = .;
         *(.text*)
         *(.vectors)
-        . = NEXT(PAGE_SIZE);
+        . = ALIGN(PAGE_SIZE);
         __TEXT_END__ = .;
 #if BL2_IN_XIP_MEM
      } >ROM
@@ -69,7 +69,7 @@ SECTIONS
         KEEP(*(cpu_ops))
         __CPU_OPS_END__ = .;
 
-        . = NEXT(PAGE_SIZE);
+        . = ALIGN(PAGE_SIZE);
         __RODATA_END__ = .;
 #if BL2_IN_XIP_MEM
     } >ROM
@@ -111,7 +111,7 @@ SECTIONS
          * read-only, executable.  No RW data from the next section must
          * creep in.  Ensure the rest of the current memory page is unused.
          */
-        . = NEXT(PAGE_SIZE);
+        . = ALIGN(PAGE_SIZE);
 
         __RO_END__ = .;
 #if BL2_IN_XIP_MEM
@@ -195,7 +195,7 @@ SECTIONS
          * as device memory.  No other unexpected data must creep in.
          * Ensure the rest of the current memory page is unused.
          */
-        . = NEXT(PAGE_SIZE);
+        . = ALIGN(PAGE_SIZE);
         __COHERENT_RAM_END__ = .;
     } >RAM
 #endif
index 7b97758d9b008e24dffe7afc5f4c84e7075dea0e..3db5f894666cd4efc522a0ea0f42b996cd0d49bb 100644 (file)
@@ -28,14 +28,23 @@ SECTIONS
         *bl2u_entrypoint.o(.text*)
         *(.text*)
         *(.vectors)
-        . = NEXT(PAGE_SIZE);
+        . = ALIGN(PAGE_SIZE);
         __TEXT_END__ = .;
      } >RAM
 
+     /* .ARM.extab and .ARM.exidx are only added because Clang need them */
+     .ARM.extab . : {
+        *(.ARM.extab* .gnu.linkonce.armextab.*)
+     } >RAM
+
+     .ARM.exidx . : {
+        *(.ARM.exidx* .gnu.linkonce.armexidx.*)
+     } >RAM
+
     .rodata . : {
         __RODATA_START__ = .;
         *(.rodata*)
-        . = NEXT(PAGE_SIZE);
+        . = ALIGN(PAGE_SIZE);
         __RODATA_END__ = .;
     } >RAM
 #else
@@ -52,7 +61,7 @@ SECTIONS
          * read-only, executable.  No RW data from the next section must
          * creep in.  Ensure the rest of the current memory page is unused.
          */
-        . = NEXT(PAGE_SIZE);
+        . = ALIGN(PAGE_SIZE);
         __RO_END__ = .;
     } >RAM
 #endif
@@ -118,7 +127,7 @@ SECTIONS
          * as device memory.  No other unexpected data must creep in.
          * Ensure the rest of the current memory page is unused.
          */
-        . = NEXT(PAGE_SIZE);
+        . = ALIGN(PAGE_SIZE);
         __COHERENT_RAM_END__ = .;
     } >RAM
 #endif
diff --git a/bl31/aarch64/ea_delegate.S b/bl31/aarch64/ea_delegate.S
new file mode 100644 (file)
index 0000000..9d7c5e8
--- /dev/null
@@ -0,0 +1,263 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+
+#include <assert_macros.S>
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <context.h>
+#include <ea_handle.h>
+#include <ras_arch.h>
+
+
+       .globl  handle_lower_el_ea_esb
+       .globl  enter_lower_el_sync_ea
+       .globl  enter_lower_el_async_ea
+
+
+/*
+ * Function to delegate External Aborts synchronized by ESB instruction at EL3
+ * vector entry. This function assumes GP registers x0-x29 have been saved, and
+ * are available for use. It delegates the handling of the EA to platform
+ * handler, and returns only upon successfully handling the EA; otherwise
+ * panics. On return from this function, the original exception handler is
+ * expected to resume.
+ */
+func handle_lower_el_ea_esb
+       mov     x0, #ERROR_EA_ESB
+       mrs     x1, DISR_EL1
+       b       ea_proceed
+endfunc handle_lower_el_ea_esb
+
+
+/*
+ * This function forms the tail end of Synchronous Exception entry from lower
+ * EL, and expects to handle only Synchronous External Aborts from lower EL. If
+ * any other kind of exception is detected, then this function reports unhandled
+ * exception.
+ *
+ * Since it's part of exception vector, this function doesn't expect any GP
+ * registers to have been saved. It delegates the handling of the EA to platform
+ * handler, and upon successfully handling the EA, exits EL3; otherwise panics.
+ */
+func enter_lower_el_sync_ea
+       /*
+        * Explicitly save x30 so as to free up a register and to enable
+        * branching.
+        */
+       str     x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
+
+       mrs     x30, esr_el3
+       ubfx    x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH
+
+       /* Check for I/D aborts from lower EL */
+       cmp     x30, #EC_IABORT_LOWER_EL
+       b.eq    1f
+
+       cmp     x30, #EC_DABORT_LOWER_EL
+       b.ne    2f
+
+1:
+       /* Test for EA bit in the instruction syndrome */
+       mrs     x30, esr_el3
+       tbz     x30, #ESR_ISS_EABORT_EA_BIT, 2f
+
+       /* Save GP registers */
+       bl      save_gp_registers
+
+       /* Setup exception class and syndrome arguments for platform handler */
+       mov     x0, #ERROR_EA_SYNC
+       mrs     x1, esr_el3
+       adr     x30, el3_exit
+       b       delegate_sync_ea
+
+2:
+       /* Synchronous exceptions other than the above are assumed to be EA */
+       ldr     x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
+       no_ret  report_unhandled_exception
+endfunc enter_lower_el_sync_ea
+
+
+/*
+ * This function handles SErrors from lower ELs.
+ *
+ * Since it's part of exception vector, this function doesn't expect any GP
+ * registers to have been saved. It delegates the handling of the EA to platform
+ * handler, and upon successfully handling the EA, exits EL3; otherwise panics.
+ */
+func enter_lower_el_async_ea
+       /*
+        * Explicitly save x30 so as to free up a register and to enable
+        * branching
+        */
+       str     x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
+
+       /* Save GP registers */
+       bl      save_gp_registers
+
+       /* Setup exception class and syndrome arguments for platform handler */
+       mov     x0, #ERROR_EA_ASYNC
+       mrs     x1, esr_el3
+       adr     x30, el3_exit
+       b       delegate_async_ea
+endfunc enter_lower_el_async_ea
+
+
+/*
+ * Prelude for Synchronous External Abort handling. This function assumes that
+ * all GP registers have been saved by the caller.
+ *
+ * x0: EA reason
+ * x1: EA syndrome
+ */
+func delegate_sync_ea
+#if RAS_EXTENSION
+       /*
+        * Check for Uncontainable error type. If so, route to the platform
+        * fatal error handler rather than the generic EA one.
+        */
+       ubfx    x2, x1, #EABORT_SET_SHIFT, #EABORT_SET_WIDTH
+       cmp     x2, #ERROR_STATUS_SET_UC
+       b.ne    1f
+
+       /* Check fault status code */
+       ubfx    x3, x1, #EABORT_DFSC_SHIFT, #EABORT_DFSC_WIDTH
+       cmp     x3, #SYNC_EA_FSC
+       b.ne    1f
+
+       no_ret  plat_handle_uncontainable_ea
+1:
+#endif
+
+       b       ea_proceed
+endfunc delegate_sync_ea
+
+
+/*
+ * Prelude for Asynchronous External Abort handling. This function assumes that
+ * all GP registers have been saved by the caller.
+ *
+ * x0: EA reason
+ * x1: EA syndrome
+ */
+func delegate_async_ea
+#if RAS_EXTENSION
+       /*
+        * Check for Implementation Defined Syndrome. If so, skip checking
+        * Uncontainable error type from the syndrome as the format is unknown.
+        */
+       tbnz    x1, #SERROR_IDS_BIT, 1f
+
+       /*
+        * Check for Uncontainable error type. If so, route to the platform
+        * fatal error handler rather than the generic EA one.
+        */
+       ubfx    x2, x1, #EABORT_AET_SHIFT, #EABORT_AET_WIDTH
+       cmp     x2, #ERROR_STATUS_UET_UC
+       b.ne    1f
+
+       /* Check DFSC for SError type */
+       ubfx    x3, x1, #EABORT_DFSC_SHIFT, #EABORT_DFSC_WIDTH
+       cmp     x3, #DFSC_SERROR
+       b.ne    1f
+
+       no_ret  plat_handle_uncontainable_ea
+1:
+#endif
+
+       b       ea_proceed
+endfunc delegate_async_ea
+
+
+/*
+ * Delegate External Abort handling to platform's EA handler. This function
+ * assumes that all GP registers have been saved by the caller.
+ *
+ * x0: EA reason
+ * x1: EA syndrome
+ */
+func ea_proceed
+       /*
+        * If the ESR loaded earlier is not zero, we were processing an EA
+        * already, and this is a double fault.
+        */
+       ldr     x5, [sp, #CTX_EL3STATE_OFFSET + CTX_ESR_EL3]
+       cbz     x5, 1f
+       no_ret  plat_handle_double_fault
+
+1:
+       /* Save EL3 state */
+       mrs     x2, spsr_el3
+       mrs     x3, elr_el3
+       stp     x2, x3, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]
+
+       /*
+        * Save ESR as handling might involve lower ELs, and returning back to
+        * EL3 from there would trample the original ESR.
+        */
+       mrs     x4, scr_el3
+       mrs     x5, esr_el3
+       stp     x4, x5, [sp, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]
+
+       /*
+        * Setup rest of arguments, and call platform External Abort handler.
+        *
+        * x0: EA reason (already in place)
+        * x1: Exception syndrome (already in place).
+        * x2: Cookie (unused for now).
+        * x3: Context pointer.
+        * x4: Flags (security state from SCR for now).
+        */
+       mov     x2, xzr
+       mov     x3, sp
+       ubfx    x4, x4, #0, #1
+
+       /* Switch to runtime stack */
+       ldr     x5, [sp, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]
+       msr     spsel, #0
+       mov     sp, x5
+
+       mov     x29, x30
+#if ENABLE_ASSERTIONS
+       /* Stash the stack pointer */
+       mov     x28, sp
+#endif
+       bl      plat_ea_handler
+
+#if ENABLE_ASSERTIONS
+       /*
+        * Error handling flows might involve long jumps; so upon returning from
+        * the platform error handler, validate that we've completely
+        * unwound the stack.
+        */
+       mov     x27, sp
+       cmp     x28, x27
+       ASM_ASSERT(eq)
+#endif
+
+       /* Make SP point to context */
+       msr     spsel, #1
+
+       /* Restore EL3 state and ESR */
+       ldp     x1, x2, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]
+       msr     spsr_el3, x1
+       msr     elr_el3, x2
+
+       /* Restore ESR_EL3 and SCR_EL3 */
+       ldp     x3, x4, [sp, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]
+       msr     scr_el3, x3
+       msr     esr_el3, x4
+
+#if ENABLE_ASSERTIONS
+       cmp     x4, xzr
+       ASM_ASSERT(ne)
+#endif
+
+       /* Clear ESR storage */
+       str     xzr, [sp, #CTX_EL3STATE_OFFSET + CTX_ESR_EL3]
+
+       ret     x29
+endfunc ea_proceed
index 346cd3b313a5e56fccdfbc7e6b46cd80ae50b9af..54db6814d542d4a195a35a872ea1aa6fe4c9f68b 100644 (file)
@@ -66,9 +66,7 @@
 
        /* Save GP registers and restore them afterwards */
        bl      save_gp_registers
-       mov     x0, #ERROR_EA_ESB
-       mrs     x1, DISR_EL1
-       bl      delegate_ea
+       bl      handle_lower_el_ea_esb
        bl      restore_gp_registers
 
 1:
 #endif
        .endm
 
-       /*
-        * Handle External Abort by delegating to the platform's EA handler.
-        * Once the platform handler returns, the macro exits EL3 and returns to
-        * where the abort was taken from.
-        *
-        * This macro assumes that x30 is available for use.
-        *
-        * 'abort_type' is a constant passed to the platform handler, indicating
-        * the cause of the External Abort.
-        */
-       .macro handle_ea abort_type
-       /* Save GP registers */
-       bl      save_gp_registers
-
-       /* Setup exception class and syndrome arguments for platform handler */
-       mov     x0, \abort_type
-       mrs     x1, esr_el3
-       adr     x30, el3_exit
-       b       delegate_ea
-       .endm
-
        /* ---------------------------------------------------------------------
         * This macro handles Synchronous exceptions.
         * Only SMC exceptions are supported.
        cmp     x30, #EC_AARCH64_SMC
        b.eq    smc_handler64
 
-       /* Check for I/D aborts from lower EL */
-       cmp     x30, #EC_IABORT_LOWER_EL
-       b.eq    1f
-
-       cmp     x30, #EC_DABORT_LOWER_EL
-       b.ne    2f
-
-1:
-       /* Test for EA bit in the instruction syndrome */
-       mrs     x30, esr_el3
-       tbz     x30, #ESR_ISS_EABORT_EA_BIT, 2f
-       handle_ea #ERROR_EA_SYNC
-
-2:
-       /* Other kinds of synchronous exceptions are not handled */
+       /* Synchronous exceptions other than the above are assumed to be EA */
        ldr     x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
-       b       report_unhandled_exception
+       b       enter_lower_el_sync_ea
        .endm
 
 
@@ -233,7 +196,7 @@ vector_base runtime_exceptions
 vector_entry sync_exception_sp_el0
        /* We don't expect any synchronous exceptions from EL3 */
        b       report_unhandled_exception
-       check_vector_size sync_exception_sp_el0
+end_vector_entry sync_exception_sp_el0
 
 vector_entry irq_sp_el0
        /*
@@ -241,17 +204,17 @@ vector_entry irq_sp_el0
         * error. Loop infinitely.
         */
        b       report_unhandled_interrupt
-       check_vector_size irq_sp_el0
+end_vector_entry irq_sp_el0
 
 
 vector_entry fiq_sp_el0
        b       report_unhandled_interrupt
-       check_vector_size fiq_sp_el0
+end_vector_entry fiq_sp_el0
 
 
 vector_entry serror_sp_el0
-       b       report_unhandled_exception
-       check_vector_size serror_sp_el0
+       no_ret  plat_handle_el3_ea
+end_vector_entry serror_sp_el0
 
        /* ---------------------------------------------------------------------
         * Current EL with SP_ELx: 0x200 - 0x400
@@ -265,19 +228,19 @@ vector_entry sync_exception_sp_elx
         * corrupted.
         */
        b       report_unhandled_exception
-       check_vector_size sync_exception_sp_elx
+end_vector_entry sync_exception_sp_elx
 
 vector_entry irq_sp_elx
        b       report_unhandled_interrupt
-       check_vector_size irq_sp_elx
+end_vector_entry irq_sp_elx
 
 vector_entry fiq_sp_elx
        b       report_unhandled_interrupt
-       check_vector_size fiq_sp_elx
+end_vector_entry fiq_sp_elx
 
 vector_entry serror_sp_elx
-       b       report_unhandled_exception
-       check_vector_size serror_sp_elx
+       no_ret  plat_handle_el3_ea
+end_vector_entry serror_sp_elx
 
        /* ---------------------------------------------------------------------
         * Lower EL using AArch64 : 0x400 - 0x600
@@ -292,28 +255,22 @@ vector_entry sync_exception_aarch64
         */
        check_and_unmask_ea
        handle_sync_exception
-       check_vector_size sync_exception_aarch64
+end_vector_entry sync_exception_aarch64
 
 vector_entry irq_aarch64
        check_and_unmask_ea
        handle_interrupt_exception irq_aarch64
-       check_vector_size irq_aarch64
+end_vector_entry irq_aarch64
 
 vector_entry fiq_aarch64
        check_and_unmask_ea
        handle_interrupt_exception fiq_aarch64
-       check_vector_size fiq_aarch64
+end_vector_entry fiq_aarch64
 
 vector_entry serror_aarch64
        msr     daifclr, #DAIF_ABT_BIT
-
-       /*
-        * Explicitly save x30 so as to free up a register and to enable
-        * branching
-        */
-       str     x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
-       handle_ea #ERROR_EA_ASYNC
-       check_vector_size serror_aarch64
+       b       enter_lower_el_async_ea
+end_vector_entry serror_aarch64
 
        /* ---------------------------------------------------------------------
         * Lower EL using AArch32 : 0x600 - 0x800
@@ -328,28 +285,22 @@ vector_entry sync_exception_aarch32
         */
        check_and_unmask_ea
        handle_sync_exception
-       check_vector_size sync_exception_aarch32
+end_vector_entry sync_exception_aarch32
 
 vector_entry irq_aarch32
        check_and_unmask_ea
        handle_interrupt_exception irq_aarch32
-       check_vector_size irq_aarch32
+end_vector_entry irq_aarch32
 
 vector_entry fiq_aarch32
        check_and_unmask_ea
        handle_interrupt_exception fiq_aarch32
-       check_vector_size fiq_aarch32
+end_vector_entry fiq_aarch32
 
 vector_entry serror_aarch32
        msr     daifclr, #DAIF_ABT_BIT
-
-       /*
-        * Explicitly save x30 so as to free up a register and to enable
-        * branching
-        */
-       str     x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
-       handle_ea #ERROR_EA_ASYNC
-       check_vector_size serror_aarch32
+       b       enter_lower_el_async_ea
+end_vector_entry serror_aarch32
 
 
        /* ---------------------------------------------------------------------
@@ -525,62 +476,3 @@ rt_svc_fw_critical_error:
        msr     spsel, #1
        no_ret  report_unhandled_exception
 endfunc smc_handler
-
-/*
- * Delegate External Abort handling to platform's EA handler. This function
- * assumes that all GP registers have been saved by the caller.
- *
- * x0: EA reason
- * x1: EA syndrome
- */
-func delegate_ea
-       /* Save EL3 state */
-       mrs     x2, spsr_el3
-       mrs     x3, elr_el3
-       stp     x2, x3, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]
-
-       /*
-        * Save ESR as handling might involve lower ELs, and returning back to
-        * EL3 from there would trample the original ESR.
-        */
-       mrs     x4, scr_el3
-       mrs     x5, esr_el3
-       stp     x4, x5, [sp, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]
-
-       /*
-        * Setup rest of arguments, and call platform External Abort handler.
-        *
-        * x0: EA reason (already in place)
-        * x1: Exception syndrome (already in place).
-        * x2: Cookie (unused for now).
-        * x3: Context pointer.
-        * x4: Flags (security state from SCR for now).
-        */
-       mov     x2, xzr
-       mov     x3, sp
-       ubfx    x4, x4, #0, #1
-
-       /* Switch to runtime stack */
-       ldr     x5, [sp, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]
-       msr     spsel, #0
-       mov     sp, x5
-
-       mov     x29, x30
-       bl      plat_ea_handler
-       mov     x30, x29
-
-       /* Make SP point to context */
-       msr     spsel, #1
-
-       /* Restore EL3 state */
-       ldp     x1, x2, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]
-       msr     spsr_el3, x1
-       msr     elr_el3, x2
-
-       /* Restore ESR_EL3 and SCR_EL3 */
-       ldp     x3, x4, [sp, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]
-       msr     scr_el3, x3
-       msr     esr_el3, x4
-
-       ret
-endfunc delegate_ea
index 59df9b80e24abaeef6640f8401f54684e73d1287..66cb3f30e4bd4384713df4cd533d6b6845aee124 100644 (file)
@@ -32,7 +32,7 @@ SECTIONS
         *bl31_entrypoint.o(.text*)
         *(.text*)
         *(.vectors)
-        . = NEXT(PAGE_SIZE);
+        . = ALIGN(PAGE_SIZE);
         __TEXT_END__ = .;
     } >RAM
 
@@ -67,7 +67,7 @@ SECTIONS
         . = ALIGN(8);
 #include <pubsub_events.h>
 
-        . = NEXT(PAGE_SIZE);
+        . = ALIGN(PAGE_SIZE);
         __RODATA_END__ = .;
     } >RAM
 #else
@@ -111,7 +111,7 @@ SECTIONS
          * executable.  No RW data from the next section must creep in.
          * Ensure the rest of the current memory page is unused.
          */
-        . = NEXT(PAGE_SIZE);
+        . = ALIGN(PAGE_SIZE);
         __RO_END__ = .;
     } >RAM
 #endif
@@ -131,7 +131,7 @@ SECTIONS
     spm_shim_exceptions : ALIGN(PAGE_SIZE) {
         __SPM_SHIM_EXCEPTIONS_START__ = .;
         *(.spm_shim_exceptions)
-        . = NEXT(PAGE_SIZE);
+        . = ALIGN(PAGE_SIZE);
         __SPM_SHIM_EXCEPTIONS_END__ = .;
     } >RAM
 #endif
@@ -246,7 +246,7 @@ SECTIONS
          * as device memory.  No other unexpected data must creep in.
          * Ensure the rest of the current memory page is unused.
          */
-        . = NEXT(PAGE_SIZE);
+        . = ALIGN(PAGE_SIZE);
         __COHERENT_RAM_END__ = .;
     } >RAM
 #endif
index 307ddab694fab41867d72f4123f81ce5eb7eebd9..bff965386528625b130b3e7de6001a69037e2f89 100644 (file)
@@ -19,6 +19,7 @@ BL31_SOURCES          +=      bl31/bl31_main.c                                \
                                bl31/interrupt_mgmt.c                           \
                                bl31/aarch64/bl31_entrypoint.S                  \
                                bl31/aarch64/crash_reporting.S                  \
+                               bl31/aarch64/ea_delegate.S                      \
                                bl31/aarch64/runtime_exceptions.S               \
                                bl31/bl31_context_mgmt.c                        \
                                common/runtime_svc.c                            \
index 71de883975e3d91db70c205683b86222967436c9..ce6c954eca54d95d4eb2e01c333092c985c3b8cc 100644 (file)
@@ -28,10 +28,19 @@ SECTIONS
         *entrypoint.o(.text*)
         *(.text*)
         *(.vectors)
-        . = NEXT(PAGE_SIZE);
+        . = ALIGN(PAGE_SIZE);
         __TEXT_END__ = .;
     } >RAM
 
+     /* .ARM.extab and .ARM.exidx are only added because Clang needs them */
+     .ARM.extab . : {
+        *(.ARM.extab* .gnu.linkonce.armextab.*)
+     } >RAM
+
+     .ARM.exidx . : {
+        *(.ARM.exidx* .gnu.linkonce.armexidx.*)
+     } >RAM
+
     .rodata . : {
         __RODATA_START__ = .;
         *(.rodata*)
@@ -55,7 +64,7 @@ SECTIONS
         . = ALIGN(8);
 #include <pubsub_events.h>
 
-        . = NEXT(PAGE_SIZE);
+        . = ALIGN(PAGE_SIZE);
         __RODATA_END__ = .;
     } >RAM
 #else
@@ -92,7 +101,7 @@ SECTIONS
          * read-only, executable.  No RW data from the next section must
          * creep in.  Ensure the rest of the current memory block is unused.
          */
-        . = NEXT(PAGE_SIZE);
+        . = ALIGN(PAGE_SIZE);
         __RO_END__ = .;
     } >RAM
 #endif
@@ -207,7 +216,7 @@ SECTIONS
          * as device memory.  No other unexpected data must creep in.
          * Ensure the rest of the current memory page is unused.
          */
-        . = NEXT(PAGE_SIZE);
+        . = ALIGN(PAGE_SIZE);
         __COHERENT_RAM_END__ = .;
     } >RAM
 
index 8e891b757dec881c71dd48264ef143e227cc5dfe..f06a48bfe3c81c583896f361d500872fdcb694cd 100644 (file)
@@ -20,6 +20,7 @@
 #include <smccc_helpers.h>
 #include <stddef.h>
 #include <stdint.h>
+#include <std_svc.h>
 #include <string.h>
 #include <types.h>
 #include <utils.h>
index 4b2ad75eeaf529dd2a5f43ec542b904e9189378f..48e358a3a4b3d7527a7ba9716fc54359d36b5fa1 100644 (file)
@@ -82,19 +82,19 @@ vector_base tsp_exceptions
         */
 vector_entry sync_exception_sp_el0
        b       plat_panic_handler
-       check_vector_size sync_exception_sp_el0
+end_vector_entry sync_exception_sp_el0
 
 vector_entry irq_sp_el0
        b       plat_panic_handler
-       check_vector_size irq_sp_el0
+end_vector_entry irq_sp_el0
 
 vector_entry fiq_sp_el0
        b       plat_panic_handler
-       check_vector_size fiq_sp_el0
+end_vector_entry fiq_sp_el0
 
 vector_entry serror_sp_el0
        b       plat_panic_handler
-       check_vector_size serror_sp_el0
+end_vector_entry serror_sp_el0
 
 
        /* -----------------------------------------------------
@@ -104,19 +104,19 @@ vector_entry serror_sp_el0
         */
 vector_entry sync_exception_sp_elx
        b       plat_panic_handler
-       check_vector_size sync_exception_sp_elx
+end_vector_entry sync_exception_sp_elx
 
 vector_entry irq_sp_elx
        handle_tsp_interrupt irq_sp_elx
-       check_vector_size irq_sp_elx
+end_vector_entry irq_sp_elx
 
 vector_entry fiq_sp_elx
        handle_tsp_interrupt fiq_sp_elx
-       check_vector_size fiq_sp_elx
+end_vector_entry fiq_sp_elx
 
 vector_entry serror_sp_elx
        b       plat_panic_handler
-       check_vector_size serror_sp_elx
+end_vector_entry serror_sp_elx
 
 
        /* -----------------------------------------------------
@@ -126,19 +126,19 @@ vector_entry serror_sp_elx
         */
 vector_entry sync_exception_aarch64
        b       plat_panic_handler
-       check_vector_size sync_exception_aarch64
+end_vector_entry sync_exception_aarch64
 
 vector_entry irq_aarch64
        b       plat_panic_handler
-       check_vector_size irq_aarch64
+end_vector_entry irq_aarch64
 
 vector_entry fiq_aarch64
        b       plat_panic_handler
-       check_vector_size fiq_aarch64
+end_vector_entry fiq_aarch64
 
 vector_entry serror_aarch64
        b       plat_panic_handler
-       check_vector_size serror_aarch64
+end_vector_entry serror_aarch64
 
 
        /* -----------------------------------------------------
@@ -148,16 +148,16 @@ vector_entry serror_aarch64
         */
 vector_entry sync_exception_aarch32
        b       plat_panic_handler
-       check_vector_size sync_exception_aarch32
+end_vector_entry sync_exception_aarch32
 
 vector_entry irq_aarch32
        b       plat_panic_handler
-       check_vector_size irq_aarch32
+end_vector_entry irq_aarch32
 
 vector_entry fiq_aarch32
        b       plat_panic_handler
-       check_vector_size fiq_aarch32
+end_vector_entry fiq_aarch32
 
 vector_entry serror_aarch32
        b       plat_panic_handler
-       check_vector_size serror_aarch32
+end_vector_entry serror_aarch32
index 31c5a67e04b55f9540cbf70c5f8c76b5f726f61b..97b12ce199e615be4b3151348e84785a2672c30d 100644 (file)
@@ -29,14 +29,14 @@ SECTIONS
         *tsp_entrypoint.o(.text*)
         *(.text*)
         *(.vectors)
-        . = NEXT(PAGE_SIZE);
+        . = ALIGN(PAGE_SIZE);
         __TEXT_END__ = .;
     } >RAM
 
     .rodata . : {
         __RODATA_START__ = .;
         *(.rodata*)
-        . = NEXT(PAGE_SIZE);
+        . = ALIGN(PAGE_SIZE);
         __RODATA_END__ = .;
     } >RAM
 #else
@@ -52,7 +52,7 @@ SECTIONS
          * read-only, executable.  No RW data from the next section must
          * creep in.  Ensure the rest of the current memory page is unused.
          */
-        . = NEXT(PAGE_SIZE);
+        . = ALIGN(PAGE_SIZE);
         __RO_END__ = .;
     } >RAM
 #endif
@@ -117,7 +117,7 @@ SECTIONS
          * as device memory.  No other unexpected data must creep in.
          * Ensure the rest of the current memory page is unused.
          */
-        . = NEXT(PAGE_SIZE);
+        . = ALIGN(PAGE_SIZE);
         __COHERENT_RAM_END__ = .;
     } >RAM
 #endif
index 19cc35d4ea8a942a88d744265fe3e710a355d0f6..ba94f6cb1eea4175b1e3b0c4512eb167798eaaa3 100644 (file)
@@ -24,25 +24,25 @@ vector_entry SynchronousExceptionSP0
        mov     x0, #SYNC_EXCEPTION_SP_EL0
        bl      plat_report_exception
        no_ret  plat_panic_handler
-       check_vector_size SynchronousExceptionSP0
+end_vector_entry SynchronousExceptionSP0
 
 vector_entry IrqSP0
        mov     x0, #IRQ_SP_EL0
        bl      plat_report_exception
        no_ret  plat_panic_handler
-       check_vector_size IrqSP0
+end_vector_entry IrqSP0
 
 vector_entry FiqSP0
        mov     x0, #FIQ_SP_EL0
        bl      plat_report_exception
        no_ret  plat_panic_handler
-       check_vector_size FiqSP0
+end_vector_entry FiqSP0
 
 vector_entry SErrorSP0
        mov     x0, #SERROR_SP_EL0
        bl      plat_report_exception
        no_ret  plat_panic_handler
-       check_vector_size SErrorSP0
+end_vector_entry SErrorSP0
 
        /* -----------------------------------------------------
         * Current EL with SPx: 0x200 - 0x400
@@ -52,25 +52,25 @@ vector_entry SynchronousExceptionSPx
        mov     x0, #SYNC_EXCEPTION_SP_ELX
        bl      plat_report_exception
        no_ret  plat_panic_handler
-       check_vector_size SynchronousExceptionSPx
+end_vector_entry SynchronousExceptionSPx
 
 vector_entry IrqSPx
        mov     x0, #IRQ_SP_ELX
        bl      plat_report_exception
        no_ret  plat_panic_handler
-       check_vector_size IrqSPx
+end_vector_entry IrqSPx
 
 vector_entry FiqSPx
        mov     x0, #FIQ_SP_ELX
        bl      plat_report_exception
        no_ret  plat_panic_handler
-       check_vector_size FiqSPx
+end_vector_entry FiqSPx
 
 vector_entry SErrorSPx
        mov     x0, #SERROR_SP_ELX
        bl      plat_report_exception
        no_ret  plat_panic_handler
-       check_vector_size SErrorSPx
+end_vector_entry SErrorSPx
 
        /* -----------------------------------------------------
         * Lower EL using AArch64 : 0x400 - 0x600
@@ -80,25 +80,25 @@ vector_entry SynchronousExceptionA64
        mov     x0, #SYNC_EXCEPTION_AARCH64
        bl      plat_report_exception
        no_ret  plat_panic_handler
-       check_vector_size SynchronousExceptionA64
+end_vector_entry SynchronousExceptionA64
 
 vector_entry IrqA64
        mov     x0, #IRQ_AARCH64
        bl      plat_report_exception
        no_ret  plat_panic_handler
-       check_vector_size IrqA64
+end_vector_entry IrqA64
 
 vector_entry FiqA64
        mov     x0, #FIQ_AARCH64
        bl      plat_report_exception
        no_ret  plat_panic_handler
-       check_vector_size FiqA64
+end_vector_entry FiqA64
 
 vector_entry SErrorA64
        mov     x0, #SERROR_AARCH64
        bl      plat_report_exception
        no_ret  plat_panic_handler
-       check_vector_size SErrorA64
+end_vector_entry SErrorA64
 
        /* -----------------------------------------------------
         * Lower EL using AArch32 : 0x600 - 0x800
@@ -108,22 +108,22 @@ vector_entry SynchronousExceptionA32
        mov     x0, #SYNC_EXCEPTION_AARCH32
        bl      plat_report_exception
        no_ret  plat_panic_handler
-       check_vector_size SynchronousExceptionA32
+end_vector_entry SynchronousExceptionA32
 
 vector_entry IrqA32
        mov     x0, #IRQ_AARCH32
        bl      plat_report_exception
        no_ret  plat_panic_handler
-       check_vector_size IrqA32
+end_vector_entry IrqA32
 
 vector_entry FiqA32
        mov     x0, #FIQ_AARCH32
        bl      plat_report_exception
        no_ret  plat_panic_handler
-       check_vector_size FiqA32
+end_vector_entry FiqA32
 
 vector_entry SErrorA32
        mov     x0, #SERROR_AARCH32
        bl      plat_report_exception
        no_ret  plat_panic_handler
-       check_vector_size SErrorA32
+end_vector_entry SErrorA32
diff --git a/docs/marvell/build.txt b/docs/marvell/build.txt
new file mode 100644 (file)
index 0000000..63a40a8
--- /dev/null
@@ -0,0 +1,181 @@
+TF-A Build Instructions
+=======================
+
+This section describes how to compile the ARM Trusted Firmware (TF-A) project for Marvell's platforms.
+
+Build Instructions
+------------------
+(1) Set the cross compiler::
+
+               > export CROSS_COMPILE=/path/to/toolchain/aarch64-linux-gnu-
+
+(2) Set path for FIP images:
+
+       Set U-Boot image path (relatively to TF-A root or absolute path)::
+
+               > export BL33=path/to/u-boot.bin
+
+       For example: if U-Boot project (and its images) is located at ~/project/u-boot,
+       BL33 should be ~/project/u-boot/u-boot.bin
+
+       .. note::
+
+          u-boot.bin should be used and not u-boot-spl.bin
+
+       Set MSS/SCP image path (mandatory only for Armada80x0 and Armada8xxy)::
+
+               > export SCP_BL2=path/to/mrvl_scp_bl2*.img
+
+(3) Armada-37x0 build requires WTP tools installation.
+
+       See below in the section "Tools Installation for Armada37x0 Builds".
+       Install ARM 32-bit cross compiler, which is required by building WTMI image for CM3::
+
+               > sudo apt-get install gcc-arm-linux-gnueabi
+
+(4) Clean previous build residuals (if any)::
+
+               > make distclean
+
+(5) Build TF-A:
+
+       There are several build options:
+
+       - DEBUG: default is without debug information (=0). In order to enable it, use DEBUG=1
+
+       - LOG_LEVEL: defines the level of logging which will be purged to the default output port.
+
+               LOG_LEVEL_NONE          0
+               LOG_LEVEL_ERROR         10
+               LOG_LEVEL_NOTICE        20
+               LOG_LEVEL_WARNING       30
+               LOG_LEVEL_INFO          40
+               LOG_LEVEL_VERBOSE       50
+
+       - USE_COHERENT_MEM: This flag determines whether to include the coherent memory region in the
+               BL memory map or not.
+
+       - LLC_ENABLE: Flag defining the LLC (L3) cache state. The cache is enabled by default (LLC_ENABLE=1).
+
+       - MARVELL_SECURE_BOOT: build trusted(=1)/non trusted(=0) image, default is non trusted.
+
+       - BLE_PATH:
+               Points to BLE (Binary ROM extension) sources folder. Only required for A8K and A8K+ builds.
+               The parameter is optional, its default value is "ble".
+
+       - MV_DDR_PATH:
+               For A7/8K, use this parameter to point to mv_ddr driver sources to allow BLE build. For A37x0,
+               it is used for ddr_tool build.
+               Usage example: MV_DDR_PATH=path/to/mv_ddr
+               The parameter is optional for A7/8K, when this parameter is not set, the mv_ddr
+               sources are expected to be located at: drivers/marvell/mv_ddr. However, the parameter
+               is necessary for A37x0.
+
+       - DDR_TOPOLOGY: For Armada37x0 only, the DDR topology map index/name, default is 0.
+               Supported Options:
+                       - DDR3 1CS (0): DB-88F3720-DDR3-Modular (512MB); EspressoBIN (512MB)
+                       - DDR4 1CS (1): DB-88F3720-DDR4-Modular (512MB)
+                       - DDR3 2CS (2): EspressoBIN (1GB)
+                       - DDR4 2CS (3): DB-88F3720-DDR4-Modular (4GB)
+                       - DDR3 1CS (4): DB-88F3720-DDR3-Modular (1GB)
+                       - CUSTOMER (CUST): Customer board, DDR3 1CS 512MB
+
+       - CLOCKSPRESET: For Armada37x0 only, the clock tree configuration preset including CPU and DDR frequency,
+               default is CPU_800_DDR_800.
+                       - CPU_600_DDR_600       -       CPU at 600 MHz, DDR at 600 MHz
+                       - CPU_800_DDR_800       -       CPU at 800 MHz, DDR at 800 MHz
+                       - CPU_1000_DDR_800      -       CPU at 1000 MHz, DDR at 800 MHz
+                       - CPU_1200_DDR_750      -       CPU at 1200 MHz, DDR at 750 MHz
+
+       - BOOTDEV: For Armada37x0 only, the flash boot device, default is SPINOR,
+                       Currently, Armada37x0 only supports SPINOR, SPINAND, EMMCNORM and SATA:
+
+                               - SPINOR - SPI NOR flash boot
+                               - SPINAND - SPI NAND flash boot
+                               - EMMCNORM - eMMC Download Mode
+                                       Download boot loader or program code from eMMC flash into CM3 or CA53
+                                       Requires full initialization and command sequence
+                               - SATA - SATA device boot
+
+       - PARTNUM: For Armada37x0 only, the boot partition number, default is 0. To boot from eMMC, the value
+               should be aligned with the parameter in U-Boot with name of CONFIG_SYS_MMC_ENV_PART, whose
+               value by default is 1.
+               For details about CONFIG_SYS_MMC_ENV_PART, please refer to the U-Boot build instructions.
+
+       - WTMI_IMG: For Armada37x0 only, the path of the WTMI image can point to an image which does
+               nothing, an image which supports EFUSE or a customized CM3 firmware binary. The default image
+               is wtmi.bin, which is built from sources in the WTP folder (the next option). If the default
+               image is OK, then this option should be skipped.
+       - WTP: For Armada37x0 only, use this parameter to point to wtptools source code directory, which
+               can be found as a3700_utils.zip in the release.
+               Usage example: WTP=/path/to/a3700_utils
+
+       - CP_NUM: Total number of CP (South Bridge) chips wired to the interconnected APs.
+               When the parameter is omitted, the build uses the default number of CPs, which is 2.
+               The parameter is valid for Armada 8K-plus SoC family (PLAT=a8xxy) and results in a build of images
+               suitable for a8xxY SoC, where "Y" is a number of connected CPs and "xx" is a number of CPU cores.
+               Valid values of CP_NUM are in the range of 0 to 8.
+               The CPs defined by this parameter are evenly distributed across interconnected APs that in turn
+               are dynamically detected. For instance, if CP_NUM=6 and the TF-A detects 2 interconnected
+               APs, each AP is assumed to have 3 attached CPs. With the same number of APs and CP_NUM=3, AP0
+               will have 2 CPs connected and AP1 just a single CP.
+
+       For example, in order to build the image in debug mode with log level up to 'notice' level run::
+
+               > make DEBUG=1 USE_COHERENT_MEM=0 LOG_LEVEL=20 PLAT=<MARVELL_PLATFORM> all fip
+
+       And if we want to build a Armada37x0 image in debug mode with log level up to 'notice' level,
+       the image has the preset CPU at 1000 MHz, preset DDR3 at 800 MHz, the DDR topology of DDR3 2CS,
+       the image boot from SPI NOR flash partition 0, and the image is non trusted in WTP, the command
+       line is as following::
+
+               > make DEBUG=1 USE_COHERENT_MEM=0 LOG_LEVEL=20 SECURE=0 CLOCKSPRESET=CPU_1000_DDR_800 \
+                       DDR_TOPOLOGY=2 BOOTDEV=SPINOR PARTNUM=0 PLAT=a3700 all fip
+
+       Supported MARVELL_PLATFORM are:
+               - a3700
+               - a70x0
+               - a70x0_amc (for AMC board)
+               - a70x0_cust (for customers)
+               - a80x0
+               - a80x0_mcbin (for MacchiatoBin)
+
+Special Build Flags
+--------------------
+       - PLAT_RECOVERY_IMAGE_ENABLE: When set, this option enables the secondary recovery function when
+               building TF-A. In order to build the UART recovery image, this option should be disabled for
+               a70x0 and a80x0 because of a hardware limitation (booting from the secondary image can
+               interrupt the UART recovery process).
+               This macro definition is set in the plat/marvell/a8k/common/include/platform_def.h file
+
+(for more information about build options, please refer to section 'Summary of build options' in  TF-A user-guide:
+ https://github.com/ARM-software/arm-trusted-firmware/blob/master/docs/user-guide.md)
+
+
+Build output
+-------------
+Marvell's TF-A compilation generates 7 files:
+       - ble.bin               - BLe image
+       - bl1.bin               - BL1 image
+       - bl2.bin               - BL2 image
+       - bl31.bin              - BL31 image
+       - fip.bin               - FIP image (contains BL2, BL31 & BL33 (U-Boot) images)
+       - boot-image.bin        - TF-A image (contains BL1 and FIP images)
+       - flash-image.bin       - Image which contains boot-image.bin and SPL image; should be placed on the boot flash/device.
+
+
+Tools Installation for Armada37x0 Builds
+-----------------------------------------
+Install a cross GNU ARM tool chain for building the WTMI binary.
+Any cross GNU ARM tool chain that is able to build ARM Cortex M3 binaries
+is suitable.
+
+On Debian/Ubuntu hosts the default GNU ARM tool chain can be installed
+using the following command::
+
+               > sudo apt-get install gcc-arm-linux-gnueabi
+
+If required, the default tool chain prefix "arm-linux-gnueabi-" can be
+overwritten using the environment variable CROSS_CM3.
+Example for BASH shell::
+
+               > export CROSS_CM3=/opt/arm-cross/bin/arm-linux-gnueabi
diff --git a/docs/marvell/misc/mvebu-a8k-addr-map.txt b/docs/marvell/misc/mvebu-a8k-addr-map.txt
new file mode 100644 (file)
index 0000000..586e8b7
--- /dev/null
@@ -0,0 +1,47 @@
+Address decoding flow and address translation units of Marvell Armada 8K SoC family
+
++--------------------------------------------------------------------------------------------------+
+|                                                              +-------------+    +--------------+ |
+|                                                              | Memory      +-----   DRAM CS    | |
+|+------------+ +-----------+ +-----------+                    | Controller  |    +--------------+ |
+||  AP DMA    | |           | |           |                    +-------------+                     |
+||  SD/eMMC   | | CA72 CPUs | |  AP MSS   |                    +-------------+                     |
+||  MCI-0/1   | |           | |           |                    | Memory      |                     |
+|+------+-----+ +--+--------+ +--------+--+  +------------+    | Controller  |     +-------------+ |
+|       |          |                   |     |            +----- Translation |     |AP           | |
+|       |          |                   |     |            |    +-------------+     |Configuration| |
+|       |          |                   +-----+            +-------------------------Space        | |
+|       |          | +-------------+         |  CCU       |                        +-------------+ |
+|       |          | | MMU         +---------+  Windows   |   +-----------+        +-------------+ |
+|       |          +-| translation |         |  Lookup    +----           +---------   AP SPI    | |
+|       |            +-------------+         |            |   |           |        +-------------+ |
+|       |            +-------------+         |            |   |  IO       |        +-------------+ |
+|       +------------| SMMU        +---------+            |   |  Windows  +---------  AP MCI0/1  | |
+|                    | translation |         +------------+   |  Lookup   |        +-------------+ |
+|                    +---------+---+                          |           |        +-------------+ |
+|             -                |                              |           +---------   AP STM    | |
+|             +-----------------                              |           |        +-------------+ |
+| AP          |                |                              +-+---------+                        |
++---------------------------------------------------------------|----------------------------------+
++-------------|-------------------------------------------------|----------------------------------+
+| CP          |            +-------------+               +------+-----+      +-------------------+ |
+|             |            |             |               |            +-------   SB CFG Space    | |
+|             |            |   DIOB      |               |            |      +-------------------+ |
+|             |            |   Windows   -----------------  IOB       |      +-------------------+ |
+|             |            |   Control   |               |  Windows   +------| SB PCIe-0 - PCIe2 | |
+|             |            |             |               |  Lookup    |      +-------------------+ |
+|             |            +------+------+               |            |      +-------------------+ |
+|             |                   |                      |            +------+      SB NAND      | |
+|             |                   |                      +------+-----+      +-------------------+ |
+|             |                   |                             |                                  |
+|             |                   |                             |                                  |
+|   +------------------+   +------------+                +------+-----+      +-------------------+ |
+|   | Network Engine   |   |            |                |            +-------  SB SPI-0/SPI-1   | |
+|   | Security Engine  |   | PCIe, MSS  |                |  RUNIT     |      +-------------------+ |
+|   | SATA, USB        |   | DMA        |                |  Windows   |      +-------------------+ |
+|   | SD/eMMC          |   |            |                |  Lookup    +-------   SB Device Bus   | |
+|   | TDM, I2C         |   |            |                |            |      +-------------------+ |
+|   +------------------+   +------------+                +------------+                            |
+|                                                                                                  |
++--------------------------------------------------------------------------------------------------+
+
diff --git a/docs/marvell/misc/mvebu-amb.txt b/docs/marvell/misc/mvebu-amb.txt
new file mode 100644 (file)
index 0000000..2a7a41e
--- /dev/null
@@ -0,0 +1,45 @@
+AMB - AXI MBUS address decoding
+-------------------------------
+
+AXI to M-bridge decoding unit driver for Marvell Armada 8K and 8K+ SoCs.
+
+- The Runit offers a second level of address windows lookup. It is used to map transactions towards
+the CD BootROM, SPI0, SPI1 and Device bus (NOR).
+- The Runit contains eight configurable windows. Each window defines a contiguous
+address space and the properties associated with that address space.
+
+Unit           Bank            ATTR
+Device-Bus     DEV_BOOT_CS     0x2F
+               DEV_CS0         0x3E
+               DEV_CS1         0x3D
+               DEV_CS2         0x3B
+               DEV_CS3         0x37
+SPI-0          SPI_A_CS0       0x1E
+               SPI_A_CS1       0x5E
+               SPI_A_CS2       0x9E
+               SPI_A_CS3       0xDE
+               SPI_A_CS4       0x1F
+               SPI_A_CS5       0x5F
+               SPI_A_CS6       0x9F
+               SPI_A_CS7       0xDF
+SPI1           SPI_B_CS0       0x1A
+               SPI_B_CS1       0x5A
+               SPI_B_CS2       0x9A
+               SPI_B_CS3       0xDA
+BOOT_ROM       BOOT_ROM        0x1D
+UART           UART            0x01
+
+Mandatory functions:
+       - marvell_get_amb_memory_map
+               returns the AMB windows configuration and the number of windows
+
+Mandatory structures:
+       amb_memory_map - Array that includes the configuration of the windows
+         every window/entry is a struct which has 2 parameters:
+         - base address of the window
+         - Attribute of the window
+
+Examples:
+       struct addr_map_win amb_memory_map[] = {
+               {0xf900,        AMB_DEV_CS0_ID},
+       };
diff --git a/docs/marvell/misc/mvebu-ccu.txt b/docs/marvell/misc/mvebu-ccu.txt
new file mode 100644 (file)
index 0000000..9764027
--- /dev/null
@@ -0,0 +1,23 @@
+Marvell CCU address decoding bindings
+=====================================
+
+CCU configuration driver (1st stage address translation) for Marvell Armada 8K and 8K+ SoCs.
+
+The CCU node includes a description of the address decoding configuration.
+
+Mandatory functions:
+       - marvell_get_ccu_memory_map
+               return the CCU windows configuration and the number of windows
+               of the specific AP.
+
+Mandatory structures:
+       ccu_memory_map - Array that includes the configuration of the windows
+         every window/entry is a struct which has 3 parameters:
+         - Base address of the window
+         - Size of the window
+         - Target-ID of the window
+
+Example:
+       struct addr_map_win ccu_memory_map[] = {
+               {0x00000000f2000000,     0x00000000e000000,      IO_0_TID}, /* IO window */
+       };
diff --git a/docs/marvell/misc/mvebu-io-win.txt b/docs/marvell/misc/mvebu-io-win.txt
new file mode 100644 (file)
index 0000000..c83ad1f
--- /dev/null
@@ -0,0 +1,35 @@
+Marvell IO WIN address decoding bindings
+========================================
+
+IO Window configuration driver (2nd stage address translation) for Marvell Armada 8K and 8K+ SoCs.
+
+The IO WIN includes a description of the address decoding configuration.
+
+Transactions that are decoded by CCU windows as IO peripheral, have an additional
+layer of decoding. This additional address decoding layer defines one of the
+following targets:
+       0x0 = BootRom
+       0x1 = STM (Serial Trace Macro-cell, a programmer's port into trace stream)
+       0x2 = SPI direct access
+       0x3 = PCIe registers
+       0x4 = MCI Port
+       0x5 = PCIe port
+
+Mandatory functions:
+       - marvell_get_io_win_memory_map
+               returns the IO windows configuration and the number of windows
+               of the specific AP.
+
+Mandatory structures:
+       io_win_memory_map - Array that includes the configuration of the windows
+         every window/entry is a struct which has 3 parameters:
+         - Base address of the window
+         - Size of the window
+         - Target-ID of the window
+
+Example:
+       struct addr_map_win io_win_memory_map[] = {
+               {0x00000000fe000000,    0x000000001f00000,      PCIE_PORT_TID}, /* PCIe window 31Mb for PCIe port*/
+               {0x00000000ffe00000,    0x000000000100000,      PCIE_REGS_TID}, /* PCI-REG window 64Kb for PCIe-reg*/
+               {0x00000000f6000000,    0x000000000100000,      MCIPHY_TID},    /* MCI window  1Mb for PHY-reg*/
+       };
diff --git a/docs/marvell/misc/mvebu-iob.txt b/docs/marvell/misc/mvebu-iob.txt
new file mode 100644 (file)
index 0000000..97ec09d
--- /dev/null
@@ -0,0 +1,40 @@
+Marvell IOB address decoding bindings
+=====================================
+
+IO bridge configuration driver (3rd stage address translation) for Marvell Armada 8K and 8K+ SoCs.
+
+The IOB includes a description of the address decoding configuration.
+
+IOB supports up to n (in CP110 n=24) windows for external memory transaction.
+When a transaction passes through the IOB, its address is compared to each of
+the enabled windows. If there is a hit and it passes the security checks, it is
+advanced to the target port.
+
+Mandatory functions:
+       - marvell_get_iob_memory_map
+               returns the IOB windows configuration and the number of windows
+
+Mandatory structures:
+       iob_memory_map - Array that includes the configuration of the windows
+         every window/entry is a struct which has 3 parameters:
+         - Base address of the window
+         - Size of the window
+         - Target-ID of the window
+
+Target ID options:
+       - 0x0 = Internal configuration space
+       - 0x1 = MCI0
+       - 0x2 = PEX1_X1
+       - 0x3 = PEX2_X1
+       - 0x4 = PEX0_X4
+       - 0x5 = NAND flash
+       - 0x6 = RUNIT (NOR/SPI/BootROM)
+       - 0x7 = MCI1
+
+Example:
+       struct addr_map_win iob_memory_map[] = {
+               {0x00000000f7000000,    0x0000000001000000,     PEX1_TID}, /* PEX1_X1 window */
+               {0x00000000f8000000,    0x0000000001000000,     PEX2_TID}, /* PEX2_X1 window */
+               {0x00000000f6000000,    0x0000000001000000,     PEX0_TID}, /* PEX0_X4 window */
+               {0x00000000f9000000,    0x0000000001000000,     NAND_TID}  /* NAND window */
+       };
diff --git a/docs/marvell/porting.txt b/docs/marvell/porting.txt
new file mode 100644 (file)
index 0000000..78000e9
--- /dev/null
@@ -0,0 +1,66 @@
+TF-A Porting Guide
+==================
+
+This section describes how to port TF-A to a customer board, assuming that the SoC being used is already supported
+in TF-A.
+
+
+Source Code Structure
+---------------------
+- The customer platform specific code shall reside under "plat/marvell/<soc family>/<soc>_cust"
+       (e.g. 'plat/marvell/a8k/a7040_cust').
+- The platform name for build purposes is called "<soc>_cust" (e.g. a7040_cust).
+- The build system will reuse all files from within the soc directory, and take only the porting
+  files from the customer platform directory.
+
+Files that require porting are located at "plat/marvell/<soc family>/<soc>_cust" directory.
+
+
+Armada-70x0/Armada-80x0 Porting
+-------------------------------
+
+  - SoC Physical Address Map (marvell_plat_config.c):
+       - This file describes the SoC physical memory mapping to be used for the CCU, IOWIN, AXI-MBUS and IOB
+         address decode units (Refer to the functional spec for more details).
+       - In most cases, using the default address decode windows should work OK.
+       - In cases where a special physical address map is needed (e.g. Special size for PCIe MEM windows,
+         large memory mapped SPI flash...), then porting of the SoC memory map is required.
+       - Note: For detailed information on how CCU, IOWIN, AXI-MBUS & IOB work, please refer to the SoC functional spec,
+         and under "docs/marvell/misc/mvebu-[ccu/iob/amb/io-win].txt" files.
+
+  - boot loader recovery (marvell_plat_config.c):
+       - Background:
+               boot rom can skip the current image and choose to boot from next position if a specific value
+               (0xDEADB002) is returned by the ble main function. This feature is used for boot loader recovery
+               by booting from a valid flash-image saved in next position on flash (e.g. address 2M in SPI flash).
+
+               Supported options to implement the skip request are:
+                       - GPIO
+                       - I2C
+                       - User defined
+
+       - Porting:
+               Under marvell_plat_config.c, implement struct skip_image that includes specific board parameters.
+               .. warning:: to disable this feature make sure the struct skip_image is not implemented.
+
+       - Example:
+               In A7040-DB specific implementation (plat/marvell/a8k/a70x0/board/marvell_plat_config.c),
+               the image skip is implemented using GPIO: mpp 33 (SW5).
+
+               Before resetting the board make sure there is a valid image on the next flash address:
+                       -tftp [valid address] flash-image.bin
+                       -sf update [valid address] 0x2000000 [size]
+
+               Press reset and keep pressing the button connected to the chosen GPIO pin. A skip image request
+               message is printed on the screen and boot rom boots from the saved image at the next position.
+
+  - DDR Porting (dram_port.c):
+       - This file defines the dram topology and parameters of the target board.
+       - The DDR code is part of the BLE component, which is an extension of ARM Trusted Firmware (TF-A).
+       - The DDR driver called mv_ddr is released separately apart from TF-A sources.
+       - The BLE and consequently, the DDR init code is executed at the early stage of the boot process.
+       - Each supported platform of the TF-A has its own DDR porting file called dram_port.c located at
+         ``atf/plat/marvell/a8k/<platform>/board`` directory.
+       - Please refer to '<path_to_mv_ddr_sources>/doc/porting_guide.txt' for detailed porting description.
+       - The build target directory is "build/<platform>/release/ble".
+
index c8e2405cc0e242b34b18de326624e8e4fab73fbe..fbf753b09dd06275d49a14cf90a784b378cb5ca7 100644 (file)
@@ -7,8 +7,7 @@ Trusted Firmware-A for Raspberry Pi 3
 .. contents::
 
 The `Raspberry Pi 3`_ is an inexpensive single-board computer that contains four
-Arm Cortex-A53 cores, which makes it possible to have a port of Trusted
-Firmware-A (TF-A).
+Arm Cortex-A53 cores.
 
 The following instructions explain how to use this port of the TF-A with the
 default distribution of `Raspbian`_ because that's the distribution officially
@@ -66,7 +65,7 @@ Placement of images
 
 The file ``armstub8.bin`` contains BL1 and the FIP. It is needed to add padding
 between them so that the addresses they are loaded to match the ones specified
-when compiling TF-A.
+when compiling TF-A. This is done automatically by the build system.
 
 The device tree block is loaded by the VideoCore loader from an appropriate
 file, but we can specify the address it is loaded to in ``config.txt``.
@@ -108,13 +107,13 @@ secure platform!
                |       ...       |
                |                 |
     0x01000000 +-----------------+
-               |     Kernel      |
+               |       DTB       | (Loaded by the VideoCore)
                +-----------------+
                |                 |
                |       ...       |
                |                 |
     0x02000000 +-----------------+
-               |       DTB       |
+               |     Kernel      | (Loaded by the VideoCore)
                +-----------------+
                |                 |
                |       ...       |
@@ -123,9 +122,9 @@ secure platform!
                |   Secure SRAM   | BL2, BL31
     0x10100000 +-----------------+
                |   Secure DRAM   | BL32 (Secure payload)
-    0x10300000 +-----------------+
-               | Non-secure DRAM | BL33
     0x11000000 +-----------------+
+               | Non-secure DRAM | BL33
+               +-----------------+
                |                 |
                |       ...       |
                |                 |
@@ -133,10 +132,10 @@ secure platform!
                |       I/O       |
     0x40000000 +-----------------+
 
-The area between **0x10000000** and **0x11000000** has to be protected so that
-the kernel doesn't use it. That is done by adding ``memmap=16M$256M`` to the
-command line passed to the kernel. See the `Setup SD card`_ instructions to see
-how to do it.
+The area between **0x10000000** and **0x11000000** has to be manually protected
+so that the kernel doesn't use it. That is done by adding ``memmap=16M$256M`` to
+the command line passed to the kernel. See the `Setup SD card`_ instructions to
+see how to do it.
 
 The last 16 MiB of DRAM can only be accessed by the VideoCore, that has
 different mappings than the Arm cores in which the I/O addresses don't overlap
@@ -159,14 +158,24 @@ The `Linux kernel tree`_ has instructions on how to jump to the Linux kernel
 in ``Documentation/arm/Booting`` and ``Documentation/arm64/booting.txt``. The
 bootstrap should take care of this.
 
+This port supports a direct boot of the Linux kernel from the firmware (as a BL33
+image). Alternatively, U-Boot or other bootloaders may be used.
+
 Secondary cores
 ~~~~~~~~~~~~~~~
 
+This port of the Trusted Firmware-A supports ``PSCI_CPU_ON``,
+``PSCI_SYSTEM_RESET`` and ``PSCI_SYSTEM_OFF``. The last one doesn't really turn
+the system off, it simply reboots it and asks the VideoCore firmware to keep it
+in a low power mode permanently.
+
 The kernel used by `Raspbian`_ doesn't have support for PSCI, so it is needed to
 use mailboxes to trap the secondary cores until they are ready to jump to the
 kernel. This mailbox is located at a different address in the AArch32 default
 kernel than in the AArch64 kernel.
 
+Kernels with PSCI support can use the PSCI calls instead for a cleaner boot.
+
 Also, this port of TF-A has another Trusted Mailbox in Shared BL RAM. During
 cold boot, all secondary cores wait in a loop until they are given given an
 address to jump to in this Mailbox (``bl31_warm_entrypoint``).
@@ -187,71 +196,60 @@ To boot a AArch32 kernel, both AArch64 and AArch32 toolchains are required. The
 AArch32 toolchain is needed for the AArch32 bootstrap needed to load a 32-bit
 kernel.
 
-First, clone and compile `Raspberry Pi 3 TF-A bootstrap`_. Choose the one
-needed for the architecture of your kernel.
-
-Then compile TF-A. For a AArch32 kernel, use the following command line:
-
-.. code:: shell
-
-    CROSS_COMPILE=aarch64-linux-gnu- make PLAT=rpi3             \
-    RPI3_BL33_IN_AARCH32=1                                      \
-    BL33=../rpi3-arm-tf-bootstrap/aarch32/el2-bootstrap.bin     \
-    all fip
-
-For a AArch64 kernel, use this other command line:
-
-.. code:: shell
-
-    CROSS_COMPILE=aarch64-linux-gnu- make PLAT=rpi3             \
-    BL33=../rpi3-arm-tf-bootstrap/aarch64/el2-bootstrap.bin     \
-    all fip
-
-Then, join BL1 and the FIP with the following instructions (replace ``release``
-by ``debug`` if you set the build option ``DEBUG=1``):
-
-.. code:: shell
-
-    cp build/rpi3/release/bl1.bin bl1.pad.bin
-    truncate --size=131072 bl1.pad.bin
-    cat bl1.pad.bin build/rpi3/release/fip.bin > armstub8.bin
-
-The resulting file, ``armstub8.bin``, contains BL1 and the FIP in the place they
-need to be for TF-A to boot correctly. Now, follow the instructions in
-`Setup SD card`_.
+The build system concatenates BL1 and the FIP so that the addresses match the
+ones in the memory map. The resulting file is ``armstub8.bin``, located in the
+build folder (e.g. ``build/rpi3/debug/armstub8.bin``). To know how to use this
+file, follow the instructions in `Setup SD card`_.
 
 The following build options are supported:
 
-- ``PRELOADED_BL33_BASE``: Specially useful because the file ``kernel8.img`` can
-  be loaded anywhere by modifying the file ``config.txt``. It doesn't have to
-  contain a kernel, it could have any arbitrary payload.
-
-- ``RESET_TO_BL31``: Set to 1 by default. If using a 32-bit kernel like
-  `Raspbian`_, the space used by BL1 can overwritten by the kernel when it is
-  being loaded. Even when using a AArch64 kernel the region used by
-  BL1 isn't protected and the kernel could overwrite it. The space used by BL31
-  is reserved by the command line passed to the kernel.
-
 - ``RPI3_BL33_IN_AARCH32``: This port can load a AArch64 or AArch32 BL33 image.
   By default this option is 0, which means that TF-A will jump to BL33 in EL2
   in AArch64 mode. If set to 1, it will jump to BL33 in Hypervisor in AArch32
   mode.
 
+- ``PRELOADED_BL33_BASE``: Used to specify the address of a BL33 binary that has
+  been preloaded by any other system than using the firmware. ``BL33`` isn't
+  needed in the build command line if this option is used. Specially useful
+  because the file ``kernel8.img`` can be loaded anywhere by modifying the file
+  ``config.txt``. It doesn't have to contain a kernel, it could have any
+  arbitrary payload.
+
+- ``RPI3_DIRECT_LINUX_BOOT``: Disabled by default. Set to 1 to enable the direct
+  boot of the Linux kernel from the firmware. Option ``RPI3_PRELOADED_DTB_BASE``
+  is mandatory when the direct Linux kernel boot is used. Options
+  ``PRELOADED_BL33_BASE`` will most likely be needed as well because it is
+  unlikely that the kernel image will fit in the space reserved for BL33 images.
+  This option can be combined with ``RPI3_BL33_IN_AARCH32`` in order to boot a
+  32-bit kernel. The only thing this option does is to set the arguments in
+  registers x0-x3 or r0-r2 as expected by the kernel.
+
+- ``RPI3_PRELOADED_DTB_BASE``: Auxiliary build option needed when using
+  ``RPI3_DIRECT_LINUX_BOOT=1``. This option allows to specify the location of a
+  DTB in memory.
+
 - ``BL32``: This port can load and run OP-TEE. The OP-TEE image is optional.
   Please use the code from `here <https://github.com/OP-TEE/optee_os>`__.
   Build the Trusted Firmware with option ``BL32=tee-header_v2.bin
   BL32_EXTRA1=tee-pager_v2.bin  BL32_EXTRA2=tee-pageable_v2.bin``
   to put the binaries into the FIP.
 
-- ``TRUSTED_BOARD_BOOT``: This port supports TBB. Set this option
-  ``TRUSTED_BOARD_BOOT=1`` to enable it. In order to use TBB, you might
-  want to set ``GENERATE_COT=1`` to let the contents of the FIP automatically
-  signed by the build process. The ROT key will be generated and output to
-  ``rot_key.pem`` in the build directory. It is able to set ROT_KEY to
-  your own key in PEM format.
-  Also in order to build, you need to clone mbedtls from
-  `here <https://github.com/ARMmbed/mbedtls>`__.
-  And set MBEDTLS_DIR to mbedtls source directory.
+  Note: If OP-TEE is used it may be needed to add the following options to the
+  Linux command line so that the USB driver doesn't use FIQs:
+  ``dwc_otg.fiq_enable=0 dwc_otg.fiq_fsm_enable=0 dwc_otg.nak_holdoff=0``.
+  This will unfortunately reduce the performance of the USB driver. It is needed
+  when using Raspbian, for example.
+
+- ``TRUSTED_BOARD_BOOT``: This port supports TBB. Set this option to 1 to enable
+  it. In order to use TBB, you might want to set ``GENERATE_COT=1`` to let the
+  contents of the FIP automatically signed by the build process. The ROT key
+  will be generated and output to ``rot_key.pem`` in the build directory. It is
+  able to set ROT_KEY to your own key in PEM format.  Also in order to build,
+  you need to clone mbed TLS from `here <https://github.com/ARMmbed/mbedtls>`__.
+  ``MBEDTLS_DIR`` must point at the mbed TLS source directory.
+
+- ``ENABLE_STACK_PROTECTOR``: Disabled by default. It uses the hardware RNG of
+  the board.
 
 The following is not currently supported:
 
@@ -261,13 +259,65 @@ The following is not currently supported:
   address by changing the file ``armstub8.bin``, so there's no point in using
   TF-A in this case.
 
-- ``LOAD_IMAGE_V2=0``: Only version 2 is supported.
-
 - ``MULTI_CONSOLE_API=0``: The multi console API must be enabled. Note that the
   crash console uses the internal 16550 driver functions directly in order to be
   able to print error messages during early crashes before setting up the
   multi console API.
 
+Building the firmware for kernels that don't support PSCI
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This is the case for the 32-bit image of Raspbian, for example. 64-bit kernels
+always support PSCI, but they may not know that the system understands PSCI due
+to an incorrect DTB file.
+
+First, clone and compile the 32-bit version of the `Raspberry Pi 3 TF-A
+bootstrap`_. Choose the one needed for the architecture of your kernel.
+
+Then compile TF-A. For a 32-bit kernel, use the following command line:
+
+.. code:: shell
+
+    CROSS_COMPILE=aarch64-linux-gnu- make PLAT=rpi3             \
+    RPI3_BL33_IN_AARCH32=1                                      \
+    BL33=../rpi3-arm-tf-bootstrap/aarch32/el2-bootstrap.bin
+
+For a 64-bit kernel, use this other command line:
+
+.. code:: shell
+
+    CROSS_COMPILE=aarch64-linux-gnu- make PLAT=rpi3             \
+    BL33=../rpi3-arm-tf-bootstrap/aarch64/el2-bootstrap.bin
+
+However, enabling PSCI support in a 64-bit kernel is really easy. In the
+repository `Raspberry Pi 3 TF-A bootstrap`_ there is a patch that can be applied
+to the Linux kernel tree maintained by the Raspberry Pi foundation. It modifies
+the DTS to tell the kernel to use PSCI. Once this patch is applied, follow the
+instructions in `AArch64 kernel build instructions`_ to get a working 64-bit
+kernel image and supporting files.
+
+Building the firmware for kernels that support PSCI
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+For a 64-bit kernel:
+
+.. code:: shell
+
+    CROSS_COMPILE=aarch64-linux-gnu- make PLAT=rpi3             \
+    PRELOADED_BL33_BASE=0x02000000                              \
+    RPI3_PRELOADED_DTB_BASE=0x01000000                          \
+    RPI3_DIRECT_LINUX_BOOT=1
+
+For a 32-bit kernel:
+
+.. code:: shell
+
+    CROSS_COMPILE=aarch64-linux-gnu- make PLAT=rpi3             \
+    PRELOADED_BL33_BASE=0x02000000                              \
+    RPI3_PRELOADED_DTB_BASE=0x01000000                          \
+    RPI3_DIRECT_LINUX_BOOT=1                                    \
+    RPI3_BL33_IN_AARCH32=1
+
 AArch64 kernel build instructions
 ---------------------------------
 
@@ -284,7 +334,7 @@ allows the user to run 64-bit binaries in addition to 32-bit binaries.
 
 .. code:: shell
 
-    git clone --depth=1 -b rpi-4.14.y https://github.com/raspberrypi/linux
+    git clone --depth=1 -b rpi-4.18.y https://github.com/raspberrypi/linux
     cd linux
 
 2. Configure and compile the kernel. Adapt the number after ``-j`` so that it is
@@ -304,6 +354,7 @@ allows the user to run 64-bit binaries in addition to 32-bit binaries.
 
     cp arch/arm64/boot/Image /path/to/boot/kernel8.img
     cp arch/arm64/boot/dts/broadcom/bcm2710-rpi-3-b.dtb /path/to/boot/
+    cp arch/arm64/boot/dts/broadcom/bcm2710-rpi-3-b-plus.dtb /path/to/boot/
 
 4. Install the kernel modules. Replace the path by the corresponding path to the
    filesystem partition of the SD card on your computer.
@@ -347,8 +398,8 @@ untouched). They have been tested with the image available in 2018-03-13.
 ::
 
     enable_uart=1
-    kernel_address=0x01000000
-    device_tree_address=0x02000000
+    kernel_address=0x02000000
+    device_tree_address=0x01000000
 
 If you connect a serial cable to the Mini UART and your computer, and connect
 to it (for example, with ``screen /dev/ttyUSB0 115200``) you should see some
index 5462cc1ec21bae7e6c19c0fe41399f173f64041a..a7a88f6497d95ff8dffa412bd4c7f48f2adafb92 100644 (file)
@@ -2850,6 +2850,106 @@ you can keep the default implementation here (which calls ``console_flush()``).
 If you're trying to debug crashes in BL1, you can call the console_xx_core_flush
 function exported by some console drivers from here.
 
+External Abort handling and RAS Support
+----------------------------------------
+
+Function : plat_ea_handler
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : int
+    Argument : uint64_t
+    Argument : void *
+    Argument : void *
+    Argument : uint64_t
+    Return   : void
+
+This function is invoked by the RAS framework for the platform to handle an
+External Abort received at EL3. The intention of the function is to attempt to
+resolve the cause of External Abort and return; if that's not possible, to
+initiate orderly shutdown of the system.
+
+The first parameter (``int ea_reason``) indicates the reason for External Abort.
+Its value is one of ``ERROR_EA_*`` constants defined in ``ea_handle.h``.
+
+The second parameter (``uint64_t syndrome``) is the respective syndrome
+presented to EL3 after having received the External Abort. Depending on the
+nature of the abort (as can be inferred from the ``ea_reason`` parameter), this
+can be the content of either ``ESR_EL3`` or ``DISR_EL1``.
+
+The third parameter (``void *cookie``) is unused for now. The fourth parameter
+(``void *handle``) is a pointer to the preempted context. The fifth parameter
+(``uint64_t flags``) indicates the preempted security state. These parameters
+are received from the top-level exception handler.
+
+If ``RAS_EXTENSION`` is set to ``1``, the default implementation of this
+function iterates through RAS handlers registered by the platform. If any of the
+RAS handlers resolve the External Abort, no further action is taken.
+
+If ``RAS_EXTENSION`` is set to ``0``, or if none of the platform RAS handlers
+could resolve the External Abort, the default implementation prints an error
+message, and panics.
+
+Function : plat_handle_uncontainable_ea
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : int
+    Argument : uint64_t
+    Return   : void
+
+This function is invoked by the RAS framework when an External Abort of
+Uncontainable type is received at EL3. Due to the critical nature of
+Uncontainable errors, the intention of this function is to initiate orderly
+shutdown of the system, and is not expected to return.
+
+This function must be implemented in assembly.
+
+The first and second parameters are the same as that of ``plat_ea_handler``.
+
+The default implementation of this function calls
+``report_unhandled_exception``.
+
+Function : plat_handle_double_fault
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : int
+    Argument : uint64_t
+    Return   : void
+
+This function is invoked by the RAS framework when another External Abort is
+received at EL3 while one is already being handled. I.e., a call to
+``plat_ea_handler`` is outstanding. Due to its critical nature, the intention of
+this function is to initiate orderly shutdown of the system, and is not
+expected to recover or return.
+
+This function must be implemented in assembly.
+
+The first and second parameters are the same as that of ``plat_ea_handler``.
+
+The default implementation of this function calls
+``report_unhandled_exception``.
+
+Function : plat_handle_el3_ea
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Return   : void
+
+This function is invoked when an External Abort is received while executing in
+EL3. Due to its critical nature, the intention of this function is to initiate
+orderly shutdown of the system, and is not expected to recover or return.
+
+This function must be implemented in assembly.
+
+The default implementation of this function calls
+``report_unhandled_exception``.
+
 Build flags
 -----------
 
index 68a74edd5fd22a8bae0cdf85655459f25a703eb4..da260264adb2d0ddccc523b642ddfd3f3a09244b 100644 (file)
@@ -52,7 +52,7 @@ Install the required packages to build TF-A with the following command:
 
 ::
 
-    sudo apt-get install build-essential gcc make git libssl-dev
+    sudo apt-get install device-tree-compiler build-essential gcc make git libssl-dev
 
 TF-A has been tested with `Linaro Release 17.10`_.
 
@@ -62,8 +62,8 @@ given Linaro Release. Also, these `Linaro instructions`_ provide further
 guidance and a script, which can be used to download Linaro deliverables
 automatically.
 
-Optionally, TF-A can be built using clang or Arm Compiler 6.
-See instructions below on how to switch the default compiler.
+Optionally, TF-A can be built using clang version 4.0 or newer or Arm
+Compiler 6. See instructions below on how to switch the default compiler.
 
 In addition, the following optional packages and tools may be needed:
 
@@ -103,10 +103,14 @@ Building TF-A
 
        export CROSS_COMPILE=<path-to-aarch32-gcc>/bin/arm-linux-gnueabihf-
 
-   It is possible to build TF-A using clang or Arm Compiler 6. To do so
-   ``CC`` needs to point to the clang or armclang binary. Only the compiler
-   is switched; the assembler and linker need to be provided by the GNU
-   toolchain, thus ``CROSS_COMPILE`` should be set as described above.
+   It is possible to build TF-A using Clang or Arm Compiler 6. To do so
+   ``CC`` needs to point to the clang or armclang binary, which will
+   also select the clang or armclang assembler. Be aware that the
+   GNU linker is used by default. If needed, the linker can be
+   overridden using the ``LD`` variable. Clang linker version 6 is
+   known to work with TF-A.
+
+   In both cases ``CROSS_COMPILE`` should be set as described above.
 
    Arm Compiler 6 will be selected when the base name of the path assigned
    to ``CC`` matches the string 'armclang'.
index 6e7103dbee9374aad685a5de8da2c70f481ba8d3..9d6c763e8969bb92597a6c59fba4c25d76c05050 100644 (file)
@@ -372,7 +372,6 @@ static int fip_file_read(io_entity_t *entity, uintptr_t buffer, size_t length,
        uintptr_t backend_handle;
 
        assert(entity != NULL);
-       assert(buffer != (uintptr_t)NULL);
        assert(length_read != NULL);
        assert(entity->info != (uintptr_t)NULL);
 
index bf59d6a5c742cdcf93afb5ff1ac3e00ac9e4030e..5595e60a424909a8a7b295b0bbbdaa17c98ddcc4 100644 (file)
@@ -9,6 +9,7 @@
 #include <io_driver.h>
 #include <io_memmap.h>
 #include <io_storage.h>
+#include <platform_def.h>
 #include <string.h>
 #include <utils.h>
 
@@ -169,7 +170,6 @@ static int memmap_block_read(io_entity_t *entity, uintptr_t buffer,
        size_t pos_after;
 
        assert(entity != NULL);
-       assert(buffer != (uintptr_t)NULL);
        assert(length_read != NULL);
 
        fp = (file_state_t *) entity->info;
@@ -197,7 +197,6 @@ static int memmap_block_write(io_entity_t *entity, const uintptr_t buffer,
        size_t pos_after;
 
        assert(entity != NULL);
-       assert(buffer != (uintptr_t)NULL);
        assert(length_written != NULL);
 
        fp = (file_state_t *) entity->info;
index 4abf44f7d2e21b0b4c4a62f5227dd6b033883695..9ca0a9dc9c2262dd3e0db38ef603f453045fc12f 100644 (file)
@@ -8,6 +8,7 @@
 #include <io_driver.h>
 #include <io_semihosting.h>
 #include <io_storage.h>
+#include <platform_def.h>
 #include <semihosting.h>
 
 
@@ -133,7 +134,6 @@ static int sh_file_read(io_entity_t *entity, uintptr_t buffer, size_t length,
        long file_handle;
 
        assert(entity != NULL);
-       assert(buffer != (uintptr_t)NULL);
        assert(length_read != NULL);
 
        file_handle = (long)entity->info;
@@ -158,7 +158,6 @@ static int sh_file_write(io_entity_t *entity, const uintptr_t buffer,
        size_t bytes = length;
 
        assert(entity != NULL);
-       assert(buffer != (uintptr_t)NULL);
        assert(length_written != NULL);
 
        file_handle = (long)entity->info;
index 0918de0a23daefb7ddb14c902e84818d266b1dc4..948f84813768d8c2204d4496cacd7adf9c1dbe4c 100644 (file)
@@ -279,7 +279,7 @@ int io_read(uintptr_t handle,
                size_t *length_read)
 {
        int result = -ENODEV;
-       assert(is_valid_entity(handle) && (buffer != (uintptr_t)NULL));
+       assert(is_valid_entity(handle));
 
        io_entity_t *entity = (io_entity_t *)handle;
 
@@ -299,7 +299,7 @@ int io_write(uintptr_t handle,
                size_t *length_written)
 {
        int result = -ENODEV;
-       assert(is_valid_entity(handle) && (buffer != (uintptr_t)NULL));
+       assert(is_valid_entity(handle));
 
        io_entity_t *entity = (io_entity_t *)handle;
 
diff --git a/drivers/marvell/amb_adec.c b/drivers/marvell/amb_adec.c
new file mode 100644 (file)
index 0000000..06a1957
--- /dev/null
@@ -0,0 +1,156 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+/* AXI to M-Bridge decoding unit driver for Marvell Armada 8K and 8K+ SoCs */
+
+#include <a8k_common.h>
+#include <debug.h>
+#include <mmio.h>
+#include <mvebu.h>
+#include <mvebu_def.h>
+
+#if LOG_LEVEL >= LOG_LEVEL_INFO
+#define DEBUG_ADDR_MAP
+#endif
+
+/* common defines */
+#define WIN_ENABLE_BIT                 (0x1)
+
+#define MVEBU_AMB_ADEC_OFFSET          (0x70ff00)
+
+#define AMB_WIN_CR_OFFSET(win)         (amb_base + 0x0 + (0x8 * win))
+#define AMB_ATTR_OFFSET                        8
+#define AMB_ATTR_MASK                  0xFF
+#define AMB_SIZE_OFFSET                        16
+#define AMB_SIZE_MASK                  0xFF
+
+#define AMB_WIN_BASE_OFFSET(win)       (amb_base + 0x4 + (0x8 * win))
+#define AMB_BASE_OFFSET                        16
+#define AMB_BASE_ADDR_MASK             ((1 << (32 - AMB_BASE_OFFSET)) - 1)
+
+#define AMB_WIN_ALIGNMENT_64K          (0x10000)
+#define AMB_WIN_ALIGNMENT_1M           (0x100000)
+
+uintptr_t amb_base;
+
+static void amb_check_win(struct addr_map_win *win, uint32_t win_num)
+{
+       uint32_t base_addr;
+
+       /* make sure the base address is in 16-bit range */
+       if (win->base_addr > AMB_BASE_ADDR_MASK) {
+               WARN("Window %d: base address is too big 0x%llx\n",
+                      win_num, win->base_addr);
+               win->base_addr = AMB_BASE_ADDR_MASK;
+               WARN("Set the base address to 0x%llx\n", win->base_addr);
+       }
+
+       base_addr  = win->base_addr << AMB_BASE_OFFSET;
+       /* for AMB The base is always 1M aligned */
+       /* check if address is aligned to 1M */
+       if (IS_NOT_ALIGN(base_addr, AMB_WIN_ALIGNMENT_1M)) {
+               win->base_addr = ALIGN_UP(base_addr, AMB_WIN_ALIGNMENT_1M);
+               WARN("Window %d: base address unaligned to 0x%x\n",
+                      win_num, AMB_WIN_ALIGNMENT_1M);
+               WARN("Align up the base address to 0x%llx\n", win->base_addr);
+       }
+
+       /* size parameter validity check */
+       if (!IS_POWER_OF_2(win->win_size)) {
+               WARN("Window %d: window size is not power of 2 (0x%llx)\n",
+                      win_num, win->win_size);
+               win->win_size = ROUND_UP_TO_POW_OF_2(win->win_size);
+               WARN("Rounding size to 0x%llx\n", win->win_size);
+       }
+}
+
+static void amb_enable_win(struct addr_map_win *win, uint32_t win_num)
+{
+       uint32_t ctrl, base, size;
+
+       /*
+        * size is 64KB granularity.
+        * The number of ones specifies the size of the
+        * window in 64 KB granularity. 0 is 64KB
+        */
+       size = (win->win_size / AMB_WIN_ALIGNMENT_64K) - 1;
+       ctrl = (size << AMB_SIZE_OFFSET) | (win->target_id << AMB_ATTR_OFFSET);
+       base = win->base_addr << AMB_BASE_OFFSET;
+
+       mmio_write_32(AMB_WIN_BASE_OFFSET(win_num), base);
+       mmio_write_32(AMB_WIN_CR_OFFSET(win_num), ctrl);
+
+       /* enable window after configuring window size (and attributes) */
+       ctrl |= WIN_ENABLE_BIT;
+       mmio_write_32(AMB_WIN_CR_OFFSET(win_num), ctrl);
+}
+
+#ifdef DEBUG_ADDR_MAP
+static void dump_amb_adec(void)
+{
+       uint32_t ctrl, base, win_id, attr;
+       uint32_t size, size_count;
+
+       /* Dump all AMB windows */
+       tf_printf("bank  attribute     base          size\n");
+       tf_printf("--------------------------------------------\n");
+       for (win_id = 0; win_id < AMB_MAX_WIN_ID; win_id++) {
+               ctrl = mmio_read_32(AMB_WIN_CR_OFFSET(win_id));
+               if (ctrl & WIN_ENABLE_BIT) {
+                       base = mmio_read_32(AMB_WIN_BASE_OFFSET(win_id));
+                       attr = (ctrl >> AMB_ATTR_OFFSET) & AMB_ATTR_MASK;
+                       size_count = (ctrl >> AMB_SIZE_OFFSET) & AMB_SIZE_MASK;
+                       size = (size_count + 1) * AMB_WIN_ALIGNMENT_64K;
+                       tf_printf("amb   0x%04x        0x%08x    0x%08x\n",
+                                 attr, base, size);
+               }
+       }
+}
+#endif
+
+int init_amb_adec(uintptr_t base)
+{
+       struct addr_map_win *win;
+       uint32_t win_id, win_reg;
+       uint32_t win_count;
+
+       INFO("Initializing AXI to MBus Bridge Address decoding\n");
+
+       /* Get the base address of the AMB address decoding */
+       amb_base = base + MVEBU_AMB_ADEC_OFFSET;
+
+       /* Get the array of the windows and its size */
+       marvell_get_amb_memory_map(&win, &win_count, base);
+       if (win_count <= 0)
+               INFO("no windows configurations found\n");
+
+       if (win_count > AMB_MAX_WIN_ID) {
+               INFO("number of windows is bigger than %d\n", AMB_MAX_WIN_ID);
+               return 0;
+       }
+
+       /* disable all AMB windows */
+       for (win_id = 0; win_id < AMB_MAX_WIN_ID; win_id++) {
+               win_reg = mmio_read_32(AMB_WIN_CR_OFFSET(win_id));
+               win_reg &= ~WIN_ENABLE_BIT;
+               mmio_write_32(AMB_WIN_CR_OFFSET(win_id), win_reg);
+       }
+
+       /* enable relevant windows */
+       for (win_id = 0; win_id < win_count; win_id++, win++) {
+               amb_check_win(win, win_id);
+               amb_enable_win(win, win_id);
+       }
+
+#ifdef DEBUG_ADDR_MAP
+       dump_amb_adec();
+#endif
+
+       INFO("Done AXI to MBus Bridge Address decoding Initializing\n");
+
+       return 0;
+}
diff --git a/drivers/marvell/cache_llc.c b/drivers/marvell/cache_llc.c
new file mode 100644 (file)
index 0000000..e13e6ce
--- /dev/null
@@ -0,0 +1,109 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+/* LLC driver is the Last Level Cache (L3C) driver
+ * for Marvell SoCs in AP806, AP807, and AP810
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <cache_llc.h>
+#include <ccu.h>
+#include <mmio.h>
+#include <mvebu_def.h>
+
+#define CCU_HTC_CR(ap_index)           (MVEBU_CCU_BASE(ap_index) + 0x200)
+#define CCU_SET_POC_OFFSET             5
+
+extern void ca72_l2_enable_unique_clean(void);
+
+void llc_cache_sync(int ap_index)
+{
+       mmio_write_32(LLC_SYNC(ap_index), 0);
+       /* Atomic write, no need to wait */
+}
+
+void llc_flush_all(int ap_index)
+{
+       mmio_write_32(L2X0_CLEAN_INV_WAY(ap_index), LLC_WAY_MASK);
+       llc_cache_sync(ap_index);
+}
+
+void llc_clean_all(int ap_index)
+{
+       mmio_write_32(L2X0_CLEAN_WAY(ap_index), LLC_WAY_MASK);
+       llc_cache_sync(ap_index);
+}
+
+void llc_inv_all(int ap_index)
+{
+       mmio_write_32(L2X0_INV_WAY(ap_index), LLC_WAY_MASK);
+       llc_cache_sync(ap_index);
+}
+
+void llc_disable(int ap_index)
+{
+       llc_flush_all(ap_index);
+       mmio_write_32(LLC_CTRL(ap_index), 0);
+       dsbishst();
+}
+
+void llc_enable(int ap_index, int excl_mode)
+{
+       uint32_t val;
+
+       dsbsy();
+       llc_inv_all(ap_index);
+       dsbsy();
+
+       val = LLC_CTRL_EN;
+       if (excl_mode)
+               val |= LLC_EXCLUSIVE_EN;
+
+       mmio_write_32(LLC_CTRL(ap_index), val);
+       dsbsy();
+}
+
+int llc_is_exclusive(int ap_index)
+{
+       uint32_t reg;
+
+       reg = mmio_read_32(LLC_CTRL(ap_index));
+
+       if ((reg & (LLC_CTRL_EN | LLC_EXCLUSIVE_EN)) ==
+                  (LLC_CTRL_EN | LLC_EXCLUSIVE_EN))
+               return 1;
+
+       return 0;
+}
+
+void llc_runtime_enable(int ap_index)
+{
+       uint32_t reg;
+
+       reg = mmio_read_32(LLC_CTRL(ap_index));
+       if (reg & LLC_CTRL_EN)
+               return;
+
+       INFO("Enabling LLC\n");
+
+       /*
+        * Enable L2 UniqueClean evictions with data
+        *  Note: this configuration assumes that LLC is configured
+        *        in exclusive mode.
+        *        Later on in the code this assumption will be validated
+        */
+       ca72_l2_enable_unique_clean();
+       llc_enable(ap_index, 1);
+
+       /* Set point of coherency to DDR.
+        * This is required by units which have SW cache coherency
+        */
+       reg = mmio_read_32(CCU_HTC_CR(ap_index));
+       reg |= (0x1 << CCU_SET_POC_OFFSET);
+       mmio_write_32(CCU_HTC_CR(ap_index), reg);
+}
diff --git a/drivers/marvell/ccu.c b/drivers/marvell/ccu.c
new file mode 100644 (file)
index 0000000..e478d63
--- /dev/null
@@ -0,0 +1,361 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+/* CCU unit device driver for Marvell AP806, AP807 and AP810 SoCs */
+
+#include <a8k_common.h>
+#include <ccu.h>
+#include <debug.h>
+#include <mmio.h>
+#include <mvebu.h>
+#include <mvebu_def.h>
+
+#if LOG_LEVEL >= LOG_LEVEL_INFO
+#define DEBUG_ADDR_MAP
+#endif
+
+/* common defines */
+#define WIN_ENABLE_BIT                 (0x1)
+/* Physical address of the base of the window = {AddrLow[19:0],20'h0} */
+#define ADDRESS_SHIFT                  (20 - 4)
+#define ADDRESS_MASK                   (0xFFFFFFF0)
+#define CCU_WIN_ALIGNMENT              (0x100000)
+
+#define IS_DRAM_TARGET(tgt)            ((((tgt) == DRAM_0_TID) || \
+                                       ((tgt) == DRAM_1_TID) || \
+                                       ((tgt) == RAR_TID)) ? 1 : 0)
+
+/* For storage of CR, SCR, ALR, AHR and GCR */
+static uint32_t ccu_regs_save[MVEBU_CCU_MAX_WINS * 4 + 1];
+
+#ifdef DEBUG_ADDR_MAP
+static void dump_ccu(int ap_index)
+{
+       uint32_t win_id, win_cr, alr, ahr;
+       uint8_t target_id;
+       uint64_t start, end;
+
+       /* Dump all AP windows */
+       tf_printf("\tbank  target     start              end\n");
+       tf_printf("\t----------------------------------------------------\n");
+       for (win_id = 0; win_id < MVEBU_CCU_MAX_WINS; win_id++) {
+               win_cr = mmio_read_32(CCU_WIN_CR_OFFSET(ap_index, win_id));
+               if (win_cr & WIN_ENABLE_BIT) {
+                       target_id = (win_cr >> CCU_TARGET_ID_OFFSET) &
+                                    CCU_TARGET_ID_MASK;
+                       alr = mmio_read_32(CCU_WIN_ALR_OFFSET(ap_index,
+                                                             win_id));
+                       ahr = mmio_read_32(CCU_WIN_AHR_OFFSET(ap_index,
+                                                             win_id));
+                       start = ((uint64_t)alr << ADDRESS_SHIFT);
+                       end = (((uint64_t)ahr + 0x10) << ADDRESS_SHIFT);
+                       tf_printf("\tccu    %02x     0x%016llx 0x%016llx\n",
+                                 target_id, start, end);
+               }
+       }
+       win_cr = mmio_read_32(CCU_WIN_GCR_OFFSET(ap_index));
+       target_id = (win_cr >> CCU_GCR_TARGET_OFFSET) & CCU_GCR_TARGET_MASK;
+       tf_printf("\tccu   GCR %d - all other transactions\n", target_id);
+}
+#endif
+
+void ccu_win_check(struct addr_map_win *win)
+{
+       /* check if address is aligned to 1M */
+       if (IS_NOT_ALIGN(win->base_addr, CCU_WIN_ALIGNMENT)) {
+               win->base_addr = ALIGN_UP(win->base_addr, CCU_WIN_ALIGNMENT);
+               NOTICE("%s: Align up the base address to 0x%llx\n",
+                      __func__, win->base_addr);
+       }
+
+       /* size parameter validity check */
+       if (IS_NOT_ALIGN(win->win_size, CCU_WIN_ALIGNMENT)) {
+               win->win_size = ALIGN_UP(win->win_size, CCU_WIN_ALIGNMENT);
+               NOTICE("%s: Aligning size to 0x%llx\n",
+                      __func__, win->win_size);
+       }
+}
+
+void ccu_enable_win(int ap_index, struct addr_map_win *win, uint32_t win_id)
+{
+       uint32_t ccu_win_reg;
+       uint32_t alr, ahr;
+       uint64_t end_addr;
+
+       if ((win_id == 0) || (win_id > MVEBU_CCU_MAX_WINS)) {
+               ERROR("Enabling wrong CCU window %d!\n", win_id);
+               return;
+       }
+
+       end_addr = (win->base_addr + win->win_size - 1);
+       alr = (uint32_t)((win->base_addr >> ADDRESS_SHIFT) & ADDRESS_MASK);
+       ahr = (uint32_t)((end_addr >> ADDRESS_SHIFT) & ADDRESS_MASK);
+
+       mmio_write_32(CCU_WIN_ALR_OFFSET(ap_index, win_id), alr);
+       mmio_write_32(CCU_WIN_AHR_OFFSET(ap_index, win_id), ahr);
+
+       ccu_win_reg = WIN_ENABLE_BIT;
+       ccu_win_reg |= (win->target_id & CCU_TARGET_ID_MASK)
+                       << CCU_TARGET_ID_OFFSET;
+       mmio_write_32(CCU_WIN_CR_OFFSET(ap_index, win_id), ccu_win_reg);
+}
+
+static void ccu_disable_win(int ap_index, uint32_t win_id)
+{
+       uint32_t win_reg;
+
+       if ((win_id == 0) || (win_id > MVEBU_CCU_MAX_WINS)) {
+               ERROR("Disabling wrong CCU window %d!\n", win_id);
+               return;
+       }
+
+       win_reg = mmio_read_32(CCU_WIN_CR_OFFSET(ap_index, win_id));
+       win_reg &= ~WIN_ENABLE_BIT;
+       mmio_write_32(CCU_WIN_CR_OFFSET(ap_index, win_id), win_reg);
+}
+
+/* Insert/Remove temporary window for using the out-of reset default
+ * CPx base address to access the CP configuration space prior to
+ * the further base address update in accordance with address mapping
+ * design.
+ *
+ * NOTE: Use the same window array for insertion and removal of
+ *       temporary windows.
+ */
+void ccu_temp_win_insert(int ap_index, struct addr_map_win *win, int size)
+{
+       uint32_t win_id;
+
+       for (int i = 0; i < size; i++) {
+               win_id = MVEBU_CCU_MAX_WINS - 1 - i;
+               ccu_win_check(win);
+               ccu_enable_win(ap_index, win, win_id);
+               win++;
+       }
+}
+
+/*
+ * NOTE: Use the same window array for insertion and removal of
+ *       temporary windows.
+ */
+void ccu_temp_win_remove(int ap_index, struct addr_map_win *win, int size)
+{
+       uint32_t win_id;
+
+       for (int i = 0; i < size; i++) {
+               uint64_t base;
+               uint32_t target;
+
+               win_id = MVEBU_CCU_MAX_WINS - 1 - i;
+
+               target = mmio_read_32(CCU_WIN_CR_OFFSET(ap_index, win_id));
+               target >>= CCU_TARGET_ID_OFFSET;
+               target &= CCU_TARGET_ID_MASK;
+
+               base = mmio_read_32(CCU_WIN_ALR_OFFSET(ap_index, win_id));
+               base <<= ADDRESS_SHIFT;
+
+               if ((win->target_id != target) || (win->base_addr != base)) {
+                       ERROR("%s: Trying to remove bad window-%d!\n",
+                             __func__, win_id);
+                       continue;
+               }
+               ccu_disable_win(ap_index, win_id);
+               win++;
+       }
+}
+
+/* Returns current DRAM window target (DRAM_0_TID, DRAM_1_TID, RAR_TID)
+ * NOTE: Call only once for each AP.
+ * The AP0 DRAM window is located at index 2 only at the BL31 execution start.
+ * Then it relocated to index 1 for matching the rest of APs DRAM settings.
+ * Calling this function after relocation will produce wrong results on AP0
+ */
+static uint32_t ccu_dram_target_get(int ap_index)
+{
+       /* On BLE stage the AP0 DRAM window is opened by the BootROM at index 2.
+        * All the rest of detected APs will use window at index 1.
+        * The AP0 DRAM window is moved from index 2 to 1 during
+        * init_ccu() execution.
+        */
+       const uint32_t win_id = (ap_index == 0) ? 2 : 1;
+       uint32_t target;
+
+       target = mmio_read_32(CCU_WIN_CR_OFFSET(ap_index, win_id));
+       target >>= CCU_TARGET_ID_OFFSET;
+       target &= CCU_TARGET_ID_MASK;
+
+       return target;
+}
+
+void ccu_dram_target_set(int ap_index, uint32_t target)
+{
+       /* On BLE stage the AP0 DRAM window is opened by the BootROM at index 2.
+        * All the rest of detected APs will use window at index 1.
+        * The AP0 DRAM window is moved from index 2 to 1
+        * during init_ccu() execution.
+        */
+       const uint32_t win_id = (ap_index == 0) ? 2 : 1;
+       uint32_t dram_cr;
+
+       dram_cr = mmio_read_32(CCU_WIN_CR_OFFSET(ap_index, win_id));
+       dram_cr &= ~(CCU_TARGET_ID_MASK << CCU_TARGET_ID_OFFSET);
+       dram_cr |= (target & CCU_TARGET_ID_MASK) << CCU_TARGET_ID_OFFSET;
+       mmio_write_32(CCU_WIN_CR_OFFSET(ap_index, win_id), dram_cr);
+}
+
+/* Setup CCU DRAM window and enable it */
+void ccu_dram_win_config(int ap_index, struct addr_map_win *win)
+{
+#if IMAGE_BLE /* BLE */
+       /* On BLE stage the AP0 DRAM window is opened by the BootROM at index 2.
+        * Since the BootROM is not accessing DRAM at BLE stage,
+        * the DRAM window can be temporarily disabled.
+        */
+       const uint32_t win_id = (ap_index == 0) ? 2 : 1;
+#else /* end of BLE */
+       /* At the ccu_init() execution stage, DRAM windows of all APs
+        * are arranged at index 1.
+        * The AP0 still has the old window BootROM DRAM at index 2, so
+        * the window-1 can be safely disabled without breaking the DRAM access.
+        */
+       const uint32_t win_id = 1;
+#endif
+
+       ccu_disable_win(ap_index, win_id);
+       /* enable write secure (and clear read secure) */
+       mmio_write_32(CCU_WIN_SCR_OFFSET(ap_index, win_id),
+                     CCU_WIN_ENA_WRITE_SECURE);
+       ccu_win_check(win);
+       ccu_enable_win(ap_index, win, win_id);
+}
+
+/* Save content of CCU window + GCR */
+static void ccu_save_win_range(int ap_id, int win_first,
+                              int win_last, uint32_t *buffer)
+{
+       int win_id, idx;
+       /* Save CCU */
+       for (idx = 0, win_id = win_first; win_id <= win_last; win_id++) {
+               buffer[idx++] = mmio_read_32(CCU_WIN_CR_OFFSET(ap_id, win_id));
+               buffer[idx++] = mmio_read_32(CCU_WIN_SCR_OFFSET(ap_id, win_id));
+               buffer[idx++] = mmio_read_32(CCU_WIN_ALR_OFFSET(ap_id, win_id));
+               buffer[idx++] = mmio_read_32(CCU_WIN_AHR_OFFSET(ap_id, win_id));
+       }
+       buffer[idx] = mmio_read_32(CCU_WIN_GCR_OFFSET(ap_id));
+}
+
+/* Restore content of CCU window + GCR */
+static void ccu_restore_win_range(int ap_id, int win_first,
+                                 int win_last, uint32_t *buffer)
+{
+       int win_id, idx;
+       /* Restore CCU */
+       for (idx = 0, win_id = win_first; win_id <= win_last; win_id++) {
+               mmio_write_32(CCU_WIN_CR_OFFSET(ap_id, win_id),  buffer[idx++]);
+               mmio_write_32(CCU_WIN_SCR_OFFSET(ap_id, win_id), buffer[idx++]);
+               mmio_write_32(CCU_WIN_ALR_OFFSET(ap_id, win_id), buffer[idx++]);
+               mmio_write_32(CCU_WIN_AHR_OFFSET(ap_id, win_id), buffer[idx++]);
+       }
+       mmio_write_32(CCU_WIN_GCR_OFFSET(ap_id), buffer[idx]);
+}
+
+void ccu_save_win_all(int ap_id)
+{
+       ccu_save_win_range(ap_id, 0, MVEBU_CCU_MAX_WINS - 1, ccu_regs_save);
+}
+
+void ccu_restore_win_all(int ap_id)
+{
+       ccu_restore_win_range(ap_id, 0, MVEBU_CCU_MAX_WINS - 1, ccu_regs_save);
+}
+
+int init_ccu(int ap_index)
+{
+       struct addr_map_win *win, *dram_win;
+       uint32_t win_id, win_reg;
+       uint32_t win_count, array_id;
+       uint32_t dram_target;
+#if IMAGE_BLE
+       /* In BootROM context CCU Window-1
+        * has SRAM_TID target and should not be disabled
+        */
+       const uint32_t win_start = 2;
+#else
+       const uint32_t win_start = 1;
+#endif
+
+       INFO("Initializing CCU Address decoding\n");
+
+       /* Get the array of the windows and fill the map data */
+       marvell_get_ccu_memory_map(ap_index, &win, &win_count);
+       if (win_count <= 0) {
+               INFO("No windows configurations found\n");
+       } else if (win_count > (MVEBU_CCU_MAX_WINS - 1)) {
+               ERROR("CCU mem map array > than max available windows (%d)\n",
+                     MVEBU_CCU_MAX_WINS);
+               win_count = MVEBU_CCU_MAX_WINS;
+       }
+
+       /* Need to set GCR to DRAM before all CCU windows are disabled for
+        * securing the normal access to DRAM location, which the ATF is running
+        * from. Once all CCU windows are set, which have to include the
+        * dedicated DRAM window as well, the GCR can be switched to the target
+        * defined by the platform configuration.
+        */
+       dram_target = ccu_dram_target_get(ap_index);
+       win_reg = (dram_target & CCU_GCR_TARGET_MASK) << CCU_GCR_TARGET_OFFSET;
+       mmio_write_32(CCU_WIN_GCR_OFFSET(ap_index), win_reg);
+
+       /* If the DRAM window was already configured at the BLE stage,
+        * only the window target considered valid, the address range should be
+        * updated according to the platform configuration.
+        */
+       for (dram_win = win, array_id = 0; array_id < win_count;
+            array_id++, dram_win++) {
+               if (IS_DRAM_TARGET(dram_win->target_id)) {
+                       dram_win->target_id = dram_target;
+                       break;
+               }
+       }
+
+       /* Disable all AP CCU windows
+        * Window-0 is always bypassed since it already contains
+        * data allowing the internal configuration space access
+        */
+       for (win_id = win_start; win_id < MVEBU_CCU_MAX_WINS; win_id++) {
+               ccu_disable_win(ap_index, win_id);
+               /* enable write secure (and clear read secure) */
+               mmio_write_32(CCU_WIN_SCR_OFFSET(ap_index, win_id),
+                             CCU_WIN_ENA_WRITE_SECURE);
+       }
+
+       /* win_id is the index of the current ccu window
+        * array_id is the index of the current memory map window entry
+        */
+       for (win_id = win_start, array_id = 0;
+           ((win_id < MVEBU_CCU_MAX_WINS) && (array_id < win_count));
+           win_id++) {
+               ccu_win_check(win);
+               ccu_enable_win(ap_index, win, win_id);
+               win++;
+               array_id++;
+       }
+
+       /* Get & set the default target according to board topology */
+       win_reg = (marvell_get_ccu_gcr_target(ap_index) & CCU_GCR_TARGET_MASK)
+                  << CCU_GCR_TARGET_OFFSET;
+       mmio_write_32(CCU_WIN_GCR_OFFSET(ap_index), win_reg);
+
+#ifdef DEBUG_ADDR_MAP
+       dump_ccu(ap_index);
+#endif
+
+       INFO("Done CCU Address decoding Initializing\n");
+
+       return 0;
+}
diff --git a/drivers/marvell/comphy.h b/drivers/marvell/comphy.h
new file mode 100644 (file)
index 0000000..788b1b6
--- /dev/null
@@ -0,0 +1,473 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+/* Driver for the COMPHY unit that is part of Marvell A8K SoCs */
+
+#ifndef _COMPHY_H_ /* NOTE(review): leading-underscore+uppercase names are reserved (C11 7.1.3); consider COMPHY_H */
+#define _COMPHY_H_
+
+/* COMPHY registers */
+#define COMMON_PHY_CFG1_REG                    0x0
+#define COMMON_PHY_CFG1_PWR_UP_OFFSET          1
+#define COMMON_PHY_CFG1_PWR_UP_MASK            \
+                               (0x1 << COMMON_PHY_CFG1_PWR_UP_OFFSET)
+#define COMMON_PHY_CFG1_PIPE_SELECT_OFFSET     2
+#define COMMON_PHY_CFG1_PIPE_SELECT_MASK       \
+                               (0x1 << COMMON_PHY_CFG1_PIPE_SELECT_OFFSET)
+#define COMMON_PHY_CFG1_PWR_ON_RESET_OFFSET    13 /* NOTE(review): comphy-cp110.h in this same patch defines CORE_RSTN=13 / PWR_ON_RESET=14 (swapped vs. here) -- confirm both against the CP110 register spec */
+#define COMMON_PHY_CFG1_PWR_ON_RESET_MASK      \
+                               (0x1 << COMMON_PHY_CFG1_PWR_ON_RESET_OFFSET)
+#define COMMON_PHY_CFG1_CORE_RSTN_OFFSET       14
+#define COMMON_PHY_CFG1_CORE_RSTN_MASK         \
+                               (0x1 << COMMON_PHY_CFG1_CORE_RSTN_OFFSET)
+#define COMMON_PHY_PHY_MODE_OFFSET             15
+#define COMMON_PHY_PHY_MODE_MASK               \
+                               (0x1 << COMMON_PHY_PHY_MODE_OFFSET)
+
+#define COMMON_SELECTOR_PHY_OFFSET                     0x140
+#define COMMON_SELECTOR_PIPE_OFFSET                    0x144
+
+#define COMMON_PHY_SD_CTRL1                            0x148
+#define COMMON_PHY_SD_CTRL1_COMPHY_0_4_PORT_OFFSET     0
+#define COMMON_PHY_SD_CTRL1_COMPHY_0_4_PORT_MASK       0xFFFF
+#define COMMON_PHY_SD_CTRL1_PCIE_X4_EN_OFFSET          24
+#define COMMON_PHY_SD_CTRL1_PCIE_X4_EN_MASK            \
+                               (0x1 << COMMON_PHY_SD_CTRL1_PCIE_X4_EN_OFFSET)
+#define COMMON_PHY_SD_CTRL1_PCIE_X2_EN_OFFSET          25
+#define COMMON_PHY_SD_CTRL1_PCIE_X2_EN_MASK            \
+                               (0x1 << COMMON_PHY_SD_CTRL1_PCIE_X2_EN_OFFSET)
+
+#define DFX_DEV_GEN_CTRL12                     0x80
+#define DFX_DEV_GEN_PCIE_CLK_SRC_OFFSET                7
+#define DFX_DEV_GEN_PCIE_CLK_SRC_MASK          \
+                               (0x3 << DFX_DEV_GEN_PCIE_CLK_SRC_OFFSET)
+
+/* HPIPE register */
+#define HPIPE_PWR_PLL_REG                      0x4
+#define HPIPE_PWR_PLL_REF_FREQ_OFFSET          0
+#define HPIPE_PWR_PLL_REF_FREQ_MASK            \
+                               (0x1f << HPIPE_PWR_PLL_REF_FREQ_OFFSET)
+#define HPIPE_PWR_PLL_PHY_MODE_OFFSET          5
+#define HPIPE_PWR_PLL_PHY_MODE_MASK            \
+                               (0x7 << HPIPE_PWR_PLL_PHY_MODE_OFFSET)
+
+#define HPIPE_DFE_REG0                         0x01C
+#define HPIPE_DFE_RES_FORCE_OFFSET             15
+#define HPIPE_DFE_RES_FORCE_MASK               \
+                               (0x1 << HPIPE_DFE_RES_FORCE_OFFSET)
+
+#define HPIPE_G2_SET_1_REG                     0x040
+#define HPIPE_G2_SET_1_G2_RX_SELMUPI_OFFSET    0
+#define HPIPE_G2_SET_1_G2_RX_SELMUPI_MASK      \
+                               (0x7 << HPIPE_G2_SET_1_G2_RX_SELMUPI_OFFSET)
+#define HPIPE_G2_SET_1_G2_RX_SELMUPP_OFFSET    3
+#define HPIPE_G2_SET_1_G2_RX_SELMUPP_MASK      \
+                               (0x7 << HPIPE_G2_SET_1_G2_RX_SELMUPP_OFFSET)
+#define HPIPE_G2_SET_1_G2_RX_SELMUFI_OFFSET    6
+#define HPIPE_G2_SET_1_G2_RX_SELMUFI_MASK      \
+                               (0x3 << HPIPE_G2_SET_1_G2_RX_SELMUFI_OFFSET)
+
+#define HPIPE_G3_SETTINGS_1_REG                        0x048
+#define HPIPE_G3_RX_SELMUPI_OFFSET             0
+#define HPIPE_G3_RX_SELMUPI_MASK               \
+                               (0x7 << HPIPE_G3_RX_SELMUPI_OFFSET)
+#define HPIPE_G3_RX_SELMUPF_OFFSET             3
+#define HPIPE_G3_RX_SELMUPF_MASK               \
+                               (0x7 << HPIPE_G3_RX_SELMUPF_OFFSET)
+#define HPIPE_G3_SETTING_BIT_OFFSET            13
+#define HPIPE_G3_SETTING_BIT_MASK              \
+                               (0x1 << HPIPE_G3_SETTING_BIT_OFFSET)
+
+#define HPIPE_INTERFACE_REG                    0x94
+#define HPIPE_INTERFACE_GEN_MAX_OFFSET         10
+#define HPIPE_INTERFACE_GEN_MAX_MASK           \
+                               (0x3 << HPIPE_INTERFACE_GEN_MAX_OFFSET)
+#define HPIPE_INTERFACE_DET_BYPASS_OFFSET      12
+#define HPIPE_INTERFACE_DET_BYPASS_MASK                \
+                               (0x1 << HPIPE_INTERFACE_DET_BYPASS_OFFSET)
+#define HPIPE_INTERFACE_LINK_TRAIN_OFFSET      14
+#define HPIPE_INTERFACE_LINK_TRAIN_MASK                \
+                               (0x1 << HPIPE_INTERFACE_LINK_TRAIN_OFFSET)
+
+#define HPIPE_VDD_CAL_CTRL_REG                 0x114
+#define HPIPE_EXT_SELLV_RXSAMPL_OFFSET         5
+#define HPIPE_EXT_SELLV_RXSAMPL_MASK           \
+                               (0x1f << HPIPE_EXT_SELLV_RXSAMPL_OFFSET)
+
+#define HPIPE_PCIE_REG0                                0x120
+#define HPIPE_PCIE_IDLE_SYNC_OFFSET            12
+#define HPIPE_PCIE_IDLE_SYNC_MASK              \
+                               (0x1 << HPIPE_PCIE_IDLE_SYNC_OFFSET)
+#define HPIPE_PCIE_SEL_BITS_OFFSET             13
+#define HPIPE_PCIE_SEL_BITS_MASK               \
+                               (0x3 << HPIPE_PCIE_SEL_BITS_OFFSET)
+
+#define HPIPE_LANE_ALIGN_REG                   0x124
+#define HPIPE_LANE_ALIGN_OFF_OFFSET            12
+#define HPIPE_LANE_ALIGN_OFF_MASK              \
+                               (0x1 << HPIPE_LANE_ALIGN_OFF_OFFSET)
+
+#define HPIPE_MISC_REG                         0x13C
+#define HPIPE_MISC_CLK100M_125M_OFFSET         4
+#define HPIPE_MISC_CLK100M_125M_MASK           \
+                               (0x1 << HPIPE_MISC_CLK100M_125M_OFFSET)
+#define HPIPE_MISC_ICP_FORCE_OFFSET            5
+#define HPIPE_MISC_ICP_FORCE_MASK              \
+                               (0x1 << HPIPE_MISC_ICP_FORCE_OFFSET)
+#define HPIPE_MISC_TXDCLK_2X_OFFSET            6
+#define HPIPE_MISC_TXDCLK_2X_MASK              \
+                               (0x1 << HPIPE_MISC_TXDCLK_2X_OFFSET)
+#define HPIPE_MISC_CLK500_EN_OFFSET            7
+#define HPIPE_MISC_CLK500_EN_MASK              \
+                               (0x1 << HPIPE_MISC_CLK500_EN_OFFSET)
+#define HPIPE_MISC_REFCLK_SEL_OFFSET           10
+#define HPIPE_MISC_REFCLK_SEL_MASK             \
+                               (0x1 << HPIPE_MISC_REFCLK_SEL_OFFSET)
+
+#define HPIPE_SAMPLER_N_PROC_CALIB_CTRL_REG    0x16C
+#define HPIPE_SMAPLER_OFFSET                   12 /* NOTE(review): "SMAPLER" is a typo for "SAMPLER"; kept as-is since the .c driver references this exact identifier */
+#define HPIPE_SMAPLER_MASK                     (0x1 << HPIPE_SMAPLER_OFFSET)
+
+#define HPIPE_PWR_CTR_DTL_REG                  0x184
+#define HPIPE_PWR_CTR_DTL_FLOOP_EN_OFFSET      2
+#define HPIPE_PWR_CTR_DTL_FLOOP_EN_MASK                \
+                               (0x1 << HPIPE_PWR_CTR_DTL_FLOOP_EN_OFFSET)
+
+#define HPIPE_FRAME_DET_CONTROL_REG            0x220
+#define HPIPE_FRAME_DET_LOCK_LOST_TO_OFFSET    12
+#define HPIPE_FRAME_DET_LOCK_LOST_TO_MASK      \
+                               (0x1 << HPIPE_FRAME_DET_LOCK_LOST_TO_OFFSET)
+
+#define HPIPE_TX_TRAIN_CTRL_0_REG              0x268
+#define HPIPE_TX_TRAIN_P2P_HOLD_OFFSET         15
+#define HPIPE_TX_TRAIN_P2P_HOLD_MASK           \
+                               (0x1 << HPIPE_TX_TRAIN_P2P_HOLD_OFFSET)
+
+#define HPIPE_TX_TRAIN_CTRL_REG                        0x26C
+#define HPIPE_TX_TRAIN_CTRL_G1_OFFSET          0
+#define HPIPE_TX_TRAIN_CTRL_G1_MASK            \
+                               (0x1 << HPIPE_TX_TRAIN_CTRL_G1_OFFSET)
+#define HPIPE_TX_TRAIN_CTRL_GN1_OFFSET         1
+#define HPIPE_TX_TRAIN_CTRL_GN1_MASK           \
+                               (0x1 << HPIPE_TX_TRAIN_CTRL_GN1_OFFSET)
+#define HPIPE_TX_TRAIN_CTRL_G0_OFFSET          2
+#define HPIPE_TX_TRAIN_CTRL_G0_MASK            \
+                               (0x1 << HPIPE_TX_TRAIN_CTRL_G0_OFFSET)
+
+#define HPIPE_TX_TRAIN_CTRL_4_REG              0x278
+#define HPIPE_TRX_TRAIN_TIMER_OFFSET           0
+#define HPIPE_TRX_TRAIN_TIMER_MASK             \
+                               (0x3FF << HPIPE_TRX_TRAIN_TIMER_OFFSET)
+
+#define HPIPE_TX_TRAIN_CTRL_5_REG              0x2A4
+#define HPIPE_TX_TRAIN_START_SQ_EN_OFFSET      11
+#define HPIPE_TX_TRAIN_START_SQ_EN_MASK                \
+                               (0x1 << HPIPE_TX_TRAIN_START_SQ_EN_OFFSET)
+#define HPIPE_TX_TRAIN_START_FRM_DET_EN_OFFSET 12
+#define HPIPE_TX_TRAIN_START_FRM_DET_EN_MASK   \
+                               (0x1 << HPIPE_TX_TRAIN_START_FRM_DET_EN_OFFSET)
+#define HPIPE_TX_TRAIN_START_FRM_LOCK_EN_OFFSET        13
+#define HPIPE_TX_TRAIN_START_FRM_LOCK_EN_MASK  \
+                               (0x1 << HPIPE_TX_TRAIN_START_FRM_LOCK_EN_OFFSET)
+#define HPIPE_TX_TRAIN_WAIT_TIME_EN_OFFSET     14
+#define HPIPE_TX_TRAIN_WAIT_TIME_EN_MASK       \
+                               (0x1 << HPIPE_TX_TRAIN_WAIT_TIME_EN_OFFSET)
+
+#define HPIPE_TX_TRAIN_REG                     0x31C
+#define HPIPE_TX_TRAIN_CHK_INIT_OFFSET         4
+#define HPIPE_TX_TRAIN_CHK_INIT_MASK           \
+                               (0x1 << HPIPE_TX_TRAIN_CHK_INIT_OFFSET)
+#define HPIPE_TX_TRAIN_COE_FM_PIN_PCIE3_OFFSET 7
+#define HPIPE_TX_TRAIN_COE_FM_PIN_PCIE3_MASK   \
+                               (0x1 << HPIPE_TX_TRAIN_COE_FM_PIN_PCIE3_OFFSET)
+
+#define HPIPE_CDR_CONTROL_REG                  0x418
+#define HPIPE_CDR_RX_MAX_DFE_ADAPT_0_OFFSET    14
+#define HPIPE_CDR_RX_MAX_DFE_ADAPT_0_MASK      \
+                               (0x3 << HPIPE_CDR_RX_MAX_DFE_ADAPT_0_OFFSET)
+#define HPIPE_CDR_RX_MAX_DFE_ADAPT_1_OFFSET    12
+#define HPIPE_CDR_RX_MAX_DFE_ADAPT_1_MASK      \
+                               (0x3 << HPIPE_CDR_RX_MAX_DFE_ADAPT_1_OFFSET)
+#define HPIPE_CDR_MAX_DFE_ADAPT_0_OFFSET       9
+#define HPIPE_CDR_MAX_DFE_ADAPT_0_MASK         \
+                               (0x7 << HPIPE_CDR_MAX_DFE_ADAPT_0_OFFSET)
+#define HPIPE_CDR_MAX_DFE_ADAPT_1_OFFSET       6
+#define HPIPE_CDR_MAX_DFE_ADAPT_1_MASK         \
+                               (0x7 << HPIPE_CDR_MAX_DFE_ADAPT_1_OFFSET)
+
+#define HPIPE_TX_TRAIN_CTRL_11_REG             0x438
+#define HPIPE_TX_STATUS_CHECK_MODE_OFFSET      6 /* NOTE(review): mask below is spelled HPIPE_TX_TX_... (doubled "TX"), inconsistent with this offset name; renaming would break existing users */
+#define HPIPE_TX_TX_STATUS_CHECK_MODE_MASK     \
+                               (0x1 << HPIPE_TX_STATUS_CHECK_MODE_OFFSET)
+#define HPIPE_TX_NUM_OF_PRESET_OFFSET          10
+#define HPIPE_TX_NUM_OF_PRESET_MASK            \
+                               (0x7 << HPIPE_TX_NUM_OF_PRESET_OFFSET)
+#define HPIPE_TX_SWEEP_PRESET_EN_OFFSET                15
+#define HPIPE_TX_SWEEP_PRESET_EN_MASK          \
+                               (0x1 << HPIPE_TX_SWEEP_PRESET_EN_OFFSET)
+#define HPIPE_G2_SETTINGS_4_REG                        0x44C
+#define HPIPE_G2_DFE_RES_OFFSET                        8
+#define HPIPE_G2_DFE_RES_MASK                  (0x3 << HPIPE_G2_DFE_RES_OFFSET)
+
+#define HPIPE_G3_SETTING_3_REG                 0x450
+#define HPIPE_G3_FFE_CAP_SEL_OFFSET            0
+#define HPIPE_G3_FFE_CAP_SEL_MASK              \
+                               (0xf << HPIPE_G3_FFE_CAP_SEL_OFFSET)
+#define HPIPE_G3_FFE_RES_SEL_OFFSET            4
+#define HPIPE_G3_FFE_RES_SEL_MASK              \
+                               (0x7 << HPIPE_G3_FFE_RES_SEL_OFFSET)
+#define HPIPE_G3_FFE_SETTING_FORCE_OFFSET      7
+#define HPIPE_G3_FFE_SETTING_FORCE_MASK                \
+                               (0x1 << HPIPE_G3_FFE_SETTING_FORCE_OFFSET)
+#define HPIPE_G3_FFE_DEG_RES_LEVEL_OFFSET      12
+#define HPIPE_G3_FFE_DEG_RES_LEVEL_MASK                \
+                               (0x3 << HPIPE_G3_FFE_DEG_RES_LEVEL_OFFSET)
+#define HPIPE_G3_FFE_LOAD_RES_LEVEL_OFFSET     14
+#define HPIPE_G3_FFE_LOAD_RES_LEVEL_MASK       \
+                               (0x3 << HPIPE_G3_FFE_LOAD_RES_LEVEL_OFFSET)
+
+#define HPIPE_G3_SETTING_4_REG                 0x454
+#define HPIPE_G3_DFE_RES_OFFSET                        8
+#define HPIPE_G3_DFE_RES_MASK                  (0x3 << HPIPE_G3_DFE_RES_OFFSET)
+
+#define HPIPE_DFE_CONTROL_REG                  0x470
+#define HPIPE_DFE_TX_MAX_DFE_ADAPT_OFFSET      14
+#define HPIPE_DFE_TX_MAX_DFE_ADAPT_MASK                \
+                               (0x3 << HPIPE_DFE_TX_MAX_DFE_ADAPT_OFFSET)
+
+#define HPIPE_DFE_CTRL_28_REG                  0x49C
+#define HPIPE_DFE_CTRL_28_PIPE4_OFFSET         7
+#define HPIPE_DFE_CTRL_28_PIPE4_MASK           \
+                               (0x1 << HPIPE_DFE_CTRL_28_PIPE4_OFFSET)
+
+#define HPIPE_G3_SETTING_5_REG                 0x548
+#define HPIPE_G3_SETTING_5_G3_ICP_OFFSET       0
+#define HPIPE_G3_SETTING_5_G3_ICP_MASK         \
+                               (0xf << HPIPE_G3_SETTING_5_G3_ICP_OFFSET)
+
+#define HPIPE_LANE_STATUS1_REG                 0x60C
+#define HPIPE_LANE_STATUS1_PCLK_EN_OFFSET      0
+#define HPIPE_LANE_STATUS1_PCLK_EN_MASK                \
+                               (0x1 << HPIPE_LANE_STATUS1_PCLK_EN_OFFSET)
+
+#define HPIPE_LANE_CFG4_REG                    0x620
+#define HPIPE_LANE_CFG4_DFE_EN_SEL_OFFSET      3
+#define HPIPE_LANE_CFG4_DFE_EN_SEL_MASK                \
+                               (0x1 << HPIPE_LANE_CFG4_DFE_EN_SEL_OFFSET)
+
+#define HPIPE_LANE_EQU_CONFIG_0_REG            0x69C
+#define HPIPE_CFG_EQ_FS_OFFSET                 0
+#define HPIPE_CFG_EQ_FS_MASK                   (0x3f << HPIPE_CFG_EQ_FS_OFFSET)
+#define HPIPE_CFG_EQ_LF_OFFSET                 6
+#define HPIPE_CFG_EQ_LF_MASK                   (0x3f << HPIPE_CFG_EQ_LF_OFFSET)
+#define HPIPE_CFG_PHY_RC_EP_OFFSET             12
+#define HPIPE_CFG_PHY_RC_EP_MASK               \
+                               (0x1 << HPIPE_CFG_PHY_RC_EP_OFFSET)
+
+#define HPIPE_LANE_EQ_CFG1_REG                 0x6a0
+#define HPIPE_CFG_UPDATE_POLARITY_OFFSET       12
+#define HPIPE_CFG_UPDATE_POLARITY_MASK         \
+                               (0x1 << HPIPE_CFG_UPDATE_POLARITY_OFFSET)
+
+#define HPIPE_LANE_EQ_CFG2_REG                 0x6a4
+#define HPIPE_CFG_EQ_BUNDLE_DIS_OFFSET         14
+#define HPIPE_CFG_EQ_BUNDLE_DIS_MASK           \
+                               (0x1 << HPIPE_CFG_EQ_BUNDLE_DIS_OFFSET)
+
+#define HPIPE_LANE_PRESET_CFG0_REG             0x6a8
+#define HPIPE_CFG_CURSOR_PRESET0_OFFSET                0
+#define HPIPE_CFG_CURSOR_PRESET0_MASK          \
+                               (0x3f << HPIPE_CFG_CURSOR_PRESET0_OFFSET)
+#define HPIPE_CFG_CURSOR_PRESET1_OFFSET                6
+#define HPIPE_CFG_CURSOR_PRESET1_MASK          \
+                               (0x3f << HPIPE_CFG_CURSOR_PRESET1_OFFSET)
+
+#define HPIPE_LANE_PRESET_CFG1_REG             0x6ac
+#define HPIPE_CFG_CURSOR_PRESET2_OFFSET                0
+#define HPIPE_CFG_CURSOR_PRESET2_MASK          \
+                               (0x3f << HPIPE_CFG_CURSOR_PRESET2_OFFSET)
+#define HPIPE_CFG_CURSOR_PRESET3_OFFSET                6
+#define HPIPE_CFG_CURSOR_PRESET3_MASK          \
+                               (0x3f << HPIPE_CFG_CURSOR_PRESET3_OFFSET)
+
+#define HPIPE_LANE_PRESET_CFG2_REG             0x6b0
+#define HPIPE_CFG_CURSOR_PRESET4_OFFSET                0
+#define HPIPE_CFG_CURSOR_PRESET4_MASK          \
+                               (0x3f << HPIPE_CFG_CURSOR_PRESET4_OFFSET)
+#define HPIPE_CFG_CURSOR_PRESET5_OFFSET                6
+#define HPIPE_CFG_CURSOR_PRESET5_MASK          \
+                               (0x3f << HPIPE_CFG_CURSOR_PRESET5_OFFSET)
+
+#define HPIPE_LANE_PRESET_CFG3_REG             0x6b4
+#define HPIPE_CFG_CURSOR_PRESET6_OFFSET                0
+#define HPIPE_CFG_CURSOR_PRESET6_MASK          \
+                               (0x3f << HPIPE_CFG_CURSOR_PRESET6_OFFSET)
+#define HPIPE_CFG_CURSOR_PRESET7_OFFSET                6
+#define HPIPE_CFG_CURSOR_PRESET7_MASK          \
+                               (0x3f << HPIPE_CFG_CURSOR_PRESET7_OFFSET)
+
+#define HPIPE_LANE_PRESET_CFG4_REG             0x6b8
+#define HPIPE_CFG_CURSOR_PRESET8_OFFSET                0
+#define HPIPE_CFG_CURSOR_PRESET8_MASK          \
+                               (0x3f << HPIPE_CFG_CURSOR_PRESET8_OFFSET)
+#define HPIPE_CFG_CURSOR_PRESET9_OFFSET                6
+#define HPIPE_CFG_CURSOR_PRESET9_MASK          \
+                               (0x3f << HPIPE_CFG_CURSOR_PRESET9_OFFSET)
+
+#define HPIPE_LANE_PRESET_CFG5_REG             0x6bc
+#define HPIPE_CFG_CURSOR_PRESET10_OFFSET       0
+#define HPIPE_CFG_CURSOR_PRESET10_MASK         \
+                               (0x3f << HPIPE_CFG_CURSOR_PRESET10_OFFSET)
+#define HPIPE_CFG_CURSOR_PRESET11_OFFSET       6
+#define HPIPE_CFG_CURSOR_PRESET11_MASK         \
+                               (0x3f << HPIPE_CFG_CURSOR_PRESET11_OFFSET)
+
+#define HPIPE_LANE_PRESET_CFG6_REG             0x6c0
+#define HPIPE_CFG_PRE_CURSOR_PRESET0_OFFSET    0
+#define HPIPE_CFG_PRE_CURSOR_PRESET0_MASK      \
+                               (0x3f << HPIPE_CFG_PRE_CURSOR_PRESET0_OFFSET)
+#define HPIPE_CFG_POST_CURSOR_PRESET0_OFFSET   6
+#define HPIPE_CFG_POST_CURSOR_PRESET0_MASK     \
+                               (0x3f << HPIPE_CFG_POST_CURSOR_PRESET0_OFFSET)
+
+#define HPIPE_LANE_PRESET_CFG7_REG             0x6c4
+#define HPIPE_CFG_PRE_CURSOR_PRESET1_OFFSET    0
+#define HPIPE_CFG_PRE_CURSOR_PRESET1_MASK      \
+                               (0x3f << HPIPE_CFG_PRE_CURSOR_PRESET1_OFFSET)
+#define HPIPE_CFG_POST_CURSOR_PRESET1_OFFSET   6
+#define HPIPE_CFG_POST_CURSOR_PRESET1_MASK     \
+                               (0x3f << HPIPE_CFG_POST_CURSOR_PRESET1_OFFSET)
+
+#define HPIPE_LANE_PRESET_CFG8_REG             0x6c8
+#define HPIPE_CFG_PRE_CURSOR_PRESET2_OFFSET    0
+#define HPIPE_CFG_PRE_CURSOR_PRESET2_MASK      \
+                               (0x3f << HPIPE_CFG_PRE_CURSOR_PRESET2_OFFSET)
+#define HPIPE_CFG_POST_CURSOR_PRESET2_OFFSET   6
+#define HPIPE_CFG_POST_CURSOR_PRESET2_MASK     \
+                               (0x3f << HPIPE_CFG_POST_CURSOR_PRESET2_OFFSET)
+
+#define HPIPE_LANE_PRESET_CFG9_REG             0x6cc
+#define HPIPE_CFG_PRE_CURSOR_PRESET3_OFFSET    0
+#define HPIPE_CFG_PRE_CURSOR_PRESET3_MASK      \
+                               (0x3f << HPIPE_CFG_PRE_CURSOR_PRESET3_OFFSET)
+#define HPIPE_CFG_POST_CURSOR_PRESET3_OFFSET   6
+#define HPIPE_CFG_POST_CURSOR_PRESET3_MASK     \
+                               (0x3f << HPIPE_CFG_POST_CURSOR_PRESET3_OFFSET)
+
+#define HPIPE_LANE_PRESET_CFG10_REG            0x6d0
+#define HPIPE_CFG_PRE_CURSOR_PRESET4_OFFSET    0
+#define HPIPE_CFG_PRE_CURSOR_PRESET4_MASK      \
+                               (0x3f << HPIPE_CFG_PRE_CURSOR_PRESET4_OFFSET)
+#define HPIPE_CFG_POST_CURSOR_PRESET4_OFFSET   6
+#define HPIPE_CFG_POST_CURSOR_PRESET4_MASK     \
+                               (0x3f << HPIPE_CFG_POST_CURSOR_PRESET4_OFFSET)
+
+#define HPIPE_LANE_PRESET_CFG11_REG            0x6d4
+#define HPIPE_CFG_PRE_CURSOR_PRESET5_OFFSET    0
+#define HPIPE_CFG_PRE_CURSOR_PRESET5_MASK      \
+                               (0x3f << HPIPE_CFG_PRE_CURSOR_PRESET5_OFFSET)
+#define HPIPE_CFG_POST_CURSOR_PRESET5_OFFSET   6
+#define HPIPE_CFG_POST_CURSOR_PRESET5_MASK     \
+                               (0x3f << HPIPE_CFG_POST_CURSOR_PRESET5_OFFSET)
+
+#define HPIPE_LANE_PRESET_CFG12_REG            0x6d8
+#define HPIPE_CFG_PRE_CURSOR_PRESET6_OFFSET    0
+#define HPIPE_CFG_PRE_CURSOR_PRESET6_MASK      \
+                               (0x3f << HPIPE_CFG_PRE_CURSOR_PRESET6_OFFSET)
+#define HPIPE_CFG_POST_CURSOR_PRESET6_OFFSET   6
+#define HPIPE_CFG_POST_CURSOR_PRESET6_MASK     \
+                               (0x3f << HPIPE_CFG_POST_CURSOR_PRESET6_OFFSET)
+
+#define HPIPE_LANE_PRESET_CFG13_REG            0x6dc
+#define HPIPE_CFG_PRE_CURSOR_PRESET7_OFFSET    0
+#define HPIPE_CFG_PRE_CURSOR_PRESET7_MASK      \
+                               (0x3f << HPIPE_CFG_PRE_CURSOR_PRESET7_OFFSET)
+#define HPIPE_CFG_POST_CURSOR_PRESET7_OFFSET   6
+#define HPIPE_CFG_POST_CURSOR_PRESET7_MASK     \
+                               (0x3f << HPIPE_CFG_POST_CURSOR_PRESET7_OFFSET)
+
+#define HPIPE_LANE_PRESET_CFG14_REG            0x6e0
+#define HPIPE_CFG_PRE_CURSOR_PRESET8_OFFSET    0
+#define HPIPE_CFG_PRE_CURSOR_PRESET8_MASK      \
+                               (0x3f << HPIPE_CFG_PRE_CURSOR_PRESET8_OFFSET)
+#define HPIPE_CFG_POST_CURSOR_PRESET8_OFFSET   6
+#define HPIPE_CFG_POST_CURSOR_PRESET8_MASK     \
+                               (0x3f << HPIPE_CFG_POST_CURSOR_PRESET8_OFFSET)
+
+#define HPIPE_LANE_PRESET_CFG15_REG            0x6e4
+#define HPIPE_CFG_PRE_CURSOR_PRESET9_OFFSET    0
+#define HPIPE_CFG_PRE_CURSOR_PRESET9_MASK      \
+                               (0x3f << HPIPE_CFG_PRE_CURSOR_PRESET9_OFFSET)
+#define HPIPE_CFG_POST_CURSOR_PRESET9_OFFSET   6
+#define HPIPE_CFG_POST_CURSOR_PRESET9_MASK     \
+                               (0x3f << HPIPE_CFG_POST_CURSOR_PRESET9_OFFSET)
+
+#define HPIPE_LANE_PRESET_CFG16_REG            0x6e8
+#define HPIPE_CFG_PRE_CURSOR_PRESET10_OFFSET   0
+#define HPIPE_CFG_PRE_CURSOR_PRESET10_MASK     \
+                               (0x3f << HPIPE_CFG_PRE_CURSOR_PRESET10_OFFSET)
+#define HPIPE_CFG_POST_CURSOR_PRESET10_OFFSET  6
+#define HPIPE_CFG_POST_CURSOR_PRESET10_MASK    \
+                               (0x3f << HPIPE_CFG_POST_CURSOR_PRESET10_OFFSET)
+
+#define HPIPE_LANE_EQ_REMOTE_SETTING_REG       0x6f8
+#define HPIPE_LANE_CFG_FOM_DIRN_OVERRIDE_OFFSET        0
+#define HPIPE_LANE_CFG_FOM_DIRN_OVERRIDE_MASK  \
+                               (0x1 << HPIPE_LANE_CFG_FOM_DIRN_OVERRIDE_OFFSET)
+#define HPIPE_LANE_CFG_FOM_ONLY_MODE_OFFFSET   1
+#define HPIPE_LANE_CFG_FOM_ONLY_MODE_MASK      \
+                               (0x1 << HPIPE_LANE_CFG_FOM_ONLY_MODE_OFFFSET)
+#define HPIPE_LANE_CFG_FOM_PRESET_VECTOR_OFFSET        2
+#define HPIPE_LANE_CFG_FOM_PRESET_VECTOR_MASK  \
+                               (0xf << HPIPE_LANE_CFG_FOM_PRESET_VECTOR_OFFSET)
+
+#define HPIPE_RST_CLK_CTRL_REG                 0x704
+#define HPIPE_RST_CLK_CTRL_PIPE_RST_OFFSET     0
+#define HPIPE_RST_CLK_CTRL_PIPE_RST_MASK       \
+                               (0x1 << HPIPE_RST_CLK_CTRL_PIPE_RST_OFFSET)
+#define HPIPE_RST_CLK_CTRL_FIXED_PCLK_OFFSET   2
+#define HPIPE_RST_CLK_CTRL_FIXED_PCLK_MASK     \
+                               (0x1 << HPIPE_RST_CLK_CTRL_FIXED_PCLK_OFFSET)
+#define HPIPE_RST_CLK_CTRL_PIPE_WIDTH_OFFSET   3
+#define HPIPE_RST_CLK_CTRL_PIPE_WIDTH_MASK     \
+                               (0x1 << HPIPE_RST_CLK_CTRL_PIPE_WIDTH_OFFSET)
+#define HPIPE_RST_CLK_CTRL_CORE_FREQ_SEL_OFFSET        9
+#define HPIPE_RST_CLK_CTRL_CORE_FREQ_SEL_MASK  \
+                               (0x1 << HPIPE_RST_CLK_CTRL_CORE_FREQ_SEL_OFFSET)
+
+#define HPIPE_CLK_SRC_LO_REG                           0x70c
+#define HPIPE_CLK_SRC_LO_BUNDLE_PERIOD_SEL_OFFSET      1
+#define HPIPE_CLK_SRC_LO_BUNDLE_PERIOD_SEL_MASK                \
+                       (0x1 << HPIPE_CLK_SRC_LO_BUNDLE_PERIOD_SEL_OFFSET)
+#define HPIPE_CLK_SRC_LO_BUNDLE_PERIOD_SCALE_OFFSET    2
+#define HPIPE_CLK_SRC_LO_BUNDLE_PERIOD_SCALE_MASK      \
+                       (0x3 << HPIPE_CLK_SRC_LO_BUNDLE_PERIOD_SCALE_OFFSET)
+#define HPIPE_CLK_SRC_LO_PLL_RDY_DL_OFFSET             5
+#define HPIPE_CLK_SRC_LO_PLL_RDY_DL_MASK               \
+                       (0x7 << HPIPE_CLK_SRC_LO_PLL_RDY_DL_OFFSET)
+
+#define HPIPE_CLK_SRC_HI_REG                   0x710
+#define HPIPE_CLK_SRC_HI_LANE_STRT_OFFSET      0
+#define HPIPE_CLK_SRC_HI_LANE_STRT_MASK                \
+                       (0x1 << HPIPE_CLK_SRC_HI_LANE_STRT_OFFSET)
+#define HPIPE_CLK_SRC_HI_LANE_BREAK_OFFSET     1
+#define HPIPE_CLK_SRC_HI_LANE_BREAK_MASK       \
+                       (0x1 << HPIPE_CLK_SRC_HI_LANE_BREAK_OFFSET)
+#define HPIPE_CLK_SRC_HI_LANE_MASTER_OFFSET    2
+#define HPIPE_CLK_SRC_HI_LANE_MASTER_MASK      \
+                       (0x1 << HPIPE_CLK_SRC_HI_LANE_MASTER_OFFSET)
+#define HPIPE_CLK_SRC_HI_MODE_PIPE_OFFSET      7
+#define HPIPE_CLK_SRC_HI_MODE_PIPE_MASK                \
+                       (0x1 << HPIPE_CLK_SRC_HI_MODE_PIPE_OFFSET)
+
+#define HPIPE_GLOBAL_PM_CTRL                   0x740
+#define HPIPE_GLOBAL_PM_RXDLOZ_WAIT_OFFSET     0
+#define HPIPE_GLOBAL_PM_RXDLOZ_WAIT_MASK       \
+                       (0xFF << HPIPE_GLOBAL_PM_RXDLOZ_WAIT_OFFSET)
+
+#endif /* _COMPHY_H_ */
+
diff --git a/drivers/marvell/comphy/comphy-cp110.h b/drivers/marvell/comphy/comphy-cp110.h
new file mode 100644 (file)
index 0000000..925abb5
--- /dev/null
@@ -0,0 +1,775 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+/* Marvell CP110 SoC COMPHY unit driver */
+
+#ifndef _PHY_COMPHY_CP110_H
+#define _PHY_COMPHY_CP110_H
+
+#define SD_ADDR(base, lane)                    (base + 0x1000 * lane)
+#define HPIPE_ADDR(base, lane)                 (SD_ADDR(base, lane) + 0x800)
+#define COMPHY_ADDR(base, lane)                        (base + 0x28 * lane)
+
+#define MAX_NUM_OF_FFE                         8
+#define RX_TRAINING_TIMEOUT                    500
+
+/* Comphy registers */
+#define COMMON_PHY_CFG1_REG                    0x0
+#define COMMON_PHY_CFG1_PWR_UP_OFFSET          1
+#define COMMON_PHY_CFG1_PWR_UP_MASK            \
+                               (0x1 << COMMON_PHY_CFG1_PWR_UP_OFFSET)
+#define COMMON_PHY_CFG1_PIPE_SELECT_OFFSET     2
+#define COMMON_PHY_CFG1_PIPE_SELECT_MASK       \
+                               (0x1 << COMMON_PHY_CFG1_PIPE_SELECT_OFFSET)
+#define COMMON_PHY_CFG1_CORE_RSTN_OFFSET       13
+#define COMMON_PHY_CFG1_CORE_RSTN_MASK         \
+                               (0x1 << COMMON_PHY_CFG1_CORE_RSTN_OFFSET)
+#define COMMON_PHY_CFG1_PWR_ON_RESET_OFFSET    14
+#define COMMON_PHY_CFG1_PWR_ON_RESET_MASK      \
+                               (0x1 << COMMON_PHY_CFG1_PWR_ON_RESET_OFFSET)
+#define COMMON_PHY_PHY_MODE_OFFSET             15
+#define COMMON_PHY_PHY_MODE_MASK               \
+                               (0x1 << COMMON_PHY_PHY_MODE_OFFSET)
+
+#define COMMON_PHY_CFG6_REG                    0x14
+#define COMMON_PHY_CFG6_IF_40_SEL_OFFSET       18
+#define COMMON_PHY_CFG6_IF_40_SEL_MASK         \
+                               (0x1 << COMMON_PHY_CFG6_IF_40_SEL_OFFSET)
+
+#define COMMON_SELECTOR_PHY_REG_OFFSET         0x140
+#define COMMON_SELECTOR_PIPE_REG_OFFSET                0x144
+#define COMMON_SELECTOR_COMPHY_MASK            0xf
+#define COMMON_SELECTOR_COMPHYN_FIELD_WIDTH    4
+#define COMMON_SELECTOR_COMPHYN_SATA           0x4
+#define COMMON_SELECTOR_PIPE_COMPHY_PCIE       0x4
+#define COMMON_SELECTOR_PIPE_COMPHY_USBH       0x1
+#define COMMON_SELECTOR_PIPE_COMPHY_USBD       0x2
+
+/* SGMII/HS-SGMII/SFI/RXAUI */
+#define COMMON_SELECTOR_COMPHY0_1_2_NETWORK    0x1
+#define COMMON_SELECTOR_COMPHY3_RXAUI          0x1
+#define COMMON_SELECTOR_COMPHY3_SGMII          0x2
+#define COMMON_SELECTOR_COMPHY4_PORT1          0x1
+#define COMMON_SELECTOR_COMPHY4_ALL_OTHERS     0x2
+#define COMMON_SELECTOR_COMPHY5_RXAUI          0x2
+#define COMMON_SELECTOR_COMPHY5_SGMII          0x1
+
+#define COMMON_PHY_SD_CTRL1                    0x148
+#define COMMON_PHY_SD_CTRL1_COMPHY_0_PORT_OFFSET       0
+#define COMMON_PHY_SD_CTRL1_COMPHY_1_PORT_OFFSET       4
+#define COMMON_PHY_SD_CTRL1_COMPHY_2_PORT_OFFSET       8
+#define COMMON_PHY_SD_CTRL1_COMPHY_3_PORT_OFFSET       12
+#define COMMON_PHY_SD_CTRL1_COMPHY_0_3_PORT_MASK       0xFFFF
+#define COMMON_PHY_SD_CTRL1_COMPHY_0_1_PORT_MASK       0xFF
+#define COMMON_PHY_SD_CTRL1_PCIE_X4_EN_OFFSET  24
+#define COMMON_PHY_SD_CTRL1_PCIE_X4_EN_MASK    \
+                               (0x1 << COMMON_PHY_SD_CTRL1_PCIE_X4_EN_OFFSET)
+#define COMMON_PHY_SD_CTRL1_PCIE_X2_EN_OFFSET  25
+#define COMMON_PHY_SD_CTRL1_PCIE_X2_EN_MASK    \
+                               (0x1 << COMMON_PHY_SD_CTRL1_PCIE_X2_EN_OFFSET)
+#define COMMON_PHY_SD_CTRL1_RXAUI1_OFFSET      26
+#define COMMON_PHY_SD_CTRL1_RXAUI1_MASK                \
+                               (0x1 << COMMON_PHY_SD_CTRL1_RXAUI1_OFFSET)
+#define COMMON_PHY_SD_CTRL1_RXAUI0_OFFSET      27
+#define COMMON_PHY_SD_CTRL1_RXAUI0_MASK                \
+                               (0x1 << COMMON_PHY_SD_CTRL1_RXAUI0_OFFSET)
+
+/* DFX register */
+#define DFX_BASE                               (0x400000)
+#define DFX_DEV_GEN_CTRL12_REG                 (0x280)
+#define DFX_DEV_GEN_PCIE_CLK_SRC_MUX           (0x3)
+#define DFX_DEV_GEN_PCIE_CLK_SRC_OFFSET                7
+#define DFX_DEV_GEN_PCIE_CLK_SRC_MASK          \
+                               (0x3 << DFX_DEV_GEN_PCIE_CLK_SRC_OFFSET)
+
+/* SerDes IP registers */
+#define SD_EXTERNAL_CONFIG0_REG                                0
+#define SD_EXTERNAL_CONFIG0_SD_PU_PLL_OFFSET           1
+#define SD_EXTERNAL_CONFIG0_SD_PU_PLL_MASK             \
+                       (1 << SD_EXTERNAL_CONFIG0_SD_PU_PLL_OFFSET)
+#define SD_EXTERNAL_CONFIG0_SD_PHY_GEN_RX_OFFSET       3
+#define SD_EXTERNAL_CONFIG0_SD_PHY_GEN_RX_MASK         \
+                       (0xf << SD_EXTERNAL_CONFIG0_SD_PHY_GEN_RX_OFFSET)
+#define SD_EXTERNAL_CONFIG0_SD_PHY_GEN_TX_OFFSET       7
+#define SD_EXTERNAL_CONFIG0_SD_PHY_GEN_TX_MASK         \
+                       (0xf << SD_EXTERNAL_CONFIG0_SD_PHY_GEN_TX_OFFSET)
+#define SD_EXTERNAL_CONFIG0_SD_PU_RX_OFFSET            11
+#define SD_EXTERNAL_CONFIG0_SD_PU_RX_MASK              \
+                       (1 << SD_EXTERNAL_CONFIG0_SD_PU_RX_OFFSET)
+#define SD_EXTERNAL_CONFIG0_SD_PU_TX_OFFSET            12
+#define SD_EXTERNAL_CONFIG0_SD_PU_TX_MASK              \
+                       (1 << SD_EXTERNAL_CONFIG0_SD_PU_TX_OFFSET)
+#define SD_EXTERNAL_CONFIG0_HALF_BUS_MODE_OFFSET       14
+#define SD_EXTERNAL_CONFIG0_HALF_BUS_MODE_MASK         \
+                       (1 << SD_EXTERNAL_CONFIG0_HALF_BUS_MODE_OFFSET)
+#define SD_EXTERNAL_CONFIG0_MEDIA_MODE_OFFSET          15
+#define SD_EXTERNAL_CONFIG0_MEDIA_MODE_MASK            \
+                       (0x1 << SD_EXTERNAL_CONFIG0_MEDIA_MODE_OFFSET)
+
+#define SD_EXTERNAL_CONFIG1_REG                        0x4
+#define SD_EXTERNAL_CONFIG1_RESET_IN_OFFSET    3
+#define SD_EXTERNAL_CONFIG1_RESET_IN_MASK      \
+                       (0x1 << SD_EXTERNAL_CONFIG1_RESET_IN_OFFSET)
+#define SD_EXTERNAL_CONFIG1_RX_INIT_OFFSET     4
+#define SD_EXTERNAL_CONFIG1_RX_INIT_MASK       \
+                       (0x1 << SD_EXTERNAL_CONFIG1_RX_INIT_OFFSET)
+#define SD_EXTERNAL_CONFIG1_RESET_CORE_OFFSET  5
+#define SD_EXTERNAL_CONFIG1_RESET_CORE_MASK    \
+                       (0x1 << SD_EXTERNAL_CONFIG1_RESET_CORE_OFFSET)
+#define SD_EXTERNAL_CONFIG1_RF_RESET_IN_OFFSET 6
+#define SD_EXTERNAL_CONFIG1_RF_RESET_IN_MASK   \
+                       (0x1 << SD_EXTERNAL_CONFIG1_RF_RESET_IN_OFFSET)
+
+#define SD_EXTERNAL_CONFIG2_REG                        0x8
+#define SD_EXTERNAL_CONFIG2_PIN_DFE_EN_OFFSET  4
+#define SD_EXTERNAL_CONFIG2_PIN_DFE_EN_MASK    \
+                       (0x1 << SD_EXTERNAL_CONFIG2_PIN_DFE_EN_OFFSET)
+#define SD_EXTERNAL_CONFIG2_SSC_ENABLE_OFFSET  7
+#define SD_EXTERNAL_CONFIG2_SSC_ENABLE_MASK    \
+                       (0x1 << SD_EXTERNAL_CONFIG2_SSC_ENABLE_OFFSET)
+
+#define SD_EXTERNAL_STATUS_REG                         0xc
+#define SD_EXTERNAL_STATUS_START_RX_TRAINING_OFFSET    7
+#define SD_EXTERNAL_STATUS_START_RX_TRAINING_MASK      \
+                       (1 << SD_EXTERNAL_STATUS_START_RX_TRAINING_OFFSET)
+
+#define SD_EXTERNAL_STATUS0_REG                        0x18
+#define SD_EXTERNAL_STATUS0_PLL_TX_OFFSET      2
+#define SD_EXTERNAL_STATUS0_PLL_TX_MASK                \
+                       (0x1 << SD_EXTERNAL_STATUS0_PLL_TX_OFFSET)
+#define SD_EXTERNAL_STATUS0_PLL_RX_OFFSET      3
+#define SD_EXTERNAL_STATUS0_PLL_RX_MASK                \
+                       (0x1 << SD_EXTERNAL_STATUS0_PLL_RX_OFFSET)
+#define SD_EXTERNAL_STATUS0_RX_INIT_OFFSET     4
+#define SD_EXTERNAL_STATUS0_RX_INIT_MASK       \
+                       (0x1 << SD_EXTERNAL_STATUS0_RX_INIT_OFFSET)
+
+#define SD_EXTERNAL_STATAUS1_REG                       0x1c
+#define SD_EXTERNAL_STATAUS1_REG_RX_TRAIN_COMP_OFFSET  0
+#define SD_EXTERNAL_STATAUS1_REG_RX_TRAIN_COMP_MASK    \
+       (1 << SD_EXTERNAL_STATAUS1_REG_RX_TRAIN_COMP_OFFSET)
+#define SD_EXTERNAL_STATAUS1_REG_RX_TRAIN_FAILED_OFFSET        1
+#define SD_EXTERNAL_STATAUS1_REG_RX_TRAIN_FAILED_MASK  \
+       (1 << SD_EXTERNAL_STATAUS1_REG_RX_TRAIN_FAILED_OFFSET)
+
+/* HPIPE registers */
+#define HPIPE_PWR_PLL_REG                      0x4
+#define HPIPE_PWR_PLL_REF_FREQ_OFFSET          0
+#define HPIPE_PWR_PLL_REF_FREQ_MASK            \
+                       (0x1f << HPIPE_PWR_PLL_REF_FREQ_OFFSET)
+#define HPIPE_PWR_PLL_PHY_MODE_OFFSET          5
+#define HPIPE_PWR_PLL_PHY_MODE_MASK            \
+                       (0x7 << HPIPE_PWR_PLL_PHY_MODE_OFFSET)
+
+#define HPIPE_CAL_REG1_REG                     0xc
+#define HPIPE_CAL_REG_1_EXT_TXIMP_OFFSET       10
+#define HPIPE_CAL_REG_1_EXT_TXIMP_MASK         \
+                       (0x1f << HPIPE_CAL_REG_1_EXT_TXIMP_OFFSET)
+#define HPIPE_CAL_REG_1_EXT_TXIMP_EN_OFFSET    15
+#define HPIPE_CAL_REG_1_EXT_TXIMP_EN_MASK      \
+                       (0x1 << HPIPE_CAL_REG_1_EXT_TXIMP_EN_OFFSET)
+
+#define HPIPE_SQUELCH_FFE_SETTING_REG          0x18
+#define HPIPE_SQUELCH_THRESH_IN_OFFSET         8
+#define HPIPE_SQUELCH_THRESH_IN_MASK           \
+                       (0xf << HPIPE_SQUELCH_THRESH_IN_OFFSET)
+#define HPIPE_SQUELCH_DETECTED_OFFSET          14
+#define HPIPE_SQUELCH_DETECTED_MASK            \
+                       (0x1 << HPIPE_SQUELCH_DETECTED_OFFSET)
+
+#define HPIPE_DFE_REG0                         0x1c
+#define HPIPE_DFE_RES_FORCE_OFFSET             15
+#define HPIPE_DFE_RES_FORCE_MASK               \
+                       (0x1 << HPIPE_DFE_RES_FORCE_OFFSET)
+
+#define HPIPE_DFE_F3_F5_REG                    0x28
+#define HPIPE_DFE_F3_F5_DFE_EN_OFFSET          14
+#define HPIPE_DFE_F3_F5_DFE_EN_MASK            \
+                       (0x1 << HPIPE_DFE_F3_F5_DFE_EN_OFFSET)
+#define HPIPE_DFE_F3_F5_DFE_CTRL_OFFSET                15
+#define HPIPE_DFE_F3_F5_DFE_CTRL_MASK          \
+                       (0x1 << HPIPE_DFE_F3_F5_DFE_CTRL_OFFSET)
+
+#define HPIPE_G1_SET_0_REG                     0x34
+#define HPIPE_G1_SET_0_G1_TX_AMP_OFFSET                1
+#define HPIPE_G1_SET_0_G1_TX_AMP_MASK          \
+                       (0x1f << HPIPE_G1_SET_0_G1_TX_AMP_OFFSET)
+#define HPIPE_G1_SET_0_G1_TX_AMP_ADJ_OFFSET    6
+#define HPIPE_G1_SET_0_G1_TX_AMP_ADJ_MASK      \
+                       (0x1 << HPIPE_G1_SET_0_G1_TX_AMP_ADJ_OFFSET)
+#define HPIPE_G1_SET_0_G1_TX_EMPH1_OFFSET      7
+#define HPIPE_G1_SET_0_G1_TX_EMPH1_MASK                \
+                       (0xf << HPIPE_G1_SET_0_G1_TX_EMPH1_OFFSET)
+#define HPIPE_G1_SET_0_G1_TX_EMPH1_EN_OFFSET   11
+#define HPIPE_G1_SET_0_G1_TX_EMPH1_EN_MASK     \
+                       (0x1 << HPIPE_G1_SET_0_G1_TX_EMPH1_EN_OFFSET)
+
+#define HPIPE_G1_SET_1_REG                     0x38
+#define HPIPE_G1_SET_1_G1_RX_SELMUPI_OFFSET    0
+#define HPIPE_G1_SET_1_G1_RX_SELMUPI_MASK      \
+                       (0x7 << HPIPE_G1_SET_1_G1_RX_SELMUPI_OFFSET)
+#define HPIPE_G1_SET_1_G1_RX_SELMUPP_OFFSET    3
+#define HPIPE_G1_SET_1_G1_RX_SELMUPP_MASK      \
+                       (0x7 << HPIPE_G1_SET_1_G1_RX_SELMUPP_OFFSET)
+#define HPIPE_G1_SET_1_G1_RX_SELMUFI_OFFSET    6
+#define HPIPE_G1_SET_1_G1_RX_SELMUFI_MASK      \
+                       (0x3 << HPIPE_G1_SET_1_G1_RX_SELMUFI_OFFSET)
+#define HPIPE_G1_SET_1_G1_RX_SELMUFF_OFFSET    8
+#define HPIPE_G1_SET_1_G1_RX_SELMUFF_MASK      \
+                       (0x3 << HPIPE_G1_SET_1_G1_RX_SELMUFF_OFFSET)
+#define HPIPE_G1_SET_1_G1_RX_DFE_EN_OFFSET     10
+#define HPIPE_G1_SET_1_G1_RX_DFE_EN_MASK       \
+                       (0x1 << HPIPE_G1_SET_1_G1_RX_DFE_EN_OFFSET)
+#define HPIPE_G1_SET_1_G1_RX_DIGCK_DIV_OFFSET  11
+#define HPIPE_G1_SET_1_G1_RX_DIGCK_DIV_MASK    \
+                       (0x3 << HPIPE_G1_SET_1_G1_RX_DIGCK_DIV_OFFSET)
+
+#define HPIPE_G2_SET_0_REG                     0x3c
+#define HPIPE_G2_SET_0_G2_TX_AMP_OFFSET                1
+#define HPIPE_G2_SET_0_G2_TX_AMP_MASK          \
+                       (0x1f << HPIPE_G2_SET_0_G2_TX_AMP_OFFSET)
+#define HPIPE_G2_SET_0_G2_TX_AMP_ADJ_OFFSET    6
+#define HPIPE_G2_SET_0_G2_TX_AMP_ADJ_MASK      \
+                       (0x1 << HPIPE_G2_SET_0_G2_TX_AMP_ADJ_OFFSET)
+#define HPIPE_G2_SET_0_G2_TX_EMPH1_OFFSET      7
+#define HPIPE_G2_SET_0_G2_TX_EMPH1_MASK                \
+                       (0xf << HPIPE_G2_SET_0_G2_TX_EMPH1_OFFSET)
+#define HPIPE_G2_SET_0_G2_TX_EMPH1_EN_OFFSET   11
+#define HPIPE_G2_SET_0_G2_TX_EMPH1_EN_MASK     \
+                       (0x1 << HPIPE_G2_SET_0_G2_TX_EMPH1_EN_OFFSET)
+
+#define HPIPE_G2_SET_1_REG                     0x40
+#define HPIPE_G2_SET_1_G2_RX_SELMUPI_OFFSET    0
+#define HPIPE_G2_SET_1_G2_RX_SELMUPI_MASK      \
+                       (0x7 << HPIPE_G2_SET_1_G2_RX_SELMUPI_OFFSET)
+#define HPIPE_G2_SET_1_G2_RX_SELMUPP_OFFSET    3
+#define HPIPE_G2_SET_1_G2_RX_SELMUPP_MASK      \
+                       (0x7 << HPIPE_G2_SET_1_G2_RX_SELMUPP_OFFSET)
+#define HPIPE_G2_SET_1_G2_RX_SELMUFI_OFFSET    6
+#define HPIPE_G2_SET_1_G2_RX_SELMUFI_MASK      \
+                       (0x3 << HPIPE_G2_SET_1_G2_RX_SELMUFI_OFFSET)
+#define HPIPE_G2_SET_1_G2_RX_SELMUFF_OFFSET    8
+#define HPIPE_G2_SET_1_G2_RX_SELMUFF_MASK      \
+                       (0x3 << HPIPE_G2_SET_1_G2_RX_SELMUFF_OFFSET)
+#define HPIPE_G2_SET_1_G2_RX_DFE_EN_OFFSET     10
+#define HPIPE_G2_SET_1_G2_RX_DFE_EN_MASK       \
+                       (0x1 << HPIPE_G2_SET_1_G2_RX_DFE_EN_OFFSET)
+#define HPIPE_G2_SET_1_G2_RX_DIGCK_DIV_OFFSET  11
+#define HPIPE_G2_SET_1_G2_RX_DIGCK_DIV_MASK    \
+                       (0x3 << HPIPE_G2_SET_1_G2_RX_DIGCK_DIV_OFFSET)
+
+#define HPIPE_G3_SET_0_REG                     0x44
+#define HPIPE_G3_SET_0_G3_TX_AMP_OFFSET                1
+#define HPIPE_G3_SET_0_G3_TX_AMP_MASK          \
+                       (0x1f << HPIPE_G3_SET_0_G3_TX_AMP_OFFSET)
+#define HPIPE_G3_SET_0_G3_TX_AMP_ADJ_OFFSET    6
+#define HPIPE_G3_SET_0_G3_TX_AMP_ADJ_MASK      \
+                       (0x1 << HPIPE_G3_SET_0_G3_TX_AMP_ADJ_OFFSET)
+#define HPIPE_G3_SET_0_G3_TX_EMPH1_OFFSET      7
+#define HPIPE_G3_SET_0_G3_TX_EMPH1_MASK                \
+                       (0xf << HPIPE_G3_SET_0_G3_TX_EMPH1_OFFSET)
+#define HPIPE_G3_SET_0_G3_TX_EMPH1_EN_OFFSET   11
+#define HPIPE_G3_SET_0_G3_TX_EMPH1_EN_MASK     \
+                       (0x1 << HPIPE_G3_SET_0_G3_TX_EMPH1_EN_OFFSET)
+#define HPIPE_G3_SET_0_G3_TX_SLEW_RATE_SEL_OFFSET 12
+#define HPIPE_G3_SET_0_G3_TX_SLEW_RATE_SEL_MASK        \
+                       (0x7 << HPIPE_G3_SET_0_G3_TX_SLEW_RATE_SEL_OFFSET)
+#define HPIPE_G3_SET_0_G3_TX_SLEW_CTRL_EN_OFFSET 15
+#define HPIPE_G3_SET_0_G3_TX_SLEW_CTRL_EN_MASK \
+                       (0x1 << HPIPE_G3_SET_0_G3_TX_SLEW_CTRL_EN_OFFSET)
+
+#define HPIPE_G3_SET_1_REG                             0x48
+#define HPIPE_G3_SET_1_G3_RX_SELMUPI_OFFSET            0
+#define HPIPE_G3_SET_1_G3_RX_SELMUPI_MASK              \
+                       (0x7 << HPIPE_G3_SET_1_G3_RX_SELMUPI_OFFSET)
+#define HPIPE_G3_SET_1_G3_RX_SELMUPF_OFFSET            3
+#define HPIPE_G3_SET_1_G3_RX_SELMUPF_MASK              \
+                       (0x7 << HPIPE_G3_SET_1_G3_RX_SELMUPF_OFFSET)
+#define HPIPE_G3_SET_1_G3_RX_SELMUFI_OFFSET            6
+#define HPIPE_G3_SET_1_G3_RX_SELMUFI_MASK              \
+                       (0x3 << HPIPE_G3_SET_1_G3_RX_SELMUFI_OFFSET)
+#define HPIPE_G3_SET_1_G3_RX_SELMUFF_OFFSET            8
+#define HPIPE_G3_SET_1_G3_RX_SELMUFF_MASK              \
+                       (0x3 << HPIPE_G3_SET_1_G3_RX_SELMUFF_OFFSET)
+#define HPIPE_G3_SET_1_G3_RX_DFE_EN_OFFSET             10
+#define HPIPE_G3_SET_1_G3_RX_DFE_EN_MASK               \
+                       (0x1 << HPIPE_G3_SET_1_G3_RX_DFE_EN_OFFSET)
+#define HPIPE_G3_SET_1_G3_RX_DIGCK_DIV_OFFSET          11
+#define HPIPE_G3_SET_1_G3_RX_DIGCK_DIV_MASK            \
+                       (0x3 << HPIPE_G3_SET_1_G3_RX_DIGCK_DIV_OFFSET)
+#define HPIPE_G3_SET_1_G3_SAMPLER_INPAIRX2_EN_OFFSET   13
+#define HPIPE_G3_SET_1_G3_SAMPLER_INPAIRX2_EN_MASK     \
+                       (0x1 << HPIPE_G3_SET_1_G3_SAMPLER_INPAIRX2_EN_OFFSET)
+
+#define HPIPE_PHY_TEST_CONTROL_REG             0x54
+#define HPIPE_PHY_TEST_PATTERN_SEL_OFFSET      4
+#define HPIPE_PHY_TEST_PATTERN_SEL_MASK                \
+                       (0xf << HPIPE_PHY_TEST_PATTERN_SEL_OFFSET)
+#define HPIPE_PHY_TEST_RESET_OFFSET            14
+#define HPIPE_PHY_TEST_RESET_MASK              \
+                       (0x1 << HPIPE_PHY_TEST_RESET_OFFSET)
+#define HPIPE_PHY_TEST_EN_OFFSET               15
+#define HPIPE_PHY_TEST_EN_MASK                 \
+                       (0x1 << HPIPE_PHY_TEST_EN_OFFSET)
+
+#define HPIPE_PHY_TEST_DATA_REG                        0x6c
+#define HPIPE_PHY_TEST_DATA_OFFSET             0
+#define HPIPE_PHY_TEST_DATA_MASK               \
+                       (0xffff << HPIPE_PHY_TEST_DATA_OFFSET)
+
+#define HPIPE_LOOPBACK_REG                     0x8c
+#define HPIPE_LOOPBACK_SEL_OFFSET              1
+#define HPIPE_LOOPBACK_SEL_MASK                        \
+                       (0x7 << HPIPE_LOOPBACK_SEL_OFFSET)
+#define HPIPE_CDR_LOCK_OFFSET                  7
+#define HPIPE_CDR_LOCK_MASK                    \
+                       (0x1 << HPIPE_CDR_LOCK_OFFSET)
+#define HPIPE_CDR_LOCK_DET_EN_OFFSET           8
+#define HPIPE_CDR_LOCK_DET_EN_MASK             \
+                       (0x1 << HPIPE_CDR_LOCK_DET_EN_OFFSET)
+
+#define HPIPE_INTERFACE_REG                    0x94
+#define HPIPE_INTERFACE_GEN_MAX_OFFSET         10
+#define HPIPE_INTERFACE_GEN_MAX_MASK           \
+                       (0x3 << HPIPE_INTERFACE_GEN_MAX_OFFSET)
+#define HPIPE_INTERFACE_DET_BYPASS_OFFSET      12
+#define HPIPE_INTERFACE_DET_BYPASS_MASK                \
+                       (0x1 << HPIPE_INTERFACE_DET_BYPASS_OFFSET)
+#define HPIPE_INTERFACE_LINK_TRAIN_OFFSET      14
+#define HPIPE_INTERFACE_LINK_TRAIN_MASK                \
+                       (0x1 << HPIPE_INTERFACE_LINK_TRAIN_OFFSET)
+
+#define HPIPE_G1_SET_2_REG                     0xf4
+#define HPIPE_G1_SET_2_G1_TX_EMPH0_OFFSET      0
+#define HPIPE_G1_SET_2_G1_TX_EMPH0_MASK                \
+                       (0xf << HPIPE_G1_SET_2_G1_TX_EMPH0_OFFSET)
+#define HPIPE_G1_SET_2_G1_TX_EMPH0_EN_OFFSET   4
+#define HPIPE_G1_SET_2_G1_TX_EMPH0_EN_MASK     \
+                       (0x1 << HPIPE_G1_SET_2_G1_TX_EMPH0_EN_OFFSET)
+
+#define HPIPE_G2_SET_2_REG                     0xf8
+#define HPIPE_G2_TX_SSC_AMP_OFFSET             9
+#define HPIPE_G2_TX_SSC_AMP_MASK               \
+                       (0x7f << HPIPE_G2_TX_SSC_AMP_OFFSET)
+
+#define HPIPE_VDD_CAL_0_REG                    0x108
+#define HPIPE_CAL_VDD_CONT_MODE_OFFSET         15
+#define HPIPE_CAL_VDD_CONT_MODE_MASK           \
+                       (0x1 << HPIPE_CAL_VDD_CONT_MODE_OFFSET)
+
+#define HPIPE_VDD_CAL_CTRL_REG                 0x114
+#define HPIPE_EXT_SELLV_RXSAMPL_OFFSET         5
+#define HPIPE_EXT_SELLV_RXSAMPL_MASK           \
+                       (0x1f << HPIPE_EXT_SELLV_RXSAMPL_OFFSET)
+
+#define HPIPE_PCIE_REG0                                0x120
+#define HPIPE_PCIE_IDLE_SYNC_OFFSET            12
+#define HPIPE_PCIE_IDLE_SYNC_MASK              \
+                       (0x1 << HPIPE_PCIE_IDLE_SYNC_OFFSET)
+#define HPIPE_PCIE_SEL_BITS_OFFSET             13
+#define HPIPE_PCIE_SEL_BITS_MASK               \
+                       (0x3 << HPIPE_PCIE_SEL_BITS_OFFSET)
+
+#define HPIPE_LANE_ALIGN_REG                   0x124
+#define HPIPE_LANE_ALIGN_OFF_OFFSET            12
+#define HPIPE_LANE_ALIGN_OFF_MASK              \
+                       (0x1 << HPIPE_LANE_ALIGN_OFF_OFFSET)
+
+#define HPIPE_MISC_REG                         0x13C
+#define HPIPE_MISC_CLK100M_125M_OFFSET         4
+#define HPIPE_MISC_CLK100M_125M_MASK           \
+                       (0x1 << HPIPE_MISC_CLK100M_125M_OFFSET)
+#define HPIPE_MISC_ICP_FORCE_OFFSET            5
+#define HPIPE_MISC_ICP_FORCE_MASK              \
+                       (0x1 << HPIPE_MISC_ICP_FORCE_OFFSET)
+#define HPIPE_MISC_TXDCLK_2X_OFFSET            6
+#define HPIPE_MISC_TXDCLK_2X_MASK              \
+                       (0x1 << HPIPE_MISC_TXDCLK_2X_OFFSET)
+#define HPIPE_MISC_CLK500_EN_OFFSET            7
+#define HPIPE_MISC_CLK500_EN_MASK              \
+                       (0x1 << HPIPE_MISC_CLK500_EN_OFFSET)
+#define HPIPE_MISC_REFCLK_SEL_OFFSET           10
+#define HPIPE_MISC_REFCLK_SEL_MASK             \
+                       (0x1 << HPIPE_MISC_REFCLK_SEL_OFFSET)
+
+#define HPIPE_RX_CONTROL_1_REG                 0x140
+#define HPIPE_RX_CONTROL_1_RXCLK2X_SEL_OFFSET  11
+#define HPIPE_RX_CONTROL_1_RXCLK2X_SEL_MASK    \
+                       (0x1 << HPIPE_RX_CONTROL_1_RXCLK2X_SEL_OFFSET)
+#define HPIPE_RX_CONTROL_1_CLK8T_EN_OFFSET     12
+#define HPIPE_RX_CONTROL_1_CLK8T_EN_MASK       \
+                       (0x1 << HPIPE_RX_CONTROL_1_CLK8T_EN_OFFSET)
+
+#define HPIPE_PWR_CTR_REG                      0x148
+#define HPIPE_PWR_CTR_RST_DFE_OFFSET           0
+#define HPIPE_PWR_CTR_RST_DFE_MASK             \
+                       (0x1 << HPIPE_PWR_CTR_RST_DFE_OFFSET)
+#define HPIPE_PWR_CTR_SFT_RST_OFFSET           10
+#define HPIPE_PWR_CTR_SFT_RST_MASK             \
+                       (0x1 << HPIPE_PWR_CTR_SFT_RST_OFFSET)
+
+#define HPIPE_SPD_DIV_FORCE_REG                                0x154
+#define HPIPE_TXDIGCK_DIV_FORCE_OFFSET                 7
+#define HPIPE_TXDIGCK_DIV_FORCE_MASK                   \
+                       (0x1 << HPIPE_TXDIGCK_DIV_FORCE_OFFSET)
+#define HPIPE_SPD_DIV_FORCE_RX_SPD_DIV_OFFSET          8
+#define HPIPE_SPD_DIV_FORCE_RX_SPD_DIV_MASK            \
+                       (0x3 << HPIPE_SPD_DIV_FORCE_RX_SPD_DIV_OFFSET)
+#define HPIPE_SPD_DIV_FORCE_RX_SPD_DIV_FORCE_OFFSET    10
+#define HPIPE_SPD_DIV_FORCE_RX_SPD_DIV_FORCE_MASK      \
+                       (0x1 << HPIPE_SPD_DIV_FORCE_RX_SPD_DIV_FORCE_OFFSET)
+#define HPIPE_SPD_DIV_FORCE_TX_SPD_DIV_OFFSET          13
+#define HPIPE_SPD_DIV_FORCE_TX_SPD_DIV_MASK            \
+                       (0x3 << HPIPE_SPD_DIV_FORCE_TX_SPD_DIV_OFFSET)
+#define HPIPE_SPD_DIV_FORCE_TX_SPD_DIV_FORCE_OFFSET    15
+#define HPIPE_SPD_DIV_FORCE_TX_SPD_DIV_FORCE_MASK      \
+                       (0x1 << HPIPE_SPD_DIV_FORCE_TX_SPD_DIV_FORCE_OFFSET)
+
+#define HPIPE_SAMPLER_N_PROC_CALIB_CTRL_REG    0x16C
+#define HPIPE_RX_SAMPLER_OS_GAIN_OFFSET                6
+#define HPIPE_RX_SAMPLER_OS_GAIN_MASK          \
+                       (0x3 << HPIPE_RX_SAMPLER_OS_GAIN_OFFSET)
+#define HPIPE_SMAPLER_OFFSET                   12
+#define HPIPE_SMAPLER_MASK                     \
+                       (0x1 << HPIPE_SMAPLER_OFFSET)
+
+#define HPIPE_TX_REG1_REG                      0x174
+#define HPIPE_TX_REG1_TX_EMPH_RES_OFFSET       5
+#define HPIPE_TX_REG1_TX_EMPH_RES_MASK         \
+                       (0x3 << HPIPE_TX_REG1_TX_EMPH_RES_OFFSET)
+#define HPIPE_TX_REG1_SLC_EN_OFFSET            10
+#define HPIPE_TX_REG1_SLC_EN_MASK              \
+                       (0x3f << HPIPE_TX_REG1_SLC_EN_OFFSET)
+
+#define HPIPE_PWR_CTR_DTL_REG                          0x184
+#define HPIPE_PWR_CTR_DTL_SQ_DET_EN_OFFSET             0
+#define HPIPE_PWR_CTR_DTL_SQ_DET_EN_MASK               \
+                       (0x1 << HPIPE_PWR_CTR_DTL_SQ_DET_EN_OFFSET)
+#define HPIPE_PWR_CTR_DTL_SQ_PLOOP_EN_OFFSET           1
+#define HPIPE_PWR_CTR_DTL_SQ_PLOOP_EN_MASK             \
+                       (0x1 << HPIPE_PWR_CTR_DTL_SQ_PLOOP_EN_OFFSET)
+#define HPIPE_PWR_CTR_DTL_FLOOP_EN_OFFSET              2
+#define HPIPE_PWR_CTR_DTL_FLOOP_EN_MASK                        \
+                       (0x1 << HPIPE_PWR_CTR_DTL_FLOOP_EN_OFFSET)
+#define HPIPE_PWR_CTR_DTL_CLAMPING_SEL_OFFSET          4
+#define HPIPE_PWR_CTR_DTL_CLAMPING_SEL_MASK            \
+                       (0x7 << HPIPE_PWR_CTR_DTL_CLAMPING_SEL_OFFSET)
+#define HPIPE_PWR_CTR_DTL_INTPCLK_DIV_FORCE_OFFSET     10
+#define HPIPE_PWR_CTR_DTL_INTPCLK_DIV_FORCE_MASK       \
+                       (0x1 << HPIPE_PWR_CTR_DTL_INTPCLK_DIV_FORCE_OFFSET)
+#define HPIPE_PWR_CTR_DTL_CLK_MODE_OFFSET              12
+#define HPIPE_PWR_CTR_DTL_CLK_MODE_MASK                        \
+                       (0x3 << HPIPE_PWR_CTR_DTL_CLK_MODE_OFFSET)
+#define HPIPE_PWR_CTR_DTL_CLK_MODE_FORCE_OFFSET                14
+#define HPIPE_PWR_CTR_DTL_CLK_MODE_FORCE_MASK          \
+                       (1 << HPIPE_PWR_CTR_DTL_CLK_MODE_FORCE_OFFSET)
+
+#define HPIPE_PHASE_CONTROL_REG                        0x188
+#define HPIPE_OS_PH_OFFSET_OFFSET              0
+#define HPIPE_OS_PH_OFFSET_MASK                        \
+                       (0x7f << HPIPE_OS_PH_OFFSET_OFFSET)
+#define HPIPE_OS_PH_OFFSET_FORCE_OFFSET                7
+#define HPIPE_OS_PH_OFFSET_FORCE_MASK          \
+                       (0x1 << HPIPE_OS_PH_OFFSET_FORCE_OFFSET)
+#define HPIPE_OS_PH_VALID_OFFSET               8
+#define HPIPE_OS_PH_VALID_MASK                 \
+                       (0x1 << HPIPE_OS_PH_VALID_OFFSET)
+
+#define HPIPE_SQ_GLITCH_FILTER_CTRL            0x1c8
+#define HPIPE_SQ_DEGLITCH_WIDTH_P_OFFSET       0
+#define HPIPE_SQ_DEGLITCH_WIDTH_P_MASK         \
+                       (0xf << HPIPE_SQ_DEGLITCH_WIDTH_P_OFFSET)
+#define HPIPE_SQ_DEGLITCH_WIDTH_N_OFFSET       4
+#define HPIPE_SQ_DEGLITCH_WIDTH_N_MASK         \
+                       (0xf << HPIPE_SQ_DEGLITCH_WIDTH_N_OFFSET)
+#define HPIPE_SQ_DEGLITCH_EN_OFFSET            8
+#define HPIPE_SQ_DEGLITCH_EN_MASK              \
+                       (0x1 << HPIPE_SQ_DEGLITCH_EN_OFFSET)
+
+#define HPIPE_FRAME_DETECT_CTRL_0_REG          0x214
+#define HPIPE_TRAIN_PAT_NUM_OFFSET             0x7
+#define HPIPE_TRAIN_PAT_NUM_MASK               \
+                       (0x1FF << HPIPE_TRAIN_PAT_NUM_OFFSET)
+
+#define HPIPE_FRAME_DETECT_CTRL_3_REG                  0x220
+#define HPIPE_PATTERN_LOCK_LOST_TIMEOUT_EN_OFFSET      12
+#define HPIPE_PATTERN_LOCK_LOST_TIMEOUT_EN_MASK                \
+                       (0x1 << HPIPE_PATTERN_LOCK_LOST_TIMEOUT_EN_OFFSET)
+
+#define HPIPE_DME_REG                          0x228
+#define HPIPE_DME_ETHERNET_MODE_OFFSET         7
+#define HPIPE_DME_ETHERNET_MODE_MASK           \
+                       (0x1 << HPIPE_DME_ETHERNET_MODE_OFFSET)
+
+#define HPIPE_TX_TRAIN_CTRL_0_REG              0x268
+#define HPIPE_TX_TRAIN_P2P_HOLD_OFFSET         15
+#define HPIPE_TX_TRAIN_P2P_HOLD_MASK           \
+                       (0x1 << HPIPE_TX_TRAIN_P2P_HOLD_OFFSET)
+
+#define HPIPE_TX_TRAIN_CTRL_REG                        0x26C
+#define HPIPE_TX_TRAIN_CTRL_G1_OFFSET          0
+#define HPIPE_TX_TRAIN_CTRL_G1_MASK            \
+                       (0x1 << HPIPE_TX_TRAIN_CTRL_G1_OFFSET)
+#define HPIPE_TX_TRAIN_CTRL_GN1_OFFSET         1
+#define HPIPE_TX_TRAIN_CTRL_GN1_MASK           \
+                       (0x1 << HPIPE_TX_TRAIN_CTRL_GN1_OFFSET)
+#define HPIPE_TX_TRAIN_CTRL_G0_OFFSET          2
+#define HPIPE_TX_TRAIN_CTRL_G0_MASK            \
+                       (0x1 << HPIPE_TX_TRAIN_CTRL_G0_OFFSET)
+
+#define HPIPE_TX_TRAIN_CTRL_4_REG              0x278
+#define HPIPE_TRX_TRAIN_TIMER_OFFSET           0
+#define HPIPE_TRX_TRAIN_TIMER_MASK             \
+                       (0x3FF << HPIPE_TRX_TRAIN_TIMER_OFFSET)
+
+#define HPIPE_TX_TRAIN_CTRL_5_REG              0x2A4
+#define HPIPE_RX_TRAIN_TIMER_OFFSET            0
+#define HPIPE_RX_TRAIN_TIMER_MASK              \
+                       (0x3ff << HPIPE_RX_TRAIN_TIMER_OFFSET)
+#define HPIPE_TX_TRAIN_START_SQ_EN_OFFSET      11
+#define HPIPE_TX_TRAIN_START_SQ_EN_MASK                \
+                       (0x1 << HPIPE_TX_TRAIN_START_SQ_EN_OFFSET)
+#define HPIPE_TX_TRAIN_START_FRM_DET_EN_OFFSET 12
+#define HPIPE_TX_TRAIN_START_FRM_DET_EN_MASK   \
+                       (0x1 << HPIPE_TX_TRAIN_START_FRM_DET_EN_OFFSET)
+#define HPIPE_TX_TRAIN_START_FRM_LOCK_EN_OFFSET        13
+#define HPIPE_TX_TRAIN_START_FRM_LOCK_EN_MASK  \
+                       (0x1 << HPIPE_TX_TRAIN_START_FRM_LOCK_EN_OFFSET)
+#define HPIPE_TX_TRAIN_WAIT_TIME_EN_OFFSET     14
+#define HPIPE_TX_TRAIN_WAIT_TIME_EN_MASK       \
+                       (0x1 << HPIPE_TX_TRAIN_WAIT_TIME_EN_OFFSET)
+
+#define HPIPE_TX_TRAIN_REG                     0x31C
+#define HPIPE_TX_TRAIN_CHK_INIT_OFFSET         4
+#define HPIPE_TX_TRAIN_CHK_INIT_MASK           \
+                       (0x1 << HPIPE_TX_TRAIN_CHK_INIT_OFFSET)
+#define HPIPE_TX_TRAIN_COE_FM_PIN_PCIE3_OFFSET 7
+#define HPIPE_TX_TRAIN_COE_FM_PIN_PCIE3_MASK   \
+                       (0x1 << HPIPE_TX_TRAIN_COE_FM_PIN_PCIE3_OFFSET)
+#define HPIPE_TX_TRAIN_16BIT_AUTO_EN_OFFSET    8
+#define HPIPE_TX_TRAIN_16BIT_AUTO_EN_MASK      \
+                       (0x1 << HPIPE_TX_TRAIN_16BIT_AUTO_EN_OFFSET)
+#define HPIPE_TX_TRAIN_PAT_SEL_OFFSET          9
+#define HPIPE_TX_TRAIN_PAT_SEL_MASK            \
+                       (0x1 << HPIPE_TX_TRAIN_PAT_SEL_OFFSET)
+
+#define HPIPE_SAVED_DFE_VALUES_REG             0x328
+#define HPIPE_SAVED_DFE_VALUES_SAV_F0D_OFFSET  10
+#define HPIPE_SAVED_DFE_VALUES_SAV_F0D_MASK    \
+                       (0x3f << HPIPE_SAVED_DFE_VALUES_SAV_F0D_OFFSET)
+
+#define HPIPE_CDR_CONTROL_REG                  0x418
+#define HPIPE_CDR_RX_MAX_DFE_ADAPT_0_OFFSET    14
+#define HPIPE_CDR_RX_MAX_DFE_ADAPT_0_MASK      \
+                       (0x3 << HPIPE_CDR_RX_MAX_DFE_ADAPT_0_OFFSET)
+#define HPIPE_CDR_RX_MAX_DFE_ADAPT_1_OFFSET    12
+#define HPIPE_CDR_RX_MAX_DFE_ADAPT_1_MASK      \
+                       (0x3 << HPIPE_CDR_RX_MAX_DFE_ADAPT_1_OFFSET)
+#define HPIPE_CDR_MAX_DFE_ADAPT_0_OFFSET       9
+#define HPIPE_CDR_MAX_DFE_ADAPT_0_MASK         \
+                       (0x7 << HPIPE_CDR_MAX_DFE_ADAPT_0_OFFSET)
+#define HPIPE_CDR_MAX_DFE_ADAPT_1_OFFSET       6
+#define HPIPE_CDR_MAX_DFE_ADAPT_1_MASK         \
+                       (0x7 << HPIPE_CDR_MAX_DFE_ADAPT_1_OFFSET)
+
+#define HPIPE_TX_TRAIN_CTRL_11_REG             0x438
+#define HPIPE_TX_STATUS_CHECK_MODE_OFFSET      6
+#define HPIPE_TX_TX_STATUS_CHECK_MODE_MASK     \
+                       (0x1 << HPIPE_TX_STATUS_CHECK_MODE_OFFSET)
+#define HPIPE_TX_NUM_OF_PRESET_OFFSET          10
+#define HPIPE_TX_NUM_OF_PRESET_MASK            \
+                       (0x7 << HPIPE_TX_NUM_OF_PRESET_OFFSET)
+#define HPIPE_TX_SWEEP_PRESET_EN_OFFSET                15
+#define HPIPE_TX_SWEEP_PRESET_EN_MASK          \
+                       (0x1 << HPIPE_TX_SWEEP_PRESET_EN_OFFSET)
+
+#define HPIPE_G1_SETTINGS_3_REG                                0x440
+#define HPIPE_G1_SETTINGS_3_G1_FFE_CAP_SEL_OFFSET      0
+#define HPIPE_G1_SETTINGS_3_G1_FFE_CAP_SEL_MASK                \
+                       (0xf << HPIPE_G1_SETTINGS_3_G1_FFE_CAP_SEL_OFFSET)
+#define HPIPE_G1_SETTINGS_3_G1_FFE_RES_SEL_OFFSET      4
+#define HPIPE_G1_SETTINGS_3_G1_FFE_RES_SEL_MASK                \
+                       (0x7 << HPIPE_G1_SETTINGS_3_G1_FFE_RES_SEL_OFFSET)
+#define HPIPE_G1_SETTINGS_3_G1_FFE_SETTING_FORCE_OFFSET        7
+#define HPIPE_G1_SETTINGS_3_G1_FFE_SETTING_FORCE_MASK  \
+                       (0x1 << HPIPE_G1_SETTINGS_3_G1_FFE_SETTING_FORCE_OFFSET)
+#define HPIPE_G1_SETTINGS_3_G1_FBCK_SEL_OFFSET         9
+#define HPIPE_G1_SETTINGS_3_G1_FBCK_SEL_MASK           \
+                       (0x1 << HPIPE_G1_SETTINGS_3_G1_FBCK_SEL_OFFSET)
+#define HPIPE_G1_SETTINGS_3_G1_FFE_DEG_RES_LEVEL_OFFSET        12
+#define HPIPE_G1_SETTINGS_3_G1_FFE_DEG_RES_LEVEL_MASK  \
+                       (0x3 << HPIPE_G1_SETTINGS_3_G1_FFE_DEG_RES_LEVEL_OFFSET)
+#define HPIPE_G1_SETTINGS_3_G1_FFE_LOAD_RES_LEVEL_OFFSET       14
+#define HPIPE_G1_SETTINGS_3_G1_FFE_LOAD_RES_LEVEL_MASK \
+               (0x3 << HPIPE_G1_SETTINGS_3_G1_FFE_LOAD_RES_LEVEL_OFFSET)
+
+#define HPIPE_G1_SETTINGS_4_REG                        0x444
+#define HPIPE_G1_SETTINGS_4_G1_DFE_RES_OFFSET  8
+#define HPIPE_G1_SETTINGS_4_G1_DFE_RES_MASK    \
+                       (0x3 << HPIPE_G1_SETTINGS_4_G1_DFE_RES_OFFSET)
+
+#define HPIPE_G2_SETTINGS_4_REG                        0x44c
+#define HPIPE_G2_DFE_RES_OFFSET                        8
+#define HPIPE_G2_DFE_RES_MASK                  \
+                       (0x3 << HPIPE_G2_DFE_RES_OFFSET)
+
+#define HPIPE_G3_SETTING_3_REG                 0x450
+#define HPIPE_G3_FFE_CAP_SEL_OFFSET            0
+#define HPIPE_G3_FFE_CAP_SEL_MASK              \
+                       (0xf << HPIPE_G3_FFE_CAP_SEL_OFFSET)
+#define HPIPE_G3_FFE_RES_SEL_OFFSET            4
+#define HPIPE_G3_FFE_RES_SEL_MASK              \
+                       (0x7 << HPIPE_G3_FFE_RES_SEL_OFFSET)
+#define HPIPE_G3_FFE_SETTING_FORCE_OFFSET      7
+#define HPIPE_G3_FFE_SETTING_FORCE_MASK                \
+                       (0x1 << HPIPE_G3_FFE_SETTING_FORCE_OFFSET)
+#define HPIPE_G3_FFE_DEG_RES_LEVEL_OFFSET      12
+#define HPIPE_G3_FFE_DEG_RES_LEVEL_MASK                \
+                       (0x3 << HPIPE_G3_FFE_DEG_RES_LEVEL_OFFSET)
+#define HPIPE_G3_FFE_LOAD_RES_LEVEL_OFFSET     14
+#define HPIPE_G3_FFE_LOAD_RES_LEVEL_MASK       \
+                       (0x3 << HPIPE_G3_FFE_LOAD_RES_LEVEL_OFFSET)
+
+#define HPIPE_G3_SETTING_4_REG                 0x454
+#define HPIPE_G3_DFE_RES_OFFSET                        8
+#define HPIPE_G3_DFE_RES_MASK                  (0x3 << HPIPE_G3_DFE_RES_OFFSET)
+
+#define HPIPE_TX_PRESET_INDEX_REG              0x468
+#define HPIPE_TX_PRESET_INDEX_OFFSET           0
+#define HPIPE_TX_PRESET_INDEX_MASK             \
+                       (0xf << HPIPE_TX_PRESET_INDEX_OFFSET)
+
+#define HPIPE_DFE_CONTROL_REG                  0x470
+#define HPIPE_DFE_TX_MAX_DFE_ADAPT_OFFSET      14
+#define HPIPE_DFE_TX_MAX_DFE_ADAPT_MASK                \
+                       (0x3 << HPIPE_DFE_TX_MAX_DFE_ADAPT_OFFSET)
+
+#define HPIPE_DFE_CTRL_28_REG                  0x49C
+#define HPIPE_DFE_CTRL_28_PIPE4_OFFSET         7
+#define HPIPE_DFE_CTRL_28_PIPE4_MASK           \
+                       (0x1 << HPIPE_DFE_CTRL_28_PIPE4_OFFSET)
+
+#define HPIPE_G1_SETTING_5_REG                 0x538
+#define HPIPE_G1_SETTING_5_G1_ICP_OFFSET       0
+#define HPIPE_G1_SETTING_5_G1_ICP_MASK         \
+                       (0xf << HPIPE_G1_SETTING_5_G1_ICP_OFFSET)
+
+#define HPIPE_G3_SETTING_5_REG                 0x548
+#define HPIPE_G3_SETTING_5_G3_ICP_OFFSET       0
+#define HPIPE_G3_SETTING_5_G3_ICP_MASK         \
+                       (0xf << HPIPE_G3_SETTING_5_G3_ICP_OFFSET)
+
+#define HPIPE_LANE_CONFIG0_REG                 0x600
+#define HPIPE_LANE_CONFIG0_TXDEEMPH0_OFFSET    0
+#define HPIPE_LANE_CONFIG0_TXDEEMPH0_MASK      \
+                       (0x1 << HPIPE_LANE_CONFIG0_TXDEEMPH0_OFFSET)
+
+#define HPIPE_LANE_STATUS1_REG                 0x60C
+#define HPIPE_LANE_STATUS1_PCLK_EN_OFFSET      0
+#define HPIPE_LANE_STATUS1_PCLK_EN_MASK                \
+                       (0x1 << HPIPE_LANE_STATUS1_PCLK_EN_OFFSET)
+
+#define HPIPE_LANE_CFG4_REG                    0x620
+#define HPIPE_LANE_CFG4_DFE_CTRL_OFFSET                0
+#define HPIPE_LANE_CFG4_DFE_CTRL_MASK          \
+                       (0x7 << HPIPE_LANE_CFG4_DFE_CTRL_OFFSET)
+#define HPIPE_LANE_CFG4_DFE_EN_SEL_OFFSET      3
+#define HPIPE_LANE_CFG4_DFE_EN_SEL_MASK                \
+                       (0x1 << HPIPE_LANE_CFG4_DFE_EN_SEL_OFFSET)
+#define HPIPE_LANE_CFG4_DFE_OVER_OFFSET                6
+#define HPIPE_LANE_CFG4_DFE_OVER_MASK          \
+                       (0x1 << HPIPE_LANE_CFG4_DFE_OVER_OFFSET)
+#define HPIPE_LANE_CFG4_SSC_CTRL_OFFSET                7
+#define HPIPE_LANE_CFG4_SSC_CTRL_MASK          \
+                       (0x1 << HPIPE_LANE_CFG4_SSC_CTRL_OFFSET)
+
+#define HPIPE_LANE_EQ_REMOTE_SETTING_REG       0x6f8
+#define HPIPE_LANE_CFG_FOM_DIRN_OVERRIDE_OFFSET        0
+#define HPIPE_LANE_CFG_FOM_DIRN_OVERRIDE_MASK  \
+                       (0x1 << HPIPE_LANE_CFG_FOM_DIRN_OVERRIDE_OFFSET)
+#define HPIPE_LANE_CFG_FOM_ONLY_MODE_OFFFSET   1
+#define HPIPE_LANE_CFG_FOM_ONLY_MODE_MASK      \
+                       (0x1 << HPIPE_LANE_CFG_FOM_ONLY_MODE_OFFFSET)
+#define HPIPE_LANE_CFG_FOM_PRESET_VECTOR_OFFSET        2
+#define HPIPE_LANE_CFG_FOM_PRESET_VECTOR_MASK  \
+                       (0xf << HPIPE_LANE_CFG_FOM_PRESET_VECTOR_OFFSET)
+
+#define HPIPE_LANE_EQU_CONFIG_0_REG            0x69C
+#define HPIPE_CFG_PHY_RC_EP_OFFSET             12
+#define HPIPE_CFG_PHY_RC_EP_MASK               \
+                       (0x1 << HPIPE_CFG_PHY_RC_EP_OFFSET)
+
+#define HPIPE_LANE_EQ_CFG1_REG                 0x6a0
+#define HPIPE_CFG_UPDATE_POLARITY_OFFSET       12
+#define HPIPE_CFG_UPDATE_POLARITY_MASK         \
+                       (0x1 << HPIPE_CFG_UPDATE_POLARITY_OFFSET)
+
+#define HPIPE_LANE_EQ_CFG2_REG                 0x6a4
+#define HPIPE_CFG_EQ_BUNDLE_DIS_OFFSET         14
+#define HPIPE_CFG_EQ_BUNDLE_DIS_MASK           \
+                       (0x1 << HPIPE_CFG_EQ_BUNDLE_DIS_OFFSET)
+
+#define HPIPE_RST_CLK_CTRL_REG                 0x704
+#define HPIPE_RST_CLK_CTRL_PIPE_RST_OFFSET     0
+#define HPIPE_RST_CLK_CTRL_PIPE_RST_MASK       \
+                       (0x1 << HPIPE_RST_CLK_CTRL_PIPE_RST_OFFSET)
+#define HPIPE_RST_CLK_CTRL_FIXED_PCLK_OFFSET   2
+#define HPIPE_RST_CLK_CTRL_FIXED_PCLK_MASK     \
+                       (0x1 << HPIPE_RST_CLK_CTRL_FIXED_PCLK_OFFSET)
+#define HPIPE_RST_CLK_CTRL_PIPE_WIDTH_OFFSET   3
+#define HPIPE_RST_CLK_CTRL_PIPE_WIDTH_MASK     \
+                       (0x1 << HPIPE_RST_CLK_CTRL_PIPE_WIDTH_OFFSET)
+#define HPIPE_RST_CLK_CTRL_CORE_FREQ_SEL_OFFSET        9
+#define HPIPE_RST_CLK_CTRL_CORE_FREQ_SEL_MASK  \
+                       (0x1 << HPIPE_RST_CLK_CTRL_CORE_FREQ_SEL_OFFSET)
+
+#define HPIPE_TST_MODE_CTRL_REG                        0x708
+#define HPIPE_TST_MODE_CTRL_MODE_MARGIN_OFFSET 2
+#define HPIPE_TST_MODE_CTRL_MODE_MARGIN_MASK   \
+                       (0x1 << HPIPE_TST_MODE_CTRL_MODE_MARGIN_OFFSET)
+
+#define HPIPE_CLK_SRC_LO_REG                           0x70c
+#define HPIPE_CLK_SRC_LO_BUNDLE_PERIOD_SEL_OFFSET      1
+#define HPIPE_CLK_SRC_LO_BUNDLE_PERIOD_SEL_MASK                \
+                       (0x1 << HPIPE_CLK_SRC_LO_BUNDLE_PERIOD_SEL_OFFSET)
+#define HPIPE_CLK_SRC_LO_BUNDLE_PERIOD_SCALE_OFFSET    2
+#define HPIPE_CLK_SRC_LO_BUNDLE_PERIOD_SCALE_MASK      \
+                       (0x3 << HPIPE_CLK_SRC_LO_BUNDLE_PERIOD_SCALE_OFFSET)
+#define HPIPE_CLK_SRC_LO_PLL_RDY_DL_OFFSET             5
+#define HPIPE_CLK_SRC_LO_PLL_RDY_DL_MASK               \
+                       (0x7 << HPIPE_CLK_SRC_LO_PLL_RDY_DL_OFFSET)
+
+#define HPIPE_CLK_SRC_HI_REG                   0x710
+#define HPIPE_CLK_SRC_HI_LANE_STRT_OFFSET      0
+#define HPIPE_CLK_SRC_HI_LANE_STRT_MASK                \
+                       (0x1 << HPIPE_CLK_SRC_HI_LANE_STRT_OFFSET)
+#define HPIPE_CLK_SRC_HI_LANE_BREAK_OFFSET     1
+#define HPIPE_CLK_SRC_HI_LANE_BREAK_MASK       \
+                       (0x1 << HPIPE_CLK_SRC_HI_LANE_BREAK_OFFSET)
+#define HPIPE_CLK_SRC_HI_LANE_MASTER_OFFSET    2
+#define HPIPE_CLK_SRC_HI_LANE_MASTER_MASK      \
+                       (0x1 << HPIPE_CLK_SRC_HI_LANE_MASTER_OFFSET)
+#define HPIPE_CLK_SRC_HI_MODE_PIPE_OFFSET      7
+#define HPIPE_CLK_SRC_HI_MODE_PIPE_MASK                \
+                       (0x1 << HPIPE_CLK_SRC_HI_MODE_PIPE_OFFSET)
+
+#define HPIPE_GLOBAL_MISC_CTRL                 0x718
+#define HPIPE_GLOBAL_PM_CTRL                   0x740
+#define HPIPE_GLOBAL_PM_RXDLOZ_WAIT_OFFSET     0
+#define HPIPE_GLOBAL_PM_RXDLOZ_WAIT_MASK       \
+                       (0xFF << HPIPE_GLOBAL_PM_RXDLOZ_WAIT_OFFSET)
+
+/* General defines */
+#define PLL_LOCK_TIMEOUT                       15000
+
+#endif /* _PHY_COMPHY_CP110_H */
+
diff --git a/drivers/marvell/comphy/phy-comphy-cp110.c b/drivers/marvell/comphy/phy-comphy-cp110.c
new file mode 100644 (file)
index 0000000..8b78280
--- /dev/null
@@ -0,0 +1,2319 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+/* Marvell CP110 SoC COMPHY unit driver */
+
+#include <debug.h>
+#include <delay_timer.h>
+#include <errno.h>
+#include <mmio.h>
+#include <mvebu_def.h>
+#include <spinlock.h>
+#include "mvebu.h"
+#include "comphy-cp110.h"
+
+/* #define DEBUG_COMPHY */
+#ifdef DEBUG_COMPHY
+#define debug(format...) printf(format)
+#else
+#define debug(format, arg...)
+#endif
+
+/* A lane is described by the following fields:
+ *      - bits 1~0 represent comphy polarity invert
+ *      - bits 7~2 represent comphy speed
+ *      - bits 11~8 represent unit index
+ *      - bits 16~12 represent mode
+ *      - bit 17 represents the comphy indication of clock source
+ *      - bits 19~18 represent pcie width (in case of pcie comphy config.)
+ *      - bits 31~20 reserved
+ */
+
+#define COMPHY_INVERT_OFFSET   0
+#define COMPHY_INVERT_LEN      2
+#define COMPHY_INVERT_MASK     COMPHY_MASK(COMPHY_INVERT_OFFSET, \
+                                               COMPHY_INVERT_LEN)
+#define COMPHY_SPEED_OFFSET    (COMPHY_INVERT_OFFSET + COMPHY_INVERT_LEN)
+#define COMPHY_SPEED_LEN       6
+#define COMPHY_SPEED_MASK      COMPHY_MASK(COMPHY_SPEED_OFFSET, \
+                                               COMPHY_SPEED_LEN)
+#define COMPHY_UNIT_ID_OFFSET  (COMPHY_SPEED_OFFSET + COMPHY_SPEED_LEN)
+#define COMPHY_UNIT_ID_LEN     4
+#define COMPHY_UNIT_ID_MASK    COMPHY_MASK(COMPHY_UNIT_ID_OFFSET, \
+                                               COMPHY_UNIT_ID_LEN)
+#define COMPHY_MODE_OFFSET     (COMPHY_UNIT_ID_OFFSET + COMPHY_UNIT_ID_LEN)
+#define COMPHY_MODE_LEN                5
+#define COMPHY_MODE_MASK       COMPHY_MASK(COMPHY_MODE_OFFSET, COMPHY_MODE_LEN)
+#define COMPHY_CLK_SRC_OFFSET  (COMPHY_MODE_OFFSET + COMPHY_MODE_LEN)
+#define COMPHY_CLK_SRC_LEN     1
+#define COMPHY_CLK_SRC_MASK    COMPHY_MASK(COMPHY_CLK_SRC_OFFSET, \
+                                               COMPHY_CLK_SRC_LEN)
+#define COMPHY_PCI_WIDTH_OFFSET        (COMPHY_CLK_SRC_OFFSET + COMPHY_CLK_SRC_LEN)
+#define COMPHY_PCI_WIDTH_LEN   3
+#define COMPHY_PCI_WIDTH_MASK  COMPHY_MASK(COMPHY_PCI_WIDTH_OFFSET, \
+                                               COMPHY_PCI_WIDTH_LEN)
+
+#define COMPHY_MASK(offset, len)       (((1 << (len)) - 1) << (offset))
+
+/* Macro which extracts mode from lane description */
+#define COMPHY_GET_MODE(x)             (((x) & COMPHY_MODE_MASK) >> \
+                                               COMPHY_MODE_OFFSET)
+/* Macro which extracts unit index from lane description */
+#define COMPHY_GET_ID(x)               (((x) & COMPHY_UNIT_ID_MASK) >> \
+                                               COMPHY_UNIT_ID_OFFSET)
+/* Macro which extracts speed from lane description */
+#define COMPHY_GET_SPEED(x)            (((x) & COMPHY_SPEED_MASK) >> \
+                                               COMPHY_SPEED_OFFSET)
+/* Macro which extracts clock source indication from lane description */
+#define COMPHY_GET_CLK_SRC(x)          (((x) & COMPHY_CLK_SRC_MASK) >> \
+                                               COMPHY_CLK_SRC_OFFSET)
+/* Macro which extracts pcie width indication from lane description */
+#define COMPHY_GET_PCIE_WIDTH(x)       (((x) & COMPHY_PCI_WIDTH_MASK) >> \
+                                               COMPHY_PCI_WIDTH_OFFSET)
+
+#define COMPHY_SATA_MODE       0x1
+#define COMPHY_SGMII_MODE      0x2     /* SGMII 1G */
+#define COMPHY_HS_SGMII_MODE   0x3     /* SGMII 2.5G */
+#define COMPHY_USB3H_MODE      0x4
+#define COMPHY_USB3D_MODE      0x5
+#define COMPHY_PCIE_MODE       0x6
+#define COMPHY_RXAUI_MODE      0x7
+#define COMPHY_XFI_MODE                0x8
+#define COMPHY_SFI_MODE                0x9
+#define COMPHY_USB3_MODE       0xa
+#define COMPHY_AP_MODE         0xb
+
+/* COMPHY speed macro */
+#define COMPHY_SPEED_1_25G             0 /* SGMII 1G */
+#define COMPHY_SPEED_2_5G              1
+#define COMPHY_SPEED_3_125G            2 /* SGMII 2.5G */
+#define COMPHY_SPEED_5G                        3
+#define COMPHY_SPEED_5_15625G          4 /* XFI 5G */
+#define COMPHY_SPEED_6G                        5
+#define COMPHY_SPEED_10_3125G          6 /* XFI 10G */
+#define COMPHY_SPEED_MAX               0x3F
+/* The default speed for an IO with a fixed, known speed */
+#define COMPHY_SPEED_DEFAULT           COMPHY_SPEED_MAX
+
+/* Commands for comphy driver */
+#define COMPHY_COMMAND_DIGITAL_PWR_OFF         0x00000001
+#define COMPHY_COMMAND_DIGITAL_PWR_ON          0x00000002
+
+#define COMPHY_PIPE_FROM_COMPHY_ADDR(x)        ((x & ~0xffffff) + 0x120000)
+
+/* System controller registers */
+#define PCIE_MAC_RESET_MASK_PORT0      BIT(13)
+#define PCIE_MAC_RESET_MASK_PORT1      BIT(11)
+#define PCIE_MAC_RESET_MASK_PORT2      BIT(12)
+#define SYS_CTRL_UINIT_SOFT_RESET_REG  0x268
+#define SYS_CTRL_FROM_COMPHY_ADDR(x)   ((x & ~0xffffff) + 0x440000)
+
+/* DFX register spaces */
+#define SAR_RST_PCIE0_CLOCK_CONFIG_CP1_OFFSET  (0)
+#define SAR_RST_PCIE0_CLOCK_CONFIG_CP1_MASK    (0x1 << \
+                                       SAR_RST_PCIE0_CLOCK_CONFIG_CP1_OFFSET)
+#define SAR_RST_PCIE1_CLOCK_CONFIG_CP1_OFFSET  (1)
+#define SAR_RST_PCIE1_CLOCK_CONFIG_CP1_MASK    (0x1 << \
+                                       SAR_RST_PCIE1_CLOCK_CONFIG_CP1_OFFSET)
+#define SAR_STATUS_0_REG                       200
+#define DFX_FROM_COMPHY_ADDR(x)                        ((x & ~0xffffff) + DFX_BASE)
+
+/* The same Units Soft Reset Config register is accessed during the
+ * initialization of all PCIe ports, so a spinlock is defined for the case
+ * where more than one CPU resets the PCIe MAC and needs to access the
+ * register at the same time. The spinlock is shared by all CP110 units.
+ */
+spinlock_t cp110_mac_reset_lock;
+
+enum reg_width_type {
+       REG_16BIT = 0,
+       REG_32BIT,
+};
+
+enum {
+       COMPHY_LANE0 = 0,
+       COMPHY_LANE1,
+       COMPHY_LANE2,
+       COMPHY_LANE3,
+       COMPHY_LANE4,
+       COMPHY_LANE5,
+       COMPHY_LANE_MAX,
+};
+
+/* These values come from the PCI Express Spec */
+enum pcie_link_width {
+       PCIE_LNK_WIDTH_RESRV    = 0x00,
+       PCIE_LNK_X1             = 0x01,
+       PCIE_LNK_X2             = 0x02,
+       PCIE_LNK_X4             = 0x04,
+       PCIE_LNK_X8             = 0x08,
+       PCIE_LNK_X12            = 0x0C,
+       PCIE_LNK_X16            = 0x10,
+       PCIE_LNK_X32            = 0x20,
+       PCIE_LNK_WIDTH_UNKNOWN  = 0xFF,
+};
+
+static inline uint32_t polling_with_timeout(uintptr_t addr,
+                                           uint32_t val,
+                                           uint32_t mask,
+                                           uint32_t usec_timeout,
+                                           enum reg_width_type type)
+{
+       uint32_t data;
+
+       do {
+               udelay(1);
+               if (type == REG_16BIT)
+                       data = mmio_read_16(addr) & mask;
+               else
+                       data = mmio_read_32(addr) & mask;
+       } while (data != val  && --usec_timeout > 0);
+
+       if (usec_timeout == 0)
+               return data;
+
+       return 0;
+}
+
+static inline void reg_set(uintptr_t addr, uint32_t data, uint32_t mask)
+{
+       debug("<atf>: WR to addr = %#010lx, data = %#010x (mask = %#010x) - ",
+             addr, data, mask);
+       debug("old value = %#010x ==> ", mmio_read_32(addr));
+       mmio_clrsetbits_32(addr, mask, data);
+
+       debug("new val %#010x\n", mmio_read_32(addr));
+}
+
+/* Clear PIPE selector - avoid collision with previous configuration */
+static void mvebu_cp110_comphy_clr_pipe_selector(uint64_t comphy_base,
+                                                uint8_t comphy_index)
+{
+       uint32_t reg, mask, field;
+       uint32_t comphy_offset =
+                       COMMON_SELECTOR_COMPHYN_FIELD_WIDTH * comphy_index;
+
+       mask = COMMON_SELECTOR_COMPHY_MASK << comphy_offset;
+       reg = mmio_read_32(comphy_base + COMMON_SELECTOR_PIPE_REG_OFFSET);
+       field = reg & mask;
+
+       if (field) {
+               reg &= ~mask;
+               mmio_write_32(comphy_base + COMMON_SELECTOR_PIPE_REG_OFFSET,
+                            reg);
+       }
+}
+
+/* Clear PHY selector - avoid collision with previous configuration */
+static void mvebu_cp110_comphy_clr_phy_selector(uint64_t comphy_base,
+                                               uint8_t comphy_index)
+{
+       uint32_t reg, mask, field;
+       uint32_t comphy_offset =
+                       COMMON_SELECTOR_COMPHYN_FIELD_WIDTH * comphy_index;
+
+       mask = COMMON_SELECTOR_COMPHY_MASK << comphy_offset;
+       reg = mmio_read_32(comphy_base + COMMON_SELECTOR_PHY_REG_OFFSET);
+       field = reg & mask;
+
+       /* Clear comphy selector - if it was already configured.
+        * (might be that this comphy was configured as PCIe/USB,
+        * in such case, no need to clear comphy selector because PCIe/USB
+        * are controlled by hpipe selector).
+        */
+       if (field) {
+               reg &= ~mask;
+               mmio_write_32(comphy_base + COMMON_SELECTOR_PHY_REG_OFFSET,
+                             reg);
+       }
+}
+
+/* PHY selector configures SATA and Network modes */
+static void mvebu_cp110_comphy_set_phy_selector(uint64_t comphy_base,
+                                    uint8_t comphy_index, uint32_t comphy_mode)
+{
+       uint32_t reg, mask;
+       uint32_t comphy_offset =
+                       COMMON_SELECTOR_COMPHYN_FIELD_WIDTH * comphy_index;
+       int mode;
+
+       /* If the phy selector is used, the pipe selector should be marked as
+        * unconnected.
+        */
+       mvebu_cp110_comphy_clr_pipe_selector(comphy_base, comphy_index);
+
+       /* Comphy mode (compound of the IO mode and id). Here, only the IO mode
+        * is required to distinguish between SATA and network modes.
+        */
+       mode = COMPHY_GET_MODE(comphy_mode);
+
+       mask = COMMON_SELECTOR_COMPHY_MASK << comphy_offset;
+       reg = mmio_read_32(comphy_base + COMMON_SELECTOR_PHY_REG_OFFSET);
+       reg &= ~mask;
+
+       /* SATA ports 0/1 require the same configuration */
+       if (mode == COMPHY_SATA_MODE) {
+               /* The SATA selector value is always 4 */
+               reg |= COMMON_SELECTOR_COMPHYN_SATA << comphy_offset;
+       } else {
+               switch (comphy_index) {
+               case(0):
+               case(1):
+               case(2):
+                       /* For comphy 0, 1, and 2:
+                        * the network selector value is always 1.
+                        */
+                       reg |= COMMON_SELECTOR_COMPHY0_1_2_NETWORK <<
+                               comphy_offset;
+                       break;
+               case(3):
+                       /* For comphy 3:
+                        * 0x1 = RXAUI_Lane1
+                        * 0x2 = SGMII/HS-SGMII Port1
+                        */
+                       if (mode == COMPHY_RXAUI_MODE)
+                               reg |= COMMON_SELECTOR_COMPHY3_RXAUI <<
+                                       comphy_offset;
+                       else
+                               reg |= COMMON_SELECTOR_COMPHY3_SGMII <<
+                                       comphy_offset;
+                       break;
+               case(4):
+                        /* For comphy 4:
+                         * 0x1 = SGMII/HS-SGMII Port1, XFI1/SFI1
+                         * 0x2 = SGMII/HS-SGMII Port0: XFI0/SFI0, RXAUI_Lane0
+                         *
+                         * We want to check if SGMII1/HS_SGMII1 is the
+                         * requested mode in order to determine which value
+                         * should be set (all other modes use the same value)
+                         * so we need to strip the mode, and check the ID
+                         * because we might handle SGMII0/HS_SGMII0 too.
+                         */
+                         /* TODO: need to distinguish between CP110 and CP115
+                          * as SFI1/XFI1 available only for CP115.
+                          */
+                       if ((mode == COMPHY_SGMII_MODE ||
+                           mode == COMPHY_HS_SGMII_MODE ||
+                           mode == COMPHY_SFI_MODE) &&
+                           COMPHY_GET_ID(comphy_mode) == 1)
+                               reg |= COMMON_SELECTOR_COMPHY4_PORT1 <<
+                                       comphy_offset;
+                       else
+                               reg |= COMMON_SELECTOR_COMPHY4_ALL_OTHERS <<
+                                       comphy_offset;
+                       break;
+               case(5):
+                       /* For comphy 5:
+                        * 0x1 = SGMII/HS-SGMII Port2
+                        * 0x2 = RXAUI Lane1
+                        */
+                       if (mode == COMPHY_RXAUI_MODE)
+                               reg |= COMMON_SELECTOR_COMPHY5_RXAUI <<
+                                       comphy_offset;
+                       else
+                               reg |= COMMON_SELECTOR_COMPHY5_SGMII <<
+                                       comphy_offset;
+                       break;
+               }
+       }
+
+       mmio_write_32(comphy_base + COMMON_SELECTOR_PHY_REG_OFFSET, reg);
+}
+
+/* PIPE selector configures for PCIe, USB 3.0 Host, and USB 3.0 Device mode */
+static void mvebu_cp110_comphy_set_pipe_selector(uint64_t comphy_base,
+                                    uint8_t comphy_index, uint32_t comphy_mode)
+{
+       uint32_t reg;
+       uint32_t shift = COMMON_SELECTOR_COMPHYN_FIELD_WIDTH * comphy_index;
+       int mode = COMPHY_GET_MODE(comphy_mode);
+       uint32_t mask = COMMON_SELECTOR_COMPHY_MASK << shift;
+       uint32_t pipe_sel = 0x0;
+
+       /* If the pipe selector is used, the phy selector should be marked as
+        * unconnected.
+        */
+       mvebu_cp110_comphy_clr_phy_selector(comphy_base, comphy_index);
+
+       reg = mmio_read_32(comphy_base + COMMON_SELECTOR_PIPE_REG_OFFSET);
+       reg &= ~mask;
+
+       switch (mode) {
+       case (COMPHY_PCIE_MODE):
+               /* All lanes that support PCIe use the same selector value */
+               pipe_sel = COMMON_SELECTOR_PIPE_COMPHY_PCIE;
+               break;
+
+       case (COMPHY_USB3H_MODE):
+               /* Only lanes 1-4 support USB host; same selector value */
+               if (comphy_index == COMPHY_LANE0 ||
+                   comphy_index == COMPHY_LANE5)
+                       ERROR("COMPHY[%d] mode[%d] is invalid\n",
+                             comphy_index, mode);
+               else
+                       pipe_sel = COMMON_SELECTOR_PIPE_COMPHY_USBH;
+               break;
+
+       case (COMPHY_USB3D_MODE):
+               /* Only lanes 1 and 4 support USB device; same selector value */
+               if (comphy_index == COMPHY_LANE1 ||
+                   comphy_index == COMPHY_LANE4)
+                       pipe_sel = COMMON_SELECTOR_PIPE_COMPHY_USBD;
+               else
+                       ERROR("COMPHY[%d] mode[%d] is invalid\n", comphy_index,
+                             mode);
+               break;
+
+       default:
+               ERROR("COMPHY[%d] mode[%d] is invalid\n", comphy_index, mode);
+               break;
+       }
+
+       mmio_write_32(comphy_base + COMMON_SELECTOR_PIPE_REG_OFFSET, reg |
+                     (pipe_sel << shift));
+}
+
+int mvebu_cp110_comphy_is_pll_locked(uint64_t comphy_base, uint8_t comphy_index)
+{
+       uintptr_t sd_ip_addr, addr;
+       uint32_t mask, data;
+       int ret = 0;
+
+       debug_enter();
+
+       sd_ip_addr = SD_ADDR(COMPHY_PIPE_FROM_COMPHY_ADDR(comphy_base),
+                            comphy_index);
+
+       /* Wait until both the RX and TX PLL lock bits are set (OR, not AND:
+        * these are distinct bits and their AND would be an empty mask).
+        */
+       addr = sd_ip_addr + SD_EXTERNAL_STATUS0_REG;
+       mask = SD_EXTERNAL_STATUS0_PLL_TX_MASK |
+               SD_EXTERNAL_STATUS0_PLL_RX_MASK;
+       polling_with_timeout(addr, mask, mask, PLL_LOCK_TIMEOUT, REG_32BIT);
+       /* Re-read the status: a PLL whose lock bit is still clear timed out */
+       data = mmio_read_32(addr) & mask;
+       if (data != mask) {
+               if (!(data & SD_EXTERNAL_STATUS0_PLL_RX_MASK))
+                       ERROR("RX PLL is not locked\n");
+               if (!(data & SD_EXTERNAL_STATUS0_PLL_TX_MASK))
+                       ERROR("TX PLL is not locked\n");
+               ret = -ETIMEDOUT;
+       }
+
+       debug_exit();
+
+       return ret;
+}
+
+static int mvebu_cp110_comphy_sata_power_on(uint64_t comphy_base,
+                                    uint8_t comphy_index, uint32_t comphy_mode)
+{
+       uintptr_t hpipe_addr, sd_ip_addr, comphy_addr;
+       uint32_t mask, data;
+       int ret = 0;
+
+       debug_enter();
+
+       /* configure phy selector for SATA */
+       mvebu_cp110_comphy_set_phy_selector(comphy_base,
+                                           comphy_index, comphy_mode);
+
+       hpipe_addr = HPIPE_ADDR(COMPHY_PIPE_FROM_COMPHY_ADDR(comphy_base),
+                               comphy_index);
+       sd_ip_addr = SD_ADDR(COMPHY_PIPE_FROM_COMPHY_ADDR(comphy_base),
+                            comphy_index);
+       comphy_addr = COMPHY_ADDR(comphy_base, comphy_index);
+
+       debug(" add hpipe 0x%lx, sd 0x%lx, comphy 0x%lx\n",
+                                          hpipe_addr, sd_ip_addr, comphy_addr);
+       debug("stage: RFU configurations - hard reset comphy\n");
+       /* RFU configurations - hard reset comphy */
+       mask = COMMON_PHY_CFG1_PWR_UP_MASK;
+       data = 0x1 << COMMON_PHY_CFG1_PWR_UP_OFFSET;
+       mask |= COMMON_PHY_CFG1_PIPE_SELECT_MASK;
+       data |= 0x0 << COMMON_PHY_CFG1_PIPE_SELECT_OFFSET;
+       mask |= COMMON_PHY_CFG1_PWR_ON_RESET_MASK;
+       data |= 0x0 << COMMON_PHY_CFG1_PWR_ON_RESET_OFFSET;
+       mask |= COMMON_PHY_CFG1_CORE_RSTN_MASK;
+       data |= 0x0 << COMMON_PHY_CFG1_CORE_RSTN_OFFSET;
+       reg_set(comphy_addr + COMMON_PHY_CFG1_REG, data, mask);
+
+       /* Set select data  width 40Bit - SATA mode only */
+       reg_set(comphy_addr + COMMON_PHY_CFG6_REG,
+               0x1 << COMMON_PHY_CFG6_IF_40_SEL_OFFSET,
+               COMMON_PHY_CFG6_IF_40_SEL_MASK);
+
+       /* release from hard reset in SD external */
+       mask = SD_EXTERNAL_CONFIG1_RESET_IN_MASK;
+       data = 0x1 << SD_EXTERNAL_CONFIG1_RESET_IN_OFFSET;
+       mask |= SD_EXTERNAL_CONFIG1_RESET_CORE_MASK;
+       data |= 0x1 << SD_EXTERNAL_CONFIG1_RESET_CORE_OFFSET;
+       reg_set(sd_ip_addr + SD_EXTERNAL_CONFIG1_REG, data, mask);
+
+       /* Wait 1ms - until band gap and ref clock ready */
+       mdelay(1);
+
+       debug("stage: Comphy configuration\n");
+       /* Start comphy Configuration */
+       /* Set reference clock to comes from group 1 - choose 25Mhz */
+       reg_set(hpipe_addr + HPIPE_MISC_REG,
+               0x0 << HPIPE_MISC_REFCLK_SEL_OFFSET,
+               HPIPE_MISC_REFCLK_SEL_MASK);
+       /* Reference frequency select set 1 (for SATA = 25Mhz) */
+       mask = HPIPE_PWR_PLL_REF_FREQ_MASK;
+       data = 0x1 << HPIPE_PWR_PLL_REF_FREQ_OFFSET;
+       /* PHY mode select (set SATA = 0x0 */
+       mask |= HPIPE_PWR_PLL_PHY_MODE_MASK;
+       data |= 0x0 << HPIPE_PWR_PLL_PHY_MODE_OFFSET;
+       reg_set(hpipe_addr + HPIPE_PWR_PLL_REG, data, mask);
+       /* Set max PHY generation setting - 6Gbps */
+       reg_set(hpipe_addr + HPIPE_INTERFACE_REG,
+               0x2 << HPIPE_INTERFACE_GEN_MAX_OFFSET,
+               HPIPE_INTERFACE_GEN_MAX_MASK);
+       /* Set select data  width 40Bit (SEL_BITS[2:0]) */
+       reg_set(hpipe_addr + HPIPE_LOOPBACK_REG,
+               0x2 << HPIPE_LOOPBACK_SEL_OFFSET, HPIPE_LOOPBACK_SEL_MASK);
+
+       debug("stage: Analog parameters from ETP(HW)\n");
+       /* G1 settings */
+       mask = HPIPE_G1_SET_1_G1_RX_SELMUPI_MASK;
+       data = 0x0 << HPIPE_G1_SET_1_G1_RX_SELMUPI_OFFSET;
+       mask |= HPIPE_G1_SET_1_G1_RX_SELMUPP_MASK;
+       data |= 0x1 << HPIPE_G1_SET_1_G1_RX_SELMUPP_OFFSET;
+       mask |= HPIPE_G1_SET_1_G1_RX_SELMUFI_MASK;
+       data |= 0x0 << HPIPE_G1_SET_1_G1_RX_SELMUFI_OFFSET;
+       mask |= HPIPE_G1_SET_1_G1_RX_SELMUFF_MASK;
+       data |= 0x3 << HPIPE_G1_SET_1_G1_RX_SELMUFF_OFFSET;
+       mask |= HPIPE_G1_SET_1_G1_RX_DIGCK_DIV_MASK;
+       data |= 0x1 << HPIPE_G1_SET_1_G1_RX_DIGCK_DIV_OFFSET;
+       reg_set(hpipe_addr + HPIPE_G1_SET_1_REG, data, mask);
+
+       mask = HPIPE_G1_SETTINGS_3_G1_FFE_CAP_SEL_MASK;
+       data = 0xf << HPIPE_G1_SETTINGS_3_G1_FFE_CAP_SEL_OFFSET;
+       mask |= HPIPE_G1_SETTINGS_3_G1_FFE_RES_SEL_MASK;
+       data |= 0x2 << HPIPE_G1_SETTINGS_3_G1_FFE_RES_SEL_OFFSET;
+       mask |= HPIPE_G1_SETTINGS_3_G1_FFE_SETTING_FORCE_MASK;
+       data |= 0x1 << HPIPE_G1_SETTINGS_3_G1_FFE_SETTING_FORCE_OFFSET;
+       mask |= HPIPE_G1_SETTINGS_3_G1_FFE_DEG_RES_LEVEL_MASK;
+       data |= 0x1 << HPIPE_G1_SETTINGS_3_G1_FFE_DEG_RES_LEVEL_OFFSET;
+       mask |= HPIPE_G1_SETTINGS_3_G1_FFE_LOAD_RES_LEVEL_MASK;
+       data |= 0x1 << HPIPE_G1_SETTINGS_3_G1_FFE_LOAD_RES_LEVEL_OFFSET;
+       reg_set(hpipe_addr + HPIPE_G1_SETTINGS_3_REG, data, mask);
+
+       /* G2 settings */
+       mask = HPIPE_G2_SET_1_G2_RX_SELMUPI_MASK;
+       data = 0x0 << HPIPE_G2_SET_1_G2_RX_SELMUPI_OFFSET;
+       mask |= HPIPE_G2_SET_1_G2_RX_SELMUPP_MASK;
+       data |= 0x1 << HPIPE_G2_SET_1_G2_RX_SELMUPP_OFFSET;
+       mask |= HPIPE_G2_SET_1_G2_RX_SELMUFI_MASK;
+       data |= 0x0 << HPIPE_G2_SET_1_G2_RX_SELMUFI_OFFSET;
+       mask |= HPIPE_G2_SET_1_G2_RX_SELMUFF_MASK;
+       data |= 0x3 << HPIPE_G2_SET_1_G2_RX_SELMUFF_OFFSET;
+       mask |= HPIPE_G2_SET_1_G2_RX_DIGCK_DIV_MASK;
+       data |= 0x1 << HPIPE_G2_SET_1_G2_RX_DIGCK_DIV_OFFSET;
+       reg_set(hpipe_addr + HPIPE_G2_SET_1_REG, data, mask);
+
+       /* G3 settings */
+       mask = HPIPE_G3_SET_1_G3_RX_SELMUPI_MASK;
+       data = 0x2 << HPIPE_G3_SET_1_G3_RX_SELMUPI_OFFSET;
+       mask |= HPIPE_G3_SET_1_G3_RX_SELMUPF_MASK;
+       data |= 0x2 << HPIPE_G3_SET_1_G3_RX_SELMUPF_OFFSET;
+       mask |= HPIPE_G3_SET_1_G3_RX_SELMUFI_MASK;
+       data |= 0x3 << HPIPE_G3_SET_1_G3_RX_SELMUFI_OFFSET;
+       mask |= HPIPE_G3_SET_1_G3_RX_SELMUFF_MASK;
+       data |= 0x3 << HPIPE_G3_SET_1_G3_RX_SELMUFF_OFFSET;
+       mask |= HPIPE_G3_SET_1_G3_RX_DFE_EN_MASK;
+       data |= 0x1 << HPIPE_G3_SET_1_G3_RX_DFE_EN_OFFSET;
+       mask |= HPIPE_G3_SET_1_G3_RX_DIGCK_DIV_MASK;
+       data |= 0x2 << HPIPE_G3_SET_1_G3_RX_DIGCK_DIV_OFFSET;
+       mask |= HPIPE_G3_SET_1_G3_SAMPLER_INPAIRX2_EN_MASK;
+       data |= 0x0 << HPIPE_G3_SET_1_G3_SAMPLER_INPAIRX2_EN_OFFSET;
+       reg_set(hpipe_addr + HPIPE_G3_SET_1_REG, data, mask);
+
+       /* DTL Control */
+       mask = HPIPE_PWR_CTR_DTL_SQ_DET_EN_MASK;
+       data = 0x1 << HPIPE_PWR_CTR_DTL_SQ_DET_EN_OFFSET;
+       mask |= HPIPE_PWR_CTR_DTL_SQ_PLOOP_EN_MASK;
+       data |= 0x1 << HPIPE_PWR_CTR_DTL_SQ_PLOOP_EN_OFFSET;
+       mask |= HPIPE_PWR_CTR_DTL_FLOOP_EN_MASK;
+       data |= 0x1 << HPIPE_PWR_CTR_DTL_FLOOP_EN_OFFSET;
+       mask |= HPIPE_PWR_CTR_DTL_CLAMPING_SEL_MASK;
+       data |= 0x1 << HPIPE_PWR_CTR_DTL_CLAMPING_SEL_OFFSET;
+       mask |= HPIPE_PWR_CTR_DTL_INTPCLK_DIV_FORCE_MASK;
+       data |= 0x1 << HPIPE_PWR_CTR_DTL_INTPCLK_DIV_FORCE_OFFSET;
+       mask |= HPIPE_PWR_CTR_DTL_CLK_MODE_MASK;
+       data |= 0x1 << HPIPE_PWR_CTR_DTL_CLK_MODE_OFFSET;
+       mask |= HPIPE_PWR_CTR_DTL_CLK_MODE_FORCE_MASK;
+       data |= 0x1 << HPIPE_PWR_CTR_DTL_CLK_MODE_FORCE_OFFSET;
+       reg_set(hpipe_addr + HPIPE_PWR_CTR_DTL_REG, data, mask);
+
+       /* Trigger sampler enable pulse */
+       mask = HPIPE_SMAPLER_MASK;
+       data = 0x1 << HPIPE_SMAPLER_OFFSET;
+       reg_set(hpipe_addr + HPIPE_SAMPLER_N_PROC_CALIB_CTRL_REG, data, mask);
+       mask = HPIPE_SMAPLER_MASK;
+       data = 0x0 << HPIPE_SMAPLER_OFFSET;
+       reg_set(hpipe_addr + HPIPE_SAMPLER_N_PROC_CALIB_CTRL_REG, data, mask);
+
+       /* VDD Calibration Control 3 */
+       mask = HPIPE_EXT_SELLV_RXSAMPL_MASK;
+       data = 0x10 << HPIPE_EXT_SELLV_RXSAMPL_OFFSET;
+       reg_set(hpipe_addr + HPIPE_VDD_CAL_CTRL_REG, data, mask);
+
+       /* DFE Resolution Control */
+       mask = HPIPE_DFE_RES_FORCE_MASK;
+       data = 0x1 << HPIPE_DFE_RES_FORCE_OFFSET;
+       reg_set(hpipe_addr + HPIPE_DFE_REG0, data, mask);
+
+       /* DFE F3-F5 Coefficient Control */
+       mask = HPIPE_DFE_F3_F5_DFE_EN_MASK;
+       data = 0x0 << HPIPE_DFE_F3_F5_DFE_EN_OFFSET;
+       mask |= HPIPE_DFE_F3_F5_DFE_CTRL_MASK;
+       data = 0x0 << HPIPE_DFE_F3_F5_DFE_CTRL_OFFSET;
+       reg_set(hpipe_addr + HPIPE_DFE_F3_F5_REG, data, mask);
+
+       /* G3 Setting 3 */
+       mask = HPIPE_G3_FFE_CAP_SEL_MASK;
+       data = 0xf << HPIPE_G3_FFE_CAP_SEL_OFFSET;
+       mask |= HPIPE_G3_FFE_RES_SEL_MASK;
+       data |= 0x4 << HPIPE_G3_FFE_RES_SEL_OFFSET;
+       mask |= HPIPE_G3_FFE_SETTING_FORCE_MASK;
+       data |= 0x1 << HPIPE_G3_FFE_SETTING_FORCE_OFFSET;
+       mask |= HPIPE_G3_FFE_DEG_RES_LEVEL_MASK;
+       data |= 0x1 << HPIPE_G3_FFE_DEG_RES_LEVEL_OFFSET;
+       mask |= HPIPE_G3_FFE_LOAD_RES_LEVEL_MASK;
+       data |= 0x3 << HPIPE_G3_FFE_LOAD_RES_LEVEL_OFFSET;
+       reg_set(hpipe_addr + HPIPE_G3_SETTING_3_REG, data, mask);
+
+       /* G3 Setting 4 */
+       mask = HPIPE_G3_DFE_RES_MASK;
+       data = 0x1 << HPIPE_G3_DFE_RES_OFFSET;
+       reg_set(hpipe_addr + HPIPE_G3_SETTING_4_REG, data, mask);
+
+       /* Offset Phase Control */
+       mask = HPIPE_OS_PH_OFFSET_MASK;
+       data = 0x61 << HPIPE_OS_PH_OFFSET_OFFSET;
+       mask |= HPIPE_OS_PH_OFFSET_FORCE_MASK;
+       data |= 0x1 << HPIPE_OS_PH_OFFSET_FORCE_OFFSET;
+       mask |= HPIPE_OS_PH_VALID_MASK;
+       data |= 0x0 << HPIPE_OS_PH_VALID_OFFSET;
+       reg_set(hpipe_addr + HPIPE_PHASE_CONTROL_REG, data, mask);
+       mask = HPIPE_OS_PH_VALID_MASK;
+       data = 0x1 << HPIPE_OS_PH_VALID_OFFSET;
+       reg_set(hpipe_addr + HPIPE_PHASE_CONTROL_REG, data, mask);
+       mask = HPIPE_OS_PH_VALID_MASK;
+       data = 0x0 << HPIPE_OS_PH_VALID_OFFSET;
+       reg_set(hpipe_addr + HPIPE_PHASE_CONTROL_REG, data, mask);
+
+       /* Set G1 TX amplitude and TX post emphasis value */
+       mask = HPIPE_G1_SET_0_G1_TX_AMP_MASK;
+       data = 0x8 << HPIPE_G1_SET_0_G1_TX_AMP_OFFSET;
+       mask |= HPIPE_G1_SET_0_G1_TX_AMP_ADJ_MASK;
+       data |= 0x1 << HPIPE_G1_SET_0_G1_TX_AMP_ADJ_OFFSET;
+       mask |= HPIPE_G1_SET_0_G1_TX_EMPH1_MASK;
+       data |= 0x1 << HPIPE_G1_SET_0_G1_TX_EMPH1_OFFSET;
+       mask |= HPIPE_G1_SET_0_G1_TX_EMPH1_EN_MASK;
+       data |= 0x1 << HPIPE_G1_SET_0_G1_TX_EMPH1_EN_OFFSET;
+       reg_set(hpipe_addr + HPIPE_G1_SET_0_REG, data, mask);
+
+       /* Set G2 TX amplitude and TX post emphasis value */
+       mask = HPIPE_G2_SET_0_G2_TX_AMP_MASK;
+       data = 0xa << HPIPE_G2_SET_0_G2_TX_AMP_OFFSET;
+       mask |= HPIPE_G2_SET_0_G2_TX_AMP_ADJ_MASK;
+       data |= 0x1 << HPIPE_G2_SET_0_G2_TX_AMP_ADJ_OFFSET;
+       mask |= HPIPE_G2_SET_0_G2_TX_EMPH1_MASK;
+       data |= 0x2 << HPIPE_G2_SET_0_G2_TX_EMPH1_OFFSET;
+       mask |= HPIPE_G2_SET_0_G2_TX_EMPH1_EN_MASK;
+       data |= 0x1 << HPIPE_G2_SET_0_G2_TX_EMPH1_EN_OFFSET;
+       reg_set(hpipe_addr + HPIPE_G2_SET_0_REG, data, mask);
+
+       /* Set G3 TX amplitude and TX post emphasis value */
+       mask = HPIPE_G3_SET_0_G3_TX_AMP_MASK;
+       data = 0x1e << HPIPE_G3_SET_0_G3_TX_AMP_OFFSET;
+       mask |= HPIPE_G3_SET_0_G3_TX_AMP_ADJ_MASK;
+       data |= 0x1 << HPIPE_G3_SET_0_G3_TX_AMP_ADJ_OFFSET;
+       mask |= HPIPE_G3_SET_0_G3_TX_EMPH1_MASK;
+       data |= 0xe << HPIPE_G3_SET_0_G3_TX_EMPH1_OFFSET;
+       mask |= HPIPE_G3_SET_0_G3_TX_EMPH1_EN_MASK;
+       data |= 0x1 << HPIPE_G3_SET_0_G3_TX_EMPH1_EN_OFFSET;
+       mask |= HPIPE_G3_SET_0_G3_TX_SLEW_RATE_SEL_MASK;
+       data |= 0x4 << HPIPE_G3_SET_0_G3_TX_SLEW_RATE_SEL_OFFSET;
+       mask |= HPIPE_G3_SET_0_G3_TX_SLEW_CTRL_EN_MASK;
+       data |= 0x0 << HPIPE_G3_SET_0_G3_TX_SLEW_CTRL_EN_OFFSET;
+       reg_set(hpipe_addr + HPIPE_G3_SET_0_REG, data, mask);
+
+       /* SERDES External Configuration 2 register */
+       mask = SD_EXTERNAL_CONFIG2_SSC_ENABLE_MASK;
+       data = 0x1 << SD_EXTERNAL_CONFIG2_SSC_ENABLE_OFFSET;
+       reg_set(sd_ip_addr + SD_EXTERNAL_CONFIG2_REG, data, mask);
+
+       /* DFE reset sequence */
+       reg_set(hpipe_addr + HPIPE_PWR_CTR_REG,
+               0x1 << HPIPE_PWR_CTR_RST_DFE_OFFSET,
+               HPIPE_PWR_CTR_RST_DFE_MASK);
+       reg_set(hpipe_addr + HPIPE_PWR_CTR_REG,
+               0x0 << HPIPE_PWR_CTR_RST_DFE_OFFSET,
+               HPIPE_PWR_CTR_RST_DFE_MASK);
+       /* SW reset for interrupt logic */
+       reg_set(hpipe_addr + HPIPE_PWR_CTR_REG,
+               0x1 << HPIPE_PWR_CTR_SFT_RST_OFFSET,
+               HPIPE_PWR_CTR_SFT_RST_MASK);
+       reg_set(hpipe_addr + HPIPE_PWR_CTR_REG,
+               0x0 << HPIPE_PWR_CTR_SFT_RST_OFFSET,
+               HPIPE_PWR_CTR_SFT_RST_MASK);
+
+       debug_exit();
+
+       return ret;
+}
+
+/*
+ * Power on and configure a COMPHY lane for SGMII / HS-SGMII operation.
+ *
+ * comphy_base  - base address of the COMPHY unit
+ * comphy_index - lane number within the COMPHY unit
+ * comphy_mode  - encoded mode word; the serdes speed is extracted with
+ *                COMPHY_GET_SPEED() and must be COMPHY_SPEED_1_25G
+ *                (SGMII 1G) or COMPHY_SPEED_3_125G (HS-SGMII 2.5G)
+ *
+ * The sequence is: select the PHY mux for SGMII, hard-reset the lane,
+ * program the serdes baud rate, release reset, configure the analog
+ * parameters, power up PLL/TX/RX and finally run RX initialization.
+ * The order of the register writes below is mandated by the hardware
+ * bring-up sequence and must not be changed.
+ *
+ * Returns 0 on success, -EINVAL for an unsupported speed, -ETIMEDOUT
+ * if the PLL fails to lock or RX init does not complete.
+ */
+static int mvebu_cp110_comphy_sgmii_power_on(uint64_t comphy_base,
+                                    uint8_t comphy_index, uint32_t comphy_mode)
+{
+       uintptr_t hpipe_addr, sd_ip_addr, comphy_addr, addr;
+       uint32_t mask, data, sgmii_speed = COMPHY_GET_SPEED(comphy_mode);
+       int ret = 0;
+
+       debug_enter();
+
+       hpipe_addr = HPIPE_ADDR(COMPHY_PIPE_FROM_COMPHY_ADDR(comphy_base),
+                               comphy_index);
+       sd_ip_addr = SD_ADDR(COMPHY_PIPE_FROM_COMPHY_ADDR(comphy_base),
+                            comphy_index);
+       comphy_addr = COMPHY_ADDR(comphy_base, comphy_index);
+
+       /* configure phy selector for SGMII */
+       mvebu_cp110_comphy_set_phy_selector(comphy_base, comphy_index,
+                                           comphy_mode);
+
+       /* Configure the lane */
+       debug("stage: RFU configurations - hard reset comphy\n");
+       /* RFU configurations - hard reset comphy */
+       mask = COMMON_PHY_CFG1_PWR_UP_MASK;
+       data = 0x1 << COMMON_PHY_CFG1_PWR_UP_OFFSET;
+       mask |= COMMON_PHY_CFG1_PIPE_SELECT_MASK;
+       data |= 0x0 << COMMON_PHY_CFG1_PIPE_SELECT_OFFSET;
+       reg_set(comphy_addr + COMMON_PHY_CFG1_REG, data, mask);
+
+       /* Select Baud Rate of Comphy And PD_PLL/Tx/Rx */
+       mask = SD_EXTERNAL_CONFIG0_SD_PU_PLL_MASK;
+       data = 0x0 << SD_EXTERNAL_CONFIG0_SD_PU_PLL_OFFSET;
+       mask |= SD_EXTERNAL_CONFIG0_SD_PHY_GEN_RX_MASK;
+       mask |= SD_EXTERNAL_CONFIG0_SD_PHY_GEN_TX_MASK;
+
+       if (sgmii_speed == COMPHY_SPEED_1_25G) {
+               /* SGMII 1G, SerDes speed 1.25G */
+               data |= 0x6 << SD_EXTERNAL_CONFIG0_SD_PHY_GEN_RX_OFFSET;
+               data |= 0x6 << SD_EXTERNAL_CONFIG0_SD_PHY_GEN_TX_OFFSET;
+       } else if (sgmii_speed == COMPHY_SPEED_3_125G) {
+               /* HS SGMII (2.5G), SerDes speed 3.125G */
+               data |= 0x8 << SD_EXTERNAL_CONFIG0_SD_PHY_GEN_RX_OFFSET;
+               data |= 0x8 << SD_EXTERNAL_CONFIG0_SD_PHY_GEN_TX_OFFSET;
+       } else {
+               /* Other rates are not supported */
+               ERROR("unsupported SGMII speed on comphy%d\n", comphy_index);
+               return -EINVAL;
+       }
+
+       mask |= SD_EXTERNAL_CONFIG0_SD_PU_RX_MASK;
+       data |= 0 << SD_EXTERNAL_CONFIG0_SD_PU_RX_OFFSET;
+       mask |= SD_EXTERNAL_CONFIG0_SD_PU_TX_MASK;
+       data |= 0 << SD_EXTERNAL_CONFIG0_SD_PU_TX_OFFSET;
+       mask |= SD_EXTERNAL_CONFIG0_HALF_BUS_MODE_MASK;
+       data |= 1 << SD_EXTERNAL_CONFIG0_HALF_BUS_MODE_OFFSET;
+       reg_set(sd_ip_addr + SD_EXTERNAL_CONFIG0_REG, data, mask);
+
+       /* Set hard reset */
+       mask = SD_EXTERNAL_CONFIG1_RESET_IN_MASK;
+       data = 0x0 << SD_EXTERNAL_CONFIG1_RESET_IN_OFFSET;
+       mask |= SD_EXTERNAL_CONFIG1_RESET_CORE_MASK;
+       data |= 0x0 << SD_EXTERNAL_CONFIG1_RESET_CORE_OFFSET;
+       mask |= SD_EXTERNAL_CONFIG1_RF_RESET_IN_MASK;
+       data |= 0x0 << SD_EXTERNAL_CONFIG1_RF_RESET_IN_OFFSET;
+       reg_set(sd_ip_addr + SD_EXTERNAL_CONFIG1_REG, data, mask);
+
+       /* Release hard reset */
+       mask = SD_EXTERNAL_CONFIG1_RESET_IN_MASK;
+       data = 0x1 << SD_EXTERNAL_CONFIG1_RESET_IN_OFFSET;
+       mask |= SD_EXTERNAL_CONFIG1_RESET_CORE_MASK;
+       data |= 0x1 << SD_EXTERNAL_CONFIG1_RESET_CORE_OFFSET;
+       reg_set(sd_ip_addr + SD_EXTERNAL_CONFIG1_REG, data, mask);
+
+       /* Wait 1ms - until band gap and ref clock ready */
+       mdelay(1);
+
+       /* Make sure that 40 data bits is disabled
+        * This bit is not cleared by reset
+        */
+       mask = COMMON_PHY_CFG6_IF_40_SEL_MASK;
+       data = 0 << COMMON_PHY_CFG6_IF_40_SEL_OFFSET;
+       reg_set(comphy_addr + COMMON_PHY_CFG6_REG, data, mask);
+
+       /* Start comphy Configuration */
+       debug("stage: Comphy configuration\n");
+       /* set reference clock */
+       mask = HPIPE_MISC_REFCLK_SEL_MASK;
+       data = 0x0 << HPIPE_MISC_REFCLK_SEL_OFFSET;
+       reg_set(hpipe_addr + HPIPE_MISC_REG, data, mask);
+       /* Power and PLL Control */
+       mask = HPIPE_PWR_PLL_REF_FREQ_MASK;
+       data = 0x1 << HPIPE_PWR_PLL_REF_FREQ_OFFSET;
+       mask |= HPIPE_PWR_PLL_PHY_MODE_MASK;
+       data |= 0x4 << HPIPE_PWR_PLL_PHY_MODE_OFFSET;
+       reg_set(hpipe_addr + HPIPE_PWR_PLL_REG, data, mask);
+       /* Loopback register */
+       mask = HPIPE_LOOPBACK_SEL_MASK;
+       data = 0x1 << HPIPE_LOOPBACK_SEL_OFFSET;
+       reg_set(hpipe_addr + HPIPE_LOOPBACK_REG, data, mask);
+       /* rx control 1 */
+       mask = HPIPE_RX_CONTROL_1_RXCLK2X_SEL_MASK;
+       data = 0x1 << HPIPE_RX_CONTROL_1_RXCLK2X_SEL_OFFSET;
+       mask |= HPIPE_RX_CONTROL_1_CLK8T_EN_MASK;
+       data |= 0x0 << HPIPE_RX_CONTROL_1_CLK8T_EN_OFFSET;
+       reg_set(hpipe_addr + HPIPE_RX_CONTROL_1_REG, data, mask);
+       /* DTL Control */
+       mask = HPIPE_PWR_CTR_DTL_FLOOP_EN_MASK;
+       data = 0x0 << HPIPE_PWR_CTR_DTL_FLOOP_EN_OFFSET;
+       reg_set(hpipe_addr + HPIPE_PWR_CTR_DTL_REG, data, mask);
+
+       /* Set analog parameters from ETP(HW) - for now use the default values */
+       debug("stage: Analog parameters from ETP(HW)\n");
+
+       reg_set(hpipe_addr + HPIPE_G1_SET_0_REG,
+               0x1 << HPIPE_G1_SET_0_G1_TX_EMPH1_OFFSET,
+               HPIPE_G1_SET_0_G1_TX_EMPH1_MASK);
+
+       debug("stage: RFU configurations- Power Up PLL,Tx,Rx\n");
+       /* SERDES External Configuration */
+       mask = SD_EXTERNAL_CONFIG0_SD_PU_PLL_MASK;
+       data = 0x1 << SD_EXTERNAL_CONFIG0_SD_PU_PLL_OFFSET;
+       mask |= SD_EXTERNAL_CONFIG0_SD_PU_RX_MASK;
+       data |= 0x1 << SD_EXTERNAL_CONFIG0_SD_PU_RX_OFFSET;
+       mask |= SD_EXTERNAL_CONFIG0_SD_PU_TX_MASK;
+       data |= 0x1 << SD_EXTERNAL_CONFIG0_SD_PU_TX_OFFSET;
+       reg_set(sd_ip_addr + SD_EXTERNAL_CONFIG0_REG, data, mask);
+
+       ret = mvebu_cp110_comphy_is_pll_locked(comphy_base, comphy_index);
+       if (ret)
+               return ret;
+
+       /* RX init */
+       mask = SD_EXTERNAL_CONFIG1_RX_INIT_MASK;
+       data = 0x1 << SD_EXTERNAL_CONFIG1_RX_INIT_OFFSET;
+       reg_set(sd_ip_addr + SD_EXTERNAL_CONFIG1_REG, data, mask);
+
+       /* check that RX init done */
+       addr = sd_ip_addr + SD_EXTERNAL_STATUS0_REG;
+       data = SD_EXTERNAL_STATUS0_RX_INIT_MASK;
+       mask = data;
+       /* non-zero return means the RX_INIT done bit was not set in time */
+       data = polling_with_timeout(addr, data, mask, 100, REG_32BIT);
+       if (data != 0) {
+               ERROR("RX init failed\n");
+               ret = -ETIMEDOUT;
+       }
+
+       debug("stage: RF Reset\n");
+       /* RF Reset */
+       mask = SD_EXTERNAL_CONFIG1_RX_INIT_MASK;
+       data = 0x0 << SD_EXTERNAL_CONFIG1_RX_INIT_OFFSET;
+       mask |= SD_EXTERNAL_CONFIG1_RF_RESET_IN_MASK;
+       data |= 0x1 << SD_EXTERNAL_CONFIG1_RF_RESET_IN_OFFSET;
+       reg_set(sd_ip_addr + SD_EXTERNAL_CONFIG1_REG, data, mask);
+
+       debug_exit();
+
+       return ret;
+}
+
+/*
+ * Power on and configure a COMPHY lane for XFI/SFI operation.
+ *
+ * comphy_base  - base address of the COMPHY unit
+ * comphy_index - lane number within the COMPHY unit
+ * comphy_mode  - encoded mode word; the serdes speed is extracted with
+ *                COMPHY_GET_SPEED() and must be COMPHY_SPEED_5_15625G,
+ *                COMPHY_SPEED_10_3125G or COMPHY_SPEED_DEFAULT
+ *
+ * The sequence is: select the PHY mux for XFI/SFI, hard-reset the lane,
+ * program the serdes baud rate, release reset, configure the analog and
+ * TX-training parameters, power up PLL/TX/RX and finally run RX
+ * initialization.  The order of the register writes below is mandated
+ * by the hardware bring-up sequence and must not be changed.
+ *
+ * Returns 0 on success, -EINVAL for an unsupported speed, -ETIMEDOUT
+ * if the PLLs fail to lock or RX init does not complete.
+ */
+static int mvebu_cp110_comphy_xfi_power_on(uint64_t comphy_base,
+                                          uint8_t comphy_index,
+                                          uint32_t comphy_mode)
+{
+       uintptr_t hpipe_addr, sd_ip_addr, comphy_addr, addr;
+       uint32_t mask, data, speed = COMPHY_GET_SPEED(comphy_mode);
+       int ret = 0;
+
+       debug_enter();
+
+       if ((speed != COMPHY_SPEED_5_15625G) &&
+            (speed != COMPHY_SPEED_10_3125G) &&
+            (speed != COMPHY_SPEED_DEFAULT)) {
+               ERROR("comphy:%d: unsupported sfi/xfi speed\n", comphy_index);
+               return -EINVAL;
+       }
+
+       hpipe_addr = HPIPE_ADDR(COMPHY_PIPE_FROM_COMPHY_ADDR(comphy_base),
+                               comphy_index);
+       sd_ip_addr = SD_ADDR(COMPHY_PIPE_FROM_COMPHY_ADDR(comphy_base),
+                            comphy_index);
+       comphy_addr = COMPHY_ADDR(comphy_base, comphy_index);
+
+       /* configure phy selector for XFI/SFI */
+       mvebu_cp110_comphy_set_phy_selector(comphy_base, comphy_index,
+                                           comphy_mode);
+
+       debug("stage: RFU configurations - hard reset comphy\n");
+       /* RFU configurations - hard reset comphy */
+       mask = COMMON_PHY_CFG1_PWR_UP_MASK;
+       data = 0x1 << COMMON_PHY_CFG1_PWR_UP_OFFSET;
+       mask |= COMMON_PHY_CFG1_PIPE_SELECT_MASK;
+       data |= 0x0 << COMMON_PHY_CFG1_PIPE_SELECT_OFFSET;
+       reg_set(comphy_addr + COMMON_PHY_CFG1_REG, data, mask);
+
+       /* Make sure that 40 data bits is disabled
+        * This bit is not cleared by reset
+        */
+       mask = COMMON_PHY_CFG6_IF_40_SEL_MASK;
+       data = 0 << COMMON_PHY_CFG6_IF_40_SEL_OFFSET;
+       reg_set(comphy_addr + COMMON_PHY_CFG6_REG, data, mask);
+
+       /* Select Baud Rate of Comphy And PD_PLL/Tx/Rx */
+       mask = SD_EXTERNAL_CONFIG0_SD_PU_PLL_MASK;
+       data = 0x0 << SD_EXTERNAL_CONFIG0_SD_PU_PLL_OFFSET;
+       mask |= SD_EXTERNAL_CONFIG0_SD_PHY_GEN_RX_MASK;
+       data |= 0xE << SD_EXTERNAL_CONFIG0_SD_PHY_GEN_RX_OFFSET;
+       mask |= SD_EXTERNAL_CONFIG0_SD_PHY_GEN_TX_MASK;
+       data |= 0xE << SD_EXTERNAL_CONFIG0_SD_PHY_GEN_TX_OFFSET;
+       mask |= SD_EXTERNAL_CONFIG0_SD_PU_RX_MASK;
+       data |= 0 << SD_EXTERNAL_CONFIG0_SD_PU_RX_OFFSET;
+       mask |= SD_EXTERNAL_CONFIG0_SD_PU_TX_MASK;
+       data |= 0 << SD_EXTERNAL_CONFIG0_SD_PU_TX_OFFSET;
+       mask |= SD_EXTERNAL_CONFIG0_HALF_BUS_MODE_MASK;
+       data |= 0 << SD_EXTERNAL_CONFIG0_HALF_BUS_MODE_OFFSET;
+       reg_set(sd_ip_addr + SD_EXTERNAL_CONFIG0_REG, data, mask);
+
+       /* release from hard reset */
+       mask = SD_EXTERNAL_CONFIG1_RESET_IN_MASK;
+       data = 0x0 << SD_EXTERNAL_CONFIG1_RESET_IN_OFFSET;
+       mask |= SD_EXTERNAL_CONFIG1_RESET_CORE_MASK;
+       data |= 0x0 << SD_EXTERNAL_CONFIG1_RESET_CORE_OFFSET;
+       mask |= SD_EXTERNAL_CONFIG1_RF_RESET_IN_MASK;
+       data |= 0x0 << SD_EXTERNAL_CONFIG1_RF_RESET_IN_OFFSET;
+       reg_set(sd_ip_addr + SD_EXTERNAL_CONFIG1_REG, data, mask);
+
+       mask = SD_EXTERNAL_CONFIG1_RESET_IN_MASK;
+       data = 0x1 << SD_EXTERNAL_CONFIG1_RESET_IN_OFFSET;
+       mask |= SD_EXTERNAL_CONFIG1_RESET_CORE_MASK;
+       data |= 0x1 << SD_EXTERNAL_CONFIG1_RESET_CORE_OFFSET;
+       reg_set(sd_ip_addr + SD_EXTERNAL_CONFIG1_REG, data, mask);
+
+       /* Wait 1ms - until band gap and ref clock ready */
+       mdelay(1);
+
+       /* Start comphy Configuration */
+       debug("stage: Comphy configuration\n");
+       /* set reference clock */
+       mask = HPIPE_MISC_ICP_FORCE_MASK;
+       data = (speed == COMPHY_SPEED_5_15625G) ?
+               (0x0 << HPIPE_MISC_ICP_FORCE_OFFSET) :
+               (0x1 << HPIPE_MISC_ICP_FORCE_OFFSET);
+       mask |= HPIPE_MISC_REFCLK_SEL_MASK;
+       data |= 0x0 << HPIPE_MISC_REFCLK_SEL_OFFSET;
+       reg_set(hpipe_addr + HPIPE_MISC_REG, data, mask);
+       /* Power and PLL Control */
+       mask = HPIPE_PWR_PLL_REF_FREQ_MASK;
+       data = 0x1 << HPIPE_PWR_PLL_REF_FREQ_OFFSET;
+       mask |= HPIPE_PWR_PLL_PHY_MODE_MASK;
+       data |= 0x4 << HPIPE_PWR_PLL_PHY_MODE_OFFSET;
+       reg_set(hpipe_addr + HPIPE_PWR_PLL_REG, data, mask);
+       /* Loopback register */
+       mask = HPIPE_LOOPBACK_SEL_MASK;
+       data = 0x1 << HPIPE_LOOPBACK_SEL_OFFSET;
+       reg_set(hpipe_addr + HPIPE_LOOPBACK_REG, data, mask);
+       /* rx control 1 */
+       mask = HPIPE_RX_CONTROL_1_RXCLK2X_SEL_MASK;
+       data = 0x1 << HPIPE_RX_CONTROL_1_RXCLK2X_SEL_OFFSET;
+       mask |= HPIPE_RX_CONTROL_1_CLK8T_EN_MASK;
+       data |= 0x1 << HPIPE_RX_CONTROL_1_CLK8T_EN_OFFSET;
+       reg_set(hpipe_addr + HPIPE_RX_CONTROL_1_REG, data, mask);
+       /* DTL Control */
+       mask = HPIPE_PWR_CTR_DTL_FLOOP_EN_MASK;
+       data = 0x1 << HPIPE_PWR_CTR_DTL_FLOOP_EN_OFFSET;
+       reg_set(hpipe_addr + HPIPE_PWR_CTR_DTL_REG, data, mask);
+
+       /* Transmitter/Receiver Speed Divider Force */
+       if (speed == COMPHY_SPEED_5_15625G) {
+               mask = HPIPE_SPD_DIV_FORCE_RX_SPD_DIV_MASK;
+               data = 1 << HPIPE_SPD_DIV_FORCE_RX_SPD_DIV_OFFSET;
+               mask |= HPIPE_SPD_DIV_FORCE_RX_SPD_DIV_FORCE_MASK;
+               data |= 1 << HPIPE_SPD_DIV_FORCE_RX_SPD_DIV_FORCE_OFFSET;
+               mask |= HPIPE_SPD_DIV_FORCE_TX_SPD_DIV_MASK;
+               data |= 1 << HPIPE_SPD_DIV_FORCE_TX_SPD_DIV_OFFSET;
+               mask |= HPIPE_SPD_DIV_FORCE_TX_SPD_DIV_FORCE_MASK;
+               data |= 1 << HPIPE_SPD_DIV_FORCE_TX_SPD_DIV_FORCE_OFFSET;
+       } else {
+               mask = HPIPE_TXDIGCK_DIV_FORCE_MASK;
+               data = 0x1 << HPIPE_TXDIGCK_DIV_FORCE_OFFSET;
+       }
+       reg_set(hpipe_addr + HPIPE_SPD_DIV_FORCE_REG, data, mask);
+
+       /* Set analog parameters from ETP(HW) */
+       debug("stage: Analog parameters from ETP(HW)\n");
+       /* SERDES External Configuration 2 */
+       mask = SD_EXTERNAL_CONFIG2_PIN_DFE_EN_MASK;
+       data = 0x1 << SD_EXTERNAL_CONFIG2_PIN_DFE_EN_OFFSET;
+       reg_set(sd_ip_addr + SD_EXTERNAL_CONFIG2_REG, data, mask);
+       /* 0x7-DFE Resolution control */
+       mask = HPIPE_DFE_RES_FORCE_MASK;
+       data = 0x1 << HPIPE_DFE_RES_FORCE_OFFSET;
+       reg_set(hpipe_addr + HPIPE_DFE_REG0, data, mask);
+       /* 0xd-G1_Setting_0 */
+       if (speed == COMPHY_SPEED_5_15625G) {
+               mask = HPIPE_G1_SET_0_G1_TX_EMPH1_MASK;
+               data = 0x6 << HPIPE_G1_SET_0_G1_TX_EMPH1_OFFSET;
+       } else {
+               mask = HPIPE_G1_SET_0_G1_TX_AMP_MASK;
+               data = 0x1c << HPIPE_G1_SET_0_G1_TX_AMP_OFFSET;
+               mask |= HPIPE_G1_SET_0_G1_TX_EMPH1_MASK;
+               data |= 0xe << HPIPE_G1_SET_0_G1_TX_EMPH1_OFFSET;
+       }
+       reg_set(hpipe_addr + HPIPE_G1_SET_0_REG, data, mask);
+       /* Generation 1 setting 2 (G1_Setting_2) */
+       mask = HPIPE_G1_SET_2_G1_TX_EMPH0_MASK;
+       data = 0x0 << HPIPE_G1_SET_2_G1_TX_EMPH0_OFFSET;
+       mask |= HPIPE_G1_SET_2_G1_TX_EMPH0_EN_MASK;
+       data |= 0x1 << HPIPE_G1_SET_2_G1_TX_EMPH0_EN_OFFSET;
+       reg_set(hpipe_addr + HPIPE_G1_SET_2_REG, data, mask);
+       /* Transmitter Slew Rate Control register (tx_reg1) */
+       mask = HPIPE_TX_REG1_TX_EMPH_RES_MASK;
+       data = 0x3 << HPIPE_TX_REG1_TX_EMPH_RES_OFFSET;
+       mask |= HPIPE_TX_REG1_SLC_EN_MASK;
+       data |= 0x3f << HPIPE_TX_REG1_SLC_EN_OFFSET;
+       reg_set(hpipe_addr + HPIPE_TX_REG1_REG, data, mask);
+       /* Impedance Calibration Control register (cal_reg1) */
+       mask = HPIPE_CAL_REG_1_EXT_TXIMP_MASK;
+       data = 0xe << HPIPE_CAL_REG_1_EXT_TXIMP_OFFSET;
+       mask |= HPIPE_CAL_REG_1_EXT_TXIMP_EN_MASK;
+       data |= 0x1 << HPIPE_CAL_REG_1_EXT_TXIMP_EN_OFFSET;
+       reg_set(hpipe_addr + HPIPE_CAL_REG1_REG, data, mask);
+       /* Generation 1 Setting 5 (g1_setting_5) */
+       mask = HPIPE_G1_SETTING_5_G1_ICP_MASK;
+       /* Use the G1_ICP offset matching the mask above (the original code
+        * mistakenly shifted by HPIPE_CAL_REG_1_EXT_TXIMP_OFFSET, copied
+        * from the cal_reg1 write; harmless only because the value is 0)
+        */
+       data = 0 << HPIPE_G1_SETTING_5_G1_ICP_OFFSET;
+       reg_set(hpipe_addr + HPIPE_G1_SETTING_5_REG, data, mask);
+
+       /* 0xE-G1_Setting_1 */
+       mask = HPIPE_G1_SET_1_G1_RX_DFE_EN_MASK;
+       data = 0x1 << HPIPE_G1_SET_1_G1_RX_DFE_EN_OFFSET;
+       if (speed == COMPHY_SPEED_5_15625G) {
+               mask |= HPIPE_G1_SET_1_G1_RX_SELMUPI_MASK;
+               data |= 0x1 << HPIPE_G1_SET_1_G1_RX_SELMUPI_OFFSET;
+               mask |= HPIPE_G1_SET_1_G1_RX_SELMUPP_MASK;
+               data |= 0x1 << HPIPE_G1_SET_1_G1_RX_SELMUPP_OFFSET;
+       } else {
+               mask |= HPIPE_G1_SET_1_G1_RX_SELMUPI_MASK;
+               data |= 0x2 << HPIPE_G1_SET_1_G1_RX_SELMUPI_OFFSET;
+               mask |= HPIPE_G1_SET_1_G1_RX_SELMUPP_MASK;
+               data |= 0x2 << HPIPE_G1_SET_1_G1_RX_SELMUPP_OFFSET;
+               mask |= HPIPE_G1_SET_1_G1_RX_SELMUFI_MASK;
+               data |= 0x0 << HPIPE_G1_SET_1_G1_RX_SELMUFI_OFFSET;
+               mask |= HPIPE_G1_SET_1_G1_RX_SELMUFF_MASK;
+               data |= 0x1 << HPIPE_G1_SET_1_G1_RX_SELMUFF_OFFSET;
+               mask |= HPIPE_G1_SET_1_G1_RX_DIGCK_DIV_MASK;
+               data |= 0x3 << HPIPE_G1_SET_1_G1_RX_DIGCK_DIV_OFFSET;
+       }
+       reg_set(hpipe_addr + HPIPE_G1_SET_1_REG, data, mask);
+
+       /* 0xA-DFE_Reg3 */
+       mask = HPIPE_DFE_F3_F5_DFE_EN_MASK;
+       data = 0x0 << HPIPE_DFE_F3_F5_DFE_EN_OFFSET;
+       mask |= HPIPE_DFE_F3_F5_DFE_CTRL_MASK;
+       data |= 0x0 << HPIPE_DFE_F3_F5_DFE_CTRL_OFFSET;
+       reg_set(hpipe_addr + HPIPE_DFE_F3_F5_REG, data, mask);
+
+       /* 0x111-G1_Setting_4 */
+       mask = HPIPE_G1_SETTINGS_4_G1_DFE_RES_MASK;
+       data = 0x1 << HPIPE_G1_SETTINGS_4_G1_DFE_RES_OFFSET;
+       reg_set(hpipe_addr + HPIPE_G1_SETTINGS_4_REG, data, mask);
+       /* Generation 1 setting 3 (G1_Setting_3) */
+       mask = HPIPE_G1_SETTINGS_3_G1_FBCK_SEL_MASK;
+       data = 0x1 << HPIPE_G1_SETTINGS_3_G1_FBCK_SEL_OFFSET;
+       if (speed == COMPHY_SPEED_5_15625G) {
+               /* Force FFE (Feed Forward Equalization) to 5G */
+               mask |= HPIPE_G1_SETTINGS_3_G1_FFE_CAP_SEL_MASK;
+               data |= 0xf << HPIPE_G1_SETTINGS_3_G1_FFE_CAP_SEL_OFFSET;
+               mask |= HPIPE_G1_SETTINGS_3_G1_FFE_RES_SEL_MASK;
+               data |= 0x4 << HPIPE_G1_SETTINGS_3_G1_FFE_RES_SEL_OFFSET;
+               mask |= HPIPE_G1_SETTINGS_3_G1_FFE_SETTING_FORCE_MASK;
+               data |= 0x1 << HPIPE_G1_SETTINGS_3_G1_FFE_SETTING_FORCE_OFFSET;
+       }
+       reg_set(hpipe_addr + HPIPE_G1_SETTINGS_3_REG, data, mask);
+
+       /* Configure RX training timer */
+       mask = HPIPE_RX_TRAIN_TIMER_MASK;
+       data = 0x13 << HPIPE_RX_TRAIN_TIMER_OFFSET;
+       reg_set(hpipe_addr + HPIPE_TX_TRAIN_CTRL_5_REG, data, mask);
+
+       /* Enable TX train peak to peak hold */
+       mask = HPIPE_TX_TRAIN_P2P_HOLD_MASK;
+       data = 0x1 << HPIPE_TX_TRAIN_P2P_HOLD_OFFSET;
+       reg_set(hpipe_addr + HPIPE_TX_TRAIN_CTRL_0_REG, data, mask);
+
+       /* Configure TX preset index */
+       mask = HPIPE_TX_PRESET_INDEX_MASK;
+       data = 0x2 << HPIPE_TX_PRESET_INDEX_OFFSET;
+       reg_set(hpipe_addr + HPIPE_TX_PRESET_INDEX_REG, data, mask);
+
+       /* Disable pattern lock lost timeout */
+       mask = HPIPE_PATTERN_LOCK_LOST_TIMEOUT_EN_MASK;
+       data = 0x0 << HPIPE_PATTERN_LOCK_LOST_TIMEOUT_EN_OFFSET;
+       reg_set(hpipe_addr + HPIPE_FRAME_DETECT_CTRL_3_REG, data, mask);
+
+       /* Configure TX training pattern and TX training 16bit auto */
+       mask = HPIPE_TX_TRAIN_16BIT_AUTO_EN_MASK;
+       data = 0x1 << HPIPE_TX_TRAIN_16BIT_AUTO_EN_OFFSET;
+       mask |= HPIPE_TX_TRAIN_PAT_SEL_MASK;
+       data |= 0x1 << HPIPE_TX_TRAIN_PAT_SEL_OFFSET;
+       reg_set(hpipe_addr + HPIPE_TX_TRAIN_REG, data, mask);
+
+       /* Configure training pattern number */
+       mask = HPIPE_TRAIN_PAT_NUM_MASK;
+       data = 0x88 << HPIPE_TRAIN_PAT_NUM_OFFSET;
+       reg_set(hpipe_addr + HPIPE_FRAME_DETECT_CTRL_0_REG, data, mask);
+
+       /* Configure differential Manchester encoder to Ethernet mode */
+       mask = HPIPE_DME_ETHERNET_MODE_MASK;
+       data = 0x1 << HPIPE_DME_ETHERNET_MODE_OFFSET;
+       reg_set(hpipe_addr + HPIPE_DME_REG, data, mask);
+
+       /* Configure VDD Continuous Calibration */
+       mask = HPIPE_CAL_VDD_CONT_MODE_MASK;
+       data = 0x1 << HPIPE_CAL_VDD_CONT_MODE_OFFSET;
+       reg_set(hpipe_addr + HPIPE_VDD_CAL_0_REG, data, mask);
+
+       /* Trigger sampler enable pulse (by toggling the bit) */
+       mask = HPIPE_RX_SAMPLER_OS_GAIN_MASK;
+       data = 0x3 << HPIPE_RX_SAMPLER_OS_GAIN_OFFSET;
+       mask |= HPIPE_SMAPLER_MASK;
+       data |= 0x1 << HPIPE_SMAPLER_OFFSET;
+       reg_set(hpipe_addr + HPIPE_SAMPLER_N_PROC_CALIB_CTRL_REG, data, mask);
+       mask = HPIPE_SMAPLER_MASK;
+       data = 0x0 << HPIPE_SMAPLER_OFFSET;
+       reg_set(hpipe_addr + HPIPE_SAMPLER_N_PROC_CALIB_CTRL_REG, data, mask);
+
+       /* Set External RX Regulator Control */
+       mask = HPIPE_EXT_SELLV_RXSAMPL_MASK;
+       data = 0x1A << HPIPE_EXT_SELLV_RXSAMPL_OFFSET;
+       reg_set(hpipe_addr + HPIPE_VDD_CAL_CTRL_REG, data, mask);
+
+       debug("stage: RFU configurations- Power Up PLL,Tx,Rx\n");
+       /* SERDES External Configuration */
+       mask = SD_EXTERNAL_CONFIG0_SD_PU_PLL_MASK;
+       data = 0x1 << SD_EXTERNAL_CONFIG0_SD_PU_PLL_OFFSET;
+       mask |= SD_EXTERNAL_CONFIG0_SD_PU_RX_MASK;
+       data |= 0x1 << SD_EXTERNAL_CONFIG0_SD_PU_RX_OFFSET;
+       mask |= SD_EXTERNAL_CONFIG0_SD_PU_TX_MASK;
+       data |= 0x1 << SD_EXTERNAL_CONFIG0_SD_PU_TX_OFFSET;
+       reg_set(sd_ip_addr + SD_EXTERNAL_CONFIG0_REG, data, mask);
+
+       /* check PLL rx & tx ready */
+       addr = sd_ip_addr + SD_EXTERNAL_STATUS0_REG;
+       data = SD_EXTERNAL_STATUS0_PLL_RX_MASK |
+              SD_EXTERNAL_STATUS0_PLL_TX_MASK;
+       mask = data;
+       /* non-zero return means the polled bits were not set in time */
+       data = polling_with_timeout(addr, data, mask,
+                                   PLL_LOCK_TIMEOUT, REG_32BIT);
+       if (data != 0) {
+               if (data & SD_EXTERNAL_STATUS0_PLL_RX_MASK)
+                       ERROR("RX PLL is not locked\n");
+               if (data & SD_EXTERNAL_STATUS0_PLL_TX_MASK)
+                       ERROR("TX PLL is not locked\n");
+
+               ret = -ETIMEDOUT;
+       }
+
+       /* RX init */
+       mask = SD_EXTERNAL_CONFIG1_RX_INIT_MASK;
+       data = 0x1 << SD_EXTERNAL_CONFIG1_RX_INIT_OFFSET;
+       reg_set(sd_ip_addr + SD_EXTERNAL_CONFIG1_REG, data, mask);
+
+       /* check that RX init done */
+       addr = sd_ip_addr + SD_EXTERNAL_STATUS0_REG;
+       data = SD_EXTERNAL_STATUS0_RX_INIT_MASK;
+       mask = data;
+       data = polling_with_timeout(addr, data, mask, 100, REG_32BIT);
+       if (data != 0) {
+               ERROR("RX init failed\n");
+               ret = -ETIMEDOUT;
+       }
+
+       debug("stage: RF Reset\n");
+       /* RF Reset */
+       mask = SD_EXTERNAL_CONFIG1_RX_INIT_MASK;
+       data = 0x0 << SD_EXTERNAL_CONFIG1_RX_INIT_OFFSET;
+       mask |= SD_EXTERNAL_CONFIG1_RF_RESET_IN_MASK;
+       data |= 0x1 << SD_EXTERNAL_CONFIG1_RF_RESET_IN_OFFSET;
+       reg_set(sd_ip_addr + SD_EXTERNAL_CONFIG1_REG, data, mask);
+
+       debug_exit();
+
+       return ret;
+}
+
+static int mvebu_cp110_comphy_pcie_power_on(uint64_t comphy_base,
+                                    uint8_t comphy_index, uint32_t comphy_mode)
+{
+       int ret = 0;
+       uint32_t reg, mask, data, pcie_width;
+       uint32_t clk_dir;
+       uintptr_t hpipe_addr, comphy_addr, addr;
+       _Bool clk_src = COMPHY_GET_CLK_SRC(comphy_mode);
+
+       hpipe_addr = HPIPE_ADDR(COMPHY_PIPE_FROM_COMPHY_ADDR(comphy_base),
+                               comphy_index);
+       comphy_addr = COMPHY_ADDR(comphy_base, comphy_index);
+       pcie_width = COMPHY_GET_PCIE_WIDTH(comphy_mode);
+
+       debug_enter();
+
+       spin_lock(&cp110_mac_reset_lock);
+
+       reg = mmio_read_32(SYS_CTRL_FROM_COMPHY_ADDR(comphy_base) +
+                                               SYS_CTRL_UINIT_SOFT_RESET_REG);
+       switch (comphy_index) {
+       case COMPHY_LANE0:
+               reg |= PCIE_MAC_RESET_MASK_PORT0;
+               break;
+       case COMPHY_LANE4:
+               reg |= PCIE_MAC_RESET_MASK_PORT1;
+               break;
+       case COMPHY_LANE5:
+               reg |= PCIE_MAC_RESET_MASK_PORT2;
+               break;
+       }
+
+       mmio_write_32(SYS_CTRL_FROM_COMPHY_ADDR(comphy_base) +
+                                           SYS_CTRL_UINIT_SOFT_RESET_REG, reg);
+       spin_unlock(&cp110_mac_reset_lock);
+
+       /* Configure PIPE selector for PCIE */
+       mvebu_cp110_comphy_set_pipe_selector(comphy_base, comphy_index,
+                                            comphy_mode);
+
+       /*
+        * Read SAR (Sample-At-Reset) configuration for the PCIe clock
+        * direction.
+        *
+        * SerDes Lane 4/5 got the PCIe ref-clock #1,
+        * and SerDes Lane 0 got PCIe ref-clock #0
+        */
+       reg = mmio_read_32(DFX_FROM_COMPHY_ADDR(comphy_base) +
+                          SAR_STATUS_0_REG);
+       if (comphy_index == COMPHY_LANE4 || comphy_index == COMPHY_LANE5)
+               clk_dir = (reg & SAR_RST_PCIE1_CLOCK_CONFIG_CP1_MASK) >>
+                                         SAR_RST_PCIE1_CLOCK_CONFIG_CP1_OFFSET;
+       else
+               clk_dir = (reg & SAR_RST_PCIE0_CLOCK_CONFIG_CP1_MASK) >>
+                                         SAR_RST_PCIE0_CLOCK_CONFIG_CP1_OFFSET;
+
+       debug("On lane %d\n", comphy_index);
+       debug("PCIe clock direction = %x\n", clk_dir);
+       debug("PCIe Width = %d\n", pcie_width);
+
+       /* enable PCIe X4 and X2 */
+       if (comphy_index == COMPHY_LANE0) {
+               if (pcie_width == PCIE_LNK_X4) {
+                       data = 0x1 << COMMON_PHY_SD_CTRL1_PCIE_X4_EN_OFFSET;
+                       mask = COMMON_PHY_SD_CTRL1_PCIE_X4_EN_MASK;
+                       reg_set(comphy_base + COMMON_PHY_SD_CTRL1,
+                               data, mask);
+               } else if (pcie_width == PCIE_LNK_X2) {
+                       data = 0x1 << COMMON_PHY_SD_CTRL1_PCIE_X2_EN_OFFSET;
+                       mask = COMMON_PHY_SD_CTRL1_PCIE_X2_EN_MASK;
+                       reg_set(comphy_base + COMMON_PHY_SD_CTRL1, data, mask);
+               }
+       }
+
+       /* If PCIe clock is output and clock source from SerDes lane 5,
+        * need to configure the clock-source MUX.
+        * By default, the clock source is from lane 4
+        */
+       if (clk_dir && clk_src && (comphy_index == COMPHY_LANE5)) {
+               data = DFX_DEV_GEN_PCIE_CLK_SRC_MUX <<
+                                               DFX_DEV_GEN_PCIE_CLK_SRC_OFFSET;
+               mask = DFX_DEV_GEN_PCIE_CLK_SRC_MASK;
+               reg_set(DFX_FROM_COMPHY_ADDR(comphy_base) +
+                       DFX_DEV_GEN_CTRL12_REG, data, mask);
+       }
+
+       debug("stage: RFU configurations - hard reset comphy\n");
+       /* RFU configurations - hard reset comphy */
+       mask = COMMON_PHY_CFG1_PWR_UP_MASK;
+       data = 0x1 << COMMON_PHY_CFG1_PWR_UP_OFFSET;
+       mask |= COMMON_PHY_CFG1_PIPE_SELECT_MASK;
+       data |= 0x1 << COMMON_PHY_CFG1_PIPE_SELECT_OFFSET;
+       mask |= COMMON_PHY_CFG1_PWR_ON_RESET_MASK;
+       data |= 0x0 << COMMON_PHY_CFG1_PWR_ON_RESET_OFFSET;
+       mask |= COMMON_PHY_CFG1_CORE_RSTN_MASK;
+       data |= 0x0 << COMMON_PHY_CFG1_CORE_RSTN_OFFSET;
+       mask |= COMMON_PHY_PHY_MODE_MASK;
+       data |= 0x0 << COMMON_PHY_PHY_MODE_OFFSET;
+       reg_set(comphy_addr + COMMON_PHY_CFG1_REG, data, mask);
+
+       /* release from hard reset */
+       mask = COMMON_PHY_CFG1_PWR_ON_RESET_MASK;
+       data = 0x1 << COMMON_PHY_CFG1_PWR_ON_RESET_OFFSET;
+       mask |= COMMON_PHY_CFG1_CORE_RSTN_MASK;
+       data |= 0x1 << COMMON_PHY_CFG1_CORE_RSTN_OFFSET;
+       reg_set(comphy_addr + COMMON_PHY_CFG1_REG, data, mask);
+
+       /* Wait 1ms - until band gap and ref clock ready */
+       mdelay(1);
+       /* Start comphy Configuration */
+       debug("stage: Comphy configuration\n");
+       /* Set PIPE soft reset */
+       mask = HPIPE_RST_CLK_CTRL_PIPE_RST_MASK;
+       data = 0x1 << HPIPE_RST_CLK_CTRL_PIPE_RST_OFFSET;
+       /* Set PHY datapath width mode for V0 */
+       mask |= HPIPE_RST_CLK_CTRL_FIXED_PCLK_MASK;
+       data |= 0x1 << HPIPE_RST_CLK_CTRL_FIXED_PCLK_OFFSET;
+       /* Set Data bus width USB mode for V0 */
+       mask |= HPIPE_RST_CLK_CTRL_PIPE_WIDTH_MASK;
+       data |= 0x0 << HPIPE_RST_CLK_CTRL_PIPE_WIDTH_OFFSET;
+       /* Set CORE_CLK output frequency for 250Mhz */
+       mask |= HPIPE_RST_CLK_CTRL_CORE_FREQ_SEL_MASK;
+       data |= 0x0 << HPIPE_RST_CLK_CTRL_CORE_FREQ_SEL_OFFSET;
+       reg_set(hpipe_addr + HPIPE_RST_CLK_CTRL_REG, data, mask);
+       /* Set PLL ready delay for 0x2 */
+       data = 0x2 << HPIPE_CLK_SRC_LO_PLL_RDY_DL_OFFSET;
+       mask = HPIPE_CLK_SRC_LO_PLL_RDY_DL_MASK;
+       if (pcie_width != PCIE_LNK_X1) {
+               data |= 0x1 << HPIPE_CLK_SRC_LO_BUNDLE_PERIOD_SEL_OFFSET;
+               mask |= HPIPE_CLK_SRC_LO_BUNDLE_PERIOD_SEL_MASK;
+               data |= 0x1 << HPIPE_CLK_SRC_LO_BUNDLE_PERIOD_SCALE_OFFSET;
+               mask |= HPIPE_CLK_SRC_LO_BUNDLE_PERIOD_SCALE_MASK;
+       }
+       reg_set(hpipe_addr + HPIPE_CLK_SRC_LO_REG, data, mask);
+
+       /* Set PIPE mode interface to PCIe3 - 0x1  & set lane order */
+       data = 0x1 << HPIPE_CLK_SRC_HI_MODE_PIPE_OFFSET;
+       mask = HPIPE_CLK_SRC_HI_MODE_PIPE_MASK;
+       if (pcie_width != PCIE_LNK_X1) {
+               mask |= HPIPE_CLK_SRC_HI_LANE_STRT_MASK;
+               mask |= HPIPE_CLK_SRC_HI_LANE_MASTER_MASK;
+               mask |= HPIPE_CLK_SRC_HI_LANE_BREAK_MASK;
+               if (comphy_index == 0) {
+                       data |= 0x1 << HPIPE_CLK_SRC_HI_LANE_STRT_OFFSET;
+                       data |= 0x1 << HPIPE_CLK_SRC_HI_LANE_MASTER_OFFSET;
+               } else if (comphy_index == (pcie_width - 1)) {
+                       data |= 0x1 << HPIPE_CLK_SRC_HI_LANE_BREAK_OFFSET;
+               }
+       }
+       reg_set(hpipe_addr + HPIPE_CLK_SRC_HI_REG, data, mask);
+       /* Config update polarity equalization */
+       data = 0x1 << HPIPE_CFG_UPDATE_POLARITY_OFFSET;
+       mask = HPIPE_CFG_UPDATE_POLARITY_MASK;
+       reg_set(hpipe_addr + HPIPE_LANE_EQ_CFG1_REG, data, mask);
+       /* Set PIPE version 4 to mode enable */
+       data = 0x1 << HPIPE_DFE_CTRL_28_PIPE4_OFFSET;
+       mask = HPIPE_DFE_CTRL_28_PIPE4_MASK;
+       reg_set(hpipe_addr + HPIPE_DFE_CTRL_28_REG, data, mask);
+       /* TODO: check if pcie clock is output/input - for bringup use input*/
+       /* Enable PIN clock 100M_125M */
+       mask = 0;
+       data = 0;
+       /* Only if clock is output, configure the clock-source mux */
+       if (clk_dir) {
+               mask |= HPIPE_MISC_CLK100M_125M_MASK;
+               data |= 0x1 << HPIPE_MISC_CLK100M_125M_OFFSET;
+       }
+       /* Set PIN_TXDCLK_2X Clock Freq. Selection for outputs 500MHz clock */
+       mask |= HPIPE_MISC_TXDCLK_2X_MASK;
+       data |= 0x0 << HPIPE_MISC_TXDCLK_2X_OFFSET;
+       /* Enable 500MHz Clock */
+       mask |= HPIPE_MISC_CLK500_EN_MASK;
+       data |= 0x1 << HPIPE_MISC_CLK500_EN_OFFSET;
+       if (clk_dir) { /* output */
+               /* Set reference clock comes from group 1 */
+               mask |= HPIPE_MISC_REFCLK_SEL_MASK;
+               data |= 0x0 << HPIPE_MISC_REFCLK_SEL_OFFSET;
+       } else {
+               /* Set reference clock comes from group 2 */
+               mask |= HPIPE_MISC_REFCLK_SEL_MASK;
+               data |= 0x1 << HPIPE_MISC_REFCLK_SEL_OFFSET;
+       }
+       mask |= HPIPE_MISC_ICP_FORCE_MASK;
+       data |= 0x1 << HPIPE_MISC_ICP_FORCE_OFFSET;
+       reg_set(hpipe_addr + HPIPE_MISC_REG, data, mask);
+       if (clk_dir) { /* output */
+               /* Set reference frequcency select - 0x2 for 25MHz*/
+               mask = HPIPE_PWR_PLL_REF_FREQ_MASK;
+               data = 0x2 << HPIPE_PWR_PLL_REF_FREQ_OFFSET;
+       } else {
+               /* Set reference frequcency select - 0x0 for 100MHz*/
+               mask = HPIPE_PWR_PLL_REF_FREQ_MASK;
+               data = 0x0 << HPIPE_PWR_PLL_REF_FREQ_OFFSET;
+       }
+       /* Set PHY mode to PCIe */
+       mask |= HPIPE_PWR_PLL_PHY_MODE_MASK;
+       data |= 0x3 << HPIPE_PWR_PLL_PHY_MODE_OFFSET;
+       reg_set(hpipe_addr + HPIPE_PWR_PLL_REG, data, mask);
+
+       /* ref clock alignment */
+       if (pcie_width != PCIE_LNK_X1) {
+               mask = HPIPE_LANE_ALIGN_OFF_MASK;
+               data = 0x0 << HPIPE_LANE_ALIGN_OFF_OFFSET;
+               reg_set(hpipe_addr + HPIPE_LANE_ALIGN_REG, data, mask);
+       }
+
+       /* Set the amount of time spent in the LoZ state - set for 0x7 only if
+        * the PCIe clock is output
+        */
+       if (clk_dir)
+               reg_set(hpipe_addr + HPIPE_GLOBAL_PM_CTRL,
+                       0x7 << HPIPE_GLOBAL_PM_RXDLOZ_WAIT_OFFSET,
+                       HPIPE_GLOBAL_PM_RXDLOZ_WAIT_MASK);
+
+       /* Set Maximal PHY Generation Setting(8Gbps) */
+       mask = HPIPE_INTERFACE_GEN_MAX_MASK;
+       data = 0x2 << HPIPE_INTERFACE_GEN_MAX_OFFSET;
+       /* Bypass frame detection and sync detection for RX DATA */
+       mask |= HPIPE_INTERFACE_DET_BYPASS_MASK;
+       data |= 0x1 << HPIPE_INTERFACE_DET_BYPASS_OFFSET;
+       /* Set Link Train Mode (Tx training control pins are used) */
+       mask |= HPIPE_INTERFACE_LINK_TRAIN_MASK;
+       data |= 0x1 << HPIPE_INTERFACE_LINK_TRAIN_OFFSET;
+       reg_set(hpipe_addr + HPIPE_INTERFACE_REG, data, mask);
+
+       /* Set Idle_sync enable */
+       mask = HPIPE_PCIE_IDLE_SYNC_MASK;
+       data = 0x1 << HPIPE_PCIE_IDLE_SYNC_OFFSET;
+       /* Select bits for PCIE Gen3(32bit) */
+       mask |= HPIPE_PCIE_SEL_BITS_MASK;
+       data |= 0x2 << HPIPE_PCIE_SEL_BITS_OFFSET;
+       reg_set(hpipe_addr + HPIPE_PCIE_REG0, data, mask);
+
+       /* Enable Tx_adapt_g1 */
+       mask = HPIPE_TX_TRAIN_CTRL_G1_MASK;
+       data = 0x1 << HPIPE_TX_TRAIN_CTRL_G1_OFFSET;
+       /* Enable Tx_adapt_gn1 */
+       mask |= HPIPE_TX_TRAIN_CTRL_GN1_MASK;
+       data |= 0x1 << HPIPE_TX_TRAIN_CTRL_GN1_OFFSET;
+       /* Disable Tx_adapt_g0 */
+       mask |= HPIPE_TX_TRAIN_CTRL_G0_MASK;
+       data |= 0x0 << HPIPE_TX_TRAIN_CTRL_G0_OFFSET;
+       reg_set(hpipe_addr + HPIPE_TX_TRAIN_CTRL_REG, data, mask);
+
+       /* Set reg_tx_train_chk_init */
+       mask = HPIPE_TX_TRAIN_CHK_INIT_MASK;
+       data = 0x0 << HPIPE_TX_TRAIN_CHK_INIT_OFFSET;
+       /* Enable TX_COE_FM_PIN_PCIE3_EN */
+       mask |= HPIPE_TX_TRAIN_COE_FM_PIN_PCIE3_MASK;
+       data |= 0x1 << HPIPE_TX_TRAIN_COE_FM_PIN_PCIE3_OFFSET;
+       reg_set(hpipe_addr + HPIPE_TX_TRAIN_REG, data, mask);
+
+       debug("stage: TRx training parameters\n");
+       /* Set Preset sweep configurations */
+       mask = HPIPE_TX_TX_STATUS_CHECK_MODE_MASK;
+       data = 0x1 << HPIPE_TX_STATUS_CHECK_MODE_OFFSET;
+       mask |= HPIPE_TX_NUM_OF_PRESET_MASK;
+       data |= 0x7 << HPIPE_TX_NUM_OF_PRESET_OFFSET;
+       mask |= HPIPE_TX_SWEEP_PRESET_EN_MASK;
+       data |= 0x1 << HPIPE_TX_SWEEP_PRESET_EN_OFFSET;
+       reg_set(hpipe_addr + HPIPE_TX_TRAIN_CTRL_11_REG, data, mask);
+
+       /* Tx train start configuration */
+       mask = HPIPE_TX_TRAIN_START_SQ_EN_MASK;
+       data = 0x1 << HPIPE_TX_TRAIN_START_SQ_EN_OFFSET;
+       mask |= HPIPE_TX_TRAIN_START_FRM_DET_EN_MASK;
+       data |= 0x0 << HPIPE_TX_TRAIN_START_FRM_DET_EN_OFFSET;
+       mask |= HPIPE_TX_TRAIN_START_FRM_LOCK_EN_MASK;
+       data |= 0x0 << HPIPE_TX_TRAIN_START_FRM_LOCK_EN_OFFSET;
+       mask |= HPIPE_TX_TRAIN_WAIT_TIME_EN_MASK;
+       data |= 0x1 << HPIPE_TX_TRAIN_WAIT_TIME_EN_OFFSET;
+       reg_set(hpipe_addr + HPIPE_TX_TRAIN_CTRL_5_REG, data, mask);
+
+       /* Enable Tx train P2P */
+       mask = HPIPE_TX_TRAIN_P2P_HOLD_MASK;
+       data = 0x1 << HPIPE_TX_TRAIN_P2P_HOLD_OFFSET;
+       reg_set(hpipe_addr + HPIPE_TX_TRAIN_CTRL_0_REG, data, mask);
+
+       /* Configure Tx train timeout */
+       mask = HPIPE_TRX_TRAIN_TIMER_MASK;
+       data = 0x17 << HPIPE_TRX_TRAIN_TIMER_OFFSET;
+       reg_set(hpipe_addr + HPIPE_TX_TRAIN_CTRL_4_REG, data, mask);
+
+       /* Disable G0/G1/GN1 adaptation */
+       mask = HPIPE_TX_TRAIN_CTRL_G1_MASK | HPIPE_TX_TRAIN_CTRL_GN1_MASK
+               | HPIPE_TX_TRAIN_CTRL_G0_OFFSET;
+       data = 0;
+       reg_set(hpipe_addr + HPIPE_TX_TRAIN_CTRL_REG, data, mask);
+
+       /* Disable DTL frequency loop */
+       mask = HPIPE_PWR_CTR_DTL_FLOOP_EN_MASK;
+       data = 0x0 << HPIPE_PWR_CTR_DTL_FLOOP_EN_OFFSET;
+       reg_set(hpipe_addr + HPIPE_PWR_CTR_DTL_REG, data, mask);
+
+       /* Configure G3 DFE */
+       mask = HPIPE_G3_DFE_RES_MASK;
+       data = 0x3 << HPIPE_G3_DFE_RES_OFFSET;
+       reg_set(hpipe_addr + HPIPE_G3_SETTING_4_REG, data, mask);
+
+       /* Use TX/RX training result for DFE */
+       mask = HPIPE_DFE_RES_FORCE_MASK;
+       data = 0x0 << HPIPE_DFE_RES_FORCE_OFFSET;
+       reg_set(hpipe_addr + HPIPE_DFE_REG0,  data, mask);
+
+       /* Configure initial and final coefficient value for receiver */
+       mask = HPIPE_G3_SET_1_G3_RX_SELMUPI_MASK;
+       data = 0x1 << HPIPE_G3_SET_1_G3_RX_SELMUPI_OFFSET;
+
+       mask |= HPIPE_G3_SET_1_G3_RX_SELMUPF_MASK;
+       data |= 0x1 << HPIPE_G3_SET_1_G3_RX_SELMUPF_OFFSET;
+
+       mask |= HPIPE_G3_SET_1_G3_SAMPLER_INPAIRX2_EN_MASK;
+       data |= 0x0 << HPIPE_G3_SET_1_G3_SAMPLER_INPAIRX2_EN_OFFSET;
+       reg_set(hpipe_addr + HPIPE_G3_SET_1_REG,  data, mask);
+
+       /* Trigger sampler enable pulse */
+       mask = HPIPE_SMAPLER_MASK;
+       data = 0x1 << HPIPE_SMAPLER_OFFSET;
+       reg_set(hpipe_addr + HPIPE_SAMPLER_N_PROC_CALIB_CTRL_REG, data, mask);
+       udelay(5);
+       reg_set(hpipe_addr + HPIPE_SAMPLER_N_PROC_CALIB_CTRL_REG, 0, mask);
+
+       /* FFE resistor tuning for different bandwidth  */
+       mask = HPIPE_G3_FFE_DEG_RES_LEVEL_MASK;
+       data = 0x1 << HPIPE_G3_FFE_DEG_RES_LEVEL_OFFSET;
+       mask |= HPIPE_G3_FFE_LOAD_RES_LEVEL_MASK;
+       data |= 0x3 << HPIPE_G3_FFE_LOAD_RES_LEVEL_OFFSET;
+       reg_set(hpipe_addr + HPIPE_G3_SETTING_3_REG, data, mask);
+
+       /* Pattern lock lost timeout disable */
+       mask = HPIPE_PATTERN_LOCK_LOST_TIMEOUT_EN_MASK;
+       data = 0x0 << HPIPE_PATTERN_LOCK_LOST_TIMEOUT_EN_OFFSET;
+       reg_set(hpipe_addr + HPIPE_FRAME_DETECT_CTRL_3_REG, data, mask);
+
+       /* Configure DFE adaptations */
+       mask = HPIPE_CDR_RX_MAX_DFE_ADAPT_0_MASK;
+       data = 0x0 << HPIPE_CDR_RX_MAX_DFE_ADAPT_0_OFFSET;
+       mask |= HPIPE_CDR_RX_MAX_DFE_ADAPT_1_MASK;
+       data |= 0x0 << HPIPE_CDR_RX_MAX_DFE_ADAPT_1_OFFSET;
+       mask |= HPIPE_CDR_MAX_DFE_ADAPT_0_MASK;
+       data |= 0x0 << HPIPE_CDR_MAX_DFE_ADAPT_0_OFFSET;
+       mask |= HPIPE_CDR_MAX_DFE_ADAPT_1_MASK;
+       data |= 0x1 << HPIPE_CDR_MAX_DFE_ADAPT_1_OFFSET;
+       reg_set(hpipe_addr + HPIPE_CDR_CONTROL_REG, data, mask);
+
+       mask = HPIPE_DFE_TX_MAX_DFE_ADAPT_MASK;
+       data = 0x0 << HPIPE_DFE_TX_MAX_DFE_ADAPT_OFFSET;
+       reg_set(hpipe_addr + HPIPE_DFE_CONTROL_REG, data, mask);
+
+       /* Genration 2 setting 1*/
+       mask = HPIPE_G2_SET_1_G2_RX_SELMUPI_MASK;
+       data = 0x0 << HPIPE_G2_SET_1_G2_RX_SELMUPI_OFFSET;
+       mask |= HPIPE_G2_SET_1_G2_RX_SELMUPP_MASK;
+       data |= 0x1 << HPIPE_G2_SET_1_G2_RX_SELMUPP_OFFSET;
+       mask |= HPIPE_G2_SET_1_G2_RX_SELMUFI_MASK;
+       data |= 0x0 << HPIPE_G2_SET_1_G2_RX_SELMUFI_OFFSET;
+       reg_set(hpipe_addr + HPIPE_G2_SET_1_REG, data, mask);
+
+       /* DFE enable */
+       mask = HPIPE_G2_DFE_RES_MASK;
+       data = 0x3 << HPIPE_G2_DFE_RES_OFFSET;
+       reg_set(hpipe_addr + HPIPE_G2_SETTINGS_4_REG, data, mask);
+
+       /* Configure DFE Resolution */
+       mask = HPIPE_LANE_CFG4_DFE_EN_SEL_MASK;
+       data = 0x1 << HPIPE_LANE_CFG4_DFE_EN_SEL_OFFSET;
+       reg_set(hpipe_addr + HPIPE_LANE_CFG4_REG, data, mask);
+
+       /* VDD calibration control */
+       mask = HPIPE_EXT_SELLV_RXSAMPL_MASK;
+       data = 0x16 << HPIPE_EXT_SELLV_RXSAMPL_OFFSET;
+       reg_set(hpipe_addr + HPIPE_VDD_CAL_CTRL_REG, data, mask);
+
+       /* Set PLL Charge-pump Current Control */
+       mask = HPIPE_G3_SETTING_5_G3_ICP_MASK;
+       data = 0x4 << HPIPE_G3_SETTING_5_G3_ICP_OFFSET;
+       reg_set(hpipe_addr + HPIPE_G3_SETTING_5_REG, data, mask);
+
+       /* Set lane rqualization remote setting */
+       mask = HPIPE_LANE_CFG_FOM_DIRN_OVERRIDE_MASK;
+       data = 0x1 << HPIPE_LANE_CFG_FOM_DIRN_OVERRIDE_OFFSET;
+       mask |= HPIPE_LANE_CFG_FOM_ONLY_MODE_MASK;
+       data |= 0x1 << HPIPE_LANE_CFG_FOM_ONLY_MODE_OFFFSET;
+       mask |= HPIPE_LANE_CFG_FOM_PRESET_VECTOR_MASK;
+       data |= 0x6 << HPIPE_LANE_CFG_FOM_PRESET_VECTOR_OFFSET;
+       reg_set(hpipe_addr + HPIPE_LANE_EQ_REMOTE_SETTING_REG, data, mask);
+
+       mask = HPIPE_CFG_EQ_BUNDLE_DIS_MASK;
+       data = 0x1 << HPIPE_CFG_EQ_BUNDLE_DIS_OFFSET;
+       reg_set(hpipe_addr + HPIPE_LANE_EQ_CFG2_REG, data, mask);
+
+       debug("stage: Comphy power up\n");
+
+       /* For PCIe X4 or X2:
+        * release from reset only after finish to configure all lanes
+        */
+       if ((pcie_width == PCIE_LNK_X1) || (comphy_index == (pcie_width - 1))) {
+               uint32_t i, start_lane, end_lane;
+
+               if (pcie_width != PCIE_LNK_X1) {
+                       /* allows writing to all lanes in one write */
+                       data = 0x0;
+                       if (pcie_width == PCIE_LNK_X2)
+                               mask = COMMON_PHY_SD_CTRL1_COMPHY_0_1_PORT_MASK;
+                       else if (pcie_width == PCIE_LNK_X4)
+                               mask = COMMON_PHY_SD_CTRL1_COMPHY_0_3_PORT_MASK;
+                       reg_set(comphy_base + COMMON_PHY_SD_CTRL1, data, mask);
+                       start_lane = 0;
+                       end_lane = pcie_width;
+
+                       /* Release from PIPE soft reset
+                        * For PCIe by4 or by2:
+                        * release from soft reset all lanes - can't use
+                        * read modify write
+                        */
+                       reg_set(HPIPE_ADDR(
+                               COMPHY_PIPE_FROM_COMPHY_ADDR(comphy_base), 0) +
+                               HPIPE_RST_CLK_CTRL_REG, 0x24, 0xffffffff);
+               } else {
+                       start_lane = comphy_index;
+                       end_lane = comphy_index + 1;
+
+                       /* Release from PIPE soft reset
+                        * for PCIe by4 or by2:
+                        * release from soft reset all lanes
+                        */
+                       reg_set(hpipe_addr + HPIPE_RST_CLK_CTRL_REG,
+                               0x0 << HPIPE_RST_CLK_CTRL_PIPE_RST_OFFSET,
+                               HPIPE_RST_CLK_CTRL_PIPE_RST_MASK);
+               }
+
+               if (pcie_width != PCIE_LNK_X1) {
+                       /* disable writing to all lanes with one write */
+                       if (pcie_width == PCIE_LNK_X2) {
+                               data = (COMPHY_LANE0 <<
+                               COMMON_PHY_SD_CTRL1_COMPHY_0_PORT_OFFSET) |
+                               (COMPHY_LANE1 <<
+                               COMMON_PHY_SD_CTRL1_COMPHY_1_PORT_OFFSET);
+                               mask = COMMON_PHY_SD_CTRL1_COMPHY_0_1_PORT_MASK;
+                       } else if (pcie_width == PCIE_LNK_X4) {
+                               data = (COMPHY_LANE0 <<
+                               COMMON_PHY_SD_CTRL1_COMPHY_0_PORT_OFFSET) |
+                               (COMPHY_LANE1 <<
+                               COMMON_PHY_SD_CTRL1_COMPHY_1_PORT_OFFSET) |
+                               (COMPHY_LANE2 <<
+                               COMMON_PHY_SD_CTRL1_COMPHY_2_PORT_OFFSET) |
+                               (COMPHY_LANE3 <<
+                               COMMON_PHY_SD_CTRL1_COMPHY_3_PORT_OFFSET);
+                               mask = COMMON_PHY_SD_CTRL1_COMPHY_0_3_PORT_MASK;
+                       }
+                       reg_set(comphy_base + COMMON_PHY_SD_CTRL1,
+                               data, mask);
+               }
+
+               debug("stage: Check PLL\n");
+               /* Read lane status */
+               for (i = start_lane; i < end_lane; i++) {
+                       addr = HPIPE_ADDR(
+                               COMPHY_PIPE_FROM_COMPHY_ADDR(comphy_base), i) +
+                               HPIPE_LANE_STATUS1_REG;
+                       data = HPIPE_LANE_STATUS1_PCLK_EN_MASK;
+                       mask = data;
+                       ret = polling_with_timeout(addr, data, mask,
+                                                  PLL_LOCK_TIMEOUT,
+                                                  REG_32BIT);
+                       if (ret)
+                               ERROR("Failed to lock PCIE PLL\n");
+               }
+       }
+
+       debug_exit();
+
+       return ret;
+}
+
+/*
+ * Power on and configure a single COMPHY lane for RXAUI mode.
+ *
+ * comphy_base  - base address of the COMPHY register block
+ * comphy_index - lane number to bring up
+ * comphy_mode  - mode word forwarded to the PHY selector
+ *
+ * Returns 0 on success, or -ETIMEDOUT if the SerDes PLL RX/TX ready
+ * polling or the RX-init polling below times out.
+ */
+static int mvebu_cp110_comphy_rxaui_power_on(uint64_t comphy_base,
+                                    uint8_t comphy_index, uint32_t comphy_mode)
+{
+       uintptr_t hpipe_addr, sd_ip_addr, comphy_addr, addr;
+       uint32_t mask, data;
+       int ret = 0;
+
+       debug_enter();
+
+       /* Derive the per-lane HPIPE, COMPHY and SerDes register bases */
+       hpipe_addr = HPIPE_ADDR(COMPHY_PIPE_FROM_COMPHY_ADDR(comphy_base),
+                               comphy_index);
+       comphy_addr = COMPHY_ADDR(comphy_base, comphy_index);
+       sd_ip_addr = SD_ADDR(COMPHY_PIPE_FROM_COMPHY_ADDR(comphy_base),
+                            comphy_index);
+
+       /* configure phy selector for RXAUI */
+       mvebu_cp110_comphy_set_phy_selector(comphy_base, comphy_index,
+                                           comphy_mode);
+
+       /* RFU configurations - hard reset comphy */
+       mask = COMMON_PHY_CFG1_PWR_UP_MASK;
+       data = 0x1 << COMMON_PHY_CFG1_PWR_UP_OFFSET;
+       mask |= COMMON_PHY_CFG1_PIPE_SELECT_MASK;
+       data |= 0x0 << COMMON_PHY_CFG1_PIPE_SELECT_OFFSET;
+       reg_set(comphy_addr + COMMON_PHY_CFG1_REG, data, mask);
+
+       /* Route the RXAUI mux: lane 2 enables RXAUI0, lane 4 enables RXAUI1 */
+       if (comphy_index == 2) {
+               reg_set(comphy_base + COMMON_PHY_SD_CTRL1,
+                       0x1 << COMMON_PHY_SD_CTRL1_RXAUI0_OFFSET,
+                       COMMON_PHY_SD_CTRL1_RXAUI0_MASK);
+       }
+       if (comphy_index == 4) {
+               reg_set(comphy_base + COMMON_PHY_SD_CTRL1,
+                       0x1 << COMMON_PHY_SD_CTRL1_RXAUI1_OFFSET,
+                       COMMON_PHY_SD_CTRL1_RXAUI1_MASK);
+       }
+
+       /* Select Baud Rate of Comphy And PD_PLL/Tx/Rx */
+       mask = SD_EXTERNAL_CONFIG0_SD_PU_PLL_MASK;
+       data = 0x0 << SD_EXTERNAL_CONFIG0_SD_PU_PLL_OFFSET;
+       mask |= SD_EXTERNAL_CONFIG0_SD_PHY_GEN_RX_MASK;
+       data |= 0xB << SD_EXTERNAL_CONFIG0_SD_PHY_GEN_RX_OFFSET;
+       mask |= SD_EXTERNAL_CONFIG0_SD_PHY_GEN_TX_MASK;
+       data |= 0xB << SD_EXTERNAL_CONFIG0_SD_PHY_GEN_TX_OFFSET;
+       mask |= SD_EXTERNAL_CONFIG0_SD_PU_RX_MASK;
+       data |= 0x0 << SD_EXTERNAL_CONFIG0_SD_PU_RX_OFFSET;
+       mask |= SD_EXTERNAL_CONFIG0_SD_PU_TX_MASK;
+       data |= 0x0 << SD_EXTERNAL_CONFIG0_SD_PU_TX_OFFSET;
+       mask |= SD_EXTERNAL_CONFIG0_HALF_BUS_MODE_MASK;
+       data |= 0x0 << SD_EXTERNAL_CONFIG0_HALF_BUS_MODE_OFFSET;
+       mask |= SD_EXTERNAL_CONFIG0_MEDIA_MODE_MASK;
+       data |= 0x1 << SD_EXTERNAL_CONFIG0_MEDIA_MODE_OFFSET;
+       reg_set(sd_ip_addr + SD_EXTERNAL_CONFIG0_REG, data, mask);
+
+       /* release from hard reset */
+       mask = SD_EXTERNAL_CONFIG1_RESET_IN_MASK;
+       data = 0x0 << SD_EXTERNAL_CONFIG1_RESET_IN_OFFSET;
+       mask |= SD_EXTERNAL_CONFIG1_RESET_CORE_MASK;
+       data |= 0x0 << SD_EXTERNAL_CONFIG1_RESET_CORE_OFFSET;
+       mask |= SD_EXTERNAL_CONFIG1_RF_RESET_IN_MASK;
+       data |= 0x0 << SD_EXTERNAL_CONFIG1_RF_RESET_IN_OFFSET;
+       reg_set(sd_ip_addr + SD_EXTERNAL_CONFIG1_REG, data, mask);
+
+       /* De-assert reset-in and core reset (resets are active low here) */
+       mask = SD_EXTERNAL_CONFIG1_RESET_IN_MASK;
+       data = 0x1 << SD_EXTERNAL_CONFIG1_RESET_IN_OFFSET;
+       mask |= SD_EXTERNAL_CONFIG1_RESET_CORE_MASK;
+       data |= 0x1 << SD_EXTERNAL_CONFIG1_RESET_CORE_OFFSET;
+       reg_set(sd_ip_addr + SD_EXTERNAL_CONFIG1_REG, data, mask);
+
+       /* Wait 1ms - until band gap and ref clock ready */
+       mdelay(1);
+
+       /* Start comphy Configuration */
+       debug("stage: Comphy configuration\n");
+       /* set reference clock */
+       reg_set(hpipe_addr + HPIPE_MISC_REG,
+               0x0 << HPIPE_MISC_REFCLK_SEL_OFFSET,
+               HPIPE_MISC_REFCLK_SEL_MASK);
+       /* Power and PLL Control */
+       mask = HPIPE_PWR_PLL_REF_FREQ_MASK;
+       data = 0x1 << HPIPE_PWR_PLL_REF_FREQ_OFFSET;
+       mask |= HPIPE_PWR_PLL_PHY_MODE_MASK;
+       data |= 0x4 << HPIPE_PWR_PLL_PHY_MODE_OFFSET;
+       reg_set(hpipe_addr + HPIPE_PWR_PLL_REG, data, mask);
+       /* Loopback register */
+       reg_set(hpipe_addr + HPIPE_LOOPBACK_REG,
+               0x1 << HPIPE_LOOPBACK_SEL_OFFSET, HPIPE_LOOPBACK_SEL_MASK);
+       /* rx control 1 */
+       mask = HPIPE_RX_CONTROL_1_RXCLK2X_SEL_MASK;
+       data = 0x1 << HPIPE_RX_CONTROL_1_RXCLK2X_SEL_OFFSET;
+       mask |= HPIPE_RX_CONTROL_1_CLK8T_EN_MASK;
+       data |= 0x1 << HPIPE_RX_CONTROL_1_CLK8T_EN_OFFSET;
+       reg_set(hpipe_addr + HPIPE_RX_CONTROL_1_REG, data, mask);
+       /* DTL Control - disable the DTL frequency loop */
+       reg_set(hpipe_addr + HPIPE_PWR_CTR_DTL_REG,
+               0x0 << HPIPE_PWR_CTR_DTL_FLOOP_EN_OFFSET,
+               HPIPE_PWR_CTR_DTL_FLOOP_EN_MASK);
+
+       /* Set analog parameters from ETP(HW) */
+       debug("stage: Analog parameters from ETP(HW)\n");
+       /* SERDES External Configuration 2 */
+       reg_set(sd_ip_addr + SD_EXTERNAL_CONFIG2_REG,
+               0x1 << SD_EXTERNAL_CONFIG2_PIN_DFE_EN_OFFSET,
+               SD_EXTERNAL_CONFIG2_PIN_DFE_EN_MASK);
+       /* 0x7-DFE Resolution control */
+       reg_set(hpipe_addr + HPIPE_DFE_REG0, 0x1 << HPIPE_DFE_RES_FORCE_OFFSET,
+               HPIPE_DFE_RES_FORCE_MASK);
+       /* 0xd-G1_Setting_0 */
+       reg_set(hpipe_addr + HPIPE_G1_SET_0_REG,
+               0xd << HPIPE_G1_SET_0_G1_TX_EMPH1_OFFSET,
+               HPIPE_G1_SET_0_G1_TX_EMPH1_MASK);
+       /* 0xE-G1_Setting_1 */
+       mask = HPIPE_G1_SET_1_G1_RX_SELMUPI_MASK;
+       data = 0x1 << HPIPE_G1_SET_1_G1_RX_SELMUPI_OFFSET;
+       mask |= HPIPE_G1_SET_1_G1_RX_SELMUPP_MASK;
+       data |= 0x1 << HPIPE_G1_SET_1_G1_RX_SELMUPP_OFFSET;
+       mask |= HPIPE_G1_SET_1_G1_RX_DFE_EN_MASK;
+       data |= 0x1 << HPIPE_G1_SET_1_G1_RX_DFE_EN_OFFSET;
+       reg_set(hpipe_addr + HPIPE_G1_SET_1_REG, data, mask);
+       /* 0xA-DFE_Reg3 */
+       mask = HPIPE_DFE_F3_F5_DFE_EN_MASK;
+       data = 0x0 << HPIPE_DFE_F3_F5_DFE_EN_OFFSET;
+       mask |= HPIPE_DFE_F3_F5_DFE_CTRL_MASK;
+       data |= 0x0 << HPIPE_DFE_F3_F5_DFE_CTRL_OFFSET;
+       reg_set(hpipe_addr + HPIPE_DFE_F3_F5_REG, data, mask);
+
+       /* 0x111-G1_Setting_4 */
+       mask = HPIPE_G1_SETTINGS_4_G1_DFE_RES_MASK;
+       data = 0x1 << HPIPE_G1_SETTINGS_4_G1_DFE_RES_OFFSET;
+       reg_set(hpipe_addr + HPIPE_G1_SETTINGS_4_REG, data, mask);
+
+       debug("stage: RFU configurations- Power Up PLL,Tx,Rx\n");
+       /* SERDES External Configuration */
+       mask = SD_EXTERNAL_CONFIG0_SD_PU_PLL_MASK;
+       data = 0x1 << SD_EXTERNAL_CONFIG0_SD_PU_PLL_OFFSET;
+       mask |= SD_EXTERNAL_CONFIG0_SD_PU_RX_MASK;
+       data |= 0x1 << SD_EXTERNAL_CONFIG0_SD_PU_RX_OFFSET;
+       mask |= SD_EXTERNAL_CONFIG0_SD_PU_TX_MASK;
+       data |= 0x1 << SD_EXTERNAL_CONFIG0_SD_PU_TX_OFFSET;
+       reg_set(sd_ip_addr + SD_EXTERNAL_CONFIG0_REG, data, mask);
+
+
+       /* check PLL rx & tx ready */
+       addr = sd_ip_addr + SD_EXTERNAL_STATUS0_REG;
+       data = SD_EXTERNAL_STATUS0_PLL_RX_MASK |
+               SD_EXTERNAL_STATUS0_PLL_TX_MASK;
+       mask = data;
+       /* NOTE(review): 15000 timeout units depend on
+        * polling_with_timeout() - presumably microseconds; confirm
+        * against its implementation.
+        */
+       data = polling_with_timeout(addr, data, mask, 15000, REG_32BIT);
+       if (data != 0) {
+               debug("Read from reg = %lx - value = 0x%x\n",
+                     sd_ip_addr + SD_EXTERNAL_STATUS0_REG, data);
+               ERROR("SD_EXTERNAL_STATUS0_PLL_RX is %d, -\"-_PLL_TX is %d\n",
+                     (data & SD_EXTERNAL_STATUS0_PLL_RX_MASK),
+                     (data & SD_EXTERNAL_STATUS0_PLL_TX_MASK));
+               ret = -ETIMEDOUT;
+       }
+
+       /* RX init */
+       reg_set(sd_ip_addr + SD_EXTERNAL_CONFIG1_REG,
+               0x1 << SD_EXTERNAL_CONFIG1_RX_INIT_OFFSET,
+               SD_EXTERNAL_CONFIG1_RX_INIT_MASK);
+
+       /* check that RX init done */
+       addr = sd_ip_addr + SD_EXTERNAL_STATUS0_REG;
+       data = SD_EXTERNAL_STATUS0_RX_INIT_MASK;
+       mask = data;
+       data = polling_with_timeout(addr, data, mask, 100, REG_32BIT);
+       if (data != 0) {
+               debug("Read from reg = %lx - value = 0x%x\n",
+                     sd_ip_addr + SD_EXTERNAL_STATUS0_REG, data);
+               ERROR("SD_EXTERNAL_STATUS0_RX_INIT is 0\n");
+               ret = -ETIMEDOUT;
+       }
+
+       debug("stage: RF Reset\n");
+       /* RF Reset - clear RX-init request and assert RF reset-in */
+       mask =  SD_EXTERNAL_CONFIG1_RX_INIT_MASK;
+       data = 0x0 << SD_EXTERNAL_CONFIG1_RX_INIT_OFFSET;
+       mask |= SD_EXTERNAL_CONFIG1_RF_RESET_IN_MASK;
+       data |= 0x1 << SD_EXTERNAL_CONFIG1_RF_RESET_IN_OFFSET;
+       reg_set(sd_ip_addr + SD_EXTERNAL_CONFIG1_REG, data, mask);
+
+       debug_exit();
+
+       return ret;
+}
+
+/*
+ * Power on and configure a single COMPHY lane for USB 3.0 mode.
+ *
+ * comphy_base  - base address of the COMPHY register block
+ * comphy_index - lane number to bring up
+ * comphy_mode  - mode word forwarded to the PIPE selector
+ *
+ * Returns 0 on success, or -ETIMEDOUT if the lane PCLK-enable status
+ * bit does not come up within the polling window.
+ */
+static int mvebu_cp110_comphy_usb3_power_on(uint64_t comphy_base,
+                                    uint8_t comphy_index, uint32_t comphy_mode)
+{
+       uintptr_t hpipe_addr, comphy_addr, addr;
+       uint32_t mask, data;
+       int ret = 0;
+
+       debug_enter();
+
+       /* Configure PIPE selector for USB3 */
+       mvebu_cp110_comphy_set_pipe_selector(comphy_base, comphy_index,
+                                            comphy_mode);
+
+       /* Derive the per-lane HPIPE and COMPHY register bases */
+       hpipe_addr = HPIPE_ADDR(COMPHY_PIPE_FROM_COMPHY_ADDR(comphy_base),
+                               comphy_index);
+       comphy_addr = COMPHY_ADDR(comphy_base, comphy_index);
+
+       debug("stage: RFU configurations - hard reset comphy\n");
+       /* RFU configurations - hard reset comphy */
+       mask = COMMON_PHY_CFG1_PWR_UP_MASK;
+       data = 0x1 << COMMON_PHY_CFG1_PWR_UP_OFFSET;
+       mask |= COMMON_PHY_CFG1_PIPE_SELECT_MASK;
+       data |= 0x1 << COMMON_PHY_CFG1_PIPE_SELECT_OFFSET;
+       mask |= COMMON_PHY_CFG1_PWR_ON_RESET_MASK;
+       data |= 0x0 << COMMON_PHY_CFG1_PWR_ON_RESET_OFFSET;
+       mask |= COMMON_PHY_CFG1_CORE_RSTN_MASK;
+       data |= 0x0 << COMMON_PHY_CFG1_CORE_RSTN_OFFSET;
+       mask |= COMMON_PHY_PHY_MODE_MASK;
+       data |= 0x1 << COMMON_PHY_PHY_MODE_OFFSET;
+       reg_set(comphy_addr + COMMON_PHY_CFG1_REG, data, mask);
+
+       /* release from hard reset */
+       mask = COMMON_PHY_CFG1_PWR_ON_RESET_MASK;
+       data = 0x1 << COMMON_PHY_CFG1_PWR_ON_RESET_OFFSET;
+       mask |= COMMON_PHY_CFG1_CORE_RSTN_MASK;
+       data |= 0x1 << COMMON_PHY_CFG1_CORE_RSTN_OFFSET;
+       reg_set(comphy_addr + COMMON_PHY_CFG1_REG, data, mask);
+
+       /* Wait 1ms - until band gap and ref clock ready */
+       mdelay(1);
+
+       /* Start comphy Configuration */
+       debug("stage: Comphy configuration\n");
+       /* Set PIPE soft reset */
+       mask = HPIPE_RST_CLK_CTRL_PIPE_RST_MASK;
+       data = 0x1 << HPIPE_RST_CLK_CTRL_PIPE_RST_OFFSET;
+       /* Set PHY datapath width mode for V0 */
+       mask |= HPIPE_RST_CLK_CTRL_FIXED_PCLK_MASK;
+       data |= 0x0 << HPIPE_RST_CLK_CTRL_FIXED_PCLK_OFFSET;
+       /* Set Data bus width USB mode for V0 */
+       mask |= HPIPE_RST_CLK_CTRL_PIPE_WIDTH_MASK;
+       data |= 0x0 << HPIPE_RST_CLK_CTRL_PIPE_WIDTH_OFFSET;
+       /* Set CORE_CLK output frequency for 250Mhz */
+       mask |= HPIPE_RST_CLK_CTRL_CORE_FREQ_SEL_MASK;
+       data |= 0x0 << HPIPE_RST_CLK_CTRL_CORE_FREQ_SEL_OFFSET;
+       reg_set(hpipe_addr + HPIPE_RST_CLK_CTRL_REG, data, mask);
+       /* Set PLL ready delay for 0x2 */
+       reg_set(hpipe_addr + HPIPE_CLK_SRC_LO_REG,
+               0x2 << HPIPE_CLK_SRC_LO_PLL_RDY_DL_OFFSET,
+               HPIPE_CLK_SRC_LO_PLL_RDY_DL_MASK);
+       /* Set reference clock to come from group 1 - 25Mhz */
+       reg_set(hpipe_addr + HPIPE_MISC_REG,
+               0x0 << HPIPE_MISC_REFCLK_SEL_OFFSET,
+               HPIPE_MISC_REFCLK_SEL_MASK);
+       /* Set reference frequency select - 0x2 */
+       mask = HPIPE_PWR_PLL_REF_FREQ_MASK;
+       data = 0x2 << HPIPE_PWR_PLL_REF_FREQ_OFFSET;
+       /* Set PHY mode to USB - 0x5 */
+       mask |= HPIPE_PWR_PLL_PHY_MODE_MASK;
+       data |= 0x5 << HPIPE_PWR_PLL_PHY_MODE_OFFSET;
+       reg_set(hpipe_addr + HPIPE_PWR_PLL_REG, data, mask);
+       /* Set the amount of time spent in the LoZ state - set for 0x7 */
+       reg_set(hpipe_addr + HPIPE_GLOBAL_PM_CTRL,
+               0x7 << HPIPE_GLOBAL_PM_RXDLOZ_WAIT_OFFSET,
+               HPIPE_GLOBAL_PM_RXDLOZ_WAIT_MASK);
+       /* Set max PHY generation setting - 5Gbps */
+       reg_set(hpipe_addr + HPIPE_INTERFACE_REG,
+               0x1 << HPIPE_INTERFACE_GEN_MAX_OFFSET,
+               HPIPE_INTERFACE_GEN_MAX_MASK);
+       /* Set select data width 20Bit (SEL_BITS[2:0]) */
+       reg_set(hpipe_addr + HPIPE_LOOPBACK_REG,
+               0x1 << HPIPE_LOOPBACK_SEL_OFFSET,
+               HPIPE_LOOPBACK_SEL_MASK);
+       /* select de-emphasize 3.5db */
+       reg_set(hpipe_addr + HPIPE_LANE_CONFIG0_REG,
+               0x1 << HPIPE_LANE_CONFIG0_TXDEEMPH0_OFFSET,
+               HPIPE_LANE_CONFIG0_TXDEEMPH0_MASK);
+       /* override tx margining from the MAC */
+       reg_set(hpipe_addr + HPIPE_TST_MODE_CTRL_REG,
+               0x1 << HPIPE_TST_MODE_CTRL_MODE_MARGIN_OFFSET,
+               HPIPE_TST_MODE_CTRL_MODE_MARGIN_MASK);
+
+       /* Start analog parameters from ETP(HW) */
+       debug("stage: Analog parameters from ETP(HW)\n");
+       /* Set Pin DFE_PAT_DIS -> Bit[1]: PIN_DFE_PAT_DIS = 0x0 */
+       mask = HPIPE_LANE_CFG4_DFE_CTRL_MASK;
+       data = 0x1 << HPIPE_LANE_CFG4_DFE_CTRL_OFFSET;
+       /* Set Override PHY DFE control pins for 0x1 */
+       mask |= HPIPE_LANE_CFG4_DFE_OVER_MASK;
+       data |= 0x1 << HPIPE_LANE_CFG4_DFE_OVER_OFFSET;
+       /* Set Spread Spectrum Clock Enable for 0x1 */
+       mask |= HPIPE_LANE_CFG4_SSC_CTRL_MASK;
+       data |= 0x1 << HPIPE_LANE_CFG4_SSC_CTRL_OFFSET;
+       reg_set(hpipe_addr + HPIPE_LANE_CFG4_REG, data, mask);
+       /* Configure SSC amplitude */
+       mask = HPIPE_G2_TX_SSC_AMP_MASK;
+       data = 0x1f << HPIPE_G2_TX_SSC_AMP_OFFSET;
+       reg_set(hpipe_addr + HPIPE_G2_SET_2_REG, data, mask);
+       /* End of analog parameters */
+
+       debug("stage: Comphy power up\n");
+       /* Release from PIPE soft reset */
+       reg_set(hpipe_addr + HPIPE_RST_CLK_CTRL_REG,
+               0x0 << HPIPE_RST_CLK_CTRL_PIPE_RST_OFFSET,
+               HPIPE_RST_CLK_CTRL_PIPE_RST_MASK);
+
+       /* wait 15ms - for comphy calibration done */
+       debug("stage: Check PLL\n");
+       /* Read lane status - poll for PCLK enable */
+       addr = hpipe_addr + HPIPE_LANE_STATUS1_REG;
+       data = HPIPE_LANE_STATUS1_PCLK_EN_MASK;
+       mask = data;
+       data = polling_with_timeout(addr, data, mask, 15000, REG_32BIT);
+       if (data != 0) {
+               debug("Read from reg = %lx - value = 0x%x\n",
+                       hpipe_addr + HPIPE_LANE_STATUS1_REG, data);
+               ERROR("HPIPE_LANE_STATUS1_PCLK_EN_MASK is 0\n");
+               ret = -ETIMEDOUT;
+       }
+
+       debug_exit();
+
+       return ret;
+}
+
+/* This function performs RX training for one Feed Forward Equalization (FFE)
+ * value.
+ * The RX training result is stored in 'Saved DFE values Register' (SAV_F0D).
+ *
+ * Return '0' on success, error code in case of failure.
+ */
+static int mvebu_cp110_comphy_test_single_ffe(uint64_t comphy_base,
+                                             uint8_t comphy_index,
+                                             uint32_t ffe, uint32_t *result)
+{
+       uint32_t mask, data, timeout;
+       uintptr_t hpipe_addr, sd_ip_addr;
+
+       hpipe_addr = HPIPE_ADDR(COMPHY_PIPE_FROM_COMPHY_ADDR(comphy_base),
+                               comphy_index);
+
+       sd_ip_addr = SD_ADDR(COMPHY_PIPE_FROM_COMPHY_ADDR(comphy_base),
+                            comphy_index);
+
+       /* Configure PRBS counters */
+       mask = HPIPE_PHY_TEST_PATTERN_SEL_MASK;
+       data = 0xe << HPIPE_PHY_TEST_PATTERN_SEL_OFFSET;
+       reg_set(hpipe_addr + HPIPE_PHY_TEST_CONTROL_REG, data, mask);
+
+       mask = HPIPE_PHY_TEST_DATA_MASK;
+       data = 0x64 << HPIPE_PHY_TEST_DATA_OFFSET;
+       reg_set(hpipe_addr + HPIPE_PHY_TEST_DATA_REG, data, mask);
+
+       mask = HPIPE_PHY_TEST_EN_MASK;
+       data = 0x1 << HPIPE_PHY_TEST_EN_OFFSET;
+       reg_set(hpipe_addr + HPIPE_PHY_TEST_CONTROL_REG, data, mask);
+
+       mdelay(50);
+
+       /* Set the FFE value */
+       mask = HPIPE_G1_SETTINGS_3_G1_FFE_RES_SEL_MASK;
+       data = ffe << HPIPE_G1_SETTINGS_3_G1_FFE_RES_SEL_OFFSET;
+       reg_set(hpipe_addr + HPIPE_G1_SETTINGS_3_REG, data, mask);
+
+       /* Start RX training */
+       mask = SD_EXTERNAL_STATUS_START_RX_TRAINING_MASK;
+       data = 1 << SD_EXTERNAL_STATUS_START_RX_TRAINING_OFFSET;
+       reg_set(sd_ip_addr + SD_EXTERNAL_STATUS_REG, data, mask);
+
+       /* Check the result of RX training */
+       timeout = RX_TRAINING_TIMEOUT;
+       while (timeout) {
+               data = mmio_read_32(sd_ip_addr + SD_EXTERNAL_STATAUS1_REG);
+               if (data & SD_EXTERNAL_STATAUS1_REG_RX_TRAIN_COMP_MASK)
+                       break;
+               mdelay(1);
+               timeout--;
+       }
+
+       if (timeout == 0)
+               return -ETIMEDOUT;
+
+       if (data & SD_EXTERNAL_STATAUS1_REG_RX_TRAIN_FAILED_MASK)
+               return -EINVAL;
+
+       /* Stop RX training */
+       mask = SD_EXTERNAL_STATUS_START_RX_TRAINING_MASK;
+       data = 0 << SD_EXTERNAL_STATUS_START_RX_TRAINING_OFFSET;
+       reg_set(sd_ip_addr + SD_EXTERNAL_STATUS_REG, data, mask);
+
+       /* Read the result */
+       data = mmio_read_32(hpipe_addr + HPIPE_SAVED_DFE_VALUES_REG);
+       data &= HPIPE_SAVED_DFE_VALUES_SAV_F0D_MASK;
+       data >>= HPIPE_SAVED_DFE_VALUES_SAV_F0D_OFFSET;
+       *result = data;
+
+       mask = HPIPE_PHY_TEST_RESET_MASK;
+       data = 0x1 << HPIPE_PHY_TEST_RESET_OFFSET;
+       mask |= HPIPE_PHY_TEST_EN_MASK;
+       data |= 0x0 << HPIPE_PHY_TEST_EN_OFFSET;
+       reg_set(hpipe_addr + HPIPE_PHY_TEST_CONTROL_REG, data, mask);
+
+       mask = HPIPE_PHY_TEST_RESET_MASK;
+       data = 0x0 << HPIPE_PHY_TEST_RESET_OFFSET;
+       reg_set(hpipe_addr + HPIPE_PHY_TEST_CONTROL_REG, data, mask);
+
+       return 0;
+}
+
+/* This function runs complete RX training sequence:
+ *     - Run RX training for all possible Feed Forward Equalization values
+ *     - Choose the FFE which gives the best result.
+ *     - Run RX training again with the best result.
+ *
+ * Return '0' on success, error code in a case of failure.
+ */
+int mvebu_cp110_comphy_xfi_rx_training(uint64_t comphy_base,
+                                             uint8_t comphy_index)
+{
+       uint32_t mask, data, max_rx_train = 0, max_rx_train_index = 0;
+       uintptr_t hpipe_addr;
+       uint32_t rx_train_result;
+       int ret, i;
+
+       hpipe_addr = HPIPE_ADDR(COMPHY_PIPE_FROM_COMPHY_ADDR(comphy_base),
+                               comphy_index);
+
+       debug_enter();
+
+       /* Configure SQ threshold and CDR lock */
+       mask = HPIPE_SQUELCH_THRESH_IN_MASK;
+       data = 0xc << HPIPE_SQUELCH_THRESH_IN_OFFSET;
+       reg_set(hpipe_addr + HPIPE_SQUELCH_FFE_SETTING_REG, data, mask);
+
+       /* Set squelch de-glitch widths and enable the de-glitch filter */
+       mask = HPIPE_SQ_DEGLITCH_WIDTH_P_MASK;
+       data = 0xf << HPIPE_SQ_DEGLITCH_WIDTH_P_OFFSET;
+       mask |= HPIPE_SQ_DEGLITCH_WIDTH_N_MASK;
+       data |= 0xf << HPIPE_SQ_DEGLITCH_WIDTH_N_OFFSET;
+       mask |= HPIPE_SQ_DEGLITCH_EN_MASK;
+       data |= 0x1 << HPIPE_SQ_DEGLITCH_EN_OFFSET;
+       reg_set(hpipe_addr + HPIPE_SQ_GLITCH_FILTER_CTRL, data, mask);
+
+       /* Enable the CDR lock detector */
+       mask = HPIPE_CDR_LOCK_DET_EN_MASK;
+       data = 0x1 << HPIPE_CDR_LOCK_DET_EN_OFFSET;
+       reg_set(hpipe_addr + HPIPE_LOOPBACK_REG, data, mask);
+
+       /* Let the detectors settle before sampling their status */
+       udelay(100);
+
+       /* Determine if we have a cable attached to this comphy, if not,
+        * we can't perform RX training.
+        * NOTE(review): the bit being SET appears to mean squelch (i.e. no
+        * signal) was detected, so the error text below reads inverted and
+        * "Squelsh" is misspelled — confirm against the HPIPE register spec
+        * before changing the message.
+        */
+       data = mmio_read_32(hpipe_addr + HPIPE_SQUELCH_FFE_SETTING_REG);
+       if (data & HPIPE_SQUELCH_DETECTED_MASK) {
+               ERROR("Squelsh is not detected, can't perform RX training\n");
+               return -EINVAL;
+       }
+
+       /* RX training is meaningless without a locked CDR */
+       data = mmio_read_32(hpipe_addr + HPIPE_LOOPBACK_REG);
+       if (!(data & HPIPE_CDR_LOCK_MASK)) {
+               ERROR("CDR is not locked, can't perform RX training\n");
+               return -EINVAL;
+       }
+
+       /* Do preparations for RX training */
+       mask = HPIPE_DFE_RES_FORCE_MASK;
+       data = 0x0 << HPIPE_DFE_RES_FORCE_OFFSET;
+       reg_set(hpipe_addr + HPIPE_DFE_REG0, data, mask);
+
+       mask = HPIPE_G1_SETTINGS_3_G1_FFE_CAP_SEL_MASK;
+       data = 0xf << HPIPE_G1_SETTINGS_3_G1_FFE_CAP_SEL_OFFSET;
+       mask |= HPIPE_G1_SETTINGS_3_G1_FFE_SETTING_FORCE_MASK;
+       data |= 1 << HPIPE_G1_SETTINGS_3_G1_FFE_SETTING_FORCE_OFFSET;
+       reg_set(hpipe_addr + HPIPE_G1_SETTINGS_3_REG, data, mask);
+
+       /* Perform RX training for all possible FFE (Feed Forward
+        * Equalization, possible values are 0-7).
+        * We update the best value reached and the FFE which gave this value.
+        */
+       for (i = 0; i < MAX_NUM_OF_FFE; i++) {
+               rx_train_result = 0;
+               ret = mvebu_cp110_comphy_test_single_ffe(comphy_base,
+                                                        comphy_index, i,
+                                                        &rx_train_result);
+
+               /* A failure for an individual FFE value is non-fatal here;
+                * that FFE simply does not become the best candidate.
+                */
+               if ((!ret) && (rx_train_result > max_rx_train)) {
+                       max_rx_train = rx_train_result;
+                       max_rx_train_index = i;
+               }
+       }
+
+       /* If we were able to determine which FFE gives the best value,
+        * now we need to set it and run RX training again (only for this
+        * FFE).
+        */
+       if (max_rx_train) {
+               ret = mvebu_cp110_comphy_test_single_ffe(comphy_base,
+                                                        comphy_index,
+                                                        max_rx_train_index,
+                                                        &rx_train_result);
+
+               if (ret == 0)
+                       debug("RX Training passed (FFE = %d, result = 0x%x)\n",
+                              max_rx_train_index, rx_train_result);
+       } else {
+               ERROR("RX Training failed for comphy%d\n", comphy_index);
+               ret = -EINVAL;
+       }
+
+       debug_exit();
+
+       return ret;
+}
+
+/* During AP the proper mode is auto-negotiated and the mac, pcs and serdes
+ * configuration are done by the firmware loaded to the MG's CM3 for appropriate
+ * negotiated mode. Therefore there is no need to configure the mac, pcs and
+ * serdes from u-boot. The only thing that need to be setup is powering up
+ * the comphy, which is done through Common PHY<n> Configuration 1 Register
+ * (CP0: 0xF2441000, CP1: 0xF4441000). This step can't be done by MG's CM3,
+ * since it doesn't have an access to this register-set (but it has access to
+ * the network registers like: MG, AP, MAC, PCS, Serdes etc.)
+ *
+ * Always returns 0.
+ */
+static int mvebu_cp110_comphy_ap_power_on(uint64_t comphy_base,
+                                         uint8_t comphy_index)
+{
+       uint32_t mask, data;
+       /* Fixed: the original contained a duplicated assignment
+        * ("comphy_addr = comphy_addr = ..."); initialize once.
+        */
+       uintptr_t comphy_addr = COMPHY_ADDR(comphy_base, comphy_index);
+
+       debug_enter();
+       debug("stage: RFU configurations - hard reset comphy\n");
+       /* RFU configurations - hard reset comphy: power up the lane and
+        * make sure the PIPE interface is not selected for this mode.
+        */
+       mask = COMMON_PHY_CFG1_PWR_UP_MASK;
+       data = 0x1 << COMMON_PHY_CFG1_PWR_UP_OFFSET;
+       mask |= COMMON_PHY_CFG1_PIPE_SELECT_MASK;
+       data |= 0x0 << COMMON_PHY_CFG1_PIPE_SELECT_OFFSET;
+       reg_set(comphy_addr + COMMON_PHY_CFG1_REG, data, mask);
+       debug_exit();
+
+       return 0;
+}
+
+/*
+ * This function allows to reset the digital synchronizers between
+ * the MAC and the PHY, it is required when the MAC changes its state.
+ *
+ * command == COMPHY_COMMAND_DIGITAL_PWR_OFF asserts the RF reset line;
+ * any other command value de-asserts it.
+ *
+ * Return 0 on success, -EINVAL for modes that do not support digital
+ * power ON/OFF.
+ */
+int mvebu_cp110_comphy_digital_reset(uint64_t comphy_base,
+                                    uint8_t comphy_index,
+                                    uint32_t comphy_mode, uint32_t command)
+{
+       int mode = COMPHY_GET_MODE(comphy_mode);
+       uintptr_t sd_ip_addr;
+       uint32_t mask, data;
+
+       sd_ip_addr = SD_ADDR(COMPHY_PIPE_FROM_COMPHY_ADDR(comphy_base),
+                            comphy_index);
+
+       switch (mode) {
+       case (COMPHY_SGMII_MODE):
+       case (COMPHY_HS_SGMII_MODE):
+       case (COMPHY_XFI_MODE):
+       case (COMPHY_SFI_MODE):
+       case (COMPHY_RXAUI_MODE):
+               /* Assert (PWR_OFF) or de-assert the RF reset */
+               mask = SD_EXTERNAL_CONFIG1_RF_RESET_IN_MASK;
+               data = ((command == COMPHY_COMMAND_DIGITAL_PWR_OFF) ?
+                       0x0 : 0x1) << SD_EXTERNAL_CONFIG1_RF_RESET_IN_OFFSET;
+               reg_set(sd_ip_addr + SD_EXTERNAL_CONFIG1_REG, data, mask);
+               break;
+       default:
+               ERROR("comphy%d: Digital PWR ON/OFF is not supported\n",
+                       comphy_index);
+               /* Fixed indentation: this return was double-indented and
+                * looked like part of the ERROR() call's continuation.
+                */
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+/* Dispatch lane power-on to the mode-specific handler selected by
+ * COMPHY_GET_MODE(comphy_mode).
+ *
+ * Return 0 on success, a negative error code from the mode handler,
+ * or -EINVAL for an unsupported mode.
+ */
+int mvebu_cp110_comphy_power_on(uint64_t comphy_base, uint64_t comphy_index,
+                               uint64_t comphy_mode)
+{
+       int mode = COMPHY_GET_MODE(comphy_mode);
+       int err = 0;
+
+       debug_enter();
+
+       switch (mode) {
+       case(COMPHY_SATA_MODE):
+               err = mvebu_cp110_comphy_sata_power_on(comphy_base,
+                                                      comphy_index,
+                                                      comphy_mode);
+               break;
+       case(COMPHY_SGMII_MODE):
+       case(COMPHY_HS_SGMII_MODE):
+               err = mvebu_cp110_comphy_sgmii_power_on(comphy_base,
+                                                       comphy_index,
+                                                       comphy_mode);
+               break;
+       /* From comphy perspective, XFI and SFI are the same */
+       case (COMPHY_XFI_MODE):
+       case (COMPHY_SFI_MODE):
+               err = mvebu_cp110_comphy_xfi_power_on(comphy_base,
+                                                     comphy_index,
+                                                     comphy_mode);
+               break;
+       case (COMPHY_PCIE_MODE):
+               err = mvebu_cp110_comphy_pcie_power_on(comphy_base,
+                                                      comphy_index,
+                                                      comphy_mode);
+               break;
+       case (COMPHY_RXAUI_MODE):
+               err = mvebu_cp110_comphy_rxaui_power_on(comphy_base,
+                                                       comphy_index,
+                                                       comphy_mode);
+               /* Bug fix: this break was missing, so an RXAUI lane fell
+                * through and was immediately re-programmed for USB3.
+                */
+               break;
+       case (COMPHY_USB3H_MODE):
+       case (COMPHY_USB3D_MODE):
+               err = mvebu_cp110_comphy_usb3_power_on(comphy_base,
+                                                      comphy_index,
+                                                      comphy_mode);
+               break;
+       case (COMPHY_AP_MODE):
+               err = mvebu_cp110_comphy_ap_power_on(comphy_base, comphy_index);
+               break;
+       default:
+               ERROR("comphy%lld: unsupported comphy mode\n", comphy_index);
+               err = -EINVAL;
+               break;
+       }
+
+       debug_exit();
+
+       return err;
+}
+
+/* Power a lane down: hard-reset the SerDes (Ethernet/SATA paths), put the
+ * PCIe MAC of the corresponding port into reset, hard-reset the common PHY
+ * (PCIe/USB3 paths) and clear the PHY/PIPE selectors.
+ *
+ * Always returns 0.
+ */
+int mvebu_cp110_comphy_power_off(uint64_t comphy_base, uint64_t comphy_index)
+{
+       uintptr_t sd_ip_addr, comphy_ip_addr;
+       uint32_t mask, data;
+
+       debug_enter();
+
+       sd_ip_addr = SD_ADDR(COMPHY_PIPE_FROM_COMPHY_ADDR(comphy_base),
+                            comphy_index);
+       comphy_ip_addr = COMPHY_ADDR(comphy_base, comphy_index);
+
+       /* Hard reset the comphy, for Ethernet modes and Sata */
+       mask = SD_EXTERNAL_CONFIG1_RESET_IN_MASK;
+       data = 0x0 << SD_EXTERNAL_CONFIG1_RESET_IN_OFFSET;
+       mask |= SD_EXTERNAL_CONFIG1_RESET_CORE_MASK;
+       data |= 0x0 << SD_EXTERNAL_CONFIG1_RESET_CORE_OFFSET;
+       mask |= SD_EXTERNAL_CONFIG1_RF_RESET_IN_MASK;
+       data |= 0x0 << SD_EXTERNAL_CONFIG1_RF_RESET_IN_OFFSET;
+       reg_set(sd_ip_addr + SD_EXTERNAL_CONFIG1_REG, data, mask);
+
+       /* PCIe reset - the soft-reset register is shared, so serialize the
+        * read-modify-write against other CPUs.
+        */
+       spin_lock(&cp110_mac_reset_lock);
+
+       /* The mvebu_cp110_comphy_power_off will be called only from Linux (to
+        * override settings done by bootloader) and it will be relevant only
+        * to PCIe (called before check if to skip pcie power off or not).
+        */
+       data = mmio_read_32(SYS_CTRL_FROM_COMPHY_ADDR(comphy_base) +
+                                                SYS_CTRL_UINIT_SOFT_RESET_REG);
+       /* NOTE(review): no default case - lanes other than 0/4/5 leave the
+        * register unchanged, presumably because only these lanes are
+        * PCIe-capable; confirm against the CP110 lane mapping.
+        */
+       switch (comphy_index) {
+       case COMPHY_LANE0:
+               data &= ~PCIE_MAC_RESET_MASK_PORT0;
+               break;
+       case COMPHY_LANE4:
+               data &= ~PCIE_MAC_RESET_MASK_PORT1;
+               break;
+       case COMPHY_LANE5:
+               data &= ~PCIE_MAC_RESET_MASK_PORT2;
+               break;
+       }
+
+       mmio_write_32(SYS_CTRL_FROM_COMPHY_ADDR(comphy_base) +
+                                          SYS_CTRL_UINIT_SOFT_RESET_REG, data);
+       spin_unlock(&cp110_mac_reset_lock);
+
+       /* Hard reset the comphy, for PCIe and usb3 */
+       mask = COMMON_PHY_CFG1_PWR_ON_RESET_MASK;
+       data = 0x0 << COMMON_PHY_CFG1_PWR_ON_RESET_OFFSET;
+       mask |= COMMON_PHY_CFG1_CORE_RSTN_MASK;
+       data |= 0x0 << COMMON_PHY_CFG1_CORE_RSTN_OFFSET;
+       reg_set(comphy_ip_addr + COMMON_PHY_CFG1_REG, data, mask);
+
+       /* Clear comphy PHY and PIPE selector, can't rely on previous config. */
+       mvebu_cp110_comphy_clr_phy_selector(comphy_base, comphy_index);
+       mvebu_cp110_comphy_clr_pipe_selector(comphy_base, comphy_index);
+
+       debug_exit();
+
+       return 0;
+}
diff --git a/drivers/marvell/comphy/phy-comphy-cp110.h b/drivers/marvell/comphy/phy-comphy-cp110.h
new file mode 100644 (file)
index 0000000..ada6aec
--- /dev/null
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+/* Marvell CP110 SoC COMPHY unit driver */
+
+int mvebu_cp110_comphy_is_pll_locked(uint64_t comphy_base,
+                                    uint64_t comphy_index);
+int mvebu_cp110_comphy_power_off(uint64_t comphy_base,
+                                uint64_t comphy_index);
+int mvebu_cp110_comphy_power_on(uint64_t comphy_base,
+                               uint64_t comphy_index, uint64_t comphy_mode);
+int mvebu_cp110_comphy_xfi_rx_training(uint64_t comphy_base,
+                                      uint8_t comphy_index);
+int mvebu_cp110_comphy_digital_reset(uint64_t comphy_base, uint8_t comphy_index,
+                                    uint32_t comphy_mode, uint32_t command);
diff --git a/drivers/marvell/gwin.c b/drivers/marvell/gwin.c
new file mode 100644 (file)
index 0000000..2b17f35
--- /dev/null
@@ -0,0 +1,227 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+/* GWIN unit device driver for Marvell AP810 SoC */
+
+#include <a8k_common.h>
+#include <debug.h>
+#include <gwin.h>
+#include <mmio.h>
+#include <mvebu.h>
+#include <mvebu_def.h>
+
+#if LOG_LEVEL >= LOG_LEVEL_INFO
+#define DEBUG_ADDR_MAP
+#endif
+
+/* common defines */
+#define WIN_ENABLE_BIT                 (0x1)
+#define WIN_TARGET_MASK                        (0xF)
+#define WIN_TARGET_SHIFT               (0x8)
+#define WIN_TARGET(tgt)                        (((tgt) & WIN_TARGET_MASK) \
+                                       << WIN_TARGET_SHIFT)
+
+/* Bits[43:26] of the physical address are the window base,
+ * which is aligned to 64MB
+ */
+#define ADDRESS_RSHIFT                 (26)
+#define ADDRESS_LSHIFT                 (10)
+#define GWIN_ALIGNMENT_64M             (0x4000000)
+
+/* AP registers */
+#define GWIN_CR_OFFSET(ap, win)                (MVEBU_GWIN_BASE(ap) + 0x0 + \
+                                               (0x10 * (win)))
+#define GWIN_ALR_OFFSET(ap, win)       (MVEBU_GWIN_BASE(ap) + 0x8 + \
+                                               (0x10 * (win)))
+#define GWIN_AHR_OFFSET(ap, win)       (MVEBU_GWIN_BASE(ap) + 0xc + \
+                                               (0x10 * (win)))
+
+#define CCU_GRU_CR_OFFSET(ap)          (MVEBU_CCU_GRU_BASE(ap))
+#define CCR_GRU_CR_GWIN_MBYPASS                (1 << 1)
+
+/* Force 64MB alignment on a window's base address and size, emitting a
+ * NOTICE whenever an adjustment was required. The base is rounded down,
+ * the size is rounded up.
+ */
+static void gwin_check(struct addr_map_win *win)
+{
+       if (IS_NOT_ALIGN(win->base_addr, GWIN_ALIGNMENT_64M)) {
+               /* Round the base down to the nearest 64MB boundary */
+               win->base_addr &= ~((uint64_t)GWIN_ALIGNMENT_64M - 1);
+               NOTICE("%s: Align the base address to 0x%llx\n",
+                      __func__, win->base_addr);
+       }
+
+       if (IS_NOT_ALIGN(win->win_size, GWIN_ALIGNMENT_64M)) {
+               /* Round the size up to a whole number of 64MB units */
+               win->win_size = ALIGN_UP(win->win_size, GWIN_ALIGNMENT_64M);
+               NOTICE("%s: Aligning window size to 0x%llx\n",
+                      __func__, win->win_size);
+       }
+}
+
+/* Program and enable one GWIN window: validate the target ID, pack the
+ * 64-bit base/end addresses into the ALR/AHR registers, then set the
+ * target and the enable bit in the control register.
+ */
+static void gwin_enable_window(int ap_index, struct addr_map_win *win,
+                              uint32_t win_num)
+{
+       uint64_t last_addr;
+       uint32_t low_reg, high_reg;
+
+       /* The target field is only WIN_TARGET_MASK bits wide */
+       if ((win->target_id & WIN_TARGET_MASK) != win->target_id) {
+               ERROR("target ID = %d, is invalid\n", win->target_id);
+               return;
+       }
+
+       /* Inclusive end address of the window */
+       last_addr = win->base_addr + win->win_size - 1;
+
+       /* Address bits [43:26] occupy register bits [27:10] */
+       low_reg = (uint32_t)((win->base_addr >> ADDRESS_RSHIFT)
+                            << ADDRESS_LSHIFT);
+       high_reg = (uint32_t)((last_addr >> ADDRESS_RSHIFT)
+                             << ADDRESS_LSHIFT);
+
+       /* Program start/end addresses for the window */
+       mmio_write_32(GWIN_ALR_OFFSET(ap_index, win_num), low_reg);
+       mmio_write_32(GWIN_AHR_OFFSET(ap_index, win_num), high_reg);
+
+       /* Set the target ID and raise the enable bit last */
+       mmio_write_32(GWIN_CR_OFFSET(ap_index, win_num),
+                     WIN_TARGET(win->target_id) | WIN_ENABLE_BIT);
+}
+
+/* Disable one GWIN window by clearing its enable bit, leaving the rest of
+ * the control register (target configuration) untouched.
+ */
+static void gwin_disable_window(int ap_index, uint32_t win_num)
+{
+       uintptr_t cr_addr = GWIN_CR_OFFSET(ap_index, win_num);
+
+       mmio_write_32(cr_addr, mmio_read_32(cr_addr) & ~WIN_ENABLE_BIT);
+}
+
+/* Insert/Remove temporary window for using the out-of reset default
+ * CPx base address to access the CP configuration space prior to
+ * the further base address update in accordance with address mapping
+ * design.
+ *
+ * Temporary windows are allocated from the top of the window range
+ * downwards, so they never collide with the permanent windows which
+ * are allocated from index 0 upwards.
+ *
+ * NOTE: Use the same window array for insertion and removal of
+ *       temporary windows.
+ */
+void gwin_temp_win_insert(int ap_index, struct addr_map_win *win, int size)
+{
+       int i;
+
+       for (i = 0; i < size; i++, win++) {
+               uint32_t win_id = MVEBU_GWIN_MAX_WINS - i - 1;
+
+               gwin_check(win);
+               gwin_enable_window(ap_index, win, win_id);
+       }
+}
+
+/*
+ * Remove the temporary windows previously installed by
+ * gwin_temp_win_insert(). Each window's target is read back and compared
+ * against the caller's array before it is disabled.
+ *
+ * NOTE: Use the same window array for insertion and removal of
+ *       temporary windows.
+ */
+void gwin_temp_win_remove(int ap_index, struct addr_map_win *win, int size)
+{
+       uint32_t win_id;
+
+       for (int i = 0; i < size; i++) {
+               uint32_t target;
+
+               /* Temporary windows were installed from the top downwards */
+               win_id = MVEBU_GWIN_MAX_WINS - i - 1;
+
+               /* Extract the target ID of the window about to be removed.
+                * (The base address was also read back here originally but
+                * never used — dead code removed.)
+                */
+               target = mmio_read_32(GWIN_CR_OFFSET(ap_index, win_id));
+               target >>= WIN_TARGET_SHIFT;
+               target &= WIN_TARGET_MASK;
+
+               if (win->target_id != target) {
+                       ERROR("%s: Trying to remove bad window-%d!\n",
+                             __func__, win_id);
+                       continue;
+               }
+               gwin_disable_window(ap_index, win_id);
+               win++;
+       }
+}
+
+#ifdef DEBUG_ADDR_MAP
+/* Print a table of all enabled GWIN windows for the given AP: target ID
+ * and the decoded start/end addresses. Debug-build only.
+ */
+static void dump_gwin(int ap_index)
+{
+       uint32_t win_num;
+
+       /* Dump all GWIN windows */
+       tf_printf("\tbank  target     start              end\n");
+       tf_printf("\t----------------------------------------------------\n");
+       for (win_num = 0; win_num < MVEBU_GWIN_MAX_WINS; win_num++) {
+               uint32_t cr;
+               uint64_t alr, ahr;
+
+               cr  = mmio_read_32(GWIN_CR_OFFSET(ap_index, win_num));
+               /* Window enabled */
+               if (cr & WIN_ENABLE_BIT) {
+                       /* Undo the register packing: bits [27:10] of the
+                        * register hold address bits [43:26].
+                        */
+                       alr = mmio_read_32(GWIN_ALR_OFFSET(ap_index, win_num));
+                       alr = (alr >> ADDRESS_LSHIFT) << ADDRESS_RSHIFT;
+                       ahr = mmio_read_32(GWIN_AHR_OFFSET(ap_index, win_num));
+                       ahr = (ahr >> ADDRESS_LSHIFT) << ADDRESS_RSHIFT;
+                       tf_printf("\tgwin   %d     0x%016llx 0x%016llx\n",
+                                 (cr >> 8) & 0xF, alr, ahr);
+               }
+       }
+}
+#endif
+
+int init_gwin(int ap_index)
+{
+       struct addr_map_win *win;
+       uint32_t win_id;
+       uint32_t win_count;
+       uint32_t win_reg;
+
+       INFO("Initializing GWIN Address decoding\n");
+
+       /* Get the array of the windows and its size */
+       marvell_get_gwin_memory_map(ap_index, &win, &win_count);
+       if (win_count <= 0) {
+               INFO("no windows configurations found\n");
+               return 0;
+       }
+
+       if (win_count > MVEBU_GWIN_MAX_WINS) {
+               ERROR("number of windows is bigger than %d\n",
+                     MVEBU_GWIN_MAX_WINS);
+               return 0;
+       }
+
+       /* disable all windows */
+       for (win_id = 0; win_id < MVEBU_GWIN_MAX_WINS; win_id++)
+               gwin_disable_window(ap_index, win_id);
+
+       /* enable relevant windows */
+       for (win_id = 0; win_id < win_count; win_id++, win++) {
+               gwin_check(win);
+               gwin_enable_window(ap_index, win, win_id);
+       }
+
+       /* GWIN Miss feature has not verified, therefore any access towards
+        * remote AP should be accompanied with proper configuration to
+        * GWIN registers group and therefore the GWIN Miss feature
+        * should be set into Bypass mode, need to make sure all GWIN regions
+        * are defined correctly that will assure no GWIN miss occurrance
+        * JIRA-AURORA2-1630
+        */
+       INFO("Update GWIN miss bypass\n");
+       win_reg = mmio_read_32(CCU_GRU_CR_OFFSET(ap_index));
+       win_reg |= CCR_GRU_CR_GWIN_MBYPASS;
+       mmio_write_32(CCU_GRU_CR_OFFSET(ap_index), win_reg);
+
+#ifdef DEBUG_ADDR_MAP
+       dump_gwin(ap_index);
+#endif
+
+       INFO("Done GWIN Address decoding Initializing\n");
+
+       return 0;
+}
diff --git a/drivers/marvell/i2c/a8k_i2c.c b/drivers/marvell/i2c/a8k_i2c.c
new file mode 100644 (file)
index 0000000..737dd0a
--- /dev/null
@@ -0,0 +1,613 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+/* This driver provides I2C support for Marvell A8K and compatible SoCs */
+
+#include <a8k_i2c.h>
+#include <debug.h>
+#include <delay_timer.h>
+#include <errno.h>
+#include <mmio.h>
+#include <mvebu_def.h>
+
+#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
+#define DEBUG_I2C
+#endif
+
+#define CONFIG_SYS_TCLK                        250000000
+#define CONFIG_SYS_I2C_SPEED           100000
+#define CONFIG_SYS_I2C_SLAVE           0x0
+#define I2C_TIMEOUT_VALUE              0x500
+#define I2C_MAX_RETRY_CNT              1000
+#define I2C_CMD_WRITE                  0x0
+#define I2C_CMD_READ                   0x1
+
+#define I2C_DATA_ADDR_7BIT_OFFS                0x1
+#define I2C_DATA_ADDR_7BIT_MASK                (0xFF << I2C_DATA_ADDR_7BIT_OFFS)
+
+#define I2C_CONTROL_ACK                        0x00000004
+#define I2C_CONTROL_IFLG               0x00000008
+#define I2C_CONTROL_STOP               0x00000010
+#define I2C_CONTROL_START              0x00000020
+#define I2C_CONTROL_TWSIEN             0x00000040
+#define I2C_CONTROL_INTEN              0x00000080
+
+#define I2C_STATUS_START                       0x08
+#define I2C_STATUS_REPEATED_START              0x10
+#define I2C_STATUS_ADDR_W_ACK                  0x18
+#define I2C_STATUS_DATA_W_ACK                  0x28
+#define I2C_STATUS_LOST_ARB_DATA_ADDR_TRANSFER 0x38
+#define I2C_STATUS_ADDR_R_ACK                  0x40
+#define I2C_STATUS_DATA_R_ACK                  0x50
+#define I2C_STATUS_DATA_R_NAK                  0x58
+#define I2C_STATUS_LOST_ARB_GENERAL_CALL       0x78
+#define I2C_STATUS_IDLE                                0xF8
+
+#define I2C_UNSTUCK_TRIGGER                    0x1
+#define I2C_UNSTUCK_ONGOING                    0x2
+#define I2C_UNSTUCK_ERROR                      0x4
+struct  marvell_i2c_regs {
+       uint32_t slave_address;
+       uint32_t data;
+       uint32_t control;
+       union {
+               uint32_t status;        /* when reading */
+               uint32_t baudrate;      /* when writing */
+       } u;
+       uint32_t xtnd_slave_addr;
+       uint32_t reserved[2];
+       uint32_t soft_reset;
+       uint8_t  reserved2[0xa0 - 0x20];
+       uint32_t unstuck;
+};
+
+static struct marvell_i2c_regs *base;
+
+/* Read the controller status into *status and report whether it indicates
+ * lost bus arbitration.
+ *
+ * Return -EAGAIN when arbitration was lost, 0 otherwise.
+ */
+static int marvell_i2c_lost_arbitration(uint32_t *status)
+{
+       *status = mmio_read_32((uintptr_t)&base->u.status);
+
+       switch (*status) {
+       case I2C_STATUS_LOST_ARB_DATA_ADDR_TRANSFER:
+       case I2C_STATUS_LOST_ARB_GENERAL_CALL:
+               return -EAGAIN;
+       default:
+               return 0;
+       }
+}
+
+/* Clear the controller's interrupt flag (IFLG) and give the hardware time
+ * for the clear to take effect.
+ */
+static void marvell_i2c_interrupt_clear(void)
+{
+       uintptr_t ctrl = (uintptr_t)&base->control;
+
+       mmio_write_32(ctrl, mmio_read_32(ctrl) & ~I2C_CONTROL_IFLG);
+       /* Wait for 1 us for the clear to take effect */
+       udelay(1);
+}
+
+/* Report whether the controller's interrupt flag (IFLG) is set.
+ *
+ * Return 1 when the flag is set, 0 otherwise.
+ */
+static int marvell_i2c_interrupt_get(void)
+{
+       uint32_t reg;
+
+       /* Get the interrupt flag bit.
+        * Fixed: the original returned 'reg && I2C_CONTROL_IFLG' — a
+        * logical AND where a bitwise test was intended. It happened to
+        * work only because 'reg' was already masked; test explicitly.
+        */
+       reg = mmio_read_32((uintptr_t)&base->control);
+       return (reg & I2C_CONTROL_IFLG) != 0;
+}
+
+/* Busy-poll the interrupt flag until it rises or the poll budget
+ * (I2C_TIMEOUT_VALUE iterations) is exhausted.
+ *
+ * Return 0 when the flag rose, -ETIMEDOUT otherwise.
+ */
+static int marvell_i2c_wait_interrupt(void)
+{
+       uint32_t timeout = 0;
+
+       while (!marvell_i2c_interrupt_get() &&
+              (timeout++ < I2C_TIMEOUT_VALUE))
+               ;
+
+       return (timeout >= I2C_TIMEOUT_VALUE) ? -ETIMEDOUT : 0;
+}
+
+/* Generate a (possibly repeated) START condition on the bus and verify via
+ * the status register that it was issued.
+ *
+ * Return 0 on success, -ETIMEDOUT if the interrupt never rose, -EPERM on
+ * an unexpected controller state, -EAGAIN on lost arbitration.
+ */
+static int marvell_i2c_start_bit_set(void)
+{
+       int is_int_flag = 0;
+       uint32_t status;
+
+       /* Remember whether IFLG was already set: that means this is a
+        * repeated start in the middle of a transaction.
+        */
+       if (marvell_i2c_interrupt_get())
+               is_int_flag = 1;
+
+       /* set start bit */
+       mmio_write_32((uintptr_t)&base->control,
+                     mmio_read_32((uintptr_t)&base->control) |
+                     I2C_CONTROL_START);
+
+       /* in case that the int flag was set before i.e. repeated start bit */
+       if (is_int_flag) {
+               VERBOSE("%s: repeated start Bit\n", __func__);
+               marvell_i2c_interrupt_clear();
+       }
+
+       if (marvell_i2c_wait_interrupt()) {
+               ERROR("Start clear bit timeout\n");
+               return -ETIMEDOUT;
+       }
+
+       /* check that start bit went down */
+       if ((mmio_read_32((uintptr_t)&base->control) &
+           I2C_CONTROL_START) != 0) {
+               ERROR("Start bit didn't went down\n");
+               return -EPERM;
+       }
+
+       /* check the status */
+       if (marvell_i2c_lost_arbitration(&status)) {
+               ERROR("%s - %d: Lost arbitration, got status %x\n",
+                     __func__, __LINE__, status);
+               return -EAGAIN;
+       }
+       /* Either a fresh start or a repeated start is acceptable here */
+       if ((status != I2C_STATUS_START) &&
+           (status != I2C_STATUS_REPEATED_START)) {
+               ERROR("Got status %x after enable start bit.\n", status);
+               return -EPERM;
+       }
+
+       return 0;
+}
+
+/* Generate a STOP condition on the bus, wait for the controller to clear
+ * the stop bit and verify the bus returned to idle.
+ *
+ * Return 0 on success, -ETIMEDOUT if the stop bit stayed up, -EPERM on an
+ * unexpected controller state, -EAGAIN on lost arbitration.
+ */
+static int marvell_i2c_stop_bit_set(void)
+{
+       int timeout;
+       uint32_t status;
+
+       /* Generate stop bit */
+       mmio_write_32((uintptr_t)&base->control,
+                     mmio_read_32((uintptr_t)&base->control) |
+                     I2C_CONTROL_STOP);
+       marvell_i2c_interrupt_clear();
+
+       timeout = 0;
+       /* Read control register, check the control stop bit */
+       while ((mmio_read_32((uintptr_t)&base->control) & I2C_CONTROL_STOP) &&
+              (timeout++ < I2C_TIMEOUT_VALUE))
+               ;
+       if (timeout >= I2C_TIMEOUT_VALUE) {
+               ERROR("Stop bit didn't went down\n");
+               return -ETIMEDOUT;
+       }
+
+       /* check that stop bit went down */
+       if ((mmio_read_32((uintptr_t)&base->control) & I2C_CONTROL_STOP) != 0) {
+               ERROR("Stop bit didn't went down\n");
+               return -EPERM;
+       }
+
+       /* check the status: the bus must be idle after a STOP */
+       if (marvell_i2c_lost_arbitration(&status)) {
+               ERROR("%s - %d: Lost arbitration, got status %x\n",
+                     __func__, __LINE__, status);
+               return -EAGAIN;
+       }
+       if (status != I2C_STATUS_IDLE) {
+               ERROR("Got status %x after enable stop bit.\n", status);
+               return -EPERM;
+       }
+
+       return 0;
+}
+
+/* Transmit the 7-bit slave address 'chain' combined with the R/W bit
+ * ('command' is I2C_CMD_READ or I2C_CMD_WRITE) and wait for the ACK.
+ *
+ * Return 0 on success, -ETIMEDOUT if no interrupt arrived, -EAGAIN on
+ * lost arbitration, -EPERM when the address was not acknowledged.
+ */
+static int marvell_i2c_address_set(uint8_t chain, int command)
+{
+       uint32_t reg, status;
+
+       /* Address goes in bits [7:1], the R/W bit in bit 0 */
+       reg = (chain << I2C_DATA_ADDR_7BIT_OFFS) & I2C_DATA_ADDR_7BIT_MASK;
+       reg |= command;
+       mmio_write_32((uintptr_t)&base->data, reg);
+       udelay(1);
+
+       marvell_i2c_interrupt_clear();
+
+       if (marvell_i2c_wait_interrupt()) {
+               ERROR("Start clear bit timeout\n");
+               return -ETIMEDOUT;
+       }
+
+       /* check the status */
+       if (marvell_i2c_lost_arbitration(&status)) {
+               ERROR("%s - %d: Lost arbitration, got status %x\n",
+                     __func__, __LINE__, status);
+               return -EAGAIN;
+       }
+       if (((status != I2C_STATUS_ADDR_R_ACK) && (command == I2C_CMD_READ)) ||
+          ((status != I2C_STATUS_ADDR_W_ACK) && (command == I2C_CMD_WRITE))) {
+               /* only in debug, since in boot we try to read the SPD
+                * of both DRAM, and we don't want error messages in case a
+                * DIMM doesn't exist.
+                */
+               INFO("%s: ERROR - status %x addr in %s mode.\n", __func__,
+                    status, (command == I2C_CMD_WRITE) ? "Write" : "Read");
+               return -EPERM;
+       }
+
+       return 0;
+}
+
+/*
+ * The I2C module contains a clock divider to generate the SCL clock.
+ * This function calculates and sets the <N> and <M> fields in the I2C Baud
+ * Rate Register (t=01) to obtain given 'requested_speed'.
+ * The requested_speed will be equal to:
+ * CONFIG_SYS_TCLK / (10 * (M + 1) * (2 << N))
+ * Where M is the value represented by bits[6:3] and N is the value represented
+ * by bits[2:0] of "I2C Baud Rate Register".
+ * Therefore max M which can be set is 16 (2^4) and max N is 8 (2^3). So the
+ * lowest possible baudrate is:
+ * CONFIG_SYS_TCLK/(10 * (16 +1) * (2 << 8), which equals to:
+ * CONFIG_SYS_TCLK/87040. Assuming that CONFIG_SYS_TCLK=250MHz, the lowest
+ * possible frequency is ~2,872KHz.
+ *
+ * Always returns 0.
+ */
+static unsigned int marvell_i2c_bus_speed_set(unsigned int requested_speed)
+{
+       unsigned int n, m, freq, margin, min_margin = 0xffffffff;
+       unsigned int actual_n = 0, actual_m = 0;
+
+       /* Calculate N and M for the TWSI clock baud rate */
+       for (n = 0; n < 8; n++) {
+               for (m = 0; m < 16; m++) {
+                       freq = CONFIG_SYS_TCLK / (10 * (m + 1) * (2 << n));
+                       /* Only frequencies at or below the request are
+                        * candidates. Computing the distance in unsigned
+                        * math avoids the implementation-defined
+                        * unsigned-to-int conversion the original relied
+                        * on for its abs() of the difference; the selected
+                        * (n, m) pair is unchanged.
+                        */
+                       if (freq > requested_speed)
+                               continue;
+                       margin = requested_speed - freq;
+                       if (margin < min_margin) {
+                               min_margin = margin;
+                               actual_n = n;
+                               actual_m = m;
+                       }
+               }
+       }
+       VERBOSE("%s: actual_n = %u, actual_m = %u\n",
+               __func__, actual_n, actual_m);
+       /* Set the baud rate: M in bits [6:3], N in bits [2:0] */
+       mmio_write_32((uintptr_t)&base->u.baudrate, (actual_m << 3) | actual_n);
+
+       return 0;
+}
+
+#ifdef DEBUG_I2C
+/* Debug-only probe: issue a start bit and address the chip in write mode
+ * to confirm it ACKs, then release the bus with a stop bit.
+ * Returns 0 on success, -EPERM on failure.
+ */
+static int marvell_i2c_probe(uint8_t chip)
+{
+       int ret = 0;
+
+       ret = marvell_i2c_start_bit_set();
+       if (ret != 0) {
+               /* release the bus before bailing out */
+               marvell_i2c_stop_bit_set();
+               ERROR("%s - %d: %s", __func__, __LINE__,
+                     "marvell_i2c_start_bit_set failed\n");
+               return -EPERM;
+       }
+
+       ret = marvell_i2c_address_set(chip, I2C_CMD_WRITE);
+       if (ret != 0) {
+               marvell_i2c_stop_bit_set();
+               ERROR("%s - %d: %s", __func__, __LINE__,
+                     "marvell_i2c_address_set failed\n");
+               return -EPERM;
+       }
+
+       marvell_i2c_stop_bit_set();
+
+       VERBOSE("%s: successful I2C probe\n", __func__);
+
+       return ret;
+}
+#endif
+
+/* regular i2c transaction */
+/* Receive 'block_size' bytes into 'p_block', one byte per interrupt cycle.
+ * Before reading the final byte the ACK bit is cleared so the controller
+ * NAKs it, terminating the read per the I2C protocol.
+ * Returns 0 on success, -ETIMEDOUT/-EAGAIN/-EPERM on failure.
+ */
+static int marvell_i2c_data_receive(uint8_t *p_block, uint32_t block_size)
+{
+       uint32_t reg, status, block_size_read = block_size;
+
+       /* Wait for cause interrupt */
+       if (marvell_i2c_wait_interrupt()) {
+               ERROR("Start clear bit timeout\n");
+               return -ETIMEDOUT;
+       }
+       while (block_size_read) {
+               if (block_size_read == 1) {
+                       /* last byte: disable ACK so the byte is NAKed */
+                       reg = mmio_read_32((uintptr_t)&base->control);
+                       reg &= ~(I2C_CONTROL_ACK);
+                       mmio_write_32((uintptr_t)&base->control, reg);
+               }
+               marvell_i2c_interrupt_clear();
+
+               if (marvell_i2c_wait_interrupt()) {
+                       ERROR("Start clear bit timeout\n");
+                       return -ETIMEDOUT;
+               }
+               /* check the status */
+               if (marvell_i2c_lost_arbitration(&status)) {
+                       ERROR("%s - %d: Lost arbitration, got status %x\n",
+                             __func__, __LINE__, status);
+                       return -EAGAIN;
+               }
+               /* non-final bytes must be ACKed by us */
+               if ((status != I2C_STATUS_DATA_R_ACK) &&
+                   (block_size_read != 1)) {
+                       ERROR("Status %x in read transaction\n", status);
+                       return -EPERM;
+               }
+               /* the final byte must be NAKed (ACK disabled above) */
+               if ((status != I2C_STATUS_DATA_R_NAK) &&
+                   (block_size_read == 1)) {
+                       ERROR("Status %x in Rd Terminate\n", status);
+                       return -EPERM;
+               }
+
+               /* read the data */
+               *p_block = (uint8_t) mmio_read_32((uintptr_t)&base->data);
+               VERBOSE("%s: place %d read %x\n", __func__,
+                       block_size - block_size_read, *p_block);
+               p_block++;
+               block_size_read--;
+       }
+
+       return 0;
+}
+
+/* Transmit 'block_size' bytes from 'p_block', checking for a slave ACK
+ * after each byte. Returns 0 on success, -ETIMEDOUT/-EAGAIN/-EPERM on
+ * failure.
+ */
+static int marvell_i2c_data_transmit(uint8_t *p_block, uint32_t block_size)
+{
+       uint32_t status, block_size_write = block_size;
+
+       if (marvell_i2c_wait_interrupt()) {
+               ERROR("Start clear bit timeout\n");
+               return -ETIMEDOUT;
+       }
+
+       while (block_size_write) {
+               /* write the data */
+               mmio_write_32((uintptr_t)&base->data, (uint32_t) *p_block);
+               VERBOSE("%s: index = %d, data = %x\n", __func__,
+                       block_size - block_size_write, *p_block);
+               p_block++;
+               block_size_write--;
+
+               marvell_i2c_interrupt_clear();
+
+               if (marvell_i2c_wait_interrupt()) {
+                       ERROR("Start clear bit timeout\n");
+                       return -ETIMEDOUT;
+               }
+
+               /* check the status */
+               if (marvell_i2c_lost_arbitration(&status)) {
+                       ERROR("%s - %d: Lost arbitration, got status %x\n",
+                             __func__, __LINE__, status);
+                       return -EAGAIN;
+               }
+               /* the slave must ACK every byte we send */
+               if (status != I2C_STATUS_DATA_W_ACK) {
+                       ERROR("Status %x in write transaction\n", status);
+                       return -EPERM;
+               }
+       }
+
+       return 0;
+}
+
+/* Send the in-chip data address ('addr') to the device as 1 or 2 offset
+ * bytes, big-endian for the 2-byte case. Returns the transmit result.
+ */
+static int marvell_i2c_target_offset_set(uint8_t chip, uint32_t addr, int alen)
+{
+       uint8_t off_block[2];
+       uint32_t off_size;
+
+       if (alen == 2) { /* 2-byte addresses support */
+               off_block[0] = (addr >> 8) & 0xff;
+               off_block[1] = addr & 0xff;
+               off_size = 2;
+       } else { /* 1-byte addresses support */
+               off_block[0] = addr & 0xff;
+               off_size = 1;
+       }
+       /* NOTE(review): when off_size == 1, off_block[1] is printed below
+        * while still uninitialized (debug output only) — worth confirming.
+        */
+       VERBOSE("%s: off_size = %x addr1 = %x addr2 = %x\n", __func__,
+               off_size, off_block[0], off_block[1]);
+       return marvell_i2c_data_transmit(off_block, off_size);
+}
+
+/* Recovery path for a hung bus: only acts on -ETIMEDOUT (any other 'ret'
+ * is passed through). Re-inits the controller, triggers the hardware
+ * unstuck sequence, and busy-waits for it to finish. Returns -EAGAIN so
+ * the caller retries on success, -EPERM if the unstuck sequence failed.
+ */
+static int marvell_i2c_unstuck(int ret)
+{
+       uint32_t v;
+
+       if (ret != -ETIMEDOUT)
+               return ret;
+       VERBOSE("Trying to \"unstuck i2c\"... ");
+       i2c_init(base);
+       mmio_write_32((uintptr_t)&base->unstuck, I2C_UNSTUCK_TRIGGER);
+       do {
+               v = mmio_read_32((uintptr_t)&base->unstuck);
+       } while (v & I2C_UNSTUCK_ONGOING);
+
+       if (v & I2C_UNSTUCK_ERROR) {
+               VERBOSE("failed - soft reset i2c\n");
+               ret = -EPERM;
+       } else {
+               VERBOSE("ok\n");
+               /* re-init once more after a successful unstuck */
+               i2c_init(base);
+               ret = -EAGAIN;
+       }
+       return ret;
+}
+
+/*
+ * API Functions
+ */
+/* Initialize the I2C controller at 'i2c_base': soft reset, program bus
+ * speed and slave address from plat_def.h constants, enable the unit and
+ * unmask its interrupt. Also records 'i2c_base' in the driver-global
+ * 'base' used by all other functions.
+ */
+void i2c_init(void *i2c_base)
+{
+       /* For I2C speed and slave address, now we do not set them since
+        * we just provide the working speed and slave address in plat_def.h
+        * for i2c_init
+        */
+       base = (struct marvell_i2c_regs *)i2c_base;
+
+       /* Reset the I2C logic */
+       mmio_write_32((uintptr_t)&base->soft_reset, 0);
+
+       /* let the reset settle before reprogramming */
+       udelay(200);
+
+       marvell_i2c_bus_speed_set(CONFIG_SYS_I2C_SPEED);
+
+       /* Enable the I2C and slave */
+       mmio_write_32((uintptr_t)&base->control,
+                     I2C_CONTROL_TWSIEN | I2C_CONTROL_ACK);
+
+       /* set the I2C slave address */
+       mmio_write_32((uintptr_t)&base->xtnd_slave_addr, 0);
+       mmio_write_32((uintptr_t)&base->slave_address, CONFIG_SYS_I2C_SLAVE);
+
+       /* unmask I2C interrupt */
+       mmio_write_32((uintptr_t)&base->control,
+                     mmio_read_32((uintptr_t)&base->control) |
+                     I2C_CONTROL_INTEN);
+
+       udelay(10);
+}
+
+/*
+ * i2c_read: - Read multiple bytes from an i2c device
+ *
+ * The higher level routines take into account that this function is only
+ * called with len < page length of the device (see configuration file)
+ *
+ * @chip:      address of the chip which is to be read
+ * @addr:      i2c data address within the chip
+ * @alen:      length of the i2c data address (1..2 bytes)
+ * @buffer:    where to write the data
+ * @len:       how much byte do we want to read
+ * @return:    0 in case of success
+ */
+int i2c_read(uint8_t chip, uint32_t addr, int alen, uint8_t *buffer, int len)
+{
+       int ret = 0;
+       uint32_t counter = 0;
+
+#ifdef DEBUG_I2C
+       marvell_i2c_probe(chip);
+#endif
+
+       /* Retry loop: -EAGAIN (lost arbitration / unstuck) retries up to
+        * I2C_MAX_RETRY_CNT times; any other error aborts immediately.
+        */
+       do {
+               /* abort on a non-retryable error from the last iteration */
+               if (ret != -EAGAIN && ret) {
+                       ERROR("i2c transaction failed, after %d retries\n",
+                             counter);
+                       marvell_i2c_stop_bit_set();
+                       return ret;
+               }
+
+               /* wait for 1 us for the interrupt clear to take effect */
+               if (counter > 0)
+                       udelay(1);
+               counter++;
+
+               ret = marvell_i2c_start_bit_set();
+               if (ret) {
+                       /* may convert -ETIMEDOUT into a retryable -EAGAIN */
+                       ret = marvell_i2c_unstuck(ret);
+                       continue;
+               }
+
+               /* if EEPROM device */
+               if (alen != 0) {
+                       /* write the in-chip offset, then repeated-start */
+                       ret = marvell_i2c_address_set(chip, I2C_CMD_WRITE);
+                       if (ret)
+                               continue;
+
+                       ret = marvell_i2c_target_offset_set(chip, addr, alen);
+                       if (ret)
+                               continue;
+                       ret = marvell_i2c_start_bit_set();
+                       if (ret)
+                               continue;
+               }
+
+               ret =  marvell_i2c_address_set(chip, I2C_CMD_READ);
+               if (ret)
+                       continue;
+
+               ret = marvell_i2c_data_receive(buffer, len);
+               if (ret)
+                       continue;
+
+               ret =  marvell_i2c_stop_bit_set();
+       } while ((ret == -EAGAIN) && (counter < I2C_MAX_RETRY_CNT));
+
+       if (counter == I2C_MAX_RETRY_CNT) {
+               ERROR("I2C transactions failed, got EAGAIN %d times\n",
+                     I2C_MAX_RETRY_CNT);
+               ret = -EPERM;
+       }
+       /* re-enable ACK, cleared by data_receive for the final byte */
+       mmio_write_32((uintptr_t)&base->control,
+                     mmio_read_32((uintptr_t)&base->control) |
+                     I2C_CONTROL_ACK);
+
+       udelay(1);
+       return ret;
+}
+
+/*
+ * i2c_write: -  Write multiple bytes to an i2c device
+ *
+ * The higher level routines take into account that this function is only
+ * called with len < page length of the device (see configuration file)
+ *
+ * @chip:      address of the chip which is to be written
+ * @addr:      i2c data address within the chip
+ * @alen:      length of the i2c data address (1..2 bytes)
+ * @buffer:    where to find the data to be written
+ * @len:       number of bytes to be written
+ * @return:    0 in case of success
+ */
+int i2c_write(uint8_t chip, uint32_t addr, int alen, uint8_t *buffer, int len)
+{
+       int ret = 0;
+       uint32_t counter = 0;
+
+       /* Retry loop mirroring i2c_read(): -EAGAIN retries up to
+        * I2C_MAX_RETRY_CNT times, other errors abort immediately.
+        */
+       do {
+               /* abort on a non-retryable error from the last iteration */
+               if (ret != -EAGAIN && ret) {
+                       ERROR("i2c transaction failed\n");
+                       marvell_i2c_stop_bit_set();
+                       return ret;
+               }
+               /* wait for 1 us for the interrupt clear to take effect */
+               if (counter > 0)
+                       udelay(1);
+               counter++;
+
+               ret = marvell_i2c_start_bit_set();
+               if (ret) {
+                       /* may convert -ETIMEDOUT into a retryable -EAGAIN */
+                       ret = marvell_i2c_unstuck(ret);
+                       continue;
+               }
+
+               ret = marvell_i2c_address_set(chip, I2C_CMD_WRITE);
+               if (ret)
+                       continue;
+
+               /* if EEPROM device */
+               if (alen != 0) {
+                       ret = marvell_i2c_target_offset_set(chip, addr, alen);
+                       if (ret)
+                               continue;
+               }
+
+               ret = marvell_i2c_data_transmit(buffer, len);
+               if (ret)
+                       continue;
+
+               ret = marvell_i2c_stop_bit_set();
+       } while ((ret == -EAGAIN) && (counter < I2C_MAX_RETRY_CNT));
+
+       if (counter == I2C_MAX_RETRY_CNT) {
+               ERROR("I2C transactions failed, got EAGAIN %d times\n",
+                     I2C_MAX_RETRY_CNT);
+               ret = -EPERM;
+       }
+
+       udelay(1);
+       return ret;
+}
diff --git a/drivers/marvell/io_win.c b/drivers/marvell/io_win.c
new file mode 100644 (file)
index 0000000..701dbb8
--- /dev/null
@@ -0,0 +1,267 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+/* IO Window unit device driver for Marvell AP806, AP807 and AP810 SoCs */
+
+#include <a8k_common.h>
+#include <debug.h>
+#include <io_win.h>
+#include <mmio.h>
+#include <mvebu.h>
+#include <mvebu_def.h>
+
+#if LOG_LEVEL >= LOG_LEVEL_INFO
+#define DEBUG_ADDR_MAP
+#endif
+
+/* common defines */
+#define WIN_ENABLE_BIT                 (0x1)
+/* Physical address of the base of the window = {Addr[19:0],20`h0} */
+#define ADDRESS_SHIFT                  (20 - 4)
+#define ADDRESS_MASK                   (0xFFFFFFF0)
+#define IO_WIN_ALIGNMENT_1M            (0x100000)
+#define IO_WIN_ALIGNMENT_64K           (0x10000)
+
+/* AP registers */
+#define IO_WIN_ALR_OFFSET(ap, win)     (MVEBU_IO_WIN_BASE(ap) + 0x0 + \
+                                               (0x10 * win))
+#define IO_WIN_AHR_OFFSET(ap, win)     (MVEBU_IO_WIN_BASE(ap) + 0x8 + \
+                                               (0x10 * win))
+#define IO_WIN_CR_OFFSET(ap, win)      (MVEBU_IO_WIN_BASE(ap) + 0xC + \
+                                               (0x10 * win))
+
+/* For storage of CR, ALR, AHR and GCR */
+static uint32_t io_win_regs_save[MVEBU_IO_WIN_MAX_WINS * 3 + 1];
+
+/* Force 1 MB alignment of a window's base address and size (IO windows
+ * use 1 MB granularity), noting any adjustment it makes.
+ */
+static void io_win_check(struct addr_map_win *win)
+{
+       /* for IO The base is always 1M aligned */
+       /* check if address is aligned to 1M */
+       if (IS_NOT_ALIGN(win->base_addr, IO_WIN_ALIGNMENT_1M)) {
+               win->base_addr = ALIGN_UP(win->base_addr, IO_WIN_ALIGNMENT_1M);
+               NOTICE("%s: Align up the base address to 0x%llx\n",
+                      __func__, win->base_addr);
+       }
+
+       /* size parameter validity check */
+       if (IS_NOT_ALIGN(win->win_size, IO_WIN_ALIGNMENT_1M)) {
+               win->win_size = ALIGN_UP(win->win_size, IO_WIN_ALIGNMENT_1M);
+               NOTICE("%s: Aligning size to 0x%llx\n",
+                      __func__, win->win_size);
+       }
+}
+
+/* Program and enable IO window 'win_num' on AP 'ap_index': write the
+ * start/end addresses (ALR with the enable bit / AHR) and the target ID
+ * (CR). Window 0 is reserved and rejected here.
+ */
+static void io_win_enable_window(int ap_index, struct addr_map_win *win,
+                                uint32_t win_num)
+{
+       uint32_t alr, ahr;
+       uint64_t end_addr;
+
+       /* NOTE(review): target_id is range-checked against the window
+        * count (MVEBU_IO_WIN_MAX_WINS) rather than a target-ID limit —
+        * confirm this is intended.
+        */
+       if (win->target_id < 0 || win->target_id >= MVEBU_IO_WIN_MAX_WINS) {
+               ERROR("target ID = %d, is invalid\n", win->target_id);
+               return;
+       }
+
+       if ((win_num == 0) || (win_num > MVEBU_IO_WIN_MAX_WINS)) {
+               ERROR("Enabling wrong IOW window %d!\n", win_num);
+               return;
+       }
+
+       /* calculate the end-address */
+       end_addr = (win->base_addr + win->win_size - 1);
+
+       alr = (uint32_t)((win->base_addr >> ADDRESS_SHIFT) & ADDRESS_MASK);
+       alr |= WIN_ENABLE_BIT;
+       ahr = (uint32_t)((end_addr >> ADDRESS_SHIFT) & ADDRESS_MASK);
+
+       /* write start address and end address for IO window */
+       mmio_write_32(IO_WIN_ALR_OFFSET(ap_index, win_num), alr);
+       mmio_write_32(IO_WIN_AHR_OFFSET(ap_index, win_num), ahr);
+
+       /* write window target */
+       mmio_write_32(IO_WIN_CR_OFFSET(ap_index, win_num), win->target_id);
+}
+
+/* Disable IO window 'win_num' by clearing the enable bit in its ALR.
+ * Window 0 is reserved and rejected here.
+ */
+static void io_win_disable_window(int ap_index, uint32_t win_num)
+{
+       uint32_t win_reg;
+
+       if ((win_num == 0) || (win_num > MVEBU_IO_WIN_MAX_WINS)) {
+               ERROR("Disabling wrong IOW window %d!\n", win_num);
+               return;
+       }
+
+       win_reg = mmio_read_32(IO_WIN_ALR_OFFSET(ap_index, win_num));
+       win_reg &= ~WIN_ENABLE_BIT;
+       mmio_write_32(IO_WIN_ALR_OFFSET(ap_index, win_num), win_reg);
+}
+
+/* Insert/Remove temporary window for using the out-of reset default
+ * CPx base address to access the CP configuration space prior to
+ * the further base address update in accordance with address mapping
+ * design.
+ *
+ * NOTE: Use the same window array for insertion and removal of
+ *       temporary windows.
+ */
+void iow_temp_win_insert(int ap_index, struct addr_map_win *win, int size)
+{
+       uint32_t win_id;
+
+       /* allocate temporary windows from the top of the window range
+        * downwards, leaving the low windows (incl. Win0/BootROM) alone
+        */
+       for (int i = 0; i < size; i++) {
+               win_id = MVEBU_IO_WIN_MAX_WINS - i - 1;
+               io_win_check(win);
+               io_win_enable_window(ap_index, win, win_id);
+               win++;
+       }
+}
+
+/*
+ * NOTE: Use the same window array for insertion and removal of
+ *       temporary windows.
+ */
+void iow_temp_win_remove(int ap_index, struct addr_map_win *win, int size)
+{
+       uint32_t win_id;
+
+       /* Start from the last window and do not touch Win0 */
+       for (int i = 0; i < size; i++) {
+               uint64_t base;
+               uint32_t target;
+
+               /* same top-down numbering as iow_temp_win_insert() */
+               win_id = MVEBU_IO_WIN_MAX_WINS - i - 1;
+
+               target = mmio_read_32(IO_WIN_CR_OFFSET(ap_index, win_id));
+               base = mmio_read_32(IO_WIN_ALR_OFFSET(ap_index, win_id));
+               base &= ~WIN_ENABLE_BIT;
+               base <<= ADDRESS_SHIFT;
+
+               /* only remove a window that matches what we inserted */
+               if ((win->target_id != target) || (win->base_addr != base)) {
+                       ERROR("%s: Trying to remove bad window-%d!\n",
+                             __func__, win_id);
+                       continue;
+               }
+               io_win_disable_window(ap_index, win_id);
+               win++;
+       }
+}
+
+#ifdef DEBUG_ADDR_MAP
+/* Debug helper: print every enabled IO window (target, start, end) and
+ * the GCR default-target register for AP 'ap_index'.
+ */
+static void dump_io_win(int ap_index)
+{
+       uint32_t trgt_id, win_id;
+       uint32_t alr, ahr;
+       uint64_t start, end;
+
+       /* Dump all IO windows */
+       tf_printf("\tbank  target     start              end\n");
+       tf_printf("\t----------------------------------------------------\n");
+       for (win_id = 0; win_id < MVEBU_IO_WIN_MAX_WINS; win_id++) {
+               alr = mmio_read_32(IO_WIN_ALR_OFFSET(ap_index, win_id));
+               if (alr & WIN_ENABLE_BIT) {
+                       alr &= ~WIN_ENABLE_BIT;
+                       ahr = mmio_read_32(IO_WIN_AHR_OFFSET(ap_index, win_id));
+                       trgt_id = mmio_read_32(IO_WIN_CR_OFFSET(ap_index,
+                                                               win_id));
+                       start = ((uint64_t)alr << ADDRESS_SHIFT);
+                       end = (((uint64_t)ahr + 0x10) << ADDRESS_SHIFT);
+                       tf_printf("\tio-win %d     0x%016llx 0x%016llx\n",
+                                 trgt_id, start, end);
+               }
+       }
+       tf_printf("\tio-win gcr is %x\n",
+                 mmio_read_32(MVEBU_IO_WIN_BASE(ap_index) +
+               MVEBU_IO_WIN_GCR_OFFSET));
+}
+#endif
+
+/* Save CR/ALR/AHR of windows [win_first..win_last] into 'buffer'
+ * (3 words per window) followed by one word for the GCR. The buffer
+ * layout must match iow_restore_win_range().
+ */
+static void iow_save_win_range(int ap_id, int win_first, int win_last,
+                              uint32_t *buffer)
+{
+       int win_id, idx;
+
+       /* Save IOW */
+       for (idx = 0, win_id = win_first; win_id <= win_last; win_id++) {
+               buffer[idx++] = mmio_read_32(IO_WIN_CR_OFFSET(ap_id, win_id));
+               buffer[idx++] = mmio_read_32(IO_WIN_ALR_OFFSET(ap_id, win_id));
+               buffer[idx++] = mmio_read_32(IO_WIN_AHR_OFFSET(ap_id, win_id));
+       }
+       buffer[idx] = mmio_read_32(MVEBU_IO_WIN_BASE(ap_id) +
+                                  MVEBU_IO_WIN_GCR_OFFSET);
+}
+
+/* Restore CR/ALR/AHR of windows [win_first..win_last] plus the GCR from
+ * 'buffer'; inverse of iow_save_win_range() with the same layout.
+ */
+static void iow_restore_win_range(int ap_id, int win_first, int win_last,
+                                 uint32_t *buffer)
+{
+       int win_id, idx;
+
+       /* Restore IOW */
+       for (idx = 0, win_id = win_first; win_id <= win_last; win_id++) {
+               mmio_write_32(IO_WIN_CR_OFFSET(ap_id, win_id), buffer[idx++]);
+               mmio_write_32(IO_WIN_ALR_OFFSET(ap_id, win_id), buffer[idx++]);
+               mmio_write_32(IO_WIN_AHR_OFFSET(ap_id, win_id), buffer[idx++]);
+       }
+       mmio_write_32(MVEBU_IO_WIN_BASE(ap_id) + MVEBU_IO_WIN_GCR_OFFSET,
+                     buffer[idx++]);
+}
+
+/* Save all IO windows of AP 'ap_id' into the static io_win_regs_save
+ * buffer (e.g. across a suspend cycle).
+ */
+void iow_save_win_all(int ap_id)
+{
+       iow_save_win_range(ap_id, 0, MVEBU_IO_WIN_MAX_WINS - 1,
+                          io_win_regs_save);
+}
+
+/* Restore all IO windows of AP 'ap_id' from the static io_win_regs_save
+ * buffer previously filled by iow_save_win_all().
+ */
+void iow_restore_win_all(int ap_id)
+{
+       iow_restore_win_range(ap_id, 0, MVEBU_IO_WIN_MAX_WINS - 1,
+                             io_win_regs_save);
+}
+
+/* Initialize IO window address decoding for AP 'ap_index': program the
+ * GCR default target, disable all windows except Win0 (BootROM), then
+ * enable the platform-provided windows starting from window 1.
+ * Returns 0.
+ */
+int init_io_win(int ap_index)
+{
+       struct addr_map_win *win;
+       uint32_t win_id, win_reg;
+       uint32_t win_count;
+
+       INFO("Initializing IO WIN Address decoding\n");
+
+       /* Get the array of the windows and its size */
+       marvell_get_io_win_memory_map(ap_index, &win, &win_count);
+       /* NOTE(review): win_count is unsigned, so '<= 0' is just '== 0';
+        * execution deliberately continues to set the GCR and disable
+        * the remaining windows even when no windows are configured.
+        */
+       if (win_count <= 0)
+               INFO("no windows configurations found\n");
+
+       if (win_count > MVEBU_IO_WIN_MAX_WINS) {
+               INFO("number of windows is bigger than %d\n",
+                    MVEBU_IO_WIN_MAX_WINS);
+               return 0;
+       }
+
+       /* Get the default target id to set the GCR */
+       win_reg = marvell_get_io_win_gcr_target(ap_index);
+       mmio_write_32(MVEBU_IO_WIN_BASE(ap_index) + MVEBU_IO_WIN_GCR_OFFSET,
+                     win_reg);
+
+       /* disable all IO windows */
+       for (win_id = 1; win_id < MVEBU_IO_WIN_MAX_WINS; win_id++)
+               io_win_disable_window(ap_index, win_id);
+
+       /* enable relevant windows, starting from win_id = 1 because
+        * index 0 dedicated for BootROM
+        */
+       for (win_id = 1; win_id <= win_count; win_id++, win++) {
+               io_win_check(win);
+               io_win_enable_window(ap_index, win, win_id);
+       }
+
+#ifdef DEBUG_ADDR_MAP
+       dump_io_win(ap_index);
+#endif
+
+       INFO("Done IO WIN Address decoding Initializing\n");
+
+       return 0;
+}
diff --git a/drivers/marvell/iob.c b/drivers/marvell/iob.c
new file mode 100644 (file)
index 0000000..9f9d047
--- /dev/null
@@ -0,0 +1,195 @@
+/*
+ * Copyright (C) 2016 - 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+/* IOB unit device driver for Marvell CP110 and CP115 SoCs */
+
+#include <a8k_common.h>
+#include <arch_helpers.h>
+#include <debug.h>
+#include <iob.h>
+#include <mmio.h>
+#include <mvebu.h>
+#include <mvebu_def.h>
+
+#if LOG_LEVEL >= LOG_LEVEL_INFO
+#define DEBUG_ADDR_MAP
+#endif
+
+#define MVEBU_IOB_OFFSET               (0x190000)
+#define MVEBU_IOB_MAX_WINS             16
+
+/* common defines */
+#define WIN_ENABLE_BIT                 (0x1)
+/* Physical address of the base of the window = {AddrLow[19:0],20`h0} */
+#define ADDRESS_SHIFT                  (20 - 4)
+#define ADDRESS_MASK                   (0xFFFFFFF0)
+#define IOB_WIN_ALIGNMENT              (0x100000)
+
+/* IOB registers */
+#define IOB_WIN_CR_OFFSET(win)         (iob_base + 0x0 + (0x20 * win))
+#define IOB_TARGET_ID_OFFSET           (8)
+#define IOB_TARGET_ID_MASK             (0xF)
+
+#define IOB_WIN_SCR_OFFSET(win)                (iob_base + 0x4 + (0x20 * win))
+#define IOB_WIN_ENA_CTRL_WRITE_SECURE  (0x1)
+#define IOB_WIN_ENA_CTRL_READ_SECURE   (0x2)
+#define IOB_WIN_ENA_WRITE_SECURE       (0x4)
+#define IOB_WIN_ENA_READ_SECURE                (0x8)
+
+#define IOB_WIN_ALR_OFFSET(win)                (iob_base + 0x8 + (0x20 * win))
+#define IOB_WIN_AHR_OFFSET(win)                (iob_base + 0xC + (0x20 * win))
+
+uintptr_t iob_base;
+
+/* Force 1 MB alignment of an IOB window's base address and size,
+ * reporting any adjustment it makes ('win_num' is used only in the
+ * messages).
+ */
+static void iob_win_check(struct addr_map_win *win, uint32_t win_num)
+{
+       /* check if address is aligned to the size */
+       if (IS_NOT_ALIGN(win->base_addr, IOB_WIN_ALIGNMENT)) {
+               win->base_addr = ALIGN_UP(win->base_addr, IOB_WIN_ALIGNMENT);
+               ERROR("Window %d: base address unaligned to 0x%x\n",
+                     win_num, IOB_WIN_ALIGNMENT);
+               tf_printf("Align up the base address to 0x%llx\n",
+                         win->base_addr);
+       }
+
+       /* size parameter validity check */
+       if (IS_NOT_ALIGN(win->win_size, IOB_WIN_ALIGNMENT)) {
+               win->win_size = ALIGN_UP(win->win_size, IOB_WIN_ALIGNMENT);
+               ERROR("Window %d: window size unaligned to 0x%x\n", win_num,
+                     IOB_WIN_ALIGNMENT);
+               tf_printf("Aligning size to 0x%llx\n", win->win_size);
+       }
+}
+
+/* Program and enable IOB window 'win_id': write the start/end addresses
+ * (ALR/AHR), then the control register with the enable bit and target ID.
+ */
+static void iob_enable_win(struct addr_map_win *win, uint32_t win_id)
+{
+       uint32_t iob_win_reg;
+       uint32_t alr, ahr;
+       uint64_t end_addr;
+
+       /* inclusive end address of the window */
+       end_addr = (win->base_addr + win->win_size - 1);
+       alr = (uint32_t)((win->base_addr >> ADDRESS_SHIFT) & ADDRESS_MASK);
+       ahr = (uint32_t)((end_addr >> ADDRESS_SHIFT) & ADDRESS_MASK);
+
+       mmio_write_32(IOB_WIN_ALR_OFFSET(win_id), alr);
+       mmio_write_32(IOB_WIN_AHR_OFFSET(win_id), ahr);
+
+       iob_win_reg = WIN_ENABLE_BIT;
+       iob_win_reg |= (win->target_id & IOB_TARGET_ID_MASK)
+                      << IOB_TARGET_ID_OFFSET;
+       mmio_write_32(IOB_WIN_CR_OFFSET(win_id), iob_win_reg);
+
+}
+
+#ifdef DEBUG_ADDR_MAP
+/* Debug helper: print every enabled IOB window with its target name,
+ * start and end addresses. Window 0 has no AHR; its size is hardcoded
+ * to the 16 MB CP configuration space.
+ */
+static void dump_iob(void)
+{
+       uint32_t win_id, win_cr, alr, ahr;
+       uint8_t target_id;
+       uint64_t start, end;
+       char *iob_target_name[IOB_MAX_TID] = {
+               "CFG  ", "MCI0 ", "PEX1 ", "PEX2 ",
+               "PEX0 ", "NAND ", "RUNIT", "MCI1 " };
+
+       /* Dump all IOB windows */
+       tf_printf("bank  id target  start              end\n");
+       tf_printf("----------------------------------------------------\n");
+       for (win_id = 0; win_id < MVEBU_IOB_MAX_WINS; win_id++) {
+               win_cr = mmio_read_32(IOB_WIN_CR_OFFSET(win_id));
+               if (win_cr & WIN_ENABLE_BIT) {
+                       target_id = (win_cr >> IOB_TARGET_ID_OFFSET) &
+                                    IOB_TARGET_ID_MASK;
+                       alr = mmio_read_32(IOB_WIN_ALR_OFFSET(win_id));
+                       start = ((uint64_t)alr << ADDRESS_SHIFT);
+                       if (win_id != 0) {
+                               ahr = mmio_read_32(IOB_WIN_AHR_OFFSET(win_id));
+                               end = (((uint64_t)ahr + 0x10) << ADDRESS_SHIFT);
+                       } else {
+                               /* Window #0 size is hardcoded to 16MB, as it's
+                                * reserved for CP configuration space.
+                                */
+                               end = start + (16 << 20);
+                       }
+                       tf_printf("iob   %02d %s   0x%016llx 0x%016llx\n",
+                                 win_id, iob_target_name[target_id],
+                                 start, end);
+               }
+       }
+}
+#endif
+
+/* Move the CP configuration space of AP 'ap_idx'/CP 'cp_idx' from 'base'
+ * to 'new_base' by rewriting window 0's ALR, then retarget the driver's
+ * iob_base global to the new location.
+ */
+void iob_cfg_space_update(int ap_idx, int cp_idx, uintptr_t base,
+                         uintptr_t new_base)
+{
+       debug_enter();
+
+       /* address the IOB registers through the current (old) base */
+       iob_base = base + MVEBU_IOB_OFFSET;
+
+       NOTICE("Change the base address of AP%d-CP%d to %lx\n",
+              ap_idx, cp_idx, new_base);
+       mmio_write_32(IOB_WIN_ALR_OFFSET(0), new_base >> ADDRESS_SHIFT);
+
+       /* from now on the IOB registers live at the new base */
+       iob_base = new_base + MVEBU_IOB_OFFSET;
+
+       /* Make sure the address was configured by the CPU before
+        * any possible access to the CP.
+        */
+       dsb();
+
+       debug_exit();
+}
+
+/* Initialize IOB address decoding for the CP at 'base': disable and
+ * de-secure all windows except window 0 (internal registers), then
+ * enable the platform-provided windows starting from window 1.
+ * Returns 0.
+ */
+int init_iob(uintptr_t base)
+{
+       struct addr_map_win *win;
+       uint32_t win_id, win_reg;
+       uint32_t win_count;
+
+       INFO("Initializing IOB Address decoding\n");
+
+       /* Get the base address of the address decoding MBUS */
+       iob_base = base + MVEBU_IOB_OFFSET;
+
+       /* Get the array of the windows and fill the map data */
+       marvell_get_iob_memory_map(&win, &win_count, base);
+       /* win_count is unsigned, so '<= 0' is effectively '== 0' */
+       if (win_count <= 0) {
+               INFO("no windows configurations found\n");
+               return 0;
+       } else if (win_count > (MVEBU_IOB_MAX_WINS - 1)) {
+               ERROR("IOB mem map array > than max available windows (%d)\n",
+                     MVEBU_IOB_MAX_WINS);
+               win_count = MVEBU_IOB_MAX_WINS;
+       }
+
+       /* disable all IOB windows, start from win_id = 1
+        * because can't disable internal register window
+        */
+       for (win_id = 1; win_id < MVEBU_IOB_MAX_WINS; win_id++) {
+               win_reg = mmio_read_32(IOB_WIN_CR_OFFSET(win_id));
+               win_reg &= ~WIN_ENABLE_BIT;
+               mmio_write_32(IOB_WIN_CR_OFFSET(win_id), win_reg);
+
+               /* NOTE(review): the first op is '=' with '~', so all bits
+                * except the four *_SECURE enables end up set — confirm
+                * the remaining SCR bits are intended to be 1.
+                */
+               win_reg = ~IOB_WIN_ENA_CTRL_WRITE_SECURE;
+               win_reg &= ~IOB_WIN_ENA_CTRL_READ_SECURE;
+               win_reg &= ~IOB_WIN_ENA_WRITE_SECURE;
+               win_reg &= ~IOB_WIN_ENA_READ_SECURE;
+               mmio_write_32(IOB_WIN_SCR_OFFSET(win_id), win_reg);
+       }
+
+       for (win_id = 1; win_id < win_count + 1; win_id++, win++) {
+               iob_win_check(win, win_id);
+               iob_enable_win(win, win_id);
+       }
+
+#ifdef DEBUG_ADDR_MAP
+       dump_iob();
+#endif
+
+       INFO("Done IOB Address decoding Initializing\n");
+
+       return 0;
+}
diff --git a/drivers/marvell/mci.c b/drivers/marvell/mci.c
new file mode 100644 (file)
index 0000000..721504e
--- /dev/null
@@ -0,0 +1,832 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+/* MCI bus driver for Marvell ARMADA 8K and 8K+ SoCs */
+
+#include <debug.h>
+#include <delay_timer.h>
+#include <mmio.h>
+#include <mci.h>
+#include <mvebu.h>
+#include <mvebu_def.h>
+#include <plat_marvell.h>
+
+/* /HB /Units /Direct_regs /Direct regs
+ * /Configuration Register Write/Read Data Register
+ */
+#define MCI_WRITE_READ_DATA_REG(mci_index)     \
+                                       MVEBU_MCI_REG_BASE_REMAP(mci_index)
+/* /HB /Units /Direct_regs /Direct regs
+ * /Configuration Register Access Command Register
+ */
+#define MCI_ACCESS_CMD_REG(mci_index)          \
+                               (MVEBU_MCI_REG_BASE_REMAP(mci_index) + 0x4)
+
+/* Access Command fields :
+ * bit[3:0]   - Sub command: 1 => Peripheral Config Register Read,
+ *                          0 => Peripheral Config Register Write,
+ *                          2 => Peripheral Assign ID request,
+ *                          3 => Circular Config Write
+ * bit[5]     - 1 => Local (same chip access) 0 => Remote
+ * bit[15:8]  - Destination hop ID. Put Global ID (GID) here (see scheme below).
+ * bit[23:22] - 0x3 IHB PHY REG address space, 0x0 IHB Controller space
+ * bit[21:16] - Low 6 bits of offset. Hight 2 bits are taken from bit[28:27]
+ *             of IHB_PHY_CTRL
+ *             (must be set before any PHY register access occurs):
+ *             /IHB_REG /IHB_REGInterchip Hopping Bus Registers
+ *             /IHB Version Control Register
+ *
+ *             ixi_ihb_top             IHB PHY
+ *  AXI -----------------------------   -------------
+ *   <--| axi_hb_top | ihb_pipe_top |-->|           |
+ *   -->|   GID=1    |     GID=0    |<--|           |
+ *      -----------------------------   -------------
+ */
+#define MCI_INDIRECT_CTRL_READ_CMD             0x1
+#define MCI_INDIRECT_CTRL_ASSIGN_CMD           0x2
+#define MCI_INDIRECT_CTRL_CIRCULAR_CMD         0x3
+#define MCI_INDIRECT_CTRL_LOCAL_PKT            (1 << 5)
+#define MCI_INDIRECT_CTRL_CMD_DONE_OFFSET      6
+#define MCI_INDIRECT_CTRL_CMD_DONE             \
+                               (1 << MCI_INDIRECT_CTRL_CMD_DONE_OFFSET)
+#define MCI_INDIRECT_CTRL_DATA_READY_OFFSET    7
+#define MCI_INDIRECT_CTRL_DATA_READY           \
+                               (1 << MCI_INDIRECT_CTRL_DATA_READY_OFFSET)
+#define MCI_INDIRECT_CTRL_HOPID_OFFSET         8
+#define MCI_INDIRECT_CTRL_HOPID(id)            \
+                       (((id) & 0xFF) << MCI_INDIRECT_CTRL_HOPID_OFFSET)
+#define MCI_INDIRECT_CTRL_REG_CHIPID_OFFSET    16
+#define MCI_INDIRECT_REG_CTRL_ADDR(reg_num)    \
+                       (reg_num << MCI_INDIRECT_CTRL_REG_CHIPID_OFFSET)
+
+/* Hop ID values */
+#define GID_IHB_PIPE                                   0
+#define GID_AXI_HB                                     1
+#define GID_IHB_EXT                                    2
+
+#define MCI_DID_GLOBAL_ASSIGNMENT_REQUEST_REG          0x2
+/* Target MCi Local ID (LID, which is = self DID) */
+#define MCI_DID_GLOBAL_ASSIGN_REQ_MCI_LOCAL_ID(val)    (((val) & 0xFF) << 16)
+/* Bits [15:8]: Number of MCis on chip of target MCi */
+#define MCI_DID_GLOBAL_ASSIGN_REQ_MCI_COUNT(val)       (((val) & 0xFF) << 8)
+/* Bits [7:0]: Number of hops on chip of target MCi */
+#define MCI_DID_GLOBAL_ASSIGN_REQ_HOPS_NUM(val)                (((val) & 0xFF) << 0)
+
+/* IHB_REG domain registers */
+/* /HB /Units /IHB_REG /IHB_REGInterchip Hopping Bus Registers/
+ * Rx Memory Configuration Register (RX_MEM_CFG)
+ */
+#define MCI_CTRL_RX_MEM_CFG_REG_NUM                    0x0
+#define MCI_CTRL_RX_TX_MEM_CFG_RQ_THRESH(val)          (((val) & 0xFF) << 24)
+#define MCI_CTRL_RX_TX_MEM_CFG_PQ_THRESH(val)          (((val) & 0xFF) << 16)
+#define MCI_CTRL_RX_TX_MEM_CFG_NQ_THRESH(val)          (((val) & 0xFF) << 8)
+#define MCI_CTRL_RX_TX_MEM_CFG_DELTA_THRESH(val)       (((val) & 0xF) << 4)
+#define MCI_CTRL_RX_TX_MEM_CFG_RTC(val)                        (((val) & 0x3) << 2)
+#define MCI_CTRL_RX_TX_MEM_CFG_WTC(val)                        (((val) & 0x3) << 0)
+#define MCI_CTRL_RX_MEM_CFG_REG_DEF_CP_VAL             \
+                               (MCI_CTRL_RX_TX_MEM_CFG_RQ_THRESH(0x07) | \
+                               MCI_CTRL_RX_TX_MEM_CFG_PQ_THRESH(0x3f) | \
+                               MCI_CTRL_RX_TX_MEM_CFG_NQ_THRESH(0x3f) | \
+                               MCI_CTRL_RX_TX_MEM_CFG_DELTA_THRESH(0xf) | \
+                               MCI_CTRL_RX_TX_MEM_CFG_RTC(1) | \
+                               MCI_CTRL_RX_TX_MEM_CFG_WTC(1))
+
+#define MCI_CTRL_RX_MEM_CFG_REG_DEF_AP_VAL             \
+                               (MCI_CTRL_RX_TX_MEM_CFG_RQ_THRESH(0x3f) | \
+                               MCI_CTRL_RX_TX_MEM_CFG_PQ_THRESH(0x03) | \
+                               MCI_CTRL_RX_TX_MEM_CFG_NQ_THRESH(0x3f) | \
+                               MCI_CTRL_RX_TX_MEM_CFG_DELTA_THRESH(0xf) | \
+                               MCI_CTRL_RX_TX_MEM_CFG_RTC(1) | \
+                               MCI_CTRL_RX_TX_MEM_CFG_WTC(1))
+
+
+/* /HB /Units /IHB_REG /IHB_REGInterchip Hopping Bus Registers/
+ * Tx Memory Configuration Register (TX_MEM_CFG)
+ */
+#define MCI_CTRL_TX_MEM_CFG_REG_NUM                    0x1
+/* field mapping for TX mem config register
+ * are the same as for RX register - see register above
+ */
+#define MCI_CTRL_TX_MEM_CFG_REG_DEF_VAL                        \
+                               (MCI_CTRL_RX_TX_MEM_CFG_RQ_THRESH(0x20) | \
+                               MCI_CTRL_RX_TX_MEM_CFG_PQ_THRESH(0x20) | \
+                               MCI_CTRL_RX_TX_MEM_CFG_NQ_THRESH(0x20) | \
+                               MCI_CTRL_RX_TX_MEM_CFG_DELTA_THRESH(2) | \
+                               MCI_CTRL_RX_TX_MEM_CFG_RTC(1) | \
+                               MCI_CTRL_RX_TX_MEM_CFG_WTC(1))
+
+/* /HB /Units /IHB_REG /IHB_REGInterchip Hopping Bus Registers
+ * /IHB Link CRC Control
+ */
+/* MCi Link CRC Control Register (MCi_CRC_CTRL) */
+#define MCI_LINK_CRC_CTRL_REG_NUM                      0x4
+
+/* /HB /Units /IHB_REG /IHB_REGInterchip Hopping Bus Registers
+ * /IHB Status Register
+ */
+/* MCi Status Register (MCi_STS) */
+#define MCI_CTRL_STATUS_REG_NUM                                0x5
+#define MCI_CTRL_STATUS_REG_PHY_READY                  (1 << 12)
+#define MCI_CTRL_STATUS_REG_LINK_PRESENT               (1 << 15)
+#define MCI_CTRL_STATUS_REG_PHY_CID_VIO_OFFSET         24
+#define MCI_CTRL_STATUS_REG_PHY_CID_VIO_MASK           \
+                               (0xF << MCI_CTRL_STATUS_REG_PHY_CID_VIO_OFFSET)
+/* Expected successful Link result, including reserved bit */
+#define MCI_CTRL_PHY_READY             (MCI_CTRL_STATUS_REG_PHY_READY | \
+                                       MCI_CTRL_STATUS_REG_LINK_PRESENT | \
+                                       MCI_CTRL_STATUS_REG_PHY_CID_VIO_MASK)
+
+/* /HB /Units /IHB_REG /IHB_REGInterchip Hopping Bus Registers/
+ * MCi PHY Speed Settings Register (MCi_PHY_SETTING)
+ */
+#define MCI_CTRL_MCI_PHY_SETTINGS_REG_NUM              0x8
+#define MCI_CTRL_MCI_PHY_SET_DLO_FIFO_FULL_TRESH(val)  (((val) & 0xF) << 28)
+#define MCI_CTRL_MCI_PHY_SET_PHY_MAX_SPEED(val)                (((val) & 0xF) << 12)
+#define MCI_CTRL_MCI_PHY_SET_PHYCLK_SEL(val)           (((val) & 0xF) << 8)
+#define MCI_CTRL_MCI_PHY_SET_REFCLK_FREQ_SEL(val)      (((val) & 0xF) << 4)
+#define MCI_CTRL_MCI_PHY_SET_AUTO_LINK_EN(val)         (((val) & 0x1) << 1)
+#define MCI_CTRL_MCI_PHY_SET_REG_DEF_VAL               \
+                       (MCI_CTRL_MCI_PHY_SET_DLO_FIFO_FULL_TRESH(0x3) | \
+                       MCI_CTRL_MCI_PHY_SET_PHY_MAX_SPEED(0x3) | \
+                       MCI_CTRL_MCI_PHY_SET_PHYCLK_SEL(0x2) | \
+                       MCI_CTRL_MCI_PHY_SET_REFCLK_FREQ_SEL(0x1))
+#define MCI_CTRL_MCI_PHY_SET_REG_DEF_VAL2              \
+                       (MCI_CTRL_MCI_PHY_SET_DLO_FIFO_FULL_TRESH(0x3) | \
+                       MCI_CTRL_MCI_PHY_SET_PHY_MAX_SPEED(0x3) | \
+                       MCI_CTRL_MCI_PHY_SET_PHYCLK_SEL(0x5) | \
+                       MCI_CTRL_MCI_PHY_SET_REFCLK_FREQ_SEL(0x1))
+
+/* /HB /Units /IHB_REG /IHB_REGInterchip Hopping Bus Registers
+ * /IHB Mode Config
+ */
+#define MCI_CTRL_IHB_MODE_CFG_REG_NUM                  0x25
+#define MCI_CTRL_IHB_MODE_HBCLK_DIV(val)               ((val) & 0xFF)
+#define MCI_CTRL_IHB_MODE_CHUNK_MOD_OFFSET             8
+#define MCI_CTRL_IHB_MODE_CHUNK_MOD                    \
+                               (1 << MCI_CTRL_IHB_MODE_CHUNK_MOD_OFFSET)
+#define MCI_CTRL_IHB_MODE_FWD_MOD_OFFSET               9
+#define MCI_CTRL_IHB_MODE_FWD_MOD                      \
+                               (1 << MCI_CTRL_IHB_MODE_FWD_MOD_OFFSET)
+#define MCI_CTRL_IHB_MODE_SEQFF_FINE_MOD(val)          (((val) & 0xF) << 12)
+#define MCI_CTRL_IHB_MODE_RX_COMB_THRESH(val)          (((val) & 0xFF) << 16)
+#define MCI_CTRL_IHB_MODE_TX_COMB_THRESH(val)          (((val) & 0xFF) << 24)
+
+#define MCI_CTRL_IHB_MODE_CFG_REG_DEF_VAL              \
+                               (MCI_CTRL_IHB_MODE_HBCLK_DIV(6) | \
+                               MCI_CTRL_IHB_MODE_FWD_MOD | \
+                               MCI_CTRL_IHB_MODE_SEQFF_FINE_MOD(0xF) | \
+                               MCI_CTRL_IHB_MODE_RX_COMB_THRESH(0x3f) | \
+                               MCI_CTRL_IHB_MODE_TX_COMB_THRESH(0x40))
+/* AXI_HB registers */
+#define MCI_AXI_ACCESS_DATA_REG_NUM                    0x0
+#define MCI_AXI_ACCESS_PCIE_MODE                       1
+#define MCI_AXI_ACCESS_CACHE_CHECK_OFFSET              5
+#define MCI_AXI_ACCESS_CACHE_CHECK                     \
+                               (1 << MCI_AXI_ACCESS_CACHE_CHECK_OFFSET)
+#define MCI_AXI_ACCESS_FORCE_POST_WR_OFFSET            6
+#define MCI_AXI_ACCESS_FORCE_POST_WR                   \
+                               (1 << MCI_AXI_ACCESS_FORCE_POST_WR_OFFSET)
+#define MCI_AXI_ACCESS_DISABLE_CLK_GATING_OFFSET       9
+#define MCI_AXI_ACCESS_DISABLE_CLK_GATING              \
+                               (1 << MCI_AXI_ACCESS_DISABLE_CLK_GATING_OFFSET)
+
+/* /HB /Units /HB_REG /HB_REGHopping Bus Registers
+ * /Window 0 Address Mask Register
+ */
+#define MCI_HB_CTRL_WIN0_ADDRESS_MASK_REG_NUM          0x2
+
+/* /HB /Units /HB_REG /HB_REGHopping Bus Registers
+ * /Window 0 Destination Register
+ */
+#define MCI_HB_CTRL_WIN0_DESTINATION_REG_NUM           0x3
+#define MCI_HB_CTRL_WIN0_DEST_VALID_FLAG(val)          (((val) & 0x1) << 16)
+#define MCI_HB_CTRL_WIN0_DEST_ID(val)                  (((val) & 0xFF) << 0)
+
+/* /HB /Units /HB_REG /HB_REGHopping Bus Registers /Tx Control Register */
+#define MCI_HB_CTRL_TX_CTRL_REG_NUM                    0xD
+#define MCI_HB_CTRL_TX_CTRL_PCIE_MODE_OFFSET           24
+#define MCI_HB_CTRL_TX_CTRL_PCIE_MODE                  \
+                               (1 << MCI_HB_CTRL_TX_CTRL_PCIE_MODE_OFFSET)
+#define MCI_HB_CTRL_TX_CTRL_PRI_TH_QOS(val)            (((val) & 0xF) << 12)
+#define MCI_HB_CTRL_TX_CTRL_MAX_RD_CNT(val)            (((val) & 0x1F) << 6)
+#define MCI_HB_CTRL_TX_CTRL_MAX_WR_CNT(val)            (((val) & 0x1F) << 0)
+
+/* /HB /Units /IHB_REG /IHB_REGInterchip Hopping Bus Registers
+ * /IHB Version Control Register
+ */
+#define MCI_PHY_CTRL_REG_NUM                           0x7
+#define MCI_PHY_CTRL_MCI_MINOR                         0x8 /* BITS [3:0] */
+#define MCI_PHY_CTRL_MCI_MAJOR_OFFSET                  4
+#define MCI_PHY_CTRL_MCI_MAJOR                         \
+                               (1 << MCI_PHY_CTRL_MCI_MAJOR_OFFSET)
+#define MCI_PHY_CTRL_MCI_SLEEP_REQ_OFFSET              11
+#define MCI_PHY_CTRL_MCI_SLEEP_REQ                     \
+                               (1 << MCI_PHY_CTRL_MCI_SLEEP_REQ_OFFSET)
+/* Host=1 / Device=0 PHY mode */
+#define MCI_PHY_CTRL_MCI_PHY_MODE_OFFSET               24
+#define MCI_PHY_CTRL_MCI_PHY_MODE_HOST                 \
+                               (1 << MCI_PHY_CTRL_MCI_PHY_MODE_OFFSET)
+/* Register=1 / PWM=0 interface */
+#define MCI_PHY_CTRL_MCI_PHY_REG_IF_MODE_OFFSET                25
+#define MCI_PHY_CTRL_MCI_PHY_REG_IF_MODE               \
+                               (1 << MCI_PHY_CTRL_MCI_PHY_REG_IF_MODE_OFFSET)
+ /* PHY code InReset=1 */
+#define MCI_PHY_CTRL_MCI_PHY_RESET_CORE_OFFSET         26
+#define MCI_PHY_CTRL_MCI_PHY_RESET_CORE                        \
+                               (1 << MCI_PHY_CTRL_MCI_PHY_RESET_CORE_OFFSET)
+#define MCI_PHY_CTRL_PHY_ADDR_MSB_OFFSET               27
+#define MCI_PHY_CTRL_PHY_ADDR_MSB(addr)                        \
+                               (((addr) & 0x3) << \
+                               MCI_PHY_CTRL_PHY_ADDR_MSB_OFFSET)
+#define MCI_PHY_CTRL_PIDI_MODE_OFFSET                  31
+#define MCI_PHY_CTRL_PIDI_MODE                         \
+                               (1 << MCI_PHY_CTRL_PIDI_MODE_OFFSET)
+
+/* Number of times to wait for the MCI link ready after MCI configurations
+ * Normally takes 34-35 successive reads
+ */
+#define LINK_READY_TIMEOUT                             100
+
+/* Register domains addressable through the MCI indirect interface:
+ * the PHY register space and the controller (IHB) register space.
+ */
+enum mci_register_type {
+       MCI_REG_TYPE_PHY = 0,
+       MCI_REG_TYPE_CTRL,
+};
+
+/* Direction of an indirect access, used to select the completion
+ * flags that must be polled for (reads also need DATA_READY).
+ */
+enum {
+       MCI_CMD_WRITE,
+       MCI_CMD_READ
+};
+
+/* MMIO write wrapper instrumented for debugging: when LOG_LEVEL >= 40
+ * the target address and value are logged before the actual write.
+ */
+static void mci_mmio_write_32(uintptr_t addr, uint32_t data)
+{
+       VERBOSE("Write:\t0x%x = 0x%x\n", (uint32_t)addr, data);
+       mmio_write_32(addr, data);
+}
+/* MMIO read wrapper instrumented for debugging: when LOG_LEVEL >= 40
+ * the source address and the value read back are logged.
+ */
+static uint32_t mci_mmio_read_32(uintptr_t addr)
+{
+       uint32_t reg_val = mmio_read_32(addr);
+
+       VERBOSE("Read:\t0x%x = 0x%x\n", (uint32_t)addr, reg_val);
+       return reg_val;
+}
+
+/* MCI indirect access command completion polling:
+ * Each write/read command done via MCI indirect registers must be polled
+ * for command completion status. Read commands additionally require the
+ * DATA_READY flag before the data register may be consumed.
+ *
+ * Returns 1 in case of error (timeout waiting for completion)
+ * Returns 0 in case of command completed successfully.
+ */
+static int mci_poll_command_completion(int mci_index, int command_type)
+{
+       uint32_t mci_cmd_value = 0, retry_count = 100, ret = 0;
+       uint32_t completion_flags = MCI_INDIRECT_CTRL_CMD_DONE;
+
+       debug_enter();
+       /* Read commands require validating that requested data is ready */
+       if (command_type == MCI_CMD_READ)
+               completion_flags |= MCI_INDIRECT_CTRL_DATA_READY;
+
+       do {
+               /* wait 1 ms before each polling */
+               mdelay(1);
+               mci_cmd_value = mci_mmio_read_32(MCI_ACCESS_CMD_REG(mci_index));
+               if ((mci_cmd_value & completion_flags) == completion_flags)
+                       break;
+       } while (--retry_count > 0);
+
+       /* Decide success/failure from the completion flags, not from the
+        * loop counter: the previous post-decrement test wrapped
+        * retry_count to UINT32_MAX on timeout (so the timeout was never
+        * reported) and could report a false timeout when the command
+        * completed on the last allowed retry.
+        */
+       if ((mci_cmd_value & completion_flags) != completion_flags) {
+               ERROR("%s: MCI command timeout (command status = 0x%x)\n",
+                     __func__, mci_cmd_value);
+               ret = 1;
+       }
+
+       debug_exit();
+       return ret;
+}
+
+/* Issue an indirect read command on the given MCI interface and fetch
+ * the resulting data word into *value.
+ * Returns 0 on success, non-zero if the command did not complete in time
+ * (in which case *value holds whatever the data register contained).
+ */
+int mci_read(int mci_idx, uint32_t cmd, uint32_t *value)
+{
+       int ret;
+
+       /* Trigger the indirect read command */
+       mci_mmio_write_32(MCI_ACCESS_CMD_REG(mci_idx), cmd);
+
+       /* Wait for command-done + data-ready indications */
+       ret = mci_poll_command_completion(mci_idx, MCI_CMD_READ);
+
+       /* Fetch the data word */
+       *value = mci_mmio_read_32(MCI_WRITE_READ_DATA_REG(mci_idx));
+
+       return ret;
+}
+
+/* Issue an indirect write command on the given MCI interface.
+ * The data word must be loaded into the data register before the
+ * command register is written, hence the fixed ordering below.
+ * Returns 0 on success, non-zero on command completion timeout.
+ */
+int  mci_write(int mci_idx, uint32_t cmd, uint32_t data)
+{
+       mci_mmio_write_32(MCI_WRITE_READ_DATA_REG(mci_idx), data);
+       mci_mmio_write_32(MCI_ACCESS_CMD_REG(mci_idx), cmd);
+
+       return mci_poll_command_completion(mci_idx, MCI_CMD_WRITE);
+}
+
+/* Perform 3 configurations in one command: PCI mode,
+ * queues separation and cache bit
+ *
+ * Returns 0 only when the write completed AND the PCIe mode bit was
+ * verified by reading back the TX control register; returns 1 on any
+ * timeout or if the read-back lacks the PCIe mode bit.
+ */
+static int mci_axi_set_pcie_mode(int mci_index)
+{
+       /* ret defaults to failure (1) and is cleared only after a
+        * successful read-back verification below.
+        */
+       uint32_t reg_data, ret = 1;
+
+       debug_enter();
+       /* This configuration makes MCI IP behave consistently with AXI protocol
+        * It should be configured at one side only (for example locally at AP).
+        * The IP takes care of performing the same configurations at MCI on
+        * another side (for example remotely at CP).
+        */
+       mci_mmio_write_32(MCI_WRITE_READ_DATA_REG(mci_index),
+                         MCI_AXI_ACCESS_PCIE_MODE |
+                         MCI_AXI_ACCESS_CACHE_CHECK |
+                         MCI_AXI_ACCESS_FORCE_POST_WR |
+                         MCI_AXI_ACCESS_DISABLE_CLK_GATING);
+       /* Circular write so the same configuration propagates around the
+        * MCI ring (both link sides get it from a single command).
+        */
+       mci_mmio_write_32(MCI_ACCESS_CMD_REG(mci_index),
+                         MCI_INDIRECT_REG_CTRL_ADDR(
+                               MCI_AXI_ACCESS_DATA_REG_NUM)  |
+                         MCI_INDIRECT_CTRL_HOPID(GID_AXI_HB) |
+                         MCI_INDIRECT_CTRL_LOCAL_PKT |
+                         MCI_INDIRECT_CTRL_CIRCULAR_CMD);
+
+       /* if Write command was successful, verify PCIe mode */
+       if (mci_poll_command_completion(mci_index, MCI_CMD_WRITE) == 0) {
+               /* Verify the PCIe mode selected */
+               mci_mmio_write_32(MCI_ACCESS_CMD_REG(mci_index),
+                                 MCI_INDIRECT_REG_CTRL_ADDR(
+                                       MCI_HB_CTRL_TX_CTRL_REG_NUM)  |
+                                 MCI_INDIRECT_CTRL_HOPID(GID_AXI_HB) |
+                                 MCI_INDIRECT_CTRL_LOCAL_PKT |
+                                 MCI_INDIRECT_CTRL_READ_CMD);
+               /* if read was completed, verify PCIe mode */
+               if (mci_poll_command_completion(mci_index, MCI_CMD_READ) == 0) {
+                       reg_data = mci_mmio_read_32(
+                                       MCI_WRITE_READ_DATA_REG(mci_index));
+                       if (reg_data & MCI_HB_CTRL_TX_CTRL_PCIE_MODE)
+                               ret = 0;
+               }
+       }
+
+       debug_exit();
+       return ret;
+}
+
+/* Reduce sequence FIFO timer expiration threshold
+ *
+ * The configuration is applied first to the local (AP) side and then to
+ * the remote (CP) side; each side is wrapped in an enter/exit PIDI-mode
+ * sequence, so the statement order below must not be changed.
+ * Returns 0 if every indirect write completed, non-zero if any timed out
+ * (individual results are OR-ed together).
+ */
+static int mci_axi_set_fifo_thresh(int mci_index)
+{
+       uint32_t reg_data, ret = 0;
+
+       debug_enter();
+       /* This configuration reduces sequence FIFO timer expiration threshold
+        * (to 0x7 instead of 0xA).
+        * In MCI 1.6 version this configuration prevents possible functional
+        * issues.
+        * In version 1.82 the configuration prevents performance degradation
+        */
+
+       /* Configure local AP side */
+       /* Enter PIDI mode on the local PHY control register */
+       reg_data = MCI_PHY_CTRL_PIDI_MODE |
+                  MCI_PHY_CTRL_MCI_PHY_REG_IF_MODE |
+                  MCI_PHY_CTRL_MCI_PHY_MODE_HOST |
+                  MCI_PHY_CTRL_MCI_MAJOR |
+                  MCI_PHY_CTRL_MCI_MINOR;
+       mci_mmio_write_32(MCI_WRITE_READ_DATA_REG(mci_index), reg_data);
+       mci_mmio_write_32(MCI_ACCESS_CMD_REG(mci_index),
+                         MCI_INDIRECT_REG_CTRL_ADDR(MCI_PHY_CTRL_REG_NUM) |
+                         MCI_INDIRECT_CTRL_LOCAL_PKT);
+       ret |= mci_poll_command_completion(mci_index, MCI_CMD_WRITE);
+
+       /* Reduce the threshold */
+       mci_mmio_write_32(MCI_WRITE_READ_DATA_REG(mci_index),
+                         MCI_CTRL_IHB_MODE_CFG_REG_DEF_VAL);
+
+       mci_mmio_write_32(MCI_ACCESS_CMD_REG(mci_index),
+                         MCI_INDIRECT_REG_CTRL_ADDR(
+                               MCI_CTRL_IHB_MODE_CFG_REG_NUM) |
+                         MCI_INDIRECT_CTRL_LOCAL_PKT);
+       ret |= mci_poll_command_completion(mci_index, MCI_CMD_WRITE);
+
+       /* Exit PIDI mode */
+       reg_data = MCI_PHY_CTRL_MCI_PHY_REG_IF_MODE |
+                  MCI_PHY_CTRL_MCI_PHY_MODE_HOST |
+                  MCI_PHY_CTRL_MCI_MAJOR |
+                  MCI_PHY_CTRL_MCI_MINOR;
+       mci_mmio_write_32(MCI_WRITE_READ_DATA_REG(mci_index), reg_data);
+       mci_mmio_write_32(MCI_ACCESS_CMD_REG(mci_index),
+                         MCI_INDIRECT_REG_CTRL_ADDR(MCI_PHY_CTRL_REG_NUM) |
+                         MCI_INDIRECT_CTRL_LOCAL_PKT);
+       ret |= mci_poll_command_completion(mci_index, MCI_CMD_WRITE);
+
+       /* Configure remote CP side */
+       /* Enter PIDI mode on the remote PHY control register
+        * (no LOCAL_PKT flag: the command is forwarded over the link)
+        */
+       reg_data = MCI_PHY_CTRL_PIDI_MODE |
+                  MCI_PHY_CTRL_MCI_MAJOR |
+                  MCI_PHY_CTRL_MCI_MINOR |
+                  MCI_PHY_CTRL_MCI_PHY_REG_IF_MODE;
+       mci_mmio_write_32(MCI_WRITE_READ_DATA_REG(mci_index), reg_data);
+       mci_mmio_write_32(MCI_ACCESS_CMD_REG(mci_index),
+                         MCI_INDIRECT_REG_CTRL_ADDR(MCI_PHY_CTRL_REG_NUM) |
+                         MCI_CTRL_IHB_MODE_FWD_MOD);
+       ret |= mci_poll_command_completion(mci_index, MCI_CMD_WRITE);
+
+       /* Reduce the threshold */
+       mci_mmio_write_32(MCI_WRITE_READ_DATA_REG(mci_index),
+                         MCI_CTRL_IHB_MODE_CFG_REG_DEF_VAL);
+       mci_mmio_write_32(MCI_ACCESS_CMD_REG(mci_index),
+                         MCI_INDIRECT_REG_CTRL_ADDR(
+                               MCI_CTRL_IHB_MODE_CFG_REG_NUM) |
+                         MCI_INDIRECT_CTRL_HOPID(GID_IHB_EXT));
+       ret |= mci_poll_command_completion(mci_index, MCI_CMD_WRITE);
+
+       /* Exit PIDI mode */
+       reg_data = MCI_PHY_CTRL_MCI_MAJOR |
+                  MCI_PHY_CTRL_MCI_MINOR |
+                  MCI_PHY_CTRL_MCI_PHY_REG_IF_MODE;
+       mci_mmio_write_32(MCI_WRITE_READ_DATA_REG(mci_index), reg_data);
+       mci_mmio_write_32(MCI_ACCESS_CMD_REG(mci_index),
+                         MCI_INDIRECT_REG_CTRL_ADDR(MCI_PHY_CTRL_REG_NUM) |
+                         MCI_CTRL_IHB_MODE_FWD_MOD);
+
+       ret |= mci_poll_command_completion(mci_index, MCI_CMD_WRITE);
+
+       debug_exit();
+       return ret;
+}
+
+/* Configure:
+ * 1. AP & CP TX thresholds and delta configurations
+ * 2. DLO & DLI FIFO full threshold
+ * 3. RX thresholds and delta configurations
+ * 4. CP AR and AW outstanding
+ * 5. AP AR and AW outstanding
+ *
+ * Each step is an indirect data+command write pair followed by a
+ * completion poll; individual results are OR-ed into ret, so a
+ * non-zero return means at least one step timed out.
+ */
+static int mci_axi_set_fifo_rx_tx_thresh(int mci_index)
+{
+       uint32_t ret = 0;
+
+       debug_enter();
+       /* AP TX thresholds and delta configurations (IHB_reg 0x1) */
+       mci_mmio_write_32(MCI_WRITE_READ_DATA_REG(mci_index),
+                         MCI_CTRL_TX_MEM_CFG_REG_DEF_VAL);
+       mci_mmio_write_32(MCI_ACCESS_CMD_REG(mci_index),
+                         MCI_INDIRECT_REG_CTRL_ADDR(
+                               MCI_CTRL_TX_MEM_CFG_REG_NUM) |
+                         MCI_INDIRECT_CTRL_LOCAL_PKT);
+       ret |= mci_poll_command_completion(mci_index, MCI_CMD_WRITE);
+
+       /* CP TX thresholds and delta configurations (IHB_reg 0x1) */
+       mci_mmio_write_32(MCI_WRITE_READ_DATA_REG(mci_index),
+                         MCI_CTRL_TX_MEM_CFG_REG_DEF_VAL);
+       mci_mmio_write_32(MCI_ACCESS_CMD_REG(mci_index),
+                         MCI_INDIRECT_REG_CTRL_ADDR(
+                               MCI_CTRL_TX_MEM_CFG_REG_NUM) |
+                         MCI_INDIRECT_CTRL_HOPID(GID_IHB_EXT));
+       ret |= mci_poll_command_completion(mci_index, MCI_CMD_WRITE);
+
+       /* AP DLO & DLI FIFO full threshold & Auto-Link enable (IHB_reg 0x8) */
+       mci_mmio_write_32(MCI_WRITE_READ_DATA_REG(mci_index),
+                         MCI_CTRL_MCI_PHY_SET_REG_DEF_VAL |
+                         MCI_CTRL_MCI_PHY_SET_AUTO_LINK_EN(1));
+       mci_mmio_write_32(MCI_ACCESS_CMD_REG(mci_index),
+                         MCI_INDIRECT_REG_CTRL_ADDR(
+                               MCI_CTRL_MCI_PHY_SETTINGS_REG_NUM) |
+                         MCI_INDIRECT_CTRL_LOCAL_PKT);
+       ret |= mci_poll_command_completion(mci_index, MCI_CMD_WRITE);
+
+       /* CP DLO & DLI FIFO full threshold (IHB_reg 0x8) */
+       mci_mmio_write_32(MCI_WRITE_READ_DATA_REG(mci_index),
+                         MCI_CTRL_MCI_PHY_SET_REG_DEF_VAL);
+       mci_mmio_write_32(MCI_ACCESS_CMD_REG(mci_index),
+                         MCI_INDIRECT_REG_CTRL_ADDR(
+                               MCI_CTRL_MCI_PHY_SETTINGS_REG_NUM) |
+                         MCI_INDIRECT_CTRL_HOPID(GID_IHB_EXT));
+       ret |= mci_poll_command_completion(mci_index, MCI_CMD_WRITE);
+
+       /* AP RX thresholds and delta configurations (IHB_reg 0x0) */
+       mci_mmio_write_32(MCI_WRITE_READ_DATA_REG(mci_index),
+                         MCI_CTRL_RX_MEM_CFG_REG_DEF_AP_VAL);
+       mci_mmio_write_32(MCI_ACCESS_CMD_REG(mci_index),
+                         MCI_INDIRECT_REG_CTRL_ADDR(
+                               MCI_CTRL_RX_MEM_CFG_REG_NUM) |
+                         MCI_INDIRECT_CTRL_LOCAL_PKT);
+       ret |= mci_poll_command_completion(mci_index, MCI_CMD_WRITE);
+
+       /* CP RX thresholds and delta configurations (IHB_reg 0x0) */
+       mci_mmio_write_32(MCI_WRITE_READ_DATA_REG(mci_index),
+                         MCI_CTRL_RX_MEM_CFG_REG_DEF_CP_VAL);
+       mci_mmio_write_32(MCI_ACCESS_CMD_REG(mci_index),
+                         MCI_INDIRECT_REG_CTRL_ADDR(
+                               MCI_CTRL_RX_MEM_CFG_REG_NUM) |
+                         MCI_INDIRECT_CTRL_HOPID(GID_IHB_EXT));
+       ret |= mci_poll_command_completion(mci_index, MCI_CMD_WRITE);
+
+       /* AP AR & AW maximum AXI outstanding request cfg (HB_reg 0xd) */
+       mci_mmio_write_32(MCI_WRITE_READ_DATA_REG(mci_index),
+                         MCI_HB_CTRL_TX_CTRL_PRI_TH_QOS(8) |
+                         MCI_HB_CTRL_TX_CTRL_MAX_RD_CNT(3) |
+                         MCI_HB_CTRL_TX_CTRL_MAX_WR_CNT(3));
+       mci_mmio_write_32(MCI_ACCESS_CMD_REG(mci_index),
+                         MCI_INDIRECT_REG_CTRL_ADDR(
+                               MCI_HB_CTRL_TX_CTRL_REG_NUM) |
+                         MCI_INDIRECT_CTRL_HOPID(GID_AXI_HB) |
+                         MCI_INDIRECT_CTRL_LOCAL_PKT);
+       ret |= mci_poll_command_completion(mci_index, MCI_CMD_WRITE);
+
+       /* CP AR & AW maximum AXI outstanding request cfg (HB_reg 0xd) */
+       mci_mmio_write_32(MCI_WRITE_READ_DATA_REG(mci_index),
+                         MCI_HB_CTRL_TX_CTRL_PRI_TH_QOS(8) |
+                         MCI_HB_CTRL_TX_CTRL_MAX_RD_CNT(0xB) |
+                         MCI_HB_CTRL_TX_CTRL_MAX_WR_CNT(0x11));
+       /* NOTE(review): the two HOPID() values are OR-ed, yielding hop
+        * ID 3 (GID_IHB_EXT | GID_AXI_HB) — presumably the remote AXI_HB
+        * destination assigned during ID assignment; confirm against the
+        * MCI addressing scheme.
+        */
+       mci_mmio_write_32(MCI_ACCESS_CMD_REG(mci_index),
+                         MCI_INDIRECT_REG_CTRL_ADDR(
+                               MCI_HB_CTRL_TX_CTRL_REG_NUM) |
+                         MCI_INDIRECT_CTRL_HOPID(GID_IHB_EXT) |
+                         MCI_INDIRECT_CTRL_HOPID(GID_AXI_HB));
+       ret |= mci_poll_command_completion(mci_index, MCI_CMD_WRITE);
+
+       debug_exit();
+       return ret;
+}
+
+/* configure MCI to allow read & write transactions to arrive at the same time.
+ * Without the below configuration, MCI won't sent response to CPU for
+ * transactions which arrived simultaneously and will lead to CPU hang.
+ * The below will configure MCI to be able to pass transactions from/to CP/AP.
+ *
+ * Returns 0 if every configuration write completed, non-zero otherwise.
+ *
+ * NOTE(review): the data/command registers below are addressed with a
+ * hardcoded MCI index 0 while completion is polled on mci_index — verify
+ * this is intentional for SoCs with more than one MCI interface.
+ */
+static int mci_enable_simultaneous_transactions(int mci_index)
+{
+       uint32_t ret = 0;
+
+       debug_enter();
+       /* ID assignment (assigning global ID offset to CP) */
+       mci_mmio_write_32(MCI_WRITE_READ_DATA_REG(0),
+                         MCI_DID_GLOBAL_ASSIGN_REQ_MCI_LOCAL_ID(2) |
+                         MCI_DID_GLOBAL_ASSIGN_REQ_MCI_COUNT(2) |
+                         MCI_DID_GLOBAL_ASSIGN_REQ_HOPS_NUM(2));
+       mci_mmio_write_32(MCI_ACCESS_CMD_REG(0),
+                         MCI_INDIRECT_REG_CTRL_ADDR(
+                               MCI_DID_GLOBAL_ASSIGNMENT_REQUEST_REG) |
+                         MCI_INDIRECT_CTRL_ASSIGN_CMD);
+       ret |= mci_poll_command_completion(mci_index, MCI_CMD_WRITE);
+
+       /* Assigning dest. ID=3 to all transactions entering from AXI at AP */
+       mci_mmio_write_32(MCI_WRITE_READ_DATA_REG(0),
+                         MCI_HB_CTRL_WIN0_DEST_VALID_FLAG(1) |
+                         MCI_HB_CTRL_WIN0_DEST_ID(3));
+       mci_mmio_write_32(MCI_ACCESS_CMD_REG(0),
+                         MCI_INDIRECT_REG_CTRL_ADDR(
+                               MCI_HB_CTRL_WIN0_DESTINATION_REG_NUM) |
+                         MCI_INDIRECT_CTRL_HOPID(GID_AXI_HB) |
+                         MCI_INDIRECT_CTRL_LOCAL_PKT);
+       ret |= mci_poll_command_completion(mci_index, MCI_CMD_WRITE);
+
+       /* Assigning dest. ID=1 to all transactions entering from AXI at CP */
+       mci_mmio_write_32(MCI_WRITE_READ_DATA_REG(0),
+                         MCI_HB_CTRL_WIN0_DEST_VALID_FLAG(1) |
+                         MCI_HB_CTRL_WIN0_DEST_ID(1));
+       mci_mmio_write_32(MCI_ACCESS_CMD_REG(0),
+                         MCI_INDIRECT_REG_CTRL_ADDR(
+                               MCI_HB_CTRL_WIN0_DESTINATION_REG_NUM) |
+                         MCI_INDIRECT_CTRL_HOPID(GID_IHB_EXT) |
+                         MCI_INDIRECT_CTRL_HOPID(GID_AXI_HB));
+       ret |= mci_poll_command_completion(mci_index, MCI_CMD_WRITE);
+
+       /* End address to all transactions entering from AXI at AP.
+        * This will lead to get match for any AXI address
+        * and receive destination ID=3
+        */
+       mci_mmio_write_32(MCI_WRITE_READ_DATA_REG(0), 0xffffffff);
+       mci_mmio_write_32(MCI_ACCESS_CMD_REG(0),
+                         MCI_INDIRECT_REG_CTRL_ADDR(
+                               MCI_HB_CTRL_WIN0_ADDRESS_MASK_REG_NUM) |
+                         MCI_INDIRECT_CTRL_HOPID(GID_AXI_HB) |
+                         MCI_INDIRECT_CTRL_LOCAL_PKT);
+       ret |= mci_poll_command_completion(mci_index, MCI_CMD_WRITE);
+
+       /* End address to all transactions entering from AXI at CP.
+        * This will lead to get match for any AXI address
+        * and receive destination ID=1
+        */
+       mci_mmio_write_32(MCI_WRITE_READ_DATA_REG(0), 0xffffffff);
+       mci_mmio_write_32(MCI_ACCESS_CMD_REG(0),
+                         MCI_INDIRECT_REG_CTRL_ADDR(
+                               MCI_HB_CTRL_WIN0_ADDRESS_MASK_REG_NUM) |
+                         MCI_INDIRECT_CTRL_HOPID(GID_IHB_EXT) |
+                         MCI_INDIRECT_CTRL_HOPID(GID_AXI_HB));
+       ret |= mci_poll_command_completion(mci_index, MCI_CMD_WRITE);
+
+       debug_exit();
+       return ret;
+}
+
+/* Check if MCI simultaneous transaction was already enabled.
+ * Currently bootrom does this mci configuration only when the boot source is
+ * SAR_MCIX4, in other cases it should be done at this stage.
+ * It is worth noticing that in case of booting from uart, the bootrom
+ * flow is different and this mci initialization is skipped even if boot
+ * source is SAR_MCIX4. Therefore new verification bases on appropriate mci's
+ * register content: if the appropriate reg contains 0x0 it means that the
+ * bootrom didn't perform required mci configuration.
+ *
+ * Returns:
+ * 0 - configuration already done
+ * 1 - configuration missing
+ */
+static _Bool mci_simulatenous_trans_missing(int mci_index)
+{
+       uint32_t reg, ret;
+
+       /* read 'Window 0 Destination ID assignment' from HB register 0x3
+        * (TX_CFG_W0_DST_ID) to check whether ID assignment was already
+        * performed by BootROM.
+        */
+       debug_enter();
+       mci_mmio_write_32(MCI_ACCESS_CMD_REG(0),
+                         MCI_INDIRECT_REG_CTRL_ADDR(
+                               MCI_HB_CTRL_WIN0_DESTINATION_REG_NUM) |
+                         MCI_INDIRECT_CTRL_HOPID(GID_AXI_HB) |
+                         MCI_INDIRECT_CTRL_LOCAL_PKT |
+                         MCI_INDIRECT_CTRL_READ_CMD);
+       ret = mci_poll_command_completion(mci_index, MCI_CMD_READ);
+
+       /* NOTE(review): reg is consumed below even when the poll timed
+        * out (ret != 0), in which case its content is unreliable —
+        * confirm this best-effort behavior is intended.
+        */
+       reg = mci_mmio_read_32(MCI_WRITE_READ_DATA_REG(mci_index));
+
+       if (ret)
+               ERROR("Failed to verify MCI simultaneous read/write status\n");
+
+       debug_exit();
+       /* default ID assignment is 0, so if register doesn't contain zeros
+        * it means that bootrom already performed required configuration.
+        */
+       if (reg != 0)
+               return 0;
+
+       return 1;
+}
+
+/* For A1 revision, configure the MCI link for performance improvement:
+ * - set MCI to support read/write transactions to arrive at the same time
+ * - Switch AXI to PCIe mode
+ * - Reduce sequence FIFO threshold
+ * - Configure RX/TX FIFO thresholds
+ *
+ *   Note:
+ *   We don't exit on error code from any sub routine, to try (best effort) to
+ *   complete the MCI configuration.
+ *   (If we exit - Bootloader will surely fail to boot)
+ *
+ * NOTE(review): always returns 1, regardless of sub-step results —
+ * callers must not interpret the return value as an error code; confirm
+ * this is the intended contract.
+ */
+int mci_configure(int mci_index)
+{
+       int rval;
+
+       debug_enter();
+       /* According to design guidelines the MCI simultaneous transaction
+        * shouldn't be enabled more then once - therefore make sure that it
+        * wasn't already enabled in bootrom.
+        */
+       if (mci_simulatenous_trans_missing(mci_index)) {
+               VERBOSE("Enabling MCI simultaneous transaction\n");
+               /* set MCI to support read/write transactions
+                * to arrive at the same time
+                */
+               rval = mci_enable_simultaneous_transactions(mci_index);
+               if (rval)
+                       ERROR("Failed to set MCI simultaneous read/write\n");
+       } else
+               VERBOSE("Skip MCI ID assignment - already done by bootrom\n");
+
+       /* Configure MCI for more consistent behavior with AXI protocol */
+       rval = mci_axi_set_pcie_mode(mci_index);
+       if (rval)
+               ERROR("Failed to set MCI to AXI PCIe mode\n");
+
+       /* reduce FIFO global threshold */
+       rval = mci_axi_set_fifo_thresh(mci_index);
+       if (rval)
+               ERROR("Failed to set MCI FIFO global threshold\n");
+
+       /* configure RX/TX FIFO thresholds */
+       rval = mci_axi_set_fifo_rx_tx_thresh(mci_index);
+       if (rval)
+               ERROR("Failed to set MCI RX/TX FIFO threshold\n");
+
+       debug_exit();
+       return 1;
+}
+
+int mci_get_link_status(void)
+{
+       uint32_t cmd, data;
+
+       cmd = (MCI_INDIRECT_REG_CTRL_ADDR(MCI_CTRL_STATUS_REG_NUM) |
+               MCI_INDIRECT_CTRL_LOCAL_PKT | MCI_INDIRECT_CTRL_READ_CMD);
+       if (mci_read(0, cmd, &data)) {
+               ERROR("Failed to read status register\n");
+               return -1;
+       }
+
+       /* Check if the link is ready */
+       if (data != MCI_CTRL_PHY_READY) {
+               ERROR("Bad link status %x\n", data);
+               return -1;
+       }
+
+       return 0;
+}
+
+void mci_turn_link_down(void)
+{
+       uint32_t cmd, data;
+       int rval = 0;
+
+       debug_enter();
+
+       /* Turn off auto-link */
+       cmd = (MCI_INDIRECT_REG_CTRL_ADDR(MCI_CTRL_MCI_PHY_SETTINGS_REG_NUM) |
+                       MCI_INDIRECT_CTRL_LOCAL_PKT);
+       data = (MCI_CTRL_MCI_PHY_SET_REG_DEF_VAL2 |
+               MCI_CTRL_MCI_PHY_SET_AUTO_LINK_EN(0));
+       rval = mci_write(0, cmd, data);
+       if (rval)
+               ERROR("Failed to turn off auto-link\n");
+
+       /* Reset AP PHY */
+       cmd = (MCI_INDIRECT_REG_CTRL_ADDR(MCI_PHY_CTRL_REG_NUM) |
+               MCI_INDIRECT_CTRL_LOCAL_PKT);
+       data = (MCI_PHY_CTRL_MCI_MINOR |
+               MCI_PHY_CTRL_MCI_MAJOR |
+               MCI_PHY_CTRL_MCI_PHY_MODE_HOST |
+               MCI_PHY_CTRL_MCI_PHY_RESET_CORE);
+       rval = mci_write(0, cmd, data);
+       if (rval)
+               ERROR("Failed to reset AP PHY\n");
+
+       /* Clear all status & CRC values */
+       cmd = (MCI_INDIRECT_REG_CTRL_ADDR(MCI_LINK_CRC_CTRL_REG_NUM) |
+              MCI_INDIRECT_CTRL_LOCAL_PKT);
+       data = 0x0;
+       mci_write(0, cmd, data);
+       cmd = (MCI_INDIRECT_REG_CTRL_ADDR(MCI_CTRL_STATUS_REG_NUM) |
+              MCI_INDIRECT_CTRL_LOCAL_PKT);
+       data = 0x0;
+       rval = mci_write(0, cmd, data);
+       if (rval)
+               ERROR("Failed to reset AP PHY\n");
+
+       /* Wait 5ms before un-reset the PHY */
+       mdelay(5);
+
+       /* Un-reset AP PHY */
+       cmd = (MCI_INDIRECT_REG_CTRL_ADDR(MCI_PHY_CTRL_REG_NUM) |
+              MCI_INDIRECT_CTRL_LOCAL_PKT);
+       data = (MCI_PHY_CTRL_MCI_MINOR | MCI_PHY_CTRL_MCI_MAJOR |
+               MCI_PHY_CTRL_MCI_PHY_MODE_HOST);
+       rval = mci_write(0, cmd, data);
+       if (rval)
+               ERROR("Failed to un-reset AP PHY\n");
+
+       debug_exit();
+}
+
+void mci_turn_link_on(void)
+{
+       uint32_t cmd, data;
+       int rval = 0;
+
+       debug_enter();
+       /* Turn on auto-link */
+       cmd = (MCI_INDIRECT_REG_CTRL_ADDR(MCI_CTRL_MCI_PHY_SETTINGS_REG_NUM) |
+                       MCI_INDIRECT_CTRL_LOCAL_PKT);
+       data = (MCI_CTRL_MCI_PHY_SET_REG_DEF_VAL2 |
+               MCI_CTRL_MCI_PHY_SET_AUTO_LINK_EN(1));
+       rval = mci_write(0, cmd, data);
+       if (rval)
+               ERROR("Failed to turn on auto-link\n");
+
+       debug_exit();
+}
+
/* Initialize MCI for performance improvements.
 * Thin wrapper around mci_configure() that adds tracing; propagates its
 * return value unchanged.
 */
int mci_initialize(int mci_index)
{
	int rc;

	debug_enter();
	INFO("MCI%d initialization:\n", mci_index);
	rc = mci_configure(mci_index);
	debug_exit();

	return rc;
}
diff --git a/drivers/marvell/mochi/ap807_setup.c b/drivers/marvell/mochi/ap807_setup.c
new file mode 100644 (file)
index 0000000..075ca31
--- /dev/null
@@ -0,0 +1,237 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:    BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+/* AP807 Marvell SoC driver */
+
+#include <ap_setup.h>
+#include <cache_llc.h>
+#include <ccu.h>
+#include <debug.h>
+#include <io_win.h>
+#include <mci.h>
+#include <mmio.h>
+#include <mvebu_def.h>
+
+#define SMMU_sACR                              (MVEBU_SMMU_BASE + 0x10)
+#define SMMU_sACR_PG_64K                       (1 << 16)
+
+#define CCU_GSPMU_CR                           (MVEBU_CCU_BASE(MVEBU_AP0) \
+                                                               + 0x3F0)
+#define GSPMU_CPU_CONTROL                      (0x1 << 0)
+
+#define CCU_HTC_CR                             (MVEBU_CCU_BASE(MVEBU_AP0) \
+                                                               + 0x200)
+#define CCU_SET_POC_OFFSET                     5
+
+#define DSS_CR0                                        (MVEBU_RFU_BASE + 0x100)
+#define DVM_48BIT_VA_ENABLE                    (1 << 21)
+
+/* Secure MoChi incoming access */
+#define SEC_MOCHI_IN_ACC_REG                   (MVEBU_RFU_BASE + 0x4738)
+#define SEC_MOCHI_IN_ACC_IHB0_EN               (1)
+#define SEC_MOCHI_IN_ACC_IHB1_EN               (1 << 3)
+#define SEC_MOCHI_IN_ACC_IHB2_EN               (1 << 6)
+#define SEC_MOCHI_IN_ACC_PIDI_EN               (1 << 9)
+#define SEC_IN_ACCESS_ENA_ALL_MASTERS          (SEC_MOCHI_IN_ACC_IHB0_EN | \
+                                                SEC_MOCHI_IN_ACC_IHB1_EN | \
+                                                SEC_MOCHI_IN_ACC_IHB2_EN | \
+                                                SEC_MOCHI_IN_ACC_PIDI_EN)
+
+/* SYSRST_OUTn Config definitions */
+#define MVEBU_SYSRST_OUT_CONFIG_REG            (MVEBU_MISC_SOC_BASE + 0x4)
+#define WD_MASK_SYS_RST_OUT                    (1 << 2)
+
+/* DSS PHY for DRAM */
+#define DSS_SCR_REG                            (MVEBU_RFU_BASE + 0x208)
+#define DSS_PPROT_OFFS                         4
+#define DSS_PPROT_MASK                         0x7
+#define DSS_PPROT_PRIV_SECURE_DATA             0x1
+
/* Used for units of AP-807 (e.g. SDIO, etc.) */
+#define MVEBU_AXI_ATTR_BASE                    (MVEBU_REGS_BASE + 0x6F4580)
+#define MVEBU_AXI_ATTR_REG(index)              (MVEBU_AXI_ATTR_BASE + \
+                                                       0x4 * index)
+
+enum axi_attr {
+       AXI_SDIO_ATTR = 0,
+       AXI_DFX_ATTR,
+       AXI_MAX_ATTR,
+};
+
+static void ap_sec_masters_access_en(uint32_t enable)
+{
+       uint32_t reg;
+
+       /* Open/Close incoming access for all masters.
+        * The access is disabled in trusted boot mode
+        * Could only be done in EL3
+        */
+       reg = mmio_read_32(SEC_MOCHI_IN_ACC_REG);
+       if (enable)
+               mmio_write_32(SEC_MOCHI_IN_ACC_REG, reg |
+                             SEC_IN_ACCESS_ENA_ALL_MASTERS);
+       else
+               mmio_write_32(SEC_MOCHI_IN_ACC_REG,
+                             reg & ~SEC_IN_ACCESS_ENA_ALL_MASTERS);
+}
+
+static void setup_smmu(void)
+{
+       uint32_t reg;
+
+       /* Set the SMMU page size to 64 KB */
+       reg = mmio_read_32(SMMU_sACR);
+       reg |= SMMU_sACR_PG_64K;
+       mmio_write_32(SMMU_sACR, reg);
+}
+
/* One-time Aurora2 coherency-fabric setup for AP807:
 * hand GSPMU control to the CPU and, when the build enables the LLC,
 * turn the cache on and move the point of coherency to DDR.
 */
static void init_aurora2(void)
{
	uint32_t reg;

	/* Enable GSPMU control by CPU */
	reg = mmio_read_32(CCU_GSPMU_CR);
	reg |= GSPMU_CPU_CONTROL;
	mmio_write_32(CCU_GSPMU_CR, reg);

#if LLC_ENABLE
	/* Enable LLC for AP807 in exclusive mode */
	llc_enable(0, 1);

	/* Set point of coherency to DDR.
	 * This is required by units which have
	 * SW cache coherency
	 */
	reg = mmio_read_32(CCU_HTC_CR);
	reg |= (0x1 << CCU_SET_POC_OFFSET);
	mmio_write_32(CCU_HTC_CR, reg);
#endif /* LLC_ENABLE */
}
+
+
+/* MCIx indirect access register are based by default at 0xf4000000/0xf6000000
+ * to avoid conflict of internal registers of units connected via MCIx, which
+ * can be based on the same address (i.e CP1 base is also 0xf4000000),
+ * the following routines remaps the MCIx indirect bases to another domain
+ */
+static void mci_remap_indirect_access_base(void)
+{
+       uint32_t mci;
+
+       for (mci = 0; mci < MCI_MAX_UNIT_ID; mci++)
+               mmio_write_32(MCIX4_REG_START_ADDRESS_REG(mci),
+                                 MVEBU_MCI_REG_BASE_REMAP(mci) >>
+                                 MCI_REMAP_OFF_SHIFT);
+}
+
/* Program AXI attributes (Ax-Cache / Ax-Domain) for the AP807 units.
 * DFX is skipped because it only supports non-coherent traffic and its
 * attributes cannot be configured.
 */
static void ap807_axi_attr_init(void)
{
	uint32_t index, data;

	/* Initialize AXI attributes for AP807 */
	/* Go over the AXI attributes and set Ax-Cache and Ax-Domain */
	for (index = 0; index < AXI_MAX_ATTR; index++) {
		switch (index) {
		/* DFX works with no coherent only -
		 * there's no option to configure the Ax-Cache and Ax-Domain
		 */
		case AXI_DFX_ATTR:
			continue;
		default:
			/* Set Ax-Cache as cacheable, no allocate, modifiable,
			 * bufferable.
			 * The values are different because Read & Write
			 * definition is different in Ax-Cache
			 */
			data = mmio_read_32(MVEBU_AXI_ATTR_REG(index));
			data &= ~MVEBU_AXI_ATTR_ARCACHE_MASK;
			data |= (CACHE_ATTR_WRITE_ALLOC |
				 CACHE_ATTR_CACHEABLE   |
				 CACHE_ATTR_BUFFERABLE) <<
				 MVEBU_AXI_ATTR_ARCACHE_OFFSET;
			data &= ~MVEBU_AXI_ATTR_AWCACHE_MASK;
			data |= (CACHE_ATTR_READ_ALLOC |
				 CACHE_ATTR_CACHEABLE  |
				 CACHE_ATTR_BUFFERABLE) <<
				 MVEBU_AXI_ATTR_AWCACHE_OFFSET;
			/* Set Ax-Domain as Outer domain */
			data &= ~MVEBU_AXI_ATTR_ARDOMAIN_MASK;
			data |= DOMAIN_OUTER_SHAREABLE <<
				MVEBU_AXI_ATTR_ARDOMAIN_OFFSET;
			data &= ~MVEBU_AXI_ATTR_AWDOMAIN_MASK;
			data |= DOMAIN_OUTER_SHAREABLE <<
				MVEBU_AXI_ATTR_AWDOMAIN_OFFSET;
			mmio_write_32(MVEBU_AXI_ATTR_REG(index), data);
		}
	}
}
+
+static void misc_soc_configurations(void)
+{
+       uint32_t reg;
+
+       /* Enable 48-bit VA */
+       mmio_setbits_32(DSS_CR0, DVM_48BIT_VA_ENABLE);
+
+       /* Un-mask Watchdog reset from influencing the SYSRST_OUTn.
+        * Otherwise, upon WD timeout, the WD reset signal won't trigger reset
+        */
+       reg = mmio_read_32(MVEBU_SYSRST_OUT_CONFIG_REG);
+       reg &= ~(WD_MASK_SYS_RST_OUT);
+       mmio_write_32(MVEBU_SYSRST_OUT_CONFIG_REG, reg);
+}
+
/* Main AP807 initialization entry point, called once during boot.
 * The sequence below is order-sensitive: the coherency fabric and MCI
 * remap are set up before the address windows (IO_WIN/CCU) that depend
 * on them.
 */
void ap_init(void)
{
	/* Setup Aurora2. */
	init_aurora2();

	/* configure MCI mapping */
	mci_remap_indirect_access_base();

	/* configure IO_WIN windows */
	init_io_win(MVEBU_AP0);

	/* configure CCU windows */
	init_ccu(MVEBU_AP0);

	/* configure the SMMU */
	setup_smmu();

	/* Open AP incoming access for all masters */
	ap_sec_masters_access_en(1);

	/* configure axi for AP */
	ap807_axi_attr_init();

	/* misc configuration of the SoC */
	misc_soc_configurations();
}
+
+static void ap807_dram_phy_access_config(void)
+{
+       uint32_t reg_val;
+       /* Update DSS port access permission to DSS_PHY */
+       reg_val = mmio_read_32(DSS_SCR_REG);
+       reg_val &= ~(DSS_PPROT_MASK << DSS_PPROT_OFFS);
+       reg_val |= ((DSS_PPROT_PRIV_SECURE_DATA & DSS_PPROT_MASK) <<
+                   DSS_PPROT_OFFS);
+       mmio_write_32(DSS_SCR_REG, reg_val);
+}
+
/* BLE-stage AP initialization: on AP807 only the DSS port needs enabling */
void ap_ble_init(void)
{
	ap807_dram_phy_access_config();
}
+
/* Number of APs in the system; AP807 platforms have a single AP die */
int ap_get_count(void)
{
	return 1;
}
+
+
diff --git a/drivers/marvell/mochi/apn806_setup.c b/drivers/marvell/mochi/apn806_setup.c
new file mode 100644 (file)
index 0000000..1d33be9
--- /dev/null
@@ -0,0 +1,251 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+/* AP806 Marvell SoC driver */
+
+#include <ap_setup.h>
+#include <ccu.h>
+#include <cache_llc.h>
+#include <debug.h>
+#include <io_win.h>
+#include <mci.h>
+#include <mmio.h>
+#include <mvebu_def.h>
+
+#define SMMU_sACR                              (MVEBU_SMMU_BASE + 0x10)
+#define SMMU_sACR_PG_64K                       (1 << 16)
+
+#define CCU_GSPMU_CR                           (MVEBU_CCU_BASE(MVEBU_AP0) + \
+                                                       0x3F0)
+#define GSPMU_CPU_CONTROL                      (0x1 << 0)
+
+#define CCU_HTC_CR                             (MVEBU_CCU_BASE(MVEBU_AP0) + \
+                                                       0x200)
+#define CCU_SET_POC_OFFSET                     5
+
+#define CCU_RGF(win)                           (MVEBU_CCU_BASE(MVEBU_AP0) + \
+                                                       0x90 + 4 * (win))
+
+#define DSS_CR0                                        (MVEBU_RFU_BASE + 0x100)
+#define DVM_48BIT_VA_ENABLE                    (1 << 21)
+
+/* Secure MoChi incoming access */
+#define SEC_MOCHI_IN_ACC_REG                   (MVEBU_RFU_BASE + 0x4738)
+#define SEC_MOCHI_IN_ACC_IHB0_EN               (1)
+#define SEC_MOCHI_IN_ACC_IHB1_EN               (1 << 3)
+#define SEC_MOCHI_IN_ACC_IHB2_EN               (1 << 6)
+#define SEC_MOCHI_IN_ACC_PIDI_EN               (1 << 9)
+#define SEC_IN_ACCESS_ENA_ALL_MASTERS          (SEC_MOCHI_IN_ACC_IHB0_EN | \
+                                                SEC_MOCHI_IN_ACC_IHB1_EN | \
+                                                SEC_MOCHI_IN_ACC_IHB2_EN | \
+                                                SEC_MOCHI_IN_ACC_PIDI_EN)
+
+/* SYSRST_OUTn Config definitions */
+#define MVEBU_SYSRST_OUT_CONFIG_REG            (MVEBU_MISC_SOC_BASE + 0x4)
+#define WD_MASK_SYS_RST_OUT                    (1 << 2)
+
+/* Generic Timer System Controller */
+#define MVEBU_MSS_GTCR_REG                     (MVEBU_REGS_BASE + 0x581000)
+#define MVEBU_MSS_GTCR_ENABLE_BIT              0x1
+
+/*
+ * AXI Configuration.
+ */
+
+/* Used for Units of AP-806 (e.g. SDIO and etc) */
+#define MVEBU_AXI_ATTR_BASE                    (MVEBU_REGS_BASE + 0x6F4580)
+#define MVEBU_AXI_ATTR_REG(index)              (MVEBU_AXI_ATTR_BASE + \
+                                                       0x4 * index)
+
+enum axi_attr {
+       AXI_SDIO_ATTR = 0,
+       AXI_DFX_ATTR,
+       AXI_MAX_ATTR,
+};
+
+static void apn_sec_masters_access_en(uint32_t enable)
+{
+       uint32_t reg;
+
+       /* Open/Close incoming access for all masters.
+        * The access is disabled in trusted boot mode
+        * Could only be done in EL3
+        */
+       reg = mmio_read_32(SEC_MOCHI_IN_ACC_REG);
+       if (enable)
+               mmio_write_32(SEC_MOCHI_IN_ACC_REG, reg |
+                             SEC_IN_ACCESS_ENA_ALL_MASTERS);
+       else
+               mmio_write_32(SEC_MOCHI_IN_ACC_REG, reg &
+                             ~SEC_IN_ACCESS_ENA_ALL_MASTERS);
+}
+
+static void setup_smmu(void)
+{
+       uint32_t reg;
+
+       /* Set the SMMU page size to 64 KB */
+       reg = mmio_read_32(SMMU_sACR);
+       reg |= SMMU_sACR_PG_64K;
+       mmio_write_32(SMMU_sACR, reg);
+}
+
static void apn806_errata_wa_init(void)
{
	/*
	 * ERRATA ID: RES-3033912 - Internal Address Space Init state causes
	 * a hang upon accesses to [0xf070_0000, 0xf07f_ffff]
	 * Workaround: Boot Firmware (ATF) should configure CCU_RGF_WIN(4) to
	 * split [0x6e_0000, 0xff_ffff] to values [0x6e_0000, 0x6f_ffff] and
	 * [0x80_0000, 0xff_ffff] that cause accesses to the
	 * segment of [0xf070_0000, 0xf07f_ffff] to act as RAZWI.
	 */
	/* Magic window-split values mandated by the errata workaround */
	mmio_write_32(CCU_RGF(4), 0x37f9b809);
	mmio_write_32(CCU_RGF(5), 0x7ffa0009);
}
+
/* One-time Aurora2 coherency-fabric setup for AP806:
 * hand GSPMU control to the CPU, optionally enable the LLC with the point
 * of coherency at DDR, and apply the RES-3033912 errata workaround.
 */
static void init_aurora2(void)
{
	uint32_t reg;

	/* Enable GSPMU control by CPU */
	reg = mmio_read_32(CCU_GSPMU_CR);
	reg |= GSPMU_CPU_CONTROL;
	mmio_write_32(CCU_GSPMU_CR, reg);

#if LLC_ENABLE
	/* Enable LLC for AP806 in exclusive mode */
	llc_enable(0, 1);

	/* Set point of coherency to DDR.
	 * This is required by units which have
	 * SW cache coherency
	 */
	reg = mmio_read_32(CCU_HTC_CR);
	reg |= (0x1 << CCU_SET_POC_OFFSET);
	mmio_write_32(CCU_HTC_CR, reg);
#endif /* LLC_ENABLE */

	apn806_errata_wa_init();
}
+
+
+/* MCIx indirect access register are based by default at 0xf4000000/0xf6000000
+ * to avoid conflict of internal registers of units connected via MCIx, which
+ * can be based on the same address (i.e CP1 base is also 0xf4000000),
+ * the following routines remaps the MCIx indirect bases to another domain
+ */
+static void mci_remap_indirect_access_base(void)
+{
+       uint32_t mci;
+
+       for (mci = 0; mci < MCI_MAX_UNIT_ID; mci++)
+               mmio_write_32(MCIX4_REG_START_ADDRESS_REG(mci),
+                             MVEBU_MCI_REG_BASE_REMAP(mci) >>
+                             MCI_REMAP_OFF_SHIFT);
+}
+
/* Program AXI attributes (Ax-Cache / Ax-Domain) for the APN806 units.
 * DFX is skipped because it only supports non-coherent traffic and its
 * attributes cannot be configured.
 */
static void apn806_axi_attr_init(void)
{
	uint32_t index, data;

	/* Initialize AXI attributes for APN806 */

	/* Go over the AXI attributes and set Ax-Cache and Ax-Domain */
	for (index = 0; index < AXI_MAX_ATTR; index++) {
		switch (index) {
		/* DFX works with no coherent only -
		 * there's no option to configure the Ax-Cache and Ax-Domain
		 */
		case AXI_DFX_ATTR:
			continue;
		default:
			/* Set Ax-Cache as cacheable, no allocate, modifiable,
			 * bufferable
			 * The values are different because Read & Write
			 * definition is different in Ax-Cache
			 */
			data = mmio_read_32(MVEBU_AXI_ATTR_REG(index));
			data &= ~MVEBU_AXI_ATTR_ARCACHE_MASK;
			data |= (CACHE_ATTR_WRITE_ALLOC |
				 CACHE_ATTR_CACHEABLE   |
				 CACHE_ATTR_BUFFERABLE) <<
				 MVEBU_AXI_ATTR_ARCACHE_OFFSET;
			data &= ~MVEBU_AXI_ATTR_AWCACHE_MASK;
			data |= (CACHE_ATTR_READ_ALLOC |
				 CACHE_ATTR_CACHEABLE  |
				 CACHE_ATTR_BUFFERABLE) <<
				 MVEBU_AXI_ATTR_AWCACHE_OFFSET;
			/* Set Ax-Domain as Outer domain */
			data &= ~MVEBU_AXI_ATTR_ARDOMAIN_MASK;
			data |= DOMAIN_OUTER_SHAREABLE <<
				MVEBU_AXI_ATTR_ARDOMAIN_OFFSET;
			data &= ~MVEBU_AXI_ATTR_AWDOMAIN_MASK;
			data |= DOMAIN_OUTER_SHAREABLE <<
				MVEBU_AXI_ATTR_AWDOMAIN_OFFSET;
			mmio_write_32(MVEBU_AXI_ATTR_REG(index), data);
		}
	}
}
+
+static void dss_setup(void)
+{
+       /* Enable 48-bit VA */
+       mmio_setbits_32(DSS_CR0, DVM_48BIT_VA_ENABLE);
+}
+
/* Miscellaneous one-time SoC configuration for AP806.
 * NOTE(review): unlike the AP807 counterpart, this function is not
 * declared 'static' - confirm there are no external callers before
 * narrowing its linkage.
 */
void misc_soc_configurations(void)
{
	uint32_t reg;

	/* Un-mask Watchdog reset from influencing the SYSRST_OUTn.
	 * Otherwise, upon WD timeout, the WD reset signal won't trigger reset
	 */
	reg = mmio_read_32(MVEBU_SYSRST_OUT_CONFIG_REG);
	reg &= ~(WD_MASK_SYS_RST_OUT);
	mmio_write_32(MVEBU_SYSRST_OUT_CONFIG_REG, reg);
}
+
/* Main AP806 initialization entry point, called once during boot.
 * The sequence below is order-sensitive: the coherency fabric and MCI
 * remap are set up before the address windows (IO_WIN/CCU) that depend
 * on them.
 */
void ap_init(void)
{
	/* Setup Aurora2. */
	init_aurora2();

	/* configure MCI mapping */
	mci_remap_indirect_access_base();

	/* configure IO_WIN windows */
	init_io_win(MVEBU_AP0);

	/* configure CCU windows */
	init_ccu(MVEBU_AP0);

	/* configure DSS */
	dss_setup();

	/* configure the SMMU */
	setup_smmu();

	/* Open APN incoming access for all masters */
	apn_sec_masters_access_en(1);

	/* configure AXI for APN */
	apn806_axi_attr_init();

	/* misc configuration of the SoC */
	misc_soc_configurations();
}
+
/* BLE-stage AP initialization; AP806 requires no BLE-stage configuration */
void ap_ble_init(void)
{
}
+
/* Number of APs in the system; AP806 platforms contain exactly one */
int ap_get_count(void)
{
	return 1;
}
+
diff --git a/drivers/marvell/mochi/cp110_setup.c b/drivers/marvell/mochi/cp110_setup.c
new file mode 100644 (file)
index 0000000..c4cb307
--- /dev/null
@@ -0,0 +1,429 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+/* CP110 Marvell SoC driver */
+
+#include <amb_adec.h>
+#include <cp110_setup.h>
+#include <debug.h>
+#include <delay_timer.h>
+#include <iob.h>
+#include <plat_marvell.h>
+
+/*
+ * AXI Configuration.
+ */
+
/* Used for units of CP-110 (e.g. USB device, USB host, etc.) */
+#define MVEBU_AXI_ATTR_OFFSET                  (0x441300)
+#define MVEBU_AXI_ATTR_REG(index)              (MVEBU_AXI_ATTR_OFFSET + \
+                                                       0x4 * index)
+
+/* AXI Protection bits */
+#define MVEBU_AXI_PROT_OFFSET                          (0x441200)
+
+/* AXI Protection regs */
+#define MVEBU_AXI_PROT_REG(index)              ((index <= 4) ? \
+                                               (MVEBU_AXI_PROT_OFFSET + \
+                                                       0x4 * index) : \
+                                               (MVEBU_AXI_PROT_OFFSET + 0x18))
+#define MVEBU_AXI_PROT_REGS_NUM                        (6)
+
+#define MVEBU_SOC_CFGS_OFFSET                  (0x441900)
+#define MVEBU_SOC_CFG_REG(index)               (MVEBU_SOC_CFGS_OFFSET + \
+                                                       0x4 * index)
+#define MVEBU_SOC_CFG_REG_NUM                  (0)
+#define MVEBU_SOC_CFG_GLOG_SECURE_EN_MASK      (0xE)
+
+/* SATA3 MBUS to AXI regs */
+#define MVEBU_BRIDGE_WIN_DIS_REG               (MVEBU_SOC_CFGS_OFFSET + 0x10)
+#define MVEBU_BRIDGE_WIN_DIS_OFF               (0x0)
+
+/* SATA3 MBUS to AXI regs */
+#define MVEBU_SATA_M2A_AXI_PORT_CTRL_REG       (0x54ff04)
+
+/* AXI to MBUS bridge registers */
+#define MVEBU_AMB_IP_OFFSET                    (0x13ff00)
+#define MVEBU_AMB_IP_BRIDGE_WIN_REG(win)       (MVEBU_AMB_IP_OFFSET + \
+                                                       (win * 0x8))
+#define MVEBU_AMB_IP_BRIDGE_WIN_EN_OFFSET      0
+#define MVEBU_AMB_IP_BRIDGE_WIN_EN_MASK                \
+                               (0x1 << MVEBU_AMB_IP_BRIDGE_WIN_EN_OFFSET)
+#define MVEBU_AMB_IP_BRIDGE_WIN_SIZE_OFFSET    16
+#define MVEBU_AMB_IP_BRIDGE_WIN_SIZE_MASK      \
+                               (0xffff << MVEBU_AMB_IP_BRIDGE_WIN_SIZE_OFFSET)
+
+#define MVEBU_SAMPLE_AT_RESET_REG      (0x440600)
+#define SAR_PCIE1_CLK_CFG_OFFSET       31
+#define SAR_PCIE1_CLK_CFG_MASK         (0x1 << SAR_PCIE1_CLK_CFG_OFFSET)
+#define SAR_PCIE0_CLK_CFG_OFFSET       30
+#define SAR_PCIE0_CLK_CFG_MASK         (0x1 << SAR_PCIE0_CLK_CFG_OFFSET)
+#define SAR_I2C_INIT_EN_OFFSET         24
+#define SAR_I2C_INIT_EN_MASK           (1 << SAR_I2C_INIT_EN_OFFSET)
+
+/*******************************************************************************
+ * PCIE clock buffer control
+ ******************************************************************************/
+#define MVEBU_PCIE_REF_CLK_BUF_CTRL                    (0x4404F0)
+#define PCIE1_REFCLK_BUFF_SOURCE                       0x800
+#define PCIE0_REFCLK_BUFF_SOURCE                       0x400
+
+/*******************************************************************************
+ * MSS Device Push Set Register
+ ******************************************************************************/
+#define MVEBU_CP_MSS_DPSHSR_REG                                (0x280040)
+#define MSS_DPSHSR_REG_PCIE_CLK_SEL                    0x8
+
+/*******************************************************************************
+ * RTC Configuration
+ ******************************************************************************/
+#define MVEBU_RTC_BASE                                 (0x284000)
+#define MVEBU_RTC_STATUS_REG                           (MVEBU_RTC_BASE + 0x0)
+#define MVEBU_RTC_STATUS_ALARM1_MASK                   0x1
+#define MVEBU_RTC_STATUS_ALARM2_MASK                   0x2
+#define MVEBU_RTC_IRQ_1_CONFIG_REG                     (MVEBU_RTC_BASE + 0x4)
+#define MVEBU_RTC_IRQ_2_CONFIG_REG                     (MVEBU_RTC_BASE + 0x8)
+#define MVEBU_RTC_TIME_REG                             (MVEBU_RTC_BASE + 0xC)
+#define MVEBU_RTC_ALARM_1_REG                          (MVEBU_RTC_BASE + 0x10)
+#define MVEBU_RTC_ALARM_2_REG                          (MVEBU_RTC_BASE + 0x14)
+#define MVEBU_RTC_CCR_REG                              (MVEBU_RTC_BASE + 0x18)
+#define MVEBU_RTC_NOMINAL_TIMING                       0x2000
+#define MVEBU_RTC_NOMINAL_TIMING_MASK                  0x7FFF
+#define MVEBU_RTC_TEST_CONFIG_REG                      (MVEBU_RTC_BASE + 0x1C)
+#define MVEBU_RTC_BRIDGE_TIMING_CTRL0_REG              (MVEBU_RTC_BASE + 0x80)
+#define MVEBU_RTC_WRCLK_PERIOD_MASK                    0xFFFF
+#define MVEBU_RTC_WRCLK_PERIOD_DEFAULT                 0x3FF
+#define MVEBU_RTC_WRCLK_SETUP_OFFS                     16
+#define MVEBU_RTC_WRCLK_SETUP_MASK                     0xFFFF0000
+#define MVEBU_RTC_WRCLK_SETUP_DEFAULT                  0x29
+#define MVEBU_RTC_BRIDGE_TIMING_CTRL1_REG              (MVEBU_RTC_BASE + 0x84)
+#define MVEBU_RTC_READ_OUTPUT_DELAY_MASK               0xFFFF
+#define MVEBU_RTC_READ_OUTPUT_DELAY_DEFAULT            0x1F
+
+enum axi_attr {
+       AXI_ADUNIT_ATTR = 0,
+       AXI_COMUNIT_ATTR,
+       AXI_EIP197_ATTR,
+       AXI_USB3D_ATTR,
+       AXI_USB3H0_ATTR,
+       AXI_USB3H1_ATTR,
+       AXI_SATA0_ATTR,
+       AXI_SATA1_ATTR,
+       AXI_DAP_ATTR,
+       AXI_DFX_ATTR,
+       AXI_DBG_TRC_ATTR = 12,
+       AXI_SDIO_ATTR,
+       AXI_MSS_ATTR,
+       AXI_MAX_ATTR,
+};
+
+/* Most stream IDS are configured centrally in the CP-110 RFU
+ * but some are configured inside the unit registers
+ */
+#define RFU_STREAM_ID_BASE     (0x450000)
+#define USB3H_0_STREAM_ID_REG  (RFU_STREAM_ID_BASE + 0xC)
+#define USB3H_1_STREAM_ID_REG  (RFU_STREAM_ID_BASE + 0x10)
+#define SATA_0_STREAM_ID_REG   (RFU_STREAM_ID_BASE + 0x14)
+#define SATA_1_STREAM_ID_REG   (RFU_STREAM_ID_BASE + 0x18)
+
+#define CP_DMA_0_STREAM_ID_REG  (0x6B0010)
+#define CP_DMA_1_STREAM_ID_REG  (0x6D0010)
+
+/* We allocate IDs 128-255 for PCIe */
+#define MAX_STREAM_ID          (0x80)
+
/* Zero-terminated table of per-unit stream-ID registers walked by
 * cp110_stream_id_init().
 * NOTE(review): this table is never modified here - consider 'static const'
 * after confirming no external users.
 */
uintptr_t stream_id_reg[] = {
	USB3H_0_STREAM_ID_REG,
	USB3H_1_STREAM_ID_REG,
	CP_DMA_0_STREAM_ID_REG,
	CP_DMA_1_STREAM_ID_REG,
	SATA_0_STREAM_ID_REG,
	SATA_1_STREAM_ID_REG,
	0
};
+
+static void cp110_errata_wa_init(uintptr_t base)
+{
+       uint32_t data;
+
+       /* ERRATA GL-4076863:
+        * Reset value for global_secure_enable inputs must be changed
+        * from '1' to '0'.
+        * When asserted, only "secured" transactions can enter IHB
+        * configuration space.
+        * However, blocking AXI transactions is performed by IOB.
+        * Performing it also at IHB/HB complicates programming model.
+        *
+        * Enable non-secure access in SOC configuration register
+        */
+       data = mmio_read_32(base + MVEBU_SOC_CFG_REG(MVEBU_SOC_CFG_REG_NUM));
+       data &= ~MVEBU_SOC_CFG_GLOG_SECURE_EN_MASK;
+       mmio_write_32(base + MVEBU_SOC_CFG_REG(MVEBU_SOC_CFG_REG_NUM), data);
+}
+
/* Configure the PCIe reference clock buffers according to the pcie0/1
 * clock direction sampled at reset. The required programming differs by
 * CP110 silicon revision (A1 vs A2).
 */
static void cp110_pcie_clk_cfg(uintptr_t base)
{
	uint32_t pcie0_clk, pcie1_clk, reg;

	/*
	 * Determine the pcie0/1 clock direction (input/output) from the
	 * sample at reset.
	 */
	reg = mmio_read_32(base + MVEBU_SAMPLE_AT_RESET_REG);
	pcie0_clk = (reg & SAR_PCIE0_CLK_CFG_MASK) >> SAR_PCIE0_CLK_CFG_OFFSET;
	pcie1_clk = (reg & SAR_PCIE1_CLK_CFG_MASK) >> SAR_PCIE1_CLK_CFG_OFFSET;

	/* CP110 revision A2 */
	if (cp110_rev_id_get(base) == MVEBU_CP110_REF_ID_A2) {
		/*
		 * PCIe Reference Clock Buffer Control register must be
		 * set according to the clock direction (input/output)
		 */
		reg = mmio_read_32(base + MVEBU_PCIE_REF_CLK_BUF_CTRL);
		reg &= ~(PCIE0_REFCLK_BUFF_SOURCE | PCIE1_REFCLK_BUFF_SOURCE);
		/* clock value 0 means input direction */
		if (!pcie0_clk)
			reg |= PCIE0_REFCLK_BUFF_SOURCE;
		if (!pcie1_clk)
			reg |= PCIE1_REFCLK_BUFF_SOURCE;

		mmio_write_32(base + MVEBU_PCIE_REF_CLK_BUF_CTRL, reg);
	}

	/* CP110 revision A1 */
	if (cp110_rev_id_get(base) == MVEBU_CP110_REF_ID_A1) {
		if (!pcie0_clk || !pcie1_clk) {
			/*
			 * if one of the pcie clocks is set to input,
			 * we need to set mss_push[131] field, otherwise,
			 * the pcie clock might not work.
			 */
			reg = mmio_read_32(base + MVEBU_CP_MSS_DPSHSR_REG);
			reg |= MSS_DPSHSR_REG_PCIE_CLK_SEL;
			mmio_write_32(base + MVEBU_CP_MSS_DPSHSR_REG, reg);
		}
	}
}
+
/* Set a unique stream id for all DMA capable devices.
 * Walks the zero-terminated stream_id_reg table; the CP DMA registers hold
 * two IDs packed in one word, and SATA ports 0/1 share one ID since they
 * belong to the same SATA unit.
 * NOTE(review): the guard 'i > MAX_STREAM_ID_PER_CP' permits
 * MAX_STREAM_ID_PER_CP + 1 entries to be programmed - confirm whether '>='
 * was intended.
 */
static void cp110_stream_id_init(uintptr_t base, uint32_t stream_id)
{
	int i = 0;

	while (stream_id_reg[i]) {
		if (i > MAX_STREAM_ID_PER_CP) {
			NOTICE("Only first %d (maximum) Stream IDs allocated\n",
			       MAX_STREAM_ID_PER_CP);
			return;
		}

		if ((stream_id_reg[i] == CP_DMA_0_STREAM_ID_REG) ||
		    (stream_id_reg[i] == CP_DMA_1_STREAM_ID_REG))
			mmio_write_32(base + stream_id_reg[i],
				      stream_id << 16 |  stream_id);
		else
			mmio_write_32(base + stream_id_reg[i], stream_id);

		/* SATA port 0/1 are in the same SATA unit, and they should use
		 * the same STREAM ID number
		 */
		if (stream_id_reg[i] != SATA_0_STREAM_ID_REG)
			stream_id++;

		i++;
	}
}
+
+static void cp110_axi_attr_init(uintptr_t base)
+{
+       uint32_t index, data;
+
+       /* Initialize AXI attributes for Armada-7K/8K SoC */
+
+       /* Go over the AXI attributes and set Ax-Cache and Ax-Domain */
+       for (index = 0; index < AXI_MAX_ATTR; index++) {
+               switch (index) {
+               /* The DFX and MSS units support non-coherent traffic only -
+                * there is no option to configure the Ax-Cache and Ax-Domain
+                */
+               case AXI_DFX_ATTR:
+               case AXI_MSS_ATTR:
+                       continue;
+               default:
+                       /* Set Ax-Cache as cacheable, no allocate, modifiable,
+                        * bufferable
+                        * The values are different because Read & Write
+                        * definition is different in Ax-Cache
+                        */
+                       data = mmio_read_32(base + MVEBU_AXI_ATTR_REG(index));
+                       data &= ~MVEBU_AXI_ATTR_ARCACHE_MASK;
+                       data |= (CACHE_ATTR_WRITE_ALLOC |
+                                CACHE_ATTR_CACHEABLE   |
+                                CACHE_ATTR_BUFFERABLE) <<
+                                MVEBU_AXI_ATTR_ARCACHE_OFFSET;
+                       data &= ~MVEBU_AXI_ATTR_AWCACHE_MASK;
+                       data |= (CACHE_ATTR_READ_ALLOC |
+                                CACHE_ATTR_CACHEABLE  |
+                                CACHE_ATTR_BUFFERABLE) <<
+                                MVEBU_AXI_ATTR_AWCACHE_OFFSET;
+                       /* Set Ax-Domain as Outer domain */
+                       data &= ~MVEBU_AXI_ATTR_ARDOMAIN_MASK;
+                       data |= DOMAIN_OUTER_SHAREABLE <<
+                               MVEBU_AXI_ATTR_ARDOMAIN_OFFSET;
+                       data &= ~MVEBU_AXI_ATTR_AWDOMAIN_MASK;
+                       data |= DOMAIN_OUTER_SHAREABLE <<
+                               MVEBU_AXI_ATTR_AWDOMAIN_OFFSET;
+                       mmio_write_32(base + MVEBU_AXI_ATTR_REG(index), data);
+               }
+       }
+
+       /* SATA IOCC supported, cache attributes
+        * for SATA MBUS to AXI configuration.
+        */
+       data = mmio_read_32(base + MVEBU_SATA_M2A_AXI_PORT_CTRL_REG);
+       data &= ~MVEBU_SATA_M2A_AXI_AWCACHE_MASK;
+       data |= (CACHE_ATTR_WRITE_ALLOC |
+                CACHE_ATTR_CACHEABLE   |
+                CACHE_ATTR_BUFFERABLE) <<
+                MVEBU_SATA_M2A_AXI_AWCACHE_OFFSET;
+       data &= ~MVEBU_SATA_M2A_AXI_ARCACHE_MASK;
+       data |= (CACHE_ATTR_READ_ALLOC |
+                CACHE_ATTR_CACHEABLE  |
+                CACHE_ATTR_BUFFERABLE) <<
+                MVEBU_SATA_M2A_AXI_ARCACHE_OFFSET;
+       mmio_write_32(base + MVEBU_SATA_M2A_AXI_PORT_CTRL_REG, data);
+
+       /* Set all IO's AXI attribute to non-secure access. */
+       for (index = 0; index < MVEBU_AXI_PROT_REGS_NUM; index++)
+               mmio_write_32(base + MVEBU_AXI_PROT_REG(index),
+                             DOMAIN_SYSTEM_SHAREABLE);
+}
+
+static void amb_bridge_init(uintptr_t base)
+{
+       uint32_t reg;
+
+       /* Open AMB bridge Window to Access COMPHY/MDIO registers */
+       reg = mmio_read_32(base + MVEBU_AMB_IP_BRIDGE_WIN_REG(0));
+       reg &= ~(MVEBU_AMB_IP_BRIDGE_WIN_SIZE_MASK |
+                MVEBU_AMB_IP_BRIDGE_WIN_EN_MASK);
+       reg |= (0x7ff << MVEBU_AMB_IP_BRIDGE_WIN_SIZE_OFFSET) |
+              (0x1 << MVEBU_AMB_IP_BRIDGE_WIN_EN_OFFSET);
+       mmio_write_32(base + MVEBU_AMB_IP_BRIDGE_WIN_REG(0), reg);
+}
+
+static void cp110_rtc_init(uintptr_t base)
+{
+       /* Update MBus timing parameters before accessing RTC registers */
+       mmio_clrsetbits_32(base + MVEBU_RTC_BRIDGE_TIMING_CTRL0_REG,
+                          MVEBU_RTC_WRCLK_PERIOD_MASK,
+                          MVEBU_RTC_WRCLK_PERIOD_DEFAULT);
+
+       mmio_clrsetbits_32(base + MVEBU_RTC_BRIDGE_TIMING_CTRL0_REG,
+                          MVEBU_RTC_WRCLK_SETUP_MASK,
+                          MVEBU_RTC_WRCLK_SETUP_DEFAULT <<
+                          MVEBU_RTC_WRCLK_SETUP_OFFS);
+
+       mmio_clrsetbits_32(base + MVEBU_RTC_BRIDGE_TIMING_CTRL1_REG,
+                          MVEBU_RTC_READ_OUTPUT_DELAY_MASK,
+                          MVEBU_RTC_READ_OUTPUT_DELAY_DEFAULT);
+
+       /*
+        * Issue reset to the RTC if Clock Correction register
+        * contents did not sustain the reboot/power-on.
+        */
+       if ((mmio_read_32(base + MVEBU_RTC_CCR_REG) &
+           MVEBU_RTC_NOMINAL_TIMING_MASK) != MVEBU_RTC_NOMINAL_TIMING) {
+               /* Reset Test register */
+               mmio_write_32(base + MVEBU_RTC_TEST_CONFIG_REG, 0);
+               mdelay(500);
+
+               /* Reset Time register */
+               mmio_write_32(base + MVEBU_RTC_TIME_REG, 0);
+               udelay(62);
+
+               /* Reset Status register */
+               mmio_write_32(base + MVEBU_RTC_STATUS_REG,
+                             (MVEBU_RTC_STATUS_ALARM1_MASK |
+                             MVEBU_RTC_STATUS_ALARM2_MASK));
+               udelay(62);
+
+               /* Turn off Int1 and Int2 sources & clear the Alarm count */
+               mmio_write_32(base + MVEBU_RTC_IRQ_1_CONFIG_REG, 0);
+               mmio_write_32(base + MVEBU_RTC_IRQ_2_CONFIG_REG, 0);
+               mmio_write_32(base + MVEBU_RTC_ALARM_1_REG, 0);
+               mmio_write_32(base + MVEBU_RTC_ALARM_2_REG, 0);
+
+               /* Setup nominal register access timing */
+               mmio_write_32(base + MVEBU_RTC_CCR_REG,
+                             MVEBU_RTC_NOMINAL_TIMING);
+
+               /* Reset Time register */
+               mmio_write_32(base + MVEBU_RTC_TIME_REG, 0);
+               udelay(10);
+
+               /* Reset Status register */
+               mmio_write_32(base + MVEBU_RTC_STATUS_REG,
+                             (MVEBU_RTC_STATUS_ALARM1_MASK |
+                             MVEBU_RTC_STATUS_ALARM2_MASK));
+               udelay(50);
+       }
+}
+
+static void cp110_amb_adec_init(uintptr_t base)
+{
+       /* enable AXI-MBUS by clearing "Bridge Windows Disable" */
+       mmio_clrbits_32(base + MVEBU_BRIDGE_WIN_DIS_REG,
+                       (1 << MVEBU_BRIDGE_WIN_DIS_OFF));
+
+       /* configure AXI-MBUS windows for CP */
+       init_amb_adec(base);
+}
+
+void cp110_init(uintptr_t cp110_base, uint32_t stream_id)
+{
+       INFO("%s: Initialize CPx - base = %lx\n", __func__, cp110_base);
+
+       /* Configure IOB windows for this CP */
+       init_iob(cp110_base);
+
+       /* Configure AXI-MBUS windows for this CP */
+       cp110_amb_adec_init(cp110_base);
+
+       /* Configure AXI attributes for this CP */
+       cp110_axi_attr_init(cp110_base);
+
+       /* Execute SW workarounds for errata */
+       cp110_errata_wa_init(cp110_base);
+
+       /* Configure PCIe clock according to clock direction */
+       cp110_pcie_clk_cfg(cp110_base);
+
+       /* Configure stream IDs for this CP */
+       cp110_stream_id_init(cp110_base, stream_id);
+
+       /* Open AMB bridge for COMPHY access */
+       amb_bridge_init(cp110_base);
+
+       /* Reset RTC if needed */
+       cp110_rtc_init(cp110_base);
+}
+
+/* Do the minimal setup required to configure the CP in BLE */
+void cp110_ble_init(uintptr_t cp110_base)
+{
+#if PCI_EP_SUPPORT
+       INFO("%s: Initialize CPx - base = %lx\n", __func__, cp110_base);
+
+       amb_bridge_init(cp110_base);
+
+       /* Configure PCIe clock */
+       cp110_pcie_clk_cfg(cp110_base);
+
+       /* Configure PCIe endpoint */
+       ble_plat_pcie_ep_setup();
+#endif
+}
diff --git a/drivers/marvell/thermal.c b/drivers/marvell/thermal.c
new file mode 100644 (file)
index 0000000..c7ceb92
--- /dev/null
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:    BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+/* Driver for thermal unit located in Marvell ARMADA 8K and compatible SoCs */
+
+#include <debug.h>
+#include <thermal.h>
+
+int marvell_thermal_init(struct tsen_config *tsen_cfg)
+{
+       if (tsen_cfg->tsen_ready == 1) {
+               INFO("thermal sensor is already initialized\n");
+               return 0;
+       }
+
+       if (tsen_cfg->ptr_tsen_probe == NULL) {
+               ERROR("initial thermal sensor configuration is missing\n");
+               return -1;
+       }
+
+       if (tsen_cfg->ptr_tsen_probe(tsen_cfg)) {
+               ERROR("thermal sensor initialization failed\n");
+               return -1;
+       }
+
+       VERBOSE("thermal sensor was initialized\n");
+
+       return 0;
+}
+
+int marvell_thermal_read(struct tsen_config *tsen_cfg, int *temp)
+{
+       if (temp == NULL) {
+               ERROR("NULL pointer for temperature read\n");
+               return -1;
+       }
+
+       if (tsen_cfg->ptr_tsen_read == NULL ||
+           tsen_cfg->tsen_ready == 0) {
+               ERROR("thermal sensor was not initialized\n");
+               return -1;
+       }
+
+       if (tsen_cfg->ptr_tsen_read(tsen_cfg, temp)) {
+               ERROR("temperature read failed\n");
+               return -1;
+       }
+
+       return 0;
+}
diff --git a/drivers/mmc/mmc.c b/drivers/mmc/mmc.c
new file mode 100644 (file)
index 0000000..8fe3239
--- /dev/null
@@ -0,0 +1,714 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/* Define a simple and generic interface to access eMMC and SD-card devices. */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <debug.h>
+#include <errno.h>
+#include <mmc.h>
+#include <stdbool.h>
+#include <string.h>
+#include <utils.h>
+
+#define MMC_DEFAULT_MAX_RETRIES                5
+#define SEND_OP_COND_MAX_RETRIES       100
+
+#define MULT_BY_512K_SHIFT             19
+
+static const struct mmc_ops *ops;
+static unsigned int mmc_ocr_value;
+static struct mmc_csd_emmc mmc_csd;
+static unsigned char mmc_ext_csd[512] __aligned(4);
+static unsigned int mmc_flags;
+static struct mmc_device_info *mmc_dev_info;
+static unsigned int rca;
+
+static const unsigned char tran_speed_base[16] = {
+       0, 10, 12, 13, 15, 20, 26, 30, 35, 40, 45, 52, 55, 60, 70, 80
+};
+
+static const unsigned char sd_tran_speed_base[16] = {
+       0, 10, 12, 13, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 70, 80
+};
+
+static bool is_cmd23_enabled(void)
+{
+       return ((mmc_flags & MMC_FLAG_CMD23) != 0U);
+}
+
+static int mmc_send_cmd(unsigned int idx, unsigned int arg,
+                       unsigned int r_type, unsigned int *r_data)
+{
+       struct mmc_cmd cmd;
+       int ret;
+
+       zeromem(&cmd, sizeof(struct mmc_cmd));
+
+       cmd.cmd_idx = idx;
+       cmd.cmd_arg = arg;
+       cmd.resp_type = r_type;
+
+       ret = ops->send_cmd(&cmd);
+
+       if ((ret == 0) && (r_data != NULL)) {
+               int i;
+
+               for (i = 0; i < 4; i++) {
+                       *r_data = cmd.resp_data[i];
+                       r_data++;
+               }
+       }
+
+       if (ret != 0) {
+               VERBOSE("Send command %u error: %d\n", idx, ret);
+       }
+
+       return ret;
+}
+
+static int mmc_device_state(void)
+{
+       int retries = MMC_DEFAULT_MAX_RETRIES;
+       unsigned int resp_data[4];
+
+       do {
+               int ret;
+
+               if (retries == 0) {
+                       ERROR("CMD13 failed after %d retries\n",
+                             MMC_DEFAULT_MAX_RETRIES);
+                       return -EIO;
+               }
+
+               ret = mmc_send_cmd(MMC_CMD(13), rca << RCA_SHIFT_OFFSET,
+                                  MMC_RESPONSE_R(1), &resp_data[0]);
+               if (ret != 0) {
+                       return ret;
+               }
+
+               if ((resp_data[0] & STATUS_SWITCH_ERROR) != 0U) {
+                       return -EIO;
+               }
+
+               retries--;
+       } while ((resp_data[0] & STATUS_READY_FOR_DATA) == 0U);
+
+       return MMC_GET_STATE(resp_data[0]);
+}
+
+static int mmc_set_ext_csd(unsigned int ext_cmd, unsigned int value)
+{
+       int ret;
+
+       ret = mmc_send_cmd(MMC_CMD(6),
+                          EXTCSD_WRITE_BYTES | EXTCSD_CMD(ext_cmd) |
+                          EXTCSD_VALUE(value) | EXTCSD_CMD_SET_NORMAL,
+                          0, NULL);
+       if (ret != 0) {
+               return ret;
+       }
+
+       do {
+               ret = mmc_device_state();
+               if (ret < 0) {
+                       return ret;
+               }
+       } while (ret == MMC_STATE_PRG);
+
+       return 0;
+}
+
+static int mmc_sd_switch(unsigned int bus_width)
+{
+       int ret;
+       int retries = MMC_DEFAULT_MAX_RETRIES;
+       unsigned int scr[2] = { 0 };
+       unsigned int bus_width_arg = 0;
+
+       ret = ops->prepare(0, (uintptr_t)&scr, sizeof(scr));
+       if (ret != 0) {
+               return ret;
+       }
+
+       /* CMD55: Application Specific Command */
+       ret = mmc_send_cmd(MMC_CMD(55), rca << RCA_SHIFT_OFFSET,
+                          MMC_RESPONSE_R(1), NULL);
+       if (ret != 0) {
+               return ret;
+       }
+
+       /* ACMD51: SEND_SCR */
+       do {
+               ret = mmc_send_cmd(MMC_ACMD(51), 0, MMC_RESPONSE_R(1), NULL);
+               if ((ret != 0) && (retries == 0)) {
+                       ERROR("ACMD51 failed after %d retries (ret=%d)\n",
+                             MMC_DEFAULT_MAX_RETRIES, ret);
+                       return ret;
+               }
+
+               retries--;
+       } while (ret != 0);
+
+       ret = ops->read(0, (uintptr_t)&scr, sizeof(scr));
+       if (ret != 0) {
+               return ret;
+       }
+
+       if (((scr[0] & SD_SCR_BUS_WIDTH_4) != 0U) &&
+           (bus_width == MMC_BUS_WIDTH_4)) {
+               bus_width_arg = 2;
+       }
+
+       /* CMD55: Application Specific Command */
+       ret = mmc_send_cmd(MMC_CMD(55), rca << RCA_SHIFT_OFFSET,
+                          MMC_RESPONSE_R(1), NULL);
+       if (ret != 0) {
+               return ret;
+       }
+
+       /* ACMD6: SET_BUS_WIDTH */
+       ret = mmc_send_cmd(MMC_ACMD(6), bus_width_arg, MMC_RESPONSE_R(1), NULL);
+       if (ret != 0) {
+               return ret;
+       }
+
+       do {
+               ret = mmc_device_state();
+               if (ret < 0) {
+                       return ret;
+               }
+       } while (ret == MMC_STATE_PRG);
+
+       return 0;
+}
+
+static int mmc_set_ios(unsigned int clk, unsigned int bus_width)
+{
+       int ret;
+       unsigned int width = bus_width;
+
+       if (mmc_dev_info->mmc_dev_type != MMC_IS_EMMC) {
+               if (width == MMC_BUS_WIDTH_8) {
+                       WARN("Wrong bus config for SD-card, force to 4\n");
+                       width = MMC_BUS_WIDTH_4;
+               }
+               ret = mmc_sd_switch(width);
+               if (ret != 0) {
+                       return ret;
+               }
+       } else if (mmc_csd.spec_vers == 4U) {
+               ret = mmc_set_ext_csd(CMD_EXTCSD_BUS_WIDTH,
+                                     (unsigned int)width);
+               if (ret != 0) {
+                       return ret;
+               }
+       } else {
+               VERBOSE("Wrong MMC type or spec version\n");
+       }
+
+       return ops->set_ios(clk, width);
+}
+
+static int mmc_fill_device_info(void)
+{
+       unsigned long long c_size;
+       unsigned int speed_idx;
+       unsigned int nb_blocks;
+       unsigned int freq_unit;
+       int ret;
+       struct mmc_csd_sd_v2 *csd_sd_v2;
+
+       switch (mmc_dev_info->mmc_dev_type) {
+       case MMC_IS_EMMC:
+               mmc_dev_info->block_size = MMC_BLOCK_SIZE;
+
+               ret = ops->prepare(0, (uintptr_t)&mmc_ext_csd,
+                                  sizeof(mmc_ext_csd));
+               if (ret != 0) {
+                       return ret;
+               }
+
+               /* MMC CMD8: SEND_EXT_CSD */
+               ret = mmc_send_cmd(MMC_CMD(8), 0, MMC_RESPONSE_R(1), NULL);
+               if (ret != 0) {
+                       return ret;
+               }
+
+               ret = ops->read(0, (uintptr_t)&mmc_ext_csd,
+                               sizeof(mmc_ext_csd));
+               if (ret != 0) {
+                       return ret;
+               }
+
+               nb_blocks = (mmc_ext_csd[CMD_EXTCSD_SEC_CNT] << 0) |
+                           (mmc_ext_csd[CMD_EXTCSD_SEC_CNT + 1] << 8) |
+                           (mmc_ext_csd[CMD_EXTCSD_SEC_CNT + 2] << 16) |
+                           (mmc_ext_csd[CMD_EXTCSD_SEC_CNT + 3] << 24);
+
+               mmc_dev_info->device_size = (unsigned long long)nb_blocks *
+                       mmc_dev_info->block_size;
+
+               break;
+
+       case MMC_IS_SD:
+               /*
+                * Use the same mmc_csd struct, as required fields here
+                * (READ_BL_LEN, C_SIZE, CSIZE_MULT) are common with eMMC.
+                */
+               mmc_dev_info->block_size = BIT_32(mmc_csd.read_bl_len);
+
+               c_size = ((unsigned long long)mmc_csd.c_size_high << 2U) |
+                        (unsigned long long)mmc_csd.c_size_low;
+               assert(c_size != 0xFFFU);
+
+               mmc_dev_info->device_size = (c_size + 1U) *
+                                           BIT_64(mmc_csd.c_size_mult + 2U) *
+                                           mmc_dev_info->block_size;
+
+               break;
+
+       case MMC_IS_SD_HC:
+               assert(mmc_csd.csd_structure == 1U);
+
+               mmc_dev_info->block_size = MMC_BLOCK_SIZE;
+
+               /* Need to use mmc_csd_sd_v2 struct */
+               csd_sd_v2 = (struct mmc_csd_sd_v2 *)&mmc_csd;
+               c_size = ((unsigned long long)csd_sd_v2->c_size_high << 16) |
+                        (unsigned long long)csd_sd_v2->c_size_low;
+
+               mmc_dev_info->device_size = (c_size + 1U) << MULT_BY_512K_SHIFT;
+
+               break;
+
+       default:
+               ret = -EINVAL;
+               break;
+       }
+
+       if (ret != 0) {
+               return ret;
+       }
+
+       speed_idx = (mmc_csd.tran_speed & CSD_TRAN_SPEED_MULT_MASK) >>
+                        CSD_TRAN_SPEED_MULT_SHIFT;
+
+       assert(speed_idx > 0U);
+
+       if (mmc_dev_info->mmc_dev_type == MMC_IS_EMMC) {
+               mmc_dev_info->max_bus_freq = tran_speed_base[speed_idx];
+       } else {
+               mmc_dev_info->max_bus_freq = sd_tran_speed_base[speed_idx];
+       }
+
+       freq_unit = mmc_csd.tran_speed & CSD_TRAN_SPEED_UNIT_MASK;
+       while (freq_unit != 0U) {
+               mmc_dev_info->max_bus_freq *= 10U;
+               --freq_unit;
+       }
+
+       mmc_dev_info->max_bus_freq *= 10000U;
+
+       return 0;
+}
+
+static int sd_send_op_cond(void)
+{
+       int retries = SEND_OP_COND_MAX_RETRIES;
+       unsigned int resp_data[4];
+
+       do {
+               int ret;
+
+               if (retries == 0) {
+                       ERROR("ACMD41 failed after %d retries\n",
+                             SEND_OP_COND_MAX_RETRIES);
+                       return -EIO;
+               }
+
+               /* CMD55: Application Specific Command */
+               ret = mmc_send_cmd(MMC_CMD(55), 0, MMC_RESPONSE_R(1), NULL);
+               if (ret != 0) {
+                       return ret;
+               }
+
+               /* ACMD41: SD_SEND_OP_COND */
+               ret = mmc_send_cmd(MMC_ACMD(41), OCR_HCS, MMC_RESPONSE_R(3),
+                                  &resp_data[0]);
+               if (ret != 0) {
+                       return ret;
+               }
+
+               retries--;
+       } while ((resp_data[0] & OCR_POWERUP) == 0U);
+
+       mmc_ocr_value = resp_data[0];
+
+       if ((mmc_ocr_value & OCR_HCS) != 0U) {
+               mmc_dev_info->mmc_dev_type = MMC_IS_SD_HC;
+       } else {
+               mmc_dev_info->mmc_dev_type = MMC_IS_SD;
+       }
+
+       return 0;
+}
+
+static int mmc_send_op_cond(void)
+{
+       int ret;
+       int retries = SEND_OP_COND_MAX_RETRIES;
+       unsigned int resp_data[4];
+
+       /* CMD0: reset to IDLE */
+       ret = mmc_send_cmd(MMC_CMD(0), 0, 0, NULL);
+       if (ret != 0) {
+               return ret;
+       }
+
+       do {
+               if (retries == 0) {
+                       ERROR("CMD1 failed after %d retries\n",
+                             SEND_OP_COND_MAX_RETRIES);
+                       return -EIO;
+               }
+
+               /* CMD1: get OCR register (SEND_OP_COND) */
+               ret = mmc_send_cmd(MMC_CMD(1), OCR_SECTOR_MODE |
+                                  OCR_VDD_MIN_2V7 | OCR_VDD_MIN_1V7,
+                                  MMC_RESPONSE_R(3), &resp_data[0]);
+               if (ret != 0) {
+                       return ret;
+               }
+
+               retries--;
+       } while ((resp_data[0] & OCR_POWERUP) == 0U);
+
+       mmc_ocr_value = resp_data[0];
+
+       return 0;
+}
+
+static int mmc_enumerate(unsigned int clk, unsigned int bus_width)
+{
+       int ret;
+       unsigned int resp_data[4];
+
+       ops->init();
+
+       /* CMD0: reset to IDLE */
+       ret = mmc_send_cmd(MMC_CMD(0), 0, 0, NULL);
+       if (ret != 0) {
+               return ret;
+       }
+
+       /* CMD8: Send Interface Condition Command */
+       ret = mmc_send_cmd(MMC_CMD(8), VHS_2_7_3_6_V | CMD8_CHECK_PATTERN,
+                          MMC_RESPONSE_R(7), &resp_data[0]);
+
+       if ((ret == 0) && ((resp_data[0] & 0xffU) == CMD8_CHECK_PATTERN)) {
+               ret = sd_send_op_cond();
+       } else {
+               ret = mmc_send_op_cond();
+       }
+       if (ret != 0) {
+               return ret;
+       }
+
+       /* CMD2: Card Identification */
+       ret = mmc_send_cmd(MMC_CMD(2), 0, MMC_RESPONSE_R(2), NULL);
+       if (ret != 0) {
+               return ret;
+       }
+
+       /* CMD3: Set Relative Address */
+       if (mmc_dev_info->mmc_dev_type == MMC_IS_EMMC) {
+               rca = MMC_FIX_RCA;
+               ret = mmc_send_cmd(MMC_CMD(3), rca << RCA_SHIFT_OFFSET,
+                                  MMC_RESPONSE_R(1), NULL);
+               if (ret != 0) {
+                       return ret;
+               }
+       } else {
+               ret = mmc_send_cmd(MMC_CMD(3), 0,
+                                  MMC_RESPONSE_R(6), &resp_data[0]);
+               if (ret != 0) {
+                       return ret;
+               }
+
+               rca = (resp_data[0] & 0xFFFF0000U) >> 16;
+       }
+
+       /* CMD9: CSD Register */
+       ret = mmc_send_cmd(MMC_CMD(9), rca << RCA_SHIFT_OFFSET,
+                          MMC_RESPONSE_R(2), &resp_data[0]);
+       if (ret != 0) {
+               return ret;
+       }
+
+       memcpy(&mmc_csd, &resp_data, sizeof(resp_data));
+
+       /* CMD7: Select Card */
+       ret = mmc_send_cmd(MMC_CMD(7), rca << RCA_SHIFT_OFFSET,
+                          MMC_RESPONSE_R(1), NULL);
+       if (ret != 0) {
+               return ret;
+       }
+
+       do {
+               ret = mmc_device_state();
+               if (ret < 0) {
+                       return ret;
+               }
+       } while (ret != MMC_STATE_TRAN);
+
+       ret = mmc_fill_device_info();
+       if (ret != 0) {
+               return ret;
+       }
+
+       return mmc_set_ios(clk, bus_width);
+}
+
+size_t mmc_read_blocks(unsigned int lba, uintptr_t buf, size_t size)
+{
+       int ret;
+       unsigned int cmd_idx, cmd_arg;
+
+       assert((ops != NULL) &&
+              (ops->read != NULL) &&
+              (size != 0U) &&
+              ((size & MMC_BLOCK_MASK) == 0U));
+
+       ret = ops->prepare(lba, buf, size);
+       if (ret != 0) {
+               return 0;
+       }
+
+       if (is_cmd23_enabled()) {
+               /* Set block count */
+               ret = mmc_send_cmd(MMC_CMD(23), size / MMC_BLOCK_SIZE,
+                                  MMC_RESPONSE_R(1), NULL);
+               if (ret != 0) {
+                       return 0;
+               }
+
+               cmd_idx = MMC_CMD(18);
+       } else {
+               if (size > MMC_BLOCK_SIZE) {
+                       cmd_idx = MMC_CMD(18);
+               } else {
+                       cmd_idx = MMC_CMD(17);
+               }
+       }
+
+       if (((mmc_ocr_value & OCR_ACCESS_MODE_MASK) == OCR_BYTE_MODE) &&
+           (mmc_dev_info->mmc_dev_type != MMC_IS_SD_HC)) {
+               cmd_arg = lba * MMC_BLOCK_SIZE;
+       } else {
+               cmd_arg = lba;
+       }
+
+       ret = mmc_send_cmd(cmd_idx, cmd_arg, MMC_RESPONSE_R(1), NULL);
+       if (ret != 0) {
+               return 0;
+       }
+
+       ret = ops->read(lba, buf, size);
+       if (ret != 0) {
+               return 0;
+       }
+
+       /* Wait buffer empty */
+       do {
+               ret = mmc_device_state();
+               if (ret < 0) {
+                       return 0;
+               }
+       } while ((ret != MMC_STATE_TRAN) && (ret != MMC_STATE_DATA));
+
+       if (!is_cmd23_enabled() && (size > MMC_BLOCK_SIZE)) {
+               ret = mmc_send_cmd(MMC_CMD(12), 0, 0, NULL);
+               if (ret != 0) {
+                       return 0;
+               }
+       }
+
+       return size;
+}
+
+size_t mmc_write_blocks(unsigned int lba, const uintptr_t buf, size_t size)
+{
+       int ret;
+       unsigned int cmd_idx, cmd_arg;
+
+       assert((ops != NULL) &&
+              (ops->write != NULL) &&
+              (size != 0U) &&
+              ((buf & MMC_BLOCK_MASK) == 0U) &&
+              ((size & MMC_BLOCK_MASK) == 0U));
+
+       ret = ops->prepare(lba, buf, size);
+       if (ret != 0) {
+               return 0;
+       }
+
+       if (is_cmd23_enabled()) {
+               /* Set block count */
+               ret = mmc_send_cmd(MMC_CMD(23), size / MMC_BLOCK_SIZE,
+                                  MMC_RESPONSE_R(1), NULL);
+               if (ret != 0) {
+                       return 0;
+               }
+
+               cmd_idx = MMC_CMD(25);
+       } else {
+               if (size > MMC_BLOCK_SIZE) {
+                       cmd_idx = MMC_CMD(25);
+               } else {
+                       cmd_idx = MMC_CMD(24);
+               }
+       }
+
+       if ((mmc_ocr_value & OCR_ACCESS_MODE_MASK) == OCR_BYTE_MODE) {
+               cmd_arg = lba * MMC_BLOCK_SIZE;
+       } else {
+               cmd_arg = lba;
+       }
+
+       ret = mmc_send_cmd(cmd_idx, cmd_arg, MMC_RESPONSE_R(1), NULL);
+       if (ret != 0) {
+               return 0;
+       }
+
+       ret = ops->write(lba, buf, size);
+       if (ret != 0) {
+               return 0;
+       }
+
+       /* Wait buffer empty */
+       do {
+               ret = mmc_device_state();
+               if (ret < 0) {
+                       return 0;
+               }
+       } while ((ret != MMC_STATE_TRAN) && (ret != MMC_STATE_RCV));
+
+       if (!is_cmd23_enabled() && (size > MMC_BLOCK_SIZE)) {
+               ret = mmc_send_cmd(MMC_CMD(12), 0, 0, NULL);
+               if (ret != 0) {
+                       return 0;
+               }
+       }
+
+       return size;
+}
+
+size_t mmc_erase_blocks(unsigned int lba, size_t size)
+{
+       int ret;
+
+       assert(ops != NULL);
+       assert((size != 0U) && ((size & MMC_BLOCK_MASK) == 0U));
+
+       ret = mmc_send_cmd(MMC_CMD(35), lba, MMC_RESPONSE_R(1), NULL);
+       if (ret != 0) {
+               return 0;
+       }
+
+       ret = mmc_send_cmd(MMC_CMD(36), lba + (size / MMC_BLOCK_SIZE) - 1U,
+                          MMC_RESPONSE_R(1), NULL);
+       if (ret != 0) {
+               return 0;
+       }
+
+       ret = mmc_send_cmd(MMC_CMD(38), lba, MMC_RESPONSE_R(0x1B), NULL);
+       if (ret != 0) {
+               return 0;
+       }
+
+       do {
+               ret = mmc_device_state();
+               if (ret < 0) {
+                       return 0;
+               }
+       } while (ret != MMC_STATE_TRAN);
+
+       return size;
+}
+
+static inline void mmc_rpmb_enable(void)
+{
+       mmc_set_ext_csd(CMD_EXTCSD_PARTITION_CONFIG,
+                       PART_CFG_BOOT_PARTITION1_ENABLE |
+                       PART_CFG_PARTITION1_ACCESS);
+}
+
+static inline void mmc_rpmb_disable(void)
+{
+       mmc_set_ext_csd(CMD_EXTCSD_PARTITION_CONFIG,
+                       PART_CFG_BOOT_PARTITION1_ENABLE);
+}
+
+size_t mmc_rpmb_read_blocks(unsigned int lba, uintptr_t buf, size_t size)
+{
+       size_t size_read;
+
+       mmc_rpmb_enable();
+       size_read = mmc_read_blocks(lba, buf, size);
+       mmc_rpmb_disable();
+
+       return size_read;
+}
+
+size_t mmc_rpmb_write_blocks(unsigned int lba, const uintptr_t buf, size_t size)
+{
+       size_t size_written;
+
+       mmc_rpmb_enable();
+       size_written = mmc_write_blocks(lba, buf, size);
+       mmc_rpmb_disable();
+
+       return size_written;
+}
+
+size_t mmc_rpmb_erase_blocks(unsigned int lba, size_t size)
+{
+       size_t size_erased;
+
+       mmc_rpmb_enable();
+       size_erased = mmc_erase_blocks(lba, size);
+       mmc_rpmb_disable();
+
+       return size_erased;
+}
+
+int mmc_init(const struct mmc_ops *ops_ptr, unsigned int clk,
+            unsigned int width, unsigned int flags,
+            struct mmc_device_info *device_info)
+{
+       assert((ops_ptr != NULL) &&
+              (ops_ptr->init != NULL) &&
+              (ops_ptr->send_cmd != NULL) &&
+              (ops_ptr->set_ios != NULL) &&
+              (ops_ptr->prepare != NULL) &&
+              (ops_ptr->read != NULL) &&
+              (ops_ptr->write != NULL) &&
+              (device_info != NULL) &&
+              (clk != 0) &&
+              ((width == MMC_BUS_WIDTH_1) ||
+               (width == MMC_BUS_WIDTH_4) ||
+               (width == MMC_BUS_WIDTH_8) ||
+               (width == MMC_BUS_WIDTH_DDR_4) ||
+               (width == MMC_BUS_WIDTH_DDR_8)));
+
+       ops = ops_ptr;
+       mmc_flags = flags;
+       mmc_dev_info = device_info;
+
+       return mmc_enumerate(clk, width);
+}
index 5b050455ceb993d555b30980655c4be58325b161..6e66ea99e04d105587e9304ad127ca06816152fe 100644 (file)
        .section \section_name, "ax"
        .align 7, 0
        .type \label, %function
-       .func \label
        .cfi_startproc
        \label:
        .endm
 
+       /*
+        * Pad the exception vector with bytes until it reaches its full size,
+        * which is always 32 instructions. If the vector contains more than
+        * 32 instructions, an error is emitted.
+        */
+       .macro end_vector_entry label
+       .cfi_endproc
+       .fill   \label + (32 * 4) - .
+       .endm
+
        /*
         * This macro verifies that the given vector doesn't exceed the
         * architectural limit of 32 instructions. This is meant to be placed
         * vector entry as the parameter
         */
        .macro check_vector_size since
-         .endfunc
-         .cfi_endproc
-         .if (. - \since) > (32 * 4)
-           .error "Vector exceeds 32 instructions"
-         .endif
+#if ERROR_DEPRECATED
+      .error "check_vector_size must not be used. Use end_vector_entry instead"
+#endif
+       end_vector_entry \since
        .endm
 
 #if ENABLE_PLAT_COMPAT
index ca8c1ad0fc225ca9d9b689b4ae17cc29050910b2..081addcc51eeea89214b1c0b6ab6e4ea0559f5c6 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -31,7 +31,6 @@
        .cfi_sections .debug_frame
        .section .text.asm.\_name, "ax"
        .type \_name, %function
-       .func \_name
        /*
         * .cfi_startproc and .cfi_endproc are needed to output entries in
         * .debug_frame
@@ -45,7 +44,6 @@
         * This macro is used to mark the end of a function.
         */
        .macro endfunc _name
-       .endfunc
        .cfi_endproc
        .size \_name, . - \_name
        .endm
index 3f0f84a1ba465fa97db6c4cecfbc82c22cdfb322..99f402c0842e9a37a68d4feebedd015a23913353 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -7,10 +7,12 @@
 #ifndef __DEBUG_H__
 #define __DEBUG_H__
 
-/* The log output macros print output to the console. These macros produce
+/*
+ * The log output macros print output to the console. These macros produce
  * compiled log output only if the LOG_LEVEL defined in the makefile (or the
  * make command line) is greater or equal than the level required for that
  * type of log output.
+ *
  * The format expected is the same as for printf(). For example:
  * INFO("Info %s.\n", "message")    -> INFO:    Info message.
  * WARN("Warning %s.\n", "message") -> WARNING: Warning message.
 #define LOG_MARKER_INFO                        "\x28"  /* 40 */
 #define LOG_MARKER_VERBOSE             "\x32"  /* 50 */
 
+/*
+ * If the log output is too low then this macro is used in place of tf_log()
+ * below. The intent is to get the compiler to evaluate the function call for
+ * type checking and format specifier correctness but let it optimize it out.
+ */
+#define no_tf_log(fmt, ...)                            \
+       do {                                            \
+               if (0) {                                \
+                       tf_log(fmt, ##__VA_ARGS__);     \
+               }                                       \
+       } while (0)
+
 #if LOG_LEVEL >= LOG_LEVEL_NOTICE
 # define NOTICE(...)   tf_log(LOG_MARKER_NOTICE __VA_ARGS__)
 #else
-# define NOTICE(...)
+# define NOTICE(...)   no_tf_log(LOG_MARKER_NOTICE __VA_ARGS__)
 #endif
 
 #if LOG_LEVEL >= LOG_LEVEL_ERROR
 # define ERROR(...)    tf_log(LOG_MARKER_ERROR __VA_ARGS__)
 #else
-# define ERROR(...)
+# define ERROR(...)    no_tf_log(LOG_MARKER_ERROR __VA_ARGS__)
 #endif
 
 #if LOG_LEVEL >= LOG_LEVEL_WARNING
 # define WARN(...)     tf_log(LOG_MARKER_WARNING __VA_ARGS__)
 #else
-# define WARN(...)
+# define WARN(...)     no_tf_log(LOG_MARKER_WARNING __VA_ARGS__)
 #endif
 
 #if LOG_LEVEL >= LOG_LEVEL_INFO
 # define INFO(...)     tf_log(LOG_MARKER_INFO __VA_ARGS__)
 #else
-# define INFO(...)
+# define INFO(...)     no_tf_log(LOG_MARKER_INFO __VA_ARGS__)
 #endif
 
 #if LOG_LEVEL >= LOG_LEVEL_VERBOSE
 # define VERBOSE(...)  tf_log(LOG_MARKER_VERBOSE __VA_ARGS__)
 #else
-# define VERBOSE(...)
+# define VERBOSE(...)  no_tf_log(LOG_MARKER_VERBOSE __VA_ARGS__)
 #endif
 
 void __dead2 do_panic(void);
diff --git a/include/drivers/marvell/a8k_i2c.h b/include/drivers/marvell/a8k_i2c.h
new file mode 100644 (file)
index 0000000..8a9abe8
--- /dev/null
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+/* This driver provides I2C support for Marvell A8K and compatible SoCs */
+
+#ifndef _A8K_I2C_H_
+#define _A8K_I2C_H_
+
+#include <stdint.h>
+
+/*
+ * Initialization, must be called once on start up, may be called
+ * repeatedly to change the speed and slave addresses.
+ */
+void i2c_init(void *i2c_base);
+
+/*
+ * Read/Write interface:
+ *   chip:    I2C chip address, range 0..127
+ *   addr:    Memory (register) address within the chip
+ *   alen:    Number of bytes to use for addr (typically 1, 2 for larger
+ *              memories, 0 for register type devices with only one
+ *              register)
+ *   buffer:  Where to read/write the data
+ *   len:     How many bytes to read/write
+ *
+ *   Returns: 0 on success, not 0 on failure
+ */
+int i2c_read(uint8_t chip,
+            unsigned int addr, int alen, uint8_t *buffer, int len);
+
+int i2c_write(uint8_t chip,
+             unsigned int addr, int alen, uint8_t *buffer, int len);
+#endif
diff --git a/include/drivers/marvell/addr_map.h b/include/drivers/marvell/addr_map.h
new file mode 100644 (file)
index 0000000..6b957a1
--- /dev/null
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+/* Address map types for Marvell address translation unit drivers */
+
+#ifndef _ADDR_MAP_H_
+#define _ADDR_MAP_H_
+
+#include <stdint.h>
+
+struct addr_map_win {
+       uint64_t base_addr;
+       uint64_t win_size;
+       uint32_t target_id;
+};
+
+#endif /* _ADDR_MAP_H_ */
diff --git a/include/drivers/marvell/amb_adec.h b/include/drivers/marvell/amb_adec.h
new file mode 100644 (file)
index 0000000..087864a
--- /dev/null
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+/* AXI to M-Bridge decoding unit driver for Marvell Armada 8K and 8K+ SoCs */
+
+#ifndef _AMB_ADEC_H_
+#define _AMB_ADEC_H_
+
+#include <stdint.h>
+
+enum amb_attribute_ids {
+       AMB_SPI0_CS0_ID = 0x1E,
+       AMB_SPI0_CS1_ID = 0x5E,
+       AMB_SPI0_CS2_ID = 0x9E,
+       AMB_SPI0_CS3_ID = 0xDE,
+       AMB_SPI1_CS0_ID = 0x1A,
+       AMB_SPI1_CS1_ID = 0x5A,
+       AMB_SPI1_CS2_ID = 0x9A,
+       AMB_SPI1_CS3_ID = 0xDA,
+       AMB_DEV_CS0_ID = 0x3E,
+       AMB_DEV_CS1_ID = 0x3D,
+       AMB_DEV_CS2_ID = 0x3B,
+       AMB_DEV_CS3_ID = 0x37,
+       AMB_BOOT_CS_ID = 0x2f,
+       AMB_BOOT_ROM_ID = 0x1D,
+};
+
+#define AMB_MAX_WIN_ID         7
+
+int init_amb_adec(uintptr_t base);
+
+#endif /* _AMB_ADEC_H_ */
diff --git a/include/drivers/marvell/aro.h b/include/drivers/marvell/aro.h
new file mode 100644 (file)
index 0000000..3627a20
--- /dev/null
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2017 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+#ifndef _ARO_H_
+#define _ARO_H_
+
+enum hws_freq {
+       CPU_FREQ_2000,
+       CPU_FREQ_1800,
+       CPU_FREQ_1600,
+       CPU_FREQ_1400,
+       CPU_FREQ_1300,
+       CPU_FREQ_1200,
+       CPU_FREQ_1000,
+       CPU_FREQ_600,
+       CPU_FREQ_800,
+       DDR_FREQ_LAST,
+       DDR_FREQ_SAR
+};
+
+enum cpu_clock_freq_mode {
+       CPU_2000_DDR_1200_RCLK_1200 = 0x0,
+       CPU_2000_DDR_1050_RCLK_1050 = 0x1,
+       CPU_1600_DDR_800_RCLK_800   = 0x4,
+       CPU_1800_DDR_1200_RCLK_1200 = 0x6,
+       CPU_1800_DDR_1050_RCLK_1050 = 0x7,
+       CPU_1600_DDR_900_RCLK_900   = 0x0B,
+       CPU_1600_DDR_1050_RCLK_1050 = 0x0D,
+       CPU_1600_DDR_900_RCLK_900_2 = 0x0E,
+       CPU_1000_DDR_650_RCLK_650   = 0x13,
+       CPU_1300_DDR_800_RCLK_800   = 0x14,
+       CPU_1300_DDR_650_RCLK_650   = 0x17,
+       CPU_1200_DDR_800_RCLK_800   = 0x19,
+       CPU_1400_DDR_800_RCLK_800   = 0x1a,
+       CPU_600_DDR_800_RCLK_800    = 0x1B,
+       CPU_800_DDR_800_RCLK_800    = 0x1C,
+       CPU_1000_DDR_800_RCLK_800   = 0x1D,
+       CPU_DDR_RCLK_INVALID
+};
+
+int init_aro(void);
+
+#endif /* _ARO_H_ */
diff --git a/include/drivers/marvell/cache_llc.h b/include/drivers/marvell/cache_llc.h
new file mode 100644 (file)
index 0000000..9e41793
--- /dev/null
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+/* The LLC driver is the Last Level Cache (L3C) driver
+ * for the Marvell AP806, AP807 and AP810 SoCs
+ */
+
+#ifndef _CACHE_LLC_H_
+#define _CACHE_LLC_H_
+
+#define LLC_CTRL(ap)                   (MVEBU_LLC_BASE(ap) + 0x100)
+#define LLC_SYNC(ap)                   (MVEBU_LLC_BASE(ap) + 0x700)
+#define L2X0_INV_WAY(ap)               (MVEBU_LLC_BASE(ap) + 0x77C)
+#define L2X0_CLEAN_WAY(ap)             (MVEBU_LLC_BASE(ap) + 0x7BC)
+#define L2X0_CLEAN_INV_WAY(ap)         (MVEBU_LLC_BASE(ap) + 0x7FC)
+#define LLC_TC0_LOCK(ap)               (MVEBU_LLC_BASE(ap) + 0x920)
+
+#define MASTER_LLC_CTRL                        LLC_CTRL(MVEBU_AP0)
+#define MASTER_L2X0_INV_WAY            L2X0_INV_WAY(MVEBU_AP0)
+#define MASTER_LLC_TC0_LOCK            LLC_TC0_LOCK(MVEBU_AP0)
+
+#define LLC_CTRL_EN                    1
+#define LLC_EXCLUSIVE_EN               0x100
+#define LLC_WAY_MASK                   0xFFFFFFFF
+
+#ifndef __ASSEMBLY__
+void llc_cache_sync(int ap_index);
+void llc_flush_all(int ap_index);
+void llc_clean_all(int ap_index);
+void llc_inv_all(int ap_index);
+void llc_disable(int ap_index);
+void llc_enable(int ap_index, int excl_mode);
+int llc_is_exclusive(int ap_index);
+void llc_runtime_enable(int ap_index);
+#endif
+
+#endif /* _CACHE_LLC_H_ */
+
diff --git a/include/drivers/marvell/ccu.h b/include/drivers/marvell/ccu.h
new file mode 100644 (file)
index 0000000..ff30a76
--- /dev/null
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+/* CCU unit device driver for Marvell AP806, AP807 and AP810 SoCs */
+
+#ifndef _CCU_H_
+#define _CCU_H_
+
+#ifndef __ASSEMBLY__
+#include <addr_map.h>
+#endif
+
+/* CCU registers definitions */
+#define CCU_WIN_CR_OFFSET(ap, win)             (MVEBU_CCU_BASE(ap) + 0x0 + \
+                                               (0x10 * win))
+#define CCU_TARGET_ID_OFFSET                   (8)
+#define CCU_TARGET_ID_MASK                     (0x7F)
+
+#define CCU_WIN_SCR_OFFSET(ap, win)            (MVEBU_CCU_BASE(ap) + 0x4 + \
+                                               (0x10 * win))
+#define CCU_WIN_ENA_WRITE_SECURE               (0x1)
+#define CCU_WIN_ENA_READ_SECURE                        (0x2)
+
+#define CCU_WIN_ALR_OFFSET(ap, win)            (MVEBU_CCU_BASE(ap) + 0x8 + \
+                                               (0x10 * win))
+#define CCU_WIN_AHR_OFFSET(ap, win)            (MVEBU_CCU_BASE(ap) + 0xC + \
+                                               (0x10 * win))
+
+#define CCU_WIN_GCR_OFFSET(ap)                 (MVEBU_CCU_BASE(ap) + 0xD0)
+#define CCU_GCR_TARGET_OFFSET                  (8)
+#define CCU_GCR_TARGET_MASK                    (0xFF)
+
+#define CCU_SRAM_WIN_CR                                CCU_WIN_CR_OFFSET(MVEBU_AP0, 1)
+
+#ifndef __ASSEMBLY__
+int init_ccu(int);
+void ccu_win_check(struct addr_map_win *win);
+void ccu_enable_win(int ap_index, struct addr_map_win *win, uint32_t win_id);
+void ccu_temp_win_insert(int ap_index, struct addr_map_win *win, int size);
+void ccu_temp_win_remove(int ap_index, struct addr_map_win *win, int size);
+void ccu_dram_win_config(int ap_index, struct addr_map_win *win);
+void ccu_dram_target_set(int ap_index, uint32_t target);
+void ccu_save_win_all(int ap_id);
+void ccu_restore_win_all(int ap_id);
+#endif
+
+#endif /* _CCU_H_ */
diff --git a/include/drivers/marvell/gwin.h b/include/drivers/marvell/gwin.h
new file mode 100644 (file)
index 0000000..5dc9f24
--- /dev/null
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+/* GWIN unit device driver for Marvell AP810 SoC */
+
+#ifndef _GWIN_H_
+#define _GWIN_H_
+
+#include <addr_map.h>
+
+int init_gwin(int ap_index);
+void gwin_temp_win_insert(int ap_index, struct addr_map_win *win, int size);
+void gwin_temp_win_remove(int ap_index, struct addr_map_win *win, int size);
+
+#endif /* _GWIN_H_ */
diff --git a/include/drivers/marvell/i2c.h b/include/drivers/marvell/i2c.h
new file mode 100644 (file)
index 0000000..bd14385
--- /dev/null
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#ifndef _I2C_H_
+#define _I2C_H_
+
+
+void i2c_init(void);
+
+int i2c_read(uint8_t chip,
+            unsigned int addr, int alen, uint8_t *buffer, int len);
+
+int i2c_write(uint8_t chip,
+             unsigned int addr, int alen, uint8_t *buffer, int len);
+#endif
diff --git a/include/drivers/marvell/io_win.h b/include/drivers/marvell/io_win.h
new file mode 100644 (file)
index 0000000..4102a11
--- /dev/null
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+/* IO Window unit device driver for Marvell AP806, AP807 and AP810 SoCs */
+
+#ifndef _IO_WIN_H_
+#define _IO_WIN_H_
+
+#include <addr_map.h>
+
+int init_io_win(int ap_index);
+void iow_temp_win_insert(int ap_index, struct addr_map_win *win, int size);
+void iow_temp_win_remove(int ap_index, struct addr_map_win *win, int size);
+void iow_save_win_all(int ap_id);
+void iow_restore_win_all(int ap_id);
+
+#endif /* _IO_WIN_H_ */
diff --git a/include/drivers/marvell/iob.h b/include/drivers/marvell/iob.h
new file mode 100644 (file)
index 0000000..9848c0a
--- /dev/null
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+/* IOB unit device driver for Marvell CP110 and CP115 SoCs */
+
+#ifndef _IOB_H_
+#define _IOB_H_
+
+#include <addr_map.h>
+
+enum target_ids_iob {
+       INTERNAL_TID    = 0x0,
+       MCI0_TID        = 0x1,
+       PEX1_TID        = 0x2,
+       PEX2_TID        = 0x3,
+       PEX0_TID        = 0x4,
+       NAND_TID        = 0x5,
+       RUNIT_TID       = 0x6,
+       MCI1_TID        = 0x7,
+       IOB_MAX_TID
+};
+
+int init_iob(uintptr_t base);
+void iob_cfg_space_update(int ap_idx, int cp_idx,
+                         uintptr_t base, uintptr_t new_base);
+
+#endif /* _IOB_H_ */
diff --git a/include/drivers/marvell/mci.h b/include/drivers/marvell/mci.h
new file mode 100644 (file)
index 0000000..789b3b9
--- /dev/null
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+/* MCI bus driver for Marvell ARMADA 8K and 8K+ SoCs */
+
+#ifndef _MCI_H_
+#define _MCI_H_
+
+int mci_initialize(int mci_index);
+void mci_turn_link_down(void);
+void mci_turn_link_on(void);
+int mci_get_link_status(void);
+
+#endif /* _MCI_H_ */
diff --git a/include/drivers/marvell/mochi/ap_setup.h b/include/drivers/marvell/mochi/ap_setup.h
new file mode 100644 (file)
index 0000000..41f2bac
--- /dev/null
@@ -0,0 +1,17 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+/* AP8xx Marvell SoC driver */
+
+#ifndef __AP_SETUP_H__
+#define __AP_SETUP_H__
+
+void ap_init(void);
+void ap_ble_init(void);
+int ap_get_count(void);
+
+#endif /* __AP_SETUP_H__ */
diff --git a/include/drivers/marvell/mochi/cp110_setup.h b/include/drivers/marvell/mochi/cp110_setup.h
new file mode 100644 (file)
index 0000000..1c88980
--- /dev/null
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+/* CP110 Marvell SoC driver */
+
+#ifndef __CP110_SETUP_H__
+#define __CP110_SETUP_H__
+
+#include <mmio.h>
+#include <mvebu_def.h>
+
+#define MVEBU_DEVICE_ID_REG            (MVEBU_CP_DFX_OFFSET + 0x40)
+#define MVEBU_DEVICE_ID_OFFSET         (0)
+#define MVEBU_DEVICE_ID_MASK           (0xffff << MVEBU_DEVICE_ID_OFFSET)
+#define MVEBU_DEVICE_REV_OFFSET                (16)
+#define MVEBU_DEVICE_REV_MASK          (0xf << MVEBU_DEVICE_REV_OFFSET)
+#define MVEBU_70X0_DEV_ID              (0x7040)
+#define MVEBU_70X0_CP115_DEV_ID                (0x7045)
+#define MVEBU_80X0_DEV_ID              (0x8040)
+#define MVEBU_80X0_CP115_DEV_ID                (0x8045)
+#define MVEBU_CP110_SA_DEV_ID          (0x110)
+#define MVEBU_CP110_REF_ID_A1          1
+#define MVEBU_CP110_REF_ID_A2          2
+#define MAX_STREAM_ID_PER_CP           (0x10)
+#define STREAM_ID_BASE                 (0x40)
+
+static inline uint32_t cp110_device_id_get(uintptr_t base)
+{
+       /* Returns:
+        * - MVEBU_70X0_DEV_ID for A70X0 family
+        * - MVEBU_80X0_DEV_ID for A80X0 family
+        * - MVEBU_CP110_SA_DEV_ID for a CP110 connected stand-alone
+        */
+       return (mmio_read_32(base + MVEBU_DEVICE_ID_REG) >>
+               MVEBU_DEVICE_ID_OFFSET) &
+               MVEBU_DEVICE_ID_MASK;
+}
+
+static inline uint32_t cp110_rev_id_get(uintptr_t base)
+{
+       return (mmio_read_32(base + MVEBU_DEVICE_ID_REG) &
+               MVEBU_DEVICE_REV_MASK) >>
+               MVEBU_DEVICE_REV_OFFSET;
+}
+
+void cp110_init(uintptr_t cp110_base, uint32_t stream_id);
+void cp110_ble_init(uintptr_t cp110_base);
+
+#endif /* __CP110_SETUP_H__ */
diff --git a/include/drivers/marvell/thermal.h b/include/drivers/marvell/thermal.h
new file mode 100644 (file)
index 0000000..191f97b
--- /dev/null
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:    BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+/* Driver for thermal unit located in Marvell ARMADA 8K and compatible SoCs */
+
+#ifndef _THERMAL_H
+#define _THERMAL_H
+
+struct tsen_config {
+       /* thermal temperature parameters */
+       int tsen_offset;
+       int tsen_gain;
+       int tsen_divisor;
+       /* thermal data */
+       int tsen_ready;
+       void *regs_base;
+       /* thermal functionality */
+       int (*ptr_tsen_probe)(struct tsen_config *cfg);
+       int (*ptr_tsen_read)(struct tsen_config *cfg, int *temp);
+};
+
+/* Thermal driver APIs */
+int marvell_thermal_init(struct tsen_config *tsen_cfg);
+int marvell_thermal_read(struct tsen_config *tsen_cfg, int *temp);
+struct tsen_config *marvell_thermal_config_get(void);
+
+#endif /* _THERMAL_H */
diff --git a/include/drivers/mmc.h b/include/drivers/mmc.h
new file mode 100644 (file)
index 0000000..65f4bbd
--- /dev/null
@@ -0,0 +1,222 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __MMC_H__
+#define __MMC_H__
+
+#include <stdint.h>
+#include <utils_def.h>
+
+#define MMC_BLOCK_SIZE                 U(512)
+#define MMC_BLOCK_MASK                 (MMC_BLOCK_SIZE - U(1))
+#define MMC_BOOT_CLK_RATE              (400 * 1000)
+
+#define MMC_CMD(_x)                    U(_x)
+
+#define MMC_ACMD(_x)                   U(_x)
+
+#define OCR_POWERUP                    BIT(31)
+#define OCR_HCS                                BIT(30)
+#define OCR_BYTE_MODE                  (U(0) << 29)
+#define OCR_SECTOR_MODE                        (U(2) << 29)
+#define OCR_ACCESS_MODE_MASK           (U(3) << 29)
+#define OCR_3_5_3_6                    BIT(23)
+#define OCR_3_4_3_5                    BIT(22)
+#define OCR_3_3_3_4                    BIT(21)
+#define OCR_3_2_3_3                    BIT(20)
+#define OCR_3_1_3_2                    BIT(19)
+#define OCR_3_0_3_1                    BIT(18)
+#define OCR_2_9_3_0                    BIT(17)
+#define OCR_2_8_2_9                    BIT(16)
+#define OCR_2_7_2_8                    BIT(15)
+#define OCR_VDD_MIN_2V7                        GENMASK(23, 15)
+#define OCR_VDD_MIN_2V0                        GENMASK(14, 8)
+#define OCR_VDD_MIN_1V7                        BIT(7)
+
+#define MMC_RESPONSE_R(_x)             U(_x)
+
+/* Value arbitrarily chosen for the eMMC RCA; it must be > 1 */
+#define MMC_FIX_RCA                    6
+#define RCA_SHIFT_OFFSET               16
+
+#define CMD_EXTCSD_PARTITION_CONFIG    179
+#define CMD_EXTCSD_BUS_WIDTH           183
+#define CMD_EXTCSD_HS_TIMING           185
+#define CMD_EXTCSD_SEC_CNT             212
+
+#define PART_CFG_BOOT_PARTITION1_ENABLE        (U(1) << 3)
+#define PART_CFG_PARTITION1_ACCESS     (U(1) << 0)
+
+/* Values in EXT CSD register */
+#define MMC_BUS_WIDTH_1                        U(0)
+#define MMC_BUS_WIDTH_4                        U(1)
+#define MMC_BUS_WIDTH_8                        U(2)
+#define MMC_BUS_WIDTH_DDR_4            U(5)
+#define MMC_BUS_WIDTH_DDR_8            U(6)
+#define MMC_BOOT_MODE_BACKWARD         (U(0) << 3)
+#define MMC_BOOT_MODE_HS_TIMING                (U(1) << 3)
+#define MMC_BOOT_MODE_DDR              (U(2) << 3)
+
+#define EXTCSD_SET_CMD                 (U(0) << 24)
+#define EXTCSD_SET_BITS                        (U(1) << 24)
+#define EXTCSD_CLR_BITS                        (U(2) << 24)
+#define EXTCSD_WRITE_BYTES             (U(3) << 24)
+#define EXTCSD_CMD(x)                  (((x) & 0xff) << 16)
+#define EXTCSD_VALUE(x)                        (((x) & 0xff) << 8)
+#define EXTCSD_CMD_SET_NORMAL          U(1)
+
+#define CSD_TRAN_SPEED_UNIT_MASK       GENMASK(2, 0)
+#define CSD_TRAN_SPEED_MULT_MASK       GENMASK(6, 3)
+#define CSD_TRAN_SPEED_MULT_SHIFT      3
+
+#define STATUS_CURRENT_STATE(x)                (((x) & 0xf) << 9)
+#define STATUS_READY_FOR_DATA          BIT(8)
+#define STATUS_SWITCH_ERROR            BIT(7)
+#define MMC_GET_STATE(x)               (((x) >> 9) & 0xf)
+#define MMC_STATE_IDLE                 0
+#define MMC_STATE_READY                        1
+#define MMC_STATE_IDENT                        2
+#define MMC_STATE_STBY                 3
+#define MMC_STATE_TRAN                 4
+#define MMC_STATE_DATA                 5
+#define MMC_STATE_RCV                  6
+#define MMC_STATE_PRG                  7
+#define MMC_STATE_DIS                  8
+#define MMC_STATE_BTST                 9
+#define MMC_STATE_SLP                  10
+
+#define MMC_FLAG_CMD23                 (U(1) << 0)
+
+#define CMD8_CHECK_PATTERN             U(0xAA)
+#define VHS_2_7_3_6_V                  BIT(8)
+
+#define SD_SCR_BUS_WIDTH_1             BIT(8)
+#define SD_SCR_BUS_WIDTH_4             BIT(10)
+
+struct mmc_cmd {
+       unsigned int    cmd_idx;
+       unsigned int    cmd_arg;
+       unsigned int    resp_type;
+       unsigned int    resp_data[4];
+};
+
+struct mmc_ops {
+       void (*init)(void);
+       int (*send_cmd)(struct mmc_cmd *cmd);
+       int (*set_ios)(unsigned int clk, unsigned int width);
+       int (*prepare)(int lba, uintptr_t buf, size_t size);
+       int (*read)(int lba, uintptr_t buf, size_t size);
+       int (*write)(int lba, const uintptr_t buf, size_t size);
+};
+
+struct mmc_csd_emmc {
+       unsigned int            not_used:               1;
+       unsigned int            crc:                    7;
+       unsigned int            ecc:                    2;
+       unsigned int            file_format:            2;
+       unsigned int            tmp_write_protect:      1;
+       unsigned int            perm_write_protect:     1;
+       unsigned int            copy:                   1;
+       unsigned int            file_format_grp:        1;
+
+       unsigned int            reserved_1:             5;
+       unsigned int            write_bl_partial:       1;
+       unsigned int            write_bl_len:           4;
+       unsigned int            r2w_factor:             3;
+       unsigned int            default_ecc:            2;
+       unsigned int            wp_grp_enable:          1;
+
+       unsigned int            wp_grp_size:            5;
+       unsigned int            erase_grp_mult:         5;
+       unsigned int            erase_grp_size:         5;
+       unsigned int            c_size_mult:            3;
+       unsigned int            vdd_w_curr_max:         3;
+       unsigned int            vdd_w_curr_min:         3;
+       unsigned int            vdd_r_curr_max:         3;
+       unsigned int            vdd_r_curr_min:         3;
+       unsigned int            c_size_low:             2;
+
+       unsigned int            c_size_high:            10;
+       unsigned int            reserved_2:             2;
+       unsigned int            dsr_imp:                1;
+       unsigned int            read_blk_misalign:      1;
+       unsigned int            write_blk_misalign:     1;
+       unsigned int            read_bl_partial:        1;
+       unsigned int            read_bl_len:            4;
+       unsigned int            ccc:                    12;
+
+       unsigned int            tran_speed:             8;
+       unsigned int            nsac:                   8;
+       unsigned int            taac:                   8;
+       unsigned int            reserved_3:             2;
+       unsigned int            spec_vers:              4;
+       unsigned int            csd_structure:          2;
+};
+
+struct mmc_csd_sd_v2 {
+       unsigned int            not_used:               1;
+       unsigned int            crc:                    7;
+       unsigned int            reserved_1:             2;
+       unsigned int            file_format:            2;
+       unsigned int            tmp_write_protect:      1;
+       unsigned int            perm_write_protect:     1;
+       unsigned int            copy:                   1;
+       unsigned int            file_format_grp:        1;
+
+       unsigned int            reserved_2:             5;
+       unsigned int            write_bl_partial:       1;
+       unsigned int            write_bl_len:           4;
+       unsigned int            r2w_factor:             3;
+       unsigned int            reserved_3:             2;
+       unsigned int            wp_grp_enable:          1;
+
+       unsigned int            wp_grp_size:            7;
+       unsigned int            sector_size:            7;
+       unsigned int            erase_block_en:         1;
+       unsigned int            reserved_4:             1;
+       unsigned int            c_size_low:             16;
+
+       unsigned int            c_size_high:            6;
+       unsigned int            reserved_5:             6;
+       unsigned int            dsr_imp:                1;
+       unsigned int            read_blk_misalign:      1;
+       unsigned int            write_blk_misalign:     1;
+       unsigned int            read_bl_partial:        1;
+       unsigned int            read_bl_len:            4;
+       unsigned int            ccc:                    12;
+
+       unsigned int            tran_speed:             8;
+       unsigned int            nsac:                   8;
+       unsigned int            taac:                   8;
+       unsigned int            reserved_6:             6;
+       unsigned int            csd_structure:          2;
+};
+
+enum mmc_device_type {
+       MMC_IS_EMMC,
+       MMC_IS_SD,
+       MMC_IS_SD_HC,
+};
+
+struct mmc_device_info {
+       unsigned long long      device_size;    /* Size of device in bytes */
+       unsigned int            block_size;     /* Block size in bytes */
+       unsigned int            max_bus_freq;   /* Max bus freq in Hz */
+       enum mmc_device_type    mmc_dev_type;   /* Type of MMC */
+};
+
+size_t mmc_read_blocks(unsigned int lba, uintptr_t buf, size_t size);
+size_t mmc_write_blocks(unsigned int lba, const uintptr_t buf, size_t size);
+size_t mmc_erase_blocks(unsigned int lba, size_t size);
+size_t mmc_rpmb_read_blocks(unsigned int lba, uintptr_t buf, size_t size);
+size_t mmc_rpmb_write_blocks(unsigned int lba, const uintptr_t buf,
+                            size_t size);
+size_t mmc_rpmb_erase_blocks(unsigned int lba, size_t size);
+int mmc_init(const struct mmc_ops *ops_ptr, unsigned int clk,
+            unsigned int width, unsigned int flags,
+            struct mmc_device_info *device_info);
+
+#endif /* __MMC_H__ */
index a940b63b59c5ea324e5626cb66d409fc712457f0..a5366495e8ae9a7c5f35e58a5c979c2180a677b8 100644 (file)
 /*******************************************************************************
  * MIDR bit definitions
  ******************************************************************************/
-#define MIDR_IMPL_MASK         0xff
-#define MIDR_IMPL_SHIFT                24
-#define MIDR_VAR_SHIFT         20
-#define MIDR_VAR_BITS          4
-#define MIDR_REV_SHIFT         0
-#define MIDR_REV_BITS          4
-#define MIDR_PN_MASK           0xfff
-#define MIDR_PN_SHIFT          4
+#define MIDR_IMPL_MASK         U(0xff)
+#define MIDR_IMPL_SHIFT                U(24)
+#define MIDR_VAR_SHIFT         U(20)
+#define MIDR_VAR_BITS          U(4)
+#define MIDR_REV_SHIFT         U(0)
+#define MIDR_REV_BITS          U(4)
+#define MIDR_PN_MASK           U(0xfff)
+#define MIDR_PN_SHIFT          U(4)
 
 /*******************************************************************************
  * MPIDR macros
  ******************************************************************************/
-#define MPIDR_MT_MASK          (1 << 24)
+#define MPIDR_MT_MASK          (U(1) << 24)
 #define MPIDR_CPU_MASK         MPIDR_AFFLVL_MASK
 #define MPIDR_CLUSTER_MASK     (MPIDR_AFFLVL_MASK << MPIDR_AFFINITY_BITS)
-#define MPIDR_AFFINITY_BITS    8
-#define MPIDR_AFFLVL_MASK      0xff
-#define MPIDR_AFFLVL_SHIFT     3
-#define MPIDR_AFF0_SHIFT       0
-#define MPIDR_AFF1_SHIFT       8
-#define MPIDR_AFF2_SHIFT       16
-#define MPIDR_AFFINITY_MASK    0x00ffffff
-#define MPIDR_AFFLVL0          0
-#define MPIDR_AFFLVL1          1
-#define MPIDR_AFFLVL2          2
+#define MPIDR_AFFINITY_BITS    U(8)
+#define MPIDR_AFFLVL_MASK      U(0xff)
+#define MPIDR_AFFLVL_SHIFT     U(3)
+#define MPIDR_AFF0_SHIFT       U(0)
+#define MPIDR_AFF1_SHIFT       U(8)
+#define MPIDR_AFF2_SHIFT       U(16)
+#define MPIDR_AFFINITY_MASK    U(0x00ffffff)
+#define MPIDR_AFFLVL0          U(0)
+#define MPIDR_AFFLVL1          U(1)
+#define MPIDR_AFFLVL2          U(2)
 
 #define MPIDR_AFFLVL0_VAL(mpidr) \
                (((mpidr) >> MPIDR_AFF0_SHIFT) & MPIDR_AFFLVL_MASK)
                (((mpidr) >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK)
 #define MPIDR_AFFLVL2_VAL(mpidr) \
                (((mpidr) >> MPIDR_AFF2_SHIFT) & MPIDR_AFFLVL_MASK)
-#define MPIDR_AFFLVL3_VAL(mpidr)       0
+#define MPIDR_AFFLVL3_VAL(mpidr)       U(0)
 
 /*
  * The MPIDR_MAX_AFFLVL count starts from 0. Take care to
  * add one while using this macro to define array sizes.
  */
-#define MPIDR_MAX_AFFLVL       2
+#define MPIDR_MAX_AFFLVL       U(2)
 
 /* Data Cache set/way op type defines */
-#define DC_OP_ISW                      0x0
-#define DC_OP_CISW                     0x1
-#define DC_OP_CSW                      0x2
+#define DC_OP_ISW                      U(0x0)
+#define DC_OP_CISW                     U(0x1)
+#define DC_OP_CSW                      U(0x2)
 
 /*******************************************************************************
  * Generic timer memory mapped registers & offsets
  ******************************************************************************/
-#define CNTCR_OFF                      0x000
-#define CNTFID_OFF                     0x020
+#define CNTCR_OFF                      U(0x000)
+#define CNTFID_OFF                     U(0x020)
 
-#define CNTCR_EN                       (1 << 0)
-#define CNTCR_HDBG                     (1 << 1)
+#define CNTCR_EN                       (U(1) << 0)
+#define CNTCR_HDBG                     (U(1) << 1)
 #define CNTCR_FCREQ(x)                 ((x) << 8)
 
 /*******************************************************************************
  * System register bit definitions
  ******************************************************************************/
 /* CLIDR definitions */
-#define LOUIS_SHIFT            21
-#define LOC_SHIFT              24
-#define CLIDR_FIELD_WIDTH      3
+#define LOUIS_SHIFT            U(21)
+#define LOC_SHIFT              U(24)
+#define CLIDR_FIELD_WIDTH      U(3)
 
 /* CSSELR definitions */
-#define LEVEL_SHIFT            1
+#define LEVEL_SHIFT            U(1)
 
 /* ID_PFR0 definitions */
 #define ID_PFR0_AMU_SHIFT      U(20)
 #define ID_PFR0_AMU_MASK       U(0xf)
 
 /* ID_PFR1 definitions */
-#define ID_PFR1_VIRTEXT_SHIFT  12
-#define ID_PFR1_VIRTEXT_MASK   0xf
+#define ID_PFR1_VIRTEXT_SHIFT  U(12)
+#define ID_PFR1_VIRTEXT_MASK   U(0xf)
 #define GET_VIRT_EXT(id)       (((id) >> ID_PFR1_VIRTEXT_SHIFT) \
                                 & ID_PFR1_VIRTEXT_MASK)
-#define ID_PFR1_GIC_SHIFT      28
-#define ID_PFR1_GIC_MASK       0xf
+#define ID_PFR1_GIC_SHIFT      U(28)
+#define ID_PFR1_GIC_MASK       U(0xf)
 
 /* SCTLR definitions */
-#define SCTLR_RES1_DEF         ((1 << 23) | (1 << 22) | (1 << 4) | (1 << 3))
+#define SCTLR_RES1_DEF         ((U(1) << 23) | (U(1) << 22) | (U(1) << 4) | \
+                                (U(1) << 3))
 #if ARM_ARCH_MAJOR == 7
 #define SCTLR_RES1             SCTLR_RES1_DEF
 #else
-#define SCTLR_RES1             (SCTLR_RES1_DEF | (1 << 11))
+#define SCTLR_RES1             (SCTLR_RES1_DEF | (U(1) << 11))
 #endif
-#define SCTLR_M_BIT            (1 << 0)
-#define SCTLR_A_BIT            (1 << 1)
-#define SCTLR_C_BIT            (1 << 2)
-#define SCTLR_CP15BEN_BIT      (1 << 5)
-#define SCTLR_ITD_BIT          (1 << 7)
-#define SCTLR_Z_BIT            (1 << 11)
-#define SCTLR_I_BIT            (1 << 12)
-#define SCTLR_V_BIT            (1 << 13)
-#define SCTLR_RR_BIT           (1 << 14)
-#define SCTLR_NTWI_BIT         (1 << 16)
-#define SCTLR_NTWE_BIT         (1 << 18)
-#define SCTLR_WXN_BIT          (1 << 19)
-#define SCTLR_UWXN_BIT         (1 << 20)
-#define SCTLR_EE_BIT           (1 << 25)
-#define SCTLR_TRE_BIT          (1 << 28)
-#define SCTLR_AFE_BIT          (1 << 29)
-#define SCTLR_TE_BIT           (1 << 30)
+#define SCTLR_M_BIT            (U(1) << 0)
+#define SCTLR_A_BIT            (U(1) << 1)
+#define SCTLR_C_BIT            (U(1) << 2)
+#define SCTLR_CP15BEN_BIT      (U(1) << 5)
+#define SCTLR_ITD_BIT          (U(1) << 7)
+#define SCTLR_Z_BIT            (U(1) << 11)
+#define SCTLR_I_BIT            (U(1) << 12)
+#define SCTLR_V_BIT            (U(1) << 13)
+#define SCTLR_RR_BIT           (U(1) << 14)
+#define SCTLR_NTWI_BIT         (U(1) << 16)
+#define SCTLR_NTWE_BIT         (U(1) << 18)
+#define SCTLR_WXN_BIT          (U(1) << 19)
+#define SCTLR_UWXN_BIT         (U(1) << 20)
+#define SCTLR_EE_BIT           (U(1) << 25)
+#define SCTLR_TRE_BIT          (U(1) << 28)
+#define SCTLR_AFE_BIT          (U(1) << 29)
+#define SCTLR_TE_BIT           (U(1) << 30)
 #define SCTLR_RESET_VAL         (SCTLR_RES1 | SCTLR_NTWE_BIT |         \
                                SCTLR_NTWI_BIT | SCTLR_CP15BEN_BIT)
 
 /* SDCR definitions */
 #define SDCR_SPD(x)            ((x) << 14)
-#define SDCR_SPD_LEGACY                0x0
-#define SDCR_SPD_DISABLE       0x2
-#define SDCR_SPD_ENABLE                0x3
-#define SDCR_RESET_VAL         0x0
+#define SDCR_SPD_LEGACY                U(0x0)
+#define SDCR_SPD_DISABLE       U(0x2)
+#define SDCR_SPD_ENABLE                U(0x3)
+#define SDCR_RESET_VAL         U(0x0)
 
 #if !ERROR_DEPRECATED
 #define SDCR_DEF_VAL           SDCR_SPD(SDCR_SPD_DISABLE)
 #endif
 
 /* HSCTLR definitions */
-#define HSCTLR_RES1    ((1 << 29) | (1 << 28) | (1 << 23) | (1 << 22)  \
-                       | (1 << 18) | (1 << 16) | (1 << 11) | (1 << 4)  \
-                       | (1 << 3))
-#define HSCTLR_M_BIT           (1 << 0)
-#define HSCTLR_A_BIT           (1 << 1)
-#define HSCTLR_C_BIT           (1 << 2)
-#define HSCTLR_CP15BEN_BIT     (1 << 5)
-#define HSCTLR_ITD_BIT         (1 << 7)
-#define HSCTLR_SED_BIT         (1 << 8)
-#define HSCTLR_I_BIT           (1 << 12)
-#define HSCTLR_WXN_BIT         (1 << 19)
-#define HSCTLR_EE_BIT          (1 << 25)
-#define HSCTLR_TE_BIT          (1 << 30)
+#define HSCTLR_RES1    ((U(1) << 29) | (U(1) << 28) | (U(1) << 23) | \
+                        (U(1) << 22) | (U(1) << 18) | (U(1) << 16) | \
+                        (U(1) << 11) | (U(1) << 4) | (U(1) << 3))
+
+#define HSCTLR_M_BIT           (U(1) << 0)
+#define HSCTLR_A_BIT           (U(1) << 1)
+#define HSCTLR_C_BIT           (U(1) << 2)
+#define HSCTLR_CP15BEN_BIT     (U(1) << 5)
+#define HSCTLR_ITD_BIT         (U(1) << 7)
+#define HSCTLR_SED_BIT         (U(1) << 8)
+#define HSCTLR_I_BIT           (U(1) << 12)
+#define HSCTLR_WXN_BIT         (U(1) << 19)
+#define HSCTLR_EE_BIT          (U(1) << 25)
+#define HSCTLR_TE_BIT          (U(1) << 30)
 
 /* CPACR definitions */
-#define CPACR_FPEN(x)  ((x) << 20)
-#define CPACR_FP_TRAP_PL0      0x1
-#define CPACR_FP_TRAP_ALL      0x2
-#define CPACR_FP_TRAP_NONE     0x3
+#define CPACR_FPEN(x)          ((x) << 20)
+#define CPACR_FP_TRAP_PL0      U(0x1)
+#define CPACR_FP_TRAP_ALL      U(0x2)
+#define CPACR_FP_TRAP_NONE     U(0x3)
 
 /* SCR definitions */
-#define SCR_TWE_BIT            (1 << 13)
-#define SCR_TWI_BIT            (1 << 12)
-#define SCR_SIF_BIT            (1 << 9)
-#define SCR_HCE_BIT            (1 << 8)
-#define SCR_SCD_BIT            (1 << 7)
-#define SCR_NET_BIT            (1 << 6)
-#define SCR_AW_BIT             (1 << 5)
-#define SCR_FW_BIT             (1 << 4)
-#define SCR_EA_BIT             (1 << 3)
-#define SCR_FIQ_BIT            (1 << 2)
-#define SCR_IRQ_BIT            (1 << 1)
-#define SCR_NS_BIT             (1 << 0)
-#define SCR_VALID_BIT_MASK     0x33ff
-#define SCR_RESET_VAL          0x0
+#define SCR_TWE_BIT            (U(1) << 13)
+#define SCR_TWI_BIT            (U(1) << 12)
+#define SCR_SIF_BIT            (U(1) << 9)
+#define SCR_HCE_BIT            (U(1) << 8)
+#define SCR_SCD_BIT            (U(1) << 7)
+#define SCR_NET_BIT            (U(1) << 6)
+#define SCR_AW_BIT             (U(1) << 5)
+#define SCR_FW_BIT             (U(1) << 4)
+#define SCR_EA_BIT             (U(1) << 3)
+#define SCR_FIQ_BIT            (U(1) << 2)
+#define SCR_IRQ_BIT            (U(1) << 1)
+#define SCR_NS_BIT             (U(1) << 0)
+#define SCR_VALID_BIT_MASK     U(0x33ff)
+#define SCR_RESET_VAL          U(0x0)
 
 #define GET_NS_BIT(scr)                ((scr) & SCR_NS_BIT)
 
 /* HCR definitions */
-#define HCR_AMO_BIT            (1 << 5)
-#define HCR_IMO_BIT            (1 << 4)
-#define HCR_FMO_BIT            (1 << 3)
-#define HCR_RESET_VAL          0x0
+#define HCR_AMO_BIT            (U(1) << 5)
+#define HCR_IMO_BIT            (U(1) << 4)
+#define HCR_FMO_BIT            (U(1) << 3)
+#define HCR_RESET_VAL          U(0x0)
 
 /* CNTHCTL definitions */
-#define CNTHCTL_RESET_VAL      0x0
-#define PL1PCEN_BIT            (1 << 1)
-#define PL1PCTEN_BIT           (1 << 0)
+#define CNTHCTL_RESET_VAL      U(0x0)
+#define PL1PCEN_BIT            (U(1) << 1)
+#define PL1PCTEN_BIT           (U(1) << 0)
 
 /* CNTKCTL definitions */
-#define PL0PTEN_BIT            (1 << 9)
-#define PL0VTEN_BIT            (1 << 8)
-#define PL0PCTEN_BIT           (1 << 0)
-#define PL0VCTEN_BIT           (1 << 1)
-#define EVNTEN_BIT             (1 << 2)
-#define EVNTDIR_BIT            (1 << 3)
-#define EVNTI_SHIFT            4
-#define EVNTI_MASK             0xf
+#define PL0PTEN_BIT            (U(1) << 9)
+#define PL0VTEN_BIT            (U(1) << 8)
+#define PL0PCTEN_BIT           (U(1) << 0)
+#define PL0VCTEN_BIT           (U(1) << 1)
+#define EVNTEN_BIT             (U(1) << 2)
+#define EVNTDIR_BIT            (U(1) << 3)
+#define EVNTI_SHIFT            U(4)
+#define EVNTI_MASK             U(0xf)
 
 /* HCPTR definitions */
-#define HCPTR_RES1             ((1 << 13) | (1<<12) | 0x3ff)
-#define TCPAC_BIT              (1 << 31)
-#define TAM_BIT                        (1 << 30)
-#define TTA_BIT                        (1 << 20)
-#define TCP11_BIT              (1 << 10)
-#define TCP10_BIT              (1 << 10)
+#define HCPTR_RES1             ((U(1) << 13) | (U(1) << 12) | U(0x3ff))
+#define TCPAC_BIT              (U(1) << 31)
+#define TAM_BIT                        (U(1) << 30)
+#define TTA_BIT                        (U(1) << 20)
+#define TCP11_BIT              (U(1) << 11)
+#define TCP10_BIT              (U(1) << 10)
 #define HCPTR_RESET_VAL                HCPTR_RES1
 
 /* VTTBR definitions */
 #define VTTBR_RESET_VAL                ULL(0x0)
 #define VTTBR_VMID_MASK                ULL(0xff)
-#define VTTBR_VMID_SHIFT       48
-#define VTTBR_BADDR_MASK       0xffffffffffff
-#define VTTBR_BADDR_SHIFT      0
+#define VTTBR_VMID_SHIFT       U(48)
+#define VTTBR_BADDR_MASK       ULL(0xffffffffffff)
+#define VTTBR_BADDR_SHIFT      U(0)
 
 /* HDCR definitions */
-#define HDCR_RESET_VAL         0x0
+#define HDCR_RESET_VAL         U(0x0)
 
 /* HSTR definitions */
-#define HSTR_RESET_VAL         0x0
+#define HSTR_RESET_VAL         U(0x0)
 
 /* CNTHP_CTL definitions */
-#define CNTHP_CTL_RESET_VAL    0x0
+#define CNTHP_CTL_RESET_VAL    U(0x0)
 
 /* NASCR definitions */
-#define NSASEDIS_BIT           (1 << 15)
-#define NSTRCDIS_BIT           (1 << 20)
+#define NSASEDIS_BIT           (U(1) << 15)
+#define NSTRCDIS_BIT           (U(1) << 20)
 /* NOTE: the NASCR_* names below are misspellings of NSACR_*; they are kept
  * only for backward compatibility while !ERROR_DEPRECATED. */
 #if !ERROR_DEPRECATED
-#define NASCR_CP11_BIT         (1 << 11)
-#define NASCR_CP10_BIT         (1 << 10)
+#define NASCR_CP11_BIT         (U(1) << 11)
+#define NASCR_CP10_BIT         (U(1) << 10)
 #endif
-#define NSACR_CP11_BIT         (1 << 11)
-#define NSACR_CP10_BIT         (1 << 10)
-#define NSACR_IMP_DEF_MASK     (0x7 << 16)
+#define NSACR_CP11_BIT         (U(1) << 11)
+#define NSACR_CP10_BIT         (U(1) << 10)
+#define NSACR_IMP_DEF_MASK     (U(0x7) << 16)
 #define NSACR_ENABLE_FP_ACCESS (NSACR_CP11_BIT | NSACR_CP10_BIT)
-#define NSACR_RESET_VAL                0x0
+#define NSACR_RESET_VAL                U(0x0)
 
 /* CPACR definitions */
-#define ASEDIS_BIT             (1 << 31)
-#define TRCDIS_BIT             (1 << 28)
-#define CPACR_CP11_SHIFT       22
-#define CPACR_CP10_SHIFT       20
-#define CPACR_ENABLE_FP_ACCESS (0x3 << CPACR_CP11_SHIFT |\
-                                       0x3 << CPACR_CP10_SHIFT)
-#define CPACR_RESET_VAL         0x0
+#define ASEDIS_BIT             (U(1) << 31)
+#define TRCDIS_BIT             (U(1) << 28)
+#define CPACR_CP11_SHIFT       U(22)
+#define CPACR_CP10_SHIFT       U(20)
+#define CPACR_ENABLE_FP_ACCESS ((U(0x3) << CPACR_CP11_SHIFT) |\
+                                (U(0x3) << CPACR_CP10_SHIFT))
+#define CPACR_RESET_VAL         U(0x0)
 
 /* FPEXC definitions */
-#define FPEXC_RES1             ((1 << 10) | (1 << 9) | (1 << 8))
-#define FPEXC_EN_BIT           (1 << 30)
+#define FPEXC_RES1             ((U(1) << 10) | (U(1) << 9) | (U(1) << 8))
+#define FPEXC_EN_BIT           (U(1) << 30)
 #define FPEXC_RESET_VAL                FPEXC_RES1
 
 /* SPSR/CPSR definitions */
-#define SPSR_FIQ_BIT           (1 << 0)
-#define SPSR_IRQ_BIT           (1 << 1)
-#define SPSR_ABT_BIT           (1 << 2)
-#define SPSR_AIF_SHIFT         6
-#define SPSR_AIF_MASK          0x7
+#define SPSR_FIQ_BIT           (U(1) << 0)
+#define SPSR_IRQ_BIT           (U(1) << 1)
+#define SPSR_ABT_BIT           (U(1) << 2)
+#define SPSR_AIF_SHIFT         U(6)
+#define SPSR_AIF_MASK          U(0x7)
 
-#define SPSR_E_SHIFT           9
-#define SPSR_E_MASK            0x1
-#define SPSR_E_LITTLE          0
-#define SPSR_E_BIG             1
+#define SPSR_E_SHIFT           U(9)
+#define SPSR_E_MASK            U(0x1)
+#define SPSR_E_LITTLE          U(0)
+#define SPSR_E_BIG             U(1)
 
-#define SPSR_T_SHIFT           5
-#define SPSR_T_MASK            0x1
-#define SPSR_T_ARM             0
-#define SPSR_T_THUMB           1
-
-#define SPSR_MODE_SHIFT                0
-#define SPSR_MODE_MASK         0x7
+#define SPSR_T_SHIFT           U(5)
+#define SPSR_T_MASK            U(0x1)
+#define SPSR_T_ARM             U(0)
+#define SPSR_T_THUMB           U(1)
 
+#define SPSR_MODE_SHIFT                U(0)
+#define SPSR_MODE_MASK         U(0x7)
 
 #define DISABLE_ALL_EXCEPTIONS \
                (SPSR_FIQ_BIT | SPSR_IRQ_BIT | SPSR_ABT_BIT)
  * TTBCR definitions
  */
 /* The ARM Trusted Firmware uses the long descriptor format */
-#define TTBCR_EAE_BIT          (1 << 31)
-
-#define TTBCR_SH1_NON_SHAREABLE                (0x0 << 28)
-#define TTBCR_SH1_OUTER_SHAREABLE      (0x2 << 28)
-#define TTBCR_SH1_INNER_SHAREABLE      (0x3 << 28)
-
-#define TTBCR_RGN1_OUTER_NC    (0x0 << 26)
-#define TTBCR_RGN1_OUTER_WBA   (0x1 << 26)
-#define TTBCR_RGN1_OUTER_WT    (0x2 << 26)
-#define TTBCR_RGN1_OUTER_WBNA  (0x3 << 26)
-
-#define TTBCR_RGN1_INNER_NC    (0x0 << 24)
-#define TTBCR_RGN1_INNER_WBA   (0x1 << 24)
-#define TTBCR_RGN1_INNER_WT    (0x2 << 24)
-#define TTBCR_RGN1_INNER_WBNA  (0x3 << 24)
-
-#define TTBCR_EPD1_BIT         (1 << 23)
-#define TTBCR_A1_BIT           (1 << 22)
-
-#define TTBCR_T1SZ_SHIFT       16
-#define TTBCR_T1SZ_MASK                (0x7)
-#define TTBCR_TxSZ_MIN         0
-#define TTBCR_TxSZ_MAX         7
-
-#define TTBCR_SH0_NON_SHAREABLE                (0x0 << 12)
-#define TTBCR_SH0_OUTER_SHAREABLE      (0x2 << 12)
-#define TTBCR_SH0_INNER_SHAREABLE      (0x3 << 12)
-
-#define TTBCR_RGN0_OUTER_NC    (0x0 << 10)
-#define TTBCR_RGN0_OUTER_WBA   (0x1 << 10)
-#define TTBCR_RGN0_OUTER_WT    (0x2 << 10)
-#define TTBCR_RGN0_OUTER_WBNA  (0x3 << 10)
-
-#define TTBCR_RGN0_INNER_NC    (0x0 << 8)
-#define TTBCR_RGN0_INNER_WBA   (0x1 << 8)
-#define TTBCR_RGN0_INNER_WT    (0x2 << 8)
-#define TTBCR_RGN0_INNER_WBNA  (0x3 << 8)
-
-#define TTBCR_EPD0_BIT         (1 << 7)
-#define TTBCR_T0SZ_SHIFT       0
-#define TTBCR_T0SZ_MASK                (0x7)
-
-#define MODE_RW_SHIFT          0x4
-#define MODE_RW_MASK           0x1
-#define MODE_RW_32             0x1
-
-#define MODE32_SHIFT           0
-#define MODE32_MASK            0x1f
-#define MODE32_usr             0x10
-#define MODE32_fiq             0x11
-#define MODE32_irq             0x12
-#define MODE32_svc             0x13
-#define MODE32_mon             0x16
-#define MODE32_abt             0x17
-#define MODE32_hyp             0x1a
-#define MODE32_und             0x1b
-#define MODE32_sys             0x1f
+#define TTBCR_EAE_BIT          (U(1) << 31)
+
+#define TTBCR_SH1_NON_SHAREABLE                (U(0x0) << 28)
+#define TTBCR_SH1_OUTER_SHAREABLE      (U(0x2) << 28)
+#define TTBCR_SH1_INNER_SHAREABLE      (U(0x3) << 28)
+
+#define TTBCR_RGN1_OUTER_NC    (U(0x0) << 26)
+#define TTBCR_RGN1_OUTER_WBA   (U(0x1) << 26)
+#define TTBCR_RGN1_OUTER_WT    (U(0x2) << 26)
+#define TTBCR_RGN1_OUTER_WBNA  (U(0x3) << 26)
+
+#define TTBCR_RGN1_INNER_NC    (U(0x0) << 24)
+#define TTBCR_RGN1_INNER_WBA   (U(0x1) << 24)
+#define TTBCR_RGN1_INNER_WT    (U(0x2) << 24)
+#define TTBCR_RGN1_INNER_WBNA  (U(0x3) << 24)
+
+#define TTBCR_EPD1_BIT         (U(1) << 23)
+#define TTBCR_A1_BIT           (U(1) << 22)
+
+#define TTBCR_T1SZ_SHIFT       U(16)
+#define TTBCR_T1SZ_MASK                U(0x7)
+#define TTBCR_TxSZ_MIN         U(0)
+#define TTBCR_TxSZ_MAX         U(7)
+
+#define TTBCR_SH0_NON_SHAREABLE                (U(0x0) << 12)
+#define TTBCR_SH0_OUTER_SHAREABLE      (U(0x2) << 12)
+#define TTBCR_SH0_INNER_SHAREABLE      (U(0x3) << 12)
+
+#define TTBCR_RGN0_OUTER_NC    (U(0x0) << 10)
+#define TTBCR_RGN0_OUTER_WBA   (U(0x1) << 10)
+#define TTBCR_RGN0_OUTER_WT    (U(0x2) << 10)
+#define TTBCR_RGN0_OUTER_WBNA  (U(0x3) << 10)
+
+#define TTBCR_RGN0_INNER_NC    (U(0x0) << 8)
+#define TTBCR_RGN0_INNER_WBA   (U(0x1) << 8)
+#define TTBCR_RGN0_INNER_WT    (U(0x2) << 8)
+#define TTBCR_RGN0_INNER_WBNA  (U(0x3) << 8)
+
+#define TTBCR_EPD0_BIT         (U(1) << 7)
+#define TTBCR_T0SZ_SHIFT       U(0)
+#define TTBCR_T0SZ_MASK                U(0x7)
+
+#define MODE_RW_SHIFT          U(0x4)
+#define MODE_RW_MASK           U(0x1)
+#define MODE_RW_32             U(0x1)
+
+#define MODE32_SHIFT           U(0)
+#define MODE32_MASK            U(0x1f)
+#define MODE32_usr             U(0x10)
+#define MODE32_fiq             U(0x11)
+#define MODE32_irq             U(0x12)
+#define MODE32_svc             U(0x13)
+#define MODE32_mon             U(0x16)
+#define MODE32_abt             U(0x17)
+#define MODE32_hyp             U(0x1a)
+#define MODE32_und             U(0x1b)
+#define MODE32_sys             U(0x1f)
 
 #define GET_M32(mode)          (((mode) >> MODE32_SHIFT) & MODE32_MASK)
 
 /*
  * TTBR definitions
  */
-#define TTBR_CNP_BIT           U(0x1)
+#define TTBR_CNP_BIT           ULL(0x1)
 
 /*
  * CTR definitions
  */
-#define CTR_CWG_SHIFT          24
-#define CTR_CWG_MASK           0xf
-#define CTR_ERG_SHIFT          20
-#define CTR_ERG_MASK           0xf
-#define CTR_DMINLINE_SHIFT     16
-#define CTR_DMINLINE_WIDTH     4
-#define CTR_DMINLINE_MASK      ((1 << 4) - 1)
-#define CTR_L1IP_SHIFT         14
-#define CTR_L1IP_MASK          0x3
-#define CTR_IMINLINE_SHIFT     0
-#define CTR_IMINLINE_MASK      0xf
-
-#define MAX_CACHE_LINE_SIZE    0x800 /* 2KB */
+#define CTR_CWG_SHIFT          U(24)
+#define CTR_CWG_MASK           U(0xf)
+#define CTR_ERG_SHIFT          U(20)
+#define CTR_ERG_MASK           U(0xf)
+#define CTR_DMINLINE_SHIFT     U(16)
+#define CTR_DMINLINE_WIDTH     U(4)
+#define CTR_DMINLINE_MASK      ((U(1) << 4) - U(1))
+#define CTR_L1IP_SHIFT         U(14)
+#define CTR_L1IP_MASK          U(0x3)
+#define CTR_IMINLINE_SHIFT     U(0)
+#define CTR_IMINLINE_MASK      U(0xf)
+
+#define MAX_CACHE_LINE_SIZE    U(0x800) /* 2KB */
 
 /* PMCR definitions */
-#define PMCR_N_SHIFT           11
-#define PMCR_N_MASK            0x1f
+#define PMCR_N_SHIFT           U(11)
+#define PMCR_N_MASK            U(0x1f)
 #define PMCR_N_BITS            (PMCR_N_MASK << PMCR_N_SHIFT)
-#define PMCR_LC_BIT            (1 << 6)
-#define PMCR_DP_BIT            (1 << 5)
+#define PMCR_LC_BIT            (U(1) << 6)
+#define PMCR_DP_BIT            (U(1) << 5)
 
 /*******************************************************************************
  * Definitions of register offsets, fields and macros for CPU system
  * instructions.
  ******************************************************************************/
 
-#define TLBI_ADDR_SHIFT                0
-#define TLBI_ADDR_MASK         0xFFFFF000
+#define TLBI_ADDR_SHIFT                U(0)
+#define TLBI_ADDR_MASK         U(0xFFFFF000)
 #define TLBI_ADDR(x)           (((x) >> TLBI_ADDR_SHIFT) & TLBI_ADDR_MASK)
 
 /*******************************************************************************
  * system level implementation of the Generic Timer.
  ******************************************************************************/
 #define CNTCTLBASE_CNTFRQ      U(0x0)
-#define CNTNSAR                        0x4
+#define CNTNSAR                        U(0x4)
 #define CNTNSAR_NS_SHIFT(x)    (x)
 
-#define CNTACR_BASE(x)         (0x40 + ((x) << 2))
-#define CNTACR_RPCT_SHIFT      0x0
-#define CNTACR_RVCT_SHIFT      0x1
-#define CNTACR_RFRQ_SHIFT      0x2
-#define CNTACR_RVOFF_SHIFT     0x3
-#define CNTACR_RWVT_SHIFT      0x4
-#define CNTACR_RWPT_SHIFT      0x5
+#define CNTACR_BASE(x)         (U(0x40) + ((x) << 2))
+#define CNTACR_RPCT_SHIFT      U(0x0)
+#define CNTACR_RVCT_SHIFT      U(0x1)
+#define CNTACR_RFRQ_SHIFT      U(0x2)
+#define CNTACR_RVOFF_SHIFT     U(0x3)
+#define CNTACR_RWVT_SHIFT      U(0x4)
+#define CNTACR_RWPT_SHIFT      U(0x5)
 
 /*******************************************************************************
  * Definitions of register offsets in the CNTBaseN Frame of the
 
 /* MAIR macros */
 #define MAIR0_ATTR_SET(attr, index)    ((attr) << ((index) << 3))
-#define MAIR1_ATTR_SET(attr, index)    ((attr) << (((index) - 3) << 3))
+#define MAIR1_ATTR_SET(attr, index)    ((attr) << (((index) - U(3)) << 3))
 
 /* System register defines The format is: coproc, opt1, CRn, CRm, opt2 */
 #define SCR            p15, 0, c1, c1, 0
 #define MAIR_NORM_WB_NTR_RA    U(0xe)
 #define MAIR_NORM_WB_NTR_RWA   U(0xf)
 
-#define MAIR_NORM_OUTER_SHIFT  4
+#define MAIR_NORM_OUTER_SHIFT  U(4)
 
-#define MAKE_MAIR_NORMAL_MEMORY(inner, outer)  ((inner) | ((outer) << MAIR_NORM_OUTER_SHIFT))
+#define MAKE_MAIR_NORMAL_MEMORY(inner, outer)  \
+               ((inner) | ((outer) << MAIR_NORM_OUTER_SHIFT))
 
 /*******************************************************************************
  * Definitions for system register interface to AMU for ARMv8.4 onwards
index 7cc4b23774aae40a4c8ed3681fa7ba117fed0174..7385b5d060e7caf7c0fe2bb29d85ddcca7516fff 100644 (file)
 /*******************************************************************************
  * MPIDR macros
  ******************************************************************************/
-#define MPIDR_MT_MASK          (U(1) << 24)
+#define MPIDR_MT_MASK          (ULL(1) << 24)
 #define MPIDR_CPU_MASK         MPIDR_AFFLVL_MASK
 #define MPIDR_CLUSTER_MASK     (MPIDR_AFFLVL_MASK << MPIDR_AFFINITY_BITS)
 #define MPIDR_AFFINITY_BITS    U(8)
-#define MPIDR_AFFLVL_MASK      U(0xff)
+#define MPIDR_AFFLVL_MASK      ULL(0xff)
 #define MPIDR_AFF0_SHIFT       U(0)
 #define MPIDR_AFF1_SHIFT       U(8)
 #define MPIDR_AFF2_SHIFT       U(16)
 #define MPIDR_AFF3_SHIFT       U(32)
-#define MPIDR_AFFINITY_MASK    U(0xff00ffffff)
+#define MPIDR_AFFINITY_MASK    ULL(0xff00ffffff)
 #define MPIDR_AFFLVL_SHIFT     U(3)
-#define MPIDR_AFFLVL0          U(0)
-#define MPIDR_AFFLVL1          U(1)
-#define MPIDR_AFFLVL2          U(2)
-#define MPIDR_AFFLVL3          U(3)
+#define MPIDR_AFFLVL0          ULL(0x0)
+#define MPIDR_AFFLVL1          ULL(0x1)
+#define MPIDR_AFFLVL2          ULL(0x2)
+#define MPIDR_AFFLVL3          ULL(0x3)
 #define MPIDR_AFFLVL0_VAL(mpidr) \
-               ((mpidr >> MPIDR_AFF0_SHIFT) & MPIDR_AFFLVL_MASK)
+               (((mpidr) >> MPIDR_AFF0_SHIFT) & MPIDR_AFFLVL_MASK)
 #define MPIDR_AFFLVL1_VAL(mpidr) \
-               ((mpidr >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK)
+               (((mpidr) >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK)
 #define MPIDR_AFFLVL2_VAL(mpidr) \
-               ((mpidr >> MPIDR_AFF2_SHIFT) & MPIDR_AFFLVL_MASK)
+               (((mpidr) >> MPIDR_AFF2_SHIFT) & MPIDR_AFFLVL_MASK)
 #define MPIDR_AFFLVL3_VAL(mpidr) \
-               ((mpidr >> MPIDR_AFF3_SHIFT) & MPIDR_AFFLVL_MASK)
+               (((mpidr) >> MPIDR_AFF3_SHIFT) & MPIDR_AFFLVL_MASK)
 /*
  * The MPIDR_MAX_AFFLVL count starts from 0. Take care to
  * add one while using this macro to define array sizes.
@@ -57,7 +57,7 @@
 #define MPIDR_MAX_AFFLVL       U(2)
 
 /* Constant to highlight the assumption that MPIDR allocation starts from 0 */
-#define FIRST_MPIDR            U(0)
+#define FIRST_MPIDR            ULL(0)
 
 /*******************************************************************************
  * Definitions for CPU system register interface to GICv3
 /* CSSELR definitions */
 #define LEVEL_SHIFT            U(1)
 
-/* D$ set/way op type defines */
+/* Data cache set/way op type defines */
 #define DCISW                  U(0x0)
 #define DCCISW                 U(0x1)
 #define DCCSW                  U(0x2)
 #define ID_AA64PFR0_EL3_SHIFT  U(12)
 #define ID_AA64PFR0_AMU_SHIFT  U(44)
 #define ID_AA64PFR0_AMU_LENGTH U(4)
-#define ID_AA64PFR0_AMU_MASK   U(0xf)
-#define ID_AA64PFR0_ELX_MASK   U(0xf)
+#define ID_AA64PFR0_AMU_MASK   ULL(0xf)
+#define ID_AA64PFR0_ELX_MASK   ULL(0xf)
 #define ID_AA64PFR0_SVE_SHIFT  U(32)
-#define ID_AA64PFR0_SVE_MASK   U(0xf)
+#define ID_AA64PFR0_SVE_MASK   ULL(0xf)
 #define ID_AA64PFR0_SVE_LENGTH U(4)
 #define ID_AA64PFR0_CSV2_SHIFT U(56)
-#define ID_AA64PFR0_CSV2_MASK  U(0xf)
+#define ID_AA64PFR0_CSV2_MASK  ULL(0xf)
 #define ID_AA64PFR0_CSV2_LENGTH        U(4)
 
 /* ID_AA64DFR0_EL1.PMS definitions (for ARMv8.2+) */
 #define ID_AA64DFR0_PMS_SHIFT  U(32)
 #define ID_AA64DFR0_PMS_LENGTH U(4)
-#define ID_AA64DFR0_PMS_MASK   U(0xf)
+#define ID_AA64DFR0_PMS_MASK   ULL(0xf)
 
-#define EL_IMPL_NONE           U(0)
-#define EL_IMPL_A64ONLY                U(1)
-#define EL_IMPL_A64_A32                U(2)
+#define EL_IMPL_NONE           ULL(0)
+#define EL_IMPL_A64ONLY                ULL(1)
+#define EL_IMPL_A64_A32                ULL(2)
 
 #define ID_AA64PFR0_GIC_SHIFT  U(24)
 #define ID_AA64PFR0_GIC_WIDTH  U(4)
-#define ID_AA64PFR0_GIC_MASK   ((U(1) << ID_AA64PFR0_GIC_WIDTH) - 1)
+#define ID_AA64PFR0_GIC_MASK   ((ULL(1) << ID_AA64PFR0_GIC_WIDTH) - ULL(1))
 
 /* ID_AA64MMFR0_EL1 definitions */
 #define ID_AA64MMFR0_EL1_PARANGE_SHIFT U(0)
-#define ID_AA64MMFR0_EL1_PARANGE_MASK  U(0xf)
+#define ID_AA64MMFR0_EL1_PARANGE_MASK  ULL(0xf)
 
 #define PARANGE_0000   U(32)
 #define PARANGE_0001   U(36)
 #define PARANGE_0110   U(52)
 
 #define ID_AA64MMFR0_EL1_TGRAN4_SHIFT          U(28)
-#define ID_AA64MMFR0_EL1_TGRAN4_MASK           U(0xf)
-#define ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED      U(0x0)
-#define ID_AA64MMFR0_EL1_TGRAN4_NOT_SUPPORTED  U(0xf)
+#define ID_AA64MMFR0_EL1_TGRAN4_MASK           ULL(0xf)
+#define ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED      ULL(0x0)
+#define ID_AA64MMFR0_EL1_TGRAN4_NOT_SUPPORTED  ULL(0xf)
 
 #define ID_AA64MMFR0_EL1_TGRAN64_SHIFT         U(24)
-#define ID_AA64MMFR0_EL1_TGRAN64_MASK          U(0xf)
-#define ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED     U(0x0)
-#define ID_AA64MMFR0_EL1_TGRAN64_NOT_SUPPORTED U(0xf)
+#define ID_AA64MMFR0_EL1_TGRAN64_MASK          ULL(0xf)
+#define ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED     ULL(0x0)
+#define ID_AA64MMFR0_EL1_TGRAN64_NOT_SUPPORTED ULL(0xf)
 
 #define ID_AA64MMFR0_EL1_TGRAN16_SHIFT         U(20)
-#define ID_AA64MMFR0_EL1_TGRAN16_MASK          U(0xf)
-#define ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED     U(0x1)
-#define ID_AA64MMFR0_EL1_TGRAN16_NOT_SUPPORTED U(0x0)
+#define ID_AA64MMFR0_EL1_TGRAN16_MASK          ULL(0xf)
+#define ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED     ULL(0x1)
+#define ID_AA64MMFR0_EL1_TGRAN16_NOT_SUPPORTED ULL(0x0)
 
 /* ID_PFR1_EL1 definitions */
 #define ID_PFR1_VIRTEXT_SHIFT  U(12)
 #define ID_PFR1_VIRTEXT_MASK   U(0xf)
-#define GET_VIRT_EXT(id)       ((id >> ID_PFR1_VIRTEXT_SHIFT) \
+#define GET_VIRT_EXT(id)       (((id) >> ID_PFR1_VIRTEXT_SHIFT) \
                                 & ID_PFR1_VIRTEXT_MASK)
 
 /* SCTLR definitions */
 /* HCR definitions */
 #define HCR_RW_SHIFT           U(31)
 #define HCR_RW_BIT             (ULL(1) << HCR_RW_SHIFT)
-#define HCR_AMO_BIT            (U(1) << 5)
-#define HCR_IMO_BIT            (U(1) << 4)
-#define HCR_FMO_BIT            (U(1) << 3)
+#define HCR_AMO_BIT            (ULL(1) << 5)
+#define HCR_IMO_BIT            (ULL(1) << 4)
+#define HCR_FMO_BIT            (ULL(1) << 3)
 
 /* ISR definitions */
 #define ISR_A_SHIFT            U(8)
 #define TCR_EL1_IPS_SHIFT      U(32)
 #define TCR_EL3_PS_SHIFT       U(16)
 
-#define TCR_TxSZ_MIN           U(16)
-#define TCR_TxSZ_MAX           U(39)
+#define TCR_TxSZ_MIN           ULL(16)
+#define TCR_TxSZ_MAX           ULL(39)
 
 /* (internal) physical address size bits in EL3/EL1 */
-#define TCR_PS_BITS_4GB                U(0x0)
-#define TCR_PS_BITS_64GB       U(0x1)
-#define TCR_PS_BITS_1TB                U(0x2)
-#define TCR_PS_BITS_4TB                U(0x3)
-#define TCR_PS_BITS_16TB       U(0x4)
-#define TCR_PS_BITS_256TB      U(0x5)
+#define TCR_PS_BITS_4GB                ULL(0x0)
+#define TCR_PS_BITS_64GB       ULL(0x1)
+#define TCR_PS_BITS_1TB                ULL(0x2)
+#define TCR_PS_BITS_4TB                ULL(0x3)
+#define TCR_PS_BITS_16TB       ULL(0x4)
+#define TCR_PS_BITS_256TB      ULL(0x5)
 
 #define ADDR_MASK_48_TO_63     ULL(0xFFFF000000000000)
 #define ADDR_MASK_44_TO_47     ULL(0x0000F00000000000)
 #define ADDR_MASK_36_TO_39     ULL(0x000000F000000000)
 #define ADDR_MASK_32_TO_35     ULL(0x0000000F00000000)
 
-#define TCR_RGN_INNER_NC       (U(0x0) << 8)
-#define TCR_RGN_INNER_WBA      (U(0x1) << 8)
-#define TCR_RGN_INNER_WT       (U(0x2) << 8)
-#define TCR_RGN_INNER_WBNA     (U(0x3) << 8)
+#define TCR_RGN_INNER_NC       (ULL(0x0) << 8)
+#define TCR_RGN_INNER_WBA      (ULL(0x1) << 8)
+#define TCR_RGN_INNER_WT       (ULL(0x2) << 8)
+#define TCR_RGN_INNER_WBNA     (ULL(0x3) << 8)
 
-#define TCR_RGN_OUTER_NC       (U(0x0) << 10)
-#define TCR_RGN_OUTER_WBA      (U(0x1) << 10)
-#define TCR_RGN_OUTER_WT       (U(0x2) << 10)
-#define TCR_RGN_OUTER_WBNA     (U(0x3) << 10)
+#define TCR_RGN_OUTER_NC       (ULL(0x0) << 10)
+#define TCR_RGN_OUTER_WBA      (ULL(0x1) << 10)
+#define TCR_RGN_OUTER_WT       (ULL(0x2) << 10)
+#define TCR_RGN_OUTER_WBNA     (ULL(0x3) << 10)
 
-#define TCR_SH_NON_SHAREABLE   (U(0x0) << 12)
-#define TCR_SH_OUTER_SHAREABLE (U(0x2) << 12)
-#define TCR_SH_INNER_SHAREABLE (U(0x3) << 12)
+#define TCR_SH_NON_SHAREABLE   (ULL(0x0) << 12)
+#define TCR_SH_OUTER_SHAREABLE (ULL(0x2) << 12)
+#define TCR_SH_INNER_SHAREABLE (ULL(0x3) << 12)
 
 #define TCR_TG0_SHIFT          U(14)
-#define TCR_TG0_MASK           U(3)
+#define TCR_TG0_MASK           ULL(3)
 #define TCR_TG0_4K             (ULL(0) << TCR_TG0_SHIFT)
 #define TCR_TG0_64K            (ULL(1) << TCR_TG0_SHIFT)
 #define TCR_TG0_16K            (ULL(2) << TCR_TG0_SHIFT)
 
-#define TCR_EPD0_BIT           (U(1) << 7)
-#define TCR_EPD1_BIT           (U(1) << 23)
+#define TCR_EPD0_BIT           (ULL(1) << 7)
+#define TCR_EPD1_BIT           (ULL(1) << 23)
 
 #define MODE_SP_SHIFT          U(0x0)
 #define MODE_SP_MASK           U(0x1)
 /*
  * TTBR Definitions
  */
-#define TTBR_CNP_BIT           0x1
+#define TTBR_CNP_BIT           ULL(0x1)
 
 /*
  * CTR_EL0 definitions
 #define MAIR_NORM_WB_NTR_RA    ULL(0xe)
 #define MAIR_NORM_WB_NTR_RWA   ULL(0xf)
 
-#define MAIR_NORM_OUTER_SHIFT  4
+#define MAIR_NORM_OUTER_SHIFT  U(4)
 
-#define MAKE_MAIR_NORMAL_MEMORY(inner, outer)  ((inner) | ((outer) << MAIR_NORM_OUTER_SHIFT))
+#define MAKE_MAIR_NORMAL_MEMORY(inner, outer)  \
+               ((inner) | ((outer) << MAIR_NORM_OUTER_SHIFT))
 
 /* PAR_EL1 fields */
-#define PAR_F_SHIFT    0
-#define PAR_F_MASK     1
-#define PAR_ADDR_SHIFT 12
-#define PAR_ADDR_MASK  (BIT(40) - 1) /* 40-bits-wide page address */
+#define PAR_F_SHIFT    U(0)
+#define PAR_F_MASK     ULL(0x1)
+#define PAR_ADDR_SHIFT U(12)
+#define PAR_ADDR_MASK  (BIT(40) - ULL(1)) /* 40-bits-wide page address */
 
 /*******************************************************************************
  * Definitions for system register interface to SPE
  * RAS system registers
  *******************************************************************************/
 #define DISR_EL1               S3_0_C12_C1_1
-#define DISR_A_BIT             31
+#define DISR_A_BIT             U(31)
 
 #define ERRIDR_EL1             S3_0_C5_C3_0
-#define ERRIDR_MASK            0xffff
+#define ERRIDR_MASK            U(0xffff)
 
 #define ERRSELR_EL1            S3_0_C5_C3_1
 
index 0f3a5728be6552bcc6c402e1a27564c32996db21..7703be339dc4459d0ada7f6d59e5c90a4b28bfcf 100644 (file)
 # define REPORT_ERRATA 0
 #endif
 
-       /*
-        * Define the offsets to the fields in cpu_ops structure.
-        */
-       .struct 0
-CPU_MIDR: /* cpu_ops midr */
-       .space  4
-/* Reset fn is needed during reset */
-#if defined(IMAGE_AT_EL3)
-CPU_RESET_FUNC: /* cpu_ops reset_func */
-       .space  4
+
+       .equ    CPU_MIDR_SIZE, CPU_WORD_SIZE
+       .equ    CPU_RESET_FUNC_SIZE, CPU_WORD_SIZE
+       .equ    CPU_PWR_DWN_OPS_SIZE, CPU_WORD_SIZE * CPU_MAX_PWR_DWN_OPS
+       .equ    CPU_ERRATA_FUNC_SIZE, CPU_WORD_SIZE
+       .equ    CPU_ERRATA_LOCK_SIZE, CPU_WORD_SIZE
+       .equ    CPU_ERRATA_PRINTED_SIZE, CPU_WORD_SIZE
+
+#ifndef IMAGE_AT_EL3
+       .equ    CPU_RESET_FUNC_SIZE, 0
 #endif
-#ifdef IMAGE_BL32 /* The power down core and cluster is needed only in BL32 */
-CPU_PWR_DWN_OPS: /* cpu_ops power down functions */
-       .space  (4 * CPU_MAX_PWR_DWN_OPS)
+
+/* The power down core and cluster is needed only in BL32 */
+#ifndef IMAGE_BL32
+       .equ    CPU_PWR_DWN_OPS_SIZE, 0
 #endif
 
-/*
- * Fields required to print errata status. Only in BL32 that the printing
- * require mutual exclusion and printed flag.
- */
-#if REPORT_ERRATA
-CPU_ERRATA_FUNC: /* CPU errata status printing function */
-       .space  4
-#if defined(IMAGE_BL32)
-CPU_ERRATA_LOCK:
-       .space  4
-CPU_ERRATA_PRINTED:
-       .space  4
+/* Fields required to print errata status */
+#if !REPORT_ERRATA
+       .equ    CPU_ERRATA_FUNC_SIZE, 0
 #endif
+
+/* Only BL32 requires mutual exclusion and printed flag. */
+#if !(REPORT_ERRATA && defined(IMAGE_BL32))
+       .equ    CPU_ERRATA_LOCK_SIZE, 0
+       .equ    CPU_ERRATA_PRINTED_SIZE, 0
 #endif
 
-CPU_OPS_SIZE = .
+
+/*
+ * Define the offsets to the fields in cpu_ops structure.
+ * Every offset is defined based on the offset and size of the previous
+ * field.
+ */
+       .equ    CPU_MIDR, 0
+       .equ    CPU_RESET_FUNC, CPU_MIDR + CPU_MIDR_SIZE
+       .equ    CPU_PWR_DWN_OPS, CPU_RESET_FUNC + CPU_RESET_FUNC_SIZE
+       .equ    CPU_ERRATA_FUNC, CPU_PWR_DWN_OPS + CPU_PWR_DWN_OPS_SIZE
+       .equ    CPU_ERRATA_LOCK, CPU_ERRATA_FUNC + CPU_ERRATA_FUNC_SIZE
+       .equ    CPU_ERRATA_PRINTED, CPU_ERRATA_LOCK + CPU_ERRATA_LOCK_SIZE
+       .equ    CPU_OPS_SIZE, CPU_ERRATA_PRINTED + CPU_ERRATA_PRINTED_SIZE
 
        /*
         * Write given expressions as words
@@ -128,21 +137,8 @@ CPU_OPS_SIZE = .
        .word \_resetfunc
 #endif
 #ifdef IMAGE_BL32
-1:
        /* Insert list of functions */
        fill_constants CPU_MAX_PWR_DWN_OPS, \_power_down_ops
-2:
-       /*
-        * Error if no or more than CPU_MAX_PWR_DWN_OPS were specified in the
-        * list
-        */
-       .ifeq 2b - 1b
-         .error "At least one power down function must be specified"
-       .else
-         .iflt 2b - 1b - (CPU_MAX_PWR_DWN_OPS * CPU_WORD_SIZE)
-           .error "More than CPU_MAX_PWR_DWN_OPS functions specified"
-         .endif
-       .endif
 #endif
 
 #if REPORT_ERRATA
index 9f1847061ded40eefdd9610cfbca90227c9d26ab..f5ca2ee7a41b44ad6ecd1c203fdeaca7b3d5f03a 100644 (file)
 #define CORTEX_A72_CPUACTLR_EL1_DCC_AS_DCCI                    (ULL(1) << 44)
 #define CORTEX_A72_CPUACTLR_EL1_DIS_INSTR_PREFETCH             (ULL(1) << 32)
 
+/*******************************************************************************
+ *  L2 Auxiliary Control register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A72_L2ACTLR_EL1                                 S3_1_C15_C0_0
+
+#define CORTEX_A72_L2ACTLR_ENABLE_UNIQUE_CLEAN                 (ULL(1) << 14)
+
 /*******************************************************************************
  * L2 Control register specific definitions.
  ******************************************************************************/
diff --git a/include/lib/cpus/aarch64/cortex_deimos.h b/include/lib/cpus/aarch64/cortex_deimos.h
new file mode 100644 (file)
index 0000000..3c36567
--- /dev/null
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __CORTEX_DEIMOS_H__
+#define __CORTEX_DEIMOS_H__
+
+#define CORTEX_DEIMOS_MIDR                                     U(0x410FD0D0)
+
+/*******************************************************************************
+ * CPU Extended Control register specific definitions.
+ ******************************************************************************/
+#define CORTEX_DEIMOS_CPUECTLR_EL1                             S3_0_C15_C1_4
+
+/*******************************************************************************
+ * CPU Power Control register specific definitions.
+ ******************************************************************************/
+#define CORTEX_DEIMOS_CPUPWRCTLR_EL1                           S3_0_C15_C2_7
+#define CORTEX_DEIMOS_CPUPWRCTLR_EL1_CORE_PWRDN_BIT            (U(1) << 0)
+
+#endif /* __CORTEX_DEIMOS_H__ */
diff --git a/include/lib/cpus/aarch64/cortex_helios.h b/include/lib/cpus/aarch64/cortex_helios.h
new file mode 100644 (file)
index 0000000..1098a12
--- /dev/null
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __CORTEX_HELIOS_H__
+#define __CORTEX_HELIOS_H__
+
+#define CORTEX_HELIOS_MIDR             U(0x410FD060)
+
+/*******************************************************************************
+ * CPU Extended Control register specific definitions.
+ ******************************************************************************/
+#define CORTEX_HELIOS_ECTLR_EL1                S3_0_C15_C1_4
+
+/*******************************************************************************
+ * CPU Auxiliary Control register specific definitions.
+ ******************************************************************************/
+#define CORTEX_HELIOS_CPUACTLR_EL1     S3_0_C15_C1_0
+
+/*******************************************************************************
+ * CPU Power Control register specific definitions.
+ ******************************************************************************/
+
+#define CORTEX_HELIOS_CPUPWRCTLR_EL1                           S3_0_C15_C2_7
+#define CORTEX_HELIOS_CPUPWRCTLR_EL1_CORE_PWRDN_BIT            (U(1) << 0)
+
+#endif /* __CORTEX_HELIOS_H__ */
index cd8f3e8fbd41896a80ebd17fdef7f9e742fa5903..026a48e32ea6ef9a28a00a8a32d58cc75259f6de 100644 (file)
 # define REPORT_ERRATA 0
 #endif
 
-       /*
-        * Define the offsets to the fields in cpu_ops structure.
-        */
-       .struct 0
-CPU_MIDR: /* cpu_ops midr */
-       .space  8
-/* Reset fn is needed in BL at reset vector */
-#if defined(IMAGE_AT_EL3)
-CPU_RESET_FUNC: /* cpu_ops reset_func */
-       .space  8
+
+       .equ    CPU_MIDR_SIZE, CPU_WORD_SIZE
+       .equ    CPU_EXTRA1_FUNC_SIZE, CPU_WORD_SIZE
+       .equ    CPU_EXTRA2_FUNC_SIZE, CPU_WORD_SIZE
+       .equ    CPU_RESET_FUNC_SIZE, CPU_WORD_SIZE
+       .equ    CPU_PWR_DWN_OPS_SIZE, CPU_WORD_SIZE * CPU_MAX_PWR_DWN_OPS
+       .equ    CPU_ERRATA_FUNC_SIZE, CPU_WORD_SIZE
+       .equ    CPU_ERRATA_LOCK_SIZE, CPU_WORD_SIZE
+       .equ    CPU_ERRATA_PRINTED_SIZE, CPU_WORD_SIZE
+       .equ    CPU_REG_DUMP_SIZE, CPU_WORD_SIZE
+
+#ifndef IMAGE_AT_EL3
+       .equ    CPU_RESET_FUNC_SIZE, 0
 #endif
-CPU_EXTRA1_FUNC:
-       .space  8
-CPU_EXTRA2_FUNC:
-       .space  8
-#ifdef IMAGE_BL31 /* The power down core and cluster is needed only in BL31 */
-CPU_PWR_DWN_OPS: /* cpu_ops power down functions */
-       .space  (8 * CPU_MAX_PWR_DWN_OPS)
+
+/* The power down core and cluster is needed only in BL31 */
+#ifndef IMAGE_BL31
+       .equ    CPU_PWR_DWN_OPS_SIZE, 0
 #endif
 
-/*
- * Fields required to print errata status. Only in BL31 that the printing
- * require mutual exclusion and printed flag.
- */
-#if REPORT_ERRATA
-CPU_ERRATA_FUNC:
-       .space  8
-#if defined(IMAGE_BL31)
-CPU_ERRATA_LOCK:
-       .space  8
-CPU_ERRATA_PRINTED:
-       .space  8
+/* Fields required to print errata status. */
+#if !REPORT_ERRATA
+       .equ    CPU_ERRATA_FUNC_SIZE, 0
 #endif
+
+/* Only BL31 requires mutual exclusion and printed flag. */
+#if !(REPORT_ERRATA && defined(IMAGE_BL31))
+       .equ    CPU_ERRATA_LOCK_SIZE, 0
+       .equ    CPU_ERRATA_PRINTED_SIZE, 0
 #endif
 
-#if defined(IMAGE_BL31) && CRASH_REPORTING
-CPU_REG_DUMP: /* cpu specific register dump for crash reporting */
-       .space  8
+#if !defined(IMAGE_BL31) || !CRASH_REPORTING
+       .equ    CPU_REG_DUMP_SIZE, 0
 #endif
-CPU_OPS_SIZE = .
+
+/*
+ * Define the offsets to the fields in cpu_ops structure.
+ * Every offset is defined based on the offset and size of the previous
+ * field.
+ */
+       .equ    CPU_MIDR, 0
+       .equ    CPU_RESET_FUNC, CPU_MIDR + CPU_MIDR_SIZE
+       .equ    CPU_EXTRA1_FUNC, CPU_RESET_FUNC + CPU_RESET_FUNC_SIZE
+       .equ    CPU_EXTRA2_FUNC, CPU_EXTRA1_FUNC + CPU_EXTRA1_FUNC_SIZE
+       .equ    CPU_PWR_DWN_OPS, CPU_EXTRA2_FUNC + CPU_EXTRA2_FUNC_SIZE
+       .equ    CPU_ERRATA_FUNC, CPU_PWR_DWN_OPS + CPU_PWR_DWN_OPS_SIZE
+       .equ    CPU_ERRATA_LOCK, CPU_ERRATA_FUNC + CPU_ERRATA_FUNC_SIZE
+       .equ    CPU_ERRATA_PRINTED, CPU_ERRATA_LOCK + CPU_ERRATA_LOCK_SIZE
+       .equ    CPU_REG_DUMP, CPU_ERRATA_PRINTED + CPU_ERRATA_PRINTED_SIZE
+       .equ    CPU_OPS_SIZE, CPU_REG_DUMP + CPU_REG_DUMP_SIZE
 
        /*
         * Write given expressions as quad words
@@ -149,21 +159,8 @@ CPU_OPS_SIZE = .
        .quad \_extra1
        .quad \_extra2
 #ifdef IMAGE_BL31
-1:
        /* Insert list of functions */
        fill_constants CPU_MAX_PWR_DWN_OPS, \_power_down_ops
-2:
-       /*
-        * Error if no or more than CPU_MAX_PWR_DWN_OPS were specified in the
-        * list
-        */
-       .ifeq 2b - 1b
-         .error "At least one power down function must be specified"
-       .else
-         .iflt 2b - 1b - (CPU_MAX_PWR_DWN_OPS * CPU_WORD_SIZE)
-           .error "More than CPU_MAX_PWR_DWN_OPS functions specified"
-         .endif
-       .endif
 #endif
 
 #if REPORT_ERRATA
index 7d210531bdee66831c8feac6e4f1852654358ca5..6ec4da80f57b9190d3d1a1b38a774c575489a86b 100644 (file)
@@ -4,15 +4,8 @@
  * SPDX-License-Identifier: BSD-3-Clause
  */
 
-#ifndef __RAS_H__
-#define __RAS_H__
-
-#include <arch.h>
-#include <arch_helpers.h>
-#include <assert.h>
-#include <context.h>
-#include <mmio.h>
-#include <stdint.h>
+#ifndef RAS_ARCH_H
+#define RAS_ARCH_H
 
 /*
  * Size of nodes implementing Standard Error Records - currently only 4k is
 #define ERR_CTLR_ENABLE_FIELD(_ctlr, _field) \
        ERR_CTLR_SET_FIELD(_ctlr, _field, ERR_CTLR_ ##_field ##_MASK)
 
-/* Uncorrected error types */
+/* Uncorrected error types for Asynchronous exceptions */
 #define ERROR_STATUS_UET_UC    0x0     /* Uncontainable */
 #define ERROR_STATUS_UET_UEU   0x1     /* Unrecoverable */
 #define ERROR_STATUS_UET_UEO   0x2     /* Restable */
 #define ERROR_STATUS_UET_UER   0x3     /* Recoverable */
 
+/* Error types for Synchronous exceptions */
+#define ERROR_STATUS_SET_UER   0x0     /* Recoverable */
+#define ERROR_STATUS_SET_UEO   0x1     /* Restable */
+#define ERROR_STATUS_SET_UC    0x2     /* Uncontainable */
+#define ERROR_STATUS_SET_CE    0x3     /* Corrected */
+
+/* Implementation Defined Syndrome bit in ESR */
+#define SERROR_IDS_BIT         U(24)
+
+/*
+ * Asynchronous Error Type in exception syndrome. The field has same values in
+ * both DISR_EL1 and ESR_EL3 for SError.
+ */
+#define EABORT_AET_SHIFT       U(10)
+#define EABORT_AET_WIDTH       U(3)
+#define EABORT_AET_MASK                U(0x7)
+
+/* DFSC field in Asynchronous exception syndrome */
+#define EABORT_DFSC_SHIFT      U(0)
+#define EABORT_DFSC_WIDTH      U(6)
+#define EABORT_DFSC_MASK       U(0x3f)
+
+/* Synchronous Error Type in exception syndrome. */
+#define EABORT_SET_SHIFT       U(11)
+#define EABORT_SET_WIDTH       U(2)
+#define EABORT_SET_MASK                U(0x3)
+
+/* DFSC code for SErrors */
+#define DFSC_SERROR            0x11
+
+/* I/DFSC code for synchronous external abort */
+#define SYNC_EA_FSC            0x10
+
+#ifndef __ASSEMBLY__
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <context.h>
+#include <mmio.h>
+#include <stdint.h>
 
 /*
  * Standard Error Record accessors for memory-mapped registers.
@@ -221,5 +255,6 @@ static inline void ser_sys_select_record(unsigned int idx)
 /* Library functions to probe Standard Error Record */
 int ser_probe_memmap(uintptr_t base, unsigned int size_num_k, int *probe_data);
 int ser_probe_sysreg(unsigned int idx_start, unsigned int num_idx, int *probe_data);
+#endif /* __ASSEMBLY__ */
 
-#endif /* __RAS_H__ */
+#endif /* RAS_ARCH_H */
index f367a1f4581c62c286dcb9b47bb03432533a7af4..5f13e993fd4ee5ac8d7e5c5d782dcd38491c796d 100644 (file)
@@ -37,10 +37,10 @@ void clear_mem_regions(mem_region_t *tbl, size_t nregions);
  * in a way that they minimize the number of entries used in the
  * translation tables.
  */
-void clear_map_dyn_mem_regions(mem_region_t *region,
+void clear_map_dyn_mem_regions(struct mem_region *regions,
                               size_t nregions,
                               uintptr_t va,
-                              size_t chunk_size);
+                              size_t chunk);
 
 /*
  * checks that a region (addr + nbytes-1) of memory is totally covered by
index b6c53e267d7d593e64f19702617f357ce9c03d0f..a290a92d8b6f004cd536aa8727d63d9d4b570851 100644 (file)
  */
 #define XLAT_TABLE_NC                  (U(1) << 1)
 
+/*
+ * Offsets into a mmu_cfg_params array generated by setup_mmu_cfg(). All
+ * parameters are 64 bits wide.
+ */
+#define MMU_CFG_MAIR           0
+#define MMU_CFG_TCR            1
+#define MMU_CFG_TTBR0          2
+#define MMU_CFG_PARAM_MAX      3
+
 #ifndef __ASSEMBLY__
 
 #include <sys/types.h>
 
+/*
+ * Return the values that the MMU configuration registers must contain for the
+ * specified translation context. `params` must be a pointer to array of size
+ * MMU_CFG_PARAM_MAX.
+ */
+void setup_mmu_cfg(uint64_t *params, unsigned int flags,
+                  const uint64_t *base_table, unsigned long long max_pa,
+                  uintptr_t max_va, int xlat_regime);
+
 #ifdef AARCH32
 /* AArch32 specific translation table API */
 void enable_mmu_secure(unsigned int flags);
index 5eb1d309d5c6fdcb7d8a8643b314065f3a4bfd46..c9d54177d5da9980bc7164e73447bc968a2ac15e 100644 (file)
@@ -62,7 +62,7 @@
 
 /*
  * The ARMv8-A architecture allows translation granule sizes of 4KB, 16KB or
- * 64KB. However, TF only supports the 4KB case at the moment.
+ * 64KB. However, only 4KB are supported at the moment.
  */
 #define PAGE_SIZE_SHIFT                FOUR_KB_SHIFT
 #define PAGE_SIZE              (U(1) << PAGE_SIZE_SHIFT)
index 20a9ea1de5602bb4027d4fe424ccae40c04e1e9b..2e5aba527b6333b4da2fe581f30df14ff78c0604 100644 (file)
@@ -121,10 +121,12 @@ typedef struct mmap_region {
 } mmap_region_t;
 
 /*
- * Translation regimes supported by this library.
+ * Translation regimes supported by this library. EL_REGIME_INVALID tells the
+ * library to detect it at runtime.
  */
 #define EL1_EL0_REGIME         1
 #define EL3_REGIME             3
+#define EL_REGIME_INVALID      -1
 
 /*
  * Declare the translation context type.
@@ -165,8 +167,7 @@ typedef struct xlat_ctx xlat_ctx_t;
                                         (_xlat_tables_count),          \
                                         (_virt_addr_space_size),       \
                                         (_phy_addr_space_size),        \
-                                        IMAGE_XLAT_DEFAULT_REGIME,     \
-                                       "xlat_table")
+                                        EL_REGIME_INVALID, "xlat_table")
 
 /*
  * Same as REGISTER_XLAT_CONTEXT plus the additional parameters:
index 56b9a93f4cfbc99ea729bcec2267642a93100b22..82d96e7d78e732d3da649035e61c7314e3b10fc7 100644 (file)
 #error "Do not include this header file directly. Include xlat_tables_v2.h instead."
 #endif
 
-/* Offsets into mmu_cfg_params array. All parameters are 32 bits wide. */
-#define MMU_CFG_MAIR0          0
-#define MMU_CFG_TCR            1
-#define MMU_CFG_TTBR0_LO       2
-#define MMU_CFG_TTBR0_HI       3
-#define MMU_CFG_PARAM_MAX      4
-
 #ifndef __ASSEMBLY__
 
 #include <cassert.h>
@@ -31,9 +24,6 @@
 #include <xlat_tables_arch.h>
 #include <xlat_tables_defs.h>
 
-/* Parameters of register values required when enabling MMU */
-extern uint32_t mmu_cfg_params[MMU_CFG_PARAM_MAX];
-
 /* Forward declaration */
 struct mmap_region;
 
@@ -172,29 +162,4 @@ struct xlat_ctx {
 
 #endif /*__ASSEMBLY__*/
 
-#if AARCH64
-
-/*
- * This IMAGE_EL macro must not to be used outside the library, and it is only
- * used in AArch64.
- */
-#if defined(IMAGE_BL1) || defined(IMAGE_BL31) || (defined(IMAGE_BL2) && BL2_AT_EL3)
-# define IMAGE_EL      3
-# define IMAGE_XLAT_DEFAULT_REGIME EL3_REGIME
-#else
-# define IMAGE_EL      1
-# define IMAGE_XLAT_DEFAULT_REGIME EL1_EL0_REGIME
-#endif
-
-#else /* if AARCH32 */
-
-/*
- * The PL1&0 translation regime in AArch32 behaves like the EL1&0 regime in
- * AArch64 except for the XN bits, but we set and unset them at the same time,
- * so there's no difference in practice.
- */
-#define IMAGE_XLAT_DEFAULT_REGIME EL1_EL0_REGIME
-
-#endif /* AARCH64 */
-
 #endif /* __XLAT_TABLES_V2_HELPERS_H__ */
index 12bf734b53d00a1cbb8d84fa75780bf7c147aef2..7953d7e24e80de8ffd5bf6c01fdb986cb445f1fe 100644 (file)
@@ -22,8 +22,7 @@ icc_regs:
 
 /* Registers common to both GICv2 and GICv3 */
 gicd_pend_reg:
-       .asciz "gicd_ispendr regs (Offsets 0x200 - 0x278)\n"    \
-               " Offset:\t\t\tvalue\n"
+       .asciz "gicd_ispendr regs (Offsets 0x200 - 0x278)\n Offset:\t\t\tvalue\n"
 newline:
        .asciz "\n"
 spacer:
index 6edfa099d886dd52e108848950b1b3245d6c5fd2..3f6e29b0ae27f2fda6cd27702495af387068ae0e 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -22,7 +22,7 @@ SECTIONS
        *(arm_el3_tzc_dram)
        __EL3_SEC_DRAM_UNALIGNED_END__ = .;
 
-       . = NEXT(PAGE_SIZE);
+       . = ALIGN(PAGE_SIZE);
        __EL3_SEC_DRAM_END__ = .;
        } >EL3_SEC_DRAM
 }
index c26052afc0c2b39298efbfe62e75dbd3ee810b39..2f355661ec92baf4b57a30eef6f6ebb09b8ba71d 100644 (file)
@@ -77,7 +77,7 @@ void arm_setup_page_tables(const mmap_region_t bl_regions[],
  * Use this macro to instantiate lock before it is used in below
  * arm_lock_xxx() macros
  */
-#define ARM_INSTANTIATE_LOCK   DEFINE_BAKERY_LOCK(arm_lock)
+#define ARM_INSTANTIATE_LOCK   static DEFINE_BAKERY_LOCK(arm_lock)
 #define ARM_LOCK_GET_INSTANCE  (&arm_lock)
 /*
  * These are wrapper macros to the Coherent Memory Bakery Lock API.
@@ -162,7 +162,6 @@ int arm_validate_psci_entrypoint(uintptr_t entrypoint);
 int arm_validate_ns_entrypoint(uintptr_t entrypoint);
 void arm_system_pwr_domain_save(void);
 void arm_system_pwr_domain_resume(void);
-void arm_program_trusted_mailbox(uintptr_t address);
 int arm_psci_read_mem_protect(int *enabled);
 int arm_nor_psci_write_mem_protect(int val);
 void arm_nor_psci_do_static_mem_protect(void);
@@ -241,6 +240,7 @@ void plat_arm_pwrc_setup(void);
 void plat_arm_interconnect_init(void);
 void plat_arm_interconnect_enter_coherency(void);
 void plat_arm_interconnect_exit_coherency(void);
+void plat_arm_program_trusted_mailbox(uintptr_t address);
 
 #if ARM_PLAT_MT
 unsigned int plat_arm_get_cpu_pe_count(u_register_t mpidr);
diff --git a/include/plat/marvell/a8k/common/a8k_common.h b/include/plat/marvell/a8k/common/a8k_common.h
new file mode 100644 (file)
index 0000000..e727467
--- /dev/null
@@ -0,0 +1,128 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#ifndef __A8K_COMMON_H__
+#define __A8K_COMMON_H__
+
+#include <amb_adec.h>
+#include <io_win.h>
+#include <iob.h>
+#include <ccu.h>
+
+/*
+ * This struct supports skip image request
+ * detection_method: the method used to detect the request "signal".
+ * info:
+ *     GPIO:
+ *             button_state: HIGH (pressed button), LOW (unpressed button).
+ *             num: the button's mpp number.
+ *     i2c:
+ *             i2c_addr: the address of the i2c chosen.
+ *             i2c_reg: the i2c register chosen.
+ *     test:
+ *             choose the DIE you picked the button in (AP or CP).
+ *             in case of CP(cp_index = 0 if CP0, cp_index = 1 if CP1)
+ */
+struct skip_image {
+       enum {
+               GPIO,
+               I2C,
+               USER_DEFINED
+       } detection_method;
+
+       struct {
+               struct {
+                       int num;
+                       enum {
+                               HIGH,
+                               LOW
+                       } button_state;
+
+               } gpio;
+
+               struct {
+                       int i2c_addr;
+                       int i2c_reg;
+               } i2c;
+
+               struct {
+                       enum {
+                               CP,
+                               AP
+                       } cp_ap;
+                       int cp_index;
+               } test;
+       } info;
+};
+
+/*
+ * This struct supports SoC power off method
+ * type: the method used to power off the SoC
+ * cfg:
+ *     PMIC_GPIO:
+ *     pin_count: current GPIO pin number used for toggling the signal for
+ *                notifying external PMIC
+ *     info:      holds the GPIOs information, CP GPIO should be used and
+ *                all GPIOs should be within same GPIO config. register
+ *     step_count: current step number to toggle the GPIO for PMIC
+ *     seq:       GPIO toggling values in sequence, each bit represents a GPIO.
+ *                For example, bit0 represents first GPIO used for toggling
+ *                the GPIO the last step is used to trigger the power off
+ *                signal
+ *     delay_ms:  transition interval for the GPIO setting to take effect
+ *                in unit of ms
+ */
+/* Max GPIO number used to notify PMIC to power off the SoC */
+#define PMIC_GPIO_MAX_NUMBER           8
+/* Max GPIO toggling steps in sequence to power off the SoC */
+#define PMIC_GPIO_MAX_TOGGLE_STEP      8
+
+enum gpio_output_state {
+       GPIO_LOW = 0,
+       GPIO_HIGH
+};
+
+typedef struct gpio_info {
+       int cp_index;
+       int gpio_index;
+} gpio_info_t;
+
+struct power_off_method {
+       enum {
+               PMIC_GPIO,
+       } type;
+
+       struct {
+               struct {
+                       int pin_count;
+                       struct gpio_info info[PMIC_GPIO_MAX_NUMBER];
+                       int step_count;
+                       uint32_t seq[PMIC_GPIO_MAX_TOGGLE_STEP];
+                       int delay_ms;
+               } gpio;
+       } cfg;
+};
+
+int marvell_gpio_config(void);
+uint32_t marvell_get_io_win_gcr_target(int ap_idx);
+uint32_t marvell_get_ccu_gcr_target(int ap_idx);
+
+
+/*
+ * The functions below are defined as Weak and may be overridden
+ * in specific Marvell standard platform
+ */
+int marvell_get_amb_memory_map(struct addr_map_win **win,
+                              uint32_t *size, uintptr_t base);
+int marvell_get_io_win_memory_map(int ap_idx, struct addr_map_win **win,
+                                 uint32_t *size);
+int marvell_get_iob_memory_map(struct addr_map_win **win,
+                              uint32_t *size, uintptr_t base);
+int marvell_get_ccu_memory_map(int ap_idx, struct addr_map_win **win,
+                              uint32_t *size);
+
+#endif /* __A8K_COMMON_H__ */
diff --git a/include/plat/marvell/a8k/common/board_marvell_def.h b/include/plat/marvell/a8k/common/board_marvell_def.h
new file mode 100644 (file)
index 0000000..b1054db
--- /dev/null
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#ifndef __BOARD_MARVELL_DEF_H__
+#define __BOARD_MARVELL_DEF_H__
+
+/*
+ * Required platform porting definitions common to all ARM
+ * development platforms
+ */
+
+/* Size of cacheable stacks */
+#if DEBUG_XLAT_TABLE
+# define PLATFORM_STACK_SIZE 0x800
+#elif IMAGE_BL1
+#if TRUSTED_BOARD_BOOT
+# define PLATFORM_STACK_SIZE 0x1000
+#else
+# define PLATFORM_STACK_SIZE 0x440
+#endif
+#elif IMAGE_BL2
+# if TRUSTED_BOARD_BOOT
+#  define PLATFORM_STACK_SIZE 0x1000
+# else
+#  define PLATFORM_STACK_SIZE 0x400
+# endif
+#elif IMAGE_BL31
+# define PLATFORM_STACK_SIZE 0x400
+#elif IMAGE_BL32
+# define PLATFORM_STACK_SIZE 0x440
+#endif
+
+/*
+ * PLAT_MARVELL_MMAP_ENTRIES depends on the number of entries in the
+ * plat_arm_mmap array defined for each BL stage.
+ */
+#if IMAGE_BLE
+#  define PLAT_MARVELL_MMAP_ENTRIES    3
+#endif
+#if IMAGE_BL1
+#  if TRUSTED_BOARD_BOOT
+#   define PLAT_MARVELL_MMAP_ENTRIES   7
+#  else
+#   define PLAT_MARVELL_MMAP_ENTRIES   6
+#  endif       /* TRUSTED_BOARD_BOOT */
+#endif
+#if IMAGE_BL2
+#  define PLAT_MARVELL_MMAP_ENTRIES            8
+#endif
+#if IMAGE_BL31
+#define PLAT_MARVELL_MMAP_ENTRIES              5
+#endif
+
+/*
+ * Platform specific page table and MMU setup constants
+ */
+#if IMAGE_BL1
+#define MAX_XLAT_TABLES                        4
+#elif IMAGE_BLE
+#  define MAX_XLAT_TABLES              4
+#elif IMAGE_BL2
+#  define MAX_XLAT_TABLES              4
+#elif IMAGE_BL31
+# define MAX_XLAT_TABLES               4
+#elif IMAGE_BL32
+#  define MAX_XLAT_TABLES               4
+#endif
+
+#define MAX_IO_DEVICES                 3
+#define MAX_IO_HANDLES                 4
+
+#define PLAT_MARVELL_TRUSTED_SRAM_SIZE 0x80000 /* 512 KB */
+
+
+#endif /* __BOARD_MARVELL_DEF_H__ */
diff --git a/include/plat/marvell/a8k/common/marvell_def.h b/include/plat/marvell/a8k/common/marvell_def.h
new file mode 100644 (file)
index 0000000..7dacf82
--- /dev/null
@@ -0,0 +1,180 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#ifndef __MARVELL_DEF_H__
+#define __MARVELL_DEF_H__
+
+#include <arch.h>
+#include <common_def.h>
+#include <platform_def.h>
+#include <tbbr_img_def.h>
+#include <xlat_tables.h>
+
+
+/******************************************************************************
+ * Definitions common to all MARVELL standard platforms
+ *****************************************************************************/
+
+/* Special value used to verify platform parameters from BL2 to BL31 */
+#define MARVELL_BL31_PLAT_PARAM_VAL            0x0f1e2d3c4b5a6978ULL
+
+
+#define MARVELL_CACHE_WRITEBACK_SHIFT  6
+
+/*
+ * Macros mapping the MPIDR Affinity levels to MARVELL Platform Power levels.
+ * The power levels have a 1:1 mapping with the MPIDR affinity levels.
+ */
+#define MARVELL_PWR_LVL0               MPIDR_AFFLVL0
+#define MARVELL_PWR_LVL1               MPIDR_AFFLVL1
+#define MARVELL_PWR_LVL2               MPIDR_AFFLVL2
+
+/*
+ *  Macros for local power states in Marvell platforms encoded by
+ *  State-ID field within the power-state parameter.
+ */
+/* Local power state for power domains in Run state. */
+#define MARVELL_LOCAL_STATE_RUN        0
+/* Local power state for retention. Valid only for CPU power domains */
+#define MARVELL_LOCAL_STATE_RET        1
+/*
+ * Local power state for OFF/power-down. Valid for CPU
+ * and cluster power domains
+ */
+#define MARVELL_LOCAL_STATE_OFF        2
+
+/* The first 4KB of Trusted SRAM are used as shared memory */
+#define MARVELL_TRUSTED_SRAM_BASE      PLAT_MARVELL_ATF_BASE
+#define MARVELL_SHARED_RAM_BASE                MARVELL_TRUSTED_SRAM_BASE
+#define MARVELL_SHARED_RAM_SIZE                0x00001000      /* 4 KB */
+
+/* The remaining Trusted SRAM is used to load the BL images */
+#define MARVELL_BL_RAM_BASE            (MARVELL_SHARED_RAM_BASE +      \
+                                        MARVELL_SHARED_RAM_SIZE)
+#define MARVELL_BL_RAM_SIZE            (PLAT_MARVELL_TRUSTED_SRAM_SIZE - \
+                                        MARVELL_SHARED_RAM_SIZE)
+/* Non-shared DRAM */
+#define MARVELL_DRAM_BASE              ULL(0x0)
+#define MARVELL_DRAM_SIZE              ULL(0x80000000)
+#define MARVELL_DRAM_END               (MARVELL_DRAM_BASE + \
+                                        MARVELL_DRAM_SIZE - 1)
+
+#define MARVELL_IRQ_SEC_PHY_TIMER      29
+
+#define MARVELL_IRQ_SEC_SGI_0          8
+#define MARVELL_IRQ_SEC_SGI_1          9
+#define MARVELL_IRQ_SEC_SGI_2          10
+#define MARVELL_IRQ_SEC_SGI_3          11
+#define MARVELL_IRQ_SEC_SGI_4          12
+#define MARVELL_IRQ_SEC_SGI_5          13
+#define MARVELL_IRQ_SEC_SGI_6          14
+#define MARVELL_IRQ_SEC_SGI_7          15
+
+#define MARVELL_MAP_SHARED_RAM         MAP_REGION_FLAT(                \
+                                               MARVELL_SHARED_RAM_BASE,\
+                                               MARVELL_SHARED_RAM_SIZE,\
+                                               MT_MEMORY | MT_RW | MT_SECURE)
+
+#define MARVELL_MAP_DRAM               MAP_REGION_FLAT(                \
+                                               MARVELL_DRAM_BASE,      \
+                                               MARVELL_DRAM_SIZE,      \
+                                               MT_MEMORY | MT_RW | MT_NS)
+
+
+/*
+ * The number of regions like RO(code), coherent and data required by
+ * different BL stages which need to be mapped in the MMU.
+ */
+#if USE_COHERENT_MEM
+#define MARVELL_BL_REGIONS             3
+#else
+#define MARVELL_BL_REGIONS             2
+#endif
+
+#define MAX_MMAP_REGIONS               (PLAT_MARVELL_MMAP_ENTRIES +    \
+                                        MARVELL_BL_REGIONS)
+
+#define MARVELL_CONSOLE_BAUDRATE       115200
+
+/******************************************************************************
+ * Required platform porting definitions common to all MARVELL std. platforms
+ *****************************************************************************/
+
+#define PLAT_PHY_ADDR_SPACE_SIZE       (1ULL << 32)
+#define PLAT_VIRT_ADDR_SPACE_SIZE      (1ULL << 32)
+
+/*
+ * This macro defines the deepest retention state possible. A higher state
+ * id will represent an invalid or a power down state.
+ */
+#define PLAT_MAX_RET_STATE             MARVELL_LOCAL_STATE_RET
+
+/*
+ * This macro defines the deepest power down states possible. Any state ID
+ * higher than this is invalid.
+ */
+#define PLAT_MAX_OFF_STATE             MARVELL_LOCAL_STATE_OFF
+
+
+#define PLATFORM_CORE_COUNT            PLAT_MARVELL_CORE_COUNT
+#define PLAT_NUM_PWR_DOMAINS           (PLAT_MARVELL_CLUSTER_COUNT +   \
+                                        PLATFORM_CORE_COUNT)
+
+/*
+ * Some data must be aligned on the biggest cache line size in the platform.
+ * This is known only to the platform as it might have a combination of
+ * integrated and external caches.
+ */
+#define CACHE_WRITEBACK_GRANULE                (1 << MARVELL_CACHE_WRITEBACK_SHIFT)
+
+
+/*******************************************************************************
+ * BL1 specific defines.
+ * BL1 RW data is relocated from ROM to RAM at runtime so we need 2 sets of
+ * addresses.
+ ******************************************************************************/
+#define BL1_RO_BASE                    PLAT_MARVELL_TRUSTED_ROM_BASE
+#define BL1_RO_LIMIT                   (PLAT_MARVELL_TRUSTED_ROM_BASE  \
+                                        + PLAT_MARVELL_TRUSTED_ROM_SIZE)
+/*
+ * Put BL1 RW at the top of the Trusted SRAM.
+ */
+#define BL1_RW_BASE            (MARVELL_BL_RAM_BASE +          \
+                                       MARVELL_BL_RAM_SIZE -   \
+                                       PLAT_MARVELL_MAX_BL1_RW_SIZE)
+#define BL1_RW_LIMIT           (MARVELL_BL_RAM_BASE + MARVELL_BL_RAM_SIZE)
+
+/*******************************************************************************
+ * BLE specific defines.
+ ******************************************************************************/
+#define BLE_BASE                       PLAT_MARVELL_SRAM_BASE
+#define BLE_LIMIT                      PLAT_MARVELL_SRAM_END
+
+/*******************************************************************************
+ * BL2 specific defines.
+ ******************************************************************************/
+/*
+ * Put BL2 just below BL31.
+ */
+#define BL2_BASE                       (BL31_BASE - PLAT_MARVELL_MAX_BL2_SIZE)
+#define BL2_LIMIT                      BL31_BASE
+
+/*******************************************************************************
+ * BL31 specific defines.
+ ******************************************************************************/
+/*
+ * Put BL31 at the top of the Trusted SRAM.
+ */
+#define BL31_BASE                      (MARVELL_BL_RAM_BASE +          \
+                                               MARVELL_BL_RAM_SIZE -   \
+                                               PLAT_MARVEL_MAX_BL31_SIZE)
+#define BL31_PROGBITS_LIMIT            BL1_RW_BASE
+#define BL31_LIMIT                     (MARVELL_BL_RAM_BASE +  \
+                                        MARVELL_BL_RAM_SIZE)
+
+
+#endif /* __MARVELL_DEF_H__ */
diff --git a/include/plat/marvell/a8k/common/plat_marvell.h b/include/plat/marvell/a8k/common/plat_marvell.h
new file mode 100644 (file)
index 0000000..aad5da7
--- /dev/null
@@ -0,0 +1,109 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#ifndef __PLAT_MARVELL_H__
+#define __PLAT_MARVELL_H__
+
+#include <cassert.h>
+#include <cpu_data.h>
+#include <stdint.h>
+#include <utils.h>
+#include <xlat_tables.h>
+
+/*
+ * Extern declarations common to Marvell standard platforms
+ */
+extern const mmap_region_t plat_marvell_mmap[];
+
+#define MARVELL_CASSERT_MMAP                                           \
+       CASSERT((ARRAY_SIZE(plat_marvell_mmap) + MARVELL_BL_REGIONS)    \
+               <= MAX_MMAP_REGIONS,                                    \
+               assert_max_mmap_regions)
+
+/*
+ * Utility functions common to Marvell standard platforms
+ */
+void marvell_setup_page_tables(uintptr_t total_base,
+                              size_t total_size,
+                              uintptr_t code_start,
+                              uintptr_t code_limit,
+                              uintptr_t rodata_start,
+                              uintptr_t rodata_limit
+#if USE_COHERENT_MEM
+                            , uintptr_t coh_start,
+                              uintptr_t coh_limit
+#endif
+);
+
+/* IO storage utility functions */
+void marvell_io_setup(void);
+
+/* Systimer utility function */
+void marvell_configure_sys_timer(void);
+
+/* Topology utility function */
+int marvell_check_mpidr(u_register_t mpidr);
+
+/* BLE utility functions */
+int ble_plat_setup(int *skip);
+void plat_marvell_dram_update_topology(void);
+void ble_plat_pcie_ep_setup(void);
+struct pci_hw_cfg *plat_get_pcie_hw_data(void);
+
+/* BL1 utility functions */
+void marvell_bl1_early_platform_setup(void);
+void marvell_bl1_platform_setup(void);
+void marvell_bl1_plat_arch_setup(void);
+
+/* BL2 utility functions */
+void marvell_bl2_early_platform_setup(meminfo_t *mem_layout);
+void marvell_bl2_platform_setup(void);
+void marvell_bl2_plat_arch_setup(void);
+uint32_t marvell_get_spsr_for_bl32_entry(void);
+uint32_t marvell_get_spsr_for_bl33_entry(void);
+
+/* BL31 utility functions */
+void marvell_bl31_early_platform_setup(bl31_params_t *from_bl2,
+                               void *plat_params_from_bl2);
+void marvell_bl31_platform_setup(void);
+void marvell_bl31_plat_runtime_setup(void);
+void marvell_bl31_plat_arch_setup(void);
+
+/* Power management config to power off the SoC */
+void *plat_marvell_get_pm_cfg(void);
+
+/* Check if MSS AP CM3 firmware contains PM support */
+_Bool is_pm_fw_running(void);
+
+/* Bootrom image recovery utility functions */
+void *plat_marvell_get_skip_image_data(void);
+
+/* FIP TOC validity check */
+int marvell_io_is_toc_valid(void);
+
+/*
+ * PSCI functionality
+ */
+void marvell_psci_arch_init(int ap_idx);
+void plat_marvell_system_reset(void);
+
+/*
+ * Optional functions required in Marvell standard platforms
+ */
+void plat_marvell_io_setup(void);
+int plat_marvell_get_alt_image_source(
+       unsigned int image_id,
+       uintptr_t *dev_handle,
+       uintptr_t *image_spec);
+unsigned int plat_marvell_calc_core_pos(u_register_t mpidr);
+
+const mmap_region_t *plat_marvell_get_mmap(void);
+void marvell_ble_prepare_exit(void);
+void marvell_exit_bootrom(uintptr_t base);
+
+int plat_marvell_early_cpu_powerdown(void);
+#endif /* __PLAT_MARVELL_H__ */
diff --git a/include/plat/marvell/a8k/common/plat_pm_trace.h b/include/plat/marvell/a8k/common/plat_pm_trace.h
new file mode 100644 (file)
index 0000000..0878959
--- /dev/null
@@ -0,0 +1,100 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#ifndef __PLAT_PM_TRACE_H
+#define __PLAT_PM_TRACE_H
+
+
+/*
+ * PM Trace is intended for debug purposes only!
+ * It must not be enabled during normal system runtime.
+ */
+#undef PM_TRACE_ENABLE
+
+
+/* trace entry time */
+struct pm_trace_entry {
+       /* trace entry time stamp */
+       unsigned int timestamp;
+
+       /* trace info
+        * [16-31] - API Trace Id
+        * [00-15] - API Step Id
+        */
+       unsigned int trace_info;
+};
+
+struct pm_trace_ctrl {
+       /* trace pointer - points to next free entry in trace cyclic queue */
+       unsigned int trace_pointer;
+
+       /* trace count - number of entries in the queue, clear upon read */
+       unsigned int trace_count;
+};
+
+/* trace size definition */
+#define AP_MSS_ATF_CORE_INFO_SIZE      (256)
+#define AP_MSS_ATF_CORE_ENTRY_SIZE     (8)
+#define AP_MSS_ATF_TRACE_SIZE_MASK     (0xFF)
+
+/* trace address definition */
+#define AP_MSS_TIMER_BASE              (MVEBU_REGS_BASE_MASK + 0x580110)
+
+#define AP_MSS_ATF_CORE_0_CTRL_BASE    (MVEBU_REGS_BASE_MASK + 0x520140)
+#define AP_MSS_ATF_CORE_1_CTRL_BASE    (MVEBU_REGS_BASE_MASK + 0x520150)
+#define AP_MSS_ATF_CORE_2_CTRL_BASE    (MVEBU_REGS_BASE_MASK + 0x520160)
+#define AP_MSS_ATF_CORE_3_CTRL_BASE    (MVEBU_REGS_BASE_MASK + 0x520170)
+#define AP_MSS_ATF_CORE_CTRL_BASE      (AP_MSS_ATF_CORE_0_CTRL_BASE)
+
+#define AP_MSS_ATF_CORE_0_INFO_BASE    (MVEBU_REGS_BASE_MASK + 0x5201C0)
+#define AP_MSS_ATF_CORE_0_INFO_TRACE   (MVEBU_REGS_BASE_MASK + 0x5201C4)
+#define AP_MSS_ATF_CORE_1_INFO_BASE    (MVEBU_REGS_BASE_MASK + 0x5209C0)
+#define AP_MSS_ATF_CORE_1_INFO_TRACE   (MVEBU_REGS_BASE_MASK + 0x5209C4)
+#define AP_MSS_ATF_CORE_2_INFO_BASE    (MVEBU_REGS_BASE_MASK + 0x5211C0)
+#define AP_MSS_ATF_CORE_2_INFO_TRACE   (MVEBU_REGS_BASE_MASK + 0x5211C4)
+#define AP_MSS_ATF_CORE_3_INFO_BASE    (MVEBU_REGS_BASE_MASK + 0x5219C0)
+#define AP_MSS_ATF_CORE_3_INFO_TRACE   (MVEBU_REGS_BASE_MASK + 0x5219C4)
+#define AP_MSS_ATF_CORE_INFO_BASE      (AP_MSS_ATF_CORE_0_INFO_BASE)
+
+/* trace info definition */
+#define TRACE_PWR_DOMAIN_OFF           (0x10000)
+#define TRACE_PWR_DOMAIN_SUSPEND       (0x20000)
+#define TRACE_PWR_DOMAIN_SUSPEND_FINISH        (0x30000)
+#define TRACE_PWR_DOMAIN_ON            (0x40000)
+#define TRACE_PWR_DOMAIN_ON_FINISH     (0x50000)
+
+#define TRACE_PWR_DOMAIN_ON_MASK       (0xFF)
+
+#ifdef PM_TRACE_ENABLE
+
+/* trace API definition */
+void pm_core_0_trace(unsigned int trace);
+void pm_core_1_trace(unsigned int trace);
+void pm_core_2_trace(unsigned int trace);
+void pm_core_3_trace(unsigned int trace);
+
+typedef void (*core_trace_func)(unsigned int);
+
+extern core_trace_func funcTbl[PLATFORM_CORE_COUNT];
+
+#define PM_TRACE(trace) funcTbl[plat_my_core_pos()](trace)
+
+#else
+
+#define PM_TRACE(trace)
+
+#endif
+
+/*******************************************************************************
+ * pm_trace_add
+ *
+ * DESCRIPTION: Add PM trace
+ ******************************************************************************
+ */
+void pm_trace_add(unsigned int trace, unsigned int core);
+
+#endif /* __PLAT_PM_TRACE_H */
diff --git a/include/plat/marvell/common/aarch64/cci_macros.S b/include/plat/marvell/common/aarch64/cci_macros.S
new file mode 100644 (file)
index 0000000..d6080cf
--- /dev/null
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#ifndef __CCI_MACROS_S__
+#define __CCI_MACROS_S__
+
+#include <cci.h>
+#include <platform_def.h>
+
+.section .rodata.cci_reg_name, "aS"
+cci_iface_regs:
+       .asciz "cci_snoop_ctrl_cluster0", "cci_snoop_ctrl_cluster1" , ""
+
+       /* ------------------------------------------------
+        * The below required platform porting macro prints
+        * out relevant interconnect registers whenever an
+        * unhandled exception is taken in BL31.
+        * Clobbers: x0 - x9, sp
+        * ------------------------------------------------
+        */
+       .macro print_cci_regs
+       adr     x6, cci_iface_regs
+       /* Store in x7 the base address of the first interface */
+       mov_imm x7, (PLAT_MARVELL_CCI_BASE + SLAVE_IFACE_OFFSET(        \
+                       PLAT_MARVELL_CCI_CLUSTER0_SL_IFACE_IX))
+       ldr     w8, [x7, #SNOOP_CTRL_REG]
+       /* Store in x7 the base address of the second interface */
+       mov_imm x7, (PLAT_MARVELL_CCI_BASE + SLAVE_IFACE_OFFSET(        \
+                       PLAT_MARVELL_CCI_CLUSTER1_SL_IFACE_IX))
+       ldr     w9, [x7, #SNOOP_CTRL_REG]
+       /* Store to the crash buf and print to console */
+       bl      str_in_crash_buf_print
+       .endm
+
+#endif /* __CCI_MACROS_S__ */
diff --git a/include/plat/marvell/common/aarch64/marvell_macros.S b/include/plat/marvell/common/aarch64/marvell_macros.S
new file mode 100644 (file)
index 0000000..0102af0
--- /dev/null
@@ -0,0 +1,134 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#ifndef __MARVELL_MACROS_S__
+#define __MARVELL_MACROS_S__
+
+#include <cci.h>
+#include <gic_common.h>
+#include <gicv2.h>
+#include <gicv3.h>
+#include <platform_def.h>
+
+/*
+ *     These Macros are required by ATF
+ */
+
+.section .rodata.gic_reg_name, "aS"
+/* Applicable only to GICv2 and GICv3 with SRE disabled (legacy mode) */
+gicc_regs:
+       .asciz "gicc_hppir", "gicc_ahppir", "gicc_ctlr", ""
+
+#ifdef USE_CCI
+/* Applicable only to GICv3 with SRE enabled */
+icc_regs:
+       .asciz "icc_hppir0_el1", "icc_hppir1_el1", "icc_ctlr_el3", ""
+#endif
+/* Registers common to both GICv2 and GICv3 */
+gicd_pend_reg:
+       .asciz "gicd_ispendr regs (Offsets 0x200 - 0x278)\n"    \
+               " Offset:\t\t\tvalue\n"
+newline:
+       .asciz "\n"
+spacer:
+       .asciz ":\t\t0x"
+
+       /* ---------------------------------------------
+        * The below utility macro prints out relevant GIC
+        * registers whenever an unhandled exception is
+        * taken in BL31 on ARM standard platforms.
+        * Expects: GICD base in x16, GICC base in x17
+        * Clobbers: x0 - x10, sp
+        * ---------------------------------------------
+        */
+       .macro arm_print_gic_regs
+       /* Check for GICv3 system register access */
+       mrs     x7, id_aa64pfr0_el1
+       ubfx    x7, x7, #ID_AA64PFR0_GIC_SHIFT, #ID_AA64PFR0_GIC_WIDTH
+       cmp     x7, #1
+       b.ne    print_gicv2
+
+       /* Check for SRE enable */
+       mrs     x8, ICC_SRE_EL3
+       tst     x8, #ICC_SRE_SRE_BIT
+       b.eq    print_gicv2
+
+#ifdef USE_CCI
+       /* Load the icc reg list to x6 */
+       adr     x6, icc_regs
+       /* Load the icc regs to gp regs used by str_in_crash_buf_print */
+       mrs     x8, ICC_HPPIR0_EL1
+       mrs     x9, ICC_HPPIR1_EL1
+       mrs     x10, ICC_CTLR_EL3
+       /* Store to the crash buf and print to console */
+       bl      str_in_crash_buf_print
+#endif
+       b       print_gic_common
+
+print_gicv2:
+       /* Load the gicc reg list to x6 */
+       adr     x6, gicc_regs
+       /* Load the gicc regs to gp regs used by str_in_crash_buf_print */
+       ldr     w8, [x17, #GICC_HPPIR]
+       ldr     w9, [x17, #GICC_AHPPIR]
+       ldr     w10, [x17, #GICC_CTLR]
+       /* Store to the crash buf and print to console */
+       bl      str_in_crash_buf_print
+
+print_gic_common:
+       /* Print the GICD_ISPENDR regs */
+       add     x7, x16, #GICD_ISPENDR
+       adr     x4, gicd_pend_reg
+       bl      asm_print_str
+gicd_ispendr_loop:
+       sub     x4, x7, x16
+       cmp     x4, #0x280
+       b.eq    exit_print_gic_regs
+       bl      asm_print_hex
+
+       adr     x4, spacer
+       bl      asm_print_str
+
+       ldr     x4, [x7], #8
+       bl      asm_print_hex
+
+       adr     x4, newline
+       bl      asm_print_str
+       b       gicd_ispendr_loop
+exit_print_gic_regs:
+       .endm
+
+
+.section .rodata.cci_reg_name, "aS"
+cci_iface_regs:
+       .asciz "cci_snoop_ctrl_cluster0", "cci_snoop_ctrl_cluster1" , ""
+
+       /* ------------------------------------------------
+        * The below required platform porting macro prints
+        * out relevant interconnect registers whenever an
+        * unhandled exception is taken in BL31.
+        * Clobbers: x0 - x9, sp
+        * ------------------------------------------------
+        */
+       .macro print_cci_regs
+#ifdef USE_CCI
+       adr     x6, cci_iface_regs
+       /* Store in x7 the base address of the first interface */
+       mov_imm x7, (PLAT_MARVELL_CCI_BASE + SLAVE_IFACE_OFFSET(        \
+                       PLAT_MARVELL_CCI_CLUSTER0_SL_IFACE_IX))
+       ldr     w8, [x7, #SNOOP_CTRL_REG]
+       /* Store in x7 the base address of the second interface */
+       mov_imm x7, (PLAT_MARVELL_CCI_BASE + SLAVE_IFACE_OFFSET(        \
+                       PLAT_MARVELL_CCI_CLUSTER1_SL_IFACE_IX))
+       ldr     w9, [x7, #SNOOP_CTRL_REG]
+       /* Store to the crash buf and print to console */
+       bl      str_in_crash_buf_print
+#endif
+       .endm
+
+
+#endif /* __MARVELL_MACROS_S__ */
diff --git a/include/plat/marvell/common/marvell_plat_priv.h b/include/plat/marvell/common/marvell_plat_priv.h
new file mode 100644 (file)
index 0000000..c1dad0e
--- /dev/null
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#ifndef __MARVELL_PLAT_PRIV_H__
+#define __MARVELL_PLAT_PRIV_H__
+
+#include <utils.h>
+
+/*****************************************************************************
+ * Function and variable prototypes
+ *****************************************************************************
+ */
+void plat_delay_timer_init(void);
+
+uint64_t mvebu_get_dram_size(uint64_t ap_base_addr);
+
+/*
+ * GIC operation, mandatory functions required in Marvell standard platforms
+ */
+void plat_marvell_gic_driver_init(void);
+void plat_marvell_gic_init(void);
+void plat_marvell_gic_cpuif_enable(void);
+void plat_marvell_gic_cpuif_disable(void);
+void plat_marvell_gic_pcpu_init(void);
+void plat_marvell_gic_irq_save(void);
+void plat_marvell_gic_irq_restore(void);
+void plat_marvell_gic_irq_pcpu_save(void);
+void plat_marvell_gic_irq_pcpu_restore(void);
+
+#endif /* __MARVELL_PLAT_PRIV_H__ */
diff --git a/include/plat/marvell/common/marvell_pm.h b/include/plat/marvell/common/marvell_pm.h
new file mode 100644 (file)
index 0000000..2817a46
--- /dev/null
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#ifndef _MARVELL_PM_H_
+#define _MARVELL_PM_H_
+
+#define MVEBU_MAILBOX_MAGIC_NUM                PLAT_MARVELL_MAILBOX_MAGIC_NUM
+#define MVEBU_MAILBOX_SUSPEND_STATE    0xb007de7c
+
+/* Mailbox entry indexes */
+/* Magic number for validity check */
+#define        MBOX_IDX_MAGIC                  0
+/* Recovery from suspend entry point */
+#define        MBOX_IDX_SEC_ADDR               1
+/* Suspend state magic number */
+#define        MBOX_IDX_SUSPEND_MAGIC          2
+/* Recovery jump address for ROM bypass */
+#define        MBOX_IDX_ROM_EXIT_ADDR          3
+/* BLE execution start counter value */
+#define        MBOX_IDX_START_CNT              4
+
+#endif /* _MARVELL_PM_H_ */
diff --git a/include/plat/marvell/common/mvebu.h b/include/plat/marvell/common/mvebu.h
new file mode 100644 (file)
index 0000000..a20e538
--- /dev/null
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C)  2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#ifndef _MVEBU_H_
+#define _MVEBU_H_
+
+/* Use these functions only when printf is allowed */
+#define debug_enter()  VERBOSE("----> Enter %s\n", __func__)
+#define debug_exit()   VERBOSE("<---- Exit  %s\n", __func__)
+
+/* Macro for testing alignment. Positive if number is NOT aligned */
+#define IS_NOT_ALIGN(number, align)    ((number) & ((align) - 1))
+
+/* Macro for alignment up. For example, ALIGN_UP(0x0330, 0x20) = 0x0340 */
+#define ALIGN_UP(number, align) (((number) & ((align) - 1)) ? \
+               (((number) + (align)) & ~((align)-1)) : (number))
+
+/* Macro for testing whether a number is a power of 2. Positive if so */
+#define IS_POWER_OF_2(number)  ((number) != 0 && \
+                               (((number) & ((number) - 1)) == 0))
+
+/*
+ * Macro for rounding up to the next power of 2.
+ * It works by counting leading zeros (clz assembly opcode) to locate the
+ * most significant set bit, then shifting 1 left by the remaining width.
+ * Note: this macro is for 32-bit numbers only.
+ */
+#define ROUND_UP_TO_POW_OF_2(number)   (1 << \
+                                       (32 - __builtin_clz((number) - 1)))
+
+#define _1MB_                          (1024ULL*1024ULL)
+#define _1GB_                          (_1MB_*1024ULL)
+
+#endif /* _MVEBU_H_ */
index 14705d7b24aebee80ace015ead92ca7596ccf7fa..51d0b15e3d68f6bf4e0b39277bd209d55b689d7a 100644 (file)
@@ -107,19 +107,19 @@ vector_base cortex_a76_wa_cve_2018_3639_a76_vbar
         */
 vector_entry cortex_a76_sync_exception_sp_el0
        b       sync_exception_sp_el0
-       check_vector_size cortex_a76_sync_exception_sp_el0
+end_vector_entry cortex_a76_sync_exception_sp_el0
 
 vector_entry cortex_a76_irq_sp_el0
        b       irq_sp_el0
-       check_vector_size cortex_a76_irq_sp_el0
+end_vector_entry cortex_a76_irq_sp_el0
 
 vector_entry cortex_a76_fiq_sp_el0
        b       fiq_sp_el0
-       check_vector_size cortex_a76_fiq_sp_el0
+end_vector_entry cortex_a76_fiq_sp_el0
 
 vector_entry cortex_a76_serror_sp_el0
        b       serror_sp_el0
-       check_vector_size cortex_a76_serror_sp_el0
+end_vector_entry cortex_a76_serror_sp_el0
 
        /* ---------------------------------------------------------------------
         * Current EL with SP_ELx: 0x200 - 0x400
@@ -127,19 +127,19 @@ vector_entry cortex_a76_serror_sp_el0
         */
 vector_entry cortex_a76_sync_exception_sp_elx
        b       sync_exception_sp_elx
-       check_vector_size cortex_a76_sync_exception_sp_elx
+end_vector_entry cortex_a76_sync_exception_sp_elx
 
 vector_entry cortex_a76_irq_sp_elx
        b       irq_sp_elx
-       check_vector_size cortex_a76_irq_sp_elx
+end_vector_entry cortex_a76_irq_sp_elx
 
 vector_entry cortex_a76_fiq_sp_elx
        b       fiq_sp_elx
-       check_vector_size cortex_a76_fiq_sp_elx
+end_vector_entry cortex_a76_fiq_sp_elx
 
 vector_entry cortex_a76_serror_sp_elx
        b       serror_sp_elx
-       check_vector_size cortex_a76_serror_sp_elx
+end_vector_entry cortex_a76_serror_sp_elx
 
        /* ---------------------------------------------------------------------
         * Lower EL using AArch64 : 0x400 - 0x600
@@ -148,22 +148,22 @@ vector_entry cortex_a76_serror_sp_elx
 vector_entry cortex_a76_sync_exception_aarch64
        apply_cve_2018_3639_wa _is_sync_exception=1 _esr_el3_val=ESR_EL3_A64_SMC0
        b       sync_exception_aarch64
-       check_vector_size cortex_a76_sync_exception_aarch64
+end_vector_entry cortex_a76_sync_exception_aarch64
 
 vector_entry cortex_a76_irq_aarch64
        apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A64_SMC0
        b       irq_aarch64
-       check_vector_size cortex_a76_irq_aarch64
+end_vector_entry cortex_a76_irq_aarch64
 
 vector_entry cortex_a76_fiq_aarch64
        apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A64_SMC0
        b       fiq_aarch64
-       check_vector_size cortex_a76_fiq_aarch64
+end_vector_entry cortex_a76_fiq_aarch64
 
 vector_entry cortex_a76_serror_aarch64
        apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A64_SMC0
        b       serror_aarch64
-       check_vector_size cortex_a76_serror_aarch64
+end_vector_entry cortex_a76_serror_aarch64
 
        /* ---------------------------------------------------------------------
         * Lower EL using AArch32 : 0x600 - 0x800
@@ -172,22 +172,22 @@ vector_entry cortex_a76_serror_aarch64
 vector_entry cortex_a76_sync_exception_aarch32
        apply_cve_2018_3639_wa _is_sync_exception=1 _esr_el3_val=ESR_EL3_A32_SMC0
        b       sync_exception_aarch32
-       check_vector_size cortex_a76_sync_exception_aarch32
+end_vector_entry cortex_a76_sync_exception_aarch32
 
 vector_entry cortex_a76_irq_aarch32
        apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A32_SMC0
        b       irq_aarch32
-       check_vector_size cortex_a76_irq_aarch32
+end_vector_entry cortex_a76_irq_aarch32
 
 vector_entry cortex_a76_fiq_aarch32
        apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A32_SMC0
        b       fiq_aarch32
-       check_vector_size cortex_a76_fiq_aarch32
+end_vector_entry cortex_a76_fiq_aarch32
 
 vector_entry cortex_a76_serror_aarch32
        apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A32_SMC0
        b       serror_aarch32
-       check_vector_size cortex_a76_serror_aarch32
+end_vector_entry cortex_a76_serror_aarch32
 
 func check_errata_cve_2018_3639
 #if WORKAROUND_CVE_2018_3639
diff --git a/lib/cpus/aarch64/cortex_deimos.S b/lib/cpus/aarch64/cortex_deimos.S
new file mode 100644 (file)
index 0000000..aec62a2
--- /dev/null
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <bl_common.h>
+#include <cortex_deimos.h>
+#include <cpu_macros.S>
+#include <plat_macros.S>
+
+       /* ---------------------------------------------
+        * HW will do the cache maintenance while powering down
+        * ---------------------------------------------
+        */
+func cortex_deimos_core_pwr_dwn
+       /* ---------------------------------------------
+        * Enable CPU power down bit in power control register
+        * ---------------------------------------------
+        */
+       mrs     x0, CORTEX_DEIMOS_CPUPWRCTLR_EL1
+       orr     x0, x0, #CORTEX_DEIMOS_CPUPWRCTLR_EL1_CORE_PWRDN_BIT
+       msr     CORTEX_DEIMOS_CPUPWRCTLR_EL1, x0
+       isb
+       ret
+endfunc cortex_deimos_core_pwr_dwn
+
+       /* ---------------------------------------------
+        * This function provides Cortex-Deimos specific
+        * register information for crash reporting.
+        * It needs to return with x6 pointing to
+        * a list of register names in ascii and
+        * x8 - x15 having values of registers to be
+        * reported.
+        * ---------------------------------------------
+        */
+.section .rodata.cortex_deimos_regs, "aS"
+cortex_deimos_regs:  /* The ascii list of register names to be reported */
+       .asciz  "cpuectlr_el1", ""
+
+func cortex_deimos_cpu_reg_dump
+       adr     x6, cortex_deimos_regs
+       mrs     x8, CORTEX_DEIMOS_CPUECTLR_EL1
+       ret
+endfunc cortex_deimos_cpu_reg_dump
+
+declare_cpu_ops cortex_deimos, CORTEX_DEIMOS_MIDR, \
+       CPU_NO_RESET_FUNC, \
+       cortex_deimos_core_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_helios.S b/lib/cpus/aarch64/cortex_helios.S
new file mode 100644 (file)
index 0000000..bcda741
--- /dev/null
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <arch.h>
+#include <asm_macros.S>
+#include <bl_common.h>
+#include <cortex_helios.h>
+#include <cpu_macros.S>
+#include <debug.h>
+#include <plat_macros.S>
+
+func cortex_helios_cpu_pwr_dwn
+       mrs     x0, CORTEX_HELIOS_CPUPWRCTLR_EL1
+       orr     x0, x0, #CORTEX_HELIOS_CPUPWRCTLR_EL1_CORE_PWRDN_BIT
+       msr     CORTEX_HELIOS_CPUPWRCTLR_EL1, x0
+       isb
+       ret
+endfunc cortex_helios_cpu_pwr_dwn
+
+.section .rodata.cortex_helios_regs, "aS"
+cortex_helios_regs:  /* The ascii list of register names to be reported */
+       .asciz  "cpuectlr_el1", ""
+
+func cortex_helios_cpu_reg_dump
+       adr     x6, cortex_helios_regs
+       mrs     x8, CORTEX_HELIOS_ECTLR_EL1
+       ret
+endfunc cortex_helios_cpu_reg_dump
+
+declare_cpu_ops cortex_helios, CORTEX_HELIOS_MIDR, \
+       CPU_NO_RESET_FUNC, \
+       cortex_helios_cpu_pwr_dwn
index aee4feeea198cec0c4a21827c6930cfc6682f53b..f04dbd6c49211b57d48e52559d9c5703575bf22b 100644 (file)
@@ -55,19 +55,19 @@ vector_base workaround_bpflush_runtime_exceptions
         */
 vector_entry workaround_bpflush_sync_exception_sp_el0
        b       sync_exception_sp_el0
-       check_vector_size workaround_bpflush_sync_exception_sp_el0
+end_vector_entry workaround_bpflush_sync_exception_sp_el0
 
 vector_entry workaround_bpflush_irq_sp_el0
        b       irq_sp_el0
-       check_vector_size workaround_bpflush_irq_sp_el0
+end_vector_entry workaround_bpflush_irq_sp_el0
 
 vector_entry workaround_bpflush_fiq_sp_el0
        b       fiq_sp_el0
-       check_vector_size workaround_bpflush_fiq_sp_el0
+end_vector_entry workaround_bpflush_fiq_sp_el0
 
 vector_entry workaround_bpflush_serror_sp_el0
        b       serror_sp_el0
-       check_vector_size workaround_bpflush_serror_sp_el0
+end_vector_entry workaround_bpflush_serror_sp_el0
 
        /* ---------------------------------------------------------------------
         * Current EL with SP_ELx: 0x200 - 0x400
@@ -75,19 +75,19 @@ vector_entry workaround_bpflush_serror_sp_el0
         */
 vector_entry workaround_bpflush_sync_exception_sp_elx
        b       sync_exception_sp_elx
-       check_vector_size workaround_bpflush_sync_exception_sp_elx
+end_vector_entry workaround_bpflush_sync_exception_sp_elx
 
 vector_entry workaround_bpflush_irq_sp_elx
        b       irq_sp_elx
-       check_vector_size workaround_bpflush_irq_sp_elx
+end_vector_entry workaround_bpflush_irq_sp_elx
 
 vector_entry workaround_bpflush_fiq_sp_elx
        b       fiq_sp_elx
-       check_vector_size workaround_bpflush_fiq_sp_elx
+end_vector_entry workaround_bpflush_fiq_sp_elx
 
 vector_entry workaround_bpflush_serror_sp_elx
        b       serror_sp_elx
-       check_vector_size workaround_bpflush_serror_sp_elx
+end_vector_entry workaround_bpflush_serror_sp_elx
 
        /* ---------------------------------------------------------------------
         * Lower EL using AArch64 : 0x400 - 0x600
@@ -96,22 +96,22 @@ vector_entry workaround_bpflush_serror_sp_elx
 vector_entry workaround_bpflush_sync_exception_aarch64
        apply_workaround
        b       sync_exception_aarch64
-       check_vector_size workaround_bpflush_sync_exception_aarch64
+end_vector_entry workaround_bpflush_sync_exception_aarch64
 
 vector_entry workaround_bpflush_irq_aarch64
        apply_workaround
        b       irq_aarch64
-       check_vector_size workaround_bpflush_irq_aarch64
+end_vector_entry workaround_bpflush_irq_aarch64
 
 vector_entry workaround_bpflush_fiq_aarch64
        apply_workaround
        b       fiq_aarch64
-       check_vector_size workaround_bpflush_fiq_aarch64
+end_vector_entry workaround_bpflush_fiq_aarch64
 
 vector_entry workaround_bpflush_serror_aarch64
        apply_workaround
        b       serror_aarch64
-       check_vector_size workaround_bpflush_serror_aarch64
+end_vector_entry workaround_bpflush_serror_aarch64
 
        /* ---------------------------------------------------------------------
         * Lower EL using AArch32 : 0x600 - 0x800
@@ -120,22 +120,22 @@ vector_entry workaround_bpflush_serror_aarch64
 vector_entry workaround_bpflush_sync_exception_aarch32
        apply_workaround
        b       sync_exception_aarch32
-       check_vector_size workaround_bpflush_sync_exception_aarch32
+end_vector_entry workaround_bpflush_sync_exception_aarch32
 
 vector_entry workaround_bpflush_irq_aarch32
        apply_workaround
        b       irq_aarch32
-       check_vector_size workaround_bpflush_irq_aarch32
+end_vector_entry workaround_bpflush_irq_aarch32
 
 vector_entry workaround_bpflush_fiq_aarch32
        apply_workaround
        b       fiq_aarch32
-       check_vector_size workaround_bpflush_fiq_aarch32
+end_vector_entry workaround_bpflush_fiq_aarch32
 
 vector_entry workaround_bpflush_serror_aarch32
        apply_workaround
        b       serror_aarch32
-       check_vector_size workaround_bpflush_serror_aarch32
+end_vector_entry workaround_bpflush_serror_aarch32
 
        .global denver_disable_dco
 
index 843715515547c94b3dd750f8a23ee34fa0440e9f..c613ebdf6c870c75717956340e175ea9b34f69a4 100644 (file)
@@ -114,19 +114,19 @@ aarch32_stub:
        .word   EMIT_BPIALL
        .word   EMIT_SMC
 
-       check_vector_size bpiall_sync_exception_sp_el0
+end_vector_entry bpiall_sync_exception_sp_el0
 
 vector_entry bpiall_irq_sp_el0
        b       irq_sp_el0
-       check_vector_size bpiall_irq_sp_el0
+end_vector_entry bpiall_irq_sp_el0
 
 vector_entry bpiall_fiq_sp_el0
        b       fiq_sp_el0
-       check_vector_size bpiall_fiq_sp_el0
+end_vector_entry bpiall_fiq_sp_el0
 
 vector_entry bpiall_serror_sp_el0
        b       serror_sp_el0
-       check_vector_size bpiall_serror_sp_el0
+end_vector_entry bpiall_serror_sp_el0
 
        /* ---------------------------------------------------------------------
         * Current EL with SP_ELx: 0x200 - 0x400
@@ -134,19 +134,19 @@ vector_entry bpiall_serror_sp_el0
         */
 vector_entry bpiall_sync_exception_sp_elx
        b       sync_exception_sp_elx
-       check_vector_size bpiall_sync_exception_sp_elx
+end_vector_entry bpiall_sync_exception_sp_elx
 
 vector_entry bpiall_irq_sp_elx
        b       irq_sp_elx
-       check_vector_size bpiall_irq_sp_elx
+end_vector_entry bpiall_irq_sp_elx
 
 vector_entry bpiall_fiq_sp_elx
        b       fiq_sp_elx
-       check_vector_size bpiall_fiq_sp_elx
+end_vector_entry bpiall_fiq_sp_elx
 
 vector_entry bpiall_serror_sp_elx
        b       serror_sp_elx
-       check_vector_size bpiall_serror_sp_elx
+end_vector_entry bpiall_serror_sp_elx
 
        /* ---------------------------------------------------------------------
         * Lower EL using AArch64 : 0x400 - 0x600
@@ -154,19 +154,19 @@ vector_entry bpiall_serror_sp_elx
         */
 vector_entry bpiall_sync_exception_aarch64
        apply_cve_2017_5715_wa 1
-       check_vector_size bpiall_sync_exception_aarch64
+end_vector_entry bpiall_sync_exception_aarch64
 
 vector_entry bpiall_irq_aarch64
        apply_cve_2017_5715_wa 2
-       check_vector_size bpiall_irq_aarch64
+end_vector_entry bpiall_irq_aarch64
 
 vector_entry bpiall_fiq_aarch64
        apply_cve_2017_5715_wa 4
-       check_vector_size bpiall_fiq_aarch64
+end_vector_entry bpiall_fiq_aarch64
 
 vector_entry bpiall_serror_aarch64
        apply_cve_2017_5715_wa 8
-       check_vector_size bpiall_serror_aarch64
+end_vector_entry bpiall_serror_aarch64
 
        /* ---------------------------------------------------------------------
         * Lower EL using AArch32 : 0x600 - 0x800
@@ -174,19 +174,19 @@ vector_entry bpiall_serror_aarch64
         */
 vector_entry bpiall_sync_exception_aarch32
        apply_cve_2017_5715_wa 1
-       check_vector_size bpiall_sync_exception_aarch32
+end_vector_entry bpiall_sync_exception_aarch32
 
 vector_entry bpiall_irq_aarch32
        apply_cve_2017_5715_wa 2
-       check_vector_size bpiall_irq_aarch32
+end_vector_entry bpiall_irq_aarch32
 
 vector_entry bpiall_fiq_aarch32
        apply_cve_2017_5715_wa 4
-       check_vector_size bpiall_fiq_aarch32
+end_vector_entry bpiall_fiq_aarch32
 
 vector_entry bpiall_serror_aarch32
        apply_cve_2017_5715_wa 8
-       check_vector_size bpiall_serror_aarch32
+end_vector_entry bpiall_serror_aarch32
 
        /* ---------------------------------------------------------------------
         * This vector table is used while the workaround is executing.  It
@@ -203,19 +203,19 @@ vector_base wa_cve_2017_5715_bpiall_ret_vbar
         */
 vector_entry bpiall_ret_sync_exception_sp_el0
        b       report_unhandled_exception
-       check_vector_size bpiall_ret_sync_exception_sp_el0
+end_vector_entry bpiall_ret_sync_exception_sp_el0
 
 vector_entry bpiall_ret_irq_sp_el0
        b       report_unhandled_interrupt
-       check_vector_size bpiall_ret_irq_sp_el0
+end_vector_entry bpiall_ret_irq_sp_el0
 
 vector_entry bpiall_ret_fiq_sp_el0
        b       report_unhandled_interrupt
-       check_vector_size bpiall_ret_fiq_sp_el0
+end_vector_entry bpiall_ret_fiq_sp_el0
 
 vector_entry bpiall_ret_serror_sp_el0
        b       report_unhandled_exception
-       check_vector_size bpiall_ret_serror_sp_el0
+end_vector_entry bpiall_ret_serror_sp_el0
 
        /* ---------------------------------------------------------------------
         * Current EL with SP_ELx: 0x200 - 0x400 (UNUSED)
@@ -223,19 +223,19 @@ vector_entry bpiall_ret_serror_sp_el0
         */
 vector_entry bpiall_ret_sync_exception_sp_elx
        b       report_unhandled_exception
-       check_vector_size bpiall_ret_sync_exception_sp_elx
+end_vector_entry bpiall_ret_sync_exception_sp_elx
 
 vector_entry bpiall_ret_irq_sp_elx
        b       report_unhandled_interrupt
-       check_vector_size bpiall_ret_irq_sp_elx
+end_vector_entry bpiall_ret_irq_sp_elx
 
 vector_entry bpiall_ret_fiq_sp_elx
        b       report_unhandled_interrupt
-       check_vector_size bpiall_ret_fiq_sp_elx
+end_vector_entry bpiall_ret_fiq_sp_elx
 
 vector_entry bpiall_ret_serror_sp_elx
        b       report_unhandled_exception
-       check_vector_size bpiall_ret_serror_sp_elx
+end_vector_entry bpiall_ret_serror_sp_elx
 
        /* ---------------------------------------------------------------------
         * Lower EL using AArch64 : 0x400 - 0x600 (UNUSED)
@@ -243,19 +243,19 @@ vector_entry bpiall_ret_serror_sp_elx
         */
 vector_entry bpiall_ret_sync_exception_aarch64
        b       report_unhandled_exception
-       check_vector_size bpiall_ret_sync_exception_aarch64
+end_vector_entry bpiall_ret_sync_exception_aarch64
 
 vector_entry bpiall_ret_irq_aarch64
        b       report_unhandled_interrupt
-       check_vector_size bpiall_ret_irq_aarch64
+end_vector_entry bpiall_ret_irq_aarch64
 
 vector_entry bpiall_ret_fiq_aarch64
        b       report_unhandled_interrupt
-       check_vector_size bpiall_ret_fiq_aarch64
+end_vector_entry bpiall_ret_fiq_aarch64
 
 vector_entry bpiall_ret_serror_aarch64
        b       report_unhandled_exception
-       check_vector_size bpiall_ret_serror_aarch64
+end_vector_entry bpiall_ret_serror_aarch64
 
        /* ---------------------------------------------------------------------
         * Lower EL using AArch32 : 0x600 - 0x800
@@ -324,7 +324,7 @@ vector_entry bpiall_ret_sync_exception_aarch32
 1:
        ldp     x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
        b       sync_exception_aarch64
-       check_vector_size bpiall_ret_sync_exception_aarch32
+end_vector_entry bpiall_ret_sync_exception_aarch32
 
 vector_entry bpiall_ret_irq_aarch32
        b       report_unhandled_interrupt
@@ -346,12 +346,12 @@ bpiall_ret_fiq:
 bpiall_ret_serror:
        ldp     x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
        b       serror_aarch64
-       check_vector_size bpiall_ret_irq_aarch32
+end_vector_entry bpiall_ret_irq_aarch32
 
 vector_entry bpiall_ret_fiq_aarch32
        b       report_unhandled_interrupt
-       check_vector_size bpiall_ret_fiq_aarch32
+end_vector_entry bpiall_ret_fiq_aarch32
 
 vector_entry bpiall_ret_serror_aarch32
        b       report_unhandled_exception
-       check_vector_size bpiall_ret_serror_aarch32
+end_vector_entry bpiall_ret_serror_aarch32
index a556d1fee0e4e1ab128512bb39e2edd4d283df0c..d7b6e26e40f7128ab148d3a2cac9e74f06022410 100644 (file)
@@ -66,19 +66,19 @@ vector_base wa_cve_2017_5715_mmu_vbar
         */
 vector_entry mmu_sync_exception_sp_el0
        b       sync_exception_sp_el0
-       check_vector_size mmu_sync_exception_sp_el0
+end_vector_entry mmu_sync_exception_sp_el0
 
 vector_entry mmu_irq_sp_el0
        b       irq_sp_el0
-       check_vector_size mmu_irq_sp_el0
+end_vector_entry mmu_irq_sp_el0
 
 vector_entry mmu_fiq_sp_el0
        b       fiq_sp_el0
-       check_vector_size mmu_fiq_sp_el0
+end_vector_entry mmu_fiq_sp_el0
 
 vector_entry mmu_serror_sp_el0
        b       serror_sp_el0
-       check_vector_size mmu_serror_sp_el0
+end_vector_entry mmu_serror_sp_el0
 
        /* ---------------------------------------------------------------------
         * Current EL with SP_ELx: 0x200 - 0x400
@@ -86,19 +86,19 @@ vector_entry mmu_serror_sp_el0
         */
 vector_entry mmu_sync_exception_sp_elx
        b       sync_exception_sp_elx
-       check_vector_size mmu_sync_exception_sp_elx
+end_vector_entry mmu_sync_exception_sp_elx
 
 vector_entry mmu_irq_sp_elx
        b       irq_sp_elx
-       check_vector_size mmu_irq_sp_elx
+end_vector_entry mmu_irq_sp_elx
 
 vector_entry mmu_fiq_sp_elx
        b       fiq_sp_elx
-       check_vector_size mmu_fiq_sp_elx
+end_vector_entry mmu_fiq_sp_elx
 
 vector_entry mmu_serror_sp_elx
        b       serror_sp_elx
-       check_vector_size mmu_serror_sp_elx
+end_vector_entry mmu_serror_sp_elx
 
        /* ---------------------------------------------------------------------
         * Lower EL using AArch64 : 0x400 - 0x600
@@ -107,22 +107,22 @@ vector_entry mmu_serror_sp_elx
 vector_entry mmu_sync_exception_aarch64
        apply_cve_2017_5715_wa _is_sync_exception=1 _esr_el3_val=ESR_EL3_A64_SMC0
        b       sync_exception_aarch64
-       check_vector_size mmu_sync_exception_aarch64
+end_vector_entry mmu_sync_exception_aarch64
 
 vector_entry mmu_irq_aarch64
        apply_cve_2017_5715_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A64_SMC0
        b       irq_aarch64
-       check_vector_size mmu_irq_aarch64
+end_vector_entry mmu_irq_aarch64
 
 vector_entry mmu_fiq_aarch64
        apply_cve_2017_5715_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A64_SMC0
        b       fiq_aarch64
-       check_vector_size mmu_fiq_aarch64
+end_vector_entry mmu_fiq_aarch64
 
 vector_entry mmu_serror_aarch64
        apply_cve_2017_5715_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A64_SMC0
        b       serror_aarch64
-       check_vector_size mmu_serror_aarch64
+end_vector_entry mmu_serror_aarch64
 
        /* ---------------------------------------------------------------------
         * Lower EL using AArch32 : 0x600 - 0x800
@@ -131,19 +131,19 @@ vector_entry mmu_serror_aarch64
 vector_entry mmu_sync_exception_aarch32
        apply_cve_2017_5715_wa _is_sync_exception=1 _esr_el3_val=ESR_EL3_A32_SMC0
        b       sync_exception_aarch32
-       check_vector_size mmu_sync_exception_aarch32
+end_vector_entry mmu_sync_exception_aarch32
 
 vector_entry mmu_irq_aarch32
        apply_cve_2017_5715_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A32_SMC0
        b       irq_aarch32
-       check_vector_size mmu_irq_aarch32
+end_vector_entry mmu_irq_aarch32
 
 vector_entry mmu_fiq_aarch32
        apply_cve_2017_5715_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A32_SMC0
        b       fiq_aarch32
-       check_vector_size mmu_fiq_aarch32
+end_vector_entry mmu_fiq_aarch32
 
 vector_entry mmu_serror_aarch32
        apply_cve_2017_5715_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A32_SMC0
        b       serror_aarch32
-       check_vector_size mmu_serror_aarch32
+end_vector_entry mmu_serror_aarch32
index e9541ba320edade7e552b3d88471ce5ef88861e1..a5c3c61700033d8a81118b29beff55a95f96adcf 100644 (file)
@@ -50,7 +50,7 @@ void clear_mem_regions(mem_region_t *tbl, size_t nregions)
  * be cleared, and chunk is the amount of memory mapped and
  * cleared in every iteration.
  */
-void clear_map_dyn_mem_regions(mem_region_t *regions,
+void clear_map_dyn_mem_regions(struct mem_region *regions,
                               size_t nregions,
                               uintptr_t va,
                               size_t chunk)
index b42cd6814245d1cd5fcbb00bd58dca5f820d2529..ce6e341a2fb4a31f27cd9a37f38a8d473d3809e9 100644 (file)
@@ -195,6 +195,10 @@ static uint64_t mmap_desc(unsigned int attr, unsigned long long addr_pa,
        desc |= (level == XLAT_TABLE_LEVEL_MAX) ? PAGE_DESC : BLOCK_DESC;
        desc |= (attr & MT_NS) ? LOWER_ATTRS(NS) : 0;
        desc |= (attr & MT_RW) ? LOWER_ATTRS(AP_RW) : LOWER_ATTRS(AP_RO);
+       /*
+        * Always set the access flag, as this library assumes access flag
+        * faults aren't managed.
+        */
        desc |= LOWER_ATTRS(ACCESS_FLAG);
        desc |= ap1_mask;
 
@@ -222,9 +226,10 @@ static uint64_t mmap_desc(unsigned int attr, unsigned long long addr_pa,
        } else { /* Normal memory */
                /*
                 * Always map read-write normal memory as execute-never.
-                * (Trusted Firmware doesn't self-modify its code, therefore
-                * R/W memory is reserved for data storage, which must not be
-                * executable.)
+                * This library assumes that it is used by software that does
+                * not self-modify its code, therefore R/W memory is reserved
+                * for data storage, which must not be executable.
+                *
                 * Note that setting the XN bit here is for consistency only.
                 * The function that enables the MMU sets the SCTLR_ELx.WXN bit,
                 * which makes any writable memory region to be treated as
index 97cdde7516b0effd0a1f968b5684a3e73d2ad0ba..99cf0881e92c0f05c014ce6c1583d4d05b2b705b 100644 (file)
@@ -24,17 +24,17 @@ func enable_mmu_direct
        mov     r3, r0
        ldr     r0, =mmu_cfg_params
 
-       /* MAIR0 */
-       ldr     r1, [r0, #(MMU_CFG_MAIR0 << 2)]
+       /* MAIR0. Only the lower 32 bits are used. */
+       ldr     r1, [r0, #(MMU_CFG_MAIR << 3)]
        stcopr  r1, MAIR0
 
-       /* TTBCR */
-       ldr     r2, [r0, #(MMU_CFG_TCR << 2)]
+       /* TTBCR. Only the lower 32 bits are used. */
+       ldr     r2, [r0, #(MMU_CFG_TCR << 3)]
        stcopr  r2, TTBCR
 
        /* TTBR0 */
-       ldr     r1, [r0, #(MMU_CFG_TTBR0_LO << 2)]
-       ldr     r2, [r0, #(MMU_CFG_TTBR0_HI << 2)]
+       ldr     r1, [r0, #(MMU_CFG_TTBR0 << 3)]
+       ldr     r2, [r0, #((MMU_CFG_TTBR0 << 3) + 4)]
        stcopr16        r1, r2, TTBR0_64
 
        /* TTBR1 is unused right now; set it to 0. */
index 6e971925821acef79d41cddc98b336773d65acd4..5e3220c606ae5869711a6eb0bfc203b84944d734 100644 (file)
 #error ARMv7 target does not support LPAE MMU descriptors
 #endif
 
-uint32_t mmu_cfg_params[MMU_CFG_PARAM_MAX];
-
 /*
  * Returns 1 if the provided granule size is supported, 0 otherwise.
  */
 int xlat_arch_is_granule_size_supported(size_t size)
 {
        /*
-        * The Trusted Firmware uses long descriptor translation table format,
-        * which supports 4 KiB pages only.
+        * The library uses the long descriptor translation table format, which
+        * supports 4 KiB pages only.
         */
        return (size == (4U * 1024U));
 }
@@ -50,18 +48,12 @@ int is_mmu_enabled_ctx(const xlat_ctx_t *ctx __unused)
        return (read_sctlr() & SCTLR_M_BIT) != 0;
 }
 
-void xlat_arch_tlbi_va(uintptr_t va)
+uint64_t xlat_arch_regime_get_xn_desc(int xlat_regime __unused)
 {
-       /*
-        * Ensure the translation table write has drained into memory before
-        * invalidating the TLB entry.
-        */
-       dsbishst();
-
-       tlbimvaais(TLBI_ADDR(va));
+       return UPPER_ATTRS(XN);
 }
 
-void xlat_arch_tlbi_va_regime(uintptr_t va, int xlat_regime __unused)
+void xlat_arch_tlbi_va(uintptr_t va, int xlat_regime __unused)
 {
        /*
         * Ensure the translation table write has drained into memory before
@@ -103,29 +95,32 @@ int xlat_arch_current_el(void)
        /*
         * If EL3 is in AArch32 mode, all secure PL1 modes (Monitor, System,
         * SVC, Abort, UND, IRQ and FIQ modes) execute at EL3.
+        *
+        * The PL1&0 translation regime in AArch32 behaves like the EL1&0 regime
+        * in AArch64 except for the XN bits, but we set and unset them at the
+        * same time, so there's no difference in practice.
         */
-       return 3;
+       return 1;
 }
 
 /*******************************************************************************
  * Function for enabling the MMU in Secure PL1, assuming that the page tables
  * have already been created.
  ******************************************************************************/
-void setup_mmu_cfg(unsigned int flags,
-               const uint64_t *base_table,
-               unsigned long long max_pa,
-               uintptr_t max_va)
+void setup_mmu_cfg(uint64_t *params, unsigned int flags,
+                  const uint64_t *base_table, unsigned long long max_pa,
+                  uintptr_t max_va, __unused int xlat_regime)
 {
-       u_register_t mair0, ttbcr;
-       uint64_t ttbr0;
+       uint64_t mair, ttbr0;
+       uint32_t ttbcr;
 
        assert(IS_IN_SECURE());
 
        /* Set attributes in the right indices of the MAIR */
-       mair0 = MAIR0_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);
-       mair0 |= MAIR0_ATTR_SET(ATTR_IWBWA_OWBWA_NTR,
+       mair = MAIR0_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);
+       mair |= MAIR0_ATTR_SET(ATTR_IWBWA_OWBWA_NTR,
                        ATTR_IWBWA_OWBWA_NTR_INDEX);
-       mair0 |= MAIR0_ATTR_SET(ATTR_NON_CACHEABLE,
+       mair |= MAIR0_ATTR_SET(ATTR_NON_CACHEABLE,
                        ATTR_NON_CACHEABLE_INDEX);
 
        /*
@@ -173,17 +168,17 @@ void setup_mmu_cfg(unsigned int flags,
 
        /* Set TTBR0 bits as well */
        ttbr0 = (uint64_t)(uintptr_t) base_table;
+
 #if ARM_ARCH_AT_LEAST(8, 2)
        /*
-        * Enable CnP bit so as to share page tables with all PEs.
-        * Mandatory for ARMv8.2 implementations.
+        * Enable CnP bit so as to share page tables with all PEs. This
+        * is mandatory for ARMv8.2 implementations.
         */
        ttbr0 |= TTBR_CNP_BIT;
 #endif
 
        /* Now populate MMU configuration */
-       mmu_cfg_params[MMU_CFG_MAIR0] = mair0;
-       mmu_cfg_params[MMU_CFG_TCR] = ttbcr;
-       mmu_cfg_params[MMU_CFG_TTBR0_LO] = (uint32_t) ttbr0;
-       mmu_cfg_params[MMU_CFG_TTBR0_HI] = ttbr0 >> 32;
+       params[MMU_CFG_MAIR] = mair;
+       params[MMU_CFG_TCR] = (uint64_t) ttbcr;
+       params[MMU_CFG_TTBR0] = ttbr0;
 }
diff --git a/lib/xlat_tables_v2/aarch32/xlat_tables_arch_private.h b/lib/xlat_tables_v2/aarch32/xlat_tables_arch_private.h
deleted file mode 100644 (file)
index 9b41f4d..0000000
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifndef __XLAT_TABLES_ARCH_PRIVATE_H__
-#define __XLAT_TABLES_ARCH_PRIVATE_H__
-
-#include <xlat_tables_defs.h>
-#include <xlat_tables_v2.h>
-
-/*
- * Return the execute-never mask that will prevent instruction fetch at the
- * given translation regime.
- */
-static inline uint64_t xlat_arch_regime_get_xn_desc(int regime __unused)
-{
-       return UPPER_ATTRS(XN);
-}
-
-#endif /* __XLAT_TABLES_ARCH_PRIVATE_H__ */
index a72c7fae51c1beeea5b1eb2edf4e80afbaa32779..5c5a2a927c680836435a239c424d08b0e357796e 100644 (file)
                ldr     x0, =mmu_cfg_params
 
                /* MAIR */
-               ldr     w1, [x0, #(MMU_CFG_MAIR0 << 2)]
+               ldr     x1, [x0, #(MMU_CFG_MAIR << 3)]
                _msr    mair, \el, x1
 
                /* TCR */
-               ldr     w2, [x0, #(MMU_CFG_TCR << 2)]
+               ldr     x2, [x0, #(MMU_CFG_TCR << 3)]
                _msr    tcr, \el, x2
 
                /* TTBR */
-               ldr     w3, [x0, #(MMU_CFG_TTBR0_LO << 2)]
-               ldr     w4, [x0, #(MMU_CFG_TTBR0_HI << 2)]
-               orr     x3, x3, x4, lsl #32
+               ldr     x3, [x0, #(MMU_CFG_TTBR0 << 3)]
                _msr    ttbr0, \el, x3
 
                /*
index 4bbbe54434d7b353f6d6ab505b2267e69dbbadd3..0f289e28ea67d23333d59198e107d9b46fae585c 100644 (file)
@@ -13,8 +13,6 @@
 #include <xlat_tables_v2.h>
 #include "../xlat_tables_private.h"
 
-uint32_t mmu_cfg_params[MMU_CFG_PARAM_MAX];
-
 /*
  * Returns 1 if the provided granule size is supported, 0 otherwise.
  */
@@ -113,19 +111,17 @@ int is_mmu_enabled_ctx(const xlat_ctx_t *ctx)
        }
 }
 
-
-void xlat_arch_tlbi_va(uintptr_t va)
+uint64_t xlat_arch_regime_get_xn_desc(int xlat_regime)
 {
-#if IMAGE_EL == 1
-       assert(IS_IN_EL(1));
-       xlat_arch_tlbi_va_regime(va, EL1_EL0_REGIME);
-#elif IMAGE_EL == 3
-       assert(IS_IN_EL(3));
-       xlat_arch_tlbi_va_regime(va, EL3_REGIME);
-#endif
+       if (xlat_regime == EL1_EL0_REGIME) {
+               return UPPER_ATTRS(UXN) | UPPER_ATTRS(PXN);
+       } else {
+               assert(xlat_regime == EL3_REGIME);
+               return UPPER_ATTRS(XN);
+       }
 }
 
-void xlat_arch_tlbi_va_regime(uintptr_t va, int xlat_regime)
+void xlat_arch_tlbi_va(uintptr_t va, int xlat_regime)
 {
        /*
         * Ensure the translation table write has drained into memory before
@@ -182,12 +178,11 @@ int xlat_arch_current_el(void)
        return el;
 }
 
-void setup_mmu_cfg(unsigned int flags,
-               const uint64_t *base_table,
-               unsigned long long max_pa,
-               uintptr_t max_va)
+void setup_mmu_cfg(uint64_t *params, unsigned int flags,
+                  const uint64_t *base_table, unsigned long long max_pa,
+                  uintptr_t max_va, int xlat_regime)
 {
-       uint64_t mair, ttbr, tcr;
+       uint64_t mair, ttbr0, tcr;
        uintptr_t virtual_addr_space_size;
 
        /* Set attributes in the right indices of the MAIR. */
@@ -195,8 +190,6 @@ void setup_mmu_cfg(unsigned int flags,
        mair |= MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR, ATTR_IWBWA_OWBWA_NTR_INDEX);
        mair |= MAIR_ATTR_SET(ATTR_NON_CACHEABLE, ATTR_NON_CACHEABLE_INDEX);
 
-       ttbr = (uint64_t) base_table;
-
        /*
         * Limit the input address ranges and memory region sizes translated
         * using TTBR0 to the given virtual address space size.
@@ -232,30 +225,29 @@ void setup_mmu_cfg(unsigned int flags,
         */
        unsigned long long tcr_ps_bits = tcr_physical_addr_size_bits(max_pa);
 
-#if IMAGE_EL == 1
-       assert(IS_IN_EL(1));
-       /*
-        * TCR_EL1.EPD1: Disable translation table walk for addresses that are
-        * translated using TTBR1_EL1.
-        */
-       tcr |= TCR_EPD1_BIT | (tcr_ps_bits << TCR_EL1_IPS_SHIFT);
-#elif IMAGE_EL == 3
-       assert(IS_IN_EL(3));
-       tcr |= TCR_EL3_RES1 | (tcr_ps_bits << TCR_EL3_PS_SHIFT);
-#endif
-
-       mmu_cfg_params[MMU_CFG_MAIR0] = (uint32_t) mair;
-       mmu_cfg_params[MMU_CFG_TCR] = (uint32_t) tcr;
-
-       /* Set TTBR bits as well */
-       if (ARM_ARCH_AT_LEAST(8, 2)) {
+       if (xlat_regime == EL1_EL0_REGIME) {
                /*
-                * Enable CnP bit so as to share page tables with all PEs. This
-                * is mandatory for ARMv8.2 implementations.
+                * TCR_EL1.EPD1: Disable translation table walk for addresses
+                * that are translated using TTBR1_EL1.
                 */
-               ttbr |= TTBR_CNP_BIT;
+               tcr |= TCR_EPD1_BIT | (tcr_ps_bits << TCR_EL1_IPS_SHIFT);
+       } else {
+               assert(xlat_regime == EL3_REGIME);
+               tcr |= TCR_EL3_RES1 | (tcr_ps_bits << TCR_EL3_PS_SHIFT);
        }
 
-       mmu_cfg_params[MMU_CFG_TTBR0_LO] = (uint32_t) ttbr;
-       mmu_cfg_params[MMU_CFG_TTBR0_HI] = (uint32_t) (ttbr >> 32);
+       /* Set TTBR bits as well */
+       ttbr0 = (uint64_t) base_table;
+
+#if ARM_ARCH_AT_LEAST(8, 2)
+       /*
+        * Enable CnP bit so as to share page tables with all PEs. This
+        * is mandatory for ARMv8.2 implementations.
+        */
+       ttbr0 |= TTBR_CNP_BIT;
+#endif
+
+       params[MMU_CFG_MAIR] = mair;
+       params[MMU_CFG_TCR] = tcr;
+       params[MMU_CFG_TTBR0] = ttbr0;
 }
diff --git a/lib/xlat_tables_v2/aarch64/xlat_tables_arch_private.h b/lib/xlat_tables_v2/aarch64/xlat_tables_arch_private.h
deleted file mode 100644 (file)
index 39b0a65..0000000
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifndef __XLAT_TABLES_ARCH_PRIVATE_H__
-#define __XLAT_TABLES_ARCH_PRIVATE_H__
-
-#include <assert.h>
-#include <xlat_tables_defs.h>
-#include <xlat_tables_v2.h>
-
-/*
- * Return the execute-never mask that will prevent instruction fetch at all ELs
- * that are part of the given translation regime.
- */
-static inline uint64_t xlat_arch_regime_get_xn_desc(int regime)
-{
-       if (regime == EL1_EL0_REGIME) {
-               return UPPER_ATTRS(UXN) | UPPER_ATTRS(PXN);
-       } else {
-               assert(regime == EL3_REGIME);
-               return UPPER_ATTRS(XN);
-       }
-}
-
-#endif /* __XLAT_TABLES_ARCH_PRIVATE_H__ */
index b25c805cf28989a7cf14389d6d5eb8897b1029eb..9507ad7155f92a263e6155bb500de688f7a251e5 100644 (file)
@@ -10,5 +10,3 @@ XLAT_TABLES_LIB_SRCS  :=      $(addprefix lib/xlat_tables_v2/,        \
                                xlat_tables_context.c                   \
                                xlat_tables_core.c                      \
                                xlat_tables_utils.c)
-
-INCLUDES               +=      -Ilib/xlat_tables_v2/${ARCH}
index 0964b49b2a11ff9f36e0f25e7d788c61e9f0c37c..76c429d759ece20c869ee34c9f7e6d9cbe8cde3d 100644 (file)
@@ -4,6 +4,7 @@
  * SPDX-License-Identifier: BSD-3-Clause
  */
 
+#include <assert.h>
 #include <debug.h>
 #include <platform_def.h>
 #include <xlat_tables_defs.h>
 
 #include "xlat_tables_private.h"
 
+/*
+ * MMU configuration register values for the active translation context. Used
+ * from the MMU assembly helpers.
+ */
+uint64_t mmu_cfg_params[MMU_CFG_PARAM_MAX];
+
 /*
  * Each platform can define the size of its physical and virtual address spaces.
  * If the platform hasn't defined one or both of them, default to
@@ -69,6 +76,17 @@ int mmap_remove_dynamic_region(uintptr_t base_va, size_t size)
 
 void init_xlat_tables(void)
 {
+       assert(tf_xlat_ctx.xlat_regime == EL_REGIME_INVALID);
+
+       int current_el = xlat_arch_current_el();
+
+       if (current_el == 1) {
+               tf_xlat_ctx.xlat_regime = EL1_EL0_REGIME;
+       } else {
+               assert(current_el == 3);
+               tf_xlat_ctx.xlat_regime = EL3_REGIME;
+       }
+
        init_xlat_tables_ctx(&tf_xlat_ctx);
 }
 
@@ -93,8 +111,9 @@ void init_xlat_tables(void)
 
 void enable_mmu_secure(unsigned int flags)
 {
-       setup_mmu_cfg(flags, tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
-                       tf_xlat_ctx.va_max_address);
+       setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
+                     tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
+                     tf_xlat_ctx.va_max_address, EL1_EL0_REGIME);
        enable_mmu_direct(flags);
 }
 
@@ -102,15 +121,17 @@ void enable_mmu_secure(unsigned int flags)
 
 void enable_mmu_el1(unsigned int flags)
 {
-       setup_mmu_cfg(flags, tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
-                       tf_xlat_ctx.va_max_address);
+       setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
+                     tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
+                     tf_xlat_ctx.va_max_address, EL1_EL0_REGIME);
        enable_mmu_direct_el1(flags);
 }
 
 void enable_mmu_el3(unsigned int flags)
 {
-       setup_mmu_cfg(flags, tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
-                       tf_xlat_ctx.va_max_address);
+       setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
+                     tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
+                     tf_xlat_ctx.va_max_address, EL3_REGIME);
        enable_mmu_direct_el3(flags);
 }
 
index f555524a958b27c82f784217efc0cae54d7cac5b..e3306e6df78fff4ad1aa8242f21641e72ced12d9 100644 (file)
@@ -12,7 +12,6 @@
 #include <string.h>
 #include <types.h>
 #include <utils_def.h>
-#include <xlat_tables_arch_private.h>
 #include <xlat_tables_defs.h>
 #include <xlat_tables_v2.h>
 
@@ -104,12 +103,14 @@ uint64_t xlat_desc(const xlat_ctx_t *ctx, uint32_t attr,
         */
        desc |= (level == XLAT_TABLE_LEVEL_MAX) ? PAGE_DESC : BLOCK_DESC;
        /*
-        * Always set the access flag, as TF doesn't manage access flag faults.
+        * Always set the access flag, as this library assumes access flag
+        * faults aren't managed.
+        */
+       desc |= LOWER_ATTRS(ACCESS_FLAG);
+       /*
         * Deduce other fields of the descriptor based on the MT_NS and MT_RW
         * memory region attributes.
         */
-       desc |= LOWER_ATTRS(ACCESS_FLAG);
-
        desc |= (attr & MT_NS) ? LOWER_ATTRS(NS) : 0;
        desc |= (attr & MT_RW) ? LOWER_ATTRS(AP_RW) : LOWER_ATTRS(AP_RO);
 
@@ -155,9 +156,10 @@ uint64_t xlat_desc(const xlat_ctx_t *ctx, uint32_t attr,
        } else { /* Normal memory */
                /*
                 * Always map read-write normal memory as execute-never.
-                * (Trusted Firmware doesn't self-modify its code, therefore
-                * R/W memory is reserved for data storage, which must not be
-                * executable.)
+                * This library assumes that it is used by software that does
+                * not self-modify its code, therefore R/W memory is reserved
+                * for data storage, which must not be executable.
+                *
                 * Note that setting the XN bit here is for consistency only.
                 * The function that enables the MMU sets the SCTLR_ELx.WXN bit,
                 * which makes any writable memory region to be treated as
@@ -311,7 +313,7 @@ static void xlat_tables_unmap_region(xlat_ctx_t *ctx, mmap_region_t *mm,
                if (action == ACTION_WRITE_BLOCK_ENTRY) {
 
                        table_base[table_idx] = INVALID_DESC;
-                       xlat_arch_tlbi_va_regime(table_idx_va, ctx->xlat_regime);
+                       xlat_arch_tlbi_va(table_idx_va, ctx->xlat_regime);
 
                } else if (action == ACTION_RECURSE_INTO_TABLE) {
 
@@ -327,8 +329,8 @@ static void xlat_tables_unmap_region(xlat_ctx_t *ctx, mmap_region_t *mm,
                         */
                        if (xlat_table_is_empty(ctx, subtable)) {
                                table_base[table_idx] = INVALID_DESC;
-                               xlat_arch_tlbi_va_regime(table_idx_va,
-                                               ctx->xlat_regime);
+                               xlat_arch_tlbi_va(table_idx_va,
+                                                 ctx->xlat_regime);
                        }
 
                } else {
index 4a54ec5d0196c4c6203c79330ec38f8454dce733..93640ddbce7e33df3071c5d232dee49f044efb3f 100644 (file)
 
 #endif /* PLAT_XLAT_TABLES_DYNAMIC */
 
+/*
+ * Return the execute-never mask that will prevent instruction fetch at the
+ * given translation regime.
+ */
+uint64_t xlat_arch_regime_get_xn_desc(int xlat_regime);
+
 /*
  * Invalidate all TLB entries that match the given virtual address. This
  * operation applies to all PEs in the same Inner Shareable domain as the PE
  * that executes this function. This functions must be called for every
- * translation table entry that is modified.
- *
- * xlat_arch_tlbi_va() applies the invalidation to the exception level of the
- * current translation regime, whereas xlat_arch_tlbi_va_regime() applies it to
- * the given translation regime.
+ * translation table entry that is modified. It only affects the specified
+ * translation regime.
  *
  * Note, however, that it is architecturally UNDEFINED to invalidate TLB entries
  * pertaining to a higher exception level, e.g. invalidating EL3 entries from
  * S-EL1.
  */
-void xlat_arch_tlbi_va(uintptr_t va);
-void xlat_arch_tlbi_va_regime(uintptr_t va, int xlat_regime);
+void xlat_arch_tlbi_va(uintptr_t va, int xlat_regime);
 
 /*
  * This function has to be called at the end of any code that uses the function
@@ -86,10 +88,6 @@ int xlat_arch_current_el(void);
  */
 unsigned long long xlat_arch_get_max_supported_pa(void);
 
-/* Enable MMU and configure it to use the specified translation tables. */
-void setup_mmu_cfg(unsigned int flags, const uint64_t *base_table,
-               unsigned long long max_pa, uintptr_t max_va);
-
 /*
  * Return 1 if the MMU of the translation regime managed by the given xlat_ctx_t
  * is enabled, 0 otherwise.
index 5a78434ab42590320d371648c2c3117f39034cb7..90a0a86269d5fa9f98cd1e5003e15c67484b00b8 100644 (file)
@@ -11,7 +11,6 @@
 #include <platform_def.h>
 #include <types.h>
 #include <utils_def.h>
-#include <xlat_tables_arch_private.h>
 #include <xlat_tables_defs.h>
 #include <xlat_tables_v2.h>
 
@@ -544,7 +543,7 @@ int change_mem_attributes(xlat_ctx_t *ctx,
                *entry = INVALID_DESC;
 
                /* Invalidate any cached copy of this mapping in the TLBs. */
-               xlat_arch_tlbi_va_regime(base_va, ctx->xlat_regime);
+               xlat_arch_tlbi_va(base_va, ctx->xlat_regime);
 
                /* Ensure completion of the invalidation. */
                xlat_arch_tlbi_va_sync();
index 638f66a7ce9d95ccfd3cbb23dcbddbb3a4f7aae9..76fede821dd82efce3cb41a4d23f0e47551b560a 100644 (file)
@@ -66,6 +66,14 @@ MediaTek platform ports
 :G: `mtk09422`_
 :F: plat/mediatek/
 
+Marvell platform ports and SoC drivers
+--------------------------------------
+:M: Konstantin Porotchkin <kostap@marvell.com>
+:G: `kostapr`_
+:F: docs/plat/marvell/
+:F: plat/marvell/
+:F: drivers/marvell/
+
 NVidia platform ports
 ---------------------
 :M: Varun Wadekar <vwadekar@nvidia.com>
@@ -165,6 +173,7 @@ Xilinx platform port
 .. _glneo: https://github.com/glneo
 .. _hzhuang1: https://github.com/hzhuang1
 .. _jenswi-linaro: https://github.com/jenswi-linaro
+.. _kostapr: https://github.com/kostapr
 .. _masahir0y: https://github.com/masahir0y
 .. _mtk09422: https://github.com/mtk09422
 .. _qoriq-open-source: https://github.com/qoriq-open-source
index 2a6ded497064a09e24266ad5f94effd1803f9fa1..1184b7af8996f6b4df30b28ebcf768b069d47b2a 100644 (file)
@@ -290,6 +290,7 @@ define MAKE_BL
         $(eval DUMP       := $(call IMG_DUMP,$(1)))
         $(eval BIN        := $(call IMG_BIN,$(1)))
         $(eval BL_LINKERFILE := $(BL$(call uppercase,$(1))_LINKERFILE))
+        $(eval BL_LIBS    := $(BL$(call uppercase,$(1))_LIBS))
         # We use sort only to get a list of unique object directory names.
         # ordering is not relevant but sort removes duplicates.
         $(eval TEMP_OBJ_DIRS := $(sort $(dir ${OBJS} ${LINKERFILE})))
@@ -312,7 +313,7 @@ bl${1}_dirs: | ${OBJ_DIRS}
 $(eval $(call MAKE_OBJS,$(BUILD_DIR),$(SOURCES),$(1)))
 $(eval $(call MAKE_LD,$(LINKERFILE),$(BL_LINKERFILE),$(1)))
 
-$(ELF): $(OBJS) $(LINKERFILE) | bl$(1)_dirs
+$(ELF): $(OBJS) $(LINKERFILE) | bl$(1)_dirs $(BL_LIBS)
        @echo "  LD      $$@"
 ifdef MAKE_BUILD_STRINGS
        $(call MAKE_BUILD_STRINGS, $(BUILD_DIR)/build_message.o)
@@ -322,7 +323,7 @@ else
                $$(CC) $$(TF_CFLAGS) $$(CFLAGS) -xc -c - -o $(BUILD_DIR)/build_message.o
 endif
        $$(Q)$$(LD) -o $$@ $$(TF_LDFLAGS) $$(LDFLAGS) -Map=$(MAPFILE) \
-               --script $(LINKERFILE) $(BUILD_DIR)/build_message.o $(OBJS) $(LDLIBS)
+               --script $(LINKERFILE) $(BUILD_DIR)/build_message.o $(OBJS) $(LDLIBS) $(BL_LIBS)
 
 $(DUMP): $(ELF)
        @echo "  OD      $$@"
index 361e84de2ac2b99f15720012c95ceca2011b0620..b9ab3f36e8701b830825ca82ae4d1e04bc6563c1 100644 (file)
@@ -9,7 +9,7 @@
 #include "fvp_def.h"
 #include "fvp_private.h"
 
-void bl2u_early_platform_setup(meminfo_t *mem_layout, void *plat_info)
+void bl2u_early_platform_setup(struct meminfo *mem_layout, void *plat_info)
 {
        arm_bl2u_early_platform_setup(mem_layout, plat_info);
 
index ed41d4cb8606544c1cc5c73881d796802e22446d..2b1e0ac70f2631be3c566bd109b0ae6d81371659 100644 (file)
@@ -116,7 +116,8 @@ FVP_CPU_LIBS                +=      lib/cpus/aarch64/cortex_a35.S                   \
                                lib/cpus/aarch64/cortex_a73.S                   \
                                lib/cpus/aarch64/cortex_a75.S                   \
                                lib/cpus/aarch64/cortex_a76.S                   \
-                               lib/cpus/aarch64/cortex_ares.S
+                               lib/cpus/aarch64/cortex_ares.S                  \
+                               lib/cpus/aarch64/cortex_deimos.S
 else
 FVP_CPU_LIBS           +=      lib/cpus/aarch32/cortex_a32.S
 endif
index 180eae98b79ec94385f91662a7e24de95f9f9edc..94839767dbe88b46dc6153add4041b87076f6780 100644 (file)
@@ -41,7 +41,7 @@
 /* Data structure which holds the extents of the trusted SRAM for BL1*/
 static meminfo_t bl1_tzram_layout;
 
-meminfo_t *bl1_plat_sec_mem_layout(void)
+struct meminfo *bl1_plat_sec_mem_layout(void)
 {
        return &bl1_tzram_layout;
 }
@@ -160,7 +160,7 @@ void bl1_plat_prepare_exit(entry_point_info_t *ep_info)
         * in order to release secondary CPUs from their holding pen and make
         * them jump there.
         */
-       arm_program_trusted_mailbox(ep_info->pc);
+       plat_arm_program_trusted_mailbox(ep_info->pc);
        dsbsy();
        sev();
 #endif
index 88bf1e6cd7cb11e2d88ed9304f1b3139920b0359..88c0bc9239ba6c2b954ebb9d4473424504124f8b 100644 (file)
@@ -177,7 +177,8 @@ struct entry_point_info *bl2_plat_get_bl31_ep_info(void)
  * in x0. This memory layout is sitting at the base of the free trusted SRAM.
  * Copy it to a safe location before its reclaimed by later BL2 functionality.
  ******************************************************************************/
-void arm_bl2_early_platform_setup(uintptr_t tb_fw_config, meminfo_t *mem_layout)
+void arm_bl2_early_platform_setup(uintptr_t tb_fw_config,
+                                 struct meminfo *mem_layout)
 {
        /* Initialize the console to provide early debug support */
        arm_console_boot_init();
index 77d1186c7f8daaf7d4794a8616f4db28963480ad..2bf8a936e5ef913810c4ec4c0a0972647a260f63 100644 (file)
@@ -38,7 +38,7 @@ void bl2u_platform_setup(void)
        arm_bl2u_platform_setup();
 }
 
-void arm_bl2u_early_platform_setup(meminfo_t *mem_layout, void *plat_info)
+void arm_bl2u_early_platform_setup(struct meminfo *mem_layout, void *plat_info)
 {
        /* Initialize the console to provide early debug support */
        arm_console_boot_init();
@@ -52,7 +52,7 @@ void arm_bl2u_early_platform_setup(meminfo_t *mem_layout, void *plat_info)
  * In case of ARM FVP platforms x1 is not used.
  * In both cases, x0 contains the extents of the memory available to BL2U
  ******************************************************************************/
-void bl2u_early_platform_setup(meminfo_t *mem_layout, void *plat_info)
+void bl2u_early_platform_setup(struct meminfo *mem_layout, void *plat_info)
 {
        arm_bl2u_early_platform_setup(mem_layout, plat_info);
 }
index 6be2933478383e17ebcc64b6fcbeeebb6eba183f..557854c52e916ede70eb48367c696ff458c1df3d 100644 (file)
@@ -48,7 +48,7 @@ CASSERT(BL31_BASE >= ARM_TB_FW_CONFIG_LIMIT, assert_bl31_base_overflows);
  * while BL32 corresponds to the secure image type. A NULL pointer is returned
  * if the image does not exist.
  ******************************************************************************/
-entry_point_info_t *bl31_plat_get_next_image_ep_info(uint32_t type)
+struct entry_point_info *bl31_plat_get_next_image_ep_info(uint32_t type)
 {
        entry_point_info_t *next_image_info;
 
index 52d22684611a6934a8cde1cd3c394fab22f6fb7d..f83005f64c69e678715c308ab4752e18f01560c1 100644 (file)
@@ -14,8 +14,6 @@
 #include <platform.h>
 #include <secure_partition.h>
 
-extern const mmap_region_t plat_arm_mmap[];
-
 /* Weak definitions may be overridden in specific ARM standard platform */
 #pragma weak plat_get_ns_image_entrypoint
 #pragma weak plat_arm_get_mmap
index 533084708063a9d9c525568eca5957bac561000a..67b574de6c70f0c9023d0217d94798cafd4a1203 100644 (file)
@@ -166,7 +166,7 @@ BL1_SOURCES         +=      drivers/arm/sp805/sp805.c                       \
                                plat/arm/common/arm_err.c                       \
                                plat/arm/common/arm_io_storage.c
 ifdef EL3_PAYLOAD_BASE
-# Need the arm_program_trusted_mailbox() function to release secondary CPUs from
+# Need the plat_arm_program_trusted_mailbox() function to release secondary CPUs from
 # their holding pen
 BL1_SOURCES            +=      plat/arm/common/arm_pm.c
 endif
index 916fa8dc89014e94ed7dd4ea6bc19b513b0f375c..4f86efdf9254ab80c49ad17f2f7a2e339756baf4 100644 (file)
@@ -28,7 +28,7 @@ void plat_flush_next_bl_params(void)
 /*******************************************************************************
  * This function returns the list of loadable images.
  ******************************************************************************/
-bl_load_info_t *plat_get_bl_image_load_info(void)
+struct bl_load_info *plat_get_bl_image_load_info(void)
 {
        return get_bl_load_info_from_mem_params_desc();
 }
@@ -36,7 +36,7 @@ bl_load_info_t *plat_get_bl_image_load_info(void)
 /*******************************************************************************
  * This function returns the list of executable images.
  ******************************************************************************/
-bl_params_t *plat_get_next_bl_params(void)
+struct bl_params *plat_get_next_bl_params(void)
 {
        bl_params_t *next_bl_params = get_next_bl_params_from_mem_params_desc();
 
index 4632099e7f49611bff6dea3fbdedc72866678770..d0350d6c5d5cb5b5c2bf80c8cbad9836a10b0307 100644 (file)
 #include <platform_def.h>
 #include <psci.h>
 
-/* Allow ARM Standard platforms to override this function */
+/* Allow ARM Standard platforms to override these functions */
 #pragma weak plat_arm_psci_override_pm_ops
-
-/* Standard ARM platforms are expected to export plat_arm_psci_pm_ops */
-extern plat_psci_ops_t plat_arm_psci_pm_ops;
+#pragma weak plat_arm_program_trusted_mailbox
 
 #if ARM_RECOM_STATE_ID_ENC
 extern unsigned int arm_pm_idle_states[];
@@ -192,11 +190,11 @@ void arm_system_pwr_domain_resume(void)
 }
 
 /*******************************************************************************
- * Private function to program the mailbox for a cpu before it is released
+ * ARM platform function to program the mailbox for a cpu before it is released
  * from reset. This function assumes that the Trusted mail box base is within
  * the ARM_SHARED_RAM region
  ******************************************************************************/
-void arm_program_trusted_mailbox(uintptr_t address)
+void plat_arm_program_trusted_mailbox(uintptr_t address)
 {
        uintptr_t *mailbox = (void *) PLAT_ARM_TRUSTED_MAILBOX_BASE;
 
@@ -221,6 +219,6 @@ int plat_setup_psci_ops(uintptr_t sec_entrypoint,
        *psci_ops = plat_arm_psci_override_pm_ops(&plat_arm_psci_pm_ops);
 
        /* Setup mailbox with entry point. */
-       arm_program_trusted_mailbox(sec_entrypoint);
+       plat_arm_program_trusted_mailbox(sec_entrypoint);
        return 0;
 }
index 72d5527b65d0680ba76778731e0de77eb3008bd0..29dd01d19c340c04a0dfab174bba7d08fb8d9bbc 100644 (file)
@@ -32,6 +32,7 @@ BL31_SOURCES          +=      plat/arm/css/drivers/scp/css_pm_scpi.c          \
                                plat/arm/css/drivers/scpi/css_scpi.c
 else
 BL31_SOURCES           +=      plat/arm/css/drivers/scp/css_pm_scmi.c          \
+                               plat/arm/css/drivers/scmi/scmi_ap_core_proto.c  \
                                plat/arm/css/drivers/scmi/scmi_common.c         \
                                plat/arm/css/drivers/scmi/scmi_pwr_dmn_proto.c  \
                                plat/arm/css/drivers/scmi/scmi_sys_pwr_proto.c  \
index b9faf679b340960011328e1fc5ff7694ebc95730..54f3e057d7e3af421ac3207b86ed74b77e38a990 100644 (file)
@@ -9,7 +9,7 @@
 #include "css_mhu_doorbell.h"
 #include "../scmi/scmi.h"
 
-void mhu_ring_doorbell(scmi_channel_plat_info_t *plat_info)
+void mhu_ring_doorbell(struct scmi_channel_plat_info *plat_info)
 {
        MHU_RING_DOORBELL(plat_info->db_reg_addr,
                        plat_info->db_modify_mask,
@@ -17,7 +17,7 @@ void mhu_ring_doorbell(scmi_channel_plat_info_t *plat_info)
        return;
 }
 
-void mhuv2_ring_doorbell(scmi_channel_plat_info_t *plat_info)
+void mhuv2_ring_doorbell(struct scmi_channel_plat_info *plat_info)
 {
        /* wake receiver */
        MHU_V2_ACCESS_REQUEST(MHUV2_BASE_ADDR);
index cf9ef5e9a00054254cfcbcb2442ad1b31a5ec029..723fd06edc21e5470bbc445717c258a44e82cc5b 100644 (file)
@@ -12,6 +12,7 @@
 #include <stdint.h>
 
 /* Supported SCMI Protocol Versions */
+#define SCMI_AP_CORE_PROTO_VER                 MAKE_SCMI_VERSION(1, 0)
 #define SCMI_PWR_DMN_PROTO_VER                 MAKE_SCMI_VERSION(1, 0)
 #define SCMI_SYS_PWR_PROTO_VER                 MAKE_SCMI_VERSION(1, 0)
 
@@ -29,6 +30,8 @@
 /* SCMI Protocol identifiers */
 #define SCMI_PWR_DMN_PROTO_ID                  0x11
 #define SCMI_SYS_PWR_PROTO_ID                  0x12
+/* The AP core protocol is a CSS platform-specific extension */
+#define SCMI_AP_CORE_PROTO_ID                  0x90
 
 /* Mandatory messages IDs for all SCMI protocols */
 #define SCMI_PROTO_VERSION_MSG                 0x0
 #define SCMI_SYS_PWR_STATE_SET_MSG             0x3
 #define SCMI_SYS_PWR_STATE_GET_MSG             0x4
 
+/* SCMI AP core protocol message IDs */
+#define SCMI_AP_CORE_RESET_ADDR_SET_MSG                0x3
+#define SCMI_AP_CORE_RESET_ADDR_GET_MSG                0x4
+
 /* Helper macros for system power management protocol commands */
 
 /*
 #define SCMI_SYS_PWR_POWER_UP                  0x3
 #define SCMI_SYS_PWR_SUSPEND                   0x4
 
+/*
+ * Macros to describe the bit-fields of the `attribute` of AP core protocol
+ * AP_CORE_RESET_ADDR set/get messages.
+ */
+#define SCMI_AP_CORE_LOCK_ATTR_SHIFT           0x0
+#define SCMI_AP_CORE_LOCK_ATTR                 (1U << SCMI_AP_CORE_LOCK_ATTR_SHIFT)
+
 /* SCMI Error code definitions */
 #define SCMI_E_QUEUED                  1
 #define SCMI_E_SUCCESS                 0
@@ -133,4 +147,8 @@ int scmi_pwr_state_get(void *p, uint32_t domain_id, uint32_t *scmi_pwr_state);
 int scmi_sys_pwr_state_set(void *p, uint32_t flags, uint32_t system_state);
 int scmi_sys_pwr_state_get(void *p, uint32_t *system_state);
 
+/* SCMI AP core configuration protocol commands. */
+int scmi_ap_core_set_reset_addr(void *p, uint64_t reset_addr, uint32_t attr);
+int scmi_ap_core_get_reset_addr(void *p, uint64_t *reset_addr, uint32_t *attr);
+
 #endif /* __CSS_SCMI_H__ */
diff --git a/plat/arm/css/drivers/scmi/scmi_ap_core_proto.c b/plat/arm/css/drivers/scmi/scmi_ap_core_proto.c
new file mode 100644 (file)
index 0000000..1438cba
--- /dev/null
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <debug.h>
+#include "scmi.h"
+#include "scmi_private.h"
+
+/*
+ * API to set the SCMI AP core reset address and attributes
+ */
+int scmi_ap_core_set_reset_addr(void *p, uint64_t reset_addr, uint32_t attr)
+{
+       mailbox_mem_t *mbx_mem;
+       int token = 0, ret;
+       scmi_channel_t *ch = (scmi_channel_t *)p;
+
+       validate_scmi_channel(ch);
+
+       scmi_get_channel(ch);
+
+       mbx_mem = (mailbox_mem_t *)(ch->info->scmi_mbx_mem);
+       mbx_mem->msg_header = SCMI_MSG_CREATE(SCMI_AP_CORE_PROTO_ID,
+                       SCMI_AP_CORE_RESET_ADDR_SET_MSG, token);
+       mbx_mem->len = SCMI_AP_CORE_RESET_ADDR_SET_MSG_LEN;
+       mbx_mem->flags = SCMI_FLAG_RESP_POLL;
+       SCMI_PAYLOAD_ARG3(mbx_mem->payload, reset_addr & 0xffffffff,
+               reset_addr >> 32, attr);
+
+       scmi_send_sync_command(ch);
+
+       /* Get the return values */
+       SCMI_PAYLOAD_RET_VAL1(mbx_mem->payload, ret);
+       assert(mbx_mem->len == SCMI_AP_CORE_RESET_ADDR_SET_RESP_LEN);
+       assert(token == SCMI_MSG_GET_TOKEN(mbx_mem->msg_header));
+
+       scmi_put_channel(ch);
+
+       return ret;
+}
+
+/*
+ * API to get the SCMI AP core reset address and attributes
+ */
+int scmi_ap_core_get_reset_addr(void *p, uint64_t *reset_addr, uint32_t *attr)
+{
+       mailbox_mem_t *mbx_mem;
+       int token = 0, ret;
+       scmi_channel_t *ch = (scmi_channel_t *)p;
+       uint32_t lo_addr, hi_addr;
+
+       validate_scmi_channel(ch);
+
+       scmi_get_channel(ch);
+
+       mbx_mem = (mailbox_mem_t *)(ch->info->scmi_mbx_mem);
+       mbx_mem->msg_header = SCMI_MSG_CREATE(SCMI_AP_CORE_PROTO_ID,
+                       SCMI_AP_CORE_RESET_ADDR_GET_MSG, token);
+       mbx_mem->len = SCMI_AP_CORE_RESET_ADDR_GET_MSG_LEN;
+       mbx_mem->flags = SCMI_FLAG_RESP_POLL;
+
+       scmi_send_sync_command(ch);
+
+       /* Get the return values */
+       SCMI_PAYLOAD_RET_VAL4(mbx_mem->payload, ret, lo_addr, hi_addr, *attr);
+       *reset_addr = lo_addr | (uint64_t)hi_addr << 32;
+       assert(mbx_mem->len == SCMI_AP_CORE_RESET_ADDR_GET_RESP_LEN);
+       assert(token == SCMI_MSG_GET_TOKEN(mbx_mem->msg_header));
+
+       scmi_put_channel(ch);
+
+       return ret;
+}
index 67fe7481ff48085b6cc4f7288933881a3f6b6682..39bc8ccb34df6e7d45f5d8d7bf84245183f5258a 100644 (file)
 #define SCMI_PROTO_MSG_ATTR_MSG_LEN            8
 #define SCMI_PROTO_MSG_ATTR_RESP_LEN           12
 
+#define SCMI_AP_CORE_RESET_ADDR_SET_MSG_LEN    16
+#define SCMI_AP_CORE_RESET_ADDR_SET_RESP_LEN   8
+
+#define SCMI_AP_CORE_RESET_ADDR_GET_MSG_LEN    4
+#define SCMI_AP_CORE_RESET_ADDR_GET_RESP_LEN   20
+
 #define SCMI_PWR_STATE_SET_MSG_LEN             16
 #define SCMI_PWR_STATE_SET_RESP_LEN            8
 
                (val3) = mmio_read_32((uintptr_t)&payld_arr[2]);        \
        } while (0)
 
+#define SCMI_PAYLOAD_RET_VAL4(payld_arr, val1, val2, val3, val4)       do {    \
+               SCMI_PAYLOAD_RET_VAL3(payld_arr, val1, val2, val3);             \
+               (val4) = mmio_read_32((uintptr_t)&payld_arr[3]);                \
+       } while (0)
+
 /*
  * Private data structure for representing the mailbox memory layout. Refer
  * the SCMI specification for more details.
index 91ea63a44e4f9916a58b135d05428094c2459739..7032267715cb53457fc435282ff5f14d818275ce 100644 (file)
@@ -142,7 +142,7 @@ void css_scp_suspend(const struct psci_power_state *target_state)
  * Helper function to turn off a CPU power domain and its parent power domains
  * if applicable.
  */
-void css_scp_off(const psci_power_state_t *target_state)
+void css_scp_off(const struct psci_power_state *target_state)
 {
        int lvl = 0, ret;
        uint32_t scmi_pwr_state = 0;
@@ -298,7 +298,7 @@ void __dead2 css_scp_sys_reboot(void)
        css_scp_system_off(SCMI_SYS_PWR_COLD_RESET);
 }
 
-scmi_channel_plat_info_t plat_css_scmi_plat_info = {
+static scmi_channel_plat_info_t plat_css_scmi_plat_info = {
                .scmi_mbx_mem = CSS_SCMI_PAYLOAD_BASE,
                .db_reg_addr = PLAT_CSS_MHU_BASE + CSS_SCMI_MHU_DB_REG_OFF,
                .db_preserve_mask = 0xfffffffe,
@@ -306,6 +306,28 @@ scmi_channel_plat_info_t plat_css_scmi_plat_info = {
                .ring_doorbell = &mhu_ring_doorbell,
 };
 
+static int scmi_ap_core_init(scmi_channel_t *ch)
+{
+#if PROGRAMMABLE_RESET_ADDRESS
+       uint32_t version;
+       int ret;
+
+       ret = scmi_proto_version(ch, SCMI_AP_CORE_PROTO_ID, &version);
+       if (ret != SCMI_E_SUCCESS) {
+               WARN("SCMI AP core protocol version message failed\n");
+               return -1;
+       }
+
+       if (!is_scmi_version_compatible(SCMI_AP_CORE_PROTO_VER, version)) {
+               WARN("SCMI AP core protocol version 0x%x incompatible with driver version 0x%x\n",
+                       version, SCMI_AP_CORE_PROTO_VER);
+               return -1;
+       }
+       INFO("SCMI AP core protocol version 0x%x detected\n", version);
+#endif
+       return 0;
+}
+
 void plat_arm_pwrc_setup(void)
 {
        channel.info = &plat_css_scmi_plat_info;
@@ -315,6 +337,10 @@ void plat_arm_pwrc_setup(void)
                ERROR("SCMI Initialization failed\n");
                panic();
        }
+       if (scmi_ap_core_init(&channel) < 0) {
+               ERROR("SCMI AP core protocol initialization failed\n");
+               panic();
+       }
 }
 
 /******************************************************************************
@@ -386,3 +412,18 @@ int css_system_reset2(int is_vendor, int reset_type, u_register_t cookie)
         */
        return 0;
 }
+
+#if PROGRAMMABLE_RESET_ADDRESS
+void plat_arm_program_trusted_mailbox(uintptr_t address)
+{
+       int ret;
+
+       assert(scmi_handle);
+       ret = scmi_ap_core_set_reset_addr(scmi_handle, address,
+               SCMI_AP_CORE_LOCK_ATTR);
+       if (ret != SCMI_E_SUCCESS) {
+               ERROR("CSS: Failed to program reset address: %d\n", ret);
+               panic();
+       }
+}
+#endif
index 18e71f6e3ca3bbbcd3cf61dcfdf6ed442eb49809..123d54f4e5140b6f9266cd750c40e8c146fa8bc4 100644 (file)
@@ -47,7 +47,7 @@ void css_scp_suspend(const struct psci_power_state *target_state)
  * if applicable. Since SCPI doesn't differentiate between OFF and suspend, we
  * call the suspend helper here.
  */
-void css_scp_off(const psci_power_state_t *target_state)
+void css_scp_off(const struct psci_power_state *target_state)
 {
        css_scp_suspend(target_state);
 }
index a7a51ba2df4d9c533cde1d334f8cd6eb918928f2..561e97b2e4f2fdd63e3c1436ee94f21b20925db9 100644 (file)
@@ -11,6 +11,7 @@
 #include <delay_timer.h>
 #include <platform.h>
 #include <stdint.h>
+#include "css_scp.h"
 #include "../sds/sds.h"
 
 int css_scp_boot_image_xfer(void *image, unsigned int image_size)
index ff3787da09165124e143707a66cd3b382dfea87f..4aef0df9ffde74b6819fcb8a7b250aba4c2fcd78 100644 (file)
@@ -80,7 +80,7 @@ typedef enum {
 } sds_access_mode_t;
 
 int sds_init(void);
-int sds_struct_exists(uint32_t structure_id);
+int sds_struct_exists(unsigned int structure_id);
 int sds_struct_read(uint32_t structure_id, unsigned int fld_off, void *data,
                size_t size, sds_access_mode_t mode);
 int sds_struct_write(uint32_t structure_id, unsigned int fld_off, void *data,
index a413f5fd1f000479ebe7908bde5c6e3f1be5ad13..a5d26c0133e0b62430b0f336263dc3d45691d2bc 100644 (file)
        .weak   bl31_plat_enable_mmu
        .weak   bl32_plat_enable_mmu
 
+       .weak   plat_handle_uncontainable_ea
+       .weak   plat_handle_double_fault
+       .weak   plat_handle_el3_ea
+
 #if !ENABLE_PLAT_COMPAT
        .globl  platform_get_core_pos
 
@@ -186,3 +190,34 @@ endfunc bl31_plat_enable_mmu
 func bl32_plat_enable_mmu
        b       enable_mmu_direct_el1
 endfunc bl32_plat_enable_mmu
+
+
+       /* -----------------------------------------------------
+        * Platform handler for Uncontainable External Abort.
+        *
+        * x0: EA reason
+        * x1: EA syndrome
+        * -----------------------------------------------------
+        */
+func plat_handle_uncontainable_ea
+       b       report_unhandled_exception
+endfunc plat_handle_uncontainable_ea
+
+       /* -----------------------------------------------------
+        * Platform handler for Double Fault.
+        *
+        * x0: EA reason
+        * x1: EA syndrome
+        * -----------------------------------------------------
+        */
+func plat_handle_double_fault
+       b       report_unhandled_exception
+endfunc plat_handle_double_fault
+
+       /* -----------------------------------------------------
+        * Platform handler for EL3 External Abort.
+        * -----------------------------------------------------
+        */
+func plat_handle_el3_ea
+       b       report_unhandled_exception
+endfunc plat_handle_el3_ea
index c5bbe74381d83e0bc0e7453f54d57f66e45629c8..6777979952d8a63980b4df8897434c3768ecfad4 100644 (file)
@@ -34,7 +34,7 @@ unsigned int bl1_plat_get_next_image_id(void)
 }
 
 void bl1_plat_set_ep_info(unsigned int image_id,
-               entry_point_info_t *ep_info)
+               struct entry_point_info *ep_info)
 {
 
 }
@@ -48,7 +48,7 @@ int bl1_plat_handle_pre_image_load(unsigned int image_id)
  * Following is the default definition that always
  * returns BL2 image details.
  */
-image_desc_t *bl1_plat_get_image_desc(unsigned int image_id)
+struct image_desc *bl1_plat_get_image_desc(unsigned int image_id)
 {
        static image_desc_t bl2_img_desc = BL2_IMAGE_DESC;
        return &bl2_img_desc;
index 90c2f8165d8178fa6ec55541e758355b757954a7..ef552244887dc11ec6c869fd5f5c2884d5300cd1 100644 (file)
@@ -89,6 +89,44 @@ static const io_uuid_spec_t scp_bl2_uuid_spec = {
        .uuid = UUID_SCP_FIRMWARE_SCP_BL2,
 };
 
+#if TRUSTED_BOARD_BOOT
+static const io_uuid_spec_t trusted_key_cert_uuid_spec = {
+       .uuid = UUID_TRUSTED_KEY_CERT,
+};
+
+static const io_uuid_spec_t scp_fw_key_cert_uuid_spec = {
+       .uuid = UUID_SCP_FW_KEY_CERT,
+};
+
+static const io_uuid_spec_t soc_fw_key_cert_uuid_spec = {
+       .uuid = UUID_SOC_FW_KEY_CERT,
+};
+
+static const io_uuid_spec_t tos_fw_key_cert_uuid_spec = {
+       .uuid = UUID_TRUSTED_OS_FW_KEY_CERT,
+};
+
+static const io_uuid_spec_t nt_fw_key_cert_uuid_spec = {
+       .uuid = UUID_NON_TRUSTED_FW_KEY_CERT,
+};
+
+static const io_uuid_spec_t scp_fw_cert_uuid_spec = {
+       .uuid = UUID_SCP_FW_CONTENT_CERT,
+};
+
+static const io_uuid_spec_t soc_fw_cert_uuid_spec = {
+       .uuid = UUID_SOC_FW_CONTENT_CERT,
+};
+
+static const io_uuid_spec_t tos_fw_cert_uuid_spec = {
+       .uuid = UUID_TRUSTED_OS_FW_CONTENT_CERT,
+};
+
+static const io_uuid_spec_t nt_fw_cert_uuid_spec = {
+       .uuid = UUID_NON_TRUSTED_FW_CONTENT_CERT,
+};
+#endif /* TRUSTED_BOARD_BOOT */
+
 static const struct plat_io_policy policies[] = {
        [FIP_IMAGE_ID] = {
                &emmc_dev_handle,
@@ -124,7 +162,54 @@ static const struct plat_io_policy policies[] = {
                &fip_dev_handle,
                (uintptr_t)&bl33_uuid_spec,
                check_fip
-       }
+       },
+#if TRUSTED_BOARD_BOOT
+       [TRUSTED_KEY_CERT_ID] = {
+               &fip_dev_handle,
+               (uintptr_t)&trusted_key_cert_uuid_spec,
+               check_fip
+       },
+       [SCP_FW_KEY_CERT_ID] = {
+               &fip_dev_handle,
+               (uintptr_t)&scp_fw_key_cert_uuid_spec,
+               check_fip
+       },
+       [SOC_FW_KEY_CERT_ID] = {
+               &fip_dev_handle,
+               (uintptr_t)&soc_fw_key_cert_uuid_spec,
+               check_fip
+       },
+       [TRUSTED_OS_FW_KEY_CERT_ID] = {
+               &fip_dev_handle,
+               (uintptr_t)&tos_fw_key_cert_uuid_spec,
+               check_fip
+       },
+       [NON_TRUSTED_FW_KEY_CERT_ID] = {
+               &fip_dev_handle,
+               (uintptr_t)&nt_fw_key_cert_uuid_spec,
+               check_fip
+       },
+       [SCP_FW_CONTENT_CERT_ID] = {
+               &fip_dev_handle,
+               (uintptr_t)&scp_fw_cert_uuid_spec,
+               check_fip
+       },
+       [SOC_FW_CONTENT_CERT_ID] = {
+               &fip_dev_handle,
+               (uintptr_t)&soc_fw_cert_uuid_spec,
+               check_fip
+       },
+       [TRUSTED_OS_FW_CONTENT_CERT_ID] = {
+               &fip_dev_handle,
+               (uintptr_t)&tos_fw_cert_uuid_spec,
+               check_fip
+       },
+       [NON_TRUSTED_FW_CONTENT_CERT_ID] = {
+               &fip_dev_handle,
+               (uintptr_t)&nt_fw_cert_uuid_spec,
+               check_fip
+       },
+#endif /* TRUSTED_BOARD_BOOT */
 };
 
 static int check_emmc(const uintptr_t spec)
diff --git a/plat/hisilicon/hikey/hikey_rotpk.S b/plat/hisilicon/hikey/hikey_rotpk.S
new file mode 100644 (file)
index 0000000..f308eee
--- /dev/null
@@ -0,0 +1,16 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+       .global hikey_rotpk_hash
+       .global hikey_rotpk_hash_end
+       .section .rodata.hikey_rotpk_hash, "a"
+hikey_rotpk_hash:
+       /* DER header */
+       .byte 0x30, 0x31, 0x30, 0x0D, 0x06, 0x09, 0x60, 0x86, 0x48
+       .byte 0x01, 0x65, 0x03, 0x04, 0x02, 0x01, 0x05, 0x00, 0x04, 0x20
+       /* SHA256 */
+       .incbin ROTPK_HASH
+hikey_rotpk_hash_end:
diff --git a/plat/hisilicon/hikey/hikey_tbbr.c b/plat/hisilicon/hikey/hikey_tbbr.c
new file mode 100644 (file)
index 0000000..20eda36
--- /dev/null
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <platform.h>
+
+extern char hikey_rotpk_hash[], hikey_rotpk_hash_end[];
+
+int plat_get_rotpk_info(void *cookie, void **key_ptr, unsigned int *key_len,
+                       unsigned int *flags)
+{
+       *key_ptr = hikey_rotpk_hash;
+       *key_len = hikey_rotpk_hash_end - hikey_rotpk_hash;
+       *flags = ROTPK_IS_HASH;
+
+       return 0;
+}
+
+int plat_get_nv_ctr(void *cookie, unsigned int *nv_ctr)
+{
+       *nv_ctr = 0;
+
+       return 0;
+}
+
+int plat_set_nv_ctr(void *cookie, unsigned int nv_ctr)
+{
+       return 1;
+}
index 637a1c9be6379dfcee46b81e9c7e926eab2a2c5c..acc7ad6f0a78870327ba8d7178ff2c2d852fc00b 100644 (file)
  * + loader +
  * ++++++++++  0xF980_1000
  * + BL1_RO +
- * ++++++++++  0xF981_0000
+ * ++++++++++  0xF981_8000
  * + BL1_RW +
  * ++++++++++  0xF989_8000
  */
 #define BL1_RO_BASE                    (XG2RAM0_BASE + BL1_XG2RAM0_OFFSET)
-#define BL1_RO_LIMIT                   (XG2RAM0_BASE + 0x10000)
-#define BL1_RW_BASE                    (BL1_RO_LIMIT)  /* 0xf981_0000 */
-#define BL1_RW_SIZE                    (0x00088000)
+#define BL1_RO_LIMIT                   (XG2RAM0_BASE + 0x18000)
+#define BL1_RW_BASE                    (BL1_RO_LIMIT)  /* 0xf981_8000 */
+#define BL1_RW_SIZE                    (0x00080000)
 #define BL1_RW_LIMIT                   (0xF9898000)
 
 /*
  * Non-Secure BL1U specific defines.
  */
-#define NS_BL1U_BASE                   (0xf9818000)
+#define NS_BL1U_BASE                   (0xf9828000)
 #define NS_BL1U_SIZE                   (0x00010000)
 #define NS_BL1U_LIMIT                  (NS_BL1U_BASE + NS_BL1U_SIZE)
 
  * + loader +
  * ++++++++++ 0xF980_1000
  * +  BL2   +
- * ++++++++++ 0xF981_8000
+ * ++++++++++ 0xF983_0000
  */
 #define BL2_BASE                       (BL1_RO_BASE)           /* 0xf980_1000 */
-#define BL2_LIMIT                      (0xF9818000)            /* 0xf981_8000 */
+#define BL2_LIMIT                      (0xF9830000)            /* 0xf983_0000 */
 
 /*
  * SCP_BL2 specific defines.
index 8c560047c049a5bc82d9b91ad657bbd48a483585..b2404483a02ddcaeff3b8d9c395fc02f23375350 100644 (file)
@@ -21,7 +21,7 @@
  */
 
 /* Size of cacheable stacks */
-#define PLATFORM_STACK_SIZE            0x800
+#define PLATFORM_STACK_SIZE            0x1000
 
 #define FIRMWARE_WELCOME_STR           "Booting Trusted Firmware\n"
 
index 38eb148cf5b51ffc11e353baa35b75d399a1c4fa..6a2474e8eb4c0df823ce23237a119822892ef533 100644 (file)
@@ -122,6 +122,46 @@ BL31_SOURCES               +=      plat/hisilicon/hikey/hisi_sip_svc.c                     \
                                lib/pmf/pmf_smc.c
 endif
 
+ifneq (${TRUSTED_BOARD_BOOT},0)
+
+include drivers/auth/mbedtls/mbedtls_crypto.mk
+include drivers/auth/mbedtls/mbedtls_x509.mk
+
+USE_TBBR_DEFS          :=      1
+
+AUTH_SOURCES           :=      drivers/auth/auth_mod.c                 \
+                               drivers/auth/crypto_mod.c               \
+                               drivers/auth/img_parser_mod.c           \
+                               drivers/auth/tbbr/tbbr_cot.c
+
+BL1_SOURCES            +=      ${AUTH_SOURCES}                         \
+                               plat/common/tbbr/plat_tbbr.c            \
+                               plat/hisilicon/hikey/hikey_tbbr.c       \
+                               plat/hisilicon/hikey/hikey_rotpk.S
+
+BL2_SOURCES            +=      ${AUTH_SOURCES}                         \
+                               plat/common/tbbr/plat_tbbr.c            \
+                               plat/hisilicon/hikey/hikey_tbbr.c       \
+                               plat/hisilicon/hikey/hikey_rotpk.S
+
+ROT_KEY                =       $(BUILD_PLAT)/rot_key.pem
+ROTPK_HASH             =       $(BUILD_PLAT)/rotpk_sha256.bin
+
+$(eval $(call add_define_val,ROTPK_HASH,'"$(ROTPK_HASH)"'))
+$(BUILD_PLAT)/bl1/hikey_rotpk.o: $(ROTPK_HASH)
+$(BUILD_PLAT)/bl2/hikey_rotpk.o: $(ROTPK_HASH)
+
+certificates: $(ROT_KEY)
+$(ROT_KEY): | $(BUILD_PLAT)
+       @echo "  OPENSSL $@"
+       $(Q)openssl genrsa 2048 > $@ 2>/dev/null
+
+$(ROTPK_HASH): $(ROT_KEY)
+       @echo "  OPENSSL $@"
+       $(Q)openssl rsa -in $< -pubout -outform DER 2>/dev/null |\
+       openssl dgst -sha256 -binary > $@ 2>/dev/null
+endif
+
 # Enable workarounds for selected Cortex-A53 errata.
 ERRATA_A53_836870              :=      1
 ERRATA_A53_843419              :=      1
diff --git a/plat/imx/common/imx8_psci.c b/plat/imx/common/imx8_psci.c
new file mode 100644 (file)
index 0000000..22a531b
--- /dev/null
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <debug.h>
+#include <plat_imx8.h>
+#include <sci/sci.h>
+#include <stdbool.h>
+
+void __dead2 imx_system_off(void)
+{
+       sc_pm_set_sys_power_mode(ipc_handle, SC_PM_PW_MODE_OFF);
+       wfi();
+       ERROR("power off failed.\n");
+       panic();
+}
+
+void __dead2 imx_system_reset(void)
+{
+       sc_pm_reset(ipc_handle, SC_PM_RESET_TYPE_BOARD);
+       wfi();
+       ERROR("system reset failed.\n");
+       panic();
+}
+
+int imx_validate_power_state(unsigned int power_state,
+                        psci_power_state_t *req_state)
+{
+       /* TODO */
+       return PSCI_E_INVALID_PARAMS;
+}
+
+void imx_get_sys_suspend_power_state(psci_power_state_t *req_state)
+{
+       unsigned int i;
+
+       /* CPU & cluster off, system in retention */
+       for (i = MPIDR_AFFLVL0; i < PLAT_MAX_PWR_LVL; i++)
+               req_state->pwr_domain_state[i] = PLAT_MAX_OFF_STATE;
+       req_state->pwr_domain_state[PLAT_MAX_PWR_LVL] = PLAT_MAX_RET_STATE;
+}
+
index bcb7d59f041fffb9db3f81a11c258c4f9bfc62b8..64145c4ca89744d9a0b5c79c86882a734967206e 100644 (file)
@@ -11,7 +11,8 @@
 const unsigned char imx_power_domain_tree_desc[] = {
        PWR_DOMAIN_AT_MAX_LVL,
        PLATFORM_CLUSTER_COUNT,
-       PLATFORM_CORE_COUNT,
+       PLATFORM_CLUSTER0_CORE_COUNT,
+       PLATFORM_CLUSTER1_CORE_COUNT,
 };
 
 const unsigned char *plat_get_power_domain_tree_desc(void)
index 27d4c376e697edc74899ea7010c01ca9be0fed1e..a333bfbe11202a1f4a752ed12a1bdf3661e1c526 100644 (file)
@@ -8,6 +8,7 @@
 #define __PLAT_IMX8_H__
 
 #include <gicv3.h>
+#include <psci.h>
 
 unsigned int plat_calc_core_pos(uint64_t mpidr);
 void imx_mailbox_init(uintptr_t base_addr);
@@ -17,4 +18,9 @@ void plat_gic_cpuif_enable(void);
 void plat_gic_cpuif_disable(void);
 void plat_gic_pcpu_init(void);
 
+void __dead2 imx_system_off(void);
+void __dead2 imx_system_reset(void);
+int imx_validate_power_state(unsigned int power_state,
+                       psci_power_state_t *req_state);
+void imx_get_sys_suspend_power_state(psci_power_state_t *req_state);
 #endif /*__PLAT_IMX8_H__ */
index b9b794becb6e84a17e5ce911d707252ed9b5c90b..c37c39c40313bdf06b09cee00706ede526066edf 100644 (file)
 #include <sci/sci.h>
 #include <stdbool.h>
 
+#define CORE_PWR_STATE(state) \
+       ((state)->pwr_domain_state[MPIDR_AFFLVL0])
+#define CLUSTER_PWR_STATE(state) \
+       ((state)->pwr_domain_state[MPIDR_AFFLVL1])
+#define SYSTEM_PWR_STATE(state) \
+       ((state)->pwr_domain_state[PLAT_MAX_PWR_LVL])
+
 const static int ap_core_index[PLATFORM_CORE_COUNT] = {
        SC_R_A53_0, SC_R_A53_1, SC_R_A53_2,
        SC_R_A53_3, SC_R_A72_0, SC_R_A72_1,
 };
 
-/* need to enable USE_COHERENT_MEM to avoid coherence issue */
-#if USE_COHERENT_MEM
-static unsigned int a53_cpu_on_number __section("tzfw_coherent_mem");
-static unsigned int a72_cpu_on_number __section("tzfw_coherent_mem");
-#endif
-
 int imx_pwr_domain_on(u_register_t mpidr)
 {
        int ret = PSCI_E_SUCCESS;
@@ -37,9 +38,8 @@ int imx_pwr_domain_on(u_register_t mpidr)
        tf_printf("imx_pwr_domain_on cluster_id %d, cpu_id %d\n", cluster_id, cpu_id);
 
        if (cluster_id == 0) {
-               if (a53_cpu_on_number == 0)
-                       sc_pm_set_resource_power_mode(ipc_handle, SC_R_A53, SC_PM_PW_MODE_ON);
-
+               sc_pm_set_resource_power_mode(ipc_handle, SC_R_A53,
+                       SC_PM_PW_MODE_ON);
                if (sc_pm_set_resource_power_mode(ipc_handle, ap_core_index[cpu_id],
                        SC_PM_PW_MODE_ON) != SC_ERR_NONE) {
                        ERROR("cluster0 core %d power on failed!\n", cpu_id);
@@ -52,9 +52,8 @@ int imx_pwr_domain_on(u_register_t mpidr)
                        ret = PSCI_E_INTERN_FAIL;
                }
        } else {
-               if (a72_cpu_on_number == 0)
-                       sc_pm_set_resource_power_mode(ipc_handle, SC_R_A72, SC_PM_PW_MODE_ON);
-
+               sc_pm_set_resource_power_mode(ipc_handle, SC_R_A72,
+                       SC_PM_PW_MODE_ON);
                if (sc_pm_set_resource_power_mode(ipc_handle, ap_core_index[cpu_id + 4],
                        SC_PM_PW_MODE_ON) != SC_ERR_NONE) {
                        ERROR(" cluster1 core %d power on failed!\n", cpu_id);
@@ -74,17 +73,56 @@ int imx_pwr_domain_on(u_register_t mpidr)
 void imx_pwr_domain_on_finish(const psci_power_state_t *target_state)
 {
        uint64_t mpidr = read_mpidr_el1();
-       unsigned int cluster_id = MPIDR_AFFLVL1_VAL(mpidr);
 
-       if (cluster_id == 0 && a53_cpu_on_number++ == 0)
-               cci_enable_snoop_dvm_reqs(0);
-       if (cluster_id == 1 && a72_cpu_on_number++ == 0)
-               cci_enable_snoop_dvm_reqs(1);
+       if (CLUSTER_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE)
+               cci_enable_snoop_dvm_reqs(MPIDR_AFFLVL1_VAL(mpidr));
 
        plat_gic_pcpu_init();
        plat_gic_cpuif_enable();
 }
 
+void imx_pwr_domain_off(const psci_power_state_t *target_state)
+{
+       u_register_t mpidr = read_mpidr_el1();
+       unsigned int cluster_id = MPIDR_AFFLVL1_VAL(mpidr);
+       unsigned int cpu_id = MPIDR_AFFLVL0_VAL(mpidr);
+
+       plat_gic_cpuif_disable();
+       sc_pm_req_cpu_low_power_mode(ipc_handle,
+               ap_core_index[cpu_id + cluster_id * 4],
+               SC_PM_PW_MODE_OFF,
+               SC_PM_WAKE_SRC_NONE);
+       if (CLUSTER_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE)
+               cci_disable_snoop_dvm_reqs(MPIDR_AFFLVL1_VAL(mpidr));
+       tf_printf("turn off cluster:%d core:%d\n", cluster_id, cpu_id);
+}
+
+void imx_domain_suspend(const psci_power_state_t *target_state)
+{
+       u_register_t mpidr = read_mpidr_el1();
+       unsigned int cluster_id = MPIDR_AFFLVL1_VAL(mpidr);
+       unsigned int cpu_id = MPIDR_AFFLVL0_VAL(mpidr);
+
+       plat_gic_cpuif_disable();
+
+       cci_disable_snoop_dvm_reqs(MPIDR_AFFLVL1_VAL(mpidr));
+
+       sc_pm_set_cpu_resume_addr(ipc_handle,
+               ap_core_index[cpu_id + cluster_id * 4], BL31_BASE);
+       sc_pm_req_cpu_low_power_mode(ipc_handle,
+               ap_core_index[cpu_id + cluster_id * 4],
+               SC_PM_PW_MODE_OFF, SC_PM_WAKE_SRC_GIC);
+}
+
+void imx_domain_suspend_finish(const psci_power_state_t *target_state)
+{
+       u_register_t mpidr = read_mpidr_el1();
+
+       cci_enable_snoop_dvm_reqs(MPIDR_AFFLVL1_VAL(mpidr));
+
+       plat_gic_cpuif_enable();
+}
+
 int imx_validate_ns_entrypoint(uintptr_t ns_entrypoint)
 {
        return PSCI_E_SUCCESS;
@@ -93,22 +131,42 @@ int imx_validate_ns_entrypoint(uintptr_t ns_entrypoint)
 static const plat_psci_ops_t imx_plat_psci_ops = {
        .pwr_domain_on = imx_pwr_domain_on,
        .pwr_domain_on_finish = imx_pwr_domain_on_finish,
+       .pwr_domain_off = imx_pwr_domain_off,
+       .pwr_domain_suspend = imx_domain_suspend,
+       .pwr_domain_suspend_finish = imx_domain_suspend_finish,
+       .get_sys_suspend_power_state = imx_get_sys_suspend_power_state,
+       .validate_power_state = imx_validate_power_state,
        .validate_ns_entrypoint = imx_validate_ns_entrypoint,
+       .system_off = imx_system_off,
+       .system_reset = imx_system_reset,
 };
 
 int plat_setup_psci_ops(uintptr_t sec_entrypoint,
                        const plat_psci_ops_t **psci_ops)
 {
-       uint64_t mpidr = read_mpidr_el1();
-       unsigned int cluster_id = MPIDR_AFFLVL1_VAL(mpidr);
-
        imx_mailbox_init(sec_entrypoint);
        *psci_ops = &imx_plat_psci_ops;
 
-       if (cluster_id == 0)
-               a53_cpu_on_number++;
-       else
-               a72_cpu_on_number++;
+       /* Request low power mode for cluster/cci, only need to do once */
+       sc_pm_req_low_power_mode(ipc_handle, SC_R_A72, SC_PM_PW_MODE_OFF);
+       sc_pm_req_low_power_mode(ipc_handle, SC_R_A53, SC_PM_PW_MODE_OFF);
+       sc_pm_req_low_power_mode(ipc_handle, SC_R_CCI, SC_PM_PW_MODE_OFF);
+
+       /* Request RUN and LP modes for DDR, system interconnect etc. */
+       sc_pm_req_sys_if_power_mode(ipc_handle, SC_R_A53,
+               SC_PM_SYS_IF_DDR, SC_PM_PW_MODE_ON, SC_PM_PW_MODE_STBY);
+       sc_pm_req_sys_if_power_mode(ipc_handle, SC_R_A72,
+               SC_PM_SYS_IF_DDR, SC_PM_PW_MODE_ON, SC_PM_PW_MODE_STBY);
+       sc_pm_req_sys_if_power_mode(ipc_handle, SC_R_A53,
+               SC_PM_SYS_IF_MU, SC_PM_PW_MODE_ON, SC_PM_PW_MODE_STBY);
+       sc_pm_req_sys_if_power_mode(ipc_handle, SC_R_A72,
+               SC_PM_SYS_IF_MU, SC_PM_PW_MODE_ON, SC_PM_PW_MODE_STBY);
+       sc_pm_req_sys_if_power_mode(ipc_handle, SC_R_A53,
+               SC_PM_SYS_IF_INTERCONNECT, SC_PM_PW_MODE_ON,
+               SC_PM_PW_MODE_STBY);
+       sc_pm_req_sys_if_power_mode(ipc_handle, SC_R_A72,
+               SC_PM_SYS_IF_INTERCONNECT, SC_PM_PW_MODE_ON,
+               SC_PM_PW_MODE_STBY);
 
        return 0;
 }
index c295e14bfd3c3ab56a4c6508844a3e7d2b6a18d6..022ad9902a85646bb30a715660bc033d01146f23 100644 (file)
@@ -21,6 +21,7 @@ BL31_SOURCES          +=      plat/imx/common/lpuart_console.S        \
                                plat/imx/imx8qm/imx8qm_bl31_setup.c     \
                                plat/imx/imx8qm/imx8qm_psci.c           \
                                plat/imx/common/imx8_topology.c         \
+                               plat/imx/common/imx8_psci.c             \
                                lib/xlat_tables/aarch64/xlat_tables.c           \
                                lib/xlat_tables/xlat_tables_common.c            \
                                lib/cpus/aarch64/cortex_a53.S                   \
index 47233dc8d00e5146f77e6f8e618ace4d2d258a55..f1df267a814c912df1537681d1a054e52dff5c73 100644 (file)
@@ -18,13 +18,6 @@ const static int ap_core_index[PLATFORM_CORE_COUNT] = {
        SC_R_A35_0, SC_R_A35_1, SC_R_A35_2, SC_R_A35_3
 };
 
-plat_local_state_t plat_get_target_pwr_state(unsigned int lvl,
-                                            const plat_local_state_t *target_state,
-                                            unsigned int ncpu)
-{
-       return 0;
-}
-
 int imx_pwr_domain_on(u_register_t mpidr)
 {
        int ret = PSCI_E_SUCCESS;
@@ -60,10 +53,51 @@ int imx_validate_ns_entrypoint(uintptr_t ns_entrypoint)
        return PSCI_E_SUCCESS;
 }
 
+void imx_pwr_domain_off(const psci_power_state_t *target_state)
+{
+       u_register_t mpidr = read_mpidr_el1();
+       unsigned int cpu_id = MPIDR_AFFLVL0_VAL(mpidr);
+
+       plat_gic_cpuif_disable();
+       sc_pm_req_cpu_low_power_mode(ipc_handle, ap_core_index[cpu_id],
+               SC_PM_PW_MODE_OFF, SC_PM_WAKE_SRC_NONE);
+       tf_printf("turn off core:%d\n", cpu_id);
+}
+
+void imx_domain_suspend(const psci_power_state_t *target_state)
+{
+       u_register_t mpidr = read_mpidr_el1();
+       unsigned int cpu_id = MPIDR_AFFLVL0_VAL(mpidr);
+
+       plat_gic_cpuif_disable();
+
+       sc_pm_set_cpu_resume_addr(ipc_handle, ap_core_index[cpu_id], BL31_BASE);
+       sc_pm_req_cpu_low_power_mode(ipc_handle, ap_core_index[cpu_id],
+               SC_PM_PW_MODE_OFF, SC_PM_WAKE_SRC_GIC);
+}
+
+void imx_domain_suspend_finish(const psci_power_state_t *target_state)
+{
+       u_register_t mpidr = read_mpidr_el1();
+       unsigned int cpu_id = MPIDR_AFFLVL0_VAL(mpidr);
+
+       sc_pm_req_low_power_mode(ipc_handle, ap_core_index[cpu_id],
+               SC_PM_PW_MODE_ON);
+
+       plat_gic_cpuif_enable();
+}
+
 static const plat_psci_ops_t imx_plat_psci_ops = {
        .pwr_domain_on = imx_pwr_domain_on,
        .pwr_domain_on_finish = imx_pwr_domain_on_finish,
        .validate_ns_entrypoint = imx_validate_ns_entrypoint,
+       .system_off = imx_system_off,
+       .system_reset = imx_system_reset,
+       .pwr_domain_off = imx_pwr_domain_off,
+       .pwr_domain_suspend = imx_domain_suspend,
+       .pwr_domain_suspend_finish = imx_domain_suspend_finish,
+       .get_sys_suspend_power_state = imx_get_sys_suspend_power_state,
+       .validate_power_state = imx_validate_power_state,
 };
 
 int plat_setup_psci_ops(uintptr_t sec_entrypoint,
@@ -72,5 +106,17 @@ int plat_setup_psci_ops(uintptr_t sec_entrypoint,
        imx_mailbox_init(sec_entrypoint);
        *psci_ops = &imx_plat_psci_ops;
 
+       /* Request low power mode for A35 cluster, only need to do once */
+       sc_pm_req_low_power_mode(ipc_handle, SC_R_A35, SC_PM_PW_MODE_OFF);
+
+       /* Request RUN and LP modes for DDR, system interconnect etc. */
+       sc_pm_req_sys_if_power_mode(ipc_handle, SC_R_A35,
+               SC_PM_SYS_IF_DDR, SC_PM_PW_MODE_ON, SC_PM_PW_MODE_STBY);
+       sc_pm_req_sys_if_power_mode(ipc_handle, SC_R_A35,
+               SC_PM_SYS_IF_MU, SC_PM_PW_MODE_ON, SC_PM_PW_MODE_STBY);
+       sc_pm_req_sys_if_power_mode(ipc_handle, SC_R_A35,
+               SC_PM_SYS_IF_INTERCONNECT, SC_PM_PW_MODE_ON,
+               SC_PM_PW_MODE_STBY);
+
        return 0;
 }
index 2cd140097db141777b5fd9d9b9e385c201dd8613..8c86174695104ff974f3eb970d215bd3f9a857cf 100644 (file)
@@ -17,6 +17,8 @@
 #define PLATFORM_MAX_CPU_PER_CLUSTER   4
 #define PLATFORM_CLUSTER_COUNT         1
 #define PLATFORM_CORE_COUNT            4
+#define PLATFORM_CLUSTER0_CORE_COUNT   4
+#define PLATFORM_CLUSTER1_CORE_COUNT   0
 
 #define PWR_DOMAIN_AT_MAX_LVL           1
 #define PLAT_MAX_PWR_LVL                2
index c16ce6e603d3c8a9a2a65c4db9d314ea8251f1aa..067661890bee70b5a30db9c21cf9aad1b4a6e093 100644 (file)
@@ -20,6 +20,8 @@ BL31_SOURCES          +=      plat/imx/common/lpuart_console.S        \
                                plat/imx/imx8qx/imx8qx_bl31_setup.c     \
                                plat/imx/imx8qx/imx8qx_psci.c           \
                                plat/imx/common/imx8_topology.c         \
+                               plat/imx/common/imx8_psci.c             \
+                               plat/common/plat_psci_common.c          \
                                lib/xlat_tables/xlat_tables_common.c    \
                                lib/xlat_tables/aarch64/xlat_tables.c   \
                                lib/cpus/aarch64/cortex_a35.S           \
diff --git a/plat/marvell/a8k/a70x0/board/dram_port.c b/plat/marvell/a8k/a70x0/board/dram_port.c
new file mode 100644 (file)
index 0000000..c670258
--- /dev/null
@@ -0,0 +1,89 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#include <arch_helpers.h>
+#include <debug.h>
+#include <mv_ddr_if.h>
+#include <plat_marvell.h>
+
+/*
+ * This function may modify the default DRAM parameters
+ * based on information received from SPD or bootloader
+ * configuration located on non volatile storage
+ */
+void plat_marvell_dram_update_topology(void)
+{
+}
+
+/*
+ * This struct provides the DRAM training code with
+ * the appropriate board DRAM configuration
+ */
+static struct mv_ddr_topology_map board_topology_map = {
+/* FIXME: MISL board 2CS 4Gb x8 devices of micron - 2133P */
+       DEBUG_LEVEL_ERROR,
+       0x1, /* active interfaces */
+       /* cs_mask, mirror, dqs_swap, ck_swap X subphys */
+       { { { {0x3, 0x2, 0, 0},
+             {0x3, 0x2, 0, 0},
+             {0x3, 0x2, 0, 0},
+             {0x3, 0x2, 0, 0},
+             {0x3, 0x2, 0, 0},
+             {0x3, 0x2, 0, 0},
+             {0x3, 0x2, 0, 0},
+             {0x3, 0x2, 0, 0},
+             {0x3, 0x2, 0, 0} },
+          SPEED_BIN_DDR_2133P,         /* speed_bin */
+          MV_DDR_DEV_WIDTH_8BIT,       /* sdram device width */
+          MV_DDR_DIE_CAP_4GBIT,        /* die capacity */
+          MV_DDR_FREQ_SAR,             /* frequency */
+          0, 0,                        /* cas_l, cas_wl */
+          MV_DDR_TEMP_LOW} },          /* temperature */
+       MV_DDR_32BIT_ECC_PUP8_BUS_MASK, /* subphys mask */
+       MV_DDR_CFG_DEFAULT,             /* ddr configuration data source */
+       { {0} },                        /* raw spd data */
+       {0},                            /* timing parameters */
+       {                               /* electrical configuration */
+               {                       /* memory electrical configuration */
+                       MV_DDR_RTT_NOM_PARK_RZQ_DISABLE,        /* rtt_nom */
+                       {
+                               MV_DDR_RTT_NOM_PARK_RZQ_DIV4, /* rtt_park 1cs */
+                               MV_DDR_RTT_NOM_PARK_RZQ_DIV1  /* rtt_park 2cs */
+                       },
+                       {
+                               MV_DDR_RTT_WR_DYN_ODT_OFF,      /* rtt_wr 1cs */
+                               MV_DDR_RTT_WR_RZQ_DIV2          /* rtt_wr 2cs */
+                       },
+                       MV_DDR_DIC_RZQ_DIV7     /* dic */
+               },
+               {                       /* phy electrical configuration */
+                       MV_DDR_OHM_30,  /* data_drv_p */
+                       MV_DDR_OHM_30,  /* data_drv_n */
+                       MV_DDR_OHM_30,  /* ctrl_drv_p */
+                       MV_DDR_OHM_30,  /* ctrl_drv_n */
+                       {
+                               MV_DDR_OHM_60,  /* odt_p 1cs */
+                               MV_DDR_OHM_120  /* odt_p 2cs */
+                       },
+                       {
+                               MV_DDR_OHM_60,  /* odt_n 1cs */
+                               MV_DDR_OHM_120  /* odt_n 2cs */
+                       },
+               },
+               {                       /* mac electrical configuration */
+                       MV_DDR_ODT_CFG_NORMAL,          /* odtcfg_pattern */
+                       MV_DDR_ODT_CFG_ALWAYS_ON,       /* odtcfg_write */
+                       MV_DDR_ODT_CFG_NORMAL,          /* odtcfg_read */
+               },
+       }
+};
+
+struct mv_ddr_topology_map *mv_ddr_topology_map_get(void)
+{
+       /* Return the board topology as defined in the board code */
+       return &board_topology_map;
+}
diff --git a/plat/marvell/a8k/a70x0/board/marvell_plat_config.c b/plat/marvell/a8k/a70x0/board/marvell_plat_config.c
new file mode 100644 (file)
index 0000000..9171986
--- /dev/null
@@ -0,0 +1,141 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#include <a8k_common.h>
+
+/*
+ * If bootrom is currently at BLE there's no need to include the memory
+ * maps structure at this point
+ */
+#include <mvebu_def.h>
+#ifndef IMAGE_BLE
+
+/*****************************************************************************
+ * AMB Configuration
+ *****************************************************************************
+ */
+struct addr_map_win amb_memory_map[] = {
+       /* CP0 SPI1 CS0 Direct Mode access */
+       {0xf900,        0x1000000,      AMB_SPI1_CS0_ID},
+};
+
+int marvell_get_amb_memory_map(struct addr_map_win **win,
+                              uint32_t *size, uintptr_t base)
+{
+       *win = amb_memory_map;
+       if (*win == NULL)
+               *size = 0;
+       else
+               *size = ARRAY_SIZE(amb_memory_map);
+
+       return 0;
+}
+#endif
+
+/*****************************************************************************
+ * IO_WIN Configuration
+ *****************************************************************************
+ */
+struct addr_map_win io_win_memory_map[] = {
+#ifndef IMAGE_BLE
+       /* MCI 0 indirect window */
+       {MVEBU_MCI_REG_BASE_REMAP(0),   0x100000, MCI_0_TID},
+       /* MCI 1 indirect window */
+       {MVEBU_MCI_REG_BASE_REMAP(1),   0x100000, MCI_1_TID},
+#endif
+};
+
+uint32_t marvell_get_io_win_gcr_target(int ap_index)
+{
+       return PIDI_TID;
+}
+
+int marvell_get_io_win_memory_map(int ap_index, struct addr_map_win **win,
+                                 uint32_t *size)
+{
+       *win = io_win_memory_map;
+       if (*win == NULL)
+               *size = 0;
+       else
+               *size = ARRAY_SIZE(io_win_memory_map);
+
+       return 0;
+}
+
+#ifndef IMAGE_BLE
+/*****************************************************************************
+ * IOB Configuration
+ *****************************************************************************
+ */
+struct addr_map_win iob_memory_map[] = {
+       /* PEX1_X1 window */
+       {0x00000000f7000000,    0x1000000,      PEX1_TID},
+       /* PEX2_X1 window */
+       {0x00000000f8000000,    0x1000000,      PEX2_TID},
+       /* PEX0_X4 window */
+       {0x00000000f6000000,    0x1000000,      PEX0_TID},
+       /* SPI1_CS0 (RUNIT) window */
+       {0x00000000f9000000,    0x1000000,      RUNIT_TID},
+};
+
+int marvell_get_iob_memory_map(struct addr_map_win **win, uint32_t *size,
+                              uintptr_t base)
+{
+       *win = iob_memory_map;
+       *size = ARRAY_SIZE(iob_memory_map);
+
+       return 0;
+}
+#endif
+
+/*****************************************************************************
+ * CCU Configuration
+ *****************************************************************************
+ */
+struct addr_map_win ccu_memory_map[] = {       /* IO window */
+#ifdef IMAGE_BLE
+       {0x00000000f2000000,    0x4000000,      IO_0_TID}, /* IO window */
+#else
+       {0x00000000f2000000,    0xe000000,      IO_0_TID},
+#endif
+};
+
+uint32_t marvell_get_ccu_gcr_target(int ap)
+{
+       return DRAM_0_TID;
+}
+
+int marvell_get_ccu_memory_map(int ap_index, struct addr_map_win **win,
+                              uint32_t *size)
+{
+       *win = ccu_memory_map;
+       *size = ARRAY_SIZE(ccu_memory_map);
+
+       return 0;
+}
+
+#ifdef IMAGE_BLE
+/*****************************************************************************
+ * SKIP IMAGE Configuration
+ *****************************************************************************
+ */
+#if PLAT_RECOVERY_IMAGE_ENABLE
+struct skip_image skip_im = {
+       .detection_method = GPIO,
+       .info.gpio.num = 33,
+       .info.gpio.button_state = HIGH,
+       .info.test.cp_ap = CP,
+       .info.test.cp_index = 0,
+};
+
+void *plat_marvell_get_skip_image_data(void)
+{
+       /* Return the skip_image configurations */
+       return &skip_im;
+}
+#endif
+#endif
diff --git a/plat/marvell/a8k/a70x0/mvebu_def.h b/plat/marvell/a8k/a70x0/mvebu_def.h
new file mode 100644 (file)
index 0000000..a7c5abb
--- /dev/null
@@ -0,0 +1,15 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#ifndef __MVEBU_DEF_H__
+#define __MVEBU_DEF_H__
+
+#include <a8k_plat_def.h>
+
+#define CP_COUNT               1       /* A70x0 has single CP0 */
+
+#endif /* __MVEBU_DEF_H__ */
diff --git a/plat/marvell/a8k/a70x0/platform.mk b/plat/marvell/a8k/a70x0/platform.mk
new file mode 100644 (file)
index 0000000..29dfd95
--- /dev/null
@@ -0,0 +1,16 @@
+#
+# Copyright (C) 2018 Marvell International Ltd.
+#
+# SPDX-License-Identifier:     BSD-3-Clause
+# https://spdx.org/licenses
+#
+
+PCI_EP_SUPPORT         := 0
+
+DOIMAGE_SEC            :=      tools/doimage/secure/sec_img_7K.cfg
+
+MARVELL_MOCHI_DRV      :=      drivers/marvell/mochi/apn806_setup.c
+
+include plat/marvell/a8k/common/a8k_common.mk
+
+include plat/marvell/common/marvell_common.mk
diff --git a/plat/marvell/a8k/a70x0_amc/board/dram_port.c b/plat/marvell/a8k/a70x0_amc/board/dram_port.c
new file mode 100644 (file)
index 0000000..ab1df46
--- /dev/null
@@ -0,0 +1,89 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#include <arch_helpers.h>
+#include <debug.h>
+#include <mv_ddr_if.h>
+#include <plat_marvell.h>
+
+/*
+ * This function may modify the default DRAM parameters
+ * based on information received from SPD or bootloader
+ * configuration located on non volatile storage
+ */
+void plat_marvell_dram_update_topology(void)
+{
+}
+
+/*
+ * This struct provides the DRAM training code with
+ * the appropriate board DRAM configuration
+ */
+static struct mv_ddr_topology_map board_topology_map = {
+/* FIXME: MISL board 2CS 8Gb x8 devices of micron - 2133P */
+       DEBUG_LEVEL_ERROR,
+       0x1, /* active interfaces */
+       /* cs_mask, mirror, dqs_swap, ck_swap X subphys */
+       { { { {0x3, 0x2, 0, 0},
+             {0x3, 0x2, 0, 0},
+             {0x3, 0x2, 0, 0},
+             {0x3, 0x2, 0, 0},
+             {0x3, 0x2, 0, 0},
+             {0x3, 0x2, 0, 0},
+             {0x3, 0x2, 0, 0},
+             {0x3, 0x2, 0, 0},
+             {0x3, 0x2, 0, 0} },
+          SPEED_BIN_DDR_2400T,         /* speed_bin */
+          MV_DDR_DEV_WIDTH_8BIT,       /* sdram device width */
+          MV_DDR_DIE_CAP_8GBIT,        /* die capacity */
+          MV_DDR_FREQ_SAR,             /* frequency */
+          0, 0,                        /* cas_l, cas_wl */
+          MV_DDR_TEMP_LOW} },          /* temperature */
+       MV_DDR_32BIT_ECC_PUP8_BUS_MASK, /* subphys mask */
+       MV_DDR_CFG_DEFAULT,             /* ddr configuration data source */
+       { {0} },                        /* raw spd data */
+       {0},                            /* timing parameters */
+       {                               /* electrical configuration */
+               {                       /* memory electrical configuration */
+                       MV_DDR_RTT_NOM_PARK_RZQ_DISABLE,        /* rtt_nom */
+                       {
+                               MV_DDR_RTT_NOM_PARK_RZQ_DIV4, /* rtt_park 1cs */
+                               MV_DDR_RTT_NOM_PARK_RZQ_DIV1  /* rtt_park 2cs */
+                       },
+                       {
+                               MV_DDR_RTT_WR_DYN_ODT_OFF,      /* rtt_wr 1cs */
+                               MV_DDR_RTT_WR_RZQ_DIV2          /* rtt_wr 2cs */
+                       },
+                       MV_DDR_DIC_RZQ_DIV7     /* dic */
+               },
+               {                       /* phy electrical configuration */
+                       MV_DDR_OHM_30,  /* data_drv_p */
+                       MV_DDR_OHM_30,  /* data_drv_n */
+                       MV_DDR_OHM_30,  /* ctrl_drv_p */
+                       MV_DDR_OHM_30,  /* ctrl_drv_n */
+                       {
+                               MV_DDR_OHM_60,  /* odt_p 1cs */
+                               MV_DDR_OHM_120  /* odt_p 2cs */
+                       },
+                       {
+                               MV_DDR_OHM_60,  /* odt_n 1cs */
+                               MV_DDR_OHM_120  /* odt_n 2cs */
+                       },
+               },
+               {                       /* mac electrical configuration */
+                       MV_DDR_ODT_CFG_NORMAL,          /* odtcfg_pattern */
+                       MV_DDR_ODT_CFG_ALWAYS_ON,       /* odtcfg_write */
+                       MV_DDR_ODT_CFG_NORMAL,          /* odtcfg_read */
+               },
+       }
+};
+
+struct mv_ddr_topology_map *mv_ddr_topology_map_get(void)
+{
+       /* Return the board topology as defined in the board code */
+       return &board_topology_map;
+}
diff --git a/plat/marvell/a8k/a70x0_amc/board/marvell_plat_config.c b/plat/marvell/a8k/a70x0_amc/board/marvell_plat_config.c
new file mode 100644 (file)
index 0000000..ec4124c
--- /dev/null
@@ -0,0 +1,142 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#include <a8k_common.h>
+
+/*
+ * If bootrom is currently at BLE there's no need to include the memory
+ * maps structure at this point
+ */
+#include <mvebu_def.h>
+#ifndef IMAGE_BLE
+
+/*****************************************************************************
+ * AMB Configuration
+ *****************************************************************************
+ */
+struct addr_map_win *amb_memory_map;
+
+int marvell_get_amb_memory_map(struct addr_map_win **win, uint32_t *size,
+                              uintptr_t base)
+{
+       *win = amb_memory_map;
+       if (*win == NULL)
+               *size = 0;
+       else
+               *size = ARRAY_SIZE(amb_memory_map);
+
+       return 0;
+}
+#endif
+
+/*****************************************************************************
+ * IO WIN Configuration
+ *****************************************************************************
+ */
+struct addr_map_win io_win_memory_map[] = {
+#ifndef IMAGE_BLE
+       /* MCI 0 indirect window */
+       {MVEBU_MCI_REG_BASE_REMAP(0),   0x100000,       MCI_0_TID},
+       /* MCI 1 indirect window */
+       {MVEBU_MCI_REG_BASE_REMAP(1),   0x100000,       MCI_1_TID},
+#endif
+};
+
+uint32_t marvell_get_io_win_gcr_target(int ap_index)
+{
+       return PIDI_TID;
+}
+
+int marvell_get_io_win_memory_map(int ap_index, struct addr_map_win **win,
+                                 uint32_t *size)
+{
+       *win = io_win_memory_map;
+       if (*win == NULL)
+               *size = 0;
+       else
+               *size = ARRAY_SIZE(io_win_memory_map);
+
+       return 0;
+}
+
+#ifndef IMAGE_BLE
+/*****************************************************************************
+ * IOB Configuration
+ *****************************************************************************
+ */
+struct addr_map_win iob_memory_map[] = {
+       /* PEX0_X4 window */
+       {0x00000000f6000000,    0x6000000,      PEX0_TID},
+       {0x00000000c0000000,    0x30000000,     PEX0_TID},
+       {0x0000000800000000,    0x200000000,    PEX0_TID},
+};
+
+int marvell_get_iob_memory_map(struct addr_map_win **win, uint32_t *size,
+                              uintptr_t base)
+{
+       *win = iob_memory_map;
+       *size = ARRAY_SIZE(iob_memory_map);
+
+       return 0;
+}
+#endif
+
+/*****************************************************************************
+ * CCU Configuration
+ *****************************************************************************
+ */
+struct addr_map_win ccu_memory_map[] = {
+#ifdef IMAGE_BLE
+       {0x00000000f2000000,    0x4000000,      IO_0_TID}, /* IO window */
+#else
+       {0x00000000f2000000,    0xe000000,      IO_0_TID},
+       {0x00000000c0000000,    0x30000000,     IO_0_TID}, /* IO window */
+       {0x0000000800000000,    0x200000000,    IO_0_TID}, /* IO window */
+#endif
+};
+
+uint32_t marvell_get_ccu_gcr_target(int ap)
+{
+       return DRAM_0_TID;
+}
+
+int marvell_get_ccu_memory_map(int ap_index, struct addr_map_win **win,
+                              uint32_t *size)
+{
+       *win = ccu_memory_map;
+       *size = ARRAY_SIZE(ccu_memory_map);
+
+       return 0;
+}
+
+#ifdef IMAGE_BLE
+
+struct pci_hw_cfg *plat_get_pcie_hw_data(void)
+{
+       return NULL;
+}
+
+/*****************************************************************************
+ * SKIP IMAGE Configuration
+ *****************************************************************************
+ */
+#if PLAT_RECOVERY_IMAGE_ENABLE
+struct skip_image skip_im = {
+       .detection_method = GPIO,
+       .info.gpio.num = 33,
+       .info.gpio.button_state = HIGH,
+       .info.test.cp_ap = CP,
+       .info.test.cp_index = 0,
+};
+
+void *plat_marvell_get_skip_image_data(void)
+{
+       /* Return the skip_image configurations */
+       return &skip_im;
+}
+#endif
+#endif
diff --git a/plat/marvell/a8k/a70x0_amc/mvebu_def.h b/plat/marvell/a8k/a70x0_amc/mvebu_def.h
new file mode 100644 (file)
index 0000000..5c66552
--- /dev/null
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#ifndef __MVEBU_DEF_H__
+#define __MVEBU_DEF_H__
+
+#include <a8k_plat_def.h>
+
+#define CP_COUNT               1       /* A70x0 has single CP0 */
+
+/***********************************************************************
+ * Required platform porting definitions common to all
+ * Management Compute SubSystems (MSS)
+ ***********************************************************************
+ */
+/*
+ * Load address of SCP_BL2
+ * SCP_BL2 is loaded to the same place as BL31.
+ * Once SCP_BL2 is transferred to the SCP,
+ * it is discarded and BL31 is loaded over the top.
+ */
+#ifdef SCP_IMAGE
+#define SCP_BL2_BASE           BL31_BASE
+#endif
+
+
+#endif /* __MVEBU_DEF_H__ */
diff --git a/plat/marvell/a8k/a70x0_amc/platform.mk b/plat/marvell/a8k/a70x0_amc/platform.mk
new file mode 100644 (file)
index 0000000..29dfd95
--- /dev/null
@@ -0,0 +1,16 @@
+#
+# Copyright (C) 2018 Marvell International Ltd.
+#
+# SPDX-License-Identifier:     BSD-3-Clause
+# https://spdx.org/licenses
+#
+
+PCI_EP_SUPPORT         := 0
+
+DOIMAGE_SEC            :=      tools/doimage/secure/sec_img_7K.cfg
+
+MARVELL_MOCHI_DRV      :=      drivers/marvell/mochi/apn806_setup.c
+
+include plat/marvell/a8k/common/a8k_common.mk
+
+include plat/marvell/common/marvell_common.mk
diff --git a/plat/marvell/a8k/a80x0/board/dram_port.c b/plat/marvell/a8k/a80x0/board/dram_port.c
new file mode 100644 (file)
index 0000000..c720c11
--- /dev/null
@@ -0,0 +1,141 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#include <arch_helpers.h>
+#include <a8k_i2c.h>
+#include <debug.h>
+#include <mmio.h>
+#include <mv_ddr_if.h>
+#include <mvebu_def.h>
+#include <plat_marvell.h>
+
+#define MVEBU_AP_MPP_CTRL0_7_REG               MVEBU_AP_MPP_REGS(0)
+#define MVEBU_AP_MPP_CTRL4_OFFS                        16
+#define MVEBU_AP_MPP_CTRL5_OFFS                        20
+#define MVEBU_AP_MPP_CTRL4_I2C0_SDA_ENA                0x3
+#define MVEBU_AP_MPP_CTRL5_I2C0_SCK_ENA                0x3
+
+#define MVEBU_CP_MPP_CTRL37_OFFS               20
+#define MVEBU_CP_MPP_CTRL38_OFFS               24
+#define MVEBU_CP_MPP_CTRL37_I2C0_SCK_ENA       0x2
+#define MVEBU_CP_MPP_CTRL38_I2C0_SDA_ENA       0x2
+
+#define MVEBU_MPP_CTRL_MASK                    0xf
+
+/*
+ * This struct provides the DRAM training code with
+ * the appropriate board DRAM configuration
+ */
+static struct mv_ddr_topology_map board_topology_map = {
+       /* MISL board with 1CS 8Gb x4 devices of Micron 2400T */
+       DEBUG_LEVEL_ERROR,
+       0x1, /* active interfaces */
+       /* cs_mask, mirror, dqs_swap, ck_swap X subphys */
+       { { { {0x1, 0x0, 0, 0}, /* FIXME: change the cs mask for all 64 bit */
+             {0x1, 0x0, 0, 0},
+             {0x1, 0x0, 0, 0},
+             {0x1, 0x0, 0, 0},
+             {0x1, 0x0, 0, 0},
+             {0x1, 0x0, 0, 0},
+             {0x1, 0x0, 0, 0},
+             {0x1, 0x0, 0, 0},
+             {0x1, 0x0, 0, 0} },
+          /* TODO: double check if the speed bin is 2400T */
+          SPEED_BIN_DDR_2400T,         /* speed_bin */
+          MV_DDR_DEV_WIDTH_8BIT,       /* sdram device width */
+          MV_DDR_DIE_CAP_8GBIT,        /* die capacity */
+          MV_DDR_FREQ_SAR,             /* frequency */
+          0, 0,                        /* cas_l, cas_wl */
+          MV_DDR_TEMP_LOW} },          /* temperature */
+       MV_DDR_64BIT_ECC_PUP8_BUS_MASK, /* subphys mask */
+       MV_DDR_CFG_SPD,                 /* ddr configuration data source */
+       { {0} },                        /* raw spd data */
+       {0},                            /* timing parameters */
+       {                               /* electrical configuration */
+               {                       /* memory electrical configuration */
+                       MV_DDR_RTT_NOM_PARK_RZQ_DISABLE,        /* rtt_nom */
+                       {
+                               MV_DDR_RTT_NOM_PARK_RZQ_DIV4, /* rtt_park 1cs */
+                               MV_DDR_RTT_NOM_PARK_RZQ_DIV1  /* rtt_park 2cs */
+                       },
+                       {
+                               MV_DDR_RTT_WR_DYN_ODT_OFF,      /* rtt_wr 1cs */
+                               MV_DDR_RTT_WR_RZQ_DIV2          /* rtt_wr 2cs */
+                       },
+                       MV_DDR_DIC_RZQ_DIV7     /* dic */
+               },
+               {                       /* phy electrical configuration */
+                       MV_DDR_OHM_30,  /* data_drv_p */
+                       MV_DDR_OHM_30,  /* data_drv_n */
+                       MV_DDR_OHM_30,  /* ctrl_drv_p */
+                       MV_DDR_OHM_30,  /* ctrl_drv_n */
+                       {
+                               MV_DDR_OHM_60,  /* odt_p 1cs */
+                               MV_DDR_OHM_120  /* odt_p 2cs */
+                       },
+                       {
+                               MV_DDR_OHM_60,  /* odt_n 1cs */
+                               MV_DDR_OHM_120  /* odt_n 2cs */
+                       },
+               },
+               {                       /* mac electrical configuration */
+                       MV_DDR_ODT_CFG_NORMAL,          /* odtcfg_pattern */
+                       MV_DDR_ODT_CFG_ALWAYS_ON,       /* odtcfg_write */
+                       MV_DDR_ODT_CFG_NORMAL,          /* odtcfg_read */
+               },
+       }
+};
+
+struct mv_ddr_topology_map *mv_ddr_topology_map_get(void)
+{
+       /* Return the board topology as defined in the board code */
+       return &board_topology_map;
+}
+
+static void mpp_config(void)
+{
+       uintptr_t reg;
+       uint32_t val;
+
+       reg = MVEBU_CP_MPP_REGS(0, 4);
+       /* configure CP0 MPP 37 and 38 to i2c */
+       val = mmio_read_32(reg);
+       val &= ~((MVEBU_MPP_CTRL_MASK << MVEBU_CP_MPP_CTRL37_OFFS) |
+               (MVEBU_MPP_CTRL_MASK << MVEBU_CP_MPP_CTRL38_OFFS));
+       val |= (MVEBU_CP_MPP_CTRL37_I2C0_SCK_ENA <<
+                       MVEBU_CP_MPP_CTRL37_OFFS) |
+               (MVEBU_CP_MPP_CTRL38_I2C0_SDA_ENA <<
+                       MVEBU_CP_MPP_CTRL38_OFFS);
+       mmio_write_32(reg, val);
+}
+
+/*
+ * This function may modify the default DRAM parameters
+ * based on information received from SPD or bootloader
+ * configuration located on non volatile storage
+ */
+void plat_marvell_dram_update_topology(void)
+{
+       struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
+
+       INFO("Gathering DRAM information\n");
+
+       if (tm->cfg_src == MV_DDR_CFG_SPD) {
+               /* configure MPPs to enable i2c */
+               mpp_config();
+
+               /* initialize i2c */
+               i2c_init((void *)MVEBU_CP0_I2C_BASE);
+
+               /* select SPD memory page 0 to access DRAM configuration */
+               i2c_write(I2C_SPD_P0_ADDR, 0x0, 1, tm->spd_data.all_bytes, 1);
+
+               /* read data from spd */
+               i2c_read(I2C_SPD_ADDR, 0x0, 1, tm->spd_data.all_bytes,
+                        sizeof(tm->spd_data.all_bytes));
+       }
+}
diff --git a/plat/marvell/a8k/a80x0/board/marvell_plat_config.c b/plat/marvell/a8k/a80x0/board/marvell_plat_config.c
new file mode 100644 (file)
index 0000000..43beffa
--- /dev/null
@@ -0,0 +1,191 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#include <a8k_common.h>
+/*
+ * If bootrom is currently at BLE there's no need to include the memory
+ * maps structure at this point
+ */
+#include <mvebu_def.h>
+#ifndef IMAGE_BLE
+
+/*****************************************************************************
+ * AMB Configuration
+ *****************************************************************************
+ */
+struct addr_map_win amb_memory_map[] = {
+       /* CP1 SPI1 CS0 Direct Mode access */
+       {0xf900,        0x1000000,      AMB_SPI1_CS0_ID},
+};
+
+int marvell_get_amb_memory_map(struct addr_map_win **win, uint32_t *size,
+                              uintptr_t base)
+{
+       *win = amb_memory_map;
+       if (*win == NULL)
+               *size = 0;
+       else
+               *size = ARRAY_SIZE(amb_memory_map);
+
+       return 0;
+}
+#endif
+
+/*****************************************************************************
+ * IO WIN Configuration
+ *****************************************************************************
+ */
+struct addr_map_win io_win_memory_map[] = {
+       /* CP1 (MCI0) internal regs */
+       {0x00000000f4000000,            0x2000000,  MCI_0_TID},
+#ifndef IMAGE_BLE
+       /* PCIe0 and SPI1_CS0 (RUNIT) on CP1*/
+       {0x00000000f9000000,            0x2000000,  MCI_0_TID},
+       /* PCIe1 on CP1*/
+       {0x00000000fb000000,            0x1000000,  MCI_0_TID},
+       /* PCIe2 on CP1*/
+       {0x00000000fc000000,            0x1000000,  MCI_0_TID},
+       /* MCI 0 indirect window */
+       {MVEBU_MCI_REG_BASE_REMAP(0),   0x100000,  MCI_0_TID},
+       /* MCI 1 indirect window */
+       {MVEBU_MCI_REG_BASE_REMAP(1),   0x100000,  MCI_1_TID},
+#endif
+};
+
+uint32_t marvell_get_io_win_gcr_target(int ap_index)
+{
+       return PIDI_TID;
+}
+
+int marvell_get_io_win_memory_map(int ap_index, struct addr_map_win **win,
+                                 uint32_t *size)
+{
+       *win = io_win_memory_map;
+       if (*win == NULL)
+               *size = 0;
+       else
+               *size = ARRAY_SIZE(io_win_memory_map);
+
+       return 0;
+}
+
+#ifndef IMAGE_BLE
+/*****************************************************************************
+ * IOB Configuration
+ *****************************************************************************
+ */
+struct addr_map_win iob_memory_map_cp0[] = {
+       /* CP0 */
+       /* PEX1_X1 window */
+       {0x00000000f7000000,    0x1000000,      PEX1_TID},
+       /* PEX2_X1 window */
+       {0x00000000f8000000,    0x1000000,      PEX2_TID},
+       /* PEX0_X4 window */
+       {0x00000000f6000000,    0x1000000,      PEX0_TID}
+};
+
+struct addr_map_win iob_memory_map_cp1[] = {
+       /* CP1 */
+       /* SPI1_CS0 (RUNIT) window */
+       {0x00000000f9000000,    0x1000000,      RUNIT_TID},
+       /* PEX1_X1 window */
+       {0x00000000fb000000,    0x1000000,      PEX1_TID},
+       /* PEX2_X1 window */
+       {0x00000000fc000000,    0x1000000,      PEX2_TID},
+       /* PEX0_X4 window */
+       {0x00000000fa000000,    0x1000000,      PEX0_TID}
+};
+
+int marvell_get_iob_memory_map(struct addr_map_win **win, uint32_t *size,
+                              uintptr_t base)
+{
+       switch (base) {
+       case MVEBU_CP_REGS_BASE(0):
+               *win = iob_memory_map_cp0;
+               *size = ARRAY_SIZE(iob_memory_map_cp0);
+               return 0;
+       case MVEBU_CP_REGS_BASE(1):
+               *win = iob_memory_map_cp1;
+               *size = ARRAY_SIZE(iob_memory_map_cp1);
+               return 0;
+       default:
+               *size = 0;
+               *win = 0;
+               return 1;
+       }
+}
+#endif
+
+/*****************************************************************************
+ * CCU Configuration
+ *****************************************************************************
+ */
+struct addr_map_win ccu_memory_map[] = {
+#ifdef IMAGE_BLE
+       {0x00000000f2000000,    0x4000000,  IO_0_TID}, /* IO window */
+#else
+       {0x00000000f2000000,    0xe000000,  IO_0_TID}, /* IO window */
+#endif
+};
+
+uint32_t marvell_get_ccu_gcr_target(int ap)
+{
+       return DRAM_0_TID;
+}
+
+int marvell_get_ccu_memory_map(int ap, struct addr_map_win **win,
+                              uint32_t *size)
+{
+       *win = ccu_memory_map;
+       *size = ARRAY_SIZE(ccu_memory_map);
+
+       return 0;
+}
+
+#ifndef IMAGE_BLE
+/*****************************************************************************
+ * SoC PM configuration
+ *****************************************************************************
+ */
+/* CP GPIO should be used and the GPIOs should be within same GPIO register */
+struct power_off_method pm_cfg = {
+       .type = PMIC_GPIO,
+       .cfg.gpio.pin_count = 1,
+       .cfg.gpio.info = {{0, 35} },
+       .cfg.gpio.step_count = 7,
+       .cfg.gpio.seq = {1, 0, 1, 0, 1, 0, 1},
+       .cfg.gpio.delay_ms = 10,
+};
+
+void *plat_marvell_get_pm_cfg(void)
+{
+       /* Return the PM configurations */
+       return &pm_cfg;
+}
+
+/* In reference to #ifndef IMAGE_BLE, this part is used for BLE only. */
+#else
+/*****************************************************************************
+ * SKIP IMAGE Configuration
+ *****************************************************************************
+ */
+#if PLAT_RECOVERY_IMAGE_ENABLE
+struct skip_image skip_im = {
+       .detection_method = GPIO,
+       .info.gpio.num = 33,
+       .info.gpio.button_state = HIGH,
+       .info.test.cp_ap = CP,
+       .info.test.cp_index = 0,
+};
+
+void *plat_marvell_get_skip_image_data(void)
+{
+       /* Return the skip_image configurations */
+       return &skip_im;
+}
+#endif
+#endif
diff --git a/plat/marvell/a8k/a80x0/mvebu_def.h b/plat/marvell/a8k/a80x0/mvebu_def.h
new file mode 100644 (file)
index 0000000..5bff12c
--- /dev/null
@@ -0,0 +1,17 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#ifndef __MVEBU_DEF_H__
+#define __MVEBU_DEF_H__
+
+#include <a8k_plat_def.h>
+
+#define CP_COUNT               2       /* A80x0 has both CP0 & CP1 */
+#define I2C_SPD_ADDR           0x53    /* Access SPD data */
+#define I2C_SPD_P0_ADDR                0x36    /* Select SPD data page 0 */
+
+#endif /* __MVEBU_DEF_H__ */
diff --git a/plat/marvell/a8k/a80x0/platform.mk b/plat/marvell/a8k/a80x0/platform.mk
new file mode 100644 (file)
index 0000000..0fe235b
--- /dev/null
@@ -0,0 +1,16 @@
+#
+# Copyright (C) 2018 Marvell International Ltd.
+#
+# SPDX-License-Identifier:     BSD-3-Clause
+# https://spdx.org/licenses
+#
+
+PCI_EP_SUPPORT         := 0
+
+DOIMAGE_SEC            :=      tools/doimage/secure/sec_img_8K.cfg
+
+MARVELL_MOCHI_DRV      :=      drivers/marvell/mochi/apn806_setup.c
+
+include plat/marvell/a8k/common/a8k_common.mk
+
+include plat/marvell/common/marvell_common.mk
diff --git a/plat/marvell/a8k/a80x0_mcbin/board/dram_port.c b/plat/marvell/a8k/a80x0_mcbin/board/dram_port.c
new file mode 100644 (file)
index 0000000..b455b83
--- /dev/null
@@ -0,0 +1,129 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#include <arch_helpers.h>
+#include <a8k_i2c.h>
+#include <debug.h>
+#include <mmio.h>
+#include <mv_ddr_if.h>
+#include <mvebu_def.h>
+#include <plat_marvell.h>
+
+#define MVEBU_CP_MPP_CTRL37_OFFS               20
+#define MVEBU_CP_MPP_CTRL38_OFFS               24
+#define MVEBU_CP_MPP_CTRL37_I2C0_SCK_ENA       0x2
+#define MVEBU_CP_MPP_CTRL38_I2C0_SDA_ENA       0x2
+
+#define MVEBU_MPP_CTRL_MASK                    0xf
+
+/*
+ * This struct provides the DRAM training code with
+ * the appropriate board DRAM configuration
+ */
+static struct mv_ddr_topology_map board_topology_map = {
+       /* Board with 1CS 8Gb x4 devices of Micron 2400T */
+       DEBUG_LEVEL_ERROR,
+       0x1, /* active interfaces */
+       /* cs_mask, mirror, dqs_swap, ck_swap X subphys */
+       { { { {0x1, 0x0, 0, 0}, /* FIXME: change the cs mask for all 64 bit */
+             {0x1, 0x0, 0, 0},
+             {0x1, 0x0, 0, 0},
+             {0x1, 0x0, 0, 0},
+             {0x1, 0x0, 0, 0},
+             {0x1, 0x0, 0, 0},
+             {0x1, 0x0, 0, 0},
+             {0x1, 0x0, 0, 0},
+             {0x1, 0x0, 0, 0} },
+          /* TODO: double check if the speed bin is 2400T */
+          SPEED_BIN_DDR_2400T,         /* speed_bin */
+          MV_DDR_DEV_WIDTH_8BIT,       /* sdram device width */
+          MV_DDR_DIE_CAP_8GBIT,        /* die capacity */
+          MV_DDR_FREQ_SAR,             /* frequency */
+          0, 0,                        /* cas_l, cas_wl */
+          MV_DDR_TEMP_LOW} },          /* temperature */
+          MV_DDR_64BIT_BUS_MASK,       /* subphys mask */
+          MV_DDR_CFG_SPD,              /* ddr configuration data source */
+       { {0} },                        /* raw spd data */
+       {0},                            /* timing parameters */
+       {                               /* electrical configuration */
+               {                       /* memory electrical configuration */
+                       MV_DDR_RTT_NOM_PARK_RZQ_DISABLE,        /* rtt_nom */
+                       {
+                               MV_DDR_RTT_NOM_PARK_RZQ_DIV4, /* rtt_park 1cs */
+                               MV_DDR_RTT_NOM_PARK_RZQ_DIV1  /* rtt_park 2cs */
+                       },
+                       {
+                               MV_DDR_RTT_WR_DYN_ODT_OFF,      /* rtt_wr 1cs */
+                               MV_DDR_RTT_WR_RZQ_DIV2          /* rtt_wr 2cs */
+                       },
+                       MV_DDR_DIC_RZQ_DIV7     /* dic */
+               },
+               {                       /* phy electrical configuration */
+                       MV_DDR_OHM_30,  /* data_drv_p */
+                       MV_DDR_OHM_30,  /* data_drv_n */
+                       MV_DDR_OHM_30,  /* ctrl_drv_p */
+                       MV_DDR_OHM_30,  /* ctrl_drv_n */
+                       {
+                               MV_DDR_OHM_60,  /* odt_p 1cs */
+                               MV_DDR_OHM_120  /* odt_p 2cs */
+                       },
+                       {
+                               MV_DDR_OHM_60,  /* odt_n 1cs */
+                               MV_DDR_OHM_120  /* odt_n 2cs */
+                       },
+               },
+               {                       /* mac electrical configuration */
+                       MV_DDR_ODT_CFG_NORMAL,          /* odtcfg_pattern */
+                       MV_DDR_ODT_CFG_ALWAYS_ON,       /* odtcfg_write */
+                       MV_DDR_ODT_CFG_NORMAL,          /* odtcfg_read */
+               },
+       }
+};
+
+struct mv_ddr_topology_map *mv_ddr_topology_map_get(void)
+{
+       /* Return the board topology as defined in the board code */
+       return &board_topology_map;
+}
+
+static void mpp_config(void)
+{
+       uint32_t val;
+       uintptr_t reg = MVEBU_CP_MPP_REGS(0, 4);
+
+       /* configure CP0 MPP 37 and 38 to i2c */
+       val = mmio_read_32(reg);
+       val &= ~((MVEBU_MPP_CTRL_MASK << MVEBU_CP_MPP_CTRL37_OFFS) |
+               (MVEBU_MPP_CTRL_MASK << MVEBU_CP_MPP_CTRL38_OFFS));
+       val |= (MVEBU_CP_MPP_CTRL37_I2C0_SCK_ENA << MVEBU_CP_MPP_CTRL37_OFFS) |
+               (MVEBU_CP_MPP_CTRL38_I2C0_SDA_ENA << MVEBU_CP_MPP_CTRL38_OFFS);
+       mmio_write_32(reg, val);
+}
+
+/*
+ * This function may modify the default DRAM parameters
+ * based on information received from SPD or bootloader
+ * configuration located on non volatile storage
+ */
+void plat_marvell_dram_update_topology(void)
+{
+       struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
+
+       INFO("Gathering DRAM information\n");
+
+       if (tm->cfg_src == MV_DDR_CFG_SPD) {
+               /* configure MPPs to enable i2c */
+               mpp_config();
+               /* initialize the i2c */
+               i2c_init((void *)MVEBU_CP0_I2C_BASE);
+               /* select SPD memory page 0 to access DRAM configuration */
+               i2c_write(I2C_SPD_P0_ADDR, 0x0, 1, tm->spd_data.all_bytes, 1);
+               /* read data from spd */
+               i2c_read(I2C_SPD_ADDR, 0x0, 1, tm->spd_data.all_bytes,
+                        sizeof(tm->spd_data.all_bytes));
+       }
+}
diff --git a/plat/marvell/a8k/a80x0_mcbin/board/marvell_plat_config.c b/plat/marvell/a8k/a80x0_mcbin/board/marvell_plat_config.c
new file mode 100644 (file)
index 0000000..079bd8f
--- /dev/null
@@ -0,0 +1,196 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#include <a8k_common.h>
+#include <delay_timer.h>
+#include <mmio.h>
+/*
+ * If bootrom is currently at BLE there's no need to include the memory
+ * maps structure at this point
+ */
+#include <mvebu_def.h>
+#ifndef IMAGE_BLE
+
+/*****************************************************************************
+ * GPIO Configuration
+ *****************************************************************************
+ */
+#define MPP_CONTROL_REGISTER           0xf2440018
+#define MPP_CONTROL_MPP_SEL_52_MASK    0xf0000
+#define GPIO_DATA_OUT1_REGISTER                0xf2440140
+#define GPIO_DATA_OUT_EN_CTRL1_REGISTER 0xf2440144
+#define GPIO52_MASK                    0x100000
+
+/* Reset PCIe via GPIO number 52 */
+int marvell_gpio_config(void)
+{
+       uint32_t reg;
+
+       reg = mmio_read_32(MPP_CONTROL_REGISTER);
+       reg |= MPP_CONTROL_MPP_SEL_52_MASK;
+       mmio_write_32(MPP_CONTROL_REGISTER, reg);
+
+       reg = mmio_read_32(GPIO_DATA_OUT1_REGISTER);
+       reg |= GPIO52_MASK;
+       mmio_write_32(GPIO_DATA_OUT1_REGISTER, reg);
+
+       reg = mmio_read_32(GPIO_DATA_OUT_EN_CTRL1_REGISTER);
+       reg &= ~GPIO52_MASK;
+       mmio_write_32(GPIO_DATA_OUT_EN_CTRL1_REGISTER, reg);
+       udelay(100);
+
+       return 0;
+}
+
+/*****************************************************************************
+ * AMB Configuration
+ *****************************************************************************
+ */
+struct addr_map_win amb_memory_map[] = {
+       /* CP1 SPI1 CS0 Direct Mode access */
+       {0xf900,        0x1000000,      AMB_SPI1_CS0_ID},
+};
+
+int marvell_get_amb_memory_map(struct addr_map_win **win, uint32_t *size,
+                              uintptr_t base)
+{
+       *win = amb_memory_map;
+       if (*win == NULL)
+               *size = 0;
+       else
+               *size = ARRAY_SIZE(amb_memory_map);
+
+       return 0;
+}
+#endif
+
+/*****************************************************************************
+ * IO WIN Configuration
+ *****************************************************************************
+ */
+struct addr_map_win io_win_memory_map[] = {
+       /* CP1 (MCI0) internal regs */
+       {0x00000000f4000000,            0x2000000,  MCI_0_TID},
+#ifndef IMAGE_BLE
+       /* PCIe0 and SPI1_CS0 (RUNIT) on CP1*/
+       {0x00000000f9000000,            0x2000000,  MCI_0_TID},
+       /* PCIe1 on CP1*/
+       {0x00000000fb000000,            0x1000000,  MCI_0_TID},
+       /* PCIe2 on CP1*/
+       {0x00000000fc000000,            0x1000000,  MCI_0_TID},
+       /* MCI 0 indirect window */
+       {MVEBU_MCI_REG_BASE_REMAP(0),   0x100000,   MCI_0_TID},
+       /* MCI 1 indirect window */
+       {MVEBU_MCI_REG_BASE_REMAP(1),   0x100000,   MCI_1_TID},
+#endif
+};
+
+uint32_t marvell_get_io_win_gcr_target(int ap_index)
+{
+       return PIDI_TID;
+}
+
+int marvell_get_io_win_memory_map(int ap_index, struct addr_map_win **win,
+                                 uint32_t *size)
+{
+       *win = io_win_memory_map;
+       if (*win == NULL)
+               *size = 0;
+       else
+               *size = ARRAY_SIZE(io_win_memory_map);
+
+       return 0;
+}
+
+#ifndef IMAGE_BLE
+/*****************************************************************************
+ * IOB Configuration
+ *****************************************************************************
+ */
+struct addr_map_win iob_memory_map_cp0[] = {
+       /* CP0 */
+       /* PEX1_X1 window */
+       {0x00000000f7000000,    0x1000000,      PEX1_TID},
+       /* PEX2_X1 window */
+       {0x00000000f8000000,    0x1000000,      PEX2_TID},
+       /* PEX0_X4 window */
+       {0x00000000f6000000,    0x1000000,      PEX0_TID},
+       {0x00000000c0000000,    0x30000000,     PEX0_TID},
+       {0x0000000800000000,    0x100000000,    PEX0_TID},
+};
+
+struct addr_map_win iob_memory_map_cp1[] = {
+       /* CP1 */
+       /* SPI1_CS0 (RUNIT) window */
+       {0x00000000f9000000,    0x1000000,      RUNIT_TID},
+       /* PEX1_X1 window */
+       {0x00000000fb000000,    0x1000000,      PEX1_TID},
+       /* PEX2_X1 window */
+       {0x00000000fc000000,    0x1000000,      PEX2_TID},
+       /* PEX0_X4 window */
+       {0x00000000fa000000,    0x1000000,      PEX0_TID}
+};
+
+int marvell_get_iob_memory_map(struct addr_map_win **win, uint32_t *size,
+                              uintptr_t base)
+{
+       switch (base) {
+       case MVEBU_CP_REGS_BASE(0):
+               *win = iob_memory_map_cp0;
+               *size = ARRAY_SIZE(iob_memory_map_cp0);
+               return 0;
+       case MVEBU_CP_REGS_BASE(1):
+               *win = iob_memory_map_cp1;
+               *size = ARRAY_SIZE(iob_memory_map_cp1);
+               return 0;
+       default:
+               *size = 0;
+               *win = 0;
+               return 1;
+       }
+}
+#endif
+
+/*****************************************************************************
+ * CCU Configuration
+ *****************************************************************************
+ */
+struct addr_map_win ccu_memory_map[] = {
+#ifdef IMAGE_BLE
+       {0x00000000f2000000,    0x4000000,  IO_0_TID}, /* IO window */
+#else
+       {0x00000000f2000000,    0xe000000,  IO_0_TID}, /* IO window */
+       {0x00000000c0000000,    0x30000000,  IO_0_TID}, /* IO window */
+       {0x0000000800000000,    0x100000000,  IO_0_TID}, /* IO window */
+#endif
+};
+
+uint32_t marvell_get_ccu_gcr_target(int ap)
+{
+       return DRAM_0_TID;
+}
+
+int marvell_get_ccu_memory_map(int ap_index, struct addr_map_win **win,
+                              uint32_t *size)
+{
+       *win = ccu_memory_map;
+       *size = ARRAY_SIZE(ccu_memory_map);
+
+       return 0;
+}
+
+/* NOTE: unlike other a8k boards there is no #else/IMAGE_BLE guard here; the section below is built for all stages. */
+
+/*****************************************************************************
+ * SKIP IMAGE Configuration
+ *****************************************************************************
+ */
+void *plat_marvell_get_skip_image_data(void)
+{
+       /* No recovery button on A8k-MCBIN board */
+       return NULL;
+}
diff --git a/plat/marvell/a8k/a80x0_mcbin/mvebu_def.h b/plat/marvell/a8k/a80x0_mcbin/mvebu_def.h
new file mode 100644 (file)
index 0000000..5bff12c
--- /dev/null
@@ -0,0 +1,17 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#ifndef __MVEBU_DEF_H__
+#define __MVEBU_DEF_H__
+
+#include <a8k_plat_def.h>
+
+#define CP_COUNT               2       /* A80x0 has both CP0 & CP1 */
+#define I2C_SPD_ADDR           0x53    /* Access SPD data */
+#define I2C_SPD_P0_ADDR                0x36    /* Select SPD data page 0 */
+
+#endif /* __MVEBU_DEF_H__ */
diff --git a/plat/marvell/a8k/a80x0_mcbin/platform.mk b/plat/marvell/a8k/a80x0_mcbin/platform.mk
new file mode 100644 (file)
index 0000000..0fe235b
--- /dev/null
@@ -0,0 +1,16 @@
+#
+# Copyright (C) 2018 Marvell International Ltd.
+#
+# SPDX-License-Identifier:     BSD-3-Clause
+# https://spdx.org/licenses
+#
+
+PCI_EP_SUPPORT         := 0
+
+DOIMAGE_SEC            :=      tools/doimage/secure/sec_img_8K.cfg
+
+MARVELL_MOCHI_DRV      :=      drivers/marvell/mochi/apn806_setup.c
+
+include plat/marvell/a8k/common/a8k_common.mk
+
+include plat/marvell/common/marvell_common.mk
diff --git a/plat/marvell/a8k/common/a8k_common.mk b/plat/marvell/a8k/common/a8k_common.mk
new file mode 100644 (file)
index 0000000..3bcce96
--- /dev/null
@@ -0,0 +1,122 @@
+#
+# Copyright (C) 2016 - 2018 Marvell International Ltd.
+#
+# SPDX-License-Identifier:     BSD-3-Clause
+# https://spdx.org/licenses
+
+include tools/doimage/doimage.mk
+
+PLAT_FAMILY            := a8k
+PLAT_FAMILY_BASE       := plat/marvell/$(PLAT_FAMILY)
+PLAT_INCLUDE_BASE      := include/plat/marvell/$(PLAT_FAMILY)
+PLAT_COMMON_BASE       := $(PLAT_FAMILY_BASE)/common
+MARVELL_DRV_BASE       := drivers/marvell
+MARVELL_COMMON_BASE    := plat/marvell/common
+
+ERRATA_A72_859971      := 1
+
+# Enable MSS support for a8k family
+MSS_SUPPORT            := 1
+
+# Disable EL3 cache for power management
+BL31_CACHE_DISABLE     := 1
+$(eval $(call add_define,BL31_CACHE_DISABLE))
+
+$(eval $(call add_define,PCI_EP_SUPPORT))
+$(eval $(call assert_boolean,PCI_EP_SUPPORT))
+
+DOIMAGEPATH            ?=      tools/doimage
+DOIMAGETOOL            ?=      ${DOIMAGEPATH}/doimage
+
+ROM_BIN_EXT ?= $(BUILD_PLAT)/ble.bin
+DOIMAGE_FLAGS  += -b $(ROM_BIN_EXT) $(NAND_DOIMAGE_FLAGS) $(DOIMAGE_SEC_FLAGS)
+
+# This define specifies DDR type for BLE
+$(eval $(call add_define,CONFIG_DDR4))
+
+MARVELL_GIC_SOURCES    :=      drivers/arm/gic/common/gic_common.c     \
+                               drivers/arm/gic/v2/gicv2_main.c         \
+                               drivers/arm/gic/v2/gicv2_helpers.c      \
+                               plat/common/plat_gicv2.c
+
+ATF_INCLUDES           :=      -Iinclude/common/tbbr
+
+PLAT_INCLUDES          :=      -I$(PLAT_FAMILY_BASE)/$(PLAT)           \
+                               -I$(PLAT_COMMON_BASE)/include           \
+                               -I$(PLAT_INCLUDE_BASE)/common           \
+                               -Iinclude/drivers/marvell               \
+                               -Iinclude/drivers/marvell/mochi         \
+                               $(ATF_INCLUDES)
+
+PLAT_BL_COMMON_SOURCES :=      $(PLAT_COMMON_BASE)/aarch64/a8k_common.c \
+                               drivers/console/aarch64/console.S        \
+                               drivers/ti/uart/aarch64/16550_console.S
+
+BLE_PORTING_SOURCES    :=      $(PLAT_FAMILY_BASE)/$(PLAT)/board/dram_port.c \
+                               $(PLAT_FAMILY_BASE)/$(PLAT)/board/marvell_plat_config.c
+
+MARVELL_MOCHI_DRV      +=      $(MARVELL_DRV_BASE)/mochi/cp110_setup.c
+
+BLE_SOURCES            :=      $(PLAT_COMMON_BASE)/plat_ble_setup.c            \
+                               $(MARVELL_MOCHI_DRV)                           \
+                               $(MARVELL_DRV_BASE)/i2c/a8k_i2c.c               \
+                               $(PLAT_COMMON_BASE)/plat_pm.c                   \
+                               $(MARVELL_DRV_BASE)/thermal.c                   \
+                               $(PLAT_COMMON_BASE)/plat_thermal.c              \
+                               $(BLE_PORTING_SOURCES)                          \
+                               $(MARVELL_DRV_BASE)/ccu.c                       \
+                               $(MARVELL_DRV_BASE)/io_win.c
+
+BL1_SOURCES            +=      $(PLAT_COMMON_BASE)/aarch64/plat_helpers.S \
+                               lib/cpus/aarch64/cortex_a72.S
+
+MARVELL_DRV            :=      $(MARVELL_DRV_BASE)/io_win.c    \
+                               $(MARVELL_DRV_BASE)/iob.c       \
+                               $(MARVELL_DRV_BASE)/mci.c       \
+                               $(MARVELL_DRV_BASE)/amb_adec.c  \
+                               $(MARVELL_DRV_BASE)/ccu.c       \
+                               $(MARVELL_DRV_BASE)/cache_llc.c \
+                               $(MARVELL_DRV_BASE)/comphy/phy-comphy-cp110.c
+
+BL31_PORTING_SOURCES   :=      $(PLAT_FAMILY_BASE)/$(PLAT)/board/marvell_plat_config.c
+
+BL31_SOURCES           +=      lib/cpus/aarch64/cortex_a72.S                  \
+                               $(PLAT_COMMON_BASE)/aarch64/plat_helpers.S     \
+                               $(PLAT_COMMON_BASE)/aarch64/plat_arch_config.c \
+                               $(PLAT_COMMON_BASE)/plat_pm.c                  \
+                               $(PLAT_COMMON_BASE)/plat_bl31_setup.c          \
+                               $(MARVELL_COMMON_BASE)/marvell_gicv2.c         \
+                               $(MARVELL_COMMON_BASE)/mrvl_sip_svc.c          \
+                               $(MARVELL_COMMON_BASE)/marvell_ddr_info.c      \
+                               $(BL31_PORTING_SOURCES)                        \
+                               $(MARVELL_DRV)                                 \
+                               $(MARVELL_MOCHI_DRV)                           \
+                               $(MARVELL_GIC_SOURCES)
+
+# Add trace functionality for PM
+BL31_SOURCES           +=      $(PLAT_COMMON_BASE)/plat_pm_trace.c
+
+# Disable the PSCI platform compatibility layer (allows porting
+# from Old Platform APIs to the new APIs).
+# It is not needed since Marvell platform already used the new platform APIs.
+ENABLE_PLAT_COMPAT     :=      0
+
+# Force builds with BL2 image on a80x0 platforms
+ifndef SCP_BL2
+ $(error "Error: SCP_BL2 image is mandatory for a8k family")
+endif
+
+# MSS (SCP) build
+include $(PLAT_COMMON_BASE)/mss/mss_a8k.mk
+
+# BLE (ROM context execution code, AKA binary extension)
+BLE_PATH       ?=  ble
+
+include ${BLE_PATH}/ble.mk
+$(eval $(call MAKE_BL,e))
+
+mrvl_flash: ${BUILD_PLAT}/${FIP_NAME} ${DOIMAGETOOL} ${BUILD_PLAT}/ble.bin
+       $(shell truncate -s %128K ${BUILD_PLAT}/bl1.bin)
+       $(shell cat ${BUILD_PLAT}/bl1.bin ${BUILD_PLAT}/${FIP_NAME} > ${BUILD_PLAT}/${BOOT_IMAGE})
+       ${DOIMAGETOOL} ${DOIMAGE_FLAGS} ${BUILD_PLAT}/${BOOT_IMAGE} ${BUILD_PLAT}/${FLASH_IMAGE}
+
diff --git a/plat/marvell/a8k/common/aarch64/a8k_common.c b/plat/marvell/a8k/common/aarch64/a8k_common.c
new file mode 100644 (file)
index 0000000..7c2bf31
--- /dev/null
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#include <plat_marvell.h>
+
+
+/* MMU entry for internal (register) space access */
+#define MAP_DEVICE0    MAP_REGION_FLAT(DEVICE0_BASE,                   \
+                                       DEVICE0_SIZE,                   \
+                                       MT_DEVICE | MT_RW | MT_SECURE)
+
+/*
+ * Table of regions for various BL stages to map using the MMU.
+ */
+#if IMAGE_BL1
+const mmap_region_t plat_marvell_mmap[] = {
+       MARVELL_MAP_SHARED_RAM,
+       MAP_DEVICE0,
+       {0}
+};
+#endif
+#if IMAGE_BL2
+const mmap_region_t plat_marvell_mmap[] = {
+       MARVELL_MAP_SHARED_RAM,
+       MAP_DEVICE0,
+       MARVELL_MAP_DRAM,
+       {0}
+};
+#endif
+
+#if IMAGE_BL2U
+const mmap_region_t plat_marvell_mmap[] = {
+       MAP_DEVICE0,
+       {0}
+};
+#endif
+
+#if IMAGE_BLE
+const mmap_region_t plat_marvell_mmap[] = {
+       MAP_DEVICE0,
+       {0}
+};
+#endif
+
+#if IMAGE_BL31
+const mmap_region_t plat_marvell_mmap[] = {
+       MARVELL_MAP_SHARED_RAM,
+       MAP_DEVICE0,
+       MARVELL_MAP_DRAM,
+       {0}
+};
+#endif
+#if IMAGE_BL32
+const mmap_region_t plat_marvell_mmap[] = {
+       MAP_DEVICE0,
+       {0}
+};
+#endif
+
+MARVELL_CASSERT_MMAP;
diff --git a/plat/marvell/a8k/common/aarch64/plat_arch_config.c b/plat/marvell/a8k/common/aarch64/plat_arch_config.c
new file mode 100644 (file)
index 0000000..8667331
--- /dev/null
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#include <platform.h>
+#include <arch_helpers.h>
+#include <mmio.h>
+#include <debug.h>
+#include <cache_llc.h>
+
+
+#define CCU_HTC_ASET                   (MVEBU_CCU_BASE(MVEBU_AP0) + 0x264)
+#define MVEBU_IO_AFFINITY              (0xF00)
+
+
+static void plat_enable_affinity(void)
+{
+       int cluster_id;
+       int affinity;
+
+       /* set CPU Affinity */
+       cluster_id = plat_my_core_pos() / PLAT_MARVELL_CLUSTER_CORE_COUNT;
+       affinity = (MVEBU_IO_AFFINITY | (1 << cluster_id));
+       mmio_write_32(CCU_HTC_ASET, affinity);
+
+       /* set barrier */
+       isb();
+}
+
+void marvell_psci_arch_init(int die_index)
+{
+#if LLC_ENABLE
+       /* check if LLC is in exclusive mode
+        * as L2 is configured to UniqueClean eviction
+        * (in a8k reset handler)
+        */
+       if (llc_is_exclusive(0) == 0)
+               ERROR("LLC should be configured to exclusice mode\n");
+#endif
+
+       /* Enable Affinity */
+       plat_enable_affinity();
+}
diff --git a/plat/marvell/a8k/common/aarch64/plat_helpers.S b/plat/marvell/a8k/common/aarch64/plat_helpers.S
new file mode 100644 (file)
index 0000000..fadc4c2
--- /dev/null
@@ -0,0 +1,112 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#include <asm_macros.S>
+#include <platform_def.h>
+#include <marvell_pm.h>
+
+       .globl  plat_secondary_cold_boot_setup
+       .globl  plat_get_my_entrypoint
+       .globl  plat_is_my_cpu_primary
+       .globl  plat_reset_handler
+
+       /* -----------------------------------------------------
+        * void plat_secondary_cold_boot_setup (void);
+        *
+        * This function performs any platform specific actions
+        * needed for a secondary cpu after a cold reset. Right
+        * now this is a stub function.
+        * -----------------------------------------------------
+        */
+func plat_secondary_cold_boot_setup
+       mov     x0, #0
+       ret
+endfunc plat_secondary_cold_boot_setup
+
+       /* ---------------------------------------------------------------------
+        * unsigned long plat_get_my_entrypoint (void);
+        *
+        * Main job of this routine is to distinguish
+        * between a cold and warm boot
+        * For a cold boot, return 0.
+        * For a warm boot, read the mailbox and return the address it contains.
+        *
+        * ---------------------------------------------------------------------
+        */
+func plat_get_my_entrypoint
+       /* Read first word and compare it with magic num */
+       mov_imm x0, PLAT_MARVELL_MAILBOX_BASE
+       ldr     x1, [x0]
+       mov_imm x2, MVEBU_MAILBOX_MAGIC_NUM
+       cmp     x1, x2
+       beq     warm_boot  /* If compare failed, return 0, i.e. cold boot */
+       mov     x0, #0
+       ret
+warm_boot:
+       mov_imm x1, MBOX_IDX_SEC_ADDR           /* Get the jump address */
+       subs    x1, x1, #1
+       mov     x2, #(MBOX_IDX_SEC_ADDR * 8)
+       lsl     x3, x2, x1
+       add     x0, x0, x3
+       ldr     x0, [x0]
+       ret
+endfunc plat_get_my_entrypoint
+
+       /* -----------------------------------------------------
+        * unsigned int plat_is_my_cpu_primary (void);
+        *
+        * Find out whether the current cpu is the primary
+        * cpu.
+        * -----------------------------------------------------
+        */
+func plat_is_my_cpu_primary
+       mrs     x0, mpidr_el1
+       and     x0, x0, #(MPIDR_CLUSTER_MASK | MPIDR_CPU_MASK)
+       cmp     x0, #MVEBU_PRIMARY_CPU
+       cset    w0, eq
+       ret
+endfunc plat_is_my_cpu_primary
+
+       /* -----------------------------------------------------
+        * void plat_reset_handler (void);
+        *
+        * Platform specific configuration right after the cpu
+        * is out of reset.
+        *
+        * The plat_reset_handler can clobber x0 - x18, x30.
+        * -----------------------------------------------------
+        */
+func plat_reset_handler
+       /*
+        * Note: the configurations below should be done before the MMU,
+        *        I-Cache and L2 are enabled.
+        *        The reset handler is executed right after reset
+        *        and before Caches are enabled.
+        */
+
+       /* Enable L1/L2 ECC and Parity */
+       mrs x5, s3_1_c11_c0_2  /* L2 Ctrl */
+       orr x5, x5, #(1 << 21) /* Enable L1/L2 cache ECC & Parity */
+       msr s3_1_c11_c0_2, x5  /* L2 Ctrl */
+
+#if LLC_ENABLE
+       /*
+        * Enable L2 UniqueClean evictions
+        *  Note: this configuration assumes that LLC is configured
+        *        in exclusive mode.
+        *        Later on in the code this assumption will be validated
+        */
+       mrs x5, s3_1_c15_c0_0  /* L2 Ctrl */
+       orr x5, x5, #(1 << 14) /* Enable UniqueClean evictions with data */
+       msr s3_1_c15_c0_0, x5  /* L2 Ctrl */
+#endif
+
+       /* Instruction Barrier to allow msr command completion */
+       isb
+
+        ret
+endfunc plat_reset_handler
diff --git a/plat/marvell/a8k/common/include/a8k_plat_def.h b/plat/marvell/a8k/common/include/a8k_plat_def.h
new file mode 100644 (file)
index 0000000..4ed8c7e
--- /dev/null
@@ -0,0 +1,190 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#ifndef __A8K_PLAT_DEF_H__
+#define __A8K_PLAT_DEF_H__
+
+#include <marvell_def.h>
+
+#define MVEBU_PRIMARY_CPU              0x0
+#define MVEBU_AP0                      0x0
+
+/* APN806 revision ID */
+#define MVEBU_CSS_GWD_CTRL_IIDR2_REG   (MVEBU_REGS_BASE + 0x610FCC)
+#define GWD_IIDR2_REV_ID_OFFSET                12
+#define GWD_IIDR2_REV_ID_MASK          0xF
+#define GWD_IIDR2_CHIP_ID_OFFSET       20
+#define GWD_IIDR2_CHIP_ID_MASK         (0xFFF << GWD_IIDR2_CHIP_ID_OFFSET)
+
+#define CHIP_ID_AP806                  0x806
+#define CHIP_ID_AP807                  0x807
+
+#define COUNTER_FREQUENCY              25000000
+
+#define MVEBU_REGS_BASE                        0xF0000000
+#define MVEBU_REGS_BASE_MASK           0xF0000000
+#define MVEBU_REGS_BASE_AP(ap)         MVEBU_REGS_BASE
+#define MVEBU_CP_REGS_BASE(cp_index)   (0xF2000000 + (cp_index) * 0x2000000)
+#define MVEBU_RFU_BASE                 (MVEBU_REGS_BASE + 0x6F0000)
+#define MVEBU_IO_WIN_BASE(ap_index)    (MVEBU_RFU_BASE)
+#define MVEBU_IO_WIN_GCR_OFFSET                (0x70)
+#define MVEBU_IO_WIN_MAX_WINS          (7)
+
+/* Misc SoC configurations Base */
+#define MVEBU_MISC_SOC_BASE            (MVEBU_REGS_BASE + 0x6F4300)
+
+#define MVEBU_CCU_BASE(ap_index)       (MVEBU_REGS_BASE + 0x4000)
+#define MVEBU_CCU_MAX_WINS             (8)
+
+#define MVEBU_LLC_BASE(ap_index)       (MVEBU_REGS_BASE + 0x8000)
+#define MVEBU_DRAM_MAC_BASE            (MVEBU_REGS_BASE + 0x20000)
+#define MVEBU_DRAM_PHY_BASE            (MVEBU_REGS_BASE + 0x20000)
+#define MVEBU_SMMU_BASE                        (MVEBU_REGS_BASE + 0x100000)
+#define MVEBU_CP_MPP_REGS(cp_index, n) (MVEBU_CP_REGS_BASE(cp_index) + \
+                                               0x440000 + ((n) << 2))
+#define MVEBU_PM_MPP_REGS(cp_index, n) (MVEBU_CP_REGS_BASE(cp_index) + \
+                                               0x440000 + ((n / 8) << 2))
+#define MVEBU_CP_GPIO_DATA_OUT(cp_index, n) \
+                                       (MVEBU_CP_REGS_BASE(cp_index) + \
+                                       0x440100 + ((n > 32) ? 0x40 : 0x00))
+#define MVEBU_CP_GPIO_DATA_OUT_EN(cp_index, n) \
+                                       (MVEBU_CP_REGS_BASE(cp_index) + \
+                                       0x440104 + ((n > 32) ? 0x40 : 0x00))
+#define MVEBU_CP_GPIO_DATA_IN(cp_index, n) (MVEBU_CP_REGS_BASE(cp_index) + \
+                                       0x440110 + ((n > 32) ? 0x40 : 0x00))
+#define MVEBU_AP_MPP_REGS(n)           (MVEBU_RFU_BASE + 0x4000 + ((n) << 2))
+#define MVEBU_AP_GPIO_REGS             (MVEBU_RFU_BASE + 0x5040)
+#define MVEBU_AP_GPIO_DATA_IN          (MVEBU_AP_GPIO_REGS + 0x10)
+#define MVEBU_AP_I2C_BASE              (MVEBU_REGS_BASE + 0x511000)
+#define MVEBU_CP0_I2C_BASE             (MVEBU_CP_REGS_BASE(0) + 0x701000)
+#define MVEBU_AP_EXT_TSEN_BASE         (MVEBU_RFU_BASE + 0x8084)
+
+#define MVEBU_AP_MC_TRUSTZONE_REG_LOW(ap, win) (MVEBU_REGS_BASE_AP(ap) + \
+                                                       0x20080 + ((win) * 0x8))
+#define MVEBU_AP_MC_TRUSTZONE_REG_HIGH(ap, win)        (MVEBU_REGS_BASE_AP(ap) + \
+                                                       0x20084 + ((win) * 0x8))
+
+/* MCI indirect access definitions */
+#define MCI_MAX_UNIT_ID                                2
+/* SoC RFU / IHBx4 Control */
+#define MCIX4_REG_START_ADDRESS_REG(unit_id)   (MVEBU_RFU_BASE + \
+                                               0x4218 + (unit_id * 0x20))
+#define MCI_REMAP_OFF_SHIFT                    8
+
+#define MVEBU_MCI_REG_BASE_REMAP(index)                (0xFD000000 + \
+                                               ((index) * 0x1000000))
+
+#define MVEBU_PCIE_X4_MAC_BASE(x)      (MVEBU_CP_REGS_BASE(x) + 0x600000)
+#define MVEBU_COMPHY_BASE(x)           (MVEBU_CP_REGS_BASE(x) + 0x441000)
+#define MVEBU_HPIPE_BASE(x)            (MVEBU_CP_REGS_BASE(x) + 0x120000)
+#define MVEBU_CP_DFX_OFFSET            (0x400200)
+
+/*****************************************************************************
+ * MVEBU memory map related constants
+ *****************************************************************************
+ */
+/* Aggregate of all devices in the first GB */
+#define DEVICE0_BASE                   MVEBU_REGS_BASE
+#define DEVICE0_SIZE                   0x10000000
+
+/*****************************************************************************
+ * GIC-400 & interrupt handling related constants
+ *****************************************************************************
+ */
+/* Base MVEBU compatible GIC memory map */
+#define MVEBU_GICD_BASE                        0x210000
+#define MVEBU_GICC_BASE                        0x220000
+
+
+/*****************************************************************************
+ * AXI Configuration
+ *****************************************************************************
+ */
+#define MVEBU_AXI_ATTR_ARCACHE_OFFSET          4
+#define MVEBU_AXI_ATTR_ARCACHE_MASK            (0xF << \
+                                                MVEBU_AXI_ATTR_ARCACHE_OFFSET)
+#define MVEBU_AXI_ATTR_ARDOMAIN_OFFSET         12
+#define MVEBU_AXI_ATTR_ARDOMAIN_MASK           (0x3 << \
+                                                MVEBU_AXI_ATTR_ARDOMAIN_OFFSET)
+#define MVEBU_AXI_ATTR_AWCACHE_OFFSET          20
+#define MVEBU_AXI_ATTR_AWCACHE_MASK            (0xF << \
+                                                MVEBU_AXI_ATTR_AWCACHE_OFFSET)
+#define MVEBU_AXI_ATTR_AWDOMAIN_OFFSET         28
+#define MVEBU_AXI_ATTR_AWDOMAIN_MASK           (0x3 << \
+                                                MVEBU_AXI_ATTR_AWDOMAIN_OFFSET)
+
+/* SATA MBUS to AXI configuration */
+#define MVEBU_SATA_M2A_AXI_ARCACHE_OFFSET      1
+#define MVEBU_SATA_M2A_AXI_ARCACHE_MASK                (0xF << \
+                                       MVEBU_SATA_M2A_AXI_ARCACHE_OFFSET)
+#define MVEBU_SATA_M2A_AXI_AWCACHE_OFFSET      5
+#define MVEBU_SATA_M2A_AXI_AWCACHE_MASK                (0xF << \
+                                       MVEBU_SATA_M2A_AXI_AWCACHE_OFFSET)
+
+/* ARM cache attributes */
+#define CACHE_ATTR_BUFFERABLE                  0x1
+#define CACHE_ATTR_CACHEABLE                   0x2
+#define CACHE_ATTR_READ_ALLOC                  0x4
+#define CACHE_ATTR_WRITE_ALLOC                 0x8
+/* Domain */
+#define DOMAIN_NON_SHAREABLE                   0x0
+#define DOMAIN_INNER_SHAREABLE                 0x1
+#define DOMAIN_OUTER_SHAREABLE                 0x2
+#define DOMAIN_SYSTEM_SHAREABLE                        0x3
+
+/************************************************************************
+ * Required platform porting definitions common to all
+ * Management Compute SubSystems (MSS)
+ ************************************************************************
+ */
+/*
+ * Load address of SCP_BL2
+ * SCP_BL2 is loaded to the same place as BL31.
+ * Once SCP_BL2 is transferred to the SCP,
+ * it is discarded and BL31 is loaded over the top.
+ */
+#ifdef SCP_IMAGE
+#define SCP_BL2_BASE                    BL31_BASE
+#endif
+
+#ifndef __ASSEMBLER__
+enum ap806_sar_target_dev {
+       SAR_PIDI_MCIX2          = 0x0,
+       SAR_MCIX4               = 0x1,
+       SAR_SPI                 = 0x2,
+       SAR_SD                  = 0x3,
+       SAR_PIDI_MCIX2_BD       = 0x4, /* BootRom disabled */
+       SAR_MCIX4_DB            = 0x5, /* BootRom disabled */
+       SAR_SPI_DB              = 0x6, /* BootRom disabled */
+       SAR_EMMC                = 0x7
+};
+
+enum io_win_target_ids {
+       MCI_0_TID        = 0x0,
+       MCI_1_TID        = 0x1,
+       MCI_2_TID        = 0x2,
+       PIDI_TID         = 0x3,
+       SPI_TID          = 0x4,
+       STM_TID          = 0x5,
+       BOOTROM_TID      = 0x6,
+       IO_WIN_MAX_TID
+};
+
+enum ccu_target_ids {
+       IO_0_TID        = 0x00,
+       DRAM_0_TID      = 0x03,
+       IO_1_TID        = 0x0F,
+       CFG_REG_TID     = 0x10,
+       RAR_TID         = 0x20,
+       SRAM_TID        = 0x40,
+       DRAM_1_TID      = 0xC0,
+       CCU_MAX_TID,
+       INVALID_TID     = 0xFF
+};
+#endif /* __ASSEMBLER__ */
+
+#endif /* __A8K_PLAT_DEF_H__ */
diff --git a/plat/marvell/a8k/common/include/ddr_info.h b/plat/marvell/a8k/common/include/ddr_info.h
new file mode 100644 (file)
index 0000000..e19036a
--- /dev/null
@@ -0,0 +1,9 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#define DRAM_MAX_IFACE                 1
+#define DRAM_CH0_MMAP_LOW_OFFSET       0x20200
diff --git a/plat/marvell/a8k/common/include/plat_macros.S b/plat/marvell/a8k/common/include/plat_macros.S
new file mode 100644 (file)
index 0000000..2a6ccf2
--- /dev/null
@@ -0,0 +1,20 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#ifndef __PLAT_MACROS_S__
+#define __PLAT_MACROS_S__
+
+#include <marvell_macros.S>
+
+/*
+ * Required platform porting macros
+ * (Provided by included headers)
+ */
+.macro plat_crash_print_regs
+.endm
+
+#endif /* __PLAT_MACROS_S__ */
diff --git a/plat/marvell/a8k/common/include/platform_def.h b/plat/marvell/a8k/common/include/platform_def.h
new file mode 100644 (file)
index 0000000..f7bd23f
--- /dev/null
@@ -0,0 +1,202 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#ifndef __PLATFORM_DEF_H__
+#define __PLATFORM_DEF_H__
+
+#include <board_marvell_def.h>
+#include <gic_common.h>
+#include <interrupt_props.h>
+#include <mvebu_def.h>
+#ifndef __ASSEMBLY__
+#include <stdio.h>
+#endif /* __ASSEMBLY__ */
+
+/*
+ * Most platform porting definitions provided by included headers
+ */
+
+/*
+ * DRAM Memory layout:
+ *             +-----------------------+
+ *             :                       :
+ *             :       Linux           :
+ * 0x04X00000-->+-----------------------+
+ *             |       BL3-3(u-boot)   |>>}>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
+ *             |-----------------------|  }                                   |
+ *             |       BL3-[0,1, 2]    |  }---------------------------------> |
+ *             |-----------------------|  }                            ||     |
+ *             |       BL2             |  }->FIP (loaded by            ||     |
+ *             |-----------------------|  }       BootROM to DRAM)     ||     |
+ *             |       FIP_TOC         |  }                            ||     |
+ * 0x04120000-->|-----------------------|                              ||     |
+ *             |       BL1 (RO)        |                               ||     |
+ * 0x04100000-->+-----------------------+                              ||     |
+ *             :                       :                               ||     |
+ *             : Trusted SRAM section  :                               \/     |
+ * 0x04040000-->+-----------------------+  Replaced by BL2  +----------------+ |
+ *             |       BL1 (RW)        |  <<<<<<<<<<<<<<<< | BL3-1 NOBITS   | |
+ * 0x04037000-->|-----------------------|  <<<<<<<<<<<<<<<< |----------------| |
+ *             |                       |  <<<<<<<<<<<<<<<< | BL3-1 PROGBITS | |
+ * 0x04023000-->|-----------------------|                  +----------------+ |
+ *             |       BL2             |                                      |
+ *             |-----------------------|                                      |
+ *             |                       |                                      |
+ * 0x04001000-->|-----------------------|                                     |
+ *             |       Shared          |                                      |
+ * 0x04000000-->+-----------------------+                                     |
+ *             :                       :                                      |
+ *             :       Linux           :                                      |
+ *             :                       :                                      |
+ *             |-----------------------|                                      |
+ *             |                       |       U-Boot(BL3-3) Loaded by BL2    |
+ *             |       U-Boot          |       <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
+ * 0x00000000-->+-----------------------+
+ *
+ * Trusted SRAM section 0x4000000..0x4200000:
+ * ----------------------------------------
+ * SRAM_BASE           = 0x4001000
+ * BL2_BASE                    = 0x4006000
+ * BL2_LIMIT           = BL31_BASE
+ * BL31_BASE           = 0x4023000 = (64MB + 256KB - 0x1D000)
+ * BL31_PROGBITS_LIMIT = BL1_RW_BASE
+ * BL1_RW_BASE         = 0x4037000 = (64MB + 256KB - 0x9000)
+ * BL1_RW_LIMIT                = BL31_LIMIT = 0x4040000
+ *
+ *
+ * PLAT_MARVELL_FIP_BASE       = 0x4120000
+ */
+
+/*
+ * Since BL33 is loaded by BL2 (and validated by BL31) to DRAM offset 0,
+ * it is allowed to load/copy images to 'NULL' pointers
+ */
+#if defined(IMAGE_BL2) || defined(IMAGE_BL31)
+#define PLAT_ALLOW_ZERO_ADDR_COPY
+#endif
+
+#define PLAT_MARVELL_SRAM_BASE                 0xFFE1C048
+#define PLAT_MARVELL_SRAM_END                  0xFFE78000
+
+#define PLAT_MARVELL_ATF_BASE                  0x4000000
+#define PLAT_MARVELL_ATF_LOAD_ADDR             (PLAT_MARVELL_ATF_BASE + \
+                                                               0x100000)
+
+#define PLAT_MARVELL_FIP_BASE                  (PLAT_MARVELL_ATF_LOAD_ADDR + \
+                                                               0x20000)
+#define PLAT_MARVELL_FIP_MAX_SIZE              0x4000000
+
+#define PLAT_MARVELL_NORTHB_COUNT              1
+
+#define PLAT_MARVELL_CLUSTER_COUNT             2
+#define PLAT_MARVELL_CLUSTER_CORE_COUNT                2
+
+#define PLAT_MARVELL_CORE_COUNT                        (PLAT_MARVELL_CLUSTER_COUNT * \
+                                               PLAT_MARVELL_CLUSTER_CORE_COUNT)
+
+/* DRAM[2MB..66MB] is used as Trusted ROM */
+#define PLAT_MARVELL_TRUSTED_ROM_BASE          PLAT_MARVELL_ATF_LOAD_ADDR
+/* 64 MB TODO: reduce this to minimum needed according to fip image size */
+#define PLAT_MARVELL_TRUSTED_ROM_SIZE          0x04000000
+/* Reserve 16M for SCP (Secure PayLoad) Trusted DRAM */
+#define PLAT_MARVELL_TRUSTED_DRAM_BASE         0x04400000
+#define PLAT_MARVELL_TRUSTED_DRAM_SIZE         0x01000000      /* 16 MB */
+
+/*
+ * PLAT_MARVELL_MAX_BL1_RW_SIZE is calculated using the current BL1 RW debug size
+ * plus a little space for growth.
+ */
+#define PLAT_MARVELL_MAX_BL1_RW_SIZE           0xA000
+
+/*
+ * PLAT_MARVELL_MAX_BL2_SIZE is calculated using the current BL2 debug size plus a
+ * little space for growth.
+ */
+#define PLAT_MARVELL_MAX_BL2_SIZE              0xF000
+
+/*
+ * PLAT_MARVEL_MAX_BL31_SIZE is calculated using the current BL31 debug size plus a
+ * little space for growth.
+ */
+#define PLAT_MARVEL_MAX_BL31_SIZE              0x5D000
+
+#define PLAT_MARVELL_CPU_ENTRY_ADDR            BL1_RO_BASE
+
+/* GIC related definitions */
+#define PLAT_MARVELL_GICD_BASE         (MVEBU_REGS_BASE + MVEBU_GICD_BASE)
+#define PLAT_MARVELL_GICC_BASE         (MVEBU_REGS_BASE + MVEBU_GICC_BASE)
+
+#define PLAT_MARVELL_G0_IRQ_PROPS(grp) \
+       INTR_PROP_DESC(MARVELL_IRQ_SEC_SGI_0, GIC_HIGHEST_SEC_PRIORITY, grp, \
+                       GIC_INTR_CFG_LEVEL), \
+       INTR_PROP_DESC(MARVELL_IRQ_SEC_SGI_6, GIC_HIGHEST_SEC_PRIORITY, grp, \
+                       GIC_INTR_CFG_LEVEL)
+
+#define PLAT_MARVELL_G1S_IRQ_PROPS(grp) \
+       INTR_PROP_DESC(MARVELL_IRQ_SEC_PHY_TIMER, GIC_HIGHEST_SEC_PRIORITY, \
+                       grp, GIC_INTR_CFG_LEVEL), \
+       INTR_PROP_DESC(MARVELL_IRQ_SEC_SGI_1, GIC_HIGHEST_SEC_PRIORITY, grp, \
+                       GIC_INTR_CFG_LEVEL), \
+       INTR_PROP_DESC(MARVELL_IRQ_SEC_SGI_2, GIC_HIGHEST_SEC_PRIORITY, grp, \
+                       GIC_INTR_CFG_LEVEL), \
+       INTR_PROP_DESC(MARVELL_IRQ_SEC_SGI_3, GIC_HIGHEST_SEC_PRIORITY, grp, \
+                       GIC_INTR_CFG_LEVEL), \
+       INTR_PROP_DESC(MARVELL_IRQ_SEC_SGI_4, GIC_HIGHEST_SEC_PRIORITY, grp, \
+                       GIC_INTR_CFG_LEVEL), \
+       INTR_PROP_DESC(MARVELL_IRQ_SEC_SGI_5, GIC_HIGHEST_SEC_PRIORITY, grp, \
+                       GIC_INTR_CFG_LEVEL), \
+       INTR_PROP_DESC(MARVELL_IRQ_SEC_SGI_7, GIC_HIGHEST_SEC_PRIORITY, grp, \
+                       GIC_INTR_CFG_LEVEL)
+
+#define PLAT_MARVELL_SHARED_RAM_CACHED         1
+
+/*
+ * Load address of BL3-3 for this platform port
+ */
+#define PLAT_MARVELL_NS_IMAGE_OFFSET           0x0
+
+/* System Reference Clock*/
+#define PLAT_REF_CLK_IN_HZ                     COUNTER_FREQUENCY
+
+/*
+ * PL011 related constants
+ */
+#define PLAT_MARVELL_BOOT_UART_BASE            (MVEBU_REGS_BASE + 0x512000)
+#define PLAT_MARVELL_BOOT_UART_CLK_IN_HZ       200000000
+
+#define PLAT_MARVELL_CRASH_UART_BASE           PLAT_MARVELL_BOOT_UART_BASE
+#define PLAT_MARVELL_CRASH_UART_CLK_IN_HZ      PLAT_MARVELL_BOOT_UART_CLK_IN_HZ
+
+#define PLAT_MARVELL_BL31_RUN_UART_BASE                PLAT_MARVELL_BOOT_UART_BASE
+#define PLAT_MARVELL_BL31_RUN_UART_CLK_IN_HZ   PLAT_MARVELL_BOOT_UART_CLK_IN_HZ
+
+/* Recovery image enable */
+#define PLAT_RECOVERY_IMAGE_ENABLE             0
+
+/* Required platform porting definitions */
+#define PLAT_MAX_PWR_LVL                       MPIDR_AFFLVL1
+
+/* System timer related constants */
+#define PLAT_MARVELL_NSTIMER_FRAME_ID          1
+
+/* Mailbox base address (note the lower memory space
+ * is reserved for BLE data)
+ */
+#define PLAT_MARVELL_MAILBOX_BASE              (MARVELL_TRUSTED_SRAM_BASE \
+                                                       + 0x400)
+#define PLAT_MARVELL_MAILBOX_SIZE              0x100
+#define PLAT_MARVELL_MAILBOX_MAGIC_NUM         0x6D72766C      /* mrvl */
+
+/* Securities */
+#define IRQ_SEC_OS_TICK_INT                    MARVELL_IRQ_SEC_PHY_TIMER
+
+#define TRUSTED_DRAM_BASE                      PLAT_MARVELL_TRUSTED_DRAM_BASE
+#define TRUSTED_DRAM_SIZE                      PLAT_MARVELL_TRUSTED_DRAM_SIZE
+
+#define BL32_BASE                              TRUSTED_DRAM_BASE
+
+#endif /* __PLATFORM_DEF_H__ */
diff --git a/plat/marvell/a8k/common/mss/mss_a8k.mk b/plat/marvell/a8k/common/mss/mss_a8k.mk
new file mode 100644 (file)
index 0000000..58f23d8
--- /dev/null
@@ -0,0 +1,20 @@
+#
+# Copyright (C) 2018 Marvell International Ltd.
+#
+# SPDX-License-Identifier:     BSD-3-Clause
+# https://spdx.org/licenses
+#
+
+PLAT_MARVELL           :=      plat/marvell
+A8K_MSS_SOURCE         :=      $(PLAT_MARVELL)/a8k/common/mss
+
+BL2_SOURCES            +=      $(A8K_MSS_SOURCE)/mss_bl2_setup.c
+
+BL31_SOURCES           +=      $(A8K_MSS_SOURCE)/mss_pm_ipc.c
+
+PLAT_INCLUDES          +=      -I$(A8K_MSS_SOURCE)
+
+ifneq (${SCP_BL2},)
+# This define is used to indicate that the SCP image is present
+$(eval $(call add_define,SCP_IMAGE))
+endif
diff --git a/plat/marvell/a8k/common/mss/mss_bl2_setup.c b/plat/marvell/a8k/common/mss/mss_bl2_setup.c
new file mode 100644 (file)
index 0000000..6688551
--- /dev/null
@@ -0,0 +1,144 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#include <a8k_common.h>
+#include <bl_common.h>
+#include <ccu.h>
+#include <cp110_setup.h>
+#include <debug.h>
+#include <marvell_plat_priv.h> /* timer functionality */
+#include <mmio.h>
+#include <platform_def.h>
+
+#include "mss_scp_bootloader.h"
+
+/* IO windows configuration */
+#define IOW_GCR_OFFSET         (0x70)
+
+/* MSS windows configuration */
+#define MSS_AEBR(base)                 (base + 0x160)
+#define MSS_AIBR(base)                 (base + 0x164)
+#define MSS_AEBR_MASK                  0xFFF
+#define MSS_AIBR_MASK                  0xFFF
+
+#define MSS_EXTERNAL_SPACE             0x50000000
+#define MSS_EXTERNAL_ACCESS_BIT                28
+#define MSS_EXTERNAL_ADDR_MASK         0xfffffff
+#define MSS_INTERNAL_ACCESS_BIT                28
+
+struct addr_map_win ccu_mem_map[] = {
+       {MVEBU_CP_REGS_BASE(0), 0x4000000, IO_0_TID}
+};
+
+/* Since the scp_bl2 image can contain firmware for cp1 and cp0 coprocessors,
+ * the access to cp0 and cp1 need to be provided. More precisely it is
+ * required to:
+ *  - get the information about device id which is stored in CP0 registers
+ *    (to distinguish between cases where we have cp0 and cp1 or standalone cp0)
+ *  - get the access to cp which is needed for loading fw for cp0/cp1
+ *    coprocessors
+ * This function configures ccu windows accordingly.
+ *
+ * Note: there is no need to restore the previous ccu configuration, since in
+ * the next phase (BL31) init_ccu will be called (via apn806_init/
+ * bl31_plat_arch_setup) and therefore the ccu configuration will be overwritten.
+ */
+static int bl2_plat_mmap_init(void)
+{
+       int cfg_num, win_id, cfg_idx;
+
+       cfg_num =  ARRAY_SIZE(ccu_mem_map);
+
+       /* CCU window-0 should not be counted - it's already used */
+       if (cfg_num > (MVEBU_CCU_MAX_WINS - 1)) {
+               ERROR("BL2: %s: trying to open too many windows\n", __func__);
+               return -1;
+       }
+
+       /* Enable required CCU windows
+        * Do not touch CCU window 0,
+        * it's used for the internal registers access
+        */
+       for (cfg_idx = 0, win_id = 1; cfg_idx < cfg_num; cfg_idx++, win_id++) {
+               /* Enable required CCU windows */
+               ccu_win_check(&ccu_mem_map[cfg_idx]);
+               ccu_enable_win(MVEBU_AP0, &ccu_mem_map[cfg_idx], win_id);
+       }
+
+       /* Set the default target id to PIDI */
+       mmio_write_32(MVEBU_IO_WIN_BASE(MVEBU_AP0) + IOW_GCR_OFFSET, PIDI_TID);
+
+       return 0;
+}
+
+/*****************************************************************************
+ * Transfer SCP_BL2 from Trusted RAM using the SCP Download protocol.
+ * Return 0 on success, -1 otherwise.
+ *****************************************************************************
+ */
+int bl2_plat_handle_scp_bl2(image_info_t *scp_bl2_image_info)
+{
+       int ret;
+
+       INFO("BL2: Initiating SCP_BL2 transfer to SCP\n");
+       printf("BL2: Initiating SCP_BL2 transfer to SCP\n");
+
+       /* initialize time (for delay functionality) */
+       plat_delay_timer_init();
+
+       ret = bl2_plat_mmap_init();
+       if (ret != 0)
+               return ret;
+
+       ret = scp_bootloader_transfer((void *)scp_bl2_image_info->image_base,
+               scp_bl2_image_info->image_size);
+
+       if (ret == 0)
+               INFO("BL2: SCP_BL2 transferred to SCP\n");
+       else
+               ERROR("BL2: SCP_BL2 transfer failure\n");
+
+       return ret;
+}
+
+uintptr_t bl2_plat_get_cp_mss_regs(int ap_idx, int cp_idx)
+{
+       return MVEBU_CP_REGS_BASE(cp_idx) + 0x280000;
+}
+
+uintptr_t bl2_plat_get_ap_mss_regs(int ap_idx)
+{
+       return MVEBU_REGS_BASE + 0x580000;
+}
+
+uint32_t bl2_plat_get_cp_count(int ap_idx)
+{
+       uint32_t revision = cp110_device_id_get(MVEBU_CP_REGS_BASE(0));
+       /* A8040: two CPs.
+        * A7040: one CP.
+        */
+       if (revision == MVEBU_80X0_DEV_ID ||
+           revision == MVEBU_80X0_CP115_DEV_ID)
+               return 2;
+       else
+               return 1;
+}
+
+uint32_t bl2_plat_get_ap_count(void)
+{
+       /* A8040 and A7040 have only one AP */
+       return 1;
+}
+
+void bl2_plat_configure_mss_windows(uintptr_t mss_regs)
+{
+       /* set AXI External and Internal Address Bus extension */
+       mmio_write_32(MSS_AEBR(mss_regs),
+                     ((0x0 >> MSS_EXTERNAL_ACCESS_BIT) & MSS_AEBR_MASK));
+       mmio_write_32(MSS_AIBR(mss_regs),
+                     ((mss_regs >> MSS_INTERNAL_ACCESS_BIT) & MSS_AIBR_MASK));
+}
diff --git a/plat/marvell/a8k/common/mss/mss_pm_ipc.c b/plat/marvell/a8k/common/mss/mss_pm_ipc.c
new file mode 100644 (file)
index 0000000..6ff4abc
--- /dev/null
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#include <debug.h>
+#include <mmio.h>
+#include <psci.h>
+#include <string.h>
+
+#include <mss_pm_ipc.h>
+
+/*
+ * SISR is 32 bit interrupt register representing 32 interrupts
+ *
+ * +======+=============+=============+
+ * + Bits + 31          + 30 - 00     +
+ * +======+=============+=============+
+ * + Desc + MSS Msg Int + Reserved    +
+ * +======+=============+=============+
+ */
+#define MSS_SISR               (MVEBU_REGS_BASE + 0x5800D0)
+#define MSS_SISTR              (MVEBU_REGS_BASE + 0x5800D8)
+
+#define MSS_MSG_INT_MASK       (0x80000000)
+#define MSS_TIMER_BASE         (MVEBU_REGS_BASE_MASK + 0x580110)
+#define MSS_TRIGGER_TIMEOUT    (1000)
+
+/*****************************************************************************
+ * mss_pm_ipc_msg_send
+ *
+ * DESCRIPTION: create and transmit IPC message
+ *****************************************************************************
+ */
+int mss_pm_ipc_msg_send(unsigned int channel_id, unsigned int msg_id,
+                       const psci_power_state_t *target_state)
+{
+       /* Transmit IPC message */
+#ifndef DISABLE_CLUSTER_LEVEL
+       mv_pm_ipc_msg_tx(channel_id, msg_id,
+                        (unsigned int)target_state->pwr_domain_state[
+                                       MPIDR_AFFLVL1]);
+#else
+       mv_pm_ipc_msg_tx(channel_id, msg_id, 0);
+#endif
+
+       return 0;
+}
+
/*****************************************************************************
 * mss_pm_ipc_msg_trigger
 *
 * DESCRIPTION: Trigger the IPC message interrupt to MSS and wait (bounded
 *              by MSS_TRIGGER_TIMEOUT timer ticks) for the SCP to consume
 *              it. Always returns 0, even on timeout - the failure is only
 *              reported via ERROR().
 *****************************************************************************
 */
int mss_pm_ipc_msg_trigger(void)
{
	unsigned int timeout;
	unsigned int t_end;
	unsigned int t_start = mmio_read_32(MSS_TIMER_BASE);

	/* Raise the MSS message interrupt (SISR bit 31) towards the SCP */
	mmio_write_32(MSS_SISR, MSS_MSG_INT_MASK);

	do {
		/* SCP clears SISTR once it has processed the interrupt */
		if (mmio_read_32(MSS_SISTR) != MSS_MSG_INT_MASK)
			break;

		/* check timeout */
		t_end = mmio_read_32(MSS_TIMER_BASE);

		/*
		 * NOTE(review): the absolute difference is used so the check
		 * survives a counter direction change, but a counter wrap
		 * can still yield a huge delta and trip the timeout early -
		 * acceptable here since the only effect is an error print.
		 */
		timeout = ((t_start > t_end) ?
			   (t_start - t_end) : (t_end - t_start));
		if (timeout > MSS_TRIGGER_TIMEOUT) {
			ERROR("PM MSG Trigger Timeout\n");
			break;
		}

	} while (1);

	return 0;
}
diff --git a/plat/marvell/a8k/common/mss/mss_pm_ipc.h b/plat/marvell/a8k/common/mss/mss_pm_ipc.h
new file mode 100644 (file)
index 0000000..0f69457
--- /dev/null
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
/*
 * IPC interface between the AP and the MSS (management subsystem) used by
 * the PSCI power-management flows.
 *
 * NOTE(review): the guard name starts with a double underscore, which is
 * reserved for the implementation - kept as-is for consistency with the
 * other Marvell platform headers.
 */
#ifndef __MSS_PM_IPC_H
#define __MSS_PM_IPC_H

#include <mss_ipc_drv.h>

/* Currently MSS does not support Cluster level Power Down */
#define DISABLE_CLUSTER_LEVEL


/*****************************************************************************
 * mss_pm_ipc_msg_send
 *
 * DESCRIPTION: create and transmit an IPC message carrying the requested
 *              power state on the given channel; returns 0
 *****************************************************************************
 */
int mss_pm_ipc_msg_send(unsigned int channel_id, unsigned int msg_id,
			const psci_power_state_t *target_state);

/*****************************************************************************
 * mss_pm_ipc_msg_trigger
 *
 * DESCRIPTION: Trigger the IPC message interrupt to MSS and wait for the
 *              SCP to acknowledge; returns 0
 *****************************************************************************
 */
int mss_pm_ipc_msg_trigger(void);


#endif /* __MSS_PM_IPC_H */
diff --git a/plat/marvell/a8k/common/plat_bl1_setup.c b/plat/marvell/a8k/common/plat_bl1_setup.c
new file mode 100644 (file)
index 0000000..5d85102
--- /dev/null
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#include <mmio.h>
+#include <plat_marvell.h>
+
/*
 * Enable the UART MPPs.
 * On a normal system this is done by the BootROM; BL1 repeats it so the
 * console works even when the BootROM did not run the full flow.
 */
void marvell_bl1_setup_mpps(void)
{
	int mpp_reg;

	/* MPP registers 1 and 2 both take the same UART mux value */
	for (mpp_reg = 1; mpp_reg <= 2; mpp_reg++)
		mmio_write_32(MVEBU_AP_MPP_REGS(mpp_reg), 0x3000);
}
diff --git a/plat/marvell/a8k/common/plat_bl31_setup.c b/plat/marvell/a8k/common/plat_bl31_setup.c
new file mode 100644 (file)
index 0000000..6c85fcc
--- /dev/null
@@ -0,0 +1,119 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#include <a8k_common.h>
+#include <ap_setup.h>
+#include <cp110_setup.h>
+#include <debug.h>
+#include <marvell_plat_priv.h>
+#include <marvell_pm.h>
+#include <mmio.h>
+#include <mci.h>
+#include <plat_marvell.h>
+
+#include <mss_ipc_drv.h>
+#include <mss_mem.h>
+
+/* In Armada-8k family AP806/AP807, CP0 connected to PIDI
+ * and CP1 connected to IHB via MCI #0
+ */
+#define MVEBU_MCI0             0
+
+static _Bool pm_fw_running;
+
/* Weak default for platforms that have no GPIO to configure */
#pragma weak marvell_gpio_config
int marvell_gpio_config(void)
{
	/* Nothing to set up - report success */
	return 0;
}
+
+static void marvell_bl31_mpp_init(int cp)
+{
+       uint32_t reg;
+
+       /* need to do for CP#0 only */
+       if (cp)
+               return;
+
+
+       /*
+        * Enable CP0 I2C MPPs (MPP: 37-38)
+        * U-Boot rely on proper MPP settings for I2C EEPROM usage
+        * (only for CP0)
+        */
+       reg = mmio_read_32(MVEBU_CP_MPP_REGS(0, 4));
+       mmio_write_32(MVEBU_CP_MPP_REGS(0, 4), reg | 0x2200000);
+}
+
+void marvell_bl31_mss_init(void)
+{
+       struct mss_pm_ctrl_block *mss_pm_crtl =
+                       (struct mss_pm_ctrl_block *)MSS_SRAM_PM_CONTROL_BASE;
+
+       /* Check that the image was loaded successfully */
+       if (mss_pm_crtl->handshake != HOST_ACKNOWLEDGMENT) {
+               NOTICE("MSS PM is not supported in this build\n");
+               return;
+       }
+
+       /* If we got here it means that the PM firmware is running */
+       pm_fw_running = 1;
+
+       INFO("MSS IPC init\n");
+
+       if (mss_pm_crtl->ipc_state == IPC_INITIALIZED)
+               mv_pm_ipc_init(mss_pm_crtl->ipc_base_address | MVEBU_REGS_BASE);
+}
+
+_Bool is_pm_fw_running(void)
+{
+       return pm_fw_running;
+}
+
/*
 * This function overrides the same function in marvell_bl31_setup.c.
 * Platform architectural setup for BL31: AP/CP bring-up, EL3 MMU,
 * MSS IPC and GPIO. The ordering of the calls below is significant.
 */
void bl31_plat_arch_setup(void)
{
	int cp;
	uintptr_t *mailbox = (void *)PLAT_MARVELL_MAILBOX_BASE;

	/* initialize the timer for mdelay/udelay functionality */
	plat_delay_timer_init();

	/* configure apn806 */
	ap_init();

	/* In marvell_bl31_plat_arch_setup, el3 mmu is configured.
	 * el3 mmu configuration MUST be called after apn806_init, if not,
	 * this will cause an hang in init_io_win
	 * (after setting the IO windows GCR values).
	 *
	 * The mailbox magic values identify a resume-from-suspend boot;
	 * in that case the MMU setup is skipped here.
	 */
	if (mailbox[MBOX_IDX_MAGIC] != MVEBU_MAILBOX_MAGIC_NUM ||
	    mailbox[MBOX_IDX_SUSPEND_MAGIC] != MVEBU_MAILBOX_SUSPEND_STATE)
		marvell_bl31_plat_arch_setup();

	for (cp = 0; cp < CP_COUNT; cp++) {
	/* Bring up the MCI link before touching CP1 (CP0 uses PIDI) */
		if (cp == 1)
			mci_initialize(MVEBU_MCI0);

	/* Configure the CP110 die and assign its stream-ID range */
		cp110_init(MVEBU_CP_REGS_BASE(cp),
			   STREAM_ID_BASE + (cp * MAX_STREAM_ID_PER_CP));

	/* Should be called only after setting IOB windows */
		marvell_bl31_mpp_init(cp);
	}

	/* initialize IPC between MSS and ATF (skipped on resume as well) */
	if (mailbox[MBOX_IDX_MAGIC] != MVEBU_MAILBOX_MAGIC_NUM ||
	    mailbox[MBOX_IDX_SUSPEND_MAGIC] != MVEBU_MAILBOX_SUSPEND_STATE)
		marvell_bl31_mss_init();

	/* Configure GPIO */
	marvell_gpio_config();
}
diff --git a/plat/marvell/a8k/common/plat_ble_setup.c b/plat/marvell/a8k/common/plat_ble_setup.c
new file mode 100644 (file)
index 0000000..0cd62cb
--- /dev/null
@@ -0,0 +1,570 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#include <a8k_common.h>
+#include <ap_setup.h>
+#include <aro.h>
+#include <ccu.h>
+#include <cp110_setup.h>
+#include <debug.h>
+#include <io_win.h>
+#include <mv_ddr_if.h>
+#include <mvebu_def.h>
+#include <plat_marvell.h>
+
+/* Register for skip image use */
+#define SCRATCH_PAD_REG2               0xF06F00A8
+#define SCRATCH_PAD_SKIP_VAL           0x01
+#define NUM_OF_GPIO_PER_REG 32
+
+#define MMAP_SAVE_AND_CONFIG   0
+#define MMAP_RESTORE_SAVED     1
+
+/* SAR clock settings */
+#define MVEBU_AP_GEN_MGMT_BASE         (MVEBU_RFU_BASE + 0x8000)
+#define MVEBU_AP_SAR_REG_BASE(r)       (MVEBU_AP_GEN_MGMT_BASE + 0x200 +\
+                                                               ((r) << 2))
+
+#define SAR_CLOCK_FREQ_MODE_OFFSET     (0)
+#define SAR_CLOCK_FREQ_MODE_MASK       (0x1f << SAR_CLOCK_FREQ_MODE_OFFSET)
+#define SAR_PIDI_LOW_SPEED_OFFSET      (20)
+#define SAR_PIDI_LOW_SPEED_MASK                (1 << SAR_PIDI_LOW_SPEED_OFFSET)
+#define SAR_PIDI_LOW_SPEED_SHIFT       (15)
+#define SAR_PIDI_LOW_SPEED_SET         (1 << SAR_PIDI_LOW_SPEED_SHIFT)
+
+#define FREQ_MODE_AP_SAR_REG_NUM       (0)
+#define SAR_CLOCK_FREQ_MODE(v)         (((v) & SAR_CLOCK_FREQ_MODE_MASK) >> \
+                                       SAR_CLOCK_FREQ_MODE_OFFSET)
+
+#define AVS_EN_CTRL_REG                        (MVEBU_AP_GEN_MGMT_BASE + 0x130)
+#define AVS_ENABLE_OFFSET              (0)
+#define AVS_SOFT_RESET_OFFSET          (2)
+#define AVS_LOW_VDD_LIMIT_OFFSET       (4)
+#define AVS_HIGH_VDD_LIMIT_OFFSET      (12)
+#define AVS_TARGET_DELTA_OFFSET                (21)
+#define AVS_VDD_LOW_LIMIT_MASK         (0xFF << AVS_LOW_VDD_LIMIT_OFFSET)
+#define AVS_VDD_HIGH_LIMIT_MASK                (0xFF << AVS_HIGH_VDD_LIMIT_OFFSET)
+/* VDD limit is 0.9V for A70x0 @ CPU frequency < 1600MHz */
+#define AVS_A7K_LOW_CLK_VALUE          ((0x80 << AVS_TARGET_DELTA_OFFSET) | \
+                                        (0x1A << AVS_HIGH_VDD_LIMIT_OFFSET) | \
+                                        (0x1A << AVS_LOW_VDD_LIMIT_OFFSET) | \
+                                        (0x1 << AVS_SOFT_RESET_OFFSET) | \
+                                        (0x1 << AVS_ENABLE_OFFSET))
+/* VDD limit is 1.0V for all A80x0 devices */
+#define AVS_A8K_CLK_VALUE              ((0x80 << AVS_TARGET_DELTA_OFFSET) | \
+                                        (0x24 << AVS_HIGH_VDD_LIMIT_OFFSET) | \
+                                        (0x24 << AVS_LOW_VDD_LIMIT_OFFSET) | \
+                                        (0x1 << AVS_SOFT_RESET_OFFSET) | \
+                                        (0x1 << AVS_ENABLE_OFFSET))
+
+#define AVS_A3900_CLK_VALUE            ((0x80 << 24) | \
+                                        (0x2c2 << 13) | \
+                                        (0x2c2 << 3) | \
+                                        (0x1 << AVS_SOFT_RESET_OFFSET) | \
+                                        (0x1 << AVS_ENABLE_OFFSET))
+
+#define MVEBU_AP_EFUSE_SRV_CTRL_REG    (MVEBU_AP_GEN_MGMT_BASE + 0x8)
+#define EFUSE_SRV_CTRL_LD_SELECT_OFFS  6
+#define EFUSE_SRV_CTRL_LD_SEL_USER_MASK        (1 << EFUSE_SRV_CTRL_LD_SELECT_OFFS)
+
+/* Notify bootloader on DRAM setup */
+#define AP807_CPU_ARO_0_CTRL_0         (MVEBU_RFU_BASE + 0x82A8)
+#define AP807_CPU_ARO_1_CTRL_0         (MVEBU_RFU_BASE + 0x8D00)
+
+/* 0 - ARO clock is enabled, 1 - ARO clock is disabled */
+#define AP807_CPU_ARO_CLK_EN_OFFSET    0
+#define AP807_CPU_ARO_CLK_EN_MASK      (0x1 << AP807_CPU_ARO_CLK_EN_OFFSET)
+
+/* 0 - ARO is the clock source, 1 - PLL is the clock source */
+#define AP807_CPU_ARO_SEL_PLL_OFFSET   5
+#define AP807_CPU_ARO_SEL_PLL_MASK     (0x1 << AP807_CPU_ARO_SEL_PLL_OFFSET)
+
+/*
+ * - AVS work points in the LD0 eFuse:
+ *     SVC1 work point:     LD0[88:81]
+ *     SVC2 work point:     LD0[96:89]
+ *     SVC3 work point:     LD0[104:97]
+ *     SVC4 work point:     LD0[112:105]
+ * - Identification information in the LD-0 eFuse:
+ *     DRO:           LD0[74:65] - Not used by the SW
+ *     Revision:      LD0[78:75] - Not used by the SW
+ *     Bin:           LD0[80:79] - Not used by the SW
+ *     SW Revision:   LD0[115:113]
+ *     Cluster 1 PWR: LD0[193] - if set to 1, power down CPU Cluster-1
+ *                               resulting in 2 CPUs active only (7020)
+ */
+#define MVEBU_AP_LD_EFUSE_BASE         (MVEBU_AP_GEN_MGMT_BASE + 0xF00)
+/* Bits [94:63] - 32 data bits total */
+#define MVEBU_AP_LD0_94_63_EFUSE_OFFS  (MVEBU_AP_LD_EFUSE_BASE + 0x8)
+/* Bits [125:95] - 31 data bits total, 32nd bit is parity for bits [125:63] */
+#define MVEBU_AP_LD0_125_95_EFUSE_OFFS (MVEBU_AP_LD_EFUSE_BASE + 0xC)
+/* Bits [220:189] - 32 data bits total */
+#define MVEBU_AP_LD0_220_189_EFUSE_OFFS        (MVEBU_AP_LD_EFUSE_BASE + 0x18)
+/* Offsets for the above 2 fields combined into single 64-bit value [125:63] */
+#define EFUSE_AP_LD0_DRO_OFFS          2               /* LD0[74:65] */
+#define EFUSE_AP_LD0_DRO_MASK          0x3FF
+#define EFUSE_AP_LD0_REVID_OFFS                12              /* LD0[78:75] */
+#define EFUSE_AP_LD0_REVID_MASK                0xF
+#define EFUSE_AP_LD0_BIN_OFFS          16              /* LD0[80:79] */
+#define EFUSE_AP_LD0_BIN_MASK          0x3
+#define EFUSE_AP_LD0_SWREV_OFFS                50              /* LD0[115:113] */
+#define EFUSE_AP_LD0_SWREV_MASK                0x7
+
+#define EFUSE_AP_LD0_SVC1_OFFS         18              /* LD0[88:81] */
+#define EFUSE_AP_LD0_SVC2_OFFS         26              /* LD0[96:89] */
+#define EFUSE_AP_LD0_SVC3_OFFS         34              /* LD0[104:97] */
+#define EFUSE_AP_LD0_SVC4_OFFS         42              /* LD0[112:105] */
+#define EFUSE_AP_LD0_WP_MASK           0xFF
+
+#define EFUSE_AP_LD0_CLUSTER_DOWN_OFFS 4
+
+/* Return the AP revision of the chip */
+static unsigned int ble_get_ap_type(void)
+{
+       unsigned int chip_rev_id;
+
+       chip_rev_id = mmio_read_32(MVEBU_CSS_GWD_CTRL_IIDR2_REG);
+       chip_rev_id = ((chip_rev_id & GWD_IIDR2_CHIP_ID_MASK) >>
+                       GWD_IIDR2_CHIP_ID_OFFSET);
+
+       return chip_rev_id;
+}
+
/******************************************************************************
 * The routine allows to save the CCU and IO windows configuration during DRAM
 * setup and restore them afterwards before exiting the BLE stage.
 * Such window configuration is required since not all default settings coming
 * from the HW and the BootROM allow access to peripherals connected to
 * all available CPn components.
 * For instance, when the boot device is located on CP0, the IO window to CP1
 * is not opened automatically by the HW and if the DRAM SPD is located on CP1
 * i2c channel, it cannot be read at BLE stage.
 * Therefore the DRAM init procedure have to provide access to all available
 * CPn peripherals during the BLE stage by setting the CCU IO window to all
 * CPn addresses and by enabling the IO windows accordingly.
 * Additionally this function configures the CCU GCR to DRAM, which allows
 * usage of more than 4GB DRAM as it configured by the default CCU DRAM window.
 *
 * IN:
 *	MMAP_SAVE_AND_CONFIG	- save the existing configuration and update it
 *	MMAP_RESTORE_SAVED	- restore saved configuration
 * OUT:
 *	NONE
 ****************************************************************************
 */
static void ble_plat_mmap_config(int restore)
{
	if (restore == MMAP_RESTORE_SAVED) {
		/* Restore the CCU windows modified by the BLE stage */
		ccu_restore_win_all(MVEBU_AP0);
		/* Restore the IO windows modified by the BLE stage */
		iow_restore_win_all(MVEBU_AP0);
		return;
	}

	/* Save the original CCU windows configuration */
	ccu_save_win_all(MVEBU_AP0);
	/* Save the original IO windows configuration */
	iow_save_win_all(MVEBU_AP0);

	/* Configuration saved - now reconfigure the CCU, then IO windows */
	init_ccu(MVEBU_AP0);
	init_io_win(MVEBU_AP0);
}
+
/****************************************************************************
 * Setup Adaptive Voltage Switching - this is required for some platforms.
 * Writes the per-SoC AVS control value; no effect on unsupported devices
 * beyond an error print.
 ****************************************************************************
 */
static void ble_plat_avs_config(void)
{
	uint32_t reg_val, device_id;

	/* AP807 (A3900): a single fixed AVS value, no per-SKU tuning */
	if (ble_get_ap_type() == CHIP_ID_AP807) {
		VERBOSE("AVS: Setting AP807 AVS CTRL to 0x%x\n",
			AVS_A3900_CLK_VALUE);
		mmio_write_32(AVS_EN_CTRL_REG, AVS_A3900_CLK_VALUE);
		return;
	}

	/* Check which SoC is running and act accordingly */
	device_id = cp110_device_id_get(MVEBU_CP_REGS_BASE(0));
	switch (device_id) {
	case MVEBU_80X0_DEV_ID:
	case MVEBU_80X0_CP115_DEV_ID:
		/* Set the new AVS value - fix the default one on A80x0 */
		mmio_write_32(AVS_EN_CTRL_REG, AVS_A8K_CLK_VALUE);
		break;
	case MVEBU_70X0_DEV_ID:
	case MVEBU_70X0_CP115_DEV_ID:
		/* Only fix AVS for CPU clocks lower than 1600MHz on A70x0 */
		reg_val = mmio_read_32(MVEBU_AP_SAR_REG_BASE(
						FREQ_MODE_AP_SAR_REG_NUM));
		reg_val &= SAR_CLOCK_FREQ_MODE_MASK;
		reg_val >>= SAR_CLOCK_FREQ_MODE_OFFSET;
		if ((reg_val > CPU_1600_DDR_900_RCLK_900_2) &&
		    (reg_val < CPU_DDR_RCLK_INVALID))
			mmio_write_32(AVS_EN_CTRL_REG, AVS_A7K_LOW_CLK_VALUE);
		break;
	default:
		ERROR("Unsupported Device ID 0x%x\n", device_id);
	}
}
+
+/****************************************************************************
+ * SVC flow - v0.10
+ * The feature is intended to configure AVS value according to eFuse values
+ * that are burned individually for each SoC during the test process.
+ * Primary AVS value is stored in HD efuse and processed on power on
+ * by the HW engine
+ * Secondary AVS value is located in LD efuse and contains 4 work points for
+ * various CPU frequencies.
+ * The Secondary AVS value is only taken into account if the SW Revision stored
+ * in the efuse is greater than 0 and the CPU is running in a certain speed.
+ ****************************************************************************
+ */
+static void ble_plat_svc_config(void)
+{
+       uint32_t reg_val, avs_workpoint, freq_pidi_mode;
+       uint64_t efuse;
+       uint32_t device_id, single_cluster;
+       uint8_t  svc[4], perr[4], i, sw_ver;
+
+       /* Due to a bug in A3900 device_id skip SVC config
+        * TODO: add SVC config once it is decided for a3900
+        */
+       if (ble_get_ap_type() == CHIP_ID_AP807) {
+               NOTICE("SVC: SVC is not supported on AP807\n");
+               ble_plat_avs_config();
+               return;
+       }
+
+       /* Set access to LD0 */
+       reg_val = mmio_read_32(MVEBU_AP_EFUSE_SRV_CTRL_REG);
+       reg_val &= ~EFUSE_SRV_CTRL_LD_SELECT_OFFS;
+       mmio_write_32(MVEBU_AP_EFUSE_SRV_CTRL_REG, reg_val);
+
+       /* Obtain the value of LD0[125:63] */
+       efuse = mmio_read_32(MVEBU_AP_LD0_125_95_EFUSE_OFFS);
+       efuse <<= 32;
+       efuse |= mmio_read_32(MVEBU_AP_LD0_94_63_EFUSE_OFFS);
+
+       /* SW Revision:
+        * Starting from SW revision 1 the SVC flow is supported.
+        * SW version 0 (efuse not programmed) should follow the
+        * regular AVS update flow.
+        */
+       sw_ver = (efuse >> EFUSE_AP_LD0_SWREV_OFFS) & EFUSE_AP_LD0_SWREV_MASK;
+       if (sw_ver < 1) {
+               NOTICE("SVC: SW Revision 0x%x. SVC is not supported\n", sw_ver);
+               ble_plat_avs_config();
+               return;
+       }
+
+       /* Frequency mode from SAR */
+       freq_pidi_mode = SAR_CLOCK_FREQ_MODE(
+                               mmio_read_32(
+                                       MVEBU_AP_SAR_REG_BASE(
+                                               FREQ_MODE_AP_SAR_REG_NUM)));
+
+       /* Decode all SVC work points */
+       svc[0] = (efuse >> EFUSE_AP_LD0_SVC1_OFFS) & EFUSE_AP_LD0_WP_MASK;
+       svc[1] = (efuse >> EFUSE_AP_LD0_SVC2_OFFS) & EFUSE_AP_LD0_WP_MASK;
+       svc[2] = (efuse >> EFUSE_AP_LD0_SVC3_OFFS) & EFUSE_AP_LD0_WP_MASK;
+       svc[3] = (efuse >> EFUSE_AP_LD0_SVC4_OFFS) & EFUSE_AP_LD0_WP_MASK;
+       INFO("SVC: Efuse WP: [0]=0x%x, [1]=0x%x, [2]=0x%x, [3]=0x%x\n",
+               svc[0], svc[1], svc[2], svc[3]);
+
+       /* Validate parity of SVC workpoint values */
+       for (i = 0; i < 4; i++) {
+               uint8_t parity, bit;
+
+               perr[i] = 0;
+
+               for (bit = 1, parity = svc[i] & 1; bit < 7; bit++)
+                       parity ^= (svc[i] >> bit) & 1;
+
+               /* Starting from SW version 2, the parity check is mandatory */
+               if ((sw_ver > 1) && (parity != ((svc[i] >> 7) & 1)))
+                       perr[i] = 1; /* register the error */
+       }
+
+       single_cluster = mmio_read_32(MVEBU_AP_LD0_220_189_EFUSE_OFFS);
+       single_cluster = (single_cluster >> EFUSE_AP_LD0_CLUSTER_DOWN_OFFS) & 1;
+
+       device_id = cp110_device_id_get(MVEBU_CP_REGS_BASE(0));
+       if (device_id == MVEBU_80X0_DEV_ID ||
+           device_id == MVEBU_80X0_CP115_DEV_ID) {
+               /* A8040/A8020 */
+               NOTICE("SVC: DEV ID: %s, FREQ Mode: 0x%x\n",
+                       single_cluster == 0 ? "8040" : "8020", freq_pidi_mode);
+               switch (freq_pidi_mode) {
+               case CPU_1800_DDR_1200_RCLK_1200:
+               case CPU_1800_DDR_1050_RCLK_1050:
+                       if (perr[1])
+                               goto perror;
+                       avs_workpoint = svc[1];
+                       break;
+               case CPU_1600_DDR_1050_RCLK_1050:
+               case CPU_1600_DDR_900_RCLK_900_2:
+                       if (perr[2])
+                               goto perror;
+                       avs_workpoint = svc[2];
+                       break;
+               case CPU_1300_DDR_800_RCLK_800:
+               case CPU_1300_DDR_650_RCLK_650:
+                       if (perr[3])
+                               goto perror;
+                       avs_workpoint = svc[3];
+                       break;
+               case CPU_2000_DDR_1200_RCLK_1200:
+               case CPU_2000_DDR_1050_RCLK_1050:
+               default:
+                       if (perr[0])
+                               goto perror;
+                       avs_workpoint = svc[0];
+                       break;
+               }
+       } else if (device_id == MVEBU_70X0_DEV_ID ||
+                  device_id == MVEBU_70X0_CP115_DEV_ID) {
+               /* A7040/A7020/A6040 */
+               NOTICE("SVC: DEV ID: %s, FREQ Mode: 0x%x\n",
+                       single_cluster == 0 ? "7040" : "7020", freq_pidi_mode);
+               switch (freq_pidi_mode) {
+               case CPU_1400_DDR_800_RCLK_800:
+                       if (single_cluster) {/* 7020 */
+                               if (perr[1])
+                                       goto perror;
+                               avs_workpoint = svc[1];
+                       } else {
+                               if (perr[0])
+                                       goto perror;
+                               avs_workpoint = svc[0];
+                       }
+                       break;
+               case CPU_1200_DDR_800_RCLK_800:
+                       if (single_cluster) {/* 7020 */
+                               if (perr[2])
+                                       goto perror;
+                               avs_workpoint = svc[2];
+                       } else {
+                               if (perr[1])
+                                       goto perror;
+                               avs_workpoint = svc[1];
+                       }
+                       break;
+               case CPU_800_DDR_800_RCLK_800:
+               case CPU_1000_DDR_800_RCLK_800:
+                       if (single_cluster) {/* 7020 */
+                               if (perr[3])
+                                       goto perror;
+                               avs_workpoint = svc[3];
+                       } else {
+                               if (perr[2])
+                                       goto perror;
+                               avs_workpoint = svc[2];
+                       }
+                       break;
+               case CPU_600_DDR_800_RCLK_800:
+                       if (perr[3])
+                               goto perror;
+                       avs_workpoint = svc[3]; /* Same for 6040 and 7020 */
+                       break;
+               case CPU_1600_DDR_800_RCLK_800: /* 7020 only */
+               default:
+                       if (single_cluster) {/* 7020 */
+                               if (perr[0])
+                                       goto perror;
+                               avs_workpoint = svc[0];
+                       } else
+                               avs_workpoint = 0;
+                       break;
+               }
+       } else {
+               ERROR("SVC: Unsupported Device ID 0x%x\n", device_id);
+               return;
+       }
+
+       /* Set AVS control if needed */
+       if (avs_workpoint == 0) {
+               ERROR("SVC: AVS work point not changed\n");
+               return;
+       }
+
+       /* Remove parity bit */
+       avs_workpoint &= 0x7F;
+
+       reg_val  = mmio_read_32(AVS_EN_CTRL_REG);
+       NOTICE("SVC: AVS work point changed from 0x%x to 0x%x\n",
+               (reg_val & AVS_VDD_LOW_LIMIT_MASK) >> AVS_LOW_VDD_LIMIT_OFFSET,
+               avs_workpoint);
+       reg_val &= ~(AVS_VDD_LOW_LIMIT_MASK | AVS_VDD_HIGH_LIMIT_MASK);
+       reg_val |= 0x1 << AVS_ENABLE_OFFSET;
+       reg_val |= avs_workpoint << AVS_HIGH_VDD_LIMIT_OFFSET;
+       reg_val |= avs_workpoint << AVS_LOW_VDD_LIMIT_OFFSET;
+       mmio_write_32(AVS_EN_CTRL_REG, reg_val);
+       return;
+
+perror:
+       ERROR("Failed SVC WP[%d] parity check!\n", i);
+       ERROR("Ignoring the WP values\n");
+}
+
+#if PLAT_RECOVERY_IMAGE_ENABLE
/* I2C-based skip-image detection - not implemented on this platform */
static int ble_skip_image_i2c(struct skip_image *skip_im)
{
	/* Report "no skip requested" after logging the missing support */
	ERROR("skipping image using i2c is not supported\n");
	return 0;
}
+
/* Hook for a board-specific skip-image method - not implemented here */
static int ble_skip_image_other(struct skip_image *skip_im)
{
	/* Platforms must supply their own implementation to use this path */
	ERROR("implementation missing for skip image request\n");
	return 0;
}
+
/*
 * GPIO-based skip-image detection.
 * Samples the configured GPIO and, when its level matches the configured
 * button state, records the skip request in the scratch-pad register and
 * returns 1; returns 0 otherwise.
 */
static int ble_skip_image_gpio(struct skip_image *skip_im)
{
	unsigned int val;
	unsigned int mpp_address = 0;
	unsigned int offset = 0;

	switch (skip_im->info.test.cp_ap) {
	case(CP):
		/* GPIO resides on one of the CP dies */
		mpp_address = MVEBU_CP_GPIO_DATA_IN(skip_im->info.test.cp_index,
						    skip_im->info.gpio.num);
		/*
		 * NOTE(review): '>' (not '>=') means gpio.num == 32 keeps
		 * offset 32, which over-shifts the 32-bit mask below -
		 * confirm whether the boundary should use '>='.
		 */
		if (skip_im->info.gpio.num > NUM_OF_GPIO_PER_REG)
			offset = skip_im->info.gpio.num - NUM_OF_GPIO_PER_REG;
		else
			offset = skip_im->info.gpio.num;
		break;
	case(AP):
		/* GPIO resides on the AP die */
		mpp_address = MVEBU_AP_GPIO_DATA_IN;
		offset = skip_im->info.gpio.num;
		break;
	}
	/* NOTE(review): no default case - an unexpected cp_ap value leaves
	 * mpp_address == 0 and the read below targets address 0.
	 */

	val = mmio_read_32(mpp_address);
	val &= (1 << offset);
	/* Active-low button with HIGH expectation (or vice versa) = pressed */
	if ((!val && skip_im->info.gpio.button_state == HIGH) ||
	    (val && skip_im->info.gpio.button_state == LOW)) {
		mmio_write_32(SCRATCH_PAD_REG2, SCRATCH_PAD_SKIP_VAL);
		return 1;
	}

	return 0;
}
+
+/*
+ * This function checks if there's a skip image request:
+ * return values:
+ * 1: (true) images request been made.
+ * 0: (false) no image request been made.
+ */
+static int  ble_skip_current_image(void)
+{
+       struct skip_image *skip_im;
+
+       /*fetching skip image info*/
+       skip_im = (struct skip_image *)plat_marvell_get_skip_image_data();
+
+       if (skip_im == NULL)
+               return 0;
+
+       /* check if skipping image request has already been made */
+       if (mmio_read_32(SCRATCH_PAD_REG2) == SCRATCH_PAD_SKIP_VAL)
+               return 0;
+
+       switch (skip_im->detection_method) {
+       case GPIO:
+               return ble_skip_image_gpio(skip_im);
+       case I2C:
+               return ble_skip_image_i2c(skip_im);
+       case USER_DEFINED:
+               return ble_skip_image_other(skip_im);
+       }
+
+       return 0;
+}
+#endif
+
+/* Switch to ARO from PLL in ap807 */
+static void aro_to_pll(void)
+{
+       unsigned int reg;
+
+       /* switch from ARO to PLL */
+       reg = mmio_read_32(AP807_CPU_ARO_0_CTRL_0);
+       reg |= AP807_CPU_ARO_SEL_PLL_MASK;
+       mmio_write_32(AP807_CPU_ARO_0_CTRL_0, reg);
+
+       reg = mmio_read_32(AP807_CPU_ARO_1_CTRL_0);
+       reg |= AP807_CPU_ARO_SEL_PLL_MASK;
+       mmio_write_32(AP807_CPU_ARO_1_CTRL_0, reg);
+
+       mdelay(1000);
+
+       /* disable ARO clk driver */
+       reg = mmio_read_32(AP807_CPU_ARO_0_CTRL_0);
+       reg |= (AP807_CPU_ARO_CLK_EN_MASK);
+       mmio_write_32(AP807_CPU_ARO_0_CTRL_0, reg);
+
+       reg = mmio_read_32(AP807_CPU_ARO_1_CTRL_0);
+       reg |= (AP807_CPU_ARO_CLK_EN_MASK);
+       mmio_write_32(AP807_CPU_ARO_1_CTRL_0, reg);
+}
+
/*
 * BLE stage platform setup: bring up the memory windows, optionally honour
 * a skip-image request, run the CP/AP BLE init and initialize DRAM.
 * On exit the original CCU/IO window configuration is restored.
 * Sets *skip to 1 (and returns 0) when the BootROM recovery image should
 * be booted instead; otherwise returns the dram_init() result.
 */
int ble_plat_setup(int *skip)
{
	int ret;

	/* Power down unused CPUs */
	plat_marvell_early_cpu_powerdown();

	/*
	 * Save the current CCU configuration and make required changes:
	 * - Allow access to DRAM larger than 4GB
	 * - Open memory access to all CPn peripherals
	 */
	ble_plat_mmap_config(MMAP_SAVE_AND_CONFIG);

#if PLAT_RECOVERY_IMAGE_ENABLE
	/* Check if there's a skip request to bootRom recovery Image */
	if (ble_skip_current_image()) {
		/* close memory access to all CPn peripherals. */
		ble_plat_mmap_config(MMAP_RESTORE_SAVED);
		*skip = 1;
		return 0;
	}
#endif
	/* Do required CP-110 setups for BLE stage */
	cp110_ble_init(MVEBU_CP_REGS_BASE(0));

	/* Setup AVS (SVC flow when the eFuse supports it) */
	ble_plat_svc_config();

	/* work with PLL clock driver in AP807 */
	if (ble_get_ap_type() == CHIP_ID_AP807)
		aro_to_pll();

	/* Do required AP setups for BLE stage */
	ap_ble_init();

	/* Update DRAM topology (scan DIMM SPDs) */
	plat_marvell_dram_update_topology();

	/* Kick it in */
	ret = dram_init();

	/* Restore the original CCU configuration before exit from BLE */
	ble_plat_mmap_config(MMAP_RESTORE_SAVED);

	return ret;
}
diff --git a/plat/marvell/a8k/common/plat_pm.c b/plat/marvell/a8k/common/plat_pm.c
new file mode 100644 (file)
index 0000000..c716ee0
--- /dev/null
@@ -0,0 +1,829 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#include <a8k_common.h>
+#include <assert.h>
+#include <bakery_lock.h>
+#include <debug.h>
+#include <delay_timer.h>
+#include <cache_llc.h>
+#include <console.h>
+#include <gicv2.h>
+#include <marvell_pm.h>
+#include <mmio.h>
+#include <mss_pm_ipc.h>
+#include <plat_marvell.h>
+#include <platform.h>
+#include <plat_pm_trace.h>
+#include <platform.h>
+
+/* Register offsets: UID is relative to MVEBU_REGS_BASE, the global SW
+ * reset is relative to MVEBU_RFU_BASE (see plat_marvell_system_reset).
+ * NOTE(review): the 'cpu' macro parameter below is not parenthesized in
+ * the expansion - safe for the simple arguments used in this file only.
+ */
+#define MVEBU_PRIVATE_UID_REG          0x30
+#define MVEBU_RFU_GLOBL_SW_RST         0x84
+#define MVEBU_CCU_RVBAR(cpu)           (MVEBU_REGS_BASE + 0x640 + (cpu * 4))
+#define MVEBU_CCU_CPU_UN_RESET(cpu)    (MVEBU_REGS_BASE + 0x650 + (cpu * 4))
+
+/* Extract the CPU-in-cluster and cluster fields from an MPIDR value */
+#define MPIDR_CPU_GET(mpidr)           ((mpidr) & MPIDR_CPU_MASK)
+#define MPIDR_CLUSTER_GET(mpidr)       MPIDR_AFFLVL1_VAL((mpidr))
+
+/* Bit/field helpers: 32 GPIOs per data register, 8 MPPs (4 bits each)
+ * per MPP register
+ */
+#define MVEBU_GPIO_MASK(index)         (1 << (index % 32))
+#define MVEBU_MPP_MASK(index)          (0xF << (4 * (index % 8)))
+#define MVEBU_GPIO_VALUE(index, value) (value << (index % 32))
+
+/* DRAM controller registers used to enter self-refresh before power-off
+ * (see a8k_pwr_domain_pwr_down_wfi)
+ */
+#define MVEBU_USER_CMD_0_REG           (MVEBU_DRAM_MAC_BASE + 0x20)
+#define MVEBU_USER_CMD_CH0_OFFSET      28
+#define MVEBU_USER_CMD_CH0_MASK                (1 << MVEBU_USER_CMD_CH0_OFFSET)
+#define MVEBU_USER_CMD_CH0_EN          (1 << MVEBU_USER_CMD_CH0_OFFSET)
+#define MVEBU_USER_CMD_CS_OFFSET       24
+#define MVEBU_USER_CMD_CS_MASK         (0xF << MVEBU_USER_CMD_CS_OFFSET)
+#define MVEBU_USER_CMD_CS_ALL          (0xF << MVEBU_USER_CMD_CS_OFFSET)
+#define MVEBU_USER_CMD_SR_OFFSET       6
+#define MVEBU_USER_CMD_SR_MASK         (0x3 << MVEBU_USER_CMD_SR_OFFSET)
+#define MVEBU_USER_CMD_SR_ENTER                (0x1 << MVEBU_USER_CMD_SR_OFFSET)
+#define MVEBU_MC_PWR_CTRL_REG          (MVEBU_DRAM_MAC_BASE + 0x54)
+#define MVEBU_MC_AC_ON_DLY_OFFSET      8
+#define MVEBU_MC_AC_ON_DLY_MASK                (0xF << MVEBU_MC_AC_ON_DLY_OFFSET)
+#define MVEBU_MC_AC_ON_DLY_DEF_VAR     (8 << MVEBU_MC_AC_ON_DLY_OFFSET)
+#define MVEBU_MC_AC_OFF_DLY_OFFSET     4
+#define MVEBU_MC_AC_OFF_DLY_MASK       (0xF << MVEBU_MC_AC_OFF_DLY_OFFSET)
+#define MVEBU_MC_AC_OFF_DLY_DEF_VAR    (0xC << MVEBU_MC_AC_OFF_DLY_OFFSET)
+#define MVEBU_MC_PHY_AUTO_OFF_OFFSET   0
+#define MVEBU_MC_PHY_AUTO_OFF_MASK     (1 << MVEBU_MC_PHY_AUTO_OFF_OFFSET)
+#define MVEBU_MC_PHY_AUTO_OFF_EN       (1 << MVEBU_MC_PHY_AUTO_OFF_OFFSET)
+
+/* this lock synchronize AP multiple cores execution with MSS */
+DEFINE_BAKERY_LOCK(pm_sys_lock);
+
+/* Weak definitions may be overridden in specific board */
+#pragma weak plat_marvell_get_pm_cfg
+
+/* AP806 CPU power down /power up definitions */
+enum CPU_ID {
+       CPU0,
+       CPU1,
+       CPU2,
+       CPU3
+};
+
+/* Max number of read-back polls when validating a register write */
+#define REG_WR_VALIDATE_TIMEOUT                (2000)
+
+/* Fuse/feature register: the cluster bit is set on single-cluster
+ * (dual-core) parts - see plat_marvell_early_cpu_powerdown
+ */
+#define FEATURE_DISABLE_STATUS_REG                     \
+                       (MVEBU_REGS_BASE + 0x6F8230)
+#define FEATURE_DISABLE_STATUS_CPU_CLUSTER_OFFSET      4
+#define FEATURE_DISABLE_STATUS_CPU_CLUSTER_MASK                \
+                       (0x1 << FEATURE_DISABLE_STATUS_CPU_CLUSTER_OFFSET)
+
+/* The PWRC field layout differs between AP806 and AP807 silicon */
+#ifdef MVEBU_SOC_AP807
+       #define PWRC_CPUN_CR_PWR_DN_RQ_OFFSET           1
+       #define PWRC_CPUN_CR_LDO_BYPASS_RDY_OFFSET      0
+#else
+#define PWRC_CPUN_CR_PWR_DN_RQ_OFFSET          0
+       #define PWRC_CPUN_CR_LDO_BYPASS_RDY_OFFSET      31
+#endif
+
+/* Per-CPU power control register (0x10 stride per CPU) */
+#define PWRC_CPUN_CR_REG(cpu_id)               \
+                       (MVEBU_REGS_BASE + 0x680000 + (cpu_id * 0x10))
+#define PWRC_CPUN_CR_PWR_DN_RQ_MASK            \
+                       (0x1 << PWRC_CPUN_CR_PWR_DN_RQ_OFFSET)
+#define PWRC_CPUN_CR_ISO_ENABLE_OFFSET         16
+#define PWRC_CPUN_CR_ISO_ENABLE_MASK           \
+                       (0x1 << PWRC_CPUN_CR_ISO_ENABLE_OFFSET)
+#define PWRC_CPUN_CR_LDO_BYPASS_RDY_MASK       \
+                       (0x1 << PWRC_CPUN_CR_LDO_BYPASS_RDY_OFFSET)
+
+/* Per-CPU power-on-reset control: one register per CPU pair/cluster,
+ * two CPUs per register
+ */
+#define CCU_B_PRCRN_REG(cpu_id)                        \
+                       (MVEBU_REGS_BASE + 0x1A50 + \
+                       ((cpu_id / 2) * (0x400)) + ((cpu_id % 2) * 4))
+#define CCU_B_PRCRN_CPUPORESET_STATIC_OFFSET   0
+#define CCU_B_PRCRN_CPUPORESET_STATIC_MASK     \
+                       (0x1 << CCU_B_PRCRN_CPUPORESET_STATIC_OFFSET)
+
+/* power switch fingers */
+#define AP807_PWRC_LDO_CR0_REG                 \
+                       (MVEBU_REGS_BASE + 0x680000 + 0x100)
+#define AP807_PWRC_LDO_CR0_OFFSET              16
+#define AP807_PWRC_LDO_CR0_MASK                        \
+                       (0xff << AP807_PWRC_LDO_CR0_OFFSET)
+#define AP807_PWRC_LDO_CR0_VAL                 0xfd
+
+/*
+ * Power down CPU:
+ * Used to reduce power consumption, and avoid SoC unnecessary temperature rise.
+ *
+ * cpu_id: absolute CPU index (CPU0..CPU3).
+ * Returns 0 on success, -1 when a register write is not observed to take
+ * effect within REG_WR_VALIDATE_TIMEOUT read-back polls.
+ */
+static int plat_marvell_cpu_powerdown(int cpu_id)
+{
+       uint32_t        reg_val;
+       int             exit_loop = REG_WR_VALIDATE_TIMEOUT;
+
+       INFO("Powering down CPU%d\n", cpu_id);
+
+       /* 1. Isolation enable */
+       reg_val = mmio_read_32(PWRC_CPUN_CR_REG(cpu_id));
+       reg_val |= 0x1 << PWRC_CPUN_CR_ISO_ENABLE_OFFSET;
+       mmio_write_32(PWRC_CPUN_CR_REG(cpu_id), reg_val);
+
+       /* 2. Read and check Isolation enabled - verify bit set to 1 */
+       do {
+               reg_val = mmio_read_32(PWRC_CPUN_CR_REG(cpu_id));
+               exit_loop--;
+       } while (!(reg_val & (0x1 << PWRC_CPUN_CR_ISO_ENABLE_OFFSET)) &&
+                exit_loop > 0);
+
+       /* Fail if isolation was never seen enabled; previously this
+        * timeout was silently ignored, unlike the checks after steps
+        * 4 and 7 below.
+        */
+       if (exit_loop <= 0)
+               goto cpu_poweroff_error;
+
+       /* 3. Switch off CPU power */
+       reg_val = mmio_read_32(PWRC_CPUN_CR_REG(cpu_id));
+       reg_val &= ~PWRC_CPUN_CR_PWR_DN_RQ_MASK;
+       mmio_write_32(PWRC_CPUN_CR_REG(cpu_id), reg_val);
+
+       /* 4. Read and check Switch Off - verify bit set to 0 */
+       exit_loop = REG_WR_VALIDATE_TIMEOUT;
+       do {
+               reg_val = mmio_read_32(PWRC_CPUN_CR_REG(cpu_id));
+               exit_loop--;
+       } while (reg_val & PWRC_CPUN_CR_PWR_DN_RQ_MASK && exit_loop > 0);
+
+       if (exit_loop <= 0)
+               goto cpu_poweroff_error;
+
+       /* 5. De-Assert power ready */
+       reg_val = mmio_read_32(PWRC_CPUN_CR_REG(cpu_id));
+       reg_val &= ~PWRC_CPUN_CR_LDO_BYPASS_RDY_MASK;
+       mmio_write_32(PWRC_CPUN_CR_REG(cpu_id), reg_val);
+
+       /* 6. Assert CPU POR reset */
+       reg_val = mmio_read_32(CCU_B_PRCRN_REG(cpu_id));
+       reg_val &= ~CCU_B_PRCRN_CPUPORESET_STATIC_MASK;
+       mmio_write_32(CCU_B_PRCRN_REG(cpu_id), reg_val);
+
+       /* 7. Poll until the CPU is seen held in reset (bit reads 0) */
+       exit_loop = REG_WR_VALIDATE_TIMEOUT;
+       do {
+               reg_val = mmio_read_32(CCU_B_PRCRN_REG(cpu_id));
+               exit_loop--;
+       } while (reg_val & CCU_B_PRCRN_CPUPORESET_STATIC_MASK && exit_loop > 0);
+
+       if (exit_loop <= 0)
+               goto cpu_poweroff_error;
+
+       INFO("Successfully powered down CPU%d\n", cpu_id);
+
+       return 0;
+
+cpu_poweroff_error:
+       ERROR("ERROR: Can't power down CPU%d\n", cpu_id);
+       return -1;
+}
+
+/*
+ * Power down CPUs 1-3 at early boot stage,
+ * to reduce power consumption and SoC temperature.
+ * This is triggered by BLE prior to DDR initialization.
+ *
+ * Note:
+ * All CPUs will be powered up by plat_marvell_cpu_powerup on Linux boot stage,
+ * which is triggered by PSCI ops (pwr_domain_on).
+ */
+int plat_marvell_early_cpu_powerdown(void)
+{
+       /* When this fuse bit is set the part has a single cluster
+        * (dual-core); otherwise two clusters (quad-core) are present.
+        */
+       uint32_t single_cluster =
+               mmio_read_32(FEATURE_DISABLE_STATUS_REG) &
+               FEATURE_DISABLE_STATUS_CPU_CLUSTER_MASK;
+
+       INFO("Powering off unused CPUs\n");
+
+       /* CPU1 sits in AP806 cluster-0, which always exists */
+       if (plat_marvell_cpu_powerdown(CPU1) == -1)
+               return -1;
+
+       /*
+        * CPU2/CPU3 belong to the 2nd cluster (cluster-1), which is
+        * absent on dual-core parts - power them down only on
+        * quad-core (two-cluster) systems.
+        */
+       if (!single_cluster) {
+               if (plat_marvell_cpu_powerdown(CPU2) == -1)
+                       return -1;
+               if (plat_marvell_cpu_powerdown(CPU3) == -1)
+                       return -1;
+       }
+
+       return 0;
+}
+
+/*
+ * Power up CPU - part of Linux boot stage
+ *
+ * mpidr: MPIDR of the CPU to power up; converted below to an absolute
+ *        CPU index across clusters.
+ * Returns 0 on success, -1 when a register write fails to take effect
+ * within REG_WR_VALIDATE_TIMEOUT read-back polls.
+ */
+static int plat_marvell_cpu_powerup(u_register_t mpidr)
+{
+       uint32_t        reg_val;
+       int     cpu_id = MPIDR_CPU_GET(mpidr),
+               cluster = MPIDR_CLUSTER_GET(mpidr);
+       int     exit_loop = REG_WR_VALIDATE_TIMEOUT;
+
+       /* calculate absolute CPU ID */
+       cpu_id = cluster * PLAT_MARVELL_CLUSTER_CORE_COUNT + cpu_id;
+
+       INFO("Powering on CPU%d\n", cpu_id);
+
+#ifdef MVEBU_SOC_AP807
+       /* Activate 2 power switch fingers */
+       reg_val = mmio_read_32(AP807_PWRC_LDO_CR0_REG);
+       reg_val &= ~(AP807_PWRC_LDO_CR0_MASK);
+       reg_val |= (AP807_PWRC_LDO_CR0_VAL << AP807_PWRC_LDO_CR0_OFFSET);
+       mmio_write_32(AP807_PWRC_LDO_CR0_REG, reg_val);
+       udelay(100);
+#endif
+
+       /* 1. Switch CPU power ON */
+       reg_val = mmio_read_32(PWRC_CPUN_CR_REG(cpu_id));
+       reg_val |= 0x1 << PWRC_CPUN_CR_PWR_DN_RQ_OFFSET;
+       mmio_write_32(PWRC_CPUN_CR_REG(cpu_id), reg_val);
+
+       /* 2. Wait for CPU on, up to 100 uSec: */
+       udelay(100);
+
+       /* 3. Assert power ready */
+       reg_val = mmio_read_32(PWRC_CPUN_CR_REG(cpu_id));
+       reg_val |= 0x1 << PWRC_CPUN_CR_LDO_BYPASS_RDY_OFFSET;
+       mmio_write_32(PWRC_CPUN_CR_REG(cpu_id), reg_val);
+
+       /* 4. Read & Validate power ready
+        * used in order to generate 16 Host CPU cycles
+        */
+       do {
+               reg_val = mmio_read_32(PWRC_CPUN_CR_REG(cpu_id));
+               exit_loop--;
+       } while (!(reg_val & (0x1 << PWRC_CPUN_CR_LDO_BYPASS_RDY_OFFSET)) &&
+                exit_loop > 0);
+
+       if (exit_loop <= 0)
+               goto cpu_poweron_error;
+
+       /* 5. Isolation disable */
+       reg_val = mmio_read_32(PWRC_CPUN_CR_REG(cpu_id));
+       reg_val &= ~PWRC_CPUN_CR_ISO_ENABLE_MASK;
+       mmio_write_32(PWRC_CPUN_CR_REG(cpu_id), reg_val);
+
+       /* 6. Read and check Isolation disabled - wait for bit to clear.
+        * NOTE(review): this loop's timeout is not checked before step 7,
+        * unlike the loops in steps 4 and 8 - confirm this is intentional.
+        */
+       exit_loop = REG_WR_VALIDATE_TIMEOUT;
+       do {
+               reg_val = mmio_read_32(PWRC_CPUN_CR_REG(cpu_id));
+               exit_loop--;
+       } while ((reg_val & (0x1 << PWRC_CPUN_CR_ISO_ENABLE_OFFSET)) &&
+                exit_loop > 0);
+
+       /* 7. De Assert CPU POR reset & Core reset */
+       reg_val = mmio_read_32(CCU_B_PRCRN_REG(cpu_id));
+       reg_val |= 0x1 << CCU_B_PRCRN_CPUPORESET_STATIC_OFFSET;
+       mmio_write_32(CCU_B_PRCRN_REG(cpu_id), reg_val);
+
+       /* 8. Read & Validate CPU POR reset */
+       exit_loop = REG_WR_VALIDATE_TIMEOUT;
+       do {
+               reg_val = mmio_read_32(CCU_B_PRCRN_REG(cpu_id));
+               exit_loop--;
+       } while (!(reg_val & (0x1 << CCU_B_PRCRN_CPUPORESET_STATIC_OFFSET)) &&
+                exit_loop > 0);
+
+       if (exit_loop <= 0)
+               goto cpu_poweron_error;
+
+       INFO("Successfully powered on CPU%d\n", cpu_id);
+
+       return 0;
+
+cpu_poweron_error:
+       ERROR("ERROR: Can't power up CPU%d\n", cpu_id);
+       return -1;
+}
+
+/*
+ * Proprietary (non-SCP) CPU_ON flow: program the CPU's reset vector and
+ * release it from reset. Always returns 0.
+ *
+ * NOTE(review): the per-cluster CPU index (not the absolute one) is used
+ * for the CCU registers - presumably the PRIVATE_UID write below selects
+ * the cluster's register window first; confirm against the CCU spec.
+ */
+static int plat_marvell_cpu_on(u_register_t mpidr)
+{
+       int cpu_id;
+       int cluster;
+
+       /* Set barrier */
+       dsbsy();
+
+       /* Get cpu number - use CPU ID */
+       cpu_id =  MPIDR_CPU_GET(mpidr);
+
+       /* Get cluster number - use affinity level 1 */
+       cluster = MPIDR_CLUSTER_GET(mpidr);
+
+       /* Set CPU private UID */
+       mmio_write_32(MVEBU_REGS_BASE + MVEBU_PRIVATE_UID_REG, cluster + 0x4);
+
+       /* Set the cpu start address to BL1 entry point (align to 0x10000) */
+       mmio_write_32(MVEBU_CCU_RVBAR(cpu_id),
+                     PLAT_MARVELL_CPU_ENTRY_ADDR >> 16);
+
+       /* Get the cpu out of reset
+        * (0x10001 presumably de-asserts both POR and core reset - TODO
+        * confirm the bit meanings)
+        */
+       mmio_write_32(MVEBU_CCU_CPU_UN_RESET(cpu_id), 0x10001);
+
+       return 0;
+}
+
+/*****************************************************************************
+ * A8K handler called to check the validity of the power state
+ * parameter.
+ *****************************************************************************
+ */
+static int a8k_validate_power_state(unsigned int power_state,
+                           psci_power_state_t *req_state)
+{
+       int type = psci_get_pstate_type(power_state);
+       int level = psci_get_pstate_pwrlvl(power_state);
+
+       /* Reject requests targeting levels the platform does not have */
+       if (level > PLAT_MAX_PWR_LVL)
+               return PSCI_E_INVALID_PARAMS;
+
+       if (type == PSTATE_TYPE_STANDBY) {
+               /* Standby is only supported at power level 0 */
+               if (level != MARVELL_PWR_LVL0)
+                       return PSCI_E_INVALID_PARAMS;
+
+               req_state->pwr_domain_state[MARVELL_PWR_LVL0] =
+                                       MARVELL_LOCAL_STATE_RET;
+       } else {
+               int lvl;
+
+               /* Power-down request: every level up to 'level' goes OFF */
+               for (lvl = MARVELL_PWR_LVL0; lvl <= level; lvl++)
+                       req_state->pwr_domain_state[lvl] =
+                                       MARVELL_LOCAL_STATE_OFF;
+       }
+
+       /* Only a zero 'state id' field is accepted */
+       if (psci_get_pstate_id(power_state))
+               return PSCI_E_INVALID_PARAMS;
+
+       return PSCI_E_SUCCESS;
+}
+
+/*****************************************************************************
+ * A8K handler called when a CPU is about to enter standby.
+ * Not implemented on this platform: logs an error and panics.
+ *****************************************************************************
+ */
+static void a8k_cpu_standby(plat_local_state_t cpu_state)
+{
+       ERROR("%s: needs to be implemented\n", __func__);
+       panic();
+}
+
+/*****************************************************************************
+ * A8K handler called when a power domain is about to be turned on. The
+ * mpidr determines the CPU to be turned on.
+ * With SCP (MSS) firmware running, CPU_ON is delegated via IPC; otherwise
+ * the proprietary register-level flow is used. Always returns 0.
+ *****************************************************************************
+ */
+static int a8k_pwr_domain_on(u_register_t mpidr)
+{
+       /* Power up CPU (CPUs 1-3 are powered off at start of BLE) */
+       plat_marvell_cpu_powerup(mpidr);
+
+       if (is_pm_fw_running()) {
+               /* Absolute core index for MSS: aff0 + aff1 * 2.
+                * NOTE(review): the hard-coded 2 presumably mirrors
+                * PLAT_MARVELL_CLUSTER_CORE_COUNT - confirm.
+                */
+               unsigned int target =
+                               ((mpidr & 0xFF) + (((mpidr >> 8) & 0xFF) * 2));
+
+               /*
+                * pm system synchronization - used to synchronize
+                * multiple core access to MSS
+                */
+               bakery_lock_get(&pm_sys_lock);
+
+               /* send CPU ON IPC Message to MSS */
+               mss_pm_ipc_msg_send(target, PM_IPC_MSG_CPU_ON, 0);
+
+               /* trigger IPC message to MSS */
+               mss_pm_ipc_msg_trigger();
+
+               /* pm system synchronization */
+               bakery_lock_release(&pm_sys_lock);
+
+               /* trace message */
+               PM_TRACE(TRACE_PWR_DOMAIN_ON | target);
+       } else {
+               /* proprietary CPU ON execution flow */
+               plat_marvell_cpu_on(mpidr);
+       }
+
+       return 0;
+}
+
+/*****************************************************************************
+ * A8K handler called to validate the entry point.
+ * Any non-secure entry point is accepted on this platform.
+ *****************************************************************************
+ */
+static int a8k_validate_ns_entrypoint(uintptr_t entrypoint)
+{
+       return PSCI_E_SUCCESS;
+}
+
+/*****************************************************************************
+ * A8K handler called when a power domain is about to be turned off. The
+ * target_state encodes the power state that each level should transition to.
+ * Only supported when the SCP (MSS) firmware is running; otherwise the
+ * request is logged and ignored.
+ *****************************************************************************
+ */
+static void a8k_pwr_domain_off(const psci_power_state_t *target_state)
+{
+       if (is_pm_fw_running()) {
+               unsigned int idx = plat_my_core_pos();
+
+               /* Prevent interrupts from spuriously waking up this cpu */
+               gicv2_cpuif_disable();
+
+               /* pm system synchronization - used to synchronize multiple
+                * core access to MSS
+                */
+               bakery_lock_get(&pm_sys_lock);
+
+               /* send CPU OFF IPC Message to MSS */
+               mss_pm_ipc_msg_send(idx, PM_IPC_MSG_CPU_OFF, target_state);
+
+               /* trigger IPC message to MSS */
+               mss_pm_ipc_msg_trigger();
+
+               /* pm system synchronization */
+               bakery_lock_release(&pm_sys_lock);
+
+               /* trace message */
+               PM_TRACE(TRACE_PWR_DOMAIN_OFF);
+       } else {
+               INFO("%s: is not supported without SCP\n", __func__);
+       }
+}
+
+/* Get PM config to power off the SoC.
+ * Weak default (see #pragma weak above): boards provide their own
+ * power_off_method configuration; the default reports none (NULL).
+ */
+void *plat_marvell_get_pm_cfg(void)
+{
+       return NULL;
+}
+
+/*
+ * This function should be called on restore from
+ * "suspend to RAM" state when the execution flow
+ * has to bypass BootROM image to RAM copy and speed up
+ * the system recovery
+ *
+ * Its address is published in the mailbox by a8k_pwr_domain_suspend.
+ */
+static void plat_marvell_exit_bootrom(void)
+{
+       marvell_exit_bootrom(PLAT_MARVELL_TRUSTED_ROM_BASE);
+}
+
+/*
+ * Prepare for the power off of the system via GPIO
+ *
+ * Configures the PMIC control pins described in pm_cfg as GPIO outputs,
+ * plays every toggle step except the last, and returns the GPIO data
+ * register address/value of the final step through gpio_addr/gpio_data,
+ * so the caller can issue it after DDR has entered self-refresh
+ * (see a8k_pwr_domain_pwr_down_wfi).
+ *
+ * NOTE(review): gpio_addr/gpio_data are written only when step_count > 0;
+ * callers must pre-initialize them. 'addr' is a 32-bit unsigned int -
+ * assumes all MVEBU GPIO registers sit below 4GB; TODO confirm.
+ */
+static void plat_marvell_power_off_gpio(struct power_off_method *pm_cfg,
+                                       register_t *gpio_addr,
+                                       register_t *gpio_data)
+{
+       unsigned int gpio;
+       unsigned int idx;
+       unsigned int shift;
+       unsigned int reg;
+       unsigned int addr;
+       gpio_info_t *info;
+       unsigned int tog_bits;
+
+       assert((pm_cfg->cfg.gpio.pin_count < PMIC_GPIO_MAX_NUMBER) &&
+              (pm_cfg->cfg.gpio.step_count < PMIC_GPIO_MAX_TOGGLE_STEP));
+
+       /* Prepare GPIOs for PMIC */
+       for (gpio = 0; gpio < pm_cfg->cfg.gpio.pin_count; gpio++) {
+               info = &pm_cfg->cfg.gpio.info[gpio];
+               /* Set PMIC GPIO to output mode */
+               reg = mmio_read_32(MVEBU_CP_GPIO_DATA_OUT_EN(
+                                  info->cp_index, info->gpio_index));
+               mmio_write_32(MVEBU_CP_GPIO_DATA_OUT_EN(
+                             info->cp_index, info->gpio_index),
+                             reg & ~MVEBU_GPIO_MASK(info->gpio_index));
+
+               /* Set the appropriate MPP to GPIO mode */
+               reg = mmio_read_32(MVEBU_PM_MPP_REGS(info->cp_index,
+                                                    info->gpio_index));
+               mmio_write_32(MVEBU_PM_MPP_REGS(info->cp_index,
+                                               info->gpio_index),
+                       reg & ~MVEBU_MPP_MASK(info->gpio_index));
+       }
+
+       /* Wait for MPP & GPIO pre-configurations done */
+       mdelay(pm_cfg->cfg.gpio.delay_ms);
+
+       /* Toggle the GPIO values, and leave final step to be triggered
+        * after  DDR self-refresh is enabled
+        */
+       for (idx = 0; idx < pm_cfg->cfg.gpio.step_count; idx++) {
+               tog_bits = pm_cfg->cfg.gpio.seq[idx];
+
+               /* The GPIOs must be within same GPIO register,
+                * thus could get the original value by first GPIO
+                */
+               info = &pm_cfg->cfg.gpio.info[0];
+               reg = mmio_read_32(MVEBU_CP_GPIO_DATA_OUT(
+                                  info->cp_index, info->gpio_index));
+               addr = MVEBU_CP_GPIO_DATA_OUT(info->cp_index, info->gpio_index);
+
+               /* Bit i of the step's toggle word selects the level of
+                * the i-th configured pin
+                */
+               for (gpio = 0; gpio < pm_cfg->cfg.gpio.pin_count; gpio++) {
+                       shift = pm_cfg->cfg.gpio.info[gpio].gpio_index % 32;
+                       if (GPIO_LOW == (tog_bits & (1 << gpio)))
+                               reg &= ~(1 << shift);
+                       else
+                               reg |= (1 << shift);
+               }
+
+               /* Set the GPIO register, for last step just store
+                * register address and values to system registers
+                */
+               if (idx < pm_cfg->cfg.gpio.step_count - 1) {
+                       mmio_write_32(MVEBU_CP_GPIO_DATA_OUT(
+                                     info->cp_index, info->gpio_index), reg);
+                       mdelay(pm_cfg->cfg.gpio.delay_ms);
+               } else {
+                       /* Save GPIO register and address values for
+                        * finishing the power down operation later
+                        */
+                       *gpio_addr = addr;
+                       *gpio_data = reg;
+               }
+       }
+}
+
+/*
+ * Prepare for the power off of the system
+ */
+static void plat_marvell_power_off_prepare(struct power_off_method *pm_cfg,
+                                          register_t *addr, register_t *data)
+{
+       /* Only the GPIO-driven PMIC method needs preparation work;
+        * all other (or unknown) methods require none.
+        */
+       if (pm_cfg->type == PMIC_GPIO)
+               plat_marvell_power_off_gpio(pm_cfg, addr, data);
+}
+
+/*****************************************************************************
+ * A8K handler called when a power domain is about to be suspended. The
+ * target_state encodes the power state that each level should transition to.
+ * With SCP (MSS) firmware: the suspend request is delegated via IPC.
+ * Without SCP: prepares suspend-to-RAM by publishing the resume marker and
+ * BootROM-bypass address in the mailbox and disabling the LLC.
+ *****************************************************************************
+ */
+static void a8k_pwr_domain_suspend(const psci_power_state_t *target_state)
+{
+       if (is_pm_fw_running()) {
+               unsigned int idx;
+
+               /* Prevent interrupts from spuriously waking up this cpu */
+               gicv2_cpuif_disable();
+
+               idx = plat_my_core_pos();
+
+               /* pm system synchronization - used to synchronize multiple
+                * core access to MSS
+                */
+               bakery_lock_get(&pm_sys_lock);
+
+               /* send CPU Suspend IPC Message to MSS */
+               mss_pm_ipc_msg_send(idx, PM_IPC_MSG_CPU_SUSPEND, target_state);
+
+               /* trigger IPC message to MSS */
+               mss_pm_ipc_msg_trigger();
+
+               /* pm system synchronization */
+               bakery_lock_release(&pm_sys_lock);
+
+               /* trace message */
+               PM_TRACE(TRACE_PWR_DOMAIN_SUSPEND);
+       } else {
+               uintptr_t *mailbox = (void *)PLAT_MARVELL_MAILBOX_BASE;
+
+               INFO("Suspending to RAM\n");
+
+               /* Prevent interrupts from spuriously waking up this cpu */
+               gicv2_cpuif_disable();
+
+               /* Publish the suspend marker and the BootROM bypass entry
+                * consumed by the resume path (a8k_pwr_domain_suspend_finish)
+                */
+               mailbox[MBOX_IDX_SUSPEND_MAGIC] = MVEBU_MAILBOX_SUSPEND_STATE;
+               mailbox[MBOX_IDX_ROM_EXIT_ADDR] = (uintptr_t)&plat_marvell_exit_bootrom;
+
+#if PLAT_MARVELL_SHARED_RAM_CACHED
+               flush_dcache_range(PLAT_MARVELL_MAILBOX_BASE +
+               MBOX_IDX_SUSPEND_MAGIC * sizeof(uintptr_t),
+               2 * sizeof(uintptr_t));
+#endif
+               /* Flush and disable LLC before going off-power */
+               llc_disable(0);
+
+               isb();
+               /*
+                * Do not halt here!
+                * The function must return for allowing the caller function
+                * psci_power_up_finish() to do the proper context saving and
+                * to release the CPU lock.
+                */
+       }
+}
+
+/*****************************************************************************
+ * A8K handler called when a power domain has just been powered on after
+ * being turned off earlier. The target_state encodes the low power state that
+ * each level has woken up from.
+ * Re-initializes the per-CPU arch state and GICv2 CPU interface; order
+ * matters: arch init precedes interrupt enabling.
+ *****************************************************************************
+ */
+static void a8k_pwr_domain_on_finish(const psci_power_state_t *target_state)
+{
+       /* arch specific configuration */
+       marvell_psci_arch_init(0);
+
+       /* Interrupt initialization */
+       gicv2_pcpu_distif_init();
+       gicv2_cpuif_enable();
+
+       if (is_pm_fw_running()) {
+               /* trace message */
+               PM_TRACE(TRACE_PWR_DOMAIN_ON_FINISH);
+       }
+}
+
+/*****************************************************************************
+ * A8K handler called when a power domain has just been powered on after
+ * having been suspended earlier. The target_state encodes the low power state
+ * that each level has woken up from.
+ * TODO: At the moment we reuse the on finisher and reinitialize the secure
+ * context. Need to implement a separate suspend finisher.
+ * Without SCP, the primary CPU also re-runs console/platform setup and
+ * clears the suspend markers written by a8k_pwr_domain_suspend.
+ *****************************************************************************
+ */
+static void a8k_pwr_domain_suspend_finish(
+                                       const psci_power_state_t *target_state)
+{
+       if (is_pm_fw_running()) {
+               /* arch specific configuration */
+               marvell_psci_arch_init(0);
+
+               /* Interrupt initialization */
+               gicv2_cpuif_enable();
+
+               /* trace message */
+               PM_TRACE(TRACE_PWR_DOMAIN_SUSPEND_FINISH);
+       } else {
+               uintptr_t *mailbox = (void *)PLAT_MARVELL_MAILBOX_BASE;
+
+               /* Only primary CPU requires platform init */
+               if (!plat_my_core_pos()) {
+                       /* Initialize the console to provide
+                        * early debug support
+                        */
+                       console_init(PLAT_MARVELL_BOOT_UART_BASE,
+                       PLAT_MARVELL_BOOT_UART_CLK_IN_HZ,
+                       MARVELL_CONSOLE_BAUDRATE);
+
+                       bl31_plat_arch_setup();
+                       marvell_bl31_platform_setup();
+                       /*
+                        * Remove suspend to RAM marker from the mailbox
+                        * for treating a regular reset as a cold boot
+                        */
+                       mailbox[MBOX_IDX_SUSPEND_MAGIC] = 0;
+                       mailbox[MBOX_IDX_ROM_EXIT_ADDR] = 0;
+#if PLAT_MARVELL_SHARED_RAM_CACHED
+                       flush_dcache_range(PLAT_MARVELL_MAILBOX_BASE +
+                       MBOX_IDX_SUSPEND_MAGIC * sizeof(uintptr_t),
+                       2 * sizeof(uintptr_t));
+#endif
+               }
+       }
+}
+
+/*****************************************************************************
+ * This handler is called by the PSCI implementation during the `SYSTEM_SUSPEND`
+ * call to get the `power_state` parameter. This allows the platform to encode
+ * the appropriate State-ID field within the `power_state` parameter which can
+ * be utilized in `pwr_domain_suspend()` to suspend to system affinity level.
+ *****************************************************************************
+ */
+static void a8k_get_sys_suspend_power_state(psci_power_state_t *req_state)
+{
+       int lvl;
+
+       /* Request the deepest OFF state at every affinity level */
+       for (lvl = MPIDR_AFFLVL0; lvl <= PLAT_MAX_PWR_LVL; lvl++)
+               req_state->pwr_domain_state[lvl] = PLAT_MAX_OFF_STATE;
+}
+
+/*****************************************************************************
+ * Final power-down step. With SCP (MSS) firmware it simply executes the
+ * standard PSCI power-down WFI. Without SCP it puts DDR into self-refresh
+ * and issues the PMIC GPIO power-off command prepared earlier, entirely
+ * from registers (any DRAM access would break self-refresh). Never returns.
+ *****************************************************************************
+ */
+static void
+__dead2 a8k_pwr_domain_pwr_down_wfi(const psci_power_state_t *target_state)
+{
+       struct power_off_method *pm_cfg;
+       unsigned int srcmd;
+       unsigned int sdram_reg;
+       register_t gpio_data = 0, gpio_addr = 0;
+
+       if (is_pm_fw_running()) {
+               psci_power_down_wfi();
+               panic();
+       }
+
+       /* NOTE(review): if no PMIC_GPIO method is configured, gpio_addr
+        * stays 0 and the final store below targets address 0 - confirm
+        * this path is unreachable without a board pm_cfg.
+        */
+       pm_cfg = (struct power_off_method *)plat_marvell_get_pm_cfg();
+
+       /* Prepare for power off */
+       plat_marvell_power_off_prepare(pm_cfg, &gpio_addr, &gpio_data);
+
+       /* First step to enable DDR self-refresh
+        * to keep the data during suspend
+        */
+       mmio_write_32(MVEBU_MC_PWR_CTRL_REG, 0x8C1);
+
+       /* Save DDR self-refresh second step register
+        * and value to be issued later
+        */
+       sdram_reg = MVEBU_USER_CMD_0_REG;
+       srcmd = mmio_read_32(sdram_reg);
+       srcmd &= ~(MVEBU_USER_CMD_CH0_MASK | MVEBU_USER_CMD_CS_MASK |
+                MVEBU_USER_CMD_SR_MASK);
+       srcmd |= (MVEBU_USER_CMD_CH0_EN | MVEBU_USER_CMD_CS_ALL |
+                MVEBU_USER_CMD_SR_ENTER);
+
+       /*
+        * Wait for DRAM is done using registers access only.
+        * At this stage any access to DRAM (procedure call) will
+        * release it from the self-refresh mode
+        */
+       __asm__ volatile (
+               /* Align to a cache line */
+               "       .balign 64\n\t"
+
+               /* Enter self refresh */
+               "       str %[srcmd], [%[sdram_reg]]\n\t"
+
+               /*
+                * Wait 100 cycles for DDR to enter self refresh, by
+                * doing 50 times two instructions.
+                */
+               "       mov x1, #50\n\t"
+               "1:     subs x1, x1, #1\n\t"
+               "       bne 1b\n\t"
+
+               /* Issue the command to trigger the SoC power off */
+               "       str     %[gpio_data], [%[gpio_addr]]\n\t"
+
+               /* Trap the processor */
+               "       b .\n\t"
+               : : [srcmd] "r" (srcmd), [sdram_reg] "r" (sdram_reg),
+                   [gpio_addr] "r" (gpio_addr),  [gpio_data] "r" (gpio_data)
+               : "x1");
+
+       panic();
+}
+
+/*****************************************************************************
+ * A8K handlers to shutdown/reboot the system
+ *****************************************************************************
+ */
+/* SYSTEM_OFF is not implemented on this platform: log and panic */
+static void __dead2 a8k_system_off(void)
+{
+       ERROR("%s:  needs to be implemented\n", __func__);
+       panic();
+}
+
+/* Trigger a global SoC reset by writing 0 to the RFU SW-reset register */
+void plat_marvell_system_reset(void)
+{
+       mmio_write_32(MVEBU_RFU_BASE + MVEBU_RFU_GLOBL_SW_RST, 0x0);
+}
+
+/* PSCI SYSTEM_RESET handler: issues the global SW reset and never returns */
+static void __dead2 a8k_system_reset(void)
+{
+       plat_marvell_system_reset();
+
+       /* we shouldn't get to this point */
+       panic();
+}
+
+/*****************************************************************************
+ * Export the platform handlers via plat_arm_psci_pm_ops. The ARM Standard
+ * platform layer will take care of registering the handlers with PSCI.
+ * (No pwr_domain_off_early / system_reset2 handlers are provided.)
+ *****************************************************************************
+ */
+const plat_psci_ops_t plat_arm_psci_pm_ops = {
+       .cpu_standby = a8k_cpu_standby,
+       .pwr_domain_on = a8k_pwr_domain_on,
+       .pwr_domain_off = a8k_pwr_domain_off,
+       .pwr_domain_suspend = a8k_pwr_domain_suspend,
+       .pwr_domain_on_finish = a8k_pwr_domain_on_finish,
+       .get_sys_suspend_power_state = a8k_get_sys_suspend_power_state,
+       .pwr_domain_suspend_finish = a8k_pwr_domain_suspend_finish,
+       .pwr_domain_pwr_down_wfi = a8k_pwr_domain_pwr_down_wfi,
+       .system_off = a8k_system_off,
+       .system_reset = a8k_system_reset,
+       .validate_power_state = a8k_validate_power_state,
+       .validate_ns_entrypoint = a8k_validate_ns_entrypoint
+};
diff --git a/plat/marvell/a8k/common/plat_pm_trace.c b/plat/marvell/a8k/common/plat_pm_trace.c
new file mode 100644 (file)
index 0000000..683e56f
--- /dev/null
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#include <mmio.h>
+#include <mss_mem.h>
+#include <platform.h>
+#include <plat_pm_trace.h>
+
+#ifdef PM_TRACE_ENABLE
+
+/* core trace APIs */
+/* Per-core trace function dispatch table, indexed by core position. */
+core_trace_func funcTbl[PLATFORM_CORE_COUNT] = {
+       pm_core_0_trace,
+       pm_core_1_trace,
+       pm_core_2_trace,
+       pm_core_3_trace};
+
+/*****************************************************************************
+ * pm_core0_trace
+ * pm_core1_trace
+ * pm_core2_trace
+ * pm_core_3_trace
+ *
+ * These functions set trace info into the core cyclic trace queue in MSS SRAM
+ * memory space
+ *****************************************************************************
+ */
+/*
+ * Record one trace event for core 0: write the current MSS timer value and
+ * the event code at the queue write position, then advance the position
+ * with wrap-around via AP_MSS_ATF_TRACE_SIZE_MASK.
+ */
+void pm_core_0_trace(unsigned int trace)
+{
+       unsigned int current_position_core_0 =
+                       mmio_read_32(AP_MSS_ATF_CORE_0_CTRL_BASE);
+       mmio_write_32((AP_MSS_ATF_CORE_0_INFO_BASE  +
+                    (current_position_core_0 * AP_MSS_ATF_CORE_ENTRY_SIZE)),
+                    mmio_read_32(AP_MSS_TIMER_BASE));
+       mmio_write_32((AP_MSS_ATF_CORE_0_INFO_TRACE +
+                    (current_position_core_0 * AP_MSS_ATF_CORE_ENTRY_SIZE)),
+                    trace);
+       mmio_write_32(AP_MSS_ATF_CORE_0_CTRL_BASE,
+                    ((current_position_core_0 + 1) &
+                    AP_MSS_ATF_TRACE_SIZE_MASK));
+}
+
+/* Record one trace event for core 1; same scheme as pm_core_0_trace. */
+void pm_core_1_trace(unsigned int trace)
+{
+       unsigned int current_position_core_1 =
+                       mmio_read_32(AP_MSS_ATF_CORE_1_CTRL_BASE);
+       mmio_write_32((AP_MSS_ATF_CORE_1_INFO_BASE +
+                    (current_position_core_1 * AP_MSS_ATF_CORE_ENTRY_SIZE)),
+                    mmio_read_32(AP_MSS_TIMER_BASE));
+       mmio_write_32((AP_MSS_ATF_CORE_1_INFO_TRACE +
+                    (current_position_core_1 * AP_MSS_ATF_CORE_ENTRY_SIZE)),
+                    trace);
+       mmio_write_32(AP_MSS_ATF_CORE_1_CTRL_BASE,
+                    ((current_position_core_1 + 1) &
+                    AP_MSS_ATF_TRACE_SIZE_MASK));
+}
+
+/* Record one trace event for core 2; same scheme as pm_core_0_trace. */
+void pm_core_2_trace(unsigned int trace)
+{
+       unsigned int current_position_core_2 =
+                       mmio_read_32(AP_MSS_ATF_CORE_2_CTRL_BASE);
+       mmio_write_32((AP_MSS_ATF_CORE_2_INFO_BASE +
+                    (current_position_core_2 * AP_MSS_ATF_CORE_ENTRY_SIZE)),
+                    mmio_read_32(AP_MSS_TIMER_BASE));
+       mmio_write_32((AP_MSS_ATF_CORE_2_INFO_TRACE +
+                    (current_position_core_2 * AP_MSS_ATF_CORE_ENTRY_SIZE)),
+                    trace);
+       mmio_write_32(AP_MSS_ATF_CORE_2_CTRL_BASE,
+                    ((current_position_core_2 + 1) &
+                    AP_MSS_ATF_TRACE_SIZE_MASK));
+}
+
+/* Record one trace event for core 3; same scheme as pm_core_0_trace. */
+void pm_core_3_trace(unsigned int trace)
+{
+       unsigned int current_position_core_3 =
+                       mmio_read_32(AP_MSS_ATF_CORE_3_CTRL_BASE);
+       mmio_write_32((AP_MSS_ATF_CORE_3_INFO_BASE +
+                    (current_position_core_3 * AP_MSS_ATF_CORE_ENTRY_SIZE)),
+                    mmio_read_32(AP_MSS_TIMER_BASE));
+       mmio_write_32((AP_MSS_ATF_CORE_3_INFO_TRACE +
+                    (current_position_core_3 * AP_MSS_ATF_CORE_ENTRY_SIZE)),
+                    trace);
+       mmio_write_32(AP_MSS_ATF_CORE_3_CTRL_BASE,
+                    ((current_position_core_3 + 1) &
+                    AP_MSS_ATF_TRACE_SIZE_MASK));
+}
+#endif /* PM_TRACE_ENABLE */
diff --git a/plat/marvell/a8k/common/plat_thermal.c b/plat/marvell/a8k/common/plat_thermal.c
new file mode 100644 (file)
index 0000000..02fe820
--- /dev/null
@@ -0,0 +1,129 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#include <debug.h>
+#include <delay_timer.h>
+#include <mmio.h>
+#include <mvebu_def.h>
+#include <thermal.h>
+
+#define THERMAL_TIMEOUT                                        1200
+
+#define THERMAL_SEN_CTRL_LSB_STRT_OFFSET               0
+#define THERMAL_SEN_CTRL_LSB_STRT_MASK                 \
+                               (0x1 << THERMAL_SEN_CTRL_LSB_STRT_OFFSET)
+#define THERMAL_SEN_CTRL_LSB_RST_OFFSET                        1
+#define THERMAL_SEN_CTRL_LSB_RST_MASK                  \
+                               (0x1 << THERMAL_SEN_CTRL_LSB_RST_OFFSET)
+#define THERMAL_SEN_CTRL_LSB_EN_OFFSET                 2
+#define THERMAL_SEN_CTRL_LSB_EN_MASK                   \
+                               (0x1 << THERMAL_SEN_CTRL_LSB_EN_OFFSET)
+
+#define THERMAL_SEN_CTRL_STATS_VALID_OFFSET            16
+#define THERMAL_SEN_CTRL_STATS_VALID_MASK              \
+                               (0x1 << THERMAL_SEN_CTRL_STATS_VALID_OFFSET)
+#define THERMAL_SEN_CTRL_STATS_TEMP_OUT_OFFSET         0
+#define THERMAL_SEN_CTRL_STATS_TEMP_OUT_MASK           \
+                       (0x3FF << THERMAL_SEN_CTRL_STATS_TEMP_OUT_OFFSET)
+
+#define THERMAL_SEN_OUTPUT_MSB                         512
+#define THERMAL_SEN_OUTPUT_COMP                                1024
+
+struct tsen_regs {
+       uint32_t ext_tsen_ctrl_lsb;
+       uint32_t ext_tsen_ctrl_msb;
+       uint32_t ext_tsen_status;
+};
+
+/*
+ * Probe and start the external thermal sensor: de-assert its reset,
+ * enable and start it, then poll the status register (up to
+ * THERMAL_TIMEOUT iterations of 100us) until the reading is valid.
+ *
+ * Returns 0 on success, -1 on missing configuration or timeout.
+ */
+static int ext_tsen_probe(struct tsen_config *tsen_cfg)
+{
+       uint32_t reg, timeout = 0;
+       struct tsen_regs *base;
+
+       /* '||', not '&&': either a NULL config or a NULL register base is
+        * fatal, and '&&' would dereference a NULL tsen_cfg.
+        */
+       if (tsen_cfg == NULL || tsen_cfg->regs_base == NULL) {
+               ERROR("initial thermal sensor configuration is missing\n");
+               return -1;
+       }
+       base = (struct tsen_regs *)tsen_cfg->regs_base;
+
+       INFO("initializing thermal sensor\n");
+
+       /* initialize thermal sensor hardware reset once */
+       reg = mmio_read_32((uintptr_t)&base->ext_tsen_ctrl_lsb);
+       /* Clear the RST *mask* (bit 1); clearing ~RST_OFFSET (~1) would
+        * wrongly clear bit 0, the START bit, instead.
+        */
+       reg &= ~THERMAL_SEN_CTRL_LSB_RST_MASK; /* de-assert TSEN_RESET */
+       reg |= THERMAL_SEN_CTRL_LSB_EN_MASK; /* set TSEN_EN to 1 */
+       reg |= THERMAL_SEN_CTRL_LSB_STRT_MASK; /* set TSEN_START to 1 */
+       mmio_write_32((uintptr_t)&base->ext_tsen_ctrl_lsb, reg);
+
+       /* Wait for the hardware to flag a valid temperature sample */
+       reg = mmio_read_32((uintptr_t)&base->ext_tsen_status);
+       while ((reg & THERMAL_SEN_CTRL_STATS_VALID_MASK) == 0 &&
+              timeout < THERMAL_TIMEOUT) {
+               udelay(100);
+               reg = mmio_read_32((uintptr_t)&base->ext_tsen_status);
+               timeout++;
+       }
+
+       if ((reg & THERMAL_SEN_CTRL_STATS_VALID_MASK) == 0) {
+               ERROR("thermal sensor is not ready\n");
+               return -1;
+       }
+
+       tsen_cfg->tsen_ready = 1;
+
+       VERBOSE("thermal sensor was initialized\n");
+
+       return 0;
+}
+
+/*
+ * Read the current temperature from the external sensor into *temp,
+ * converting the raw 10-bit reading with the configured gain/offset/divisor.
+ *
+ * Returns 0 on success, -1 if the sensor was not probed or the divisor is 0.
+ */
+static int ext_tsen_read(struct tsen_config *tsen_cfg, int *temp)
+{
+       uint32_t reg;
+       struct tsen_regs *base;
+
+       /* '||', not '&&': a NULL config must not be dereferenced, and an
+        * un-probed sensor must not be read either way.
+        */
+       if (tsen_cfg == NULL || !tsen_cfg->tsen_ready) {
+               ERROR("thermal sensor was not initialized\n");
+               return -1;
+       }
+       base = (struct tsen_regs *)tsen_cfg->regs_base;
+
+       reg = mmio_read_32((uintptr_t)&base->ext_tsen_status);
+       reg = ((reg & THERMAL_SEN_CTRL_STATS_TEMP_OUT_MASK) >>
+               THERMAL_SEN_CTRL_STATS_TEMP_OUT_OFFSET);
+
+       /*
+        * TSEN output format is signed as a 2s complement number
+        * ranging from-512 to +511. when MSB is set, need to
+        * calculate the complement number
+        */
+       if (reg >= THERMAL_SEN_OUTPUT_MSB)
+               reg -= THERMAL_SEN_OUTPUT_COMP;
+
+       if (tsen_cfg->tsen_divisor == 0) {
+               ERROR("thermal sensor divisor cannot be zero\n");
+               return -1;
+       }
+
+       /* (int)reg reinterprets the wrapped unsigned value as the intended
+        * negative temperature on two's-complement targets.
+        */
+       *temp = ((tsen_cfg->tsen_gain * ((int)reg)) +
+                tsen_cfg->tsen_offset) / tsen_cfg->tsen_divisor;
+
+       return 0;
+}
+
+/* Default AP thermal sensor configuration.
+ * Conversion: temp = (gain * raw + offset) / divisor -- presumably yielding
+ * degrees Celsius given divisor 1000; confirm against thermal.h consumers.
+ */
+static struct tsen_config tsen_cfg = {
+       .tsen_offset = 153400,
+       .tsen_gain = 425,
+       .tsen_divisor = 1000,
+       .tsen_ready = 0,
+       .regs_base = (void *)MVEBU_AP_EXT_TSEN_BASE,
+       .ptr_tsen_probe = ext_tsen_probe,
+       .ptr_tsen_read = ext_tsen_read
+};
+
+/* Expose the (mutable) default thermal sensor configuration to callers. */
+struct tsen_config *marvell_thermal_config_get(void)
+{
+       return &tsen_cfg;
+}
diff --git a/plat/marvell/common/aarch64/marvell_common.c b/plat/marvell/common/aarch64/marvell_common.c
new file mode 100644 (file)
index 0000000..abc501a
--- /dev/null
@@ -0,0 +1,135 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <debug.h>
+#include <mmio.h>
+#include <plat_marvell.h>
+#include <platform_def.h>
+#include <xlat_tables.h>
+
+
+/* Weak definitions may be overridden in specific ARM standard platform */
+#pragma weak plat_get_ns_image_entrypoint
+#pragma weak plat_marvell_get_mmap
+
+/*
+ * Set up the page tables for the generic and platform-specific memory regions.
+ * The extents of the generic memory regions are specified by the function
+ * arguments and consist of:
+ * - Trusted SRAM seen by the BL image;
+ * - Code section;
+ * - Read-only data section;
+ * - Coherent memory region, if applicable.
+ */
+/*
+ * All regions are identity-mapped (VA == PA). The coherent-region
+ * parameters exist only when USE_COHERENT_MEM is set, so callers must be
+ * built with the same option.
+ */
+void marvell_setup_page_tables(uintptr_t total_base,
+                              size_t total_size,
+                              uintptr_t code_start,
+                              uintptr_t code_limit,
+                              uintptr_t rodata_start,
+                              uintptr_t rodata_limit
+#if USE_COHERENT_MEM
+                              ,
+                              uintptr_t coh_start,
+                              uintptr_t coh_limit
+#endif
+                          )
+{
+       /*
+        * Map the Trusted SRAM with appropriate memory attributes.
+        * Subsequent mappings will adjust the attributes for specific regions.
+        */
+       VERBOSE("Trusted SRAM seen by this BL image: %p - %p\n",
+               (void *) total_base, (void *) (total_base + total_size));
+       mmap_add_region(total_base, total_base,
+                       total_size,
+                       MT_MEMORY | MT_RW | MT_SECURE);
+
+       /* Re-map the code section */
+       VERBOSE("Code region: %p - %p\n",
+               (void *) code_start, (void *) code_limit);
+       mmap_add_region(code_start, code_start,
+                       code_limit - code_start,
+                       MT_CODE | MT_SECURE);
+
+       /* Re-map the read-only data section */
+       VERBOSE("Read-only data region: %p - %p\n",
+               (void *) rodata_start, (void *) rodata_limit);
+       mmap_add_region(rodata_start, rodata_start,
+                       rodata_limit - rodata_start,
+                       MT_RO_DATA | MT_SECURE);
+
+#if USE_COHERENT_MEM
+       /* Re-map the coherent memory region */
+       VERBOSE("Coherent region: %p - %p\n",
+               (void *) coh_start, (void *) coh_limit);
+       mmap_add_region(coh_start, coh_start,
+                       coh_limit - coh_start,
+                       MT_DEVICE | MT_RW | MT_SECURE);
+#endif
+
+       /* Now (re-)map the platform-specific memory regions */
+       mmap_add(plat_marvell_get_mmap());
+
+       /* Create the page tables to reflect the above mappings */
+       init_xlat_tables();
+}
+
+/* Entry point of the non-secure (BL33) image; weak, platform-overridable. */
+unsigned long plat_get_ns_image_entrypoint(void)
+{
+       return PLAT_MARVELL_NS_IMAGE_OFFSET;
+}
+
+/*****************************************************************************
+ * Gets SPSR for BL32 entry
+ *****************************************************************************
+ */
+/* Return 0: the SPD is expected to program the SPSR for BL32 itself. */
+uint32_t marvell_get_spsr_for_bl32_entry(void)
+{
+       /*
+        * The Secure Payload Dispatcher service is responsible for
+        * setting the SPSR prior to entry into the BL32 image.
+        */
+       return 0;
+}
+
+/*****************************************************************************
+ * Gets SPSR for BL33 entry
+ *****************************************************************************
+ */
+/*
+ * Build the SPSR for BL33 entry: EL2 if the core implements it
+ * (per ID_AA64PFR0_EL1), otherwise EL1; SP_ELx, all exceptions masked.
+ */
+uint32_t marvell_get_spsr_for_bl33_entry(void)
+{
+       unsigned long el_status;
+       unsigned int mode;
+       uint32_t spsr;
+
+       /* Figure out what mode we enter the non-secure world in */
+       el_status = read_id_aa64pfr0_el1() >> ID_AA64PFR0_EL2_SHIFT;
+       el_status &= ID_AA64PFR0_ELX_MASK;
+
+       mode = (el_status) ? MODE_EL2 : MODE_EL1;
+
+       /*
+        * TODO: Consider the possibility of specifying the SPSR in
+        * the FIP ToC and allowing the platform to have a say as
+        * well.
+        */
+       spsr = SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
+       return spsr;
+}
+
+/*****************************************************************************
+ * Returns ARM platform specific memory map regions.
+ *****************************************************************************
+ */
+/* Weak accessor for the platform mmap table; overridable per platform. */
+const mmap_region_t *plat_marvell_get_mmap(void)
+{
+       return plat_marvell_mmap;
+}
+
diff --git a/plat/marvell/common/aarch64/marvell_helpers.S b/plat/marvell/common/aarch64/marvell_helpers.S
new file mode 100644 (file)
index 0000000..a3dc917
--- /dev/null
@@ -0,0 +1,223 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#include <asm_macros.S>
+#include <cortex_a72.h>
+#include <marvell_def.h>
+#include <platform_def.h>
+#ifndef PLAT_a3700
+#include <ccu.h>
+#include <cache_llc.h>
+#endif
+
+       .weak   plat_marvell_calc_core_pos
+       .weak   plat_my_core_pos
+       .globl  plat_crash_console_init
+       .globl  plat_crash_console_putc
+       .globl  platform_mem_init
+       .globl  disable_mmu_dcache
+       .globl  invalidate_tlb_all
+       .globl  platform_unmap_sram
+       .globl  disable_sram
+       .globl  disable_icache
+       .globl  invalidate_icache_all
+       .globl  marvell_exit_bootrom
+       .globl  ca72_l2_enable_unique_clean
+
+       /* -----------------------------------------------------
+        *  unsigned int plat_my_core_pos(void)
+        *  This function uses the plat_marvell_calc_core_pos()
+        *  definition to get the index of the calling CPU.
+        * -----------------------------------------------------
+        */
+func plat_my_core_pos
+       mrs     x0, mpidr_el1
+       /* tail-call: calc_core_pos returns straight to our caller */
+       b       plat_marvell_calc_core_pos
+endfunc plat_my_core_pos
+
+       /* -----------------------------------------------------
+        *  unsigned int plat_marvell_calc_core_pos(uint64_t mpidr)
+        *  Helper function to calculate the core position.
+        *  With this function: CorePos = (ClusterId * 2) +
+        *                                CoreId
+        * -----------------------------------------------------
+        */
+func plat_marvell_calc_core_pos
+       and     x1, x0, #MPIDR_CPU_MASK
+       and     x0, x0, #MPIDR_CLUSTER_MASK
+       /* cluster field (Aff1, bit 8) shifted right by 7 == ClusterId * 2 */
+       add     x0, x1, x0, LSR #7
+       ret
+endfunc plat_marvell_calc_core_pos
+
+       /* ---------------------------------------------
+        * int plat_crash_console_init(void)
+        * Function to initialize the crash console
+        * without a C Runtime to print crash report.
+        * Clobber list : x0, x1, x2
+        * ---------------------------------------------
+        */
+func plat_crash_console_init
+       /* x0=base, x1=clock, x2=baud as expected by console_core_init */
+       mov_imm x0, PLAT_MARVELL_CRASH_UART_BASE
+       mov_imm x1, PLAT_MARVELL_CRASH_UART_CLK_IN_HZ
+       mov_imm x2, MARVELL_CONSOLE_BAUDRATE
+       b       console_core_init
+endfunc plat_crash_console_init
+
+       /* ---------------------------------------------
+        * int plat_crash_console_putc(int c)
+        * Function to print a character on the crash
+        * console without a C Runtime.
+        * Clobber list : x1, x2
+        * ---------------------------------------------
+        */
+func plat_crash_console_putc
+       /* x0 already holds the character; x1 carries the UART base */
+       mov_imm x1, PLAT_MARVELL_CRASH_UART_BASE
+       b       console_core_putc
+endfunc plat_crash_console_putc
+
+       /* ---------------------------------------------------------------------
+        * We don't need to carry out any memory initialization on ARM
+        * platforms. The Secure RAM is accessible straight away.
+        * ---------------------------------------------------------------------
+        */
+func platform_mem_init
+       /* no-op: secure RAM needs no initialization on these platforms */
+       ret
+endfunc platform_mem_init
+
+       /* -----------------------------------------------------
+        * Disable icache, dcache, and MMU
+        * -----------------------------------------------------
+        */
+func disable_mmu_dcache
+       mrs     x0, sctlr_el3
+       bic     x0, x0, 0x1             /* M bit - MMU */
+       bic     x0, x0, 0x4             /* C bit - Dcache L1 & L2 */
+       msr     sctlr_el3, x0
+       isb
+       /* NOTE(review): this branch targets the very next instruction and
+        * looks redundant after the isb -- confirm intent before removing.
+        */
+       b       mmu_off
+mmu_off:
+       ret
+endfunc disable_mmu_dcache
+
+       /* -----------------------------------------------------
+        * Invalidate all TLB entries
+        * -----------------------------------------------------
+        */
+func invalidate_tlb_all
+       /* invalidate all EL3 TLB entries, then ensure completion */
+       tlbi    alle3
+       dsb     sy
+       isb
+       ret
+endfunc invalidate_tlb_all
+
+       /* -----------------------------------------------------
+        * Disable the i cache
+        * -----------------------------------------------------
+        */
+func disable_icache
+       mrs     x0, sctlr_el3
+       bic     x0, x0, 0x1000  /* I bit - Icache L1 & L2 */
+       msr     sctlr_el3, x0
+       isb
+       ret
+endfunc disable_icache
+
+       /* -----------------------------------------------------
+        * Invalidate all of the i caches
+        * -----------------------------------------------------
+        */
+func invalidate_icache_all
+       /* invalidate entire icache to PoU, inner shareable */
+       ic      ialluis
+       isb     sy
+       ret
+endfunc invalidate_icache_all
+
+       /* -----------------------------------------------------
+        * Clear the SRAM enabling bit to unmap SRAM
+        * -----------------------------------------------------
+        */
+func platform_unmap_sram
+       /* write 0 to the CCU SRAM window control reg to unmap the SRAM */
+       ldr     x0, =CCU_SRAM_WIN_CR
+       str     wzr, [x0]
+       ret
+endfunc platform_unmap_sram
+
+       /* -----------------------------------------------------
+        * Disable the SRAM
+        * -----------------------------------------------------
+        */
+func disable_sram
+       /* Disable the line lockings. They must be disabled explicitly
+        * or the OS will have problems using the cache */
+       ldr     x1, =MASTER_LLC_TC0_LOCK
+       str     wzr, [x1]
+
+       /* Invalidate all ways */
+       ldr     w1, =LLC_WAY_MASK
+       ldr     x0, =MASTER_L2X0_INV_WAY
+       str     w1, [x0]
+
+       /* Finally disable LLC */
+       ldr     x0, =MASTER_LLC_CTRL
+       str     wzr, [x0]
+
+       ret
+endfunc disable_sram
+
+       /* -----------------------------------------------------
+        * Operation when exit bootROM:
+        * Disable the MMU
+        * Disable and invalidate the dcache
+        * Unmap and disable the SRAM
+        * Disable and invalidate the icache
+        * -----------------------------------------------------
+        */
+func marvell_exit_bootrom
+       /* Save the system restore address (x28 survives the bl calls below) */
+       mov     x28, x0
+
+       /* Close the caches and MMU */
+       bl      disable_mmu_dcache
+
+       /*
+        * There is nothing important in the caches now,
+        * so invalidate them instead of cleaning.
+        */
+       adr     x0, __RW_START__
+       adr     x1, __RW_END__
+       sub     x1, x1, x0
+       bl      inv_dcache_range
+       bl      invalidate_tlb_all
+
+       /*
+        * Clean the memory mapping of SRAM
+        * the DDR mapping will remain to enable boot image to execute
+        */
+       bl      platform_unmap_sram
+
+       /* Disable the SRAM */
+       bl      disable_sram
+
+       /* Disable and invalidate icache */
+       bl      disable_icache
+       bl      invalidate_icache_all
+
+       /* jump to the saved restore address; never returns */
+       mov     x0, x28
+       br      x0
+endfunc marvell_exit_bootrom
+
+       /*
+        * Enable L2 UniqueClean evictions with data
+        */
+func ca72_l2_enable_unique_clean
+
+       /* set the UniqueClean-with-data bit in the Cortex-A72 L2ACTLR */
+       mrs     x0, CORTEX_A72_L2ACTLR_EL1
+       orr     x0, x0, #CORTEX_A72_L2ACTLR_ENABLE_UNIQUE_CLEAN
+       msr     CORTEX_A72_L2ACTLR_EL1, x0
+
+       ret
+endfunc ca72_l2_enable_unique_clean
diff --git a/plat/marvell/common/marvell_bl1_setup.c b/plat/marvell/common/marvell_bl1_setup.c
new file mode 100644 (file)
index 0000000..981cfbe
--- /dev/null
@@ -0,0 +1,117 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#include <bl1.h>
+#include <bl1/bl1_private.h>
+#include <bl_common.h>
+#include <console.h>
+#include <debug.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <plat_marvell.h>
+#include <sp805.h>
+
+/* Weak definitions may be overridden in specific Marvell standard platform */
+#pragma weak bl1_early_platform_setup
+#pragma weak bl1_plat_arch_setup
+#pragma weak bl1_platform_setup
+#pragma weak bl1_plat_sec_mem_layout
+
+
+/* Data structure which holds the extents of the RAM for BL1*/
+static meminfo_t bl1_ram_layout;
+
+/* Return BL1's RAM layout, populated by marvell_bl1_early_platform_setup. */
+meminfo_t *bl1_plat_sec_mem_layout(void)
+{
+       return &bl1_ram_layout;
+}
+
+/*
+ * BL1 specific platform actions shared between Marvell standard platforms.
+ */
+/*
+ * Early BL1 setup: bring up the boot console, then describe the trusted
+ * RAM and carve BL1's own footprint out of the free region.
+ */
+void marvell_bl1_early_platform_setup(void)
+{
+       const size_t bl1_size = BL1_RAM_LIMIT - BL1_RAM_BASE;
+
+       /* Initialize the console to provide early debug support */
+       console_init(PLAT_MARVELL_BOOT_UART_BASE,
+                    PLAT_MARVELL_BOOT_UART_CLK_IN_HZ,
+                    MARVELL_CONSOLE_BAUDRATE);
+
+       /* Allow BL1 to see the whole Trusted RAM */
+       bl1_ram_layout.total_base = MARVELL_BL_RAM_BASE;
+       bl1_ram_layout.total_size = MARVELL_BL_RAM_SIZE;
+
+       /* Calculate how much RAM BL1 is using and how much remains free */
+       bl1_ram_layout.free_base = MARVELL_BL_RAM_BASE;
+       bl1_ram_layout.free_size = MARVELL_BL_RAM_SIZE;
+       reserve_mem(&bl1_ram_layout.free_base,
+                   &bl1_ram_layout.free_size,
+                   BL1_RAM_BASE,
+                   bl1_size);
+}
+
+/* Weak hook: defer to the common Marvell early setup. */
+void bl1_early_platform_setup(void)
+{
+       marvell_bl1_early_platform_setup();
+}
+
+/*
+ * Perform the very early platform specific architecture setup shared between
+ * MARVELL standard platforms. This only does basic initialization. Later
+ * architectural setup (bl1_arch_setup()) does not do anything platform
+ * specific.
+ */
+/* Build BL1's page tables over its RAM/RO/coherent regions, enable EL3 MMU. */
+void marvell_bl1_plat_arch_setup(void)
+{
+       marvell_setup_page_tables(bl1_ram_layout.total_base,
+                                 bl1_ram_layout.total_size,
+                                 BL1_RO_BASE,
+                                 BL1_RO_LIMIT,
+                                 BL1_RO_DATA_BASE,
+                                 BL1_RO_DATA_END
+#if USE_COHERENT_MEM
+                               , BL_COHERENT_RAM_BASE,
+                                 BL_COHERENT_RAM_END
+#endif
+                               );
+       enable_mmu_el3(0);
+}
+
+/* Weak hook: defer to the common Marvell architecture setup. */
+void bl1_plat_arch_setup(void)
+{
+       marvell_bl1_plat_arch_setup();
+}
+
+/*
+ * Perform the platform specific architecture setup shared between
+ * MARVELL standard platforms.
+ */
+/* Common BL1 platform setup: bring up the IO layer and its devices. */
+void marvell_bl1_platform_setup(void)
+{
+       /* Initialise the IO layer and register platform IO devices */
+       plat_marvell_io_setup();
+}
+
+/* Weak hook: defer to the common Marvell platform setup. */
+void bl1_platform_setup(void)
+{
+       marvell_bl1_platform_setup();
+}
+
+/* Last BL1 hook before handover; releases secondaries for EL3 payloads. */
+void bl1_plat_prepare_exit(entry_point_info_t *ep_info)
+{
+#ifdef EL3_PAYLOAD_BASE
+       /*
+        * Program the EL3 payload's entry point address into the CPUs mailbox
+        * in order to release secondary CPUs from their holding pen and make
+        * them jump there.
+        */
+       marvell_program_trusted_mailbox(ep_info->pc);
+       dsbsy();
+       sev();
+#endif
+}
diff --git a/plat/marvell/common/marvell_bl2_setup.c b/plat/marvell/common/marvell_bl2_setup.c
new file mode 100644 (file)
index 0000000..7c87ce3
--- /dev/null
@@ -0,0 +1,275 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#include <arch_helpers.h>
+#include <bl_common.h>
+#include <console.h>
+#include <marvell_def.h>
+#include <platform_def.h>
+#include <plat_marvell.h>
+#include <string.h>
+
+/* Data structure which holds the extents of the trusted SRAM for BL2 */
+static meminfo_t bl2_tzram_layout __aligned(CACHE_WRITEBACK_GRANULE);
+
+
+/*****************************************************************************
+ * This structure represents the superset of information that is passed to
+ * BL31, e.g. while passing control to it from BL2, bl31_params
+ * and other platform specific parameters
+ *****************************************************************************
+ */
+/* Single allocation backing every image_info/entry_point struct handed to
+ * BL31; bl31_params holds pointers into the sibling members.
+ */
+typedef struct bl2_to_bl31_params_mem {
+       bl31_params_t bl31_params;
+       image_info_t bl31_image_info;
+       image_info_t bl32_image_info;
+       image_info_t bl33_image_info;
+       entry_point_info_t bl33_ep_info;
+       entry_point_info_t bl32_ep_info;
+       entry_point_info_t bl31_ep_info;
+} bl2_to_bl31_params_mem_t;
+
+
+static bl2_to_bl31_params_mem_t bl31_params_mem;
+
+
+/* Weak definitions may be overridden in specific MARVELL standard platform */
+#pragma weak bl2_early_platform_setup
+#pragma weak bl2_platform_setup
+#pragma weak bl2_plat_arch_setup
+#pragma weak bl2_plat_sec_mem_layout
+#pragma weak bl2_plat_get_bl31_params
+#pragma weak bl2_plat_get_bl31_ep_info
+#pragma weak bl2_plat_flush_bl31_params
+#pragma weak bl2_plat_set_bl31_ep_info
+#pragma weak bl2_plat_get_scp_bl2_meminfo
+#pragma weak bl2_plat_get_bl32_meminfo
+#pragma weak bl2_plat_set_bl32_ep_info
+#pragma weak bl2_plat_get_bl33_meminfo
+#pragma weak bl2_plat_set_bl33_ep_info
+
+
+/* Return BL2's trusted SRAM layout, copied from BL1's handoff. */
+meminfo_t *bl2_plat_sec_mem_layout(void)
+{
+       return &bl2_tzram_layout;
+}
+
+/*****************************************************************************
+ * This function assigns a pointer to the memory that the platform has kept
+ * aside to pass platform specific and trusted firmware related information
+ * to BL31. This memory is allocated by allocating memory to
+ * bl2_to_bl31_params_mem_t structure which is a superset of all the
+ * structure whose information is passed to BL31
+ * NOTE: This function should be called only once and should be done
+ * before generating params to BL31
+ *****************************************************************************
+ */
+/* Zero and wire up bl31_params_mem; call once, before filling the params. */
+bl31_params_t *bl2_plat_get_bl31_params(void)
+{
+       bl31_params_t *bl2_to_bl31_params;
+
+       /*
+        * Initialise the memory for all the arguments that needs to
+        * be passed to BL31
+        */
+       memset(&bl31_params_mem, 0, sizeof(bl2_to_bl31_params_mem_t));
+
+       /* Assign memory for TF related information */
+       bl2_to_bl31_params = &bl31_params_mem.bl31_params;
+       SET_PARAM_HEAD(bl2_to_bl31_params, PARAM_BL31, VERSION_1, 0);
+
+       /* Fill BL31 related information */
+       bl2_to_bl31_params->bl31_image_info = &bl31_params_mem.bl31_image_info;
+       SET_PARAM_HEAD(bl2_to_bl31_params->bl31_image_info, PARAM_IMAGE_BINARY,
+               VERSION_1, 0);
+
+       /* Fill BL32 related information if it exists */
+#if BL32_BASE
+       bl2_to_bl31_params->bl32_ep_info = &bl31_params_mem.bl32_ep_info;
+       SET_PARAM_HEAD(bl2_to_bl31_params->bl32_ep_info, PARAM_EP,
+               VERSION_1, 0);
+       bl2_to_bl31_params->bl32_image_info = &bl31_params_mem.bl32_image_info;
+       SET_PARAM_HEAD(bl2_to_bl31_params->bl32_image_info, PARAM_IMAGE_BINARY,
+               VERSION_1, 0);
+#endif
+
+       /* Fill BL33 related information */
+       bl2_to_bl31_params->bl33_ep_info = &bl31_params_mem.bl33_ep_info;
+       SET_PARAM_HEAD(bl2_to_bl31_params->bl33_ep_info,
+               PARAM_EP, VERSION_1, 0);
+
+       /* BL33 expects to receive the primary CPU MPID (through x0) */
+       bl2_to_bl31_params->bl33_ep_info->args.arg0 = 0xffff & read_mpidr();
+
+       bl2_to_bl31_params->bl33_image_info = &bl31_params_mem.bl33_image_info;
+       SET_PARAM_HEAD(bl2_to_bl31_params->bl33_image_info, PARAM_IMAGE_BINARY,
+               VERSION_1, 0);
+
+       return bl2_to_bl31_params;
+}
+
+/* Flush the TF params and the TF plat params */
+/* Flush the whole params block so BL31 sees it with caches off. */
+void bl2_plat_flush_bl31_params(void)
+{
+       flush_dcache_range((unsigned long)&bl31_params_mem,
+                       sizeof(bl2_to_bl31_params_mem_t));
+}
+
+/*****************************************************************************
+ * This function returns a pointer to the shared memory that the platform
+ * has kept to point to entry point information of BL31 to BL2
+ *****************************************************************************
+ */
+/* Return BL31's entry point info; debug builds tag arg1 for sanity checks. */
+struct entry_point_info *bl2_plat_get_bl31_ep_info(void)
+{
+#if DEBUG
+       bl31_params_mem.bl31_ep_info.args.arg1 = MARVELL_BL31_PLAT_PARAM_VAL;
+#endif
+
+       return &bl31_params_mem.bl31_ep_info;
+}
+
+/*****************************************************************************
+ * BL1 has passed the extents of the trusted SRAM that should be visible to BL2
+ * in x0. This memory layout is sitting at the base of the free trusted SRAM.
+ * Copy it to a safe location before its reclaimed by later BL2 functionality.
+ *****************************************************************************
+ */
+/* Early BL2 setup: console, copy BL1's memory layout, init the IO layer. */
+void marvell_bl2_early_platform_setup(meminfo_t *mem_layout)
+{
+       /* Initialize the console to provide early debug support */
+       console_init(PLAT_MARVELL_BOOT_UART_BASE,
+                    PLAT_MARVELL_BOOT_UART_CLK_IN_HZ,
+                    MARVELL_CONSOLE_BAUDRATE);
+
+       /* Setup the BL2 memory layout (struct copy before BL1 RAM is reused) */
+       bl2_tzram_layout = *mem_layout;
+
+       /* Initialise the IO layer and register platform IO devices */
+       plat_marvell_io_setup();
+}
+
+/* Weak hook: defer to the common Marvell early setup. */
+void bl2_early_platform_setup(meminfo_t *mem_layout)
+{
+       marvell_bl2_early_platform_setup(mem_layout);
+}
+
+/* No BL2-stage platform work is needed on these platforms. */
+void bl2_platform_setup(void)
+{
+       /* Nothing to do */
+}
+
+/*****************************************************************************
+ * Perform the very early platform specific architectural setup here. At the
+ * moment this is only initializes the mmu in a quick and dirty way.
+ *****************************************************************************
+ */
+/* Build BL2's page tables over its SRAM/code/RO regions, enable EL1 MMU. */
+void marvell_bl2_plat_arch_setup(void)
+{
+       marvell_setup_page_tables(bl2_tzram_layout.total_base,
+                                 bl2_tzram_layout.total_size,
+                                 BL_CODE_BASE,
+                                 BL_CODE_END,
+                                 BL_RO_DATA_BASE,
+                                 BL_RO_DATA_END
+#if USE_COHERENT_MEM
+                               , BL_COHERENT_RAM_BASE,
+                                 BL_COHERENT_RAM_END
+#endif
+                             );
+       enable_mmu_el1(0);
+}
+
+/* Weak hook: defer to the common Marvell architecture setup. */
+void bl2_plat_arch_setup(void)
+{
+       marvell_bl2_plat_arch_setup();
+}
+
+/*****************************************************************************
+ * Populate the extents of memory available for loading SCP_BL2 (if used),
+ * i.e. anywhere in trusted RAM as long as it doesn't overwrite BL2.
+ *****************************************************************************
+ */
+/* SCP_BL2 may load anywhere in BL2's trusted RAM (struct copy out). */
+void bl2_plat_get_scp_bl2_meminfo(meminfo_t *scp_bl2_meminfo)
+{
+       *scp_bl2_meminfo = bl2_tzram_layout;
+}
+
+/*****************************************************************************
+ * Before calling this function BL31 is loaded in memory and its entrypoint
+ * is set by load_image. This is a placeholder for the platform to change
+ * the entrypoint of BL31 and set SPSR and security state.
+ * On MARVELL std. platforms we only set the security state of the entrypoint
+ *****************************************************************************
+ */
+/* Set BL31's security state (secure) and EL3 SPSR; entrypoint is untouched. */
+void bl2_plat_set_bl31_ep_info(image_info_t *bl31_image_info,
+                              entry_point_info_t *bl31_ep_info)
+{
+       SET_SECURITY_STATE(bl31_ep_info->h.attr, SECURE);
+       bl31_ep_info->spsr = SPSR_64(MODE_EL3, MODE_SP_ELX,
+                                       DISABLE_ALL_EXCEPTIONS);
+}
+
+/*****************************************************************************
+ * Populate the extents of memory available for loading BL32
+ *****************************************************************************
+ */
+#ifdef BL32_BASE
+/* BL32 may occupy trusted DRAM from BL32_BASE up to the end of the region. */
+void bl2_plat_get_bl32_meminfo(meminfo_t *bl32_meminfo)
+{
+       /*
+        * Populate the extents of memory available for loading BL32.
+        */
+       bl32_meminfo->total_base = BL32_BASE;
+       bl32_meminfo->free_base = BL32_BASE;
+       bl32_meminfo->total_size =
+                       (TRUSTED_DRAM_BASE + TRUSTED_DRAM_SIZE) - BL32_BASE;
+       bl32_meminfo->free_size =
+                       (TRUSTED_DRAM_BASE + TRUSTED_DRAM_SIZE) - BL32_BASE;
+}
+#endif
+
+/*****************************************************************************
+ * Before calling this function BL32 is loaded in memory and its entrypoint
+ * is set by load_image. This is a placeholder for the platform to change
+ * the entrypoint of BL32 and set SPSR and security state.
+ * On MARVELL std. platforms we only set the security state of the entrypoint
+ *****************************************************************************
+ */
+/* Set BL32's security state (secure) and its SPSR (0: SPD programs it). */
+void bl2_plat_set_bl32_ep_info(image_info_t *bl32_image_info,
+                              entry_point_info_t *bl32_ep_info)
+{
+       SET_SECURITY_STATE(bl32_ep_info->h.attr, SECURE);
+       bl32_ep_info->spsr = marvell_get_spsr_for_bl32_entry();
+}
+
+/*****************************************************************************
+ * Before calling this function BL33 is loaded in memory and its entrypoint
+ * is set by load_image. This is a placeholder for the platform to change
+ * the entrypoint of BL33 and set SPSR and security state.
+ * On MARVELL std. platforms we only set the security state of the entrypoint
+ *****************************************************************************
+ */
+/* Set BL33's security state (non-secure) and target-EL SPSR. */
+void bl2_plat_set_bl33_ep_info(image_info_t *image,
+                              entry_point_info_t *bl33_ep_info)
+{
+
+       SET_SECURITY_STATE(bl33_ep_info->h.attr, NON_SECURE);
+       bl33_ep_info->spsr = marvell_get_spsr_for_bl33_entry();
+}
+
+/*****************************************************************************
+ * Populate the extents of memory available for loading BL33
+ *****************************************************************************
+ */
+/* BL33 may load anywhere in non-secure DRAM; the whole range is free. */
+void bl2_plat_get_bl33_meminfo(meminfo_t *bl33_meminfo)
+{
+       bl33_meminfo->total_base = MARVELL_DRAM_BASE;
+       bl33_meminfo->total_size = MARVELL_DRAM_SIZE;
+       bl33_meminfo->free_base = MARVELL_DRAM_BASE;
+       bl33_meminfo->free_size = MARVELL_DRAM_SIZE;
+}
diff --git a/plat/marvell/common/marvell_bl31_setup.c b/plat/marvell/common/marvell_bl31_setup.c
new file mode 100644 (file)
index 0000000..a74816b
--- /dev/null
@@ -0,0 +1,232 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#include <arch.h>
+#include <assert.h>
+#include <console.h>
+#include <debug.h>
+#include <marvell_def.h>
+#include <marvell_plat_priv.h>
+#include <plat_marvell.h>
+#include <platform.h>
+
+#ifdef USE_CCI
+#include <cci.h>
+#endif
+
+/*
+ * The next 3 constants identify the extents of the code, RO data region and the
+ * limit of the BL31 image.  These addresses are used by the MMU setup code and
+ * therefore they must be page-aligned.  It is the responsibility of the linker
+ * script to ensure that __RO_START__, __RO_END__ & __BL31_END__ linker symbols
+ * refer to page-aligned addresses.
+ */
+#define BL31_END (unsigned long)(&__BL31_END__)
+
+/*
+ * Placeholder variables for copying the arguments that have been passed to
+ * BL31 from BL2.
+ */
+static entry_point_info_t bl32_image_ep_info;
+static entry_point_info_t bl33_image_ep_info;
+
+/* Weak definitions may be overridden in specific ARM standard platform */
+#pragma weak bl31_early_platform_setup
+#pragma weak bl31_platform_setup
+#pragma weak bl31_plat_arch_setup
+#pragma weak bl31_plat_get_next_image_ep_info
+#pragma weak plat_get_syscnt_freq2
+
+/*****************************************************************************
+ * Return a pointer to the 'entry_point_info' structure of the next image for
+ * the security state specified. BL33 corresponds to the non-secure image type
+ * while BL32 corresponds to the secure image type. A NULL pointer is returned
+ * if the image does not exist.
+ *****************************************************************************
+ */
+entry_point_info_t *bl31_plat_get_next_image_ep_info(uint32_t type)
+{
+       entry_point_info_t *next_image_info;
+
+       /* NON_SECURE selects BL33, any other valid state selects BL32.
+        * NOTE(review): unlike the block comment above, this never returns
+        * NULL; if BL32 was not populated the returned struct simply holds
+        * zeroed fields (pc == 0) -- callers presumably check pc, confirm.
+        */
+       assert(sec_state_is_valid(type));
+       next_image_info = (type == NON_SECURE)
+                       ? &bl33_image_ep_info : &bl32_image_ep_info;
+
+       return next_image_info;
+}
+
+/*****************************************************************************
+ * Perform any BL31 early platform setup common to ARM standard platforms.
+ * Here is an opportunity to copy parameters passed by the calling EL (S-EL1
+ * in BL2 & S-EL3 in BL1) before they are lost (potentially). This needs to be
+ * done before the MMU is initialized so that the memory layout can be used
+ * while creating page tables. BL2 has flushed this information to memory, so
+ * we are guaranteed to pick up good data.
+ *****************************************************************************
+ */
+/* Copy BL32/BL33 entry-point info into this image's static storage before
+ * the MMU is enabled. In the RESET_TO_BL31 configuration the entry points
+ * are synthesized locally; otherwise they are copied from BL2's parameters.
+ */
+void marvell_bl31_early_platform_setup(bl31_params_t *from_bl2,
+                               void *plat_params_from_bl2)
+{
+       /* Initialize the console to provide early debug support */
+       console_init(PLAT_MARVELL_BOOT_UART_BASE,
+                    PLAT_MARVELL_BOOT_UART_CLK_IN_HZ,
+                    MARVELL_CONSOLE_BAUDRATE);
+
+#if RESET_TO_BL31
+       /* There are no parameters from BL2 if BL31 is a reset vector */
+       assert(from_bl2 == NULL);
+       assert(plat_params_from_bl2 == NULL);
+
+#ifdef BL32_BASE
+       /* Populate entry point information for BL32 */
+       SET_PARAM_HEAD(&bl32_image_ep_info,
+                               PARAM_EP,
+                               VERSION_1,
+                               0);
+       SET_SECURITY_STATE(bl32_image_ep_info.h.attr, SECURE);
+       bl32_image_ep_info.pc = BL32_BASE;
+       bl32_image_ep_info.spsr = marvell_get_spsr_for_bl32_entry();
+#endif /* BL32_BASE */
+
+       /* Populate entry point information for BL33 */
+       SET_PARAM_HEAD(&bl33_image_ep_info,
+                               PARAM_EP,
+                               VERSION_1,
+                               0);
+       /*
+        * Tell BL31 where the non-trusted software image
+        * is located and the entry state information
+        */
+       bl33_image_ep_info.pc = plat_get_ns_image_entrypoint();
+       bl33_image_ep_info.spsr = marvell_get_spsr_for_bl33_entry();
+       SET_SECURITY_STATE(bl33_image_ep_info.h.attr, NON_SECURE);
+
+#else
+       /*
+        * Check params passed from BL2 should not be NULL,
+        */
+       assert(from_bl2 != NULL);
+       assert(from_bl2->h.type == PARAM_BL31);
+       assert(from_bl2->h.version >= VERSION_1);
+       /*
+        * In debug builds, we pass a special value in 'plat_params_from_bl2'
+        * to verify platform parameters from BL2 to BL31.
+        * In release builds, it's not used.
+        */
+       assert(((unsigned long long)plat_params_from_bl2) ==
+               MARVELL_BL31_PLAT_PARAM_VAL);
+
+       /*
+        * Copy BL32 (if populated by BL2) and BL33 entry point information.
+        * They are stored in Secure RAM, in BL2's address space.
+        */
+       /* bl32_ep_info is optional: BL2 only fills it when BL32 is present */
+       if (from_bl2->bl32_ep_info)
+               bl32_image_ep_info = *from_bl2->bl32_ep_info;
+       bl33_image_ep_info = *from_bl2->bl33_ep_info;
+#endif
+}
+
+/* Generic BL31 early-setup hook (weak): delegates to the Marvell common
+ * implementation, then optionally brings the CCI interconnect up for the
+ * primary cluster. Runs on the cold-boot path only, before other CPUs are
+ * released, so no locking is needed here.
+ */
+void bl31_early_platform_setup(bl31_params_t *from_bl2,
+                               void *plat_params_from_bl2)
+{
+       marvell_bl31_early_platform_setup(from_bl2, plat_params_from_bl2);
+
+#ifdef USE_CCI
+       /*
+        * Initialize CCI for this cluster during cold boot.
+        * No need for locks as no other CPU is active.
+        */
+       plat_marvell_interconnect_init();
+
+       /*
+        * Enable CCI coherency for the primary CPU's cluster.
+        * Platform specific PSCI code will enable coherency for other
+        * clusters.
+        */
+       plat_marvell_interconnect_enter_coherency();
+#endif
+}
+
+/*****************************************************************************
+ * Perform any BL31 platform setup common to ARM standard platforms
+ *****************************************************************************
+ */
+void marvell_bl31_platform_setup(void)
+{
+       /* Initialize the GIC driver, cpu and distributor interfaces */
+       plat_marvell_gic_driver_init();
+       plat_marvell_gic_init();
+
+       /* For Armada-8k-plus family, the SoC includes more than
+        * a single AP die, but the default die that boots is AP #0.
+        * For other families there is only one die (#0).
+        * Initialize psci arch from die 0
+        */
+       marvell_psci_arch_init(0);
+}
+
+/*****************************************************************************
+ * Perform any BL31 platform runtime setup prior to BL31 exit common to ARM
+ * standard platforms
+ *****************************************************************************
+ */
+/* Switch the console to the runtime UART just before BL31 exits; the boot
+ * and runtime UART bases may differ per platform.
+ */
+void marvell_bl31_plat_runtime_setup(void)
+{
+       /* Initialize the runtime console */
+       console_init(PLAT_MARVELL_BL31_RUN_UART_BASE,
+                    PLAT_MARVELL_BL31_RUN_UART_CLK_IN_HZ,
+                    MARVELL_CONSOLE_BAUDRATE);
+}
+
+/* Thin wrappers binding the generic (weak) BL31 porting hooks to the
+ * Marvell common implementations above.
+ */
+void bl31_platform_setup(void)
+{
+       marvell_bl31_platform_setup();
+}
+
+void bl31_plat_runtime_setup(void)
+{
+       marvell_bl31_plat_runtime_setup();
+}
+
+/*****************************************************************************
+ * Perform the very early platform specific architectural setup shared between
+ * ARM standard platforms. This only does basic initialization. Later
+ * architectural setup (bl31_arch_setup()) does not do anything platform
+ * specific.
+ *****************************************************************************
+ */
+/* Build BL31's page tables covering its image, code, RO data (and the
+ * coherent RAM region when USE_COHERENT_MEM is set), then enable the EL3
+ * MMU. BL31_CACHE_DISABLE keeps the data cache off for debug purposes.
+ */
+void marvell_bl31_plat_arch_setup(void)
+{
+       marvell_setup_page_tables(BL31_BASE,
+                                 BL31_END - BL31_BASE,
+                                 BL_CODE_BASE,
+                                 BL_CODE_END,
+                                 BL_RO_DATA_BASE,
+                                 BL_RO_DATA_END
+#if USE_COHERENT_MEM
+                               , BL_COHERENT_RAM_BASE,
+                                 BL_COHERENT_RAM_END
+#endif
+                       );
+
+#if BL31_CACHE_DISABLE
+       /* NOTE(review): message text says "BL3"; presumably "BL31" is meant */
+       enable_mmu_el3(DISABLE_DCACHE);
+       INFO("Cache is disabled in BL3\n");
+#else
+       enable_mmu_el3(0);
+#endif
+}
+
+void bl31_plat_arch_setup(void)
+{
+       marvell_bl31_plat_arch_setup();
+}
+
+/* Report the system counter frequency (Hz) used by the generic timer. */
+unsigned int plat_get_syscnt_freq2(void)
+{
+       return PLAT_REF_CLK_IN_HZ;
+}
diff --git a/plat/marvell/common/marvell_cci.c b/plat/marvell/common/marvell_cci.c
new file mode 100644 (file)
index 0000000..2df4802
--- /dev/null
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#include <cci.h>
+#include <plat_marvell.h>
+
+static const int cci_map[] = {
+       PLAT_MARVELL_CCI_CLUSTER0_SL_IFACE_IX,
+       PLAT_MARVELL_CCI_CLUSTER1_SL_IFACE_IX
+};
+
+/****************************************************************************
+ * The following functions are defined as weak to allow a platform to override
+ * the way ARM CCI driver is initialised and used.
+ ****************************************************************************
+ */
+#pragma weak plat_marvell_interconnect_init
+#pragma weak plat_marvell_interconnect_enter_coherency
+#pragma weak plat_marvell_interconnect_exit_coherency
+
+
+/****************************************************************************
+ * Helper function to initialize ARM CCI driver.
+ ****************************************************************************
+ */
+void plat_marvell_interconnect_init(void)
+{
+       cci_init(PLAT_MARVELL_CCI_BASE, cci_map, ARRAY_SIZE(cci_map));
+}
+
+/****************************************************************************
+ * Helper function to place current master into coherency
+ ****************************************************************************
+ */
+void plat_marvell_interconnect_enter_coherency(void)
+{
+       /* Affinity level 1 of the MPIDR selects this CPU's cluster slave
+        * interface index into cci_map.
+        */
+       cci_enable_snoop_dvm_reqs(MPIDR_AFFLVL1_VAL(read_mpidr_el1()));
+}
+
+/****************************************************************************
+ * Helper function to remove current master from coherency
+ ****************************************************************************
+ */
+void plat_marvell_interconnect_exit_coherency(void)
+{
+       cci_disable_snoop_dvm_reqs(MPIDR_AFFLVL1_VAL(read_mpidr_el1()));
+}
diff --git a/plat/marvell/common/marvell_common.mk b/plat/marvell/common/marvell_common.mk
new file mode 100644 (file)
index 0000000..3ee2f3d
--- /dev/null
@@ -0,0 +1,67 @@
+# Copyright (C) 2018 Marvell International Ltd.
+#
+# SPDX-License-Identifier:     BSD-3-Clause
+# https://spdx.org/licenses
+
+MARVELL_PLAT_BASE              := plat/marvell
+MARVELL_PLAT_INCLUDE_BASE      := include/plat/marvell
+
+include $(MARVELL_PLAT_BASE)/version.mk
+include $(MARVELL_PLAT_BASE)/marvell.mk
+
+VERSION_STRING                 +=(Marvell-${SUBVERSION})
+
+SEPARATE_CODE_AND_RODATA       := 1
+
+# flag to switch from PLL to ARO
+ARO_ENABLE                     := 0
+$(eval $(call add_define,ARO_ENABLE))
+# Enable/Disable LLC
+LLC_ENABLE                     := 1
+$(eval $(call add_define,LLC_ENABLE))
+
+PLAT_INCLUDES          +=      -I. -Iinclude/common/tbbr               \
+                               -I$(MARVELL_PLAT_INCLUDE_BASE)/common   \
+                               -I$(MARVELL_PLAT_INCLUDE_BASE)/common/aarch64
+
+
+PLAT_BL_COMMON_SOURCES  +=      lib/xlat_tables/xlat_tables_common.c                   \
+                               lib/xlat_tables/aarch64/xlat_tables.c                   \
+                               $(MARVELL_PLAT_BASE)/common/aarch64/marvell_common.c    \
+                               $(MARVELL_PLAT_BASE)/common/aarch64/marvell_helpers.S
+
+BL1_SOURCES            +=      drivers/delay_timer/delay_timer.c                       \
+                               drivers/io/io_fip.c                                     \
+                               drivers/io/io_memmap.c                                  \
+                               drivers/io/io_storage.c                                 \
+                               $(MARVELL_PLAT_BASE)/common/marvell_bl1_setup.c         \
+                               $(MARVELL_PLAT_BASE)/common/marvell_io_storage.c        \
+                               $(MARVELL_PLAT_BASE)/common/plat_delay_timer.c
+
+ifdef EL3_PAYLOAD_BASE
+# Need the arm_program_trusted_mailbox() function to release secondary CPUs from
+# their holding pen
+endif
+
+BL2_SOURCES            +=      drivers/io/io_fip.c                                     \
+                               drivers/io/io_memmap.c                                  \
+                               drivers/io/io_storage.c                                 \
+                               $(MARVELL_PLAT_BASE)/common/marvell_bl2_setup.c         \
+                               $(MARVELL_PLAT_BASE)/common/marvell_io_storage.c
+
+BL31_SOURCES           +=      $(MARVELL_PLAT_BASE)/common/marvell_bl31_setup.c        \
+                               $(MARVELL_PLAT_BASE)/common/marvell_pm.c                \
+                               $(MARVELL_PLAT_BASE)/common/marvell_topology.c          \
+                               plat/common/plat_psci_common.c                          \
+                               $(MARVELL_PLAT_BASE)/common/plat_delay_timer.c          \
+                               drivers/delay_timer/delay_timer.c
+
+# PSCI functionality
+$(eval $(call add_define,CONFIG_ARM64))
+
+# MSS (SCP) build
+ifeq (${MSS_SUPPORT}, 1)
+include $(MARVELL_PLAT_BASE)/common/mss/mss_common.mk
+endif
+
+fip: mrvl_flash
diff --git a/plat/marvell/common/marvell_ddr_info.c b/plat/marvell/common/marvell_ddr_info.c
new file mode 100644 (file)
index 0000000..68bff99
--- /dev/null
@@ -0,0 +1,110 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#include <debug.h>
+#include <platform_def.h>
+#include <ddr_info.h>
+#include <mmio.h>
+
+#define DRAM_CH0_MMAP_LOW_REG(iface, cs, base) \
+       (base + DRAM_CH0_MMAP_LOW_OFFSET + (iface) * 0x10000 + (cs) * 0x8)
+#define DRAM_CH0_MMAP_HIGH_REG(iface, cs, base)        \
+       (DRAM_CH0_MMAP_LOW_REG(iface, cs, base) + 4)
+#define DRAM_CS_VALID_ENABLED_MASK             0x1
+#define DRAM_AREA_LENGTH_OFFS                  16
+#define DRAM_AREA_LENGTH_MASK                  (0x1f << DRAM_AREA_LENGTH_OFFS)
+#define DRAM_START_ADDRESS_L_OFFS              23
+#define DRAM_START_ADDRESS_L_MASK              \
+                                       (0x1ff << DRAM_START_ADDRESS_L_OFFS)
+#define DRAM_START_ADDR_HTOL_OFFS              32
+
+#define DRAM_MAX_CS_NUM                                2
+
+#define DRAM_CS_ENABLED(iface, cs, base) \
+       (mmio_read_32(DRAM_CH0_MMAP_LOW_REG(iface, cs, base)) & \
+        DRAM_CS_VALID_ENABLED_MASK)
+#define GET_DRAM_REGION_SIZE_CODE(iface, cs, base) \
+       (mmio_read_32(DRAM_CH0_MMAP_LOW_REG(iface, cs, base)) & \
+       DRAM_AREA_LENGTH_MASK) >> DRAM_AREA_LENGTH_OFFS
+
+/* Mapping between DDR area length and real DDR size is specific and looks like
+ * below:
+ * 0 => 384 MB
+ * 1 => 768 MB
+ * 2 => 1536 MB
+ * 3 => 3 GB
+ * 4 => 6 GB
+ *
+ * 7 => 8 MB
+ * 8 => 16 MB
+ * 9 => 32 MB
+ * 10 => 64 MB
+ * 11 => 128 MB
+ * 12 => 256 MB
+ * 13 => 512 MB
+ * 14 => 1 GB
+ * 15 => 2 GB
+ * 16 => 4 GB
+ * 17 => 8 GB
+ * 18 => 16 GB
+ * 19 => 32 GB
+ * 20 => 64 GB
+ * 21 => 128 GB
+ * 22 => 256 GB
+ * 23 => 512 GB
+ * 24 => 1 TB
+ * 25 => 2 TB
+ * 26 => 4 TB
+ *
+ * to calculate real size we need to use two different formulas:
+ * -- GET_DRAM_REGION_SIZE_ODD for values 0-4 (DRAM_REGION_SIZE_ODD)
+ * -- GET_DRAM_REGION_SIZE_EVEN for values 7-26 (DRAM_REGION_SIZE_EVEN)
+ * using mentioned formulas we cover whole mapping between "Area length" value
+ * and real size (see above mapping).
+ */
+#define DRAM_REGION_SIZE_EVEN(C)       (((C) >= 7) && ((C) <= 26))
+#define GET_DRAM_REGION_SIZE_EVEN(C)   ((uint64_t)1 << ((C) + 16))
+#define DRAM_REGION_SIZE_ODD(C)                ((C) <= 4)
+#define GET_DRAM_REGION_SIZE_ODD(C)    ((uint64_t)0x18000000 << (C))
+
+
+/* Sum the sizes of all enabled DRAM regions across every interface and
+ * chip-select, decoding each region's "area length" code with the even/odd
+ * formulas documented above. Returns 0 if any enabled CS reports a code
+ * outside both ranges (5, 6, or > 26).
+ */
+uint64_t mvebu_get_dram_size(uint64_t ap_base_addr)
+{
+       uint64_t mem_size = 0;
+       uint8_t region_code;
+       uint8_t cs, iface;
+
+       for (iface = 0; iface < DRAM_MAX_IFACE; iface++) {
+               for (cs = 0; cs < DRAM_MAX_CS_NUM; cs++) {
+
+                       /* Exit loop on first disabled DRAM CS */
+                       if (!DRAM_CS_ENABLED(iface, cs, ap_base_addr))
+                               break;
+
+                       /* Decode area length for current CS
+                        * from register value
+                        */
+                       region_code =
+                               GET_DRAM_REGION_SIZE_CODE(iface, cs,
+                                                         ap_base_addr);
+
+                       if (DRAM_REGION_SIZE_EVEN(region_code)) {
+                               mem_size +=
+                                       GET_DRAM_REGION_SIZE_EVEN(region_code);
+                       } else if (DRAM_REGION_SIZE_ODD(region_code)) {
+                               mem_size +=
+                                       GET_DRAM_REGION_SIZE_ODD(region_code);
+                       } else {
+                               WARN("%s: Invalid mem region (0x%x) CS#%d\n",
+                                     __func__, region_code, cs);
+                               return 0;
+                       }
+               }
+       }
+
+       return mem_size;
+}
diff --git a/plat/marvell/common/marvell_gicv2.c b/plat/marvell/common/marvell_gicv2.c
new file mode 100644 (file)
index 0000000..ba8e409
--- /dev/null
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#include <gicv2.h>
+#include <plat_marvell.h>
+#include <platform.h>
+#include <platform_def.h>
+
+/*
+ * The following functions are defined as weak to allow a platform to override
+ * the way the GICv2 driver is initialised and used.
+ */
+#pragma weak plat_marvell_gic_driver_init
+#pragma weak plat_marvell_gic_init
+
+/*
+ * On a GICv2 system, the Group 1 secure interrupts are treated as Group 0
+ * interrupts.
+ */
+static const interrupt_prop_t marvell_interrupt_props[] = {
+       PLAT_MARVELL_G1S_IRQ_PROPS(GICV2_INTR_GROUP0),
+       PLAT_MARVELL_G0_IRQ_PROPS(GICV2_INTR_GROUP0)
+};
+
+static unsigned int target_mask_array[PLATFORM_CORE_COUNT];
+
+/*
+ * Ideally `marvell_gic_data` structure definition should be a `const` but it is
+ * kept as modifiable for overwriting with different GICD and GICC base when
+ * running on FVP with VE memory map.
+ */
+static gicv2_driver_data_t marvell_gic_data = {
+       .gicd_base = PLAT_MARVELL_GICD_BASE,
+       .gicc_base = PLAT_MARVELL_GICC_BASE,
+       .interrupt_props = marvell_interrupt_props,
+       .interrupt_props_num = ARRAY_SIZE(marvell_interrupt_props),
+       .target_masks = target_mask_array,
+       .target_masks_num = ARRAY_SIZE(target_mask_array),
+};
+
+/*
+ * ARM common helper to initialize the GICv2 only driver.
+ */
+void plat_marvell_gic_driver_init(void)
+{
+       gicv2_driver_init(&marvell_gic_data);
+}
+
+/* Initialize the distributor (global and per-CPU parts), program this
+ * core's target mask, then enable its CPU interface.
+ */
+void plat_marvell_gic_init(void)
+{
+       gicv2_distif_init();
+       gicv2_pcpu_distif_init();
+       gicv2_set_pe_target_mask(plat_my_core_pos());
+       gicv2_cpuif_enable();
+}
diff --git a/plat/marvell/common/marvell_io_storage.c b/plat/marvell/common/marvell_io_storage.c
new file mode 100644 (file)
index 0000000..cb9ece2
--- /dev/null
@@ -0,0 +1,206 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#include <assert.h>
+#include <bl_common.h>         /* For ARRAY_SIZE */
+#include <debug.h>
+#include <firmware_image_package.h>
+#include <io_driver.h>
+#include <io_fip.h>
+#include <io_memmap.h>
+#include <io_storage.h>
+#include <platform_def.h>
+#include <string.h>
+
+/* IO devices */
+static const io_dev_connector_t *fip_dev_con;
+static uintptr_t fip_dev_handle;
+static const io_dev_connector_t *memmap_dev_con;
+static uintptr_t memmap_dev_handle;
+
+static const io_block_spec_t fip_block_spec = {
+       .offset = PLAT_MARVELL_FIP_BASE,
+       .length = PLAT_MARVELL_FIP_MAX_SIZE
+};
+
+static const io_uuid_spec_t bl2_uuid_spec = {
+       .uuid = UUID_TRUSTED_BOOT_FIRMWARE_BL2,
+};
+
+static const io_uuid_spec_t scp_bl2_uuid_spec = {
+       .uuid = UUID_SCP_FIRMWARE_SCP_BL2,
+};
+
+static const io_uuid_spec_t bl31_uuid_spec = {
+       .uuid = UUID_EL3_RUNTIME_FIRMWARE_BL31,
+};
+static const io_uuid_spec_t bl32_uuid_spec = {
+       .uuid = UUID_SECURE_PAYLOAD_BL32,
+};
+static const io_uuid_spec_t bl33_uuid_spec = {
+       .uuid = UUID_NON_TRUSTED_FIRMWARE_BL33,
+};
+
+static int open_fip(const uintptr_t spec);
+static int open_memmap(const uintptr_t spec);
+
+struct plat_io_policy {
+       uintptr_t *dev_handle;
+       uintptr_t image_spec;
+       int (*check)(const uintptr_t spec);
+};
+
+/* By default, Marvell platforms load images from the FIP */
+static const struct plat_io_policy policies[] = {
+       [FIP_IMAGE_ID] = {
+               &memmap_dev_handle,
+               (uintptr_t)&fip_block_spec,
+               open_memmap
+       },
+       [BL2_IMAGE_ID] = {
+               &fip_dev_handle,
+               (uintptr_t)&bl2_uuid_spec,
+               open_fip
+       },
+       [SCP_BL2_IMAGE_ID] = {
+               &fip_dev_handle,
+               (uintptr_t)&scp_bl2_uuid_spec,
+               open_fip
+       },
+       [BL31_IMAGE_ID] = {
+               &fip_dev_handle,
+               (uintptr_t)&bl31_uuid_spec,
+               open_fip
+       },
+       [BL32_IMAGE_ID] = {
+               &fip_dev_handle,
+               (uintptr_t)&bl32_uuid_spec,
+               open_fip
+       },
+       [BL33_IMAGE_ID] = {
+               &fip_dev_handle,
+               (uintptr_t)&bl33_uuid_spec,
+               open_fip
+       },
+};
+
+
+/* Weak definitions may be overridden in specific ARM standard platform */
+#pragma weak plat_marvell_io_setup
+#pragma weak plat_marvell_get_alt_image_source
+
+
+/* Probe for the image in the FIP: init the FIP device, then open and
+ * immediately close the image to confirm it is present. Returns 0 if
+ * the image is accessible via the FIP.
+ */
+static int open_fip(const uintptr_t spec)
+{
+       int result;
+       uintptr_t local_image_handle;
+
+       /* See if a Firmware Image Package is available */
+       result = io_dev_init(fip_dev_handle, (uintptr_t)FIP_IMAGE_ID);
+       if (result == 0) {
+               result = io_open(fip_dev_handle, spec, &local_image_handle);
+               if (result == 0) {
+                       VERBOSE("Using FIP\n");
+                       io_close(local_image_handle);
+               }
+       }
+       return result;
+}
+
+
+/* Probe the memory-mapped device in the same open/close fashion. */
+static int open_memmap(const uintptr_t spec)
+{
+       int result;
+       uintptr_t local_image_handle;
+
+       result = io_dev_init(memmap_dev_handle, (uintptr_t)NULL);
+       if (result == 0) {
+               result = io_open(memmap_dev_handle, spec, &local_image_handle);
+               if (result == 0) {
+                       VERBOSE("Using Memmap\n");
+                       io_close(local_image_handle);
+               }
+       }
+       return result;
+}
+
+
+/* Register the FIP and memmap IO drivers and cache their device handles
+ * for later use by plat_get_image_source(). Failures are asserted on in
+ * debug builds and ignored in release builds.
+ */
+void marvell_io_setup(void)
+{
+       int io_result;
+
+       io_result = register_io_dev_fip(&fip_dev_con);
+       assert(io_result == 0);
+
+       io_result = register_io_dev_memmap(&memmap_dev_con);
+       assert(io_result == 0);
+
+       /* Open connections to devices and cache the handles */
+       io_result = io_dev_open(fip_dev_con, (uintptr_t)NULL,
+                               &fip_dev_handle);
+       assert(io_result == 0);
+
+       io_result = io_dev_open(memmap_dev_con, (uintptr_t)NULL,
+                               &memmap_dev_handle);
+       assert(io_result == 0);
+
+       /* Ignore improbable errors in release builds */
+       (void)io_result;
+}
+
+/* Weak platform hook; defaults to the common setup above. */
+void plat_marvell_io_setup(void)
+{
+       marvell_io_setup();
+}
+
+/* Weak fallback used when the primary IO policy fails: no alternative
+ * source by default.
+ * NOTE(review): relies on ENOENT, i.e. <errno.h>; not visibly included
+ * in this file -- confirm it arrives via another header.
+ */
+int plat_marvell_get_alt_image_source(
+       unsigned int image_id __attribute__((unused)),
+       uintptr_t *dev_handle __attribute__((unused)),
+       uintptr_t *image_spec __attribute__((unused)))
+{
+       /* By default do not try an alternative */
+       return -ENOENT;
+}
+
+/*
+ * Return an IO device handle and specification which can be used to access
+ * an image. Use this to enforce platform load policy
+ */
+int plat_get_image_source(unsigned int image_id, uintptr_t *dev_handle,
+                         uintptr_t *image_spec)
+{
+       int result;
+       const struct plat_io_policy *policy;
+
+       assert(image_id < ARRAY_SIZE(policies));
+
+       /* Probe the policy's primary source; fall back to the (weak)
+        * alternative-source hook only if the probe fails.
+        */
+       policy = &policies[image_id];
+       result = policy->check(policy->image_spec);
+       if (result == 0) {
+               *image_spec = policy->image_spec;
+               *dev_handle = *(policy->dev_handle);
+       } else {
+               VERBOSE("Trying alternative IO\n");
+               result = plat_marvell_get_alt_image_source(image_id, dev_handle,
+                                                      image_spec);
+       }
+
+       return result;
+}
+
+/*
+ * See if a Firmware Image Package is available,
+ * by checking if TOC is valid or not.
+ */
+/* Returns 1 when the FIP device initializes cleanly (valid TOC), else 0. */
+int marvell_io_is_toc_valid(void)
+{
+       int result;
+
+       result = io_dev_init(fip_dev_handle, (uintptr_t)FIP_IMAGE_ID);
+
+       return result == 0;
+}
diff --git a/plat/marvell/common/marvell_pm.c b/plat/marvell/common/marvell_pm.c
new file mode 100644 (file)
index 0000000..2a75790
--- /dev/null
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <psci.h>
+#include <marvell_pm.h>
+
+/* Standard ARM platforms are expected to export plat_arm_psci_pm_ops */
+extern const plat_psci_ops_t plat_arm_psci_pm_ops;
+
+/*****************************************************************************
+ * Private function to program the mailbox for a cpu before it is released
+ * from reset. This function assumes that the mail box base is within
+ * the MARVELL_SHARED_RAM region
+ *****************************************************************************
+ */
+/* Write the magic value and the secondary-CPU entry address into the
+ * shared-RAM mailbox, flushing the two slots when the region is cached.
+ */
+void marvell_program_mailbox(uintptr_t address)
+{
+       uintptr_t *mailbox = (void *)PLAT_MARVELL_MAILBOX_BASE;
+
+       /*
+        * Ensure that the PLAT_MARVELL_MAILBOX_BASE is within
+        * MARVELL_SHARED_RAM region.
+        */
+       assert((PLAT_MARVELL_MAILBOX_BASE >= MARVELL_SHARED_RAM_BASE) &&
+              ((PLAT_MARVELL_MAILBOX_BASE + sizeof(*mailbox)) <=
+              (MARVELL_SHARED_RAM_BASE + MARVELL_SHARED_RAM_SIZE)));
+
+       mailbox[MBOX_IDX_MAGIC] = MVEBU_MAILBOX_MAGIC_NUM;
+       mailbox[MBOX_IDX_SEC_ADDR] = address;
+
+       /* Flush data cache if the mail box shared RAM is cached */
+#if PLAT_MARVELL_SHARED_RAM_CACHED
+       /* NOTE(review): the "8 *" offset and the 2*sizeof(uint64_t) length
+        * assume 8-byte slots with MBOX_IDX_SEC_ADDR == MBOX_IDX_MAGIC + 1
+        * -- confirm against marvell_pm.h.
+        */
+       flush_dcache_range((uintptr_t)PLAT_MARVELL_MAILBOX_BASE +
+                          8 * MBOX_IDX_MAGIC,
+                          2 * sizeof(uint64_t));
+#endif
+}
+
+/*****************************************************************************
+ * The ARM Standard platform definition of platform porting API
+ * `plat_setup_psci_ops`.
+ *****************************************************************************
+ */
+/* Export the ARM-standard PSCI ops and program the warm-boot mailbox with
+ * the secure entry point. Always succeeds (returns 0).
+ */
+int plat_setup_psci_ops(uintptr_t sec_entrypoint,
+                       const plat_psci_ops_t **psci_ops)
+{
+       *psci_ops = &plat_arm_psci_pm_ops;
+
+       /* Setup mailbox with entry point. */
+       marvell_program_mailbox(sec_entrypoint);
+       return 0;
+}
diff --git a/plat/marvell/common/marvell_topology.c b/plat/marvell/common/marvell_topology.c
new file mode 100644 (file)
index 0000000..a40ff6f
--- /dev/null
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#include <plat_marvell.h>
+
+/* The power domain tree descriptor */
+unsigned char marvell_power_domain_tree_desc[PLAT_MARVELL_CLUSTER_COUNT + 1];
+
+/*****************************************************************************
+ * This function dynamically constructs the topology according to
+ * PLAT_MARVELL_CLUSTER_COUNT and returns it.
+ *****************************************************************************
+ */
+const unsigned char *plat_get_power_domain_tree_desc(void)
+{
+       int i;
+
+       /*
+        * The power domain tree does not have a single system level power
+        * domain i.e. a single root node. The first entry in the power domain
+        * descriptor specifies the number of power domains at the highest power
+        * level.
+        * For Marvell Platform this is the number of cluster power domains.
+        */
+       marvell_power_domain_tree_desc[0] = PLAT_MARVELL_CLUSTER_COUNT;
+
+       /* One entry per cluster, each holding its core count */
+       for (i = 0; i < PLAT_MARVELL_CLUSTER_COUNT; i++)
+               marvell_power_domain_tree_desc[i + 1] =
+                                       PLAT_MARVELL_CLUSTER_CORE_COUNT;
+
+       return marvell_power_domain_tree_desc;
+}
+
+/*****************************************************************************
+ * This function validates an MPIDR by checking whether it falls within the
+ * acceptable bounds. An error code (-1) is returned if an incorrect mpidr
+ * is passed.
+ *****************************************************************************
+ */
+int marvell_check_mpidr(u_register_t mpidr)
+{
+       unsigned int nb_id, cluster_id, cpu_id;
+
+       mpidr &= MPIDR_AFFINITY_MASK;
+
+       /* Reject any bits outside affinity levels 0-2 */
+       if (mpidr & ~(MPIDR_CLUSTER_MASK | MPIDR_CPU_MASK |
+           MPIDR_AFFLVL_MASK << MPIDR_AFF2_SHIFT))
+               return -1;
+
+       /* Get north bridge ID */
+       nb_id = MPIDR_AFFLVL3_VAL(mpidr);
+       cluster_id = MPIDR_AFFLVL1_VAL(mpidr);
+       cpu_id = MPIDR_AFFLVL0_VAL(mpidr);
+
+       /* NOTE(review): the north-bridge id is bounded by the CLUSTER count
+        * macro -- presumably a dedicated NB-count macro was intended;
+        * confirm against the platform definitions.
+        */
+       if (nb_id >= PLAT_MARVELL_CLUSTER_COUNT)
+               return -1;
+
+       if (cluster_id >= PLAT_MARVELL_CLUSTER_COUNT)
+               return -1;
+
+       if (cpu_id >= PLAT_MARVELL_CLUSTER_CORE_COUNT)
+               return -1;
+
+       return 0;
+}
+
+/*****************************************************************************
+ * This function implements a part of the critical interface between the PSCI
+ * generic layer and the platform that allows the former to query the platform
+ * to convert an MPIDR to a unique linear index. An error code (-1) is returned
+ * in case the MPIDR is invalid.
+ *****************************************************************************
+ */
+/* Validate the MPIDR, then map it to a linear core index; -1 if invalid. */
+int plat_core_pos_by_mpidr(u_register_t mpidr)
+{
+       if (marvell_check_mpidr(mpidr) == -1)
+               return -1;
+
+       return plat_marvell_calc_core_pos(mpidr);
+}
diff --git a/plat/marvell/common/mrvl_sip_svc.c b/plat/marvell/common/mrvl_sip_svc.c
new file mode 100644 (file)
index 0000000..ec293af
--- /dev/null
@@ -0,0 +1,127 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#include <ap_setup.h>
+#include <cache_llc.h>
+#include <debug.h>
+#include <marvell_plat_priv.h>
+#include <runtime_svc.h>
+#include <smcc.h>
+#include "comphy/phy-comphy-cp110.h"
+
+/* #define DEBUG_COMPHY */
+#ifdef DEBUG_COMPHY
+#define debug(format...) NOTICE(format)
+#else
+#define debug(format, arg...)
+#endif
+
+/* COMPHY related FIDs */
+#define MV_SIP_COMPHY_POWER_ON 0x82000001
+#define MV_SIP_COMPHY_POWER_OFF        0x82000002
+#define MV_SIP_COMPHY_PLL_LOCK 0x82000003
+#define MV_SIP_COMPHY_XFI_TRAIN        0x82000004
+#define MV_SIP_COMPHY_DIG_RESET        0x82000005
+
+/* Miscellaneous FIDs */
+#define MV_SIP_DRAM_SIZE       0x82000010
+#define MV_SIP_LLC_ENABLE      0x82000011
+
+#define MAX_LANE_NR            6
+#define MVEBU_COMPHY_OFFSET    0x441000
+#define MVEBU_SD_OFFSET                0x120000
+
+/* This macro is used to identify COMPHY related calls from SMC function ID */
+#define is_comphy_fid(fid)     \
+       ((fid) >= MV_SIP_COMPHY_POWER_ON && (fid) <= MV_SIP_COMPHY_DIG_RESET)
+
+
+uintptr_t mrvl_sip_smc_handler(uint32_t smc_fid,
+                              u_register_t x1,
+                              u_register_t x2,
+                              u_register_t x3,
+                              u_register_t x4,
+                              void *cookie,
+                              void *handle,
+                              u_register_t flags)
+{
+       u_register_t ret;
+       int i;
+
+       debug("%s: got SMC (0x%x) x1 0x%lx, x2 0x%lx, x3 0x%lx\n",
+                                                __func__, smc_fid, x1, x2, x3);
+       if (is_comphy_fid(smc_fid)) {
+
+               /* Some systems pass the SD phys address instead of the COMPHY
+                * phys address - convert it
+                */
+               if (x1 & MVEBU_SD_OFFSET)
+                       x1 = (x1 & ~0xffffff) + MVEBU_COMPHY_OFFSET;
+
+               if ((x1 & 0xffffff) != MVEBU_COMPHY_OFFSET) {
+                       ERROR("%s: Wrong smc (0x%x) address: %lx\n",
+                             __func__, smc_fid, x1);
+                       SMC_RET1(handle, SMC_UNK);
+               }
+
+               if (x2 >= MAX_LANE_NR) {
+                       ERROR("%s: Wrong smc (0x%x) lane nr: %lx\n",
+                             __func__, smc_fid, x2);
+                       SMC_RET1(handle, SMC_UNK);
+               }
+       }
+
+       switch (smc_fid) {
+
+       /* COMPHY related FIDs */
+       case MV_SIP_COMPHY_POWER_ON:
+               /* x1:  comphy_base, x2: comphy_index, x3: comphy_mode */
+               ret = mvebu_cp110_comphy_power_on(x1, x2, x3);
+               SMC_RET1(handle, ret);
+       case MV_SIP_COMPHY_POWER_OFF:
+               /* x1:  comphy_base, x2: comphy_index */
+               ret = mvebu_cp110_comphy_power_off(x1, x2);
+               SMC_RET1(handle, ret);
+       case MV_SIP_COMPHY_PLL_LOCK:
+               /* x1:  comphy_base, x2: comphy_index */
+               ret = mvebu_cp110_comphy_is_pll_locked(x1, x2);
+               SMC_RET1(handle, ret);
+       case MV_SIP_COMPHY_XFI_TRAIN:
+               /* x1:  comphy_base, x2: comphy_index */
+               ret = mvebu_cp110_comphy_xfi_rx_training(x1, x2);
+               SMC_RET1(handle, ret);
+       case MV_SIP_COMPHY_DIG_RESET:
+               /* x1:  comphy_base, x2: comphy_index, x3: mode, x4: command */
+               ret = mvebu_cp110_comphy_digital_reset(x1, x2, x3, x4);
+               SMC_RET1(handle, ret);
+
+       /* Miscellaneous FIDs */
+       case MV_SIP_DRAM_SIZE:
+               /* x1:  ap_base_addr */
+               ret = mvebu_get_dram_size(x1);
+               SMC_RET1(handle, ret);
+       case MV_SIP_LLC_ENABLE:
+               for (i = 0; i < ap_get_count(); i++)
+                       llc_runtime_enable(i);
+
+               SMC_RET1(handle, 0);
+
+       default:
+               ERROR("%s: unhandled SMC (0x%x)\n", __func__, smc_fid);
+               SMC_RET1(handle, SMC_UNK);
+       }
+}
+
+/* Define a runtime service descriptor for fast SMC calls */
+DECLARE_RT_SVC(
+       marvell_sip_svc,
+       OEN_SIP_START,
+       OEN_SIP_END,
+       SMC_TYPE_FAST,
+       NULL,
+       mrvl_sip_smc_handler
+);
diff --git a/plat/marvell/common/mss/mss_common.mk b/plat/marvell/common/mss/mss_common.mk
new file mode 100644 (file)
index 0000000..898b6dc
--- /dev/null
@@ -0,0 +1,20 @@
+#
+# Copyright (C) 2018 Marvell International Ltd.
+#
+# SPDX-License-Identifier:     BSD-3-Clause
+# https://spdx.org/licenses
+#
+
+
+PLAT_MARVELL           :=      plat/marvell
+MSS_SOURCE             :=      $(PLAT_MARVELL)/common/mss
+
+BL2_SOURCES            +=      $(MSS_SOURCE)/mss_scp_bootloader.c              \
+                               $(PLAT_MARVELL)/common/plat_delay_timer.c       \
+                               drivers/delay_timer/delay_timer.c               \
+                               $(MARVELL_DRV)                                  \
+                               $(PLAT_FAMILY_BASE)/$(PLAT)/board/marvell_plat_config.c
+
+BL31_SOURCES           +=      $(MSS_SOURCE)/mss_ipc_drv.c
+
+PLAT_INCLUDES           +=      -I$(MSS_SOURCE)
diff --git a/plat/marvell/common/mss/mss_ipc_drv.c b/plat/marvell/common/mss/mss_ipc_drv.c
new file mode 100644 (file)
index 0000000..731c315
--- /dev/null
@@ -0,0 +1,111 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#include <plat_marvell.h>
+#include <debug.h>
+#include <string.h>
+#include <mss_ipc_drv.h>
+#include <mmio.h>
+
+#define IPC_MSG_BASE_MASK              MVEBU_REGS_BASE_MASK
+
+#define IPC_CH_NUM_OF_MSG              (16)
+#define IPC_CH_MSG_IDX                 (-1)
+
+unsigned long mv_pm_ipc_msg_base;
+unsigned int  mv_pm_ipc_queue_size;
+
+unsigned int msg_sync;
+int msg_index = IPC_CH_MSG_IDX;
+
+/******************************************************************************
+ * mss_pm_ipc_init
+ *
+ * DESCRIPTION: Initialize PM IPC infrastructure
+ ******************************************************************************
+ */
+int mv_pm_ipc_init(unsigned long ipc_control_addr)
+{
+       struct mss_pm_ipc_ctrl *ipc_control =
+                       (struct mss_pm_ipc_ctrl *)ipc_control_addr;
+
+       /* Initialize PM IPC control block */
+       mv_pm_ipc_msg_base     = ipc_control->msg_base_address |
+                                IPC_MSG_BASE_MASK;
+       mv_pm_ipc_queue_size   = ipc_control->queue_size;
+
+       return 0;
+}
+
+/******************************************************************************
+ * mv_pm_ipc_queue_addr_get
+ *
+ * DESCRIPTION: Returns the IPC queue address
+ ******************************************************************************
+ */
+unsigned int mv_pm_ipc_queue_addr_get(void)
+{
+       unsigned int addr;
+
+       inv_dcache_range((uint64_t)&msg_index, sizeof(msg_index));
+       msg_index = msg_index + 1;
+       if (msg_index >= IPC_CH_NUM_OF_MSG)
+               msg_index = 0;
+
+       addr = (unsigned int)(mv_pm_ipc_msg_base +
+              (msg_index * mv_pm_ipc_queue_size));
+
+       flush_dcache_range((uint64_t)&msg_index, sizeof(msg_index));
+
+       return addr;
+}
+
+/******************************************************************************
+ * mv_pm_ipc_msg_rx
+ *
+ * DESCRIPTION: Retrieve message from IPC channel
+ ******************************************************************************
+ */
+int mv_pm_ipc_msg_rx(unsigned int channel_id, struct mss_pm_ipc_msg *msg)
+{
+       unsigned int addr = mv_pm_ipc_queue_addr_get();
+
+       msg->msg_reply = mmio_read_32(addr + IPC_MSG_REPLY_LOC);
+
+       return 0;
+}
+
+/******************************************************************************
+ * mv_pm_ipc_msg_tx
+ *
+ * DESCRIPTION: Send message via IPC channel
+ ******************************************************************************
+ */
+int mv_pm_ipc_msg_tx(unsigned int channel_id, unsigned int msg_id,
+                                       unsigned int cluster_power_state)
+{
+       unsigned int addr = mv_pm_ipc_queue_addr_get();
+
+       /* Validate the entry for message placed by the host is free */
+       if (mmio_read_32(addr + IPC_MSG_STATE_LOC) == IPC_MSG_FREE) {
+               inv_dcache_range((uint64_t)&msg_sync, sizeof(msg_sync));
+               msg_sync = msg_sync + 1;
+               flush_dcache_range((uint64_t)&msg_sync, sizeof(msg_sync));
+
+               mmio_write_32(addr + IPC_MSG_SYNC_ID_LOC, msg_sync);
+               mmio_write_32(addr + IPC_MSG_ID_LOC, msg_id);
+               mmio_write_32(addr + IPC_MSG_CPU_ID_LOC, channel_id);
+               mmio_write_32(addr + IPC_MSG_POWER_STATE_LOC,
+                             cluster_power_state);
+               mmio_write_32(addr + IPC_MSG_STATE_LOC, IPC_MSG_OCCUPY);
+
+       } else {
+               ERROR("%s: FAILED\n", __func__);
+       }
+
+       return 0;
+}
diff --git a/plat/marvell/common/mss/mss_ipc_drv.h b/plat/marvell/common/mss/mss_ipc_drv.h
new file mode 100644 (file)
index 0000000..28eb907
--- /dev/null
@@ -0,0 +1,120 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#ifndef __PM_IPC_DRV_H
+#define __PM_IPC_DRV_H
+
+#include <psci.h>
+
+#define MV_PM_FW_IPC_VERSION_MAGIC     (0xCA530000) /* Do NOT change */
+/* Increment for each version */
+#define MV_PM_FW_IPC_VERSION_SEQ       (0x00000001)
+#define MV_PM_FW_IPC_VERSION           (MV_PM_FW_IPC_VERSION_MAGIC | \
+                                        MV_PM_FW_IPC_VERSION_SEQ)
+
+#define IPC_MSG_STATE_LOC              (0x0)
+#define IPC_MSG_SYNC_ID_LOC            (0x4)
+#define IPC_MSG_ID_LOC                 (0x8)
+#define IPC_MSG_RET_CH_ID_LOC  (0xC)
+#define IPC_MSG_CPU_ID_LOC             (0x10)
+#define IPC_MSG_CLUSTER_ID_LOC (0x14)
+#define IPC_MSG_SYSTEM_ID_LOC  (0x18)
+#define IPC_MSG_POWER_STATE_LOC        (0x1C)
+#define IPC_MSG_REPLY_LOC              (0x20)
+#define IPC_MSG_RESERVED_LOC   (0x24)
+
+/* IPC initialization state */
+enum mss_pm_ipc_init_state {
+       IPC_UN_INITIALIZED      = 1,
+       IPC_INITIALIZED         = 2
+};
+
+/* IPC queue direction */
+enum mss_pm_ipc_init_msg_dir {
+       IPC_MSG_TX      = 0,
+       IPC_MSG_RX      = 1
+};
+
+/* IPC message state */
+enum mss_pm_ipc_msg_state {
+       IPC_MSG_FREE    = 1,
+       IPC_MSG_OCCUPY  = 2
+
+};
+
+/* IPC control block */
+struct mss_pm_ipc_ctrl {
+       unsigned int ctrl_base_address;
+       unsigned int msg_base_address;
+       unsigned int num_of_channels;
+       unsigned int channel_size;
+       unsigned int queue_size;
+};
+
+/* IPC message types */
+enum mss_pm_msg_id {
+       PM_IPC_MSG_CPU_SUSPEND          = 1,
+       PM_IPC_MSG_CPU_OFF              = 2,
+       PM_IPC_MSG_CPU_ON               = 3,
+       PM_IPC_MSG_SYSTEM_RESET         = 4,
+       PM_IPC_MSG_SYSTEM_SUSPEND       = 5,
+       PM_IPC_MAX_MSG
+};
+
+struct mss_pm_ipc_msg {
+       unsigned int    msg_sync_id;    /*
+                                        * Sync number, validate message
+                                        * reply corresponding to message
+                                        * received
+                                        */
+       unsigned int    msg_id;         /* Message Id */
+       unsigned int    ret_channel_id; /* IPC channel reply */
+       unsigned int    cpu_id;         /* CPU Id */
+       unsigned int    cluster_id;     /* Cluster Id */
+       unsigned int    system_id;      /* System Id */
+       unsigned int    power_state;
+       unsigned int    msg_reply;      /* Message reply */
+};
+
+/* IPC queue */
+struct mss_pm_ipc_queue {
+       unsigned int    state;
+       struct mss_pm_ipc_msg           msg;
+};
+
+/* IPC channel */
+struct mss_pm_ipc_ch {
+       struct mss_pm_ipc_queue *tx_queue;
+       struct mss_pm_ipc_queue *rx_queue;
+};
+
+/*****************************************************************************
+ * mv_pm_ipc_init
+ *
+ * DESCRIPTION: Initialize PM IPC infrastructure
+ *****************************************************************************
+ */
+int mv_pm_ipc_init(unsigned long ipc_control_addr);
+
+/*****************************************************************************
+ * mv_pm_ipc_msg_rx
+ *
+ * DESCRIPTION: Retrieve message from IPC channel
+ *****************************************************************************
+ */
+int mv_pm_ipc_msg_rx(unsigned int channel_id, struct mss_pm_ipc_msg *msg);
+
+/*****************************************************************************
+ * mv_pm_ipc_msg_tx
+ *
+ * DESCRIPTION: Send message via IPC channel
+ *****************************************************************************
+ */
+int mv_pm_ipc_msg_tx(unsigned int channel_id, unsigned int msg_id,
+                       unsigned int cluster_power_state);
+
+#endif /* __PM_IPC_DRV_H */
diff --git a/plat/marvell/common/mss/mss_mem.h b/plat/marvell/common/mss/mss_mem.h
new file mode 100644 (file)
index 0000000..efff59e
--- /dev/null
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#ifndef __MSS_PM_MEM_H
+#define __MSS_PM_MEM_H
+
+/* MSS SRAM Memory base */
+#define MSS_SRAM_PM_CONTROL_BASE               (MVEBU_REGS_BASE + 0x520000)
+
+enum mss_pm_ctrl_handshake {
+       MSS_UN_INITIALIZED      = 0,
+       MSS_COMPATIBILITY_ERROR = 1,
+       MSS_ACKNOWLEDGMENT      = 2,
+       HOST_ACKNOWLEDGMENT     = 3
+};
+
+enum mss_pm_ctrl_rtos_env {
+       MSS_MULTI_PROCESS_ENV   = 0,
+       MSS_SINGLE_PROCESS_ENV  = 1,
+       MSS_MAX_PROCESS_ENV
+};
+
+struct mss_pm_ctrl_block {
+       /* This field is used to synchronize the Host
+        * and MSS initialization sequence
+        * Valid Values
+        * 0 - Un-Initialized
+        * 1 - Compatibility Error
+        * 2 - MSS Acknowledgment
+        * 3 - Host Acknowledgment
+        */
+       unsigned int handshake;
+
+       /*
+        * This field includes the Host IPC version. Once received by the MSS,
+        * it will be compared to the MSS IPC version, and MSS Acknowledge will
+        * be set to "compatibility error" in case there is no match
+        */
+       unsigned int ipc_version;
+       unsigned int ipc_base_address;
+       unsigned int ipc_state;
+
+       /* Following fields defines firmware core architecture */
+       unsigned int num_of_cores;
+       unsigned int num_of_clusters;
+       unsigned int num_of_cores_per_cluster;
+
+       /* Following fields define pm trace debug base address */
+       unsigned int pm_trace_ctrl_base_address;
+       unsigned int pm_trace_info_base_address;
+       unsigned int pm_trace_info_core_size;
+
+       unsigned int ctrl_blk_size;
+};
+
+#endif /* __MSS_PM_MEM_H */
diff --git a/plat/marvell/common/mss/mss_scp_bl2_format.h b/plat/marvell/common/mss/mss_scp_bl2_format.h
new file mode 100644 (file)
index 0000000..c04df72
--- /dev/null
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#ifndef __MSS_SCP_BL2_FORMAT_H
+#define __MSS_SCP_BL2_FORMAT_H
+
+#define MAX_NR_OF_FILES        5
+#define FILE_MAGIC     0xddd01ff
+#define HEADER_VERSION 0x1
+
+#define MSS_IDRAM_SIZE 0x10000 /* 64KB */
+#define MG_SRAM_SIZE   0x20000 /* 128KB */
+
+/* Types definitions */
+typedef struct file_header {
+       /* Magic specific for concatenated file (used for validation) */
+       uint32_t magic;
+       uint32_t nr_of_imgs;    /* Number of images concatenated */
+} file_header_t;
+
+/* Types definitions */
+enum cm3_t {
+       MSS_AP,
+       MSS_CP0,
+       MSS_CP1,
+       MSS_CP2,
+       MSS_CP3,
+       MG_CP0,
+       MG_CP1,
+};
+
+typedef struct img_header {
+       uint32_t type;          /* CM3 type, can be one of cm3_t */
+       uint32_t length;        /* Image length */
+       uint32_t version;       /* For sanity checks and future
+                                * extended functionality
+                                */
+} img_header_t;
+
+#endif /* __MSS_SCP_BL2_FORMAT_H */
diff --git a/plat/marvell/common/mss/mss_scp_bootloader.c b/plat/marvell/common/mss/mss_scp_bootloader.c
new file mode 100644 (file)
index 0000000..334fcfc
--- /dev/null
@@ -0,0 +1,308 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#include <assert.h>
+#include <debug.h>
+#include <mmio.h>
+#include <arch_helpers.h> /* for cache maintenance operations */
+#include <platform_def.h>
+#include <delay_timer.h>
+
+#include <plat_pm_trace.h>
+#include <mss_scp_bootloader.h>
+#include <mss_ipc_drv.h>
+#include <mss_mem.h>
+#include <mss_scp_bl2_format.h>
+
+#define MSS_DMA_SRCBR(base)            (base + 0xC0)
+#define MSS_DMA_DSTBR(base)            (base + 0xC4)
+#define MSS_DMA_CTRLR(base)            (base + 0xC8)
+#define MSS_M3_RSTCR(base)             (base + 0xFC)
+
+#define MSS_DMA_CTRLR_SIZE_OFFSET      (0)
+#define MSS_DMA_CTRLR_REQ_OFFSET       (15)
+#define MSS_DMA_CTRLR_REQ_SET          (1)
+#define MSS_DMA_CTRLR_ACK_OFFSET       (12)
+#define MSS_DMA_CTRLR_ACK_MASK         (0x1)
+#define MSS_DMA_CTRLR_ACK_READY                (1)
+#define MSS_M3_RSTCR_RST_OFFSET                (0)
+#define MSS_M3_RSTCR_RST_OFF           (1)
+
+#define MSS_DMA_TIMEOUT                        1000
+#define MSS_EXTERNAL_SPACE             0x50000000
+#define MSS_EXTERNAL_ADDR_MASK         0xfffffff
+
+#define DMA_SIZE                       128
+
+#define MSS_HANDSHAKE_TIMEOUT          50
+
+static int mss_check_image_ready(volatile struct mss_pm_ctrl_block *mss_pm_crtl)
+{
+       int timeout = MSS_HANDSHAKE_TIMEOUT;
+
+       /* Wait for SCP to signal it's ready */
+       while ((mss_pm_crtl->handshake != MSS_ACKNOWLEDGMENT) &&
+                                               (timeout-- > 0))
+               mdelay(1);
+
+       if (mss_pm_crtl->handshake != MSS_ACKNOWLEDGMENT)
+               return -1;
+
+       mss_pm_crtl->handshake = HOST_ACKNOWLEDGMENT;
+
+       return 0;
+}
+
+static int mss_image_load(uint32_t src_addr, uint32_t size, uintptr_t mss_regs)
+{
+       uint32_t i, loop_num, timeout;
+
+       /* Check if the img size is not bigger than ID-RAM size of MSS CM3 */
+       if (size > MSS_IDRAM_SIZE) {
+               ERROR("image is too big to fit into MSS CM3 memory\n");
+               return 1;
+       }
+
+       NOTICE("Loading MSS image from addr. 0x%x Size 0x%x to MSS at 0x%lx\n",
+              src_addr, size, mss_regs);
+       /* load image to MSS RAM using DMA */
+       loop_num = (size / DMA_SIZE) + (((size & (DMA_SIZE - 1)) == 0) ? 0 : 1);
+
+       for (i = 0; i < loop_num; i++) {
+               /* write destination and source addresses */
+               mmio_write_32(MSS_DMA_SRCBR(mss_regs),
+                             MSS_EXTERNAL_SPACE |
+                             ((src_addr & MSS_EXTERNAL_ADDR_MASK) +
+                             (i * DMA_SIZE)));
+               mmio_write_32(MSS_DMA_DSTBR(mss_regs), (i * DMA_SIZE));
+
+               dsb(); /* make sure DMA data is ready before triggering it */
+
+               /* set the DMA control register */
+               mmio_write_32(MSS_DMA_CTRLR(mss_regs), ((MSS_DMA_CTRLR_REQ_SET
+                             << MSS_DMA_CTRLR_REQ_OFFSET) |
+                             (DMA_SIZE << MSS_DMA_CTRLR_SIZE_OFFSET)));
+
+               /* Poll DMA_ACK at MSS_DMACTLR until it is ready */
+               timeout = MSS_DMA_TIMEOUT;
+               while (timeout) {
+                       if ((mmio_read_32(MSS_DMA_CTRLR(mss_regs)) >>
+                            MSS_DMA_CTRLR_ACK_OFFSET & MSS_DMA_CTRLR_ACK_MASK)
+                               == MSS_DMA_CTRLR_ACK_READY) {
+                               break;
+                       }
+
+                       udelay(50);
+                       timeout--;
+               }
+
+               if (timeout == 0) {
+                       ERROR("\nDMA failed to load MSS image\n");
+                       return 1;
+               }
+       }
+
+       bl2_plat_configure_mss_windows(mss_regs);
+
+       /* Release M3 from reset */
+       mmio_write_32(MSS_M3_RSTCR(mss_regs), (MSS_M3_RSTCR_RST_OFF <<
+                     MSS_M3_RSTCR_RST_OFFSET));
+
+       NOTICE("Done\n");
+
+       return 0;
+}
+
+/* Load image to MSS AP and do PM-related initialization.
+ * Note that this routine differs from the other CM3 loading routines, because
+ * the AP firmware is dedicated to PM and therefore some additional PM
+ * initialization is required.
+ */
+static int mss_ap_load_image(uintptr_t single_img,
+                            uint32_t image_size, uint32_t ap_idx)
+{
+       volatile struct mss_pm_ctrl_block *mss_pm_crtl;
+       int ret;
+
+       /* TODO: add PM Control Info from platform */
+       mss_pm_crtl = (struct mss_pm_ctrl_block *)MSS_SRAM_PM_CONTROL_BASE;
+       mss_pm_crtl->ipc_version                = MV_PM_FW_IPC_VERSION;
+       mss_pm_crtl->num_of_clusters            = PLAT_MARVELL_CLUSTER_COUNT;
+       mss_pm_crtl->num_of_cores_per_cluster   =
+                                               PLAT_MARVELL_CLUSTER_CORE_COUNT;
+       mss_pm_crtl->num_of_cores               = PLAT_MARVELL_CLUSTER_COUNT *
+                                               PLAT_MARVELL_CLUSTER_CORE_COUNT;
+       mss_pm_crtl->pm_trace_ctrl_base_address = AP_MSS_ATF_CORE_CTRL_BASE;
+       mss_pm_crtl->pm_trace_info_base_address = AP_MSS_ATF_CORE_INFO_BASE;
+       mss_pm_crtl->pm_trace_info_core_size    = AP_MSS_ATF_CORE_INFO_SIZE;
+       VERBOSE("MSS Control Block = 0x%x\n", MSS_SRAM_PM_CONTROL_BASE);
+       VERBOSE("mss_pm_crtl->ipc_version                = 0x%x\n",
+               mss_pm_crtl->ipc_version);
+       VERBOSE("mss_pm_crtl->num_of_cores               = 0x%x\n",
+               mss_pm_crtl->num_of_cores);
+       VERBOSE("mss_pm_crtl->num_of_clusters            = 0x%x\n",
+               mss_pm_crtl->num_of_clusters);
+       VERBOSE("mss_pm_crtl->num_of_cores_per_cluster   = 0x%x\n",
+               mss_pm_crtl->num_of_cores_per_cluster);
+       VERBOSE("mss_pm_crtl->pm_trace_ctrl_base_address = 0x%x\n",
+               mss_pm_crtl->pm_trace_ctrl_base_address);
+       VERBOSE("mss_pm_crtl->pm_trace_info_base_address = 0x%x\n",
+               mss_pm_crtl->pm_trace_info_base_address);
+       VERBOSE("mss_pm_crtl->pm_trace_info_core_size    = 0x%x\n",
+               mss_pm_crtl->pm_trace_info_core_size);
+
+       /* TODO: add checksum to image */
+       VERBOSE("Send info about the SCP_BL2 image to be transferred to SCP\n");
+
+       ret = mss_image_load(single_img, image_size,
+                            bl2_plat_get_ap_mss_regs(ap_idx));
+       if (ret != 0) {
+               ERROR("SCP Image load failed\n");
+               return -1;
+       }
+
+       /* check that the image was loaded successfully */
+       ret = mss_check_image_ready(mss_pm_crtl);
+       if (ret != 0)
+               NOTICE("SCP Image doesn't contain PM firmware\n");
+
+       return 0;
+}
+
+/* Load CM3 image (single_img) to CM3 pointed by cm3_type */
+static int load_img_to_cm3(enum cm3_t cm3_type,
+                          uintptr_t single_img, uint32_t image_size)
+{
+       int ret, ap_idx, cp_index;
+       uint32_t ap_count = bl2_plat_get_ap_count();
+
+       switch (cm3_type) {
+       case MSS_AP:
+               for (ap_idx = 0; ap_idx < ap_count; ap_idx++) {
+                       NOTICE("Load image to AP%d MSS\n", ap_idx);
+                       ret = mss_ap_load_image(single_img, image_size, ap_idx);
+                       if (ret != 0)
+                               return ret;
+               }
+               break;
+       case MSS_CP0:
+       case MSS_CP1:
+       case MSS_CP2:
+       case MSS_CP3:
+               /* In enum cm3_t: MSS_AP = 0
+                * MSS_CP0 = 1
+                * .
+                * .
+                * MSS_CP3 = 4
+                * Actual CP index is MSS_CPX - 1
+                */
+               cp_index = cm3_type - 1;
+               for (ap_idx = 0; ap_idx < ap_count; ap_idx++) {
+                       /* Check if we should load this image
+                        * according to number of CPs
+                        */
+                       if (bl2_plat_get_cp_count(ap_idx) <= cp_index) {
+                               NOTICE("Skipping MSS CP%d related image\n",
+                                      cp_index);
+                               break;
+                       }
+
+                       NOTICE("Load image to CP%d MSS AP%d\n",
+                              cp_index, ap_idx);
+                       ret = mss_image_load(single_img, image_size,
+                                            bl2_plat_get_cp_mss_regs(
+                                                    ap_idx, cp_index));
+                       if (ret != 0) {
+                               ERROR("SCP Image load failed\n");
+                               return -1;
+                       }
+               }
+               break;
+       case MG_CP0:
+               /* TODO: */
+               NOTICE("Load image to CP0 MG not supported\n");
+               break;
+       case MG_CP1:
+               /* TODO: */
+               NOTICE("Load image to CP1 MG not supported\n");
+               break;
+       default:
+               ERROR("SCP_BL2 wrong img format (cm3_type=%d)\n", cm3_type);
+               break;
+       }
+
+       return 0;
+}
+
+/* The Armada 8K has 5 service CPUs and the Armada 7K has 3. Therefore a
+ * method is required for loading firmware to all of the service CPUs.
+ * To achieve that, the scp_bl2 image is in fact a file containing up to 5
+ * concatenated firmware images; this routine splits the concatenated image
+ * into single images dedicated to the appropriate service CPUs and loads them.
+ */
+static int split_and_load_bl2_image(void *image)
+{
+       file_header_t *file_hdr;
+       img_header_t *img_hdr;
+       uintptr_t single_img;
+       int i;
+
+       file_hdr = (file_header_t *)image;
+
+       if (file_hdr->magic != FILE_MAGIC) {
+               ERROR("SCP_BL2 wrong img format\n");
+               return -1;
+       }
+
+       if (file_hdr->nr_of_imgs > MAX_NR_OF_FILES) {
+               ERROR("SCP_BL2 concatenated image contains to many images\n");
+               return -1;
+       }
+
+       img_hdr = (img_header_t *)((uintptr_t)image + sizeof(file_header_t));
+       single_img = (uintptr_t)image + sizeof(file_header_t) +
+                                   sizeof(img_header_t) * file_hdr->nr_of_imgs;
+
+       NOTICE("SCP_BL2 contains %d concatenated images\n",
+                                                         file_hdr->nr_of_imgs);
+       for (i = 0; i < file_hdr->nr_of_imgs; i++) {
+
+               /* Before loading make sanity check on header */
+               if (img_hdr->version != HEADER_VERSION) {
+                       ERROR("Wrong header, img corrupted exiting\n");
+                       return -1;
+               }
+
+               load_img_to_cm3(img_hdr->type, single_img, img_hdr->length);
+
+               /* Prepare offsets for next run */
+               single_img += img_hdr->length;
+               img_hdr++;
+       }
+
+       return 0;
+}
+
+int scp_bootloader_transfer(void *image, unsigned int image_size)
+{
+#ifdef SCP_BL2_BASE
+       assert((uintptr_t) image == SCP_BL2_BASE);
+#endif
+
+       VERBOSE("Concatenated img size %d\n", image_size);
+
+       if (image_size == 0) {
+               ERROR("SCP_BL2 image size can't be 0 (current size = 0x%x)\n",
+                                                                   image_size);
+               return -1;
+       }
+
+       if (split_and_load_bl2_image(image))
+               return -1;
+
+       return 0;
+}
diff --git a/plat/marvell/common/mss/mss_scp_bootloader.h b/plat/marvell/common/mss/mss_scp_bootloader.h
new file mode 100644 (file)
index 0000000..67c387a
--- /dev/null
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#ifndef __MSS_SCP_BOOTLOADER_H__
+#define __MSS_SCP_BOOTLOADER_H__
+
+int scp_bootloader_transfer(void *image, unsigned int image_size);
+uintptr_t bl2_plat_get_cp_mss_regs(int ap_idx, int cp_idx);
+uintptr_t bl2_plat_get_ap_mss_regs(int ap_idx);
+uint32_t bl2_plat_get_cp_count(int ap_idx);
+uint32_t bl2_plat_get_ap_count(void);
+void bl2_plat_configure_mss_windows(uintptr_t mss_regs);
+int bl2_plat_mss_check_image_ready(void);
+
+#endif /* __MSS_SCP_BOOTLOADER_H__ */
diff --git a/plat/marvell/common/plat_delay_timer.c b/plat/marvell/common/plat_delay_timer.c
new file mode 100644 (file)
index 0000000..dfc77c7
--- /dev/null
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#include <arch_helpers.h>
+#include <delay_timer.h>
+#include <mvebu_def.h>
+
+#define SYS_COUNTER_FREQ_IN_MHZ        (COUNTER_FREQUENCY/1000000)
+
+static uint32_t plat_get_timer_value(void)
+{
+       /*
+        * Generic delay timer implementation expects the timer to be a down
+        * counter. We apply bitwise NOT operator to the tick values returned
+        * by read_cntpct_el0() to simulate the down counter.
+        */
+       return (uint32_t)(~read_cntpct_el0());
+}
+
+static const timer_ops_t plat_timer_ops = {
+       .get_timer_value        = plat_get_timer_value,
+       .clk_mult               = 1,
+       .clk_div                = SYS_COUNTER_FREQ_IN_MHZ
+};
+
+void plat_delay_timer_init(void)
+{
+       timer_init(&plat_timer_ops);
+}
diff --git a/plat/marvell/marvell.mk b/plat/marvell/marvell.mk
new file mode 100644 (file)
index 0000000..217ad46
--- /dev/null
@@ -0,0 +1,53 @@
+# Copyright (C) 2018 Marvell International Ltd.
+#
+# SPDX-License-Identifier:     BSD-3-Clause
+# https://spdx.org/licenses
+
+# Marvell images
+BOOT_IMAGE                     := boot-image.bin
+BOOT_ENC_IMAGE                 := boot-image-enc.bin
+FLASH_IMAGE                    := flash-image.bin
+
+# Make non-trusted image by default
+MARVELL_SECURE_BOOT            := 0
+$(eval $(call add_define,MARVELL_SECURE_BOOT))
+
+# Enable compilation for Palladium emulation platform
+PALLADIUM                      := 0
+$(eval $(call add_define,PALLADIUM))
+
+ifeq (${MARVELL_SECURE_BOOT},1)
+DOIMAGE_SEC_FLAGS := -c $(DOIMAGE_SEC)
+DOIMAGE_LIBS_CHECK = \
+        if ! [ -d "/usr/include/mbedtls" ]; then \
+                        echo "****************************************" >&2; \
+                        echo "Missing mbedTLS installation! " >&2; \
+                        echo "Please download it from \"tls.mbed.org\"" >&2; \
+                       echo "Alternatively on Debian/Ubuntu system install" >&2; \
+                       echo "\"libmbedtls-dev\" package" >&2; \
+                        echo "Make sure to use version 2.1.0 or later" >&2; \
+                        echo "****************************************" >&2; \
+                exit 1; \
+        else if ! [ -f "/usr/include/libconfig.h" ]; then \
+                        echo "********************************************************" >&2; \
+                        echo "Missing Libconfig installation!" >&2; \
+                        echo "Please download it from \"www.hyperrealm.com/libconfig/\"" >&2; \
+                        echo "Alternatively on Debian/Ubuntu system install packages" >&2; \
+                        echo "\"libconfig8\" and \"libconfig8-dev\"" >&2; \
+                        echo "********************************************************" >&2; \
+                exit 1; \
+        fi \
+        fi
+else #MARVELL_SECURE_BOOT
+DOIMAGE_LIBS_CHECK =
+DOIMAGE_SEC_FLAGS =
+endif #MARVELL_SECURE_BOOT
+
+mrvl_clean:
+       @echo "  Doimage CLEAN"
+       ${Q}${MAKE} PLAT=${PLAT} --no-print-directory -C ${DOIMAGEPATH} clean
+
+${DOIMAGETOOL}: mrvl_clean
+       ${Q}${MAKE} --no-print-directory -C ${DOIMAGEPATH} WTMI_IMG=$(WTMI_IMG)
+
+
diff --git a/plat/marvell/version.mk b/plat/marvell/version.mk
new file mode 100644 (file)
index 0000000..017e119
--- /dev/null
@@ -0,0 +1 @@
+SUBVERSION = devel-18.08.0
index 0fbd3f78549b952b0b0c405a550c5cc7f58d695d..8f391df7e86acb80ffbda501df6802d0a7c4fc0b 100644 (file)
@@ -59,7 +59,7 @@ SECTIONS
          * executable.  No RW data from the next section must creep in.
          * Ensure the rest of the current memory page is unused.
          */
-        . = NEXT(PAGE_SIZE);
+        . = ALIGN(PAGE_SIZE);
         __RO_END__ = .;
     } >RAM
 
@@ -161,7 +161,7 @@ SECTIONS
          * as device memory.  No other unexpected data must creep in.
          * Ensure the rest of the current memory page is unused.
          */
-        . = NEXT(PAGE_SIZE);
+        . = ALIGN(PAGE_SIZE);
         __COHERENT_RAM_END__ = .;
     } >RAM2
 #endif
index f5208d09adf4aa6a6fa99b44328efb76e609cef7..56e22402f99929939c2683d33e2a7bfd1cb02e0c 100644 (file)
@@ -14,30 +14,30 @@ void trace_power_flow(unsigned long mpidr, unsigned char mode)
 {
        switch (mode) {
        case CPU_UP:
-               trace_log("core %ld:%ld ON\n",
+               trace_log("core %lld:%lld ON\n",
                          (mpidr & MPIDR_CLUSTER_MASK) >> MPIDR_AFFINITY_BITS,
                          (mpidr & MPIDR_CPU_MASK));
                break;
        case CPU_DOWN:
-               trace_log("core %ld:%ld OFF\n",
+               trace_log("core %lld:%lld OFF\n",
                          (mpidr & MPIDR_CLUSTER_MASK) >> MPIDR_AFFINITY_BITS,
                          (mpidr & MPIDR_CPU_MASK));
                break;
        case CPU_SUSPEND:
-               trace_log("core %ld:%ld SUSPEND\n",
+               trace_log("core %lld:%lld SUSPEND\n",
                          (mpidr & MPIDR_CLUSTER_MASK) >> MPIDR_AFFINITY_BITS,
                          (mpidr & MPIDR_CPU_MASK));
                break;
        case CLUSTER_UP:
-               trace_log("cluster %ld ON\n",
+               trace_log("cluster %lld ON\n",
                          (mpidr & MPIDR_CLUSTER_MASK) >> MPIDR_AFFINITY_BITS);
                break;
        case CLUSTER_DOWN:
-               trace_log("cluster %ld OFF\n",
+               trace_log("cluster %lld OFF\n",
                          (mpidr & MPIDR_CLUSTER_MASK) >> MPIDR_AFFINITY_BITS);
                break;
        case CLUSTER_SUSPEND:
-               trace_log("cluster %ld SUSPEND\n",
+               trace_log("cluster %lld SUSPEND\n",
                          (mpidr & MPIDR_CLUSTER_MASK) >> MPIDR_AFFINITY_BITS);
                break;
        default:
index 5c0a468d70f5350e3a258c033d7257fcce38616c..787dad13fabec780e812d1cd38897ec8440f67d6 100644 (file)
@@ -14,30 +14,30 @@ void trace_power_flow(unsigned long mpidr, unsigned char mode)
 {
        switch (mode) {
        case CPU_UP:
-               trace_log("core %ld:%ld ON\n",
+               trace_log("core %lld:%lld ON\n",
                          (mpidr & MPIDR_CLUSTER_MASK) >> MPIDR_AFFINITY_BITS,
                          (mpidr & MPIDR_CPU_MASK));
                break;
        case CPU_DOWN:
-               trace_log("core %ld:%ld OFF\n",
+               trace_log("core %lld:%lld OFF\n",
                          (mpidr & MPIDR_CLUSTER_MASK) >> MPIDR_AFFINITY_BITS,
                          (mpidr & MPIDR_CPU_MASK));
                break;
        case CPU_SUSPEND:
-               trace_log("core %ld:%ld SUSPEND\n",
+               trace_log("core %lld:%lld SUSPEND\n",
                          (mpidr & MPIDR_CLUSTER_MASK) >> MPIDR_AFFINITY_BITS,
                          (mpidr & MPIDR_CPU_MASK));
                break;
        case CLUSTER_UP:
-               trace_log("cluster %ld ON\n",
+               trace_log("cluster %lld ON\n",
                          (mpidr & MPIDR_CLUSTER_MASK) >> MPIDR_AFFINITY_BITS);
                break;
        case CLUSTER_DOWN:
-               trace_log("cluster %ld OFF\n",
+               trace_log("cluster %lld OFF\n",
                          (mpidr & MPIDR_CLUSTER_MASK) >> MPIDR_AFFINITY_BITS);
                break;
        case CLUSTER_SUSPEND:
-               trace_log("cluster %ld SUSPEND\n",
+               trace_log("cluster %lld SUSPEND\n",
                          (mpidr & MPIDR_CLUSTER_MASK) >> MPIDR_AFFINITY_BITS);
                break;
        default:
index 2fe4e7dbc5b6190dc56e58c5b9e35be210c98b23..f0a7036efede6f3522ad35350136268540f243c3 100644 (file)
@@ -116,9 +116,6 @@ void bl31_early_platform_setup(bl31_params_t *from_bl2,
 {
        plat_params_from_bl2_t *plat_params =
                (plat_params_from_bl2_t *)plat_params_from_bl2;
-#if LOG_LEVEL >= LOG_LEVEL_INFO
-       int impl = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK;
-#endif
        image_info_t bl32_img_info = { {0} };
        uint64_t tzdram_start, tzdram_end, bl32_start, bl32_end;
 
@@ -227,8 +224,9 @@ void bl31_early_platform_setup(bl31_params_t *from_bl2,
        /* Early platform setup for Tegra SoCs */
        plat_early_platform_setup();
 
-       INFO("BL3-1: Boot CPU: %s Processor [%lx]\n", (impl == DENVER_IMPL) ?
-               "Denver" : "ARM", read_mpidr());
+       INFO("BL3-1: Boot CPU: %s Processor [%lx]\n",
+            (((read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK)
+             == DENVER_IMPL) ? "Denver" : "ARM", read_mpidr());
 }
 
 #ifdef SPD_trusty
index 5a1854b4218668b6222e3b2087841450033de97e..991fe6cf866144fd5a4b723762462e14c0f27b32 100644 (file)
@@ -12,7 +12,6 @@
        .macro pmusram_entry_func _name
        .section .pmusram.entry, "ax"
        .type \_name, %function
-       .func \_name
        .cfi_startproc
        \_name:
        .endm
index 70fd9bfc8eccf26fbe480d7c50da6f878cc785c7..546c09a1e588761d548384fd582ba6106603ff99 100644 (file)
@@ -15,7 +15,6 @@
        .cfi_sections .debug_frame
        .section .sram.text, "ax"
        .type \_name, %function
-       .func \_name
        .cfi_startproc
        \_name:
        .endm
index 65c1bf2d4f05a104bf960f36b6e6f2f4cb1be787..7974b602d9d1edd13b6d24235b875614d4432f4d 100644 (file)
@@ -175,9 +175,5 @@ func plat_reset_handler
        mov     w1, #0x80000000
        str     wzr, [x0, #RPI3_INTC_CONTROL_OFFSET]
        str     w1, [x0, #RPI3_INTC_PRESCALER_OFFSET]
-
-       /* wire mailbox 3 to the FIQ line */
-       mov     w1, RPI3_INTC_MBOX_CONTROL_SLOT3_FIQ
-       str     w1, [x0, #RPI3_INTC_MBOX_CONTROL_OFFSET]
        ret
 endfunc plat_reset_handler
index 2cd0dff259dd5d028518a6ecfe4c6588f7ebef36..4674bfb04f09508b3fcd0ecba15f4ca722ad0fe0 100644 (file)
 #define PLAT_RPI3_FIP_BASE             ULL(0x00020000)
 #define PLAT_RPI3_FIP_MAX_SIZE         ULL(0x001E0000)
 
-/* We have 16M of memory reserved at at 256M */
+/* We have 16M of memory reserved starting at 256M */
 #define SEC_SRAM_BASE                  ULL(0x10000000)
 #define SEC_SRAM_SIZE                  ULL(0x00100000)
 
 #define SEC_DRAM0_BASE                 ULL(0x10100000)
-#define SEC_DRAM0_SIZE                 ULL(0x00200000)
-
-#define NS_DRAM0_BASE                  ULL(0x10300000)
-#define NS_DRAM0_SIZE                  ULL(0x00D00000)
+#define SEC_DRAM0_SIZE                 ULL(0x00F00000)
 /* End of reserved memory */
 
+#define NS_DRAM0_BASE                  ULL(0x11000000)
+#define NS_DRAM0_SIZE                  ULL(0x01000000)
+
 /*
  * BL33 entrypoint.
  */
  */
 #define PLAT_RPI3_TRUSTED_MAILBOX_BASE SHARED_RAM_BASE
 
+/* The secure entry point to be used on warm reset by all CPUs. */
 #define PLAT_RPI3_TM_ENTRYPOINT                PLAT_RPI3_TRUSTED_MAILBOX_BASE
 #define PLAT_RPI3_TM_ENTRYPOINT_SIZE   ULL(8)
 
+/* Hold entries for each CPU. */
 #define PLAT_RPI3_TM_HOLD_BASE         (PLAT_RPI3_TM_ENTRYPOINT + \
                                         PLAT_RPI3_TM_ENTRYPOINT_SIZE)
 #define PLAT_RPI3_TM_HOLD_ENTRY_SIZE   ULL(8)
index df19705e68cf12b7bc1b93932f90d21388cbed18..4276c84f8841057916af11df9e09d1801dbf585e 100644 (file)
@@ -20,7 +20,8 @@ BL1_SOURCES           +=      drivers/io/io_fip.c                     \
                                plat/common/aarch64/platform_mp_stack.S \
                                plat/rpi3/aarch64/plat_helpers.S        \
                                plat/rpi3/rpi3_bl1_setup.c              \
-                               plat/rpi3/rpi3_io_storage.c
+                               plat/rpi3/rpi3_io_storage.c             \
+                               plat/rpi3/rpi3_mbox.c
 
 BL2_SOURCES            +=      common/desc_image_load.c                \
                                drivers/io/io_fip.c                     \
@@ -54,6 +55,26 @@ else
     TF_CFLAGS_aarch64  +=      -mtune=cortex-a53
 endif
 
+# Platform Makefile target
+# ------------------------
+
+RPI3_BL1_PAD_BIN       :=      ${BUILD_PLAT}/bl1_pad.bin
+RPI3_ARMSTUB8_BIN      :=      ${BUILD_PLAT}/armstub8.bin
+
+# Add new default target when compiling this platform
+all: armstub
+
+# This target concatenates BL1 and the FIP so that the base addresses match the
+# ones defined in the memory map
+armstub: bl1 fip
+       @echo "  CAT     $@"
+       ${Q}cp ${BUILD_PLAT}/bl1.bin ${RPI3_BL1_PAD_BIN}
+       ${Q}truncate --size=131072 ${RPI3_BL1_PAD_BIN}
+       ${Q}cat ${RPI3_BL1_PAD_BIN} ${BUILD_PLAT}/fip.bin > ${RPI3_ARMSTUB8_BIN}
+       @${ECHO_BLANK_LINE}
+       @echo "Built $@ successfully"
+       @${ECHO_BLANK_LINE}
+
 # Build config flags
 # ------------------
 
@@ -69,8 +90,11 @@ WORKAROUND_CVE_2017_5715     := 0
 # Disable the PSCI platform compatibility layer by default
 ENABLE_PLAT_COMPAT             := 0
 
-# Enable reset to BL31 by default
-RESET_TO_BL31                  := 1
+# Disable stack protector by default
+ENABLE_STACK_PROTECTOR         := 0
+
+# Reset to BL31 isn't supported
+RESET_TO_BL31                  := 0
 
 # Have different sections for code and rodata
 SEPARATE_CODE_AND_RODATA       := 1
@@ -90,6 +114,9 @@ MULTI_CONSOLE_API            := 1
 # BL33 images are in AArch64 by default
 RPI3_BL33_IN_AARCH32           := 0
 
+# Assume that BL33 isn't the Linux kernel by default
+RPI3_DIRECT_LINUX_BOOT         := 0
+
 # BL32 location
 RPI3_BL32_RAM_LOCATION := tdram
 ifeq (${RPI3_BL32_RAM_LOCATION}, tsram)
@@ -105,9 +132,17 @@ endif
 
 $(eval $(call add_define,RPI3_BL32_RAM_LOCATION_ID))
 $(eval $(call add_define,RPI3_BL33_IN_AARCH32))
+$(eval $(call add_define,RPI3_DIRECT_LINUX_BOOT))
+$(eval $(call add_define,RPI3_PRELOADED_DTB_BASE))
 
 # Verify build config
 # -------------------
+#
+ifneq (${RPI3_DIRECT_LINUX_BOOT}, 0)
+  ifndef RPI3_PRELOADED_DTB_BASE
+    $(error Error: RPI3_PRELOADED_DTB_BASE needed if RPI3_DIRECT_LINUX_BOOT=1)
+  endif
+endif
 
 ifneq (${LOAD_IMAGE_V2}, 1)
   $(error Error: rpi3 needs LOAD_IMAGE_V2=1)
@@ -117,10 +152,19 @@ ifneq (${MULTI_CONSOLE_API}, 1)
   $(error Error: rpi3 needs MULTI_CONSOLE_API=1)
 endif
 
+ifneq (${RESET_TO_BL31}, 0)
+  $(error Error: rpi3 needs RESET_TO_BL31=0)
+endif
+
 ifeq (${ARCH},aarch32)
   $(error Error: AArch32 not supported on rpi3)
 endif
 
+ifneq ($(ENABLE_STACK_PROTECTOR), 0)
+PLAT_BL_COMMON_SOURCES +=      plat/rpi3/rpi3_rng.c                    \
+                               plat/rpi3/rpi3_stack_protector.c
+endif
+
 ifeq (${SPD},opteed)
 BL2_SOURCES    +=                                                      \
                lib/optee/optee_utils.c
index c98715b9a2211eecd511d44c1761d9c263f6f6af..39bb3325656609377c7a10b25ff5a2ace37bd9ff 100644 (file)
@@ -7,6 +7,7 @@
 #include <arch.h>
 #include <arch_helpers.h>
 #include <bl_common.h>
+#include <debug.h>
 #include <platform_def.h>
 #include <xlat_mmu_helpers.h>
 #include <xlat_tables_defs.h>
@@ -56,6 +57,39 @@ void bl1_plat_arch_setup(void)
 
 void bl1_platform_setup(void)
 {
+       uint32_t __unused rev;
+       int __unused rc;
+
+       rc = rpi3_vc_hardware_get_board_revision(&rev);
+
+       if (rc == 0) {
+               const char __unused *model, __unused *info;
+
+               switch (rev) {
+               case 0xA02082:
+                       model = "Raspberry Pi 3 Model B";
+                       info = "(1GB, Sony, UK)";
+                       break;
+               case 0xA22082:
+                       model = "Raspberry Pi 3 Model B";
+                       info = "(1GB, Embest, China)";
+                       break;
+               case 0xA020D3:
+                       model = "Raspberry Pi 3 Model B+";
+                       info = "(1GB, Sony, UK)";
+                       break;
+               default:
+                       model = "Unknown";
+                       info = "(Unknown)";
+                       ERROR("rpi3: Unknown board revision 0x%08x\n", rev);
+                       break;
+               }
+
+               NOTICE("rpi3: Detected: %s %s [0x%08x]\n", model, info, rev);
+       } else {
+               ERROR("rpi3: Unable to detect board revision\n");
+       }
+
        /* Initialise the IO layer and register platform IO devices */
        plat_rpi3_io_setup();
 }
index 58344ae99658e1fff6cb76277d9c765a9ec7034a..5bbb13c868f6c95233c93e550df4dd8cb3b80402 100644 (file)
@@ -59,39 +59,6 @@ void bl31_early_platform_setup(void *from_bl2,
        /* Initialize the console to provide early debug support */
        rpi3_console_init();
 
-#if RESET_TO_BL31
-
-       /* There are no parameters from BL2 if BL31 is a reset vector */
-       assert(from_bl2 == NULL);
-       assert(plat_params_from_bl2 == NULL);
-
-#ifdef BL32_BASE
-       /* Populate entry point information for BL32 */
-       SET_PARAM_HEAD(&bl32_image_ep_info,
-                               PARAM_EP,
-                               VERSION_1,
-                               0);
-       SET_SECURITY_STATE(bl32_image_ep_info.h.attr, SECURE);
-       bl32_image_ep_info.pc = BL32_BASE;
-       bl32_image_ep_info.spsr = rpi3_get_spsr_for_bl32_entry();
-#endif /* BL32_BASE */
-
-       /* Populate entry point information for BL33 */
-       SET_PARAM_HEAD(&bl33_image_ep_info,
-                               PARAM_EP,
-                               VERSION_1,
-                               0);
-       /*
-        * Tell BL31 where the non-trusted software image
-        * is located and the entry state information
-        */
-       bl33_image_ep_info.pc = plat_get_ns_image_entrypoint();
-
-       bl33_image_ep_info.spsr = rpi3_get_spsr_for_bl33_entry();
-       SET_SECURITY_STATE(bl33_image_ep_info.h.attr, NON_SECURE);
-
-#else /* RESET_TO_BL31 */
-
        /*
         * In debug builds, we pass a special value in 'plat_params_from_bl2'
         * to verify platform parameters from BL2 to BL31.
@@ -130,7 +97,33 @@ void bl31_early_platform_setup(void *from_bl2,
                panic();
        }
 
-#endif /* RESET_TO_BL31 */
+#if RPI3_DIRECT_LINUX_BOOT
+# if RPI3_BL33_IN_AARCH32
+       /*
+        * According to the file ``Documentation/arm/Booting`` of the Linux
+        * kernel tree, Linux expects:
+        * r0 = 0
+        * r1 = machine type number, optional in DT-only platforms (~0 if so)
+        * r2 = Physical address of the device tree blob
+        */
+       VERBOSE("rpi3: Preparing to boot 32-bit Linux kernel\n");
+       bl33_image_ep_info.args.arg0 = 0U;
+       bl33_image_ep_info.args.arg1 = ~0U;
+       bl33_image_ep_info.args.arg2 = (u_register_t) RPI3_PRELOADED_DTB_BASE;
+# else
+       /*
+        * According to the file ``Documentation/arm64/booting.txt`` of the
+        * Linux kernel tree, Linux expects the physical address of the device
+        * tree blob (DTB) in x0, while x1-x3 are reserved for future use and
+        * must be 0.
+        */
+       VERBOSE("rpi3: Preparing to boot 64-bit Linux kernel\n");
+       bl33_image_ep_info.args.arg0 = (u_register_t) RPI3_PRELOADED_DTB_BASE;
+       bl33_image_ep_info.args.arg1 = 0ULL;
+       bl33_image_ep_info.args.arg2 = 0ULL;
+       bl33_image_ep_info.args.arg3 = 0ULL;
+# endif /* RPI3_BL33_IN_AARCH32 */
+#endif /* RPI3_DIRECT_LINUX_BOOT */
 }
 
 void bl31_plat_arch_setup(void)
@@ -148,12 +141,10 @@ void bl31_plat_arch_setup(void)
 
 void bl31_platform_setup(void)
 {
-#if RESET_TO_BL31
        /*
         * Do initial security configuration to allow DRAM/device access
         * (if earlier BL has not already done so).
         */
-#endif /* RESET_TO_BL31 */
 
        return;
 }
index 65f5e7ad00bb09ac357bbbf6cf87e208ed2e62f0..98cf534c73766bb068c07b1800108ce4da1cbdd8 100644 (file)
@@ -5,6 +5,7 @@
  */
 
 #include <arch_helpers.h>
+#include <assert.h>
 #include <bl_common.h>
 #include <console.h>
 #include <debug.h>
@@ -198,15 +199,21 @@ unsigned int plat_get_syscnt_freq2(void)
 
 uint32_t plat_ic_get_pending_interrupt_type(void)
 {
+       ERROR("rpi3: Interrupt routed to EL3.\n");
        return INTR_TYPE_INVAL;
 }
 
-uint32_t plat_interrupt_type_to_line(uint32_t type,
-                                    uint32_t security_state)
+uint32_t plat_interrupt_type_to_line(uint32_t type, uint32_t security_state)
 {
-       /* It is not expected to receive an interrupt route to EL3.
-        * Hence panic() to flag error.
-        */
-       ERROR("Interrupt not expected to be routed to EL3");
-       panic();
+       assert((type == INTR_TYPE_S_EL1) || (type == INTR_TYPE_EL3) ||
+              (type == INTR_TYPE_NS));
+
+       assert(sec_state_is_valid(security_state));
+
+       /* Non-secure interrupts are signalled on the IRQ line always. */
+       if (type == INTR_TYPE_NS)
+               return __builtin_ctz(SCR_IRQ_BIT);
+
+       /* Secure interrupts are signalled on the FIQ line always. */
+       return  __builtin_ctz(SCR_FIQ_BIT);
 }
index 70272e003c3f56b7d8b8e790381a5598c3449758..1a26053b935cdd90b136681241348b9293e793ec 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2018, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
 #define RPI3_IO_SIZE                   ULL(0x01000000)
 
 /*
- * Serial port (called 'Mini UART' in the BCM docucmentation).
+ * ARM <-> VideoCore mailboxes
  */
-#define RPI3_IO_MINI_UART_OFFSET       ULL(0x00215040)
-#define RPI3_MINI_UART_BASE            (RPI3_IO_BASE + RPI3_IO_MINI_UART_OFFSET)
-#define RPI3_MINI_UART_CLK_IN_HZ       ULL(500000000)
+#define RPI3_MBOX_OFFSET               ULL(0x0000B880)
+#define RPI3_MBOX_BASE                 (RPI3_IO_BASE + RPI3_MBOX_OFFSET)
+/* VideoCore -> ARM */
+#define RPI3_MBOX0_READ_OFFSET         ULL(0x00000000)
+#define RPI3_MBOX0_PEEK_OFFSET         ULL(0x00000010)
+#define RPI3_MBOX0_SENDER_OFFSET       ULL(0x00000014)
+#define RPI3_MBOX0_STATUS_OFFSET       ULL(0x00000018)
+#define RPI3_MBOX0_CONFIG_OFFSET       ULL(0x0000001C)
+/* ARM -> VideoCore */
+#define RPI3_MBOX1_WRITE_OFFSET                ULL(0x00000020)
+#define RPI3_MBOX1_PEEK_OFFSET         ULL(0x00000030)
+#define RPI3_MBOX1_SENDER_OFFSET       ULL(0x00000034)
+#define RPI3_MBOX1_STATUS_OFFSET       ULL(0x00000038)
+#define RPI3_MBOX1_CONFIG_OFFSET       ULL(0x0000003C)
+/* Mailbox status constants */
+#define RPI3_MBOX_STATUS_FULL_MASK     U(0x80000000) /* Set if full */
+#define RPI3_MBOX_STATUS_EMPTY_MASK    U(0x40000000) /* Set if empty */
 
 /*
  * Power management, reset controller, watchdog.
 #define RPI3_PM_BASE                   (RPI3_IO_BASE + RPI3_IO_PM_OFFSET)
 /* Registers on top of RPI3_PM_BASE. */
 #define RPI3_PM_RSTC_OFFSET            ULL(0x0000001C)
+#define RPI3_PM_RSTS_OFFSET            ULL(0x00000020)
 #define RPI3_PM_WDOG_OFFSET            ULL(0x00000024)
 /* Watchdog constants */
-#define RPI3_PM_PASSWORD               ULL(0x5A000000)
-#define RPI3_PM_RSTC_WRCFG_MASK                ULL(0x00000030)
-#define RPI3_PM_RSTC_WRCFG_FULL_RESET  ULL(0x00000020)
+#define RPI3_PM_PASSWORD               U(0x5A000000)
+#define RPI3_PM_RSTC_WRCFG_MASK                U(0x00000030)
+#define RPI3_PM_RSTC_WRCFG_FULL_RESET  U(0x00000020)
+/*
+ * The RSTS register is used by the VideoCore firmware when booting the
+ * Raspberry Pi to know which partition to boot from. The partition value is
+ * formed by bits 0, 2, 4, 6, 8 and 10. Partition 63 is used by said firmware
+ * to indicate halt.
+ */
+#define RPI3_PM_RSTS_WRCFG_HALT                U(0x00000555)
+
+/*
+ * Hardware random number generator.
+ */
+#define RPI3_IO_RNG_OFFSET             ULL(0x00104000)
+#define RPI3_RNG_BASE                  (RPI3_IO_BASE + RPI3_IO_RNG_OFFSET)
+#define RPI3_RNG_CTRL_OFFSET           ULL(0x00000000)
+#define RPI3_RNG_STATUS_OFFSET         ULL(0x00000004)
+#define RPI3_RNG_DATA_OFFSET           ULL(0x00000008)
+#define RPI3_RNG_INT_MASK_OFFSET       ULL(0x00000010)
+/* Enable/disable RNG */
+#define RPI3_RNG_CTRL_ENABLE           U(0x1)
+#define RPI3_RNG_CTRL_DISABLE          U(0x0)
+/* Number of currently available words */
+#define RPI3_RNG_STATUS_NUM_WORDS_SHIFT        U(24)
+#define RPI3_RNG_STATUS_NUM_WORDS_MASK U(0xFF)
+/* Value to mask interrupts caused by the RNG */
+#define RPI3_RNG_INT_MASK_DISABLE      U(0x1)
+
+/*
+ * Serial port (called 'Mini UART' in the BCM docucmentation).
+ */
+#define RPI3_IO_MINI_UART_OFFSET       ULL(0x00215040)
+#define RPI3_MINI_UART_BASE            (RPI3_IO_BASE + RPI3_IO_MINI_UART_OFFSET)
+#define RPI3_MINI_UART_CLK_IN_HZ       ULL(500000000)
 
 /*
  * Local interrupt controller
diff --git a/plat/rpi3/rpi3_mbox.c b/plat/rpi3/rpi3_mbox.c
new file mode 100644 (file)
index 0000000..77e17af
--- /dev/null
@@ -0,0 +1,146 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <arch_helpers.h>
+#include <debug.h>
+#include <mmio.h>
+#include <platform_def.h>
+
+#include "rpi3_hw.h"
+
+/* This struct must be aligned to 16 bytes */
+typedef struct __packed __aligned(16) rpi3_mbox_request {
+       uint32_t        size; /* Buffer size in bytes */
+       uint32_t        code; /* Request/response code */
+       uint32_t        tags[0];
+} rpi3_mbox_request_t;
+
+#define RPI3_MBOX_BUFFER_SIZE          U(256)
+static uint8_t __aligned(16) rpi3_mbox_buffer[RPI3_MBOX_BUFFER_SIZE];
+
+/* Constants to perform a request/check the status of a request. */
+#define RPI3_MBOX_PROCESS_REQUEST      U(0x00000000)
+#define RPI3_MBOX_REQUEST_SUCCESSFUL   U(0x80000000)
+#define RPI3_MBOX_REQUEST_ERROR                U(0x80000001)
+
+/* Command constants */
+#define RPI3_TAG_HARDWARE_GET_BOARD_REVISION   U(0x00010002)
+#define RPI3_TAG_END                           U(0x00000000)
+
+#define RPI3_TAG_REQUEST               U(0x00000000)
+#define RPI3_TAG_IS_RESPONSE           U(0x80000000) /* Set if response */
+#define RPI3_TAG_RESPONSE_LENGTH_MASK  U(0x7FFFFFFF)
+
+#define RPI3_CHANNEL_ARM_TO_VC         U(0x8)
+#define RPI3_CHANNEL_MASK              U(0xF)
+
+#define RPI3_MAILBOX_MAX_RETRIES       U(1000000)
+
+/*******************************************************************************
+ * Helpers to send requests to the VideoCore using the mailboxes.
+ ******************************************************************************/
+static void rpi3_vc_mailbox_request_send(void)
+{
+       uint32_t st, data;
+       uintptr_t resp_addr, addr;
+       unsigned int retries;
+
+       /* This is the location of the request buffer */
+       addr = (uintptr_t) &rpi3_mbox_buffer;
+
+       /* Make sure that the changes are seen by the VideoCore */
+       flush_dcache_range(addr, RPI3_MBOX_BUFFER_SIZE);
+
+       /* Wait until the outbound mailbox is empty */
+       retries = 0U;
+
+       do {
+               st = mmio_read_32(RPI3_MBOX_BASE + RPI3_MBOX1_STATUS_OFFSET);
+
+               retries++;
+               if (retries == RPI3_MAILBOX_MAX_RETRIES) {
+                       ERROR("rpi3: mbox: Send request timeout\n");
+                       return;
+               }
+
+       } while ((st & RPI3_MBOX_STATUS_EMPTY_MASK) == 0U);
+
+       /* Send base address of this message to start request */
+       mmio_write_32(RPI3_MBOX_BASE + RPI3_MBOX1_WRITE_OFFSET,
+                     RPI3_CHANNEL_ARM_TO_VC | (uint32_t) addr);
+
+       /* Wait until the inbound mailbox isn't empty */
+       retries = 0U;
+
+       do {
+               st = mmio_read_32(RPI3_MBOX_BASE + RPI3_MBOX0_STATUS_OFFSET);
+
+               retries++;
+               if (retries == RPI3_MAILBOX_MAX_RETRIES) {
+                       ERROR("rpi3: mbox: Receive response timeout\n");
+                       return;
+               }
+
+       } while ((st & RPI3_MBOX_STATUS_EMPTY_MASK) != 0U);
+
+       /* Get location and channel */
+       data = mmio_read_32(RPI3_MBOX_BASE + RPI3_MBOX0_READ_OFFSET);
+
+       if ((data & RPI3_CHANNEL_MASK) != RPI3_CHANNEL_ARM_TO_VC) {
+               ERROR("rpi3: mbox: Wrong channel: 0x%08x\n", data);
+               panic();
+       }
+
+       resp_addr = (uintptr_t)(data & ~RPI3_CHANNEL_MASK);
+       if (addr != resp_addr) {
+               ERROR("rpi3: mbox: Unexpected address: 0x%08x\n", data);
+               panic();
+       }
+
+       /* Make sure that the data seen by the CPU is up to date */
+       inv_dcache_range(addr, RPI3_MBOX_BUFFER_SIZE);
+}
+
+/*******************************************************************************
+ * Request board revision. Returns the revision and 0 on success, -1 on error.
+ ******************************************************************************/
+int rpi3_vc_hardware_get_board_revision(uint32_t *revision)
+{
+       uint32_t tag_request_size = sizeof(uint32_t);
+       rpi3_mbox_request_t *req = (rpi3_mbox_request_t *) rpi3_mbox_buffer;
+
+       assert(revision != NULL);
+
+       VERBOSE("rpi3: mbox: Sending request at %p\n", (void *)req);
+
+       req->size = sizeof(rpi3_mbox_buffer);
+       req->code = RPI3_MBOX_PROCESS_REQUEST;
+
+       req->tags[0] = RPI3_TAG_HARDWARE_GET_BOARD_REVISION;
+       req->tags[1] = tag_request_size; /* Space available for the response */
+       req->tags[2] = RPI3_TAG_REQUEST;
+       req->tags[3] = 0; /* Placeholder for the response */
+
+       req->tags[4] = RPI3_TAG_END;
+
+       rpi3_vc_mailbox_request_send();
+
+       if (req->code != RPI3_MBOX_REQUEST_SUCCESSFUL) {
+               ERROR("rpi3: mbox: Code = 0x%08x\n", req->code);
+               return -1;
+       }
+
+       if (req->tags[2] != (RPI3_TAG_IS_RESPONSE | tag_request_size)) {
+               ERROR("rpi3: mbox: get board revision failed (0x%08x)\n",
+                     req->tags[2]);
+               return -1;
+       }
+
+       *revision = req->tags[3];
+
+       return 0;
+}
index 1d067fb13df6425ac20f33dd1fb8f739c1958d5f..b6adc8a5305f27eceb469772ff22be70e9c35b9b 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #include "rpi3_hw.h"
 
-/*
- * The secure entry point to be used on warm reset.
- */
-static uintptr_t secure_entrypoint;
-
 /* Make composite power state parameter till power level 0 */
 #if PSCI_EXTENDED_STATE_ID
 
@@ -150,41 +145,61 @@ void rpi3_pwr_domain_on_finish(const psci_power_state_t *target_state)
 }
 
 /*******************************************************************************
- * Platform handler to reboot the system
+ * Platform handlers for system reset and system off.
  ******************************************************************************/
-#define RESET_TIMEOUT  10
 
-static void __dead2 rpi3_system_reset(void)
-{
-       /* Setup watchdog for reset */
+/* 10 ticks (Watchdog timer = Timer clock / 16) */
+#define RESET_TIMEOUT  U(10)
 
-       static const uintptr_t base = RPI3_PM_BASE;
+static void __dead2 rpi3_watchdog_reset(void)
+{
        uint32_t rstc;
 
-       INFO("rpi3: PSCI System Reset: invoking watchdog reset\n");
-
        console_flush();
 
-       rstc = mmio_read_32(base + RPI3_PM_RSTC_OFFSET);
-       rstc &= ~RPI3_PM_RSTC_WRCFG_MASK;
-       rstc |= RPI3_PM_RSTC_WRCFG_FULL_RESET;
-
-       dmbst();
+       dsbsy();
+       isb();
 
-       /*
-        * Watchdog timer = Timer clock / 16
-        * Password (31:16) | Value (11:0)
-        */
-       mmio_write_32(base + RPI3_PM_WDOG_OFFSET,
+       mmio_write_32(RPI3_PM_BASE + RPI3_PM_WDOG_OFFSET,
                      RPI3_PM_PASSWORD | RESET_TIMEOUT);
-       mmio_write_32(base + RPI3_PM_RSTC_OFFSET,
-                     RPI3_PM_PASSWORD | rstc);
+
+       rstc = mmio_read_32(RPI3_PM_BASE + RPI3_PM_RSTC_OFFSET);
+       rstc &= ~RPI3_PM_RSTC_WRCFG_MASK;
+       rstc |= RPI3_PM_PASSWORD | RPI3_PM_RSTC_WRCFG_FULL_RESET;
+       mmio_write_32(RPI3_PM_BASE + RPI3_PM_RSTC_OFFSET, rstc);
 
        for (;;) {
                wfi();
        }
 }
 
+static void __dead2 rpi3_system_reset(void)
+{
+       INFO("rpi3: PSCI_SYSTEM_RESET: Invoking watchdog reset\n");
+
+       rpi3_watchdog_reset();
+}
+
+static void __dead2 rpi3_system_off(void)
+{
+       uint32_t rsts;
+
+       INFO("rpi3: PSCI_SYSTEM_OFF: Invoking watchdog reset\n");
+
+       /*
+        * This function doesn't actually make the Raspberry Pi turn itself off,
+        * the hardware doesn't allow it. It simply reboots it and the RSTS
+        * value tells the bootcode.bin firmware not to continue the regular
+        * bootflow and to stay in a low power mode.
+        */
+
+       rsts = mmio_read_32(RPI3_PM_BASE + RPI3_PM_RSTS_OFFSET);
+       rsts |= RPI3_PM_PASSWORD | RPI3_PM_RSTS_WRCFG_HALT;
+       mmio_write_32(RPI3_PM_BASE + RPI3_PM_RSTS_OFFSET, rsts);
+
+       rpi3_watchdog_reset();
+}
+
 /*******************************************************************************
  * Platform handlers and setup function.
  ******************************************************************************/
@@ -192,6 +207,7 @@ static const plat_psci_ops_t plat_rpi3_psci_pm_ops = {
        .cpu_standby = rpi3_cpu_standby,
        .pwr_domain_on = rpi3_pwr_domain_on,
        .pwr_domain_on_finish = rpi3_pwr_domain_on_finish,
+       .system_off = rpi3_system_off,
        .system_reset = rpi3_system_reset,
        .validate_power_state = rpi3_validate_power_state,
 };
@@ -199,10 +215,9 @@ static const plat_psci_ops_t plat_rpi3_psci_pm_ops = {
 int plat_setup_psci_ops(uintptr_t sec_entrypoint,
                        const plat_psci_ops_t **psci_ops)
 {
-       uintptr_t *mailbox = (void *)PLAT_RPI3_TRUSTED_MAILBOX_BASE;
+       uintptr_t *entrypoint = (void *) PLAT_RPI3_TM_ENTRYPOINT;
 
-       *mailbox = sec_entrypoint;
-       secure_entrypoint = (uintptr_t)sec_entrypoint;
+       *entrypoint = sec_entrypoint;
        *psci_ops = &plat_rpi3_psci_pm_ops;
 
        return 0;
index a9fbfe479a25849e5dbd5b77864b5828585f6ad5..91b7add854b9374491e4e7a1ad715d80e79e6437 100644 (file)
@@ -33,4 +33,10 @@ uint32_t rpi3_get_spsr_for_bl33_entry(void);
 /* IO storage utility functions */
 void plat_rpi3_io_setup(void);
 
+/* Hardware RNG functions */
+void rpi3_rng_read(void *buf, size_t len);
+
+/* VideoCore firmware commands */
+int rpi3_vc_hardware_get_board_revision(uint32_t *revision);
+
 #endif /*__RPI3_PRIVATE_H__ */
diff --git a/plat/rpi3/rpi3_rng.c b/plat/rpi3/rpi3_rng.c
new file mode 100644 (file)
index 0000000..111b3b6
--- /dev/null
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <mmio.h>
+#include <string.h>
+
+#include "rpi3_hw.h"
+
+/* Initial amount of values to discard */
+#define RNG_WARMUP_COUNT       U(0x40000)
+
+static void rpi3_rng_initialize(void)
+{
+       uint32_t int_mask, ctrl;
+
+       /* Return if it is already enabled */
+       ctrl = mmio_read_32(RPI3_RNG_BASE + RPI3_RNG_CTRL_OFFSET);
+       if ((ctrl & RPI3_RNG_CTRL_ENABLE) != 0U) {
+               return;
+       }
+
+       /* Mask interrupts */
+       int_mask = mmio_read_32(RPI3_RNG_BASE + RPI3_RNG_INT_MASK_OFFSET);
+       int_mask |= RPI3_RNG_INT_MASK_DISABLE;
+       mmio_write_32(RPI3_RNG_BASE + RPI3_RNG_INT_MASK_OFFSET, int_mask);
+
+       /* Discard several values when initializing to give it time to warmup */
+       mmio_write_32(RPI3_RNG_BASE + RPI3_RNG_STATUS_OFFSET, RNG_WARMUP_COUNT);
+
+       mmio_write_32(RPI3_RNG_BASE + RPI3_RNG_CTRL_OFFSET,
+                     RPI3_RNG_CTRL_ENABLE);
+}
+
+static uint32_t rpi3_rng_get_word(void)
+{
+       size_t nwords;
+
+       do {
+               /* Get number of available words to read */
+               nwords = (mmio_read_32(RPI3_RNG_BASE + RPI3_RNG_STATUS_OFFSET)
+                                      >> RPI3_RNG_STATUS_NUM_WORDS_SHIFT)
+                                      & RPI3_RNG_STATUS_NUM_WORDS_MASK;
+       } while (nwords == 0U);
+
+       return mmio_read_32(RPI3_RNG_BASE + RPI3_RNG_DATA_OFFSET);
+}
+
+void rpi3_rng_read(void *buf, size_t len)
+{
+       uint32_t data;
+       size_t left = len;
+       uint32_t *dst = buf;
+
+       assert(buf != NULL);
+       assert(len != 0U);
+       assert(check_uptr_overflow((uintptr_t) buf, (uintptr_t) len) == 0);
+
+       rpi3_rng_initialize();
+
+       while (left >= sizeof(uint32_t)) {
+               data = rpi3_rng_get_word();
+               *dst++ = data;
+               left -= sizeof(uint32_t);
+       }
+
+       if (left > 0U) {
+               data = rpi3_rng_get_word();
+               memcpy(dst, &data, left);
+       }
+}
diff --git a/plat/rpi3/rpi3_stack_protector.c b/plat/rpi3/rpi3_stack_protector.c
new file mode 100644 (file)
index 0000000..d939cd3
--- /dev/null
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <sys/types.h>
+#include <utils.h>
+
+#include "rpi3_private.h"
+
+/* Get 128 bits of entropy and fuse the values together to form the canary. */
+#define TRNG_NBYTES    16U
+
+u_register_t plat_get_stack_protector_canary(void)
+{
+       size_t i;
+       u_register_t buf[TRNG_NBYTES / sizeof(u_register_t)];
+       u_register_t ret = 0U;
+
+       rpi3_rng_read(buf, sizeof(buf));
+
+       for (i = 0U; i < ARRAY_SIZE(buf); i++)
+               ret ^= buf[i];
+
+       return ret;
+}
index 546f84aa13f16929d592ac0347c518f7be125f02..96427a1613abbfbca0feba19d97615d3650a9cdc 100644 (file)
@@ -18,6 +18,10 @@ ERRATA_A53_855873            := 1
 # Libraries
 include lib/xlat_tables_v2/xlat_tables.mk
 
+ifeq (${SPD},opteed)
+TF_CFLAGS_aarch64      +=      -DBL32_BASE=0xfc000000
+endif
+
 PLAT_PATH              :=      plat/socionext/synquacer
 PLAT_INCLUDES          :=      -I$(PLAT_PATH)/include          \
                                -I$(PLAT_PATH)/drivers/scpi     \
index 461c8deced2240c761ee832b0ed291471cc1fee3..30d06e9ebc0ca0b246728e574fdf4a8b1f07fab4 100644 (file)
@@ -70,15 +70,31 @@ void bl31_early_platform_setup(bl31_params_t *from_bl2,
        assert(from_bl2 == NULL);
        assert(plat_params_from_bl2 == NULL);
 
+       /* Initialize power controller before setting up topology */
+       plat_sq_pwrc_setup();
+
 #ifdef BL32_BASE
-       /* Populate entry point information for BL32 */
-       SET_PARAM_HEAD(&bl32_image_ep_info,
-                               PARAM_EP,
-                               VERSION_1,
-                               0);
-       SET_SECURITY_STATE(bl32_image_ep_info.h.attr, SECURE);
-       bl32_image_ep_info.pc = BL32_BASE;
-       bl32_image_ep_info.spsr = sq_get_spsr_for_bl32_entry();
+       struct draminfo di = {0};
+
+       scpi_get_draminfo(&di);
+
+       /*
+        * Check if OP-TEE has been loaded in Secure RAM allocated
+        * from DRAM1 region
+        */
+       if ((di.base1 + di.size1) <= BL32_BASE) {
+               NOTICE("OP-TEE has been loaded by SCP firmware\n");
+               /* Populate entry point information for BL32 */
+               SET_PARAM_HEAD(&bl32_image_ep_info,
+                                       PARAM_EP,
+                                       VERSION_1,
+                                       0);
+               SET_SECURITY_STATE(bl32_image_ep_info.h.attr, SECURE);
+               bl32_image_ep_info.pc = BL32_BASE;
+               bl32_image_ep_info.spsr = sq_get_spsr_for_bl32_entry();
+       } else {
+               NOTICE("OP-TEE has not been loaded by SCP firmware\n");
+       }
 #endif /* BL32_BASE */
 
        /* Populate entry point information for BL33 */
@@ -125,9 +141,6 @@ void bl31_platform_setup(void)
 
        /* Allow access to the System counter timer module */
        sq_configure_sys_timer();
-
-       /* Initialize power controller before setting up topology */
-       plat_sq_pwrc_setup();
 }
 
 void bl31_plat_runtime_setup(void)
index 01ec2a26d10ba180497e02c0af869c2301acd311..59d6ed29411fcc530a16bca2738969042e79a77e 100644 (file)
@@ -34,7 +34,7 @@
  * Address of the entrypoint vector table in OPTEE. It is
  * initialised once on the primary core after a cold boot.
  ******************************************************************************/
-optee_vectors_t *optee_vector_table;
+struct optee_vectors *optee_vector_table;
 
 /*******************************************************************************
  * Array to keep track of per-cpu OPTEE state
index b77b6d342174c77f2167195fa6fe55552d7cdfa1..a5f0a4168ebbda734d4c97a27ee7b5ab594f6d5e 100644 (file)
@@ -144,7 +144,7 @@ uint64_t opteed_enter_sp(uint64_t *c_rt_ctx);
 void __dead2 opteed_exit_sp(uint64_t c_rt_ctx, uint64_t ret);
 uint64_t opteed_synchronous_sp_entry(optee_context_t *optee_ctx);
 void __dead2 opteed_synchronous_sp_exit(optee_context_t *optee_ctx, uint64_t ret);
-void opteed_init_optee_ep_state(struct entry_point_info *optee_ep,
+void opteed_init_optee_ep_state(struct entry_point_info *optee_entry_point,
                                uint32_t rw,
                                uint64_t pc,
                                uint64_t pageable_part,
index d6d092decd0bdee1c31129d22c743b4fbb84b109..28afc1d4be5403fc6e7dcf50712e479d17c7eafe 100644 (file)
@@ -932,43 +932,43 @@ uint64_t sdei_smc_handler(uint32_t smc_fid,
        case SDEI_VERSION:
                SDEI_LOG("> VER\n");
                ret = sdei_version();
-               SDEI_LOG("< VER:%lx\n", ret);
+               SDEI_LOG("< VER:%llx\n", ret);
                SMC_RET1(handle, ret);
 
        case SDEI_EVENT_REGISTER:
                x5 = SMC_GET_GP(handle, CTX_GPREG_X5);
-               SDEI_LOG("> REG(n:%d e:%lx a:%lx f:%x m:%lx)\n", (int) x1,
+               SDEI_LOG("> REG(n:%d e:%llx a:%llx f:%x m:%llx)\n", (int) x1,
                                x2, x3, (int) x4, x5);
                ret = sdei_event_register(x1, x2, x3, x4, x5);
-               SDEI_LOG("< REG:%ld\n", ret);
+               SDEI_LOG("< REG:%lld\n", ret);
                SMC_RET1(handle, ret);
 
        case SDEI_EVENT_ENABLE:
                SDEI_LOG("> ENABLE(n:%d)\n", (int) x1);
                ret = sdei_event_enable(x1);
-               SDEI_LOG("< ENABLE:%ld\n", ret);
+               SDEI_LOG("< ENABLE:%lld\n", ret);
                SMC_RET1(handle, ret);
 
        case SDEI_EVENT_DISABLE:
                SDEI_LOG("> DISABLE(n:%d)\n", (int) x1);
                ret = sdei_event_disable(x1);
-               SDEI_LOG("< DISABLE:%ld\n", ret);
+               SDEI_LOG("< DISABLE:%lld\n", ret);
                SMC_RET1(handle, ret);
 
        case SDEI_EVENT_CONTEXT:
                SDEI_LOG("> CTX(p:%d):%lx\n", (int) x1, read_mpidr_el1());
                ret = sdei_event_context(handle, x1);
-               SDEI_LOG("< CTX:%ld\n", ret);
+               SDEI_LOG("< CTX:%lld\n", ret);
                SMC_RET1(handle, ret);
 
        case SDEI_EVENT_COMPLETE_AND_RESUME:
                resume = 1;
 
        case SDEI_EVENT_COMPLETE:
-               SDEI_LOG("> COMPLETE(r:%d sta/ep:%lx):%lx\n", resume, x1,
+               SDEI_LOG("> COMPLETE(r:%d sta/ep:%llx):%lx\n", resume, x1,
                                read_mpidr_el1());
                ret = sdei_event_complete(resume, x1);
-               SDEI_LOG("< COMPLETE:%lx\n", ret);
+               SDEI_LOG("< COMPLETE:%llx\n", ret);
 
                /*
                 * Set error code only if the call failed. If the call
@@ -985,19 +985,19 @@ uint64_t sdei_smc_handler(uint32_t smc_fid,
        case SDEI_EVENT_STATUS:
                SDEI_LOG("> STAT(n:%d)\n", (int) x1);
                ret = sdei_event_status(x1);
-               SDEI_LOG("< STAT:%ld\n", ret);
+               SDEI_LOG("< STAT:%lld\n", ret);
                SMC_RET1(handle, ret);
 
        case SDEI_EVENT_GET_INFO:
                SDEI_LOG("> INFO(n:%d, %d)\n", (int) x1, (int) x2);
                ret = sdei_event_get_info(x1, x2);
-               SDEI_LOG("< INFO:%ld\n", ret);
+               SDEI_LOG("< INFO:%lld\n", ret);
                SMC_RET1(handle, ret);
 
        case SDEI_EVENT_UNREGISTER:
                SDEI_LOG("> UNREG(n:%d)\n", (int) x1);
                ret = sdei_event_unregister(x1);
-               SDEI_LOG("< UNREG:%ld\n", ret);
+               SDEI_LOG("< UNREG:%lld\n", ret);
                SMC_RET1(handle, ret);
 
        case SDEI_PE_UNMASK:
@@ -1009,49 +1009,49 @@ uint64_t sdei_smc_handler(uint32_t smc_fid,
        case SDEI_PE_MASK:
                SDEI_LOG("> MASK:%lx\n", read_mpidr_el1());
                ret = sdei_pe_mask();
-               SDEI_LOG("< MASK:%ld\n", ret);
+               SDEI_LOG("< MASK:%lld\n", ret);
                SMC_RET1(handle, ret);
 
        case SDEI_INTERRUPT_BIND:
                SDEI_LOG("> BIND(%d)\n", (int) x1);
                ret = sdei_interrupt_bind(x1);
-               SDEI_LOG("< BIND:%ld\n", ret);
+               SDEI_LOG("< BIND:%lld\n", ret);
                SMC_RET1(handle, ret);
 
        case SDEI_INTERRUPT_RELEASE:
                SDEI_LOG("> REL(%d)\n", (int) x1);
                ret = sdei_interrupt_release(x1);
-               SDEI_LOG("< REL:%ld\n", ret);
+               SDEI_LOG("< REL:%lld\n", ret);
                SMC_RET1(handle, ret);
 
        case SDEI_SHARED_RESET:
                SDEI_LOG("> S_RESET():%lx\n", read_mpidr_el1());
                ret = sdei_shared_reset();
-               SDEI_LOG("< S_RESET:%ld\n", ret);
+               SDEI_LOG("< S_RESET:%lld\n", ret);
                SMC_RET1(handle, ret);
 
        case SDEI_PRIVATE_RESET:
                SDEI_LOG("> P_RESET():%lx\n", read_mpidr_el1());
                ret = sdei_private_reset();
-               SDEI_LOG("< P_RESET:%ld\n", ret);
+               SDEI_LOG("< P_RESET:%lld\n", ret);
                SMC_RET1(handle, ret);
 
        case SDEI_EVENT_ROUTING_SET:
-               SDEI_LOG("> ROUTE_SET(n:%d f:%lx aff:%lx)\n", (int) x1, x2, x3);
+               SDEI_LOG("> ROUTE_SET(n:%d f:%llx aff:%llx)\n", (int) x1, x2, x3);
                ret = sdei_event_routing_set(x1, x2, x3);
-               SDEI_LOG("< ROUTE_SET:%ld\n", ret);
+               SDEI_LOG("< ROUTE_SET:%lld\n", ret);
                SMC_RET1(handle, ret);
 
        case SDEI_FEATURES:
-               SDEI_LOG("> FTRS(f:%lx)\n", x1);
+               SDEI_LOG("> FTRS(f:%llx)\n", x1);
                ret = sdei_features(x1);
-               SDEI_LOG("< FTRS:%lx\n", ret);
+               SDEI_LOG("< FTRS:%llx\n", ret);
                SMC_RET1(handle, ret);
 
        case SDEI_EVENT_SIGNAL:
-               SDEI_LOG("> SIGNAL(e:%lx t:%lx)\n", x1, x2);
+               SDEI_LOG("> SIGNAL(e:%llx t:%llx)\n", x1, x2);
                ret = sdei_signal(x1, x2);
-               SDEI_LOG("< SIGNAL:%ld\n", ret);
+               SDEI_LOG("< SIGNAL:%lld\n", ret);
                SMC_RET1(handle, ret);
 
        default:
index 218245d8e20dd269e40373ea84ea1e3630484a5b..9c218dfe85e42a7dffb33a134c5e4a2473d8a199 100644 (file)
@@ -23,19 +23,19 @@ vector_base spm_shim_exceptions_ptr, .spm_shim_exceptions
         */
 vector_entry SynchronousExceptionSP0, .spm_shim_exceptions
        b       .
-       check_vector_size SynchronousExceptionSP0
+end_vector_entry SynchronousExceptionSP0
 
 vector_entry IrqSP0, .spm_shim_exceptions
        b       .
-       check_vector_size IrqSP0
+end_vector_entry IrqSP0
 
 vector_entry FiqSP0, .spm_shim_exceptions
        b       .
-       check_vector_size FiqSP0
+end_vector_entry FiqSP0
 
 vector_entry SErrorSP0, .spm_shim_exceptions
        b       .
-       check_vector_size SErrorSP0
+end_vector_entry SErrorSP0
 
        /* -----------------------------------------------------
         * Current EL with SPx: 0x200 - 0x400
@@ -43,19 +43,19 @@ vector_entry SErrorSP0, .spm_shim_exceptions
         */
 vector_entry SynchronousExceptionSPx, .spm_shim_exceptions
        b       .
-       check_vector_size SynchronousExceptionSPx
+end_vector_entry SynchronousExceptionSPx
 
 vector_entry IrqSPx, .spm_shim_exceptions
        b       .
-       check_vector_size IrqSPx
+end_vector_entry IrqSPx
 
 vector_entry FiqSPx, .spm_shim_exceptions
        b       .
-       check_vector_size FiqSPx
+end_vector_entry FiqSPx
 
 vector_entry SErrorSPx, .spm_shim_exceptions
        b       .
-       check_vector_size SErrorSPx
+end_vector_entry SErrorSPx
 
        /* -----------------------------------------------------
         * Lower EL using AArch64 : 0x400 - 0x600. No exceptions
@@ -93,19 +93,19 @@ do_smc:
 handle_sys_trap:
 panic:
        b       panic
-       check_vector_size SynchronousExceptionA64
+end_vector_entry SynchronousExceptionA64
 
 vector_entry IrqA64, .spm_shim_exceptions
        b       .
-       check_vector_size IrqA64
+end_vector_entry IrqA64
 
 vector_entry FiqA64, .spm_shim_exceptions
        b       .
-       check_vector_size FiqA64
+end_vector_entry FiqA64
 
 vector_entry SErrorA64, .spm_shim_exceptions
        b       .
-       check_vector_size SErrorA64
+end_vector_entry SErrorA64
 
        /* -----------------------------------------------------
         * Lower EL using AArch32 : 0x600 - 0x800
@@ -113,16 +113,16 @@ vector_entry SErrorA64, .spm_shim_exceptions
         */
 vector_entry SynchronousExceptionA32, .spm_shim_exceptions
        b       .
-       check_vector_size SynchronousExceptionA32
+end_vector_entry SynchronousExceptionA32
 
 vector_entry IrqA32, .spm_shim_exceptions
        b       .
-       check_vector_size IrqA32
+end_vector_entry IrqA32
 
 vector_entry FiqA32, .spm_shim_exceptions
        b       .
-       check_vector_size FiqA32
+end_vector_entry FiqA32
 
 vector_entry SErrorA32, .spm_shim_exceptions
        b       .
-       check_vector_size SErrorA32
+end_vector_entry SErrorA32
index b9b67f72201770bdb51a4e442c3326110bbc4634..0d61306faf663cea387fadea82c28a911c8f7c18 100644 (file)
@@ -107,38 +107,22 @@ void spm_sp_setup(sp_context_t *sp_ctx)
         * MMU-related registers
         * ---------------------
         */
+       xlat_ctx_t *xlat_ctx = sp_ctx->xlat_ctx_handle;
 
-       /* Set attributes in the right indices of the MAIR */
-       u_register_t mair_el1 =
-               MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX) |
-               MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR, ATTR_IWBWA_OWBWA_NTR_INDEX) |
-               MAIR_ATTR_SET(ATTR_NON_CACHEABLE, ATTR_NON_CACHEABLE_INDEX);
-
-       write_ctx_reg(get_sysregs_ctx(ctx), CTX_MAIR_EL1, mair_el1);
-
-       /* Setup TCR_EL1. */
-       u_register_t tcr_ps_bits = tcr_physical_addr_size_bits(PLAT_PHY_ADDR_SPACE_SIZE);
-
-       u_register_t tcr_el1 =
-               /* Size of region addressed by TTBR0_EL1 = 2^(64-T0SZ) bytes. */
-               (64 - __builtin_ctzl(PLAT_VIRT_ADDR_SPACE_SIZE))                |
-               /* Inner and outer WBWA, shareable. */
-               TCR_SH_INNER_SHAREABLE | TCR_RGN_OUTER_WBA | TCR_RGN_INNER_WBA  |
-               /* Set the granularity to 4KB. */
-               TCR_TG0_4K                                                      |
-               /* Limit Intermediate Physical Address Size. */
-               tcr_ps_bits << TCR_EL1_IPS_SHIFT                                |
-               /* Disable translations using TBBR1_EL1. */
-               TCR_EPD1_BIT
-               /* The remaining fields related to TBBR1_EL1 are left as zero. */
-       ;
+       uint64_t mmu_cfg_params[MMU_CFG_PARAM_MAX];
 
-       tcr_el1 &= ~(
-               /* Enable translations using TBBR0_EL1 */
-               TCR_EPD0_BIT
-       );
+       setup_mmu_cfg((uint64_t *)&mmu_cfg_params, 0, xlat_ctx->base_table,
+                     xlat_ctx->pa_max_address, xlat_ctx->va_max_address,
+                     EL1_EL0_REGIME);
+
+       write_ctx_reg(get_sysregs_ctx(ctx), CTX_MAIR_EL1,
+                     mmu_cfg_params[MMU_CFG_MAIR]);
+
+       write_ctx_reg(get_sysregs_ctx(ctx), CTX_TCR_EL1,
+                     mmu_cfg_params[MMU_CFG_TCR]);
 
-       write_ctx_reg(get_sysregs_ctx(ctx), CTX_TCR_EL1, tcr_el1);
+       write_ctx_reg(get_sysregs_ctx(ctx), CTX_TTBR0_EL1,
+                     mmu_cfg_params[MMU_CFG_TTBR0]);
 
        /* Setup SCTLR_EL1 */
        u_register_t sctlr_el1 = read_ctx_reg(get_sysregs_ctx(ctx), CTX_SCTLR_EL1);
@@ -174,13 +158,6 @@ void spm_sp_setup(sp_context_t *sp_ctx)
 
        write_ctx_reg(get_sysregs_ctx(ctx), CTX_SCTLR_EL1, sctlr_el1);
 
-       uint64_t *xlat_base =
-                       ((xlat_ctx_t *)sp_ctx->xlat_ctx_handle)->base_table;
-
-       /* Point TTBR0_EL1 at the tables of the context created for the SP. */
-       write_ctx_reg(get_sysregs_ctx(ctx), CTX_TTBR0_EL1,
-                       (u_register_t)xlat_base);
-
        /*
         * Setup other system registers
         * ----------------------------
index 8a1958ffaf4e75eb37a202efd7dff89c56b6ec62..7b10e3e308984a972c5d46c6dda595dd298eb0c6 100644 (file)
@@ -75,7 +75,7 @@ ${BINARY}: ${OBJECTS} Makefile
        @echo "  LD      $@"
        @echo 'const char build_msg[] = "Built : "__TIME__", "__DATE__; \
                 const char platform_msg[] = "${PLAT_MSG}";' | \
-                ${CC} -c ${CFLAGS} -xc - -o src/build_msg.o
+                ${HOSTCC} -c ${CFLAGS} -xc - -o src/build_msg.o
        ${Q}${HOSTCC} src/build_msg.o ${OBJECTS} ${LIB_DIR} ${LIB} -o $@
 
 %.o: %.c
diff --git a/tools/doimage/Makefile b/tools/doimage/Makefile
new file mode 100644 (file)
index 0000000..bc74369
--- /dev/null
@@ -0,0 +1,48 @@
+#
+# Copyright (C) 2018 Marvell International Ltd.
+#
+# SPDX-License-Identifier:     BSD-3-Clause
+# https://spdx.org/licenses
+
+PROJECT = doimage
+OBJECTS = doimage.o
+
+CFLAGS = -Wall -Werror
+ifeq (${DEBUG},1)
+  CFLAGS += -g -O0 -DDEBUG
+else
+  CFLAGS += -O2
+endif
+
+ifeq (${MARVELL_SECURE_BOOT},1)
+DOIMAGE_CC_FLAGS := -DCONFIG_MVEBU_SECURE_BOOT
+DOIMAGE_LD_FLAGS := -lconfig -lmbedtls -lmbedcrypto -lmbedx509
+endif
+
+CFLAGS += ${DOIMAGE_CC_FLAGS}
+
+# Make soft links and include from local directory otherwise wrong headers
+# could get pulled in from firmware tree.
+INCLUDE_PATHS = -I.
+
+CC := gcc
+RM := rm -rf
+
+.PHONY: all clean
+
+all: ${PROJECT}
+
+${PROJECT}: ${OBJECTS} Makefile
+       @echo "  LD      $@"
+       ${Q}${CC} ${OBJECTS} ${DOIMAGE_LD_FLAGS} -o $@
+       @echo
+       @echo "Built $@ successfully"
+       @echo
+
+%.o: %.c %.h Makefile
+       @echo "  CC      $<"
+       ${Q}${CC} -c ${CFLAGS} ${INCLUDE_PATHS} $< -o $@
+
+clean:
+       ${Q}${RM} ${PROJECT}
+       ${Q}${RM} ${OBJECTS}
diff --git a/tools/doimage/doimage.c b/tools/doimage/doimage.c
new file mode 100644 (file)
index 0000000..56dabba
--- /dev/null
@@ -0,0 +1,1755 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <stddef.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/stat.h>
+#include <sys/time.h>
+
+#ifdef CONFIG_MVEBU_SECURE_BOOT
+#include <libconfig.h> /* for parsing config file */
+
+#if !defined(MBEDTLS_CONFIG_FILE)
+#include "mbedtls/config.h"
+#else
+#include MBEDTLS_CONFIG_FILE
+#endif
+
+/* mbedTLS stuff */
+#if defined(MBEDTLS_BIGNUM_C) && defined(MBEDTLS_ENTROPY_C) && \
+       defined(MBEDTLS_SHA256_C) && \
+       defined(MBEDTLS_PK_PARSE_C) && defined(MBEDTLS_FS_IO) && \
+       defined(MBEDTLS_CTR_DRBG_C)
+#include <mbedtls/error.h>
+#include <mbedtls/entropy.h>
+#include <mbedtls/ctr_drbg.h>
+#include <mbedtls/md.h>
+#include <mbedtls/pk.h>
+#include <mbedtls/sha256.h>
+#include <mbedtls/x509.h>
+#else
+#error "Bad mbedTLS configuration!"
+#endif
+#endif /* CONFIG_MVEBU_SECURE_BOOT */
+
+#define MAX_FILENAME           256
+#define CSK_ARR_SZ             16
+#define CSK_ARR_EMPTY_FILE     "*"
+#define AES_KEY_BIT_LEN                256
+#define AES_KEY_BYTE_LEN       (AES_KEY_BIT_LEN >> 3)
+#define AES_BLOCK_SZ           16
+#define RSA_SIGN_BYTE_LEN      256
+#define MAX_RSA_DER_BYTE_LEN   524
+/* Number of address pairs in control array */
+#define CP_CTRL_EL_ARRAY_SZ    32
+
+#define VERSION_STRING         "Marvell(C) doimage utility version 3.2"
+
+/* A8K definitions */
+
+/* Extension header types */
+#define EXT_TYPE_SECURITY      0x1
+#define EXT_TYPE_BINARY                0x2
+
+#define MAIN_HDR_MAGIC         0xB105B002
+
+/* PROLOG alignment considerations:
+ *  128B: To allow supporting XMODEM protocol.
+ *  8KB: To align the boot image to the largest NAND page size, and simplify
+ *  the read operations from NAND.
+ *  We choose the largest page size, in order to use a single image for all
+ *  NAND page sizes.
+ */
+#define PROLOG_ALIGNMENT       (8 << 10)
+
+/* UART argument bitfield */
+#define UART_MODE_UNMODIFIED   0x0
+#define UART_MODE_DISABLE      0x1
+#define UART_MODE_UPDATE       0x2
+
+typedef struct _main_header {
+       uint32_t        magic;                  /*  0-3  */
+       uint32_t        prolog_size;            /*  4-7  */
+       uint32_t        prolog_checksum;        /*  8-11 */
+       uint32_t        boot_image_size;        /* 12-15 */
+       uint32_t        boot_image_checksum;    /* 16-19 */
+       uint32_t        rsrvd0;                 /* 20-23 */
+       uint32_t        load_addr;              /* 24-27 */
+       uint32_t        exec_addr;              /* 28-31 */
+       uint8_t         uart_cfg;               /*  32   */
+       uint8_t         baudrate;               /*  33   */
+       uint8_t         ext_count;              /*  34   */
+       uint8_t         aux_flags;              /*  35   */
+       uint32_t        io_arg_0;               /* 36-39 */
+       uint32_t        io_arg_1;               /* 40-43 */
+       uint32_t        io_arg_2;               /* 43-47 */
+       uint32_t        io_arg_3;               /* 48-51 */
+       uint32_t        rsrvd1;                 /* 52-55 */
+       uint32_t        rsrvd2;                 /* 56-59 */
+       uint32_t        rsrvd3;                 /* 60-63 */
+} header_t;
+
+typedef struct _ext_header {
+       uint8_t         type;
+       uint8_t         offset;
+       uint16_t        reserved;
+       uint32_t        size;
+} ext_header_t;
+
+typedef struct _sec_entry {
+       uint8_t         kak_key[MAX_RSA_DER_BYTE_LEN];
+       uint32_t        jtag_delay;
+       uint32_t        box_id;
+       uint32_t        flash_id;
+       uint32_t        jtag_en;
+       uint32_t        encrypt_en;
+       uint32_t        efuse_dis;
+       uint8_t         header_sign[RSA_SIGN_BYTE_LEN];
+       uint8_t         image_sign[RSA_SIGN_BYTE_LEN];
+       uint8_t         csk_keys[CSK_ARR_SZ][MAX_RSA_DER_BYTE_LEN];
+       uint8_t         csk_sign[RSA_SIGN_BYTE_LEN];
+       uint32_t        cp_ctrl_arr[CP_CTRL_EL_ARRAY_SZ];
+       uint32_t        cp_efuse_arr[CP_CTRL_EL_ARRAY_SZ];
+} sec_entry_t;
+
+/* A8K definitions end */
+
+/* UART argument bitfield */
+#define UART_MODE_UNMODIFIED   0x0
+#define UART_MODE_DISABLE      0x1
+#define UART_MODE_UPDATE       0x2
+
+#define uart_set_mode(arg, mode)       (arg |= (mode & 0x3))
+
+typedef struct _sec_options {
+#ifdef CONFIG_MVEBU_SECURE_BOOT
+       char aes_key_file[MAX_FILENAME+1];
+       char kak_key_file[MAX_FILENAME+1];
+       char csk_key_file[CSK_ARR_SZ][MAX_FILENAME+1];
+       uint32_t        box_id;
+       uint32_t        flash_id;
+       uint32_t        jtag_delay;
+       uint8_t         csk_index;
+       uint8_t         jtag_enable;
+       uint8_t         efuse_disable;
+       uint32_t        cp_ctrl_arr[CP_CTRL_EL_ARRAY_SZ];
+       uint32_t        cp_efuse_arr[CP_CTRL_EL_ARRAY_SZ];
+       mbedtls_pk_context      kak_pk;
+       mbedtls_pk_context      csk_pk[CSK_ARR_SZ];
+       uint8_t         aes_key[AES_KEY_BYTE_LEN];
+       uint8_t         *encrypted_image;
+       uint32_t        enc_image_sz;
+#endif
+} sec_options;
+
+typedef struct _options {
+       char bin_ext_file[MAX_FILENAME+1];
+       char sec_cfg_file[MAX_FILENAME+1];
+       sec_options *sec_opts;
+       uint32_t  load_addr;
+       uint32_t  exec_addr;
+       uint32_t  baudrate;
+       uint8_t   disable_print;
+       int8_t    key_index; /* For header signatures verification only */
+       uint32_t  nfc_io_args;
+} options_t;
+
+void usage_err(char *msg)
+{
+       fprintf(stderr, "Error: %s\n", msg);
+       fprintf(stderr, "run 'doimage -h' to get usage information\n");
+       exit(-1);
+}
+
+void usage(void)
+{
+       printf("\n\n%s\n\n", VERSION_STRING);
+       printf("Usage: doimage [options] <input_file> [output_file]\n");
+       printf("create bootrom image from u-boot and boot extensions\n\n");
+
+       printf("Arguments\n");
+       printf("  input_file   name of boot image file.\n");
+       printf("               if -p is used, name of the bootrom image file");
+       printf("               to parse.\n");
+       printf("  output_file  name of output bootrom image file\n");
+
+       printf("\nOptions\n");
+       printf("  -s        target SOC name. supports a8020,a7020\n");
+       printf("            different SOCs may have different boot image\n");
+       printf("            format so it's mandatory to know the target SOC\n");
+       printf("  -i        boot I/F name. supports nand, spi, nor\n");
+       printf("            This affects certain parameters coded in the\n");
+       printf("            image header\n");
+       printf("  -l        boot image load address. default is 0x0\n");
+       printf("  -e        boot image entry address. default is 0x0\n");
+       printf("  -b        binary extension image file.\n");
+       printf("            This image is executed before the boot image.\n");
+       printf("            This is typically used to initialize the memory ");
+       printf("            controller.\n");
+       printf("            Currently supports only a single file.\n");
+#ifdef CONFIG_MVEBU_SECURE_BOOT
+       printf("  -c        Make trusted boot image using parameters\n");
+       printf("            from the configuration file.\n");
+#endif
+       printf("  -p        Parse and display a pre-built boot image\n");
+#ifdef CONFIG_MVEBU_SECURE_BOOT
+       printf("  -k        Key index for RSA signatures verification\n");
+       printf("            when parsing the boot image\n");
+#endif
+       printf("  -m        Disable prints of bootrom and binary extension\n");
+       printf("  -u        UART baudrate used for bootrom prints.\n");
+       printf("            Must be multiple of 1200\n");
+       printf("  -h        Show this help message\n");
+       printf(" IO-ROM NFC-NAND boot parameters:\n");
+       printf("  -n        NAND device block size in KB [Default is 64KB].\n");
+       printf("  -t        NAND cell technology (SLC [Default] or MLC)\n");
+
+       exit(-1);
+}
+
+/* globals */
+options_t opts = {
+       .bin_ext_file = "NA",
+       .sec_cfg_file = "NA",
+       .sec_opts = 0,
+       .load_addr = 0x0,
+       .exec_addr = 0x0,
+       .disable_print = 0,
+       .baudrate = 0,
+       .key_index = -1,
+};
+
+int get_file_size(char *filename)
+{
+       struct stat st;
+
+       if (stat(filename, &st) == 0)
+               return st.st_size;
+
+       return -1;
+}
+
+uint32_t checksum32(uint32_t *start, int len)
+{
+       uint32_t sum = 0;
+       uint32_t *startp = start;
+
+       do {
+               sum += *startp;
+               startp++;
+               len -= 4;
+       } while (len > 0);
+
+       return sum;
+}
+
+/*******************************************************************************
+ *    create_rsa_signature (memory buffer content)
+ *          Create RSASSA-PSS/SHA-256 signature for memory buffer
+ *          using RSA Private Key
+ *    INPUT:
+ *          pk_ctx     Private Key context
+ *          input      memory buffer
+ *          ilen       buffer length
+ *          pers       personalization string for seeding the RNG.
+ *                     For instance a private key file name.
+ *    OUTPUT:
+ *          signature  RSA-2048 signature
+ *    RETURN:
+ *          0 on success
+ */
+#ifdef CONFIG_MVEBU_SECURE_BOOT
+int create_rsa_signature(mbedtls_pk_context    *pk_ctx,
+                        const unsigned char    *input,
+                        size_t                 ilen,
+                        const char             *pers,
+                        uint8_t                *signature)
+{
+       mbedtls_entropy_context         entropy;
+       mbedtls_ctr_drbg_context        ctr_drbg;
+       unsigned char                   hash[32];
+       unsigned char                   buf[MBEDTLS_MPI_MAX_SIZE];
+       int                             rval;
+
+       /* Not sure this is required,
+        * but it's safer to start with empty buffers
+        */
+       memset(hash, 0, sizeof(hash));
+       memset(buf, 0, sizeof(buf));
+
+       mbedtls_ctr_drbg_init(&ctr_drbg);
+       mbedtls_entropy_init(&entropy);
+
+       /* Seed the random number generator */
+       rval = mbedtls_ctr_drbg_seed(&ctr_drbg, mbedtls_entropy_func, &entropy,
+                               (const unsigned char *)pers, strlen(pers));
+       if (rval != 0) {
+               fprintf(stderr, " Failed in ctr_drbg_init call (%d)!\n", rval);
+               goto sign_exit;
+       }
+
+       /* The PK context should be already initialized.
+        * Set the padding type for this PK context
+        */
+       mbedtls_rsa_set_padding(mbedtls_pk_rsa(*pk_ctx),
+                               MBEDTLS_RSA_PKCS_V21, MBEDTLS_MD_SHA256);
+
+       /* First compute the SHA256 hash for the input blob */
+       mbedtls_sha256(input, ilen, hash, 0);
+
+       /* Then calculate the hash signature */
+       rval = mbedtls_rsa_rsassa_pss_sign(mbedtls_pk_rsa(*pk_ctx),
+                                          mbedtls_ctr_drbg_random,
+                                          &ctr_drbg,
+                                          MBEDTLS_RSA_PRIVATE,
+                                          MBEDTLS_MD_SHA256, 0, hash, buf);
+       if (rval != 0) {
+               fprintf(stderr,
+                       "Failed to create RSA signature for %s. Error %d\n",
+                       pers, rval);
+               goto sign_exit;
+       }
+       memcpy(signature, buf, 256);
+
+sign_exit:
+       mbedtls_ctr_drbg_free(&ctr_drbg);
+       mbedtls_entropy_free(&entropy);
+
+       return rval;
+} /* end of create_rsa_signature */
+
+/*******************************************************************************
+ *    verify_rsa_signature (memory buffer content)
+ *          Verify RSASSA-PSS/SHA-256 signature for memory buffer
+ *          using RSA Public Key
+ *    INPUT:
+ *          pub_key    Public Key buffer (DER encoded)
+ *          klen       Public Key buffer length
+ *          input      memory buffer
+ *          ilen       buffer length
+ *          pers       personalization string for seeding the RNG.
+ *          signature  RSA-2048 signature
+ *    OUTPUT:
+ *          none
+ *    RETURN:
+ *          0 on success
+ */
+int verify_rsa_signature(const unsigned char   *pub_key,
+                        size_t                 klen,
+                        const unsigned char    *input,
+                        size_t                 ilen,
+                        const char             *pers,
+                        uint8_t                *signature)
+{
+       mbedtls_entropy_context         entropy;
+       mbedtls_ctr_drbg_context        ctr_drbg;
+       mbedtls_pk_context              pk_ctx;
+       unsigned char                   hash[32];
+       int                             rval;
+
+       /* Not sure this is required,
+        * but it's safer to start with empty buffer
+        */
+       memset(hash, 0, sizeof(hash));
+
+       mbedtls_pk_init(&pk_ctx);
+       mbedtls_ctr_drbg_init(&ctr_drbg);
+       mbedtls_entropy_init(&entropy);
+
+       /* Seed the random number generator */
+       rval = mbedtls_ctr_drbg_seed(&ctr_drbg, mbedtls_entropy_func, &entropy,
+                               (const unsigned char *)pers, strlen(pers));
+       if (rval != 0) {
+               fprintf(stderr, " Failed in ctr_drbg_init call (%d)!\n", rval);
+               goto verify_exit;
+       }
+
+       /* Check ability to read the public key.
+        * Use the caller-supplied buffer length here instead of the
+        * previously hard-coded MAX_RSA_DER_BYTE_LEN, so the "klen"
+        * parameter is actually honored. All current callers pass
+        * MAX_RSA_DER_BYTE_LEN, so their behavior is unchanged.
+        */
+       rval = mbedtls_pk_parse_public_key(&pk_ctx, pub_key, klen);
+       if (rval != 0) {
+               fprintf(stderr, " Failed in pk_parse_public_key (%#x)!\n",
+                       rval);
+               goto verify_exit;
+       }
+
+       /* Set the padding type for the new PK context */
+       mbedtls_rsa_set_padding(mbedtls_pk_rsa(pk_ctx),
+                               MBEDTLS_RSA_PKCS_V21,
+                               MBEDTLS_MD_SHA256);
+
+       /* Compute the SHA256 hash for the input buffer */
+       mbedtls_sha256(input, ilen, hash, 0);
+
+       /* Verify the RSASSA-PSS signature against the computed hash */
+       rval = mbedtls_rsa_rsassa_pss_verify(mbedtls_pk_rsa(pk_ctx),
+                                            mbedtls_ctr_drbg_random,
+                                            &ctr_drbg,
+                                            MBEDTLS_RSA_PUBLIC,
+                                            MBEDTLS_MD_SHA256, 0,
+                                            hash, signature);
+       if (rval != 0)
+               fprintf(stderr, "Failed to verify signature (%d)!\n", rval);
+
+verify_exit:
+
+       mbedtls_pk_free(&pk_ctx);
+       mbedtls_ctr_drbg_free(&ctr_drbg);
+       mbedtls_entropy_free(&entropy);
+       return rval;
+} /* end of verify_rsa_signature */
+
+/*******************************************************************************
+ *    image_encrypt
+ *           Encrypt image buffer using AES-256-CBC scheme.
+ *           The resulting image is saved into opts.sec_opts->encrypted_image
+ *           and the adjusted image size into opts.sec_opts->enc_image_sz
+ *           First AES_BLOCK_SZ bytes of the output image contain IV
+ *    INPUT:
+ *          buf        Source buffer to encrypt
+ *          blen       Source buffer length
+ *    OUTPUT:
+ *          none
+ *    RETURN:
+ *          0 on success
+ */
+int image_encrypt(uint8_t *buf, uint32_t blen)
+{
+       struct timeval          tv;
+       char                    *ptmp = (char *)&tv;
+       unsigned char           digest[32];
+       unsigned char           IV[AES_BLOCK_SZ];
+       int                     i, k;
+       mbedtls_aes_context     aes_ctx;
+       int                     rval = -1;
+       uint8_t                 *test_img = 0;
+
+       /* The IV derivation below uses a 32-byte SHA-256 digest,
+        * so a larger AES block size cannot be supported
+        */
+       if (AES_BLOCK_SZ > 32) {
+               fprintf(stderr, "Unsupported AES block size %d\n",
+                       AES_BLOCK_SZ);
+               return rval;
+       }
+
+       mbedtls_aes_init(&aes_ctx);
+       memset(IV, 0, AES_BLOCK_SZ);
+       memset(digest, 0, 32);
+
+       /* Generate initialization vector and init the AES engine
+        * Use file name XOR current time and finally SHA-256
+        * [0...AES_BLOCK_SZ-1]
+        */
+       k = strlen(opts.sec_opts->aes_key_file);
+       if (k > AES_BLOCK_SZ)
+               k = AES_BLOCK_SZ;
+       memcpy(IV, opts.sec_opts->aes_key_file, k);
+       gettimeofday(&tv, 0);
+
+       /* XOR the raw bytes of the timeval into the IV seed material */
+       for (i = 0, k = 0; i < AES_BLOCK_SZ; i++,
+            k = (k+1) % sizeof(struct timeval))
+               IV[i] ^= ptmp[k];
+
+       /* compute SHA-256 digest of the results
+        * and use it as the init vector (IV)
+        */
+       mbedtls_sha256(IV, AES_BLOCK_SZ, digest, 0);
+       memcpy(IV, digest, AES_BLOCK_SZ);
+       mbedtls_aes_setkey_enc(&aes_ctx, opts.sec_opts->aes_key,
+                              AES_KEY_BIT_LEN);
+
+       /* The output image has to include extra space for IV
+        * and to be aligned to the AES block size.
+        * The input image buffer has to be already aligned to AES_BLOCK_SZ
+        * and padded with zeroes
+        */
+       opts.sec_opts->enc_image_sz = (blen + 2 * AES_BLOCK_SZ - 1) &
+                                     ~(AES_BLOCK_SZ - 1);
+       opts.sec_opts->encrypted_image = calloc(opts.sec_opts->enc_image_sz, 1);
+       if (opts.sec_opts->encrypted_image == 0) {
+               fprintf(stderr, "Failed to allocate encrypted image!\n");
+               goto encrypt_exit;
+       }
+
+       /* Put IV into the output buffer next to the encrypted image
+        * Since the IV is modified by the encryption function,
+        * this should be done now
+        */
+       memcpy(opts.sec_opts->encrypted_image +
+                  opts.sec_opts->enc_image_sz - AES_BLOCK_SZ,
+                  IV, AES_BLOCK_SZ);
+       rval = mbedtls_aes_crypt_cbc(&aes_ctx, MBEDTLS_AES_ENCRYPT,
+                            opts.sec_opts->enc_image_sz - AES_BLOCK_SZ,
+                            IV, buf, opts.sec_opts->encrypted_image);
+       if (rval != 0) {
+               fprintf(stderr, "Failed to encrypt the image! Error %d\n",
+                       rval);
+               goto encrypt_exit;
+       }
+
+       /* Release the encryption context before reusing it for decryption */
+       mbedtls_aes_free(&aes_ctx);
+
+       /* Try to decrypt the image and compare it with the original data */
+       mbedtls_aes_init(&aes_ctx);
+       mbedtls_aes_setkey_dec(&aes_ctx, opts.sec_opts->aes_key,
+                              AES_KEY_BIT_LEN);
+
+       test_img = calloc(opts.sec_opts->enc_image_sz - AES_BLOCK_SZ, 1);
+       if (test_img == 0) {
+               fprintf(stderr, "Failed to allocate test image!d\n");
+               rval = -1;
+               goto encrypt_exit;
+       }
+
+       /* Recover the (encryption-modified) IV saved at the buffer tail */
+       memcpy(IV, opts.sec_opts->encrypted_image +
+                  opts.sec_opts->enc_image_sz - AES_BLOCK_SZ,
+                  AES_BLOCK_SZ);
+       rval = mbedtls_aes_crypt_cbc(&aes_ctx, MBEDTLS_AES_DECRYPT,
+                            opts.sec_opts->enc_image_sz - AES_BLOCK_SZ,
+                            IV, opts.sec_opts->encrypted_image, test_img);
+       if (rval != 0) {
+               fprintf(stderr, "Failed to decrypt the image! Error %d\n",
+                       rval);
+               goto encrypt_exit;
+       }
+
+       /* Verify decrypt(encrypt(buf)) matches the original plaintext */
+       for (i = 0; i < blen; i++) {
+               if (buf[i] != test_img[i]) {
+                       fprintf(stderr, "Failed to compare the image after");
+                       fprintf(stderr, " decryption! Byte count is %d\n", i);
+                       rval = -1;
+                       goto encrypt_exit;
+               }
+       }
+
+encrypt_exit:
+
+       mbedtls_aes_free(&aes_ctx);
+       /* NOTE(review): free(NULL) is a no-op, so this guard is redundant */
+       if (test_img)
+               free(test_img);
+
+       return rval;
+} /* end of image_encrypt */
+
+/*******************************************************************************
+ *    verify_secure_header_signatures
+ *          Verify CSK array, header and image signatures and print results
+ *    INPUT:
+ *          main_hdr       Main header
+ *          sec_ext        Secure extension
+ *    OUTPUT:
+ *          none
+ *    RETURN:
+ *          0 on success
+ */
+int verify_secure_header_signatures(header_t *main_hdr, sec_entry_t *sec_ext)
+{
+       /* The boot image immediately follows the prolog in memory */
+       uint8_t *image = (uint8_t *)main_hdr + main_hdr->prolog_size;
+       uint8_t signature[RSA_SIGN_BYTE_LEN];
+       int             rval = -1;
+
+       /* Save headers signature and reset it in the secure header.
+        * The header signature was originally computed with its own
+        * field zeroed, so it must be zeroed again for verification.
+        */
+       memcpy(signature, sec_ext->header_sign, RSA_SIGN_BYTE_LEN);
+       memset(sec_ext->header_sign, 0, RSA_SIGN_BYTE_LEN);
+
+       fprintf(stdout, "\nCheck RSA Signatures\n");
+       fprintf(stdout, "#########################\n");
+       fprintf(stdout, "CSK Block Signature: ");
+       /* The CSK key array is signed with the KAK (root) key */
+       if (verify_rsa_signature(sec_ext->kak_key,
+                                MAX_RSA_DER_BYTE_LEN,
+                                &sec_ext->csk_keys[0][0],
+                                sizeof(sec_ext->csk_keys),
+                                "CSK Block Signature: ",
+                                sec_ext->csk_sign) != 0) {
+               fprintf(stdout, "ERROR\n");
+               goto ver_error;
+       }
+       fprintf(stdout, "OK\n");
+
+       /* Image/header checks need a valid CSK index from the user;
+        * -1 means the index was not supplied (see else branch)
+        */
+       if (opts.key_index != -1) {
+               fprintf(stdout, "Image Signature:     ");
+               if (verify_rsa_signature(sec_ext->csk_keys[opts.key_index],
+                                        MAX_RSA_DER_BYTE_LEN,
+                                        image, main_hdr->boot_image_size,
+                                        "Image Signature: ",
+                                        sec_ext->image_sign) != 0) {
+                       fprintf(stdout, "ERROR\n");
+                       goto ver_error;
+               }
+               fprintf(stdout, "OK\n");
+
+               fprintf(stdout, "Header Signature:    ");
+               if (verify_rsa_signature(sec_ext->csk_keys[opts.key_index],
+                                        MAX_RSA_DER_BYTE_LEN,
+                                        (uint8_t *)main_hdr,
+                                        main_hdr->prolog_size,
+                                        "Header Signature: ",
+                                        signature) != 0) {
+                       fprintf(stdout, "ERROR\n");
+                       goto ver_error;
+               }
+               fprintf(stdout, "OK\n");
+       } else {
+               fprintf(stdout, "SKIP Image and Header Signatures");
+               fprintf(stdout, " check (undefined key index)\n");
+       }
+
+       rval = 0;
+
+ver_error:
+       /* Restore the saved header signature before returning */
+       memcpy(sec_ext->header_sign, signature, RSA_SIGN_BYTE_LEN);
+       return rval;
+}
+
+/*******************************************************************************
+ *    verify_and_copy_file_name_entry
+ *          Validate a file name string from the configuration and copy it
+ *          into a fixed-size destination buffer.
+ *    INPUT:
+ *          element_name   config element name (used in error messages)
+ *          element        file name string to validate
+ *    OUTPUT:
+ *          copy_to        destination buffer (MAX_FILENAME bytes)
+ *    RETURN:
+ *          0 on success
+ */
+int verify_and_copy_file_name_entry(const char *element_name,
+                                   const char *element, char *copy_to)
+{
+       int element_length = strlen(element);
+
+       if (element_length >= MAX_FILENAME) {
+               fprintf(stderr, "The file name %s for %s is too long (%d). ",
+                       element, element_name, element_length);
+               fprintf(stderr, "Maximum allowed %d characters!\n",
+                       MAX_FILENAME);
+               return -1;
+       } else if (element_length == 0) {
+               fprintf(stderr, "The file name for %s is empty!\n",
+                       element_name);
+               return -1;
+       }
+       /* Copy the terminating NUL as well: element_length < MAX_FILENAME,
+        * so element_length + 1 bytes always fit in the destination.
+        * Previously only element_length bytes were copied, silently
+        * relying on the destination buffer being pre-zeroed.
+        */
+       memcpy(copy_to, element, element_length + 1);
+
+       return 0;
+}
+
+/*******************************************************************************
+ *    parse_sec_config_file
+ *          Read the secure boot configuration from a file
+ *          into internal structures
+ *    INPUT:
+ *          filename      File name
+ *    OUTPUT:
+ *          none
+ *    RETURN:
+ *          0 on success
+ */
+int parse_sec_config_file(char *filename)
+{
+       config_t                sec_cfg;
+       int                     array_sz, element, rval = -1;
+       const char              *cfg_string;
+       int32_t                 cfg_int32;
+       const config_setting_t  *csk_array, *control_array;
+       sec_options             *sec_opt = 0;
+
+       config_init(&sec_cfg);
+
+       if (config_read_file(&sec_cfg, filename) != CONFIG_TRUE) {
+               fprintf(stderr, "Failed to read data from config file ");
+               fprintf(stderr, "%s\n\t%s at line %d\n",
+                       filename, config_error_text(&sec_cfg),
+                       config_error_line(&sec_cfg));
+               goto exit_parse;
+       }
+
+       /* Zero-filled, so all string buffers start NUL-terminated */
+       sec_opt = (sec_options *)calloc(sizeof(sec_options), 1);
+       if (sec_opt == 0) {
+               fprintf(stderr,
+                       "Cannot allocate memory for secure boot options!\n");
+               goto exit_parse;
+       }
+
+       /* KAK file name */
+       if (config_lookup_string(&sec_cfg, "kak_key_file",
+                                &cfg_string) != CONFIG_TRUE) {
+               fprintf(stderr, "The \"kak_key_file\" undefined!\n");
+               goto exit_parse;
+       }
+       if (verify_and_copy_file_name_entry("kak_key_file",
+                                           cfg_string, sec_opt->kak_key_file))
+               goto exit_parse;
+
+
+       /* AES file name - can be empty/undefined */
+       if (config_lookup_string(&sec_cfg, "aes_key_file",
+                                &cfg_string) == CONFIG_TRUE) {
+               if (verify_and_copy_file_name_entry("aes_key_file",
+                                                   cfg_string,
+                                                   sec_opt->aes_key_file))
+                       goto exit_parse;
+       }
+
+       /* CSK file names array */
+       csk_array = config_lookup(&sec_cfg, "csk_key_file");
+       if (csk_array == NULL) {
+               fprintf(stderr, "The \"csk_key_file\" undefined!\n");
+               goto exit_parse;
+       }
+       array_sz = config_setting_length(csk_array);
+       if (array_sz > CSK_ARR_SZ) {
+               fprintf(stderr, "The \"csk_key_file\" array is too big! ");
+               fprintf(stderr, "Only first %d elements will be used\n",
+                       CSK_ARR_SZ);
+               array_sz = CSK_ARR_SZ;
+       } else if (array_sz == 0) {
+               fprintf(stderr, "The \"csk_key_file\" array is empty!\n");
+               goto exit_parse;
+       }
+
+       for (element = 0; element < array_sz; element++) {
+               cfg_string = config_setting_get_string_elem(csk_array, element);
+               if (verify_and_copy_file_name_entry(
+                               "csk_key_file", cfg_string,
+                               sec_opt->csk_key_file[element])) {
+                       fprintf(stderr, "Bad csk_key_file[%d] entry!\n",
+                               element);
+                       goto exit_parse;
+               }
+       }
+
+       /* JTAG options */
+       if (config_lookup_bool(&sec_cfg, "jtag.enable",
+                              &cfg_int32) != CONFIG_TRUE) {
+               fprintf(stderr, "Error obtaining \"jtag.enable\" element. ");
+               fprintf(stderr, "Using default - FALSE\n");
+               cfg_int32 = 0;
+       }
+       sec_opt->jtag_enable = cfg_int32;
+
+       if (config_lookup_int(&sec_cfg, "jtag.delay",
+                             &cfg_int32) != CONFIG_TRUE) {
+               fprintf(stderr, "Error obtaining \"jtag.delay\" element. ");
+               fprintf(stderr, "Using default - 0us\n");
+               cfg_int32 = 0;
+       }
+       sec_opt->jtag_delay = cfg_int32;
+
+       /* eFUSE option */
+       if (config_lookup_bool(&sec_cfg, "efuse_disable",
+                              &cfg_int32) != CONFIG_TRUE) {
+               fprintf(stderr, "Error obtaining \"efuse_disable\" element. ");
+               fprintf(stderr, "Using default - TRUE\n");
+               cfg_int32 = 1;
+       }
+       sec_opt->efuse_disable = cfg_int32;
+
+       /* Box ID option */
+       if (config_lookup_int(&sec_cfg, "box_id", &cfg_int32) != CONFIG_TRUE) {
+               fprintf(stderr, "Error obtaining \"box_id\" element. ");
+               fprintf(stderr, "Using default - 0x0\n");
+               cfg_int32 = 0;
+       }
+       sec_opt->box_id = cfg_int32;
+
+       /* Flash ID option */
+       if (config_lookup_int(&sec_cfg, "flash_id",
+                             &cfg_int32) != CONFIG_TRUE) {
+               fprintf(stderr, "Error obtaining \"flash_id\" element. ");
+               fprintf(stderr, "Using default - 0x0\n");
+               cfg_int32 = 0;
+       }
+       sec_opt->flash_id = cfg_int32;
+
+       /* CSK index option.
+        * Fixed: the original fprintf call was left unterminated (syntax
+        * error) and its message named "flash_id" (copy-paste mistake).
+        */
+       if (config_lookup_int(&sec_cfg, "csk_key_index",
+                             &cfg_int32) != CONFIG_TRUE) {
+               fprintf(stderr,
+                       "Error obtaining \"csk_key_index\" element. ");
+               fprintf(stderr, "Using default - 0x0\n");
+               cfg_int32 = 0;
+       }
+       sec_opt->csk_index = cfg_int32;
+
+       /* Secure boot control array */
+       control_array = config_lookup(&sec_cfg, "control");
+       if (control_array != NULL) {
+               array_sz = config_setting_length(control_array);
+               if (array_sz == 0)
+                       fprintf(stderr, "The \"control\" array is empty!\n");
+       } else {
+               fprintf(stderr, "The \"control\" is undefined!\n");
+               array_sz = 0;
+       }
+
+       /* Skip the fill loop when "control" is missing: the entries stay
+        * zero (sec_opt is calloc-ed) instead of passing a NULL setting to
+        * config_setting_get_int_elem(), which dereferences it.
+        */
+       for (element = 0;
+            control_array != NULL && element < CP_CTRL_EL_ARRAY_SZ;
+            element++) {
+               sec_opt->cp_ctrl_arr[element] =
+                       config_setting_get_int_elem(control_array, element * 2);
+               sec_opt->cp_efuse_arr[element] =
+                       config_setting_get_int_elem(control_array,
+                                                   element * 2 + 1);
+       }
+
+       opts.sec_opts = sec_opt;
+       rval = 0;
+
+exit_parse:
+       config_destroy(&sec_cfg);
+       if (sec_opt && (rval != 0))
+               free(sec_opt);
+       return rval;
+} /* end of parse_sec_config_file */
+
+/*******************************************************************************
+ *    format_sec_ext
+ *          Build the secure boot extension from the configuration file:
+ *          read the KAK/CSK private keys, derive their DER-encoded public
+ *          keys, sign the CSK block, read the optional AES key, and write
+ *          the extension header and body to the output file.
+ *          Image and header signature fields are left empty at this stage
+ *          (they are filled later by finalize_secure_ext()).
+ *    INPUT:
+ *          filename   secure boot configuration file name
+ *          out_fd     output file stream
+ *    RETURN:
+ *          0 on success
+ */
+int format_sec_ext(char *filename, FILE *out_fd)
+{
+       ext_header_t    header;
+       sec_entry_t     sec_ext;
+       int             index;
+       int             written;
+
+#define DER_BUF_SZ     1600
+
+       /* Zero the extension so that fields not filled below (e.g. the
+        * image/header signature placeholders) are not written to the
+        * output file as uninitialized stack bytes
+        */
+       memset(&sec_ext, 0, sizeof(sec_ext));
+
+       /* First, parse the configuration file */
+       if (parse_sec_config_file(filename)) {
+               fprintf(stderr,
+                       "failed parsing configuration file %s\n", filename);
+               return 1;
+       }
+
+       /* Everything except signatures can be created at this stage */
+       header.type = EXT_TYPE_SECURITY;
+       header.offset = 0;
+       header.size = sizeof(sec_entry_t);
+       header.reserved = 0;
+
+       /* Bring up RSA context and read private keys from their files */
+       for (index = 0; index < (CSK_ARR_SZ + 1); index++) {
+               /* for every private key file; index CSK_ARR_SZ is the KAK */
+               mbedtls_pk_context      *pk_ctx = (index == CSK_ARR_SZ) ?
+                                       &opts.sec_opts->kak_pk :
+                                       &opts.sec_opts->csk_pk[index];
+               char            *fname = (index == CSK_ARR_SZ) ?
+                                       opts.sec_opts->kak_key_file :
+                                       opts.sec_opts->csk_key_file[index];
+               uint8_t         *out_der_key = (index == CSK_ARR_SZ) ?
+                                       sec_ext.kak_key :
+                                       sec_ext.csk_keys[index];
+               /* mbedtls_pk_write_pubkey_der() returns int (negative on
+                * error); the previous size_t type made the error check
+                * "output_len < 0" always false
+                */
+               int             output_len;
+               unsigned char   output_buf[DER_BUF_SZ];
+               unsigned char   *der_buf_start;
+
+               /* Handle invalid/reserved file names */
+               if (strncmp(CSK_ARR_EMPTY_FILE, fname,
+                           strlen(CSK_ARR_EMPTY_FILE)) == 0) {
+                       if (opts.sec_opts->csk_index == index) {
+                               fprintf(stderr,
+                                       "CSK file with index %d cannot be %s\n",
+                                       index, CSK_ARR_EMPTY_FILE);
+                               return 1;
+                       } else if (index == CSK_ARR_SZ) {
+                               fprintf(stderr, "KAK file name cannot be %s\n",
+                                       CSK_ARR_EMPTY_FILE);
+                               return 1;
+                       }
+                       /* this key will be empty in CSK array */
+                       continue;
+               }
+
+               mbedtls_pk_init(pk_ctx);
+               /* Read the private RSA key into the context
+                * and verify it (no password)
+                */
+               if (mbedtls_pk_parse_keyfile(pk_ctx, fname, "") != 0) {
+                       fprintf(stderr,
+                               "Cannot read RSA private key file %s\n", fname);
+                       return 1;
+               }
+
+               /* Create a public key out of private one
+                * and store it in DER format
+                */
+               output_len = mbedtls_pk_write_pubkey_der(pk_ctx,
+                                                        output_buf,
+                                                        DER_BUF_SZ);
+               if (output_len < 0) {
+                       fprintf(stderr,
+                               "Failed to create DER coded PUB key (%s)\n",
+                               fname);
+                       return 1;
+               }
+               /* Data in the output buffer is aligned to the buffer end */
+               der_buf_start = output_buf + sizeof(output_buf) - output_len;
+               /* In the header DER data is aligned
+                * to the start of appropriate field
+                */
+               memcpy(out_der_key, der_buf_start, output_len);
+
+       } /* for every private key file */
+
+       /* The CSK block signature can be created here */
+       if (create_rsa_signature(&opts.sec_opts->kak_pk,
+                                &sec_ext.csk_keys[0][0],
+                                sizeof(sec_ext.csk_keys),
+                                opts.sec_opts->csk_key_file[
+                                        opts.sec_opts->csk_index],
+                                sec_ext.csk_sign) != 0) {
+               fprintf(stderr, "Failed to sign CSK keys block!\n");
+               return 1;
+       }
+       /* Check that everything is correct */
+       if (verify_rsa_signature(sec_ext.kak_key, MAX_RSA_DER_BYTE_LEN,
+                                &sec_ext.csk_keys[0][0],
+                                sizeof(sec_ext.csk_keys),
+                                opts.sec_opts->kak_key_file,
+                                sec_ext.csk_sign) != 0) {
+               fprintf(stderr, "Failed to verify CSK keys block signature!\n");
+               return 1;
+       }
+
+       /* AES encryption stuff */
+       if (strlen(opts.sec_opts->aes_key_file) != 0) {
+               FILE            *in_fd;
+
+               in_fd = fopen(opts.sec_opts->aes_key_file, "rb");
+               if (in_fd == NULL) {
+                       fprintf(stderr, "Failed to open AES key file %s\n",
+                               opts.sec_opts->aes_key_file);
+                       return 1;
+               }
+
+               /* Read the AES key in ASCII format byte by byte */
+               for (index = 0; index < AES_KEY_BYTE_LEN; index++) {
+                       if (fscanf(in_fd, "%02hhx",
+                           opts.sec_opts->aes_key + index) != 1) {
+                               fprintf(stderr,
+                                       "Failed to read AES key byte %d ",
+                                       index);
+                               fprintf(stderr,
+                                       "from file %s\n",
+                                       opts.sec_opts->aes_key_file);
+                               fclose(in_fd);
+                               return 1;
+                       }
+               }
+               fclose(in_fd);
+               sec_ext.encrypt_en = 1;
+       } else {
+               sec_ext.encrypt_en = 0;
+       }
+
+       /* Fill the rest of the trusted boot extension fields */
+       sec_ext.box_id          = opts.sec_opts->box_id;
+       sec_ext.flash_id        = opts.sec_opts->flash_id;
+       sec_ext.efuse_dis       = opts.sec_opts->efuse_disable;
+       sec_ext.jtag_delay      = opts.sec_opts->jtag_delay;
+       sec_ext.jtag_en         = opts.sec_opts->jtag_enable;
+
+       memcpy(sec_ext.cp_ctrl_arr,
+              opts.sec_opts->cp_ctrl_arr,
+              sizeof(uint32_t) * CP_CTRL_EL_ARRAY_SZ);
+       memcpy(sec_ext.cp_efuse_arr,
+              opts.sec_opts->cp_efuse_arr,
+              sizeof(uint32_t) * CP_CTRL_EL_ARRAY_SZ);
+
+       /* Write the resulting extension to file
+        * (image and header signature fields are still empty)
+        */
+
+       /* Write extension header */
+       written = fwrite(&header, sizeof(ext_header_t), 1, out_fd);
+       if (written != 1) {
+               fprintf(stderr,
+                       "Failed to write SEC extension header to the file\n");
+               return 1;
+       }
+       /* Write extension body */
+       written = fwrite(&sec_ext, sizeof(sec_entry_t), 1, out_fd);
+       if (written != 1) {
+               fprintf(stderr,
+                       "Failed to write SEC extension body to the file\n");
+               return 1;
+       }
+
+       return 0;
+}
+
+/*******************************************************************************
+ *    finalize_secure_ext
+ *          Make final changes to secure extension - calculate image and header
+ *          signatures and encrypt the image if needed.
+ *          The main header checksum and image size fields updated accordingly
+ *    INPUT:
+ *          header       Main header
+ *          prolog_buf   the entire prolog buffer
+ *          prolog_size  prolog buffer length
+ *          image_buf    buffer containing the input binary image
+ *          image_size   image buffer size.
+ *    OUTPUT:
+ *          none
+ *    RETURN:
+ *          0 on success
+ */
+int finalize_secure_ext(header_t *header,
+                       uint8_t *prolog_buf, uint32_t prolog_size,
+                       uint8_t *image_buf, int image_size)
+{
+       int             cur_ext, offset;
+       uint8_t         *final_image = image_buf;
+       uint32_t        final_image_sz = image_size;
+       /* Header signature is built in a local buffer first, because the
+        * prolog is signed while its own signature field is still zeroed
+        */
+       uint8_t         hdr_sign[RSA_SIGN_BYTE_LEN];
+       sec_entry_t     *sec_ext = 0;
+
+       /* Find the Trusted Boot Header between available extensions */
+       for (cur_ext = 0, offset = sizeof(header_t);
+            cur_ext < header->ext_count; cur_ext++) {
+               ext_header_t *ext_hdr = (ext_header_t *)(prolog_buf + offset);
+
+               if (ext_hdr->type == EXT_TYPE_SECURITY) {
+                       sec_ext = (sec_entry_t *)(prolog_buf + offset +
+                                  sizeof(ext_header_t) + ext_hdr->offset);
+                       break;
+               }
+
+               offset += sizeof(ext_header_t);
+               /* If offset is Zero, the extension follows its header */
+               if (ext_hdr->offset == 0)
+                       offset += ext_hdr->size;
+       }
+
+       if (sec_ext == 0) {
+               fprintf(stderr, "Error: No Trusted Boot extension found!\n");
+               return -1;
+       }
+
+       if (sec_ext->encrypt_en) {
+               /* Encrypt the image if needed */
+               fprintf(stdout, "Encrypting the image...\n");
+
+               if (image_encrypt(image_buf, image_size) != 0) {
+                       fprintf(stderr, "Failed to encrypt the image!\n");
+                       return -1;
+               }
+
+               /* Image size and checksum should be updated after encryption.
+                * This way the image could be verified by the BootROM
+                * before decryption.
+                */
+               final_image = opts.sec_opts->encrypted_image;
+               final_image_sz = opts.sec_opts->enc_image_sz;
+
+               header->boot_image_size = final_image_sz;
+               header->boot_image_checksum =
+                       checksum32((uint32_t *)final_image, final_image_sz);
+       } /* AES encryption */
+
+       /* Create the image signature first, since it will be later
+        * signed along with the header signature.
+        * Both signatures use the CSK selected by csk_index.
+        */
+       if (create_rsa_signature(&opts.sec_opts->csk_pk[
+                                       opts.sec_opts->csk_index],
+                                final_image, final_image_sz,
+                                opts.sec_opts->csk_key_file[
+                                       opts.sec_opts->csk_index],
+                                sec_ext->image_sign) != 0) {
+               fprintf(stderr, "Failed to sign image!\n");
+               return -1;
+       }
+       /* Check that the image signature is correct */
+       if (verify_rsa_signature(sec_ext->csk_keys[opts.sec_opts->csk_index],
+                                MAX_RSA_DER_BYTE_LEN,
+                                final_image, final_image_sz,
+                                opts.sec_opts->csk_key_file[
+                                        opts.sec_opts->csk_index],
+                                sec_ext->image_sign) != 0) {
+               fprintf(stderr, "Failed to verify image signature!\n");
+               return -1;
+       }
+
+       /* Sign the headers and all the extensions block
+        * when the header signature field is empty
+        */
+       if (create_rsa_signature(&opts.sec_opts->csk_pk[
+                                        opts.sec_opts->csk_index],
+                                prolog_buf, prolog_size,
+                                opts.sec_opts->csk_key_file[
+                                        opts.sec_opts->csk_index],
+                                hdr_sign) != 0) {
+               fprintf(stderr, "Failed to sign header!\n");
+               return -1;
+       }
+       /* Check that the header signature is correct */
+       if (verify_rsa_signature(sec_ext->csk_keys[opts.sec_opts->csk_index],
+                                MAX_RSA_DER_BYTE_LEN,
+                                prolog_buf, prolog_size,
+                                opts.sec_opts->csk_key_file[
+                                        opts.sec_opts->csk_index],
+                                hdr_sign) != 0) {
+               fprintf(stderr, "Failed to verify header signature!\n");
+               return -1;
+       }
+
+       /* Finally, copy the header signature into the trusted boot extension */
+       memcpy(sec_ext->header_sign, hdr_sign, RSA_SIGN_BYTE_LEN);
+
+       return 0;
+}
+
+#endif /* CONFIG_MVEBU_SECURE_BOOT */
+
+
+/* Value output formats for do_print_field() */
+#define FMT_HEX                0
+#define FMT_DEC                1
+#define FMT_BIN                2       /* not handled; prints like FMT_NONE */
+#define FMT_NONE       3
+/* Print one field of a dumped structure: its byte range within the image
+ * followed by the field name and (optionally) its value in the requested
+ * format. FMT_HEX/FMT_DEC print the value; anything else prints only a
+ * newline.
+ */
+void do_print_field(unsigned int value, char *name,
+                   int start, int size, int format)
+{
+       /* "[first : last]  name" prefix, common to all formats */
+       fprintf(stdout, "[0x%05x : 0x%05x]  %-26s",
+               start, start + size - 1, name);
+
+       if (format == FMT_HEX)
+               printf("0x%x\n", value);
+       else if (format == FMT_DEC)
+               printf("%d\n", value);
+       else
+               printf("\n");
+}
+
/*
 * Print a named struct field: the value comes from st->field, the label
 * is the stringized field name, and the byte range is computed from
 * offsetof()/sizeof() relative to the file offset 'base'.
 */
#define print_field(st, type, field, hex, base) \
			do_print_field((int)st->field, #field, \
			base + offsetof(type, field), sizeof(st->field), hex)
+
/*
 * Dump every field of the main image header in human-readable form.
 * 'base' is the file offset of the header (0 for the main header) and
 * feeds the per-field byte-range computation in print_field().
 * Returns sizeof(header_t) so the caller can advance its offset.
 */
int print_header(uint8_t *buf, int base)
{
	header_t *main_hdr;

	main_hdr = (header_t *)buf;

	fprintf(stdout, "########### Header ##############\n");
	print_field(main_hdr, header_t, magic, FMT_HEX, base);
	print_field(main_hdr, header_t, prolog_size, FMT_DEC, base);
	print_field(main_hdr, header_t, prolog_checksum, FMT_HEX, base);
	print_field(main_hdr, header_t, boot_image_size, FMT_DEC, base);
	print_field(main_hdr, header_t, boot_image_checksum, FMT_HEX, base);
	print_field(main_hdr, header_t, rsrvd0, FMT_HEX, base);
	print_field(main_hdr, header_t, load_addr, FMT_HEX, base);
	print_field(main_hdr, header_t, exec_addr, FMT_HEX, base);
	print_field(main_hdr, header_t, uart_cfg, FMT_HEX, base);
	print_field(main_hdr, header_t, baudrate, FMT_HEX, base);
	print_field(main_hdr, header_t, ext_count, FMT_DEC, base);
	print_field(main_hdr, header_t, aux_flags, FMT_HEX, base);
	print_field(main_hdr, header_t, io_arg_0, FMT_HEX, base);
	print_field(main_hdr, header_t, io_arg_1, FMT_HEX, base);
	print_field(main_hdr, header_t, io_arg_2, FMT_HEX, base);
	print_field(main_hdr, header_t, io_arg_3, FMT_HEX, base);
	print_field(main_hdr, header_t, rsrvd1, FMT_HEX, base);
	print_field(main_hdr, header_t, rsrvd2, FMT_HEX, base);
	print_field(main_hdr, header_t, rsrvd3, FMT_HEX, base);

	return sizeof(header_t);
}
+
/*
 * Print the generic extension-header fields.
 * Returns base + sizeof(ext_header_t), i.e. the file offset where the
 * extension payload begins.
 */
int print_ext_hdr(ext_header_t *ext_hdr, int base)
{
	print_field(ext_hdr, ext_header_t, type, FMT_HEX, base);
	print_field(ext_hdr, ext_header_t, offset, FMT_HEX, base);
	print_field(ext_hdr, ext_header_t, reserved, FMT_HEX, base);
	print_field(ext_hdr, ext_header_t, size, FMT_DEC, base);

	return base + sizeof(ext_header_t);
}
+
/*
 * Dump the secure-boot extension: KAK key blob, fuse/JTAG control words,
 * header and image signatures, the CSK key array and the control-register
 * block.  Offsets of the fixed-size blobs are tracked manually in
 * 'new_base' because print_field() only covers the scalar fields.
 */
void print_sec_ext(ext_header_t *ext_hdr, int base)
{
	sec_entry_t	*sec_entry;
	uint32_t	new_base;

	fprintf(stdout, "\n########### Secure extension ###########\n");

	new_base = print_ext_hdr(ext_hdr, base);

	/* Payload (sec_entry_t) starts right after the extension header */
	sec_entry = (sec_entry_t *)(ext_hdr + 1);

	do_print_field(0, "KAK key", new_base, MAX_RSA_DER_BYTE_LEN, FMT_NONE);
	new_base += MAX_RSA_DER_BYTE_LEN;
	/* NOTE(review): the five fields below are printed relative to
	 * 'base' (offsetof within sec_entry_t), not 'new_base'; the two
	 * offset schemes differ by the extension-header size plus the KAK
	 * key - confirm the intended report layout.
	 */
	print_field(sec_entry, sec_entry_t, jtag_delay, FMT_DEC, base);
	print_field(sec_entry, sec_entry_t, box_id, FMT_HEX, base);
	print_field(sec_entry, sec_entry_t, flash_id, FMT_HEX, base);
	print_field(sec_entry, sec_entry_t, encrypt_en, FMT_DEC, base);
	print_field(sec_entry, sec_entry_t, efuse_dis, FMT_DEC, base);
	/* Skip 6 words here although only 5 fields were printed above -
	 * presumably one reserved word follows efuse_dis; verify against
	 * sec_entry_t.
	 */
	new_base += 6 * sizeof(uint32_t);
	do_print_field(0, "header signature",
		       new_base, RSA_SIGN_BYTE_LEN, FMT_NONE);
	new_base += RSA_SIGN_BYTE_LEN;
	do_print_field(0, "image signature",
		       new_base, RSA_SIGN_BYTE_LEN, FMT_NONE);
	new_base += RSA_SIGN_BYTE_LEN;
	do_print_field(0, "CSK keys", new_base,
		       CSK_ARR_SZ * MAX_RSA_DER_BYTE_LEN, FMT_NONE);
	new_base += CSK_ARR_SZ * MAX_RSA_DER_BYTE_LEN;
	do_print_field(0, "CSK block signature",
		       new_base, RSA_SIGN_BYTE_LEN, FMT_NONE);
	new_base += RSA_SIGN_BYTE_LEN;
	do_print_field(0, "control", new_base,
		       CP_CTRL_EL_ARRAY_SZ * 2, FMT_NONE);

}
+
/*
 * Dump a binary extension: the generic extension header followed by the
 * location/size of its opaque payload (contents are not decoded).
 */
void print_bin_ext(ext_header_t *ext_hdr, int base)
{
	fprintf(stdout, "\n########### Binary extension ###########\n");
	base = print_ext_hdr(ext_hdr, base);
	do_print_field(0, "binary image", base, ext_hdr->size, FMT_NONE);
}
+
/*
 * Walk 'count' extensions located at 'buf' (file offset 'base') and dump
 * each known type.  'ext_size' is the total space the prolog reserves for
 * extensions; whatever remains after the last extension is reported as
 * padding.  Returns ext_size so the caller can advance past the area.
 */
int print_extension(void *buf, int base, int count, int ext_size)
{
	ext_header_t *ext_hdr = buf;
	int pad = ext_size;
	int curr_size;

	while (count--) {
		/* Extension types other than BINARY/SECURITY are skipped
		 * silently but still advance the cursor below.
		 */
		if (ext_hdr->type == EXT_TYPE_BINARY)
			print_bin_ext(ext_hdr, base);
		else if (ext_hdr->type == EXT_TYPE_SECURITY)
			print_sec_ext(ext_hdr, base);

		curr_size = sizeof(ext_header_t) + ext_hdr->size;
		base += curr_size;
		pad  -= curr_size;
		ext_hdr = (ext_header_t *)((uintptr_t)ext_hdr + curr_size);
	}

	if (pad)
		do_print_field(0, "padding", base, pad, FMT_NONE);

	return ext_size;
}
+
/*
 * Parse and validate an existing boot image held in 'buf' (size bytes):
 * dump the prolog (header + extensions + padding) and payload location,
 * then verify the header magic, the prolog checksum, the boot-image
 * checksum and - for secure images - the RSA signatures.
 * Returns 0 when the image checks out, 1 otherwise.
 * Note: temporarily clears main_hdr->prolog_checksum in place while
 * recomputing the checksum, so 'buf' is modified.
 */
int parse_image(uint8_t *buf, int size)
{
	int base = 0;
	int ret = 1;
	header_t *main_hdr;
	uint32_t checksum, prolog_checksum;


	fprintf(stdout,
		"################### Prolog Start ######################\n\n");
	main_hdr = (header_t *)buf;
	base += print_header(buf, base);

	if (main_hdr->ext_count)
		base += print_extension(buf + base, base,
					main_hdr->ext_count,
					main_hdr->prolog_size -
					sizeof(header_t));

	if (base < main_hdr->prolog_size) {
		fprintf(stdout, "\n########### Padding ##############\n");
		do_print_field(0, "prolog padding",
			       base, main_hdr->prolog_size - base, FMT_HEX);
		base = main_hdr->prolog_size;
	}
	fprintf(stdout,
		"\n################### Prolog End ######################\n");

	fprintf(stdout,
		"\n################### Boot image ######################\n");

	/* NOTE(review): the trailing 4 bytes are excluded from the payload
	 * range here - presumably a checksum/terminator word; confirm.
	 */
	do_print_field(0, "boot image", base, size - base - 4, FMT_NONE);

	fprintf(stdout,
		"################### Image end ########################\n");

	/* Check sanity for certain values */
	printf("\nChecking values:\n");

	if (main_hdr->magic == MAIN_HDR_MAGIC) {
		fprintf(stdout, "Headers magic:    OK!\n");
	} else {
		fprintf(stderr,
			"\n****** ERROR: HEADER MAGIC 0x%08x != 0x%08x\n",
			main_hdr->magic, MAIN_HDR_MAGIC);
		goto error;
	}

	/* headers checksum */
	/* clear the checksum field in header to calculate checksum */
	prolog_checksum = main_hdr->prolog_checksum;
	main_hdr->prolog_checksum = 0;
	checksum = checksum32((uint32_t *)buf, main_hdr->prolog_size);

	if (checksum == prolog_checksum) {
		fprintf(stdout, "Headers checksum: OK!\n");
	} else {
		fprintf(stderr,
			"\n***** ERROR: BAD HEADER CHECKSUM 0x%08x != 0x%08x\n",
			checksum, prolog_checksum);
		goto error;
	}

	/* boot image checksum */
	checksum = checksum32((uint32_t *)(buf + main_hdr->prolog_size),
			      main_hdr->boot_image_size);
	if (checksum == main_hdr->boot_image_checksum) {
		fprintf(stdout, "Image checksum:   OK!\n");
	} else {
		fprintf(stderr,
			"\n****** ERROR: BAD IMAGE CHECKSUM 0x%08x != 0x%08x\n",
			checksum, main_hdr->boot_image_checksum);
		goto error;
	}

#ifdef CONFIG_MVEBU_SECURE_BOOT
	/* RSA signatures */
	if (main_hdr->ext_count) {
		uint8_t		ext_num = main_hdr->ext_count;
		ext_header_t	*ext_hdr = (ext_header_t *)(main_hdr + 1);
		unsigned char	hash[32];
		int		i;

		/* Find the (first) security extension and verify it */
		while (ext_num--) {
			if (ext_hdr->type == EXT_TYPE_SECURITY) {
				sec_entry_t  *sec_entry =
						(sec_entry_t *)(ext_hdr + 1);

				ret = verify_secure_header_signatures(
							main_hdr, sec_entry);
				if (ret != 0) {
					fprintf(stderr,
						"\n****** FAILED TO VERIFY ");
					fprintf(stderr,
						"RSA SIGNATURES ********\n");
					goto error;
				}

				/* Print the KAK key hash so it can be
				 * compared with the value burned in fuses
				 */
				mbedtls_sha256(sec_entry->kak_key,
					       MAX_RSA_DER_BYTE_LEN, hash, 0);
				fprintf(stdout,
					">>>>>>>>>> KAK KEY HASH >>>>>>>>>>\n");
				fprintf(stdout, "SHA256: ");
				for (i = 0; i < 32; i++)
					fprintf(stdout, "%02X", hash[i]);

				fprintf(stdout,
					"\n<<<<<<<<< KAK KEY HASH <<<<<<<<<\n");

				break;
			}
			ext_hdr =
				(ext_header_t *)((uint8_t *)(ext_hdr + 1) +
				 ext_hdr->size);
		}
	}
#endif

	ret = 0;
error:
	return ret;
}
+
+int format_bin_ext(char *filename, FILE *out_fd)
+{
+       ext_header_t header;
+       FILE *in_fd;
+       int size, written;
+       int aligned_size, pad_bytes;
+       char c;
+
+       in_fd = fopen(filename, "rb");
+       if (in_fd == NULL) {
+               fprintf(stderr, "failed to open bin extension file %s\n",
+                       filename);
+               return 1;
+       }
+
+       size = get_file_size(filename);
+       if (size <= 0) {
+               fprintf(stderr, "bin extension file size is bad\n");
+               return 1;
+       }
+
+       /* Align extension size to 8 bytes */
+       aligned_size = (size + 7) & (~7);
+       pad_bytes    = aligned_size - size;
+
+       header.type = EXT_TYPE_BINARY;
+       header.offset = 0;
+       header.size = aligned_size;
+       header.reserved = 0;
+
+       /* Write header */
+       written = fwrite(&header, sizeof(ext_header_t), 1, out_fd);
+       if (written != 1) {
+               fprintf(stderr, "failed writing header to extension file\n");
+               return 1;
+       }
+
+       /* Write image */
+       while (size--) {
+               c = getc(in_fd);
+               fputc(c, out_fd);
+       }
+
+       while (pad_bytes--)
+               fputc(0, out_fd);
+
+       fclose(in_fd);
+
+       return 0;
+}
+
/* ****************************************
 *
 * Write all extensions (binary, secure
 * extensions) to file
 *
 * ****************************************/

/*
 * Build the extensions blob file 'ext_filename' from the binary
 * extension file and/or the secure-boot configuration given on the
 * command line (the sentinel "NA" means "not supplied").
 * Returns 0 on success, 1 on failure; the blob file is flushed and
 * closed on all paths.
 */
int format_extensions(char *ext_filename)
{
	FILE *out_fd;
	int ret = 0;

	out_fd = fopen(ext_filename, "wb");
	if (out_fd == NULL) {
		/* NOTE(review): message lacks a trailing '\n' */
		fprintf(stderr, "failed to open extension output file %s",
			ext_filename);
		return 1;
	}

	/* Binary extension requested via -b? */
	if (strncmp(opts.bin_ext_file, "NA", MAX_FILENAME)) {
		if (format_bin_ext(opts.bin_ext_file, out_fd)) {
			ret = 1;
			goto error;
		}
	}
#ifdef CONFIG_MVEBU_SECURE_BOOT
	/* Secure extension requested via -c? */
	if (strncmp(opts.sec_cfg_file, "NA", MAX_FILENAME)) {
		if (format_sec_ext(opts.sec_cfg_file, out_fd)) {
			ret = 1;
			goto error;
		}
	}
#endif

error:
	fflush(out_fd);
	fclose(out_fd);
	return ret;
}
+
/*
 * Fill the UART configuration fields of the main header from the command
 * line options.  The baudrate is stored divided by 1200 (0 keeps the
 * field cleared); -m disables boot-time prints.
 */
void update_uart(header_t *header)
{
	header->uart_cfg = 0;
	header->baudrate = 0;

	/* NOTE(review): uart_set_mode() presumably stores the mode into
	 * the uart_cfg field passed as an lvalue - confirm the macro.
	 */
	if (opts.disable_print)
		uart_set_mode(header->uart_cfg, UART_MODE_DISABLE);

	if (opts.baudrate)
		header->baudrate = (opts.baudrate / 1200);
}
+
+/* ****************************************
+ *
+ * Write the image prolog, i.e.
+ * main header and extensions, to file
+ *
+ * ****************************************/
+
+int write_prolog(int ext_cnt, char *ext_filename,
+                uint8_t *image_buf, int image_size, FILE *out_fd)
+{
+       header_t                *header;
+       int main_hdr_size = sizeof(header_t);
+       int prolog_size = main_hdr_size;
+       FILE *ext_fd;
+       char *buf;
+       int written, read;
+       int ret = 1;
+
+
+       if (ext_cnt)
+               prolog_size +=  get_file_size(ext_filename);
+
+       prolog_size = ((prolog_size + PROLOG_ALIGNMENT) &
+                    (~(PROLOG_ALIGNMENT-1)));
+
+       /* Allocate a zeroed buffer to zero the padding bytes */
+       buf = calloc(prolog_size, 1);
+       if (buf == NULL) {
+               fprintf(stderr, "Error: failed allocating checksum buffer\n");
+               return 1;
+       }
+
+       header = (header_t *)buf;
+       header->magic       = MAIN_HDR_MAGIC;
+       header->prolog_size = prolog_size;
+       header->load_addr   = opts.load_addr;
+       header->exec_addr   = opts.exec_addr;
+       header->io_arg_0    = opts.nfc_io_args;
+       header->ext_count   = ext_cnt;
+       header->aux_flags   = 0;
+       header->boot_image_size = (image_size + 3) & (~0x3);
+       header->boot_image_checksum = checksum32((uint32_t *)image_buf,
+                                                image_size);
+
+       update_uart(header);
+
+       /* Populate buffer with main header and extensions */
+       if (ext_cnt) {
+               ext_fd = fopen(ext_filename, "rb");
+               if (ext_fd == NULL) {
+                       fprintf(stderr,
+                               "Error: failed to open extensions file\n");
+                       goto error;
+               }
+
+               read = fread(&buf[main_hdr_size],
+                            get_file_size(ext_filename), 1, ext_fd);
+               if (read != 1) {
+                       fprintf(stderr,
+                               "Error: failed to open extensions file\n");
+                       goto error;
+               }
+
+#ifdef CONFIG_MVEBU_SECURE_BOOT
+               /* Secure boot mode? */
+               if (opts.sec_opts != 0) {
+                       ret = finalize_secure_ext(header, (uint8_t *)buf,
+                                                 prolog_size, image_buf,
+                                                 image_size);
+                       if (ret != 0) {
+                               fprintf(stderr, "Error: failed to handle ");
+                               fprintf(stderr, "secure extension!\n");
+                               goto error;
+                       }
+               } /* secure boot mode */
+#endif
+       }
+
+       /* Update the total prolog checksum */
+       header->prolog_checksum = checksum32((uint32_t *)buf, prolog_size);
+
+       /* Now spill everything to output file */
+       written = fwrite(buf, prolog_size, 1, out_fd);
+       if (written != 1) {
+               fprintf(stderr,
+                       "Error: failed to write prolog to output file\n");
+               goto error;
+       }
+
+       ret = 0;
+
+error:
+       free(buf);
+       return ret;
+}
+
/*
 * Write the boot payload to the output file, padded up to a 4-byte
 * boundary.  Note: up to 3 bytes beyond image_size are read from 'buf',
 * so callers must supply a buffer allocated to at least the aligned
 * size.  Returns 0 on success, 1 on write failure.
 */
int write_boot_image(uint8_t *buf, uint32_t image_size, FILE *out_fd)
{
	/* Image size must be aligned to 4 bytes */
	int padded_size = (image_size + 3) & (~0x3);

	if (fwrite(buf, padded_size, 1, out_fd) != 1) {
		fprintf(stderr, "Error: Failed to write boot image\n");
		return 1;
	}

	return 0;
}
+
+int main(int argc, char *argv[])
+{
+       char in_file[MAX_FILENAME+1];
+       char out_file[MAX_FILENAME+1];
+       char ext_file[MAX_FILENAME+1];
+       FILE *in_fd = NULL;
+       FILE *out_fd = NULL;
+       int parse = 0;
+       int ext_cnt = 0;
+       int opt;
+       int ret = 0;
+       int image_size;
+       uint8_t *image_buf = NULL;
+       int read;
+       uint32_t nand_block_size_kb, mlc_nand;
+
+       /* Create temporary file for building extensions
+        * Use process ID for allowing multiple parallel runs
+        */
+       snprintf(ext_file, MAX_FILENAME, "/tmp/ext_file-%x", getpid());
+
+       while ((opt = getopt(argc, argv, "hpms:i:l:e:a:b:u:n:t:c:k:")) != -1) {
+               switch (opt) {
+               case 'h':
+                       usage();
+                       break;
+               case 'l':
+                       opts.load_addr = strtoul(optarg, NULL, 0);
+                       break;
+               case 'e':
+                       opts.exec_addr = strtoul(optarg, NULL, 0);
+                       break;
+               case 'm':
+                       opts.disable_print = 1;
+                       break;
+               case 'u':
+                       opts.baudrate = strtoul(optarg, NULL, 0);
+                       break;
+               case 'b':
+                       strncpy(opts.bin_ext_file, optarg, MAX_FILENAME);
+                       ext_cnt++;
+                       break;
+               case 'p':
+                       parse = 1;
+                       break;
+               case 'n':
+                       nand_block_size_kb = strtoul(optarg, NULL, 0);
+                       opts.nfc_io_args |= (nand_block_size_kb / 64);
+                       break;
+               case 't':
+                       mlc_nand = 0;
+                       if (!strncmp("MLC", optarg, 3))
+                               mlc_nand = 1;
+                       opts.nfc_io_args |= (mlc_nand << 8);
+                       break;
+#ifdef CONFIG_MVEBU_SECURE_BOOT
+               case 'c': /* SEC extension */
+                       strncpy(opts.sec_cfg_file, optarg, MAX_FILENAME);
+                       ext_cnt++;
+                       break;
+               case 'k':
+                       opts.key_index = strtoul(optarg, NULL, 0);
+                       break;
+#endif
+               default: /* '?' */
+                       usage_err("Unknown argument");
+                       exit(EXIT_FAILURE);
+               }
+       }
+
+       /* Check validity of inputes */
+       if (opts.load_addr % 8)
+               usage_err("Load address must be 8 bytes aligned");
+
+       if (opts.baudrate % 1200)
+               usage_err("Baudrate must be a multiple of 1200");
+
+       /* The remaining arguments are the input
+        * and potentially output file
+        */
+       /* Input file must exist so exit if not */
+       if (optind >= argc)
+               usage_err("missing input file name");
+
+       strncpy(in_file, argv[optind], MAX_FILENAME);
+       optind++;
+
+       /* Output file must exist in non parse mode */
+       if (optind < argc)
+               strncpy(out_file, argv[optind], MAX_FILENAME);
+       else if (!parse)
+               usage_err("missing output file name");
+
+       /* open the input file */
+       in_fd = fopen(in_file, "rb");
+       if (in_fd == NULL) {
+               printf("Error: Failed to open input file %s\n", in_file);
+               goto main_exit;
+       }
+
+       /* Read the input file to buffer */
+       image_size = get_file_size(in_file);
+       image_buf = calloc((image_size + AES_BLOCK_SZ - 1) &
+                          ~(AES_BLOCK_SZ - 1), 1);
+       if (image_buf == NULL) {
+               fprintf(stderr, "Error: failed allocating input buffer\n");
+               return 1;
+       }
+
+       read = fread(image_buf, image_size, 1, in_fd);
+       if (read != 1) {
+               fprintf(stderr, "Error: failed to read input file\n");
+               goto main_exit;
+       }
+
+       /* Parse the input image and leave */
+       if (parse) {
+               if (opts.key_index >= CSK_ARR_SZ) {
+                       fprintf(stderr,
+                               "Wrong key IDX value. Valid values 0 - %d\n",
+                               CSK_ARR_SZ - 1);
+                       goto main_exit;
+               }
+               ret = parse_image(image_buf, image_size);
+               goto main_exit;
+       }
+
+       /* Create a blob file from all extensions */
+       if (ext_cnt) {
+               ret = format_extensions(ext_file);
+               if (ret)
+                       goto main_exit;
+       }
+
+       out_fd = fopen(out_file, "wb");
+       if (out_fd == NULL) {
+               fprintf(stderr,
+                       "Error: Failed to open output file %s\n", out_file);
+               goto main_exit;
+       }
+
+       ret = write_prolog(ext_cnt, ext_file, image_buf, image_size, out_fd);
+       if (ret)
+               goto main_exit;
+
+#ifdef CONFIG_MVEBU_SECURE_BOOT
+       if (opts.sec_opts && (opts.sec_opts->encrypted_image != 0) &&
+           (opts.sec_opts->enc_image_sz != 0)) {
+               ret = write_boot_image(opts.sec_opts->encrypted_image,
+                                      opts.sec_opts->enc_image_sz, out_fd);
+       } else
+#endif
+               ret = write_boot_image(image_buf, image_size, out_fd);
+       if (ret)
+               goto main_exit;
+
+main_exit:
+       if (in_fd)
+               fclose(in_fd);
+
+       if (out_fd)
+               fclose(out_fd);
+
+       if (image_buf)
+               free(image_buf);
+
+       unlink(ext_file);
+
+#ifdef CONFIG_MVEBU_SECURE_BOOT
+       if (opts.sec_opts) {
+               if (opts.sec_opts->encrypted_image)
+                       free(opts.sec_opts->encrypted_image);
+               free(opts.sec_opts);
+       }
+#endif
+       exit(ret);
+}
diff --git a/tools/doimage/doimage.mk b/tools/doimage/doimage.mk
new file mode 100644 (file)
index 0000000..2b751d4
--- /dev/null
@@ -0,0 +1,15 @@
#
# Copyright (C) 2018 Marvell International Ltd.
#
# SPDX-License-Identifier:     BSD-3-Clause
# https://spdx.org/licenses

# Default doimage load/execution addresses (-l/-e flags)
DOIMAGE_FLAGS          ?=      -l 0x4100000 -e 0x4100000


#NAND params
#Open and update the below when using NAND as a boot device.

# NAND geometry forwarded to doimage: erase-block size in KB (-n) and
# cell type, SLC or MLC (-t)
CONFIG_MVEBU_NAND_BLOCK_SIZE   := 256
CONFIG_MVEBU_NAND_CELL_TYPE    := SLC
NAND_DOIMAGE_FLAGS := -t $(CONFIG_MVEBU_NAND_CELL_TYPE) -n $(CONFIG_MVEBU_NAND_BLOCK_SIZE)