/*
 * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
8 #include <arch_helpers.h>
10 #include <common_def.h>
12 #include <context_mgmt.h>
14 #include <platform_def.h>
16 #include <secure_partition.h>
19 #include <xlat_tables_v2.h>
21 #include "spm_private.h"
22 #include "spm_shim_private.h"
/*
 * Place translation tables by default along with the ones used by BL31.
 * Platforms may override this by defining PLAT_SP_IMAGE_XLAT_SECTION_NAME
 * in their platform_def.h before this header is processed.
 */
#ifndef PLAT_SP_IMAGE_XLAT_SECTION_NAME
#define PLAT_SP_IMAGE_XLAT_SECTION_NAME	"xlat_table"
#endif
29 /* Allocate and initialise the translation context for the secure partition. */
30 REGISTER_XLAT_CONTEXT2(secure_partition
,
31 PLAT_SP_IMAGE_MMAP_REGIONS
,
32 PLAT_SP_IMAGE_MAX_XLAT_TABLES
,
33 PLAT_VIRT_ADDR_SPACE_SIZE
, PLAT_PHY_ADDR_SPACE_SIZE
,
34 EL1_EL0_REGIME
, PLAT_SP_IMAGE_XLAT_SECTION_NAME
);
36 /* Export a handle on the secure partition translation context */
37 xlat_ctx_t
*secure_partition_xlat_ctx_handle
= &secure_partition_xlat_ctx
;
39 /* Setup context of the Secure Partition */
40 void secure_partition_setup(void)
42 VERBOSE("S-EL1/S-EL0 context setup start...\n");
44 cpu_context_t
*ctx
= cm_get_context(SECURE
);
46 /* Make sure that we got a Secure context. */
49 /* Assert we are in Secure state. */
50 assert((read_scr_el3() & SCR_NS_BIT
) == 0);
52 /* Disable MMU at EL1. */
53 disable_mmu_icache_el1();
55 /* Invalidate TLBs at EL1. */
59 * General-Purpose registers
60 * -------------------------
64 * X0: Virtual address of a buffer shared between EL3 and Secure EL0.
65 * The buffer will be mapped in the Secure EL1 translation regime
66 * with Normal IS WBWA attributes and RO data and Execute Never
67 * instruction access permissions.
69 * X1: Size of the buffer in bytes
71 * X2: cookie value (Implementation Defined)
73 * X3: cookie value (Implementation Defined)
75 * X4 to X30 = 0 (already done by cm_init_my_context())
77 write_ctx_reg(get_gpregs_ctx(ctx
), CTX_GPREG_X0
, PLAT_SPM_BUF_BASE
);
78 write_ctx_reg(get_gpregs_ctx(ctx
), CTX_GPREG_X1
, PLAT_SPM_BUF_SIZE
);
79 write_ctx_reg(get_gpregs_ctx(ctx
), CTX_GPREG_X2
, PLAT_SPM_COOKIE_0
);
80 write_ctx_reg(get_gpregs_ctx(ctx
), CTX_GPREG_X3
, PLAT_SPM_COOKIE_1
);
83 * SP_EL0: A non-zero value will indicate to the SP that the SPM has
84 * initialized the stack pointer for the current CPU through
85 * implementation defined means. The value will be 0 otherwise.
87 write_ctx_reg(get_gpregs_ctx(ctx
), CTX_GPREG_SP_EL0
,
88 PLAT_SP_IMAGE_STACK_BASE
+ PLAT_SP_IMAGE_STACK_PCPU_SIZE
);
91 * Setup translation tables
92 * ------------------------
97 /* Get max granularity supported by the platform. */
99 u_register_t id_aa64mmfr0_el1
= read_id_aa64mmfr0_el1();
101 int tgran64_supported
=
102 ((id_aa64mmfr0_el1
>> ID_AA64MMFR0_EL1_TGRAN64_SHIFT
) &
103 ID_AA64MMFR0_EL1_TGRAN64_MASK
) ==
104 ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED
;
106 int tgran16_supported
=
107 ((id_aa64mmfr0_el1
>> ID_AA64MMFR0_EL1_TGRAN16_SHIFT
) &
108 ID_AA64MMFR0_EL1_TGRAN16_MASK
) ==
109 ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED
;
111 int tgran4_supported
=
112 ((id_aa64mmfr0_el1
>> ID_AA64MMFR0_EL1_TGRAN4_SHIFT
) &
113 ID_AA64MMFR0_EL1_TGRAN4_MASK
) ==
114 ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED
;
116 uintptr_t max_granule_size
;
118 if (tgran64_supported
) {
119 max_granule_size
= 64 * 1024;
120 } else if (tgran16_supported
) {
121 max_granule_size
= 16 * 1024;
123 assert(tgran4_supported
);
124 max_granule_size
= 4 * 1024;
127 VERBOSE("Max translation granule supported: %lu KiB\n",
128 max_granule_size
/ 1024);
130 uintptr_t max_granule_size_mask
= max_granule_size
- 1;
132 /* Base must be aligned to the max granularity */
133 assert((ARM_SP_IMAGE_NS_BUF_BASE
& max_granule_size_mask
) == 0);
135 /* Size must be a multiple of the max granularity */
136 assert((ARM_SP_IMAGE_NS_BUF_SIZE
& max_granule_size_mask
) == 0);
138 #endif /* ENABLE_ASSERTIONS */
140 /* This region contains the exception vectors used at S-EL1. */
141 const mmap_region_t sel1_exception_vectors
=
142 MAP_REGION_FLAT(SPM_SHIM_EXCEPTIONS_START
,
143 SPM_SHIM_EXCEPTIONS_SIZE
,
144 MT_CODE
| MT_SECURE
| MT_PRIVILEGED
);
145 mmap_add_region_ctx(&secure_partition_xlat_ctx
,
146 &sel1_exception_vectors
);
148 mmap_add_ctx(&secure_partition_xlat_ctx
,
149 plat_get_secure_partition_mmap(NULL
));
151 init_xlat_tables_ctx(&secure_partition_xlat_ctx
);
154 * MMU-related registers
155 * ---------------------
158 /* Set attributes in the right indices of the MAIR */
159 u_register_t mair_el1
=
160 MAIR_ATTR_SET(ATTR_DEVICE
, ATTR_DEVICE_INDEX
) |
161 MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR
, ATTR_IWBWA_OWBWA_NTR_INDEX
) |
162 MAIR_ATTR_SET(ATTR_NON_CACHEABLE
, ATTR_NON_CACHEABLE_INDEX
);
164 write_ctx_reg(get_sysregs_ctx(ctx
), CTX_MAIR_EL1
, mair_el1
);
167 u_register_t tcr_ps_bits
= tcr_physical_addr_size_bits(PLAT_PHY_ADDR_SPACE_SIZE
);
169 u_register_t tcr_el1
=
170 /* Size of region addressed by TTBR0_EL1 = 2^(64-T0SZ) bytes. */
171 (64 - __builtin_ctzl(PLAT_VIRT_ADDR_SPACE_SIZE
)) |
172 /* Inner and outer WBWA, shareable. */
173 TCR_SH_INNER_SHAREABLE
| TCR_RGN_OUTER_WBA
| TCR_RGN_INNER_WBA
|
174 /* Set the granularity to 4KB. */
176 /* Limit Intermediate Physical Address Size. */
177 tcr_ps_bits
<< TCR_EL1_IPS_SHIFT
|
178 /* Disable translations using TBBR1_EL1. */
180 /* The remaining fields related to TBBR1_EL1 are left as zero. */
184 /* Enable translations using TBBR0_EL1 */
188 write_ctx_reg(get_sysregs_ctx(ctx
), CTX_TCR_EL1
, tcr_el1
);
190 /* Setup SCTLR_EL1 */
191 u_register_t sctlr_el1
= read_ctx_reg(get_sysregs_ctx(ctx
), CTX_SCTLR_EL1
);
195 /* Don't trap DC CVAU, DC CIVAC, DC CVAC, DC CVAP, or IC IVAU */
197 /* RW regions at xlat regime EL1&0 are forced to be XN. */
199 /* Don't trap to EL1 execution of WFI or WFE at EL0. */
200 SCTLR_NTWI_BIT
| SCTLR_NTWE_BIT
|
201 /* Don't trap to EL1 accesses to CTR_EL0 from EL0. */
203 /* Don't trap to EL1 execution of DZ ZVA at EL0. */
205 /* Enable SP Alignment check for EL0 */
207 /* Allow cacheable data and instr. accesses to normal memory. */
208 SCTLR_C_BIT
| SCTLR_I_BIT
|
209 /* Alignment fault checking enabled when at EL1 and EL0. */
216 /* Explicit data accesses at EL0 are little-endian. */
218 /* Accesses to DAIF from EL0 are trapped to EL1. */
222 write_ctx_reg(get_sysregs_ctx(ctx
), CTX_SCTLR_EL1
, sctlr_el1
);
224 /* Point TTBR0_EL1 at the tables of the context created for the SP. */
225 write_ctx_reg(get_sysregs_ctx(ctx
), CTX_TTBR0_EL1
,
226 (u_register_t
)secure_partition_base_xlat_table
);
229 * Setup other system registers
230 * ----------------------------
233 /* Shim Exception Vector Base Address */
234 write_ctx_reg(get_sysregs_ctx(ctx
), CTX_VBAR_EL1
,
235 SPM_SHIM_EXCEPTIONS_PTR
);
238 * FPEN: Forbid the Secure Partition to access FP/SIMD registers.
239 * TTA: Enable access to trace registers.
240 * ZEN (v8.2): Trap SVE instructions and access to SVE registers.
242 write_ctx_reg(get_sysregs_ctx(ctx
), CTX_CPACR_EL1
,
243 CPACR_EL1_FPEN(CPACR_EL1_FP_TRAP_ALL
));
246 * Prepare information in buffer shared between EL3 and S-EL0
247 * ----------------------------------------------------------
250 void *shared_buf_ptr
= (void *) PLAT_SPM_BUF_BASE
;
252 /* Copy the boot information into the shared buffer with the SP. */
253 assert((uintptr_t)shared_buf_ptr
+ sizeof(secure_partition_boot_info_t
)
254 <= (PLAT_SPM_BUF_BASE
+ PLAT_SPM_BUF_SIZE
));
256 assert(PLAT_SPM_BUF_BASE
<= (UINTPTR_MAX
- PLAT_SPM_BUF_SIZE
+ 1));
258 const secure_partition_boot_info_t
*sp_boot_info
=
259 plat_get_secure_partition_boot_info(NULL
);
261 assert(sp_boot_info
!= NULL
);
263 memcpy((void *) shared_buf_ptr
, (const void *) sp_boot_info
,
264 sizeof(secure_partition_boot_info_t
));
266 /* Pointer to the MP information from the platform port. */
267 secure_partition_mp_info_t
*sp_mp_info
=
268 ((secure_partition_boot_info_t
*) shared_buf_ptr
)->mp_info
;
270 assert(sp_mp_info
!= NULL
);
273 * Point the shared buffer MP information pointer to where the info will
274 * be populated, just after the boot info.
276 ((secure_partition_boot_info_t
*) shared_buf_ptr
)->mp_info
=
277 (secure_partition_mp_info_t
*) ((uintptr_t)shared_buf_ptr
278 + sizeof(secure_partition_boot_info_t
));
281 * Update the shared buffer pointer to where the MP information for the
282 * payload will be populated
284 shared_buf_ptr
= ((secure_partition_boot_info_t
*) shared_buf_ptr
)->mp_info
;
287 * Copy the cpu information into the shared buffer area after the boot
290 assert(sp_boot_info
->num_cpus
<= PLATFORM_CORE_COUNT
);
292 assert((uintptr_t)shared_buf_ptr
293 <= (PLAT_SPM_BUF_BASE
+ PLAT_SPM_BUF_SIZE
-
294 (sp_boot_info
->num_cpus
* sizeof(*sp_mp_info
))));
296 memcpy(shared_buf_ptr
, (const void *) sp_mp_info
,
297 sp_boot_info
->num_cpus
* sizeof(*sp_mp_info
));
300 * Calculate the linear indices of cores in boot information for the
301 * secure partition and flag the primary CPU
303 sp_mp_info
= (secure_partition_mp_info_t
*) shared_buf_ptr
;
305 for (unsigned int index
= 0; index
< sp_boot_info
->num_cpus
; index
++) {
306 u_register_t mpidr
= sp_mp_info
[index
].mpidr
;
308 sp_mp_info
[index
].linear_id
= plat_core_pos_by_mpidr(mpidr
);
309 if (plat_my_core_pos() == sp_mp_info
[index
].linear_id
)
310 sp_mp_info
[index
].flags
|= MP_INFO_FLAG_PRIMARY_CPU
;
313 VERBOSE("S-EL1/S-EL0 context setup end.\n");